re PR target/49713 (Conflicting types for 'arm_dbx_register_number')
[gcc.git] / gcc / dwarf2cfi.c
1 /* Dwarf2 Call Frame Information helper routines.
2 Copyright (C) 1992, 1993, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
3 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
4 Free Software Foundation, Inc.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "version.h"
27 #include "flags.h"
28 #include "rtl.h"
29 #include "function.h"
30 #include "dwarf2.h"
31 #include "dwarf2out.h"
32 #include "dwarf2asm.h"
33 #include "ggc.h"
34 #include "tm_p.h"
35 #include "target.h"
36 #include "common/common-target.h"
37 #include "tree-pass.h"
38
39 #include "except.h" /* expand_builtin_dwarf_sp_column */
40 #include "expr.h" /* init_return_column_size */
41 #include "regs.h" /* expand_builtin_init_dwarf_reg_sizes */
42 #include "output.h" /* asm_out_file */
43 #include "debug.h" /* dwarf2out_do_frame, dwarf2out_do_cfi_asm */
44
45
46 /* ??? Poison these here until it can be done generically. They've been
47 totally replaced in this file; make sure it stays that way. */
48 #undef DWARF2_UNWIND_INFO
49 #undef DWARF2_FRAME_INFO
50 #if (GCC_VERSION >= 3000)
51 #pragma GCC poison DWARF2_UNWIND_INFO DWARF2_FRAME_INFO
52 #endif
53
54 #ifndef INCOMING_RETURN_ADDR_RTX
55 #define INCOMING_RETURN_ADDR_RTX (gcc_unreachable (), NULL_RTX)
56 #endif
57
58 /* Maximum size (in bytes) of an artificially generated label. */
59 #define MAX_ARTIFICIAL_LABEL_BYTES 30
60 \f
/* A vector of call frame insns for the CIE.  */
cfi_vec cie_cfi_vec;

/* Counter used to generate unique "LCFI" labels; GC-rooted so the
   numbering persists across functions.  */
static GTY(()) unsigned long dwarf2out_cfi_label_num;

/* The insn after which a new CFI note should be emitted.  */
static rtx cfi_insn;

/* When non-null, add_cfi will add the CFI to this vector.  */
static cfi_vec *add_cfi_vec;

/* True if remember_state should be emitted before following CFI directive.  */
static bool emit_cfa_remember;

/* True if any CFI directives were emitted at the current insn.  */
static bool any_cfis_emitted;

/* Short-hand for commonly used register numbers (DWARF columns for the
   stack and frame pointers).  */
static unsigned dw_stack_pointer_regnum;
static unsigned dw_frame_pointer_regnum;


/* Forward declarations for the epilogue state save/restore pair.  */
static void dwarf2out_cfi_begin_epilogue (rtx insn);
static void dwarf2out_frame_debug_restore_state (void);
86 \f
87 /* Hook used by __throw. */
88
89 rtx
90 expand_builtin_dwarf_sp_column (void)
91 {
92 unsigned int dwarf_regnum = DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM);
93 return GEN_INT (DWARF2_FRAME_REG_OUT (dwarf_regnum, 1));
94 }
95
96 /* MEM is a memory reference for the register size table, each element of
97 which has mode MODE. Initialize column C as a return address column. */
98
99 static void
100 init_return_column_size (enum machine_mode mode, rtx mem, unsigned int c)
101 {
102 HOST_WIDE_INT offset = c * GET_MODE_SIZE (mode);
103 HOST_WIDE_INT size = GET_MODE_SIZE (Pmode);
104 emit_move_insn (adjust_address (mem, mode, offset), GEN_INT (size));
105 }
106
/* Generate code to initialize the register size table.  */

void
expand_builtin_init_dwarf_reg_sizes (tree address)
{
  unsigned int i;
  enum machine_mode mode = TYPE_MODE (char_type_node);
  rtx addr = expand_normal (address);
  rtx mem = gen_rtx_MEM (BLKmode, addr);
  bool wrote_return_column = false;

  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    {
      unsigned int dnum = DWARF_FRAME_REGNUM (i);
      unsigned int rnum = DWARF2_FRAME_REG_OUT (dnum, 1);

      if (rnum < DWARF_FRAME_REGISTERS)
	{
	  HOST_WIDE_INT offset = rnum * GET_MODE_SIZE (mode);
	  enum machine_mode save_mode = reg_raw_mode[i];
	  HOST_WIDE_INT size;

	  /* If the register is only partially call-clobbered, the
	     unwinder saves a narrower mode; ask the target which.  */
	  if (HARD_REGNO_CALL_PART_CLOBBERED (i, save_mode))
	    save_mode = choose_hard_reg_mode (i, 1, true);
	  if (dnum == DWARF_FRAME_RETURN_COLUMN)
	    {
	      /* A VOIDmode return column means no register is saved
		 there; skip it.  */
	      if (save_mode == VOIDmode)
		continue;
	      wrote_return_column = true;
	    }
	  size = GET_MODE_SIZE (save_mode);
	  /* NOTE(review): offset looks non-negative for any in-range
	     rnum; guard presumably kept for safety — confirm.  */
	  if (offset < 0)
	    continue;

	  emit_move_insn (adjust_address (mem, mode, offset),
			  gen_int_mode (size, mode));
	}
    }

  if (!wrote_return_column)
    init_return_column_size (mode, mem, DWARF_FRAME_RETURN_COLUMN);

#ifdef DWARF_ALT_FRAME_RETURN_COLUMN
  init_return_column_size (mode, mem, DWARF_ALT_FRAME_RETURN_COLUMN);
#endif

  /* Let the target record sizes for columns not covered above.  */
  targetm.init_dwarf_reg_sizes_extra (address);
}
155
156 /* Divide OFF by DWARF_CIE_DATA_ALIGNMENT, asserting no remainder. */
157
158 static inline HOST_WIDE_INT
159 div_data_align (HOST_WIDE_INT off)
160 {
161 HOST_WIDE_INT r = off / DWARF_CIE_DATA_ALIGNMENT;
162 gcc_assert (r * DWARF_CIE_DATA_ALIGNMENT == off);
163 return r;
164 }
165
166 /* Return true if we need a signed version of a given opcode
167 (e.g. DW_CFA_offset_extended_sf vs DW_CFA_offset_extended). */
168
169 static inline bool
170 need_data_align_sf_opcode (HOST_WIDE_INT off)
171 {
172 return DWARF_CIE_DATA_ALIGNMENT < 0 ? off > 0 : off < 0;
173 }
174
175 /* Return a pointer to a newly allocated Call Frame Instruction. */
176
177 static inline dw_cfi_ref
178 new_cfi (void)
179 {
180 dw_cfi_ref cfi = ggc_alloc_dw_cfi_node ();
181
182 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = 0;
183 cfi->dw_cfi_oprnd2.dw_cfi_reg_num = 0;
184
185 return cfi;
186 }
187
188 /* Generate a new label for the CFI info to refer to. */
189
190 static char *
191 dwarf2out_cfi_label (void)
192 {
193 int num = dwarf2out_cfi_label_num++;
194 char label[20];
195
196 ASM_GENERATE_INTERNAL_LABEL (label, "LCFI", num);
197
198 return xstrdup (label);
199 }
200
201 /* Add CFI either to the current insn stream or to a vector, or both. */
202
203 static void
204 add_cfi (dw_cfi_ref cfi)
205 {
206 if (emit_cfa_remember)
207 {
208 dw_cfi_ref cfi_remember;
209
210 /* Emit the state save. */
211 emit_cfa_remember = false;
212 cfi_remember = new_cfi ();
213 cfi_remember->dw_cfi_opc = DW_CFA_remember_state;
214 add_cfi (cfi_remember);
215 }
216
217 any_cfis_emitted = true;
218 if (cfi_insn != NULL)
219 {
220 cfi_insn = emit_note_after (NOTE_INSN_CFI, cfi_insn);
221 NOTE_CFI (cfi_insn) = cfi;
222 }
223 if (add_cfi_vec != NULL)
224 VEC_safe_push (dw_cfi_ref, gc, *add_cfi_vec, cfi);
225 }
226
227 /* This function fills in aa dw_cfa_location structure from a dwarf location
228 descriptor sequence. */
229
230 static void
231 get_cfa_from_loc_descr (dw_cfa_location *cfa, struct dw_loc_descr_struct *loc)
232 {
233 struct dw_loc_descr_struct *ptr;
234 cfa->offset = 0;
235 cfa->base_offset = 0;
236 cfa->indirect = 0;
237 cfa->reg = -1;
238
239 for (ptr = loc; ptr != NULL; ptr = ptr->dw_loc_next)
240 {
241 enum dwarf_location_atom op = ptr->dw_loc_opc;
242
243 switch (op)
244 {
245 case DW_OP_reg0:
246 case DW_OP_reg1:
247 case DW_OP_reg2:
248 case DW_OP_reg3:
249 case DW_OP_reg4:
250 case DW_OP_reg5:
251 case DW_OP_reg6:
252 case DW_OP_reg7:
253 case DW_OP_reg8:
254 case DW_OP_reg9:
255 case DW_OP_reg10:
256 case DW_OP_reg11:
257 case DW_OP_reg12:
258 case DW_OP_reg13:
259 case DW_OP_reg14:
260 case DW_OP_reg15:
261 case DW_OP_reg16:
262 case DW_OP_reg17:
263 case DW_OP_reg18:
264 case DW_OP_reg19:
265 case DW_OP_reg20:
266 case DW_OP_reg21:
267 case DW_OP_reg22:
268 case DW_OP_reg23:
269 case DW_OP_reg24:
270 case DW_OP_reg25:
271 case DW_OP_reg26:
272 case DW_OP_reg27:
273 case DW_OP_reg28:
274 case DW_OP_reg29:
275 case DW_OP_reg30:
276 case DW_OP_reg31:
277 cfa->reg = op - DW_OP_reg0;
278 break;
279 case DW_OP_regx:
280 cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
281 break;
282 case DW_OP_breg0:
283 case DW_OP_breg1:
284 case DW_OP_breg2:
285 case DW_OP_breg3:
286 case DW_OP_breg4:
287 case DW_OP_breg5:
288 case DW_OP_breg6:
289 case DW_OP_breg7:
290 case DW_OP_breg8:
291 case DW_OP_breg9:
292 case DW_OP_breg10:
293 case DW_OP_breg11:
294 case DW_OP_breg12:
295 case DW_OP_breg13:
296 case DW_OP_breg14:
297 case DW_OP_breg15:
298 case DW_OP_breg16:
299 case DW_OP_breg17:
300 case DW_OP_breg18:
301 case DW_OP_breg19:
302 case DW_OP_breg20:
303 case DW_OP_breg21:
304 case DW_OP_breg22:
305 case DW_OP_breg23:
306 case DW_OP_breg24:
307 case DW_OP_breg25:
308 case DW_OP_breg26:
309 case DW_OP_breg27:
310 case DW_OP_breg28:
311 case DW_OP_breg29:
312 case DW_OP_breg30:
313 case DW_OP_breg31:
314 cfa->reg = op - DW_OP_breg0;
315 cfa->base_offset = ptr->dw_loc_oprnd1.v.val_int;
316 break;
317 case DW_OP_bregx:
318 cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
319 cfa->base_offset = ptr->dw_loc_oprnd2.v.val_int;
320 break;
321 case DW_OP_deref:
322 cfa->indirect = 1;
323 break;
324 case DW_OP_plus_uconst:
325 cfa->offset = ptr->dw_loc_oprnd1.v.val_unsigned;
326 break;
327 default:
328 gcc_unreachable ();
329 }
330 }
331 }
332
/* Find the previous value for the CFA, iteratively.  CFI is the opcode
   to interpret, *LOC will be updated as necessary, *REMEMBER is used for
   one level of remember/restore state processing.  */

void
lookup_cfa_1 (dw_cfi_ref cfi, dw_cfa_location *loc, dw_cfa_location *remember)
{
  switch (cfi->dw_cfi_opc)
    {
    case DW_CFA_def_cfa_offset:
    case DW_CFA_def_cfa_offset_sf:
      /* Offset changes; register stays the same.  */
      loc->offset = cfi->dw_cfi_oprnd1.dw_cfi_offset;
      break;
    case DW_CFA_def_cfa_register:
      /* Register changes; offset stays the same.  */
      loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
      break;
    case DW_CFA_def_cfa:
    case DW_CFA_def_cfa_sf:
      /* Both register and offset are redefined.  */
      loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
      loc->offset = cfi->dw_cfi_oprnd2.dw_cfi_offset;
      break;
    case DW_CFA_def_cfa_expression:
      /* Decode the attached location expression into *LOC.  */
      get_cfa_from_loc_descr (loc, cfi->dw_cfi_oprnd1.dw_cfi_loc);
      break;

    case DW_CFA_remember_state:
      /* Only a single level of remember/restore is supported here.  */
      gcc_assert (!remember->in_use);
      *remember = *loc;
      remember->in_use = 1;
      break;
    case DW_CFA_restore_state:
      gcc_assert (remember->in_use);
      *loc = *remember;
      remember->in_use = 0;
      break;

    default:
      /* Other CFIs do not affect the CFA computation.  */
      break;
    }
}
373
/* The current rule for calculating the DWARF2 canonical frame address.  */
static dw_cfa_location cfa;

/* A copy of the CFA, for comparison purposes; kept in sync by
   def_cfa_1 after each emitted CFI.  */
static dw_cfa_location old_cfa;

/* The register used for saving registers to the stack, and its offset
   from the CFA.  */
static dw_cfa_location cfa_store;

/* The current save location around an epilogue.  */
static dw_cfa_location cfa_remember;

/* Like cfa_remember, but a copy of old_cfa.  */
static dw_cfa_location old_cfa_remember;

/* The running total of the size of arguments pushed onto the stack;
   clamped to be non-negative.  */
static HOST_WIDE_INT args_size;

/* The last args_size we actually output.  */
static HOST_WIDE_INT old_args_size;
395
396 /* Determine if two dw_cfa_location structures define the same data. */
397
398 bool
399 cfa_equal_p (const dw_cfa_location *loc1, const dw_cfa_location *loc2)
400 {
401 return (loc1->reg == loc2->reg
402 && loc1->offset == loc2->offset
403 && loc1->indirect == loc2->indirect
404 && (loc1->indirect == 0
405 || loc1->base_offset == loc2->base_offset));
406 }
407
/* This routine does the actual work.  The CFA is now calculated from
   the dw_cfa_location structure.  */

static void
def_cfa_1 (dw_cfa_location *loc_p)
{
  dw_cfi_ref cfi;
  dw_cfa_location loc;

  /* Commit the new CFA globally and work on a local copy.  */
  cfa = *loc_p;
  loc = *loc_p;

  /* Keep the register-save base in sync when it tracks the CFA reg.  */
  if (cfa_store.reg == loc.reg && loc.indirect == 0)
    cfa_store.offset = loc.offset;

  /* If nothing changed, no need to issue any call frame instructions.  */
  if (cfa_equal_p (&loc, &old_cfa))
    return;

  cfi = new_cfi ();

  if (loc.reg == old_cfa.reg && !loc.indirect && !old_cfa.indirect)
    {
      /* Construct a "DW_CFA_def_cfa_offset <offset>" instruction, indicating
	 the CFA register did not change but the offset did.  The data
	 factoring for DW_CFA_def_cfa_offset_sf happens in output_cfi, or
	 in the assembler via the .cfi_def_cfa_offset directive.  */
      if (loc.offset < 0)
	cfi->dw_cfi_opc = DW_CFA_def_cfa_offset_sf;
      else
	cfi->dw_cfi_opc = DW_CFA_def_cfa_offset;
      cfi->dw_cfi_oprnd1.dw_cfi_offset = loc.offset;
    }

#ifndef MIPS_DEBUGGING_INFO  /* SGI dbx thinks this means no offset.  */
  else if (loc.offset == old_cfa.offset
	   && old_cfa.reg != INVALID_REGNUM
	   && !loc.indirect
	   && !old_cfa.indirect)
    {
      /* Construct a "DW_CFA_def_cfa_register <register>" instruction,
	 indicating the CFA register has changed to <register> but the
	 offset has not changed.  */
      cfi->dw_cfi_opc = DW_CFA_def_cfa_register;
      cfi->dw_cfi_oprnd1.dw_cfi_reg_num = loc.reg;
    }
#endif

  else if (loc.indirect == 0)
    {
      /* Construct a "DW_CFA_def_cfa <register> <offset>" instruction,
	 indicating the CFA register has changed to <register> with
	 the specified offset.  The data factoring for DW_CFA_def_cfa_sf
	 happens in output_cfi, or in the assembler via the .cfi_def_cfa
	 directive.  */
      if (loc.offset < 0)
	cfi->dw_cfi_opc = DW_CFA_def_cfa_sf;
      else
	cfi->dw_cfi_opc = DW_CFA_def_cfa;
      cfi->dw_cfi_oprnd1.dw_cfi_reg_num = loc.reg;
      cfi->dw_cfi_oprnd2.dw_cfi_offset = loc.offset;
    }
  else
    {
      /* Construct a DW_CFA_def_cfa_expression instruction to
	 calculate the CFA using a full location expression since no
	 register-offset pair is available.  */
      struct dw_loc_descr_struct *loc_list;

      cfi->dw_cfi_opc = DW_CFA_def_cfa_expression;
      loc_list = build_cfa_loc (&loc, 0);
      cfi->dw_cfi_oprnd1.dw_cfi_loc = loc_list;
    }

  add_cfi (cfi);
  /* Remember what we emitted so the next call can diff against it.  */
  old_cfa = loc;
}
485
486 /* Add the CFI for saving a register. REG is the CFA column number.
487 If SREG is -1, the register is saved at OFFSET from the CFA;
488 otherwise it is saved in SREG. */
489
490 static void
491 reg_save (unsigned int reg, unsigned int sreg, HOST_WIDE_INT offset)
492 {
493 dw_fde_ref fde = cfun ? cfun->fde : NULL;
494 dw_cfi_ref cfi = new_cfi ();
495
496 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
497
498 /* When stack is aligned, store REG using DW_CFA_expression with FP. */
499 if (fde
500 && fde->stack_realign
501 && sreg == INVALID_REGNUM)
502 {
503 cfi->dw_cfi_opc = DW_CFA_expression;
504 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
505 cfi->dw_cfi_oprnd2.dw_cfi_loc
506 = build_cfa_aligned_loc (&cfa, offset, fde->stack_realignment);
507 }
508 else if (sreg == INVALID_REGNUM)
509 {
510 if (need_data_align_sf_opcode (offset))
511 cfi->dw_cfi_opc = DW_CFA_offset_extended_sf;
512 else if (reg & ~0x3f)
513 cfi->dw_cfi_opc = DW_CFA_offset_extended;
514 else
515 cfi->dw_cfi_opc = DW_CFA_offset;
516 cfi->dw_cfi_oprnd2.dw_cfi_offset = offset;
517 }
518 else if (sreg == reg)
519 cfi->dw_cfi_opc = DW_CFA_same_value;
520 else
521 {
522 cfi->dw_cfi_opc = DW_CFA_register;
523 cfi->dw_cfi_oprnd2.dw_cfi_reg_num = sreg;
524 }
525
526 add_cfi (cfi);
527 }
528
/* Given a SET, calculate the amount of stack adjustment it
   contains.  */

static HOST_WIDE_INT
stack_adjust_offset (const_rtx pattern, HOST_WIDE_INT cur_args_size,
		     HOST_WIDE_INT cur_offset)
{
  const_rtx src = SET_SRC (pattern);
  const_rtx dest = SET_DEST (pattern);
  HOST_WIDE_INT offset = 0;
  enum rtx_code code;

  if (dest == stack_pointer_rtx)
    {
      code = GET_CODE (src);

      /* Assume (set (reg sp) (reg whatever)) sets args_size
	 level to 0.  */
      if (code == REG && src != stack_pointer_rtx)
	{
	  /* Return the delta that brings the running size to zero,
	     accounting for any adjustment already accumulated.  */
	  offset = -cur_args_size;
#ifndef STACK_GROWS_DOWNWARD
	  offset = -offset;
#endif
	  return offset - cur_offset;
	}

      if (! (code == PLUS || code == MINUS)
	  || XEXP (src, 0) != stack_pointer_rtx
	  || !CONST_INT_P (XEXP (src, 1)))
	return 0;

      /* (set (reg sp) (plus (reg sp) (const_int))) */
      offset = INTVAL (XEXP (src, 1));
      if (code == PLUS)
	offset = -offset;
      return offset;
    }

  /* A pop reads memory through an auto-modified SP; treat the memory
     side as the address of interest.  */
  if (MEM_P (src) && !MEM_P (dest))
    dest = src;
  if (MEM_P (dest))
    {
      /* (set (mem (pre_dec (reg sp))) (foo)) */
      src = XEXP (dest, 0);
      code = GET_CODE (src);

      switch (code)
	{
	case PRE_MODIFY:
	case POST_MODIFY:
	  if (XEXP (src, 0) == stack_pointer_rtx)
	    {
	      rtx val = XEXP (XEXP (src, 1), 1);
	      /* We handle only adjustments by constant amount.  */
	      gcc_assert (GET_CODE (XEXP (src, 1)) == PLUS
			  && CONST_INT_P (val));
	      offset = -INTVAL (val);
	      break;
	    }
	  return 0;

	case PRE_DEC:
	case POST_DEC:
	  /* Push: SP decreases by the size of the stored value.  */
	  if (XEXP (src, 0) == stack_pointer_rtx)
	    {
	      offset = GET_MODE_SIZE (GET_MODE (dest));
	      break;
	    }
	  return 0;

	case PRE_INC:
	case POST_INC:
	  /* Pop: SP increases by the size of the accessed value.  */
	  if (XEXP (src, 0) == stack_pointer_rtx)
	    {
	      offset = -GET_MODE_SIZE (GET_MODE (dest));
	      break;
	    }
	  return 0;

	default:
	  return 0;
	}
    }
  else
    return 0;

  return offset;
}
618
/* Precomputed args_size for CODE_LABELs and BARRIERs preceding them,
   indexed by INSN_UID.  An entry of -1 means not yet computed.  */

static HOST_WIDE_INT *barrier_args_size;
623
/* Helper function for compute_barrier_args_size.  Handle one insn.  */

static HOST_WIDE_INT
compute_barrier_args_size_1 (rtx insn, HOST_WIDE_INT cur_args_size,
			     VEC (rtx, heap) **next)
{
  HOST_WIDE_INT offset = 0;
  int i;

  if (! RTX_FRAME_RELATED_P (insn))
    {
      /* Prologue/epilogue insns are deliberately ignored here; see
	 dwarf2out_notice_stack_adjust.  */
      if (prologue_epilogue_contains (insn))
	/* Nothing */;
      else if (GET_CODE (PATTERN (insn)) == SET)
	offset = stack_adjust_offset (PATTERN (insn), cur_args_size, 0);
      else if (GET_CODE (PATTERN (insn)) == PARALLEL
	       || GET_CODE (PATTERN (insn)) == SEQUENCE)
	{
	  /* There may be stack adjustments inside compound insns.  Search
	     for them.  */
	  for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
	    if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
	      offset += stack_adjust_offset (XVECEXP (PATTERN (insn), 0, i),
					     cur_args_size, offset);
	}
    }
  else
    {
      /* For frame-related insns, only the non-frame-related SETs inside
	 a REG_FRAME_RELATED_EXPR note (if any) contribute.  */
      rtx expr = find_reg_note (insn, REG_FRAME_RELATED_EXPR, NULL_RTX);

      if (expr)
	{
	  expr = XEXP (expr, 0);
	  if (GET_CODE (expr) == PARALLEL
	      || GET_CODE (expr) == SEQUENCE)
	    for (i = 1; i < XVECLEN (expr, 0); i++)
	      {
		rtx elem = XVECEXP (expr, 0, i);

		if (GET_CODE (elem) == SET && !RTX_FRAME_RELATED_P (elem))
		  offset += stack_adjust_offset (elem, cur_args_size, offset);
	      }
	}
    }

#ifndef STACK_GROWS_DOWNWARD
  offset = -offset;
#endif

  /* args_size is never negative; clamp at zero.  */
  cur_args_size += offset;
  if (cur_args_size < 0)
    cur_args_size = 0;

  if (JUMP_P (insn))
    {
      rtx dest = JUMP_LABEL (insn);

      /* Propagate the computed size to the jump target and queue it
	 for scanning if not yet visited (entry still -1).  */
      if (dest)
	{
	  if (barrier_args_size [INSN_UID (dest)] < 0)
	    {
	      barrier_args_size [INSN_UID (dest)] = cur_args_size;
	      VEC_safe_push (rtx, heap, *next, dest);
	    }
	}
    }

  return cur_args_size;
}
693
/* Walk the whole function and compute args_size on BARRIERs.  */

static void
compute_barrier_args_size (void)
{
  int max_uid = get_max_uid (), i;
  rtx insn;
  VEC (rtx, heap) *worklist, *next, *tmp;

  /* -1 marks entries whose args_size has not been computed yet.  */
  barrier_args_size = XNEWVEC (HOST_WIDE_INT, max_uid);
  for (i = 0; i < max_uid; i++)
    barrier_args_size[i] = -1;

  /* Seed the worklist with the first insn at args_size 0.  */
  worklist = VEC_alloc (rtx, heap, 20);
  next = VEC_alloc (rtx, heap, 20);
  insn = get_insns ();
  barrier_args_size[INSN_UID (insn)] = 0;
  VEC_quick_push (rtx, worklist, insn);
  for (;;)
    {
      while (!VEC_empty (rtx, worklist))
	{
	  rtx prev, body, first_insn;
	  HOST_WIDE_INT cur_args_size;

	  first_insn = insn = VEC_pop (rtx, worklist);
	  cur_args_size = barrier_args_size[INSN_UID (insn)];
	  prev = prev_nonnote_insn (insn);
	  /* A BARRIER just before this point shares its args_size.  */
	  if (prev && BARRIER_P (prev))
	    barrier_args_size[INSN_UID (prev)] = cur_args_size;

	  /* Scan forward, accumulating stack adjustments, until we
	     reach a BARRIER or an already-visited label.  */
	  for (; insn; insn = NEXT_INSN (insn))
	    {
	      if (INSN_DELETED_P (insn) || NOTE_P (insn))
		continue;
	      if (BARRIER_P (insn))
		break;

	      if (LABEL_P (insn))
		{
		  if (insn == first_insn)
		    continue;
		  else if (barrier_args_size[INSN_UID (insn)] < 0)
		    {
		      barrier_args_size[INSN_UID (insn)] = cur_args_size;
		      continue;
		    }
		  else
		    {
		      /* The insns starting with this label have been
			 already scanned or are in the worklist.  */
		      break;
		    }
		}

	      body = PATTERN (insn);
	      if (GET_CODE (body) == SEQUENCE)
		{
		  /* Delay slots: insns executed only on the annulled
		     branch's taken path contribute to the target's
		     args_size, not the fall-through's.  */
		  HOST_WIDE_INT dest_args_size = cur_args_size;
		  for (i = 1; i < XVECLEN (body, 0); i++)
		    if (INSN_ANNULLED_BRANCH_P (XVECEXP (body, 0, 0))
			&& INSN_FROM_TARGET_P (XVECEXP (body, 0, i)))
		      dest_args_size
			= compute_barrier_args_size_1 (XVECEXP (body, 0, i),
						       dest_args_size, &next);
		    else
		      cur_args_size
			= compute_barrier_args_size_1 (XVECEXP (body, 0, i),
						       cur_args_size, &next);

		  if (INSN_ANNULLED_BRANCH_P (XVECEXP (body, 0, 0)))
		    compute_barrier_args_size_1 (XVECEXP (body, 0, 0),
						 dest_args_size, &next);
		  else
		    cur_args_size
		      = compute_barrier_args_size_1 (XVECEXP (body, 0, 0),
						     cur_args_size, &next);
		}
	      else
		cur_args_size
		  = compute_barrier_args_size_1 (insn, cur_args_size, &next);
	    }
	}

      if (VEC_empty (rtx, next))
	break;

      /* Swap WORKLIST with NEXT and truncate NEXT for next iteration.  */
      tmp = next;
      next = worklist;
      worklist = tmp;
      VEC_truncate (rtx, next, 0);
    }

  VEC_free (rtx, heap, worklist);
  VEC_free (rtx, heap, next);
}
791
792 /* Add a CFI to update the running total of the size of arguments
793 pushed onto the stack. */
794
795 static void
796 dwarf2out_args_size (HOST_WIDE_INT size)
797 {
798 dw_cfi_ref cfi;
799
800 if (size == old_args_size)
801 return;
802
803 old_args_size = size;
804
805 cfi = new_cfi ();
806 cfi->dw_cfi_opc = DW_CFA_GNU_args_size;
807 cfi->dw_cfi_oprnd1.dw_cfi_offset = size;
808 add_cfi (cfi);
809 }
810
/* Record a stack adjustment of OFFSET bytes.  */

static void
dwarf2out_stack_adjust (HOST_WIDE_INT offset)
{
  /* Track the CFA and the register-save base while they live in SP.  */
  if (cfa.reg == dw_stack_pointer_regnum)
    cfa.offset += offset;

  if (cfa_store.reg == dw_stack_pointer_regnum)
    cfa_store.offset += offset;

  /* With accumulated outgoing args there is no dynamic args_size to
     track; the CFA updates above are all that is needed.  */
  if (ACCUMULATE_OUTGOING_ARGS)
    return;

#ifndef STACK_GROWS_DOWNWARD
  offset = -offset;
#endif

  /* args_size is never negative; clamp at zero.  */
  args_size += offset;
  if (args_size < 0)
    args_size = 0;

  def_cfa_1 (&cfa);
  if (flag_asynchronous_unwind_tables)
    dwarf2out_args_size (args_size);
}
837
/* Check INSN to see if it looks like a push or a stack adjustment, and
   make a note of it if it does.  EH uses this information to find out
   how much extra space it needs to pop off the stack.  */

static void
dwarf2out_notice_stack_adjust (rtx insn, bool after_p)
{
  HOST_WIDE_INT offset;
  int i;

  /* Don't handle epilogues at all.  Certainly it would be wrong to do so
     with this function.  Proper support would require all frame-related
     insns to be marked, and to be able to handle saving state around
     epilogues textually in the middle of the function.  */
  if (prologue_epilogue_contains (insn))
    return;

  /* If INSN is an instruction from target of an annulled branch, the
     effects are for the target only and so current argument size
     shouldn't change at all.  */
  if (final_sequence
      && INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))
      && INSN_FROM_TARGET_P (insn))
    return;

  /* If only calls can throw, and we have a frame pointer,
     save up adjustments until we see the CALL_INSN.  */
  if (!flag_asynchronous_unwind_tables && cfa.reg != dw_stack_pointer_regnum)
    {
      if (CALL_P (insn) && !after_p)
	{
	  /* Extract the size of the args from the CALL rtx itself.  */
	  insn = PATTERN (insn);
	  if (GET_CODE (insn) == PARALLEL)
	    insn = XVECEXP (insn, 0, 0);
	  if (GET_CODE (insn) == SET)
	    insn = SET_SRC (insn);
	  gcc_assert (GET_CODE (insn) == CALL);
	  dwarf2out_args_size (INTVAL (XEXP (insn, 1)));
	}
      return;
    }

  if (CALL_P (insn) && !after_p)
    {
      /* Before a call, flush the accumulated args_size so the unwinder
	 knows how much to pop if the call throws.  */
      if (!flag_asynchronous_unwind_tables)
	dwarf2out_args_size (args_size);
      return;
    }
  else if (BARRIER_P (insn))
    {
      /* Don't call compute_barrier_args_size () if the only
	 BARRIER is at the end of function.  */
      if (barrier_args_size == NULL && next_nonnote_insn (insn))
	compute_barrier_args_size ();
      if (barrier_args_size == NULL)
	offset = 0;
      else
	{
	  offset = barrier_args_size[INSN_UID (insn)];
	  if (offset < 0)
	    offset = 0;
	}

      /* Convert the absolute precomputed size into a delta from the
	 current running args_size.  */
      offset -= args_size;
#ifndef STACK_GROWS_DOWNWARD
      offset = -offset;
#endif
    }
  else if (GET_CODE (PATTERN (insn)) == SET)
    offset = stack_adjust_offset (PATTERN (insn), args_size, 0);
  else if (GET_CODE (PATTERN (insn)) == PARALLEL
	   || GET_CODE (PATTERN (insn)) == SEQUENCE)
    {
      /* There may be stack adjustments inside compound insns.  Search
	 for them.  */
      for (offset = 0, i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
	if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
	  offset += stack_adjust_offset (XVECEXP (PATTERN (insn), 0, i),
					 args_size, offset);
    }
  else
    return;

  if (offset == 0)
    return;

  dwarf2out_stack_adjust (offset);
}
927
/* We delay emitting a register save until either (a) we reach the end
   of the prologue or (b) the register is clobbered.  This clusters
   register saves so that there are fewer pc advances.  */

struct GTY(()) queued_reg_save {
  struct queued_reg_save *next;
  /* The register being saved; PC_RTX stands for the return address.  */
  rtx reg;
  /* Offset from the CFA at which REG is saved, used when SAVED_REG
     is null.  */
  HOST_WIDE_INT cfa_offset;
  /* If non-null, REG is saved in this register rather than in memory.  */
  rtx saved_reg;
};

static GTY(()) struct queued_reg_save *queued_reg_saves;

/* The caller's ORIG_REG is saved in SAVED_IN_REG.  */
typedef struct GTY(()) reg_saved_in_data {
  rtx orig_reg;
  rtx saved_in_reg;
} reg_saved_in_data;

DEF_VEC_O (reg_saved_in_data);
DEF_VEC_ALLOC_O (reg_saved_in_data, gc);

/* A set of registers saved in other registers.  This is implemented as
   a flat array because it normally contains zero or 1 entry, depending
   on the target.  IA-64 is the big spender here, using a maximum of
   5 entries.  */
static GTY(()) VEC(reg_saved_in_data, gc) *regs_saved_in_regs;

/* The CIE-level return-address save.  NOTE(review): initialized
   elsewhere in this file, outside this chunk.  */
static GTY(()) reg_saved_in_data *cie_return_save;
957
958 /* Short-hand inline for the very common D_F_R (REGNO (x)) operation. */
959 /* ??? This ought to go into dwarf2out.h, except that dwarf2out.h is
960 used in places where rtl is prohibited. */
961
962 static inline unsigned
963 dwf_regno (const_rtx reg)
964 {
965 return DWARF_FRAME_REGNUM (REGNO (reg));
966 }
967
968 /* Compare X and Y for equivalence. The inputs may be REGs or PC_RTX. */
969
970 static bool
971 compare_reg_or_pc (rtx x, rtx y)
972 {
973 if (REG_P (x) && REG_P (y))
974 return REGNO (x) == REGNO (y);
975 return x == y;
976 }
977
/* Record SRC as being saved in DEST.  DEST may be null to delete an
   existing entry.  SRC may be a register or PC_RTX.  */

static void
record_reg_saved_in_reg (rtx dest, rtx src)
{
  reg_saved_in_data *elt;
  size_t i;

  /* If SRC already has an entry, update it in place, or remove it
     when DEST is null.  */
  FOR_EACH_VEC_ELT (reg_saved_in_data, regs_saved_in_regs, i, elt)
    if (compare_reg_or_pc (elt->orig_reg, src))
      {
	if (dest == NULL)
	  VEC_unordered_remove(reg_saved_in_data, regs_saved_in_regs, i);
	else
	  elt->saved_in_reg = dest;
	return;
      }

  /* Deleting a non-existent entry is a no-op.  */
  if (dest == NULL)
    return;

  elt = VEC_safe_push(reg_saved_in_data, gc, regs_saved_in_regs, NULL);
  elt->orig_reg = src;
  elt->saved_in_reg = dest;
}
1004
1005 /* Add an entry to QUEUED_REG_SAVES saying that REG is now saved at
1006 SREG, or if SREG is NULL then it is saved at OFFSET to the CFA. */
1007
1008 static void
1009 queue_reg_save (rtx reg, rtx sreg, HOST_WIDE_INT offset)
1010 {
1011 struct queued_reg_save *q;
1012
1013 /* Duplicates waste space, but it's also necessary to remove them
1014 for correctness, since the queue gets output in reverse order. */
1015 for (q = queued_reg_saves; q != NULL; q = q->next)
1016 if (compare_reg_or_pc (q->reg, reg))
1017 break;
1018
1019 if (q == NULL)
1020 {
1021 q = ggc_alloc_queued_reg_save ();
1022 q->next = queued_reg_saves;
1023 queued_reg_saves = q;
1024 }
1025
1026 q->reg = reg;
1027 q->cfa_offset = offset;
1028 q->saved_reg = sreg;
1029 }
1030
1031 /* Output all the entries in QUEUED_REG_SAVES. */
1032
1033 static void
1034 dwarf2out_flush_queued_reg_saves (void)
1035 {
1036 struct queued_reg_save *q;
1037
1038 for (q = queued_reg_saves; q; q = q->next)
1039 {
1040 unsigned int reg, sreg;
1041
1042 record_reg_saved_in_reg (q->saved_reg, q->reg);
1043
1044 if (q->reg == pc_rtx)
1045 reg = DWARF_FRAME_RETURN_COLUMN;
1046 else
1047 reg = dwf_regno (q->reg);
1048 if (q->saved_reg)
1049 sreg = dwf_regno (q->saved_reg);
1050 else
1051 sreg = INVALID_REGNUM;
1052 reg_save (reg, sreg, q->cfa_offset);
1053 }
1054
1055 queued_reg_saves = NULL;
1056 }
1057
1058 /* Does INSN clobber any register which QUEUED_REG_SAVES lists a saved
1059 location for? Or, does it clobber a register which we've previously
1060 said that some other register is saved in, and for which we now
1061 have a new location for? */
1062
1063 static bool
1064 clobbers_queued_reg_save (const_rtx insn)
1065 {
1066 struct queued_reg_save *q;
1067
1068 for (q = queued_reg_saves; q; q = q->next)
1069 {
1070 size_t i;
1071 reg_saved_in_data *rir;
1072
1073 if (modified_in_p (q->reg, insn))
1074 return true;
1075
1076 FOR_EACH_VEC_ELT (reg_saved_in_data, regs_saved_in_regs, i, rir)
1077 if (compare_reg_or_pc (q->reg, rir->orig_reg)
1078 && modified_in_p (rir->saved_in_reg, insn))
1079 return true;
1080 }
1081
1082 return false;
1083 }
1084
1085 /* What register, if any, is currently saved in REG? */
1086
1087 static rtx
1088 reg_saved_in (rtx reg)
1089 {
1090 unsigned int regn = REGNO (reg);
1091 struct queued_reg_save *q;
1092 reg_saved_in_data *rir;
1093 size_t i;
1094
1095 for (q = queued_reg_saves; q; q = q->next)
1096 if (q->saved_reg && regn == REGNO (q->saved_reg))
1097 return q->reg;
1098
1099 FOR_EACH_VEC_ELT (reg_saved_in_data, regs_saved_in_regs, i, rir)
1100 if (regn == REGNO (rir->saved_in_reg))
1101 return rir->orig_reg;
1102
1103 return NULL_RTX;
1104 }
1105
1106
/* A temporary register holding an integral value used in adjusting SP
   or setting up the store_reg.  The "offset" field holds the integer
   value, not an offset; the "reg" field names the temporary.  */
static dw_cfa_location cfa_temp;
1111
/* A subroutine of dwarf2out_frame_debug, process a REG_DEF_CFA note.  */

static void
dwarf2out_frame_debug_def_cfa (rtx pat)
{
  memset (&cfa, 0, sizeof (cfa));

  switch (GET_CODE (pat))
    {
    case PLUS:
      /* (plus (reg) (const_int)): CFA is register plus offset.  */
      cfa.reg = dwf_regno (XEXP (pat, 0));
      cfa.offset = INTVAL (XEXP (pat, 1));
      break;

    case REG:
      /* CFA is the register itself, with zero offset.  */
      cfa.reg = dwf_regno (pat);
      break;

    case MEM:
      /* CFA is loaded from memory: (mem (reg)) or
	 (mem (plus (reg) (const_int))).  */
      cfa.indirect = 1;
      pat = XEXP (pat, 0);
      if (GET_CODE (pat) == PLUS)
	{
	  cfa.base_offset = INTVAL (XEXP (pat, 1));
	  pat = XEXP (pat, 0);
	}
      cfa.reg = dwf_regno (pat);
      break;

    default:
      /* Recurse and define an expression.  */
      gcc_unreachable ();
    }

  def_cfa_1 (&cfa);
}
1148
/* A subroutine of dwarf2out_frame_debug, process a REG_ADJUST_CFA note.  */

static void
dwarf2out_frame_debug_adjust_cfa (rtx pat)
{
  rtx src, dest;

  gcc_assert (GET_CODE (pat) == SET);
  dest = XEXP (pat, 0);
  src = XEXP (pat, 1);

  switch (GET_CODE (src))
    {
    case PLUS:
      /* The addend must be based on the current CFA register; moving
	 the register up by N shrinks the CFA offset by N.  */
      gcc_assert (dwf_regno (XEXP (src, 0)) == cfa.reg);
      cfa.offset -= INTVAL (XEXP (src, 1));
      break;

    case REG:
      /* Plain register copy; offset is unchanged.  */
      break;

    default:
      gcc_unreachable ();
    }

  /* The destination becomes the new CFA register.  */
  cfa.reg = dwf_regno (dest);
  gcc_assert (cfa.indirect == 0);

  def_cfa_1 (&cfa);
}
1179
1180 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_OFFSET note. */
1181
1182 static void
1183 dwarf2out_frame_debug_cfa_offset (rtx set)
1184 {
1185 HOST_WIDE_INT offset;
1186 rtx src, addr, span;
1187 unsigned int sregno;
1188
1189 src = XEXP (set, 1);
1190 addr = XEXP (set, 0);
1191 gcc_assert (MEM_P (addr));
1192 addr = XEXP (addr, 0);
1193
1194 /* As documented, only consider extremely simple addresses. */
1195 switch (GET_CODE (addr))
1196 {
1197 case REG:
1198 gcc_assert (dwf_regno (addr) == cfa.reg);
1199 offset = -cfa.offset;
1200 break;
1201 case PLUS:
1202 gcc_assert (dwf_regno (XEXP (addr, 0)) == cfa.reg);
1203 offset = INTVAL (XEXP (addr, 1)) - cfa.offset;
1204 break;
1205 default:
1206 gcc_unreachable ();
1207 }
1208
1209 if (src == pc_rtx)
1210 {
1211 span = NULL;
1212 sregno = DWARF_FRAME_RETURN_COLUMN;
1213 }
1214 else
1215 {
1216 span = targetm.dwarf_register_span (src);
1217 sregno = dwf_regno (src);
1218 }
1219
1220 /* ??? We'd like to use queue_reg_save, but we need to come up with
1221 a different flushing heuristic for epilogues. */
1222 if (!span)
1223 reg_save (sregno, INVALID_REGNUM, offset);
1224 else
1225 {
1226 /* We have a PARALLEL describing where the contents of SRC live.
1227 Queue register saves for each piece of the PARALLEL. */
1228 int par_index;
1229 int limit;
1230 HOST_WIDE_INT span_offset = offset;
1231
1232 gcc_assert (GET_CODE (span) == PARALLEL);
1233
1234 limit = XVECLEN (span, 0);
1235 for (par_index = 0; par_index < limit; par_index++)
1236 {
1237 rtx elem = XVECEXP (span, 0, par_index);
1238
1239 sregno = dwf_regno (src);
1240 reg_save (sregno, INVALID_REGNUM, span_offset);
1241 span_offset += GET_MODE_SIZE (GET_MODE (elem));
1242 }
1243 }
1244 }
1245
1246 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_REGISTER note. */
1247
1248 static void
1249 dwarf2out_frame_debug_cfa_register (rtx set)
1250 {
1251 rtx src, dest;
1252 unsigned sregno, dregno;
1253
1254 src = XEXP (set, 1);
1255 dest = XEXP (set, 0);
1256
1257 record_reg_saved_in_reg (dest, src);
1258 if (src == pc_rtx)
1259 sregno = DWARF_FRAME_RETURN_COLUMN;
1260 else
1261 sregno = dwf_regno (src);
1262
1263 dregno = dwf_regno (dest);
1264
1265 /* ??? We'd like to use queue_reg_save, but we need to come up with
1266 a different flushing heuristic for epilogues. */
1267 reg_save (sregno, dregno, 0);
1268 }
1269
1270 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_EXPRESSION note. */
1271
1272 static void
1273 dwarf2out_frame_debug_cfa_expression (rtx set)
1274 {
1275 rtx src, dest, span;
1276 dw_cfi_ref cfi = new_cfi ();
1277
1278 dest = SET_DEST (set);
1279 src = SET_SRC (set);
1280
1281 gcc_assert (REG_P (src));
1282 gcc_assert (MEM_P (dest));
1283
1284 span = targetm.dwarf_register_span (src);
1285 gcc_assert (!span);
1286
1287 cfi->dw_cfi_opc = DW_CFA_expression;
1288 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = dwf_regno (src);
1289 cfi->dw_cfi_oprnd2.dw_cfi_loc
1290 = mem_loc_descriptor (XEXP (dest, 0), get_address_mode (dest),
1291 GET_MODE (dest), VAR_INIT_STATUS_INITIALIZED);
1292
1293 /* ??? We'd like to use queue_reg_save, were the interface different,
1294 and, as above, we could manage flushing for epilogues. */
1295 add_cfi (cfi);
1296 }
1297
1298 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_RESTORE note. */
1299
1300 static void
1301 dwarf2out_frame_debug_cfa_restore (rtx reg)
1302 {
1303 dw_cfi_ref cfi = new_cfi ();
1304 unsigned int regno = dwf_regno (reg);
1305
1306 cfi->dw_cfi_opc = (regno & ~0x3f ? DW_CFA_restore_extended : DW_CFA_restore);
1307 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = regno;
1308
1309 add_cfi (cfi);
1310 }
1311
1312 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_WINDOW_SAVE.
1313 ??? Perhaps we should note in the CIE where windows are saved (instead of
1314 assuming 0(cfa)) and what registers are in the window. */
1315
1316 static void
1317 dwarf2out_frame_debug_cfa_window_save (void)
1318 {
1319 dw_cfi_ref cfi = new_cfi ();
1320
1321 cfi->dw_cfi_opc = DW_CFA_GNU_window_save;
1322 add_cfi (cfi);
1323 }
1324
1325 /* Record call frame debugging information for an expression EXPR,
1326 which either sets SP or FP (adjusting how we calculate the frame
1327 address) or saves a register to the stack or another register.
1328 LABEL indicates the address of EXPR.
1329
1330 This function encodes a state machine mapping rtxes to actions on
1331 cfa, cfa_store, and cfa_temp.reg. We describe these rules so
1332 users need not read the source code.
1333
1334 The High-Level Picture
1335
1336 Changes in the register we use to calculate the CFA: Currently we
1337 assume that if you copy the CFA register into another register, we
1338 should take the other one as the new CFA register; this seems to
1339 work pretty well. If it's wrong for some target, it's simple
1340 enough not to set RTX_FRAME_RELATED_P on the insn in question.
1341
1342 Changes in the register we use for saving registers to the stack:
1343 This is usually SP, but not always. Again, we deduce that if you
1344 copy SP into another register (and SP is not the CFA register),
1345 then the new register is the one we will be using for register
1346 saves. This also seems to work.
1347
1348 Register saves: There's not much guesswork about this one; if
1349 RTX_FRAME_RELATED_P is set on an insn which modifies memory, it's a
1350 register save, and the register used to calculate the destination
1351 had better be the one we think we're using for this purpose.
1352 It's also assumed that a copy from a call-saved register to another
1353 register is saving that register if RTX_FRAME_RELATED_P is set on
1354 that instruction. If the copy is from a call-saved register to
1355 the *same* register, that means that the register is now the same
1356 value as in the caller.
1357
1358 Except: If the register being saved is the CFA register, and the
1359 offset is nonzero, we are saving the CFA, so we assume we have to
1360 use DW_CFA_def_cfa_expression. If the offset is 0, we assume that
1361 the intent is to save the value of SP from the previous frame.
1362
1363 In addition, if a register has previously been saved to a different
1364 register,
1365
1366 Invariants / Summaries of Rules
1367
1368 cfa current rule for calculating the CFA. It usually
1369 consists of a register and an offset.
1370 cfa_store register used by prologue code to save things to the stack
1371 cfa_store.offset is the offset from the value of
1372 cfa_store.reg to the actual CFA
1373 cfa_temp register holding an integral value. cfa_temp.offset
1374 stores the value, which will be used to adjust the
1375 stack pointer. cfa_temp is also used like cfa_store,
1376 to track stores to the stack via fp or a temp reg.
1377
1378 Rules 1- 4: Setting a register's value to cfa.reg or an expression
1379 with cfa.reg as the first operand changes the cfa.reg and its
1380 cfa.offset. Rule 1 and 4 also set cfa_temp.reg and
1381 cfa_temp.offset.
1382
1383 Rules 6- 9: Set a non-cfa.reg register value to a constant or an
1384 expression yielding a constant. This sets cfa_temp.reg
1385 and cfa_temp.offset.
1386
1387 Rule 5: Create a new register cfa_store used to save items to the
1388 stack.
1389
1390 Rules 10-14: Save a register to the stack. Define offset as the
1391 difference of the original location and cfa_store's
1392 location (or cfa_temp's location if cfa_temp is used).
1393
1394 Rules 16-20: If AND operation happens on sp in prologue, we assume
1395 stack is realigned. We will use a group of DW_OP_XXX
1396 expressions to represent the location of the stored
1397 register instead of CFA+offset.
1398
1399 The Rules
1400
1401 "{a,b}" indicates a choice of a xor b.
1402 "<reg>:cfa.reg" indicates that <reg> must equal cfa.reg.
1403
1404 Rule 1:
1405 (set <reg1> <reg2>:cfa.reg)
1406 effects: cfa.reg = <reg1>
1407 cfa.offset unchanged
1408 cfa_temp.reg = <reg1>
1409 cfa_temp.offset = cfa.offset
1410
1411 Rule 2:
1412 (set sp ({minus,plus,losum} {sp,fp}:cfa.reg
1413 {<const_int>,<reg>:cfa_temp.reg}))
1414 effects: cfa.reg = sp if fp used
1415 cfa.offset += {+/- <const_int>, cfa_temp.offset} if cfa.reg==sp
1416 cfa_store.offset += {+/- <const_int>, cfa_temp.offset}
1417 if cfa_store.reg==sp
1418
1419 Rule 3:
1420 (set fp ({minus,plus,losum} <reg>:cfa.reg <const_int>))
1421 effects: cfa.reg = fp
1422 cfa_offset += +/- <const_int>
1423
1424 Rule 4:
1425 (set <reg1> ({plus,losum} <reg2>:cfa.reg <const_int>))
1426 constraints: <reg1> != fp
1427 <reg1> != sp
1428 effects: cfa.reg = <reg1>
1429 cfa_temp.reg = <reg1>
1430 cfa_temp.offset = cfa.offset
1431
1432 Rule 5:
1433 (set <reg1> (plus <reg2>:cfa_temp.reg sp:cfa.reg))
1434 constraints: <reg1> != fp
1435 <reg1> != sp
1436 effects: cfa_store.reg = <reg1>
1437 cfa_store.offset = cfa.offset - cfa_temp.offset
1438
1439 Rule 6:
1440 (set <reg> <const_int>)
1441 effects: cfa_temp.reg = <reg>
1442 cfa_temp.offset = <const_int>
1443
1444 Rule 7:
1445 (set <reg1>:cfa_temp.reg (ior <reg2>:cfa_temp.reg <const_int>))
1446 effects: cfa_temp.reg = <reg1>
1447 cfa_temp.offset |= <const_int>
1448
1449 Rule 8:
1450 (set <reg> (high <exp>))
1451 effects: none
1452
1453 Rule 9:
1454 (set <reg> (lo_sum <exp> <const_int>))
1455 effects: cfa_temp.reg = <reg>
1456 cfa_temp.offset = <const_int>
1457
1458 Rule 10:
1459 (set (mem ({pre,post}_modify sp:cfa_store (???? <reg1> <const_int>))) <reg2>)
1460 effects: cfa_store.offset -= <const_int>
1461 cfa.offset = cfa_store.offset if cfa.reg == sp
1462 cfa.reg = sp
1463 cfa.base_offset = -cfa_store.offset
1464
1465 Rule 11:
1466 (set (mem ({pre_inc,pre_dec,post_dec} sp:cfa_store.reg)) <reg>)
1467 effects: cfa_store.offset += -/+ mode_size(mem)
1468 cfa.offset = cfa_store.offset if cfa.reg == sp
1469 cfa.reg = sp
1470 cfa.base_offset = -cfa_store.offset
1471
1472 Rule 12:
1473 (set (mem ({minus,plus,losum} <reg1>:{cfa_store,cfa_temp} <const_int>))
1474
1475 <reg2>)
1476 effects: cfa.reg = <reg1>
1477 cfa.base_offset = -/+ <const_int> - {cfa_store,cfa_temp}.offset
1478
1479 Rule 13:
1480 (set (mem <reg1>:{cfa_store,cfa_temp}) <reg2>)
1481 effects: cfa.reg = <reg1>
1482 cfa.base_offset = -{cfa_store,cfa_temp}.offset
1483
1484 Rule 14:
1485 (set (mem (post_inc <reg1>:cfa_temp <const_int>)) <reg2>)
1486 effects: cfa.reg = <reg1>
1487 cfa.base_offset = -cfa_temp.offset
1488 cfa_temp.offset -= mode_size(mem)
1489
1490 Rule 15:
1491 (set <reg> {unspec, unspec_volatile})
1492 effects: target-dependent
1493
1494 Rule 16:
1495 (set sp (and: sp <const_int>))
1496 constraints: cfa_store.reg == sp
1497 effects: cfun->fde.stack_realign = 1
1498 cfa_store.offset = 0
1499 fde->drap_reg = cfa.reg if cfa.reg != sp and cfa.reg != fp
1500
1501 Rule 17:
1502 (set (mem ({pre_inc, pre_dec} sp)) (mem (plus (cfa.reg) (const_int))))
1503 effects: cfa_store.offset += -/+ mode_size(mem)
1504
1505 Rule 18:
1506 (set (mem ({pre_inc, pre_dec} sp)) fp)
1507 constraints: fde->stack_realign == 1
1508 effects: cfa_store.offset = 0
1509 cfa.reg != HARD_FRAME_POINTER_REGNUM
1510
1511 Rule 19:
1512 (set (mem ({pre_inc, pre_dec} sp)) cfa.reg)
1513 constraints: fde->stack_realign == 1
1514 && cfa.offset == 0
1515 && cfa.indirect == 0
1516 && cfa.reg != HARD_FRAME_POINTER_REGNUM
1517 effects: Use DW_CFA_def_cfa_expression to define cfa
1518 cfa.reg == fde->drap_reg */
1519
static void
dwarf2out_frame_debug_expr (rtx expr)
{
  rtx src, dest, span;
  HOST_WIDE_INT offset;
  dw_fde_ref fde;

  /* If RTX_FRAME_RELATED_P is set on a PARALLEL, process each member of
     the PARALLEL independently. The first element is always processed if
     it is a SET. This is for backward compatibility.   Other elements
     are processed only if they are SETs and the RTX_FRAME_RELATED_P
     flag is set in them. */
  if (GET_CODE (expr) == PARALLEL || GET_CODE (expr) == SEQUENCE)
    {
      int par_index;
      int limit = XVECLEN (expr, 0);
      rtx elem;

      /* PARALLELs have strict read-modify-write semantics, so we
	 ought to evaluate every rvalue before changing any lvalue.
	 It's cumbersome to do that in general, but there's an
	 easy approximation that is enough for all current users:
	 handle register saves before register assignments.  */
      if (GET_CODE (expr) == PARALLEL)
	for (par_index = 0; par_index < limit; par_index++)
	  {
	    /* First pass: only the stores to memory (register saves).  */
	    elem = XVECEXP (expr, 0, par_index);
	    if (GET_CODE (elem) == SET
		&& MEM_P (SET_DEST (elem))
		&& (RTX_FRAME_RELATED_P (elem) || par_index == 0))
	      dwarf2out_frame_debug_expr (elem);
	  }

      for (par_index = 0; par_index < limit; par_index++)
	{
	  /* Second pass: everything else.  For a SEQUENCE there was no
	     first pass, so memory destinations are handled here too.  */
	  elem = XVECEXP (expr, 0, par_index);
	  if (GET_CODE (elem) == SET
	      && (!MEM_P (SET_DEST (elem)) || GET_CODE (expr) == SEQUENCE)
	      && (RTX_FRAME_RELATED_P (elem) || par_index == 0))
	    dwarf2out_frame_debug_expr (elem);
	  else if (GET_CODE (elem) == SET
		   && par_index != 0
		   && !RTX_FRAME_RELATED_P (elem))
	    {
	      /* Stack adjustment combining might combine some post-prologue
		 stack adjustment into a prologue stack adjustment.  */
	      HOST_WIDE_INT offset = stack_adjust_offset (elem, args_size, 0);

	      if (offset != 0)
		dwarf2out_stack_adjust (offset);
	    }
	}
      return;
    }

  gcc_assert (GET_CODE (expr) == SET);

  src = SET_SRC (expr);
  dest = SET_DEST (expr);

  /* If SRC currently holds the value of another register (recorded via
     record_reg_saved_in_reg), treat the set as if it used that original
     register.  */
  if (REG_P (src))
    {
      rtx rsi = reg_saved_in (src);
      if (rsi)
	src = rsi;
    }

  fde = cfun->fde;

  switch (GET_CODE (dest))
    {
    case REG:
      switch (GET_CODE (src))
	{
	  /* Setting FP from SP.  */
	case REG:
	  if (cfa.reg == dwf_regno (src))
	    {
	      /* Rule 1 */
	      /* Update the CFA rule wrt SP or FP.  Make sure src is
		 relative to the current CFA register.

		 We used to require that dest be either SP or FP, but the
		 ARM copies SP to a temporary register, and from there to
		 FP.  So we just rely on the backends to only set
		 RTX_FRAME_RELATED_P on appropriate insns.  */
	      cfa.reg = dwf_regno (dest);
	      cfa_temp.reg = cfa.reg;
	      cfa_temp.offset = cfa.offset;
	    }
	  else
	    {
	      /* Saving a register in a register.  */
	      gcc_assert (!fixed_regs [REGNO (dest)]
			  /* For the SPARC and its register window.  */
			  || (dwf_regno (src) == DWARF_FRAME_RETURN_COLUMN));

	      /* After stack is aligned, we can only save SP in FP
		 if drap register is used.  In this case, we have
		 to restore stack pointer with the CFA value and we
		 don't generate this DWARF information.  */
	      if (fde
		  && fde->stack_realign
		  && REGNO (src) == STACK_POINTER_REGNUM)
		gcc_assert (REGNO (dest) == HARD_FRAME_POINTER_REGNUM
			    && fde->drap_reg != INVALID_REGNUM
			    && cfa.reg != dwf_regno (src));
	      else
		queue_reg_save (src, dest, 0);
	    }
	  break;

	case PLUS:
	case MINUS:
	case LO_SUM:
	  if (dest == stack_pointer_rtx)
	    {
	      /* Rule 2 */
	      /* Adjusting SP.  The adjustment amount comes either from a
		 constant or from a register previously loaded with one
		 (tracked in cfa_temp).  */
	      switch (GET_CODE (XEXP (src, 1)))
		{
		case CONST_INT:
		  offset = INTVAL (XEXP (src, 1));
		  break;
		case REG:
		  gcc_assert (dwf_regno (XEXP (src, 1)) == cfa_temp.reg);
		  offset = cfa_temp.offset;
		  break;
		default:
		  gcc_unreachable ();
		}

	      if (XEXP (src, 0) == hard_frame_pointer_rtx)
		{
		  /* Restoring SP from FP in the epilogue.  */
		  gcc_assert (cfa.reg == dw_frame_pointer_regnum);
		  cfa.reg = dw_stack_pointer_regnum;
		}
	      else if (GET_CODE (src) == LO_SUM)
		/* Assume we've set the source reg of the LO_SUM from sp.  */
		;
	      else
		gcc_assert (XEXP (src, 0) == stack_pointer_rtx);

	      /* The stack grows down, so a PLUS/LO_SUM adjustment moves
		 the CFA offset in the opposite direction.  */
	      if (GET_CODE (src) != MINUS)
		offset = -offset;
	      if (cfa.reg == dw_stack_pointer_regnum)
		cfa.offset += offset;
	      if (cfa_store.reg == dw_stack_pointer_regnum)
		cfa_store.offset += offset;
	    }
	  else if (dest == hard_frame_pointer_rtx)
	    {
	      /* Rule 3 */
	      /* Either setting the FP from an offset of the SP,
		 or adjusting the FP */
	      gcc_assert (frame_pointer_needed);

	      gcc_assert (REG_P (XEXP (src, 0))
			  && dwf_regno (XEXP (src, 0)) == cfa.reg
			  && CONST_INT_P (XEXP (src, 1)));
	      offset = INTVAL (XEXP (src, 1));
	      if (GET_CODE (src) != MINUS)
		offset = -offset;
	      cfa.offset += offset;
	      cfa.reg = dw_frame_pointer_regnum;
	    }
	  else
	    {
	      gcc_assert (GET_CODE (src) != MINUS);

	      /* Rule 4 */
	      if (REG_P (XEXP (src, 0))
		  && dwf_regno (XEXP (src, 0)) == cfa.reg
		  && CONST_INT_P (XEXP (src, 1)))
		{
		  /* Setting a temporary CFA register that will be copied
		     into the FP later on.  */
		  offset = - INTVAL (XEXP (src, 1));
		  cfa.offset += offset;
		  cfa.reg = dwf_regno (dest);
		  /* Or used to save regs to the stack.  */
		  cfa_temp.reg = cfa.reg;
		  cfa_temp.offset = cfa.offset;
		}

	      /* Rule 5 */
	      else if (REG_P (XEXP (src, 0))
		       && dwf_regno (XEXP (src, 0)) == cfa_temp.reg
		       && XEXP (src, 1) == stack_pointer_rtx)
		{
		  /* Setting a scratch register that we will use instead
		     of SP for saving registers to the stack.  */
		  gcc_assert (cfa.reg == dw_stack_pointer_regnum);
		  cfa_store.reg = dwf_regno (dest);
		  cfa_store.offset = cfa.offset - cfa_temp.offset;
		}

	      /* Rule 9 */
	      else if (GET_CODE (src) == LO_SUM
		       && CONST_INT_P (XEXP (src, 1)))
		{
		  cfa_temp.reg = dwf_regno (dest);
		  cfa_temp.offset = INTVAL (XEXP (src, 1));
		}
	      else
		gcc_unreachable ();
	    }
	  break;

	  /* Rule 6 */
	case CONST_INT:
	  cfa_temp.reg = dwf_regno (dest);
	  cfa_temp.offset = INTVAL (src);
	  break;

	  /* Rule 7 */
	case IOR:
	  gcc_assert (REG_P (XEXP (src, 0))
		      && dwf_regno (XEXP (src, 0)) == cfa_temp.reg
		      && CONST_INT_P (XEXP (src, 1)));

	  cfa_temp.reg = dwf_regno (dest);
	  cfa_temp.offset |= INTVAL (XEXP (src, 1));
	  break;

	  /* Skip over HIGH, assuming it will be followed by a LO_SUM,
	     which will fill in all of the bits.  */
	  /* Rule 8 */
	case HIGH:
	  break;

	  /* Rule 15 */
	case UNSPEC:
	case UNSPEC_VOLATILE:
	  /* All unspecs should be represented by REG_CFA_* notes.  */
	  gcc_unreachable ();
	  return;

	  /* Rule 16 */
	case AND:
	  /* If this AND operation happens on stack pointer in prologue,
	     we assume the stack is realigned and we extract the
	     alignment.  */
	  if (fde && XEXP (src, 0) == stack_pointer_rtx)
	    {
	      /* We interpret reg_save differently with stack_realign set.
		 Thus we must flush whatever we have queued first.  */
	      dwarf2out_flush_queued_reg_saves ();

	      gcc_assert (cfa_store.reg == dwf_regno (XEXP (src, 0)));
	      fde->stack_realign = 1;
	      fde->stack_realignment = INTVAL (XEXP (src, 1));
	      cfa_store.offset = 0;

	      /* If the CFA is tracked in neither SP nor FP, it must be
		 in the DRAP register.  */
	      if (cfa.reg != dw_stack_pointer_regnum
		  && cfa.reg != dw_frame_pointer_regnum)
		fde->drap_reg = cfa.reg;
	    }
	  return;

	default:
	  gcc_unreachable ();
	}

      def_cfa_1 (&cfa);
      break;

    case MEM:

      /* Saving a register to the stack.  Make sure dest is relative to the
	 CFA register.  On exit from this switch, OFFSET is the slot's
	 offset relative to the CFA.  */
      switch (GET_CODE (XEXP (dest, 0)))
	{
	  /* Rule 10 */
	  /* With a push.  */
	case PRE_MODIFY:
	case POST_MODIFY:
	  /* We can't handle variable size modifications.  */
	  gcc_assert (GET_CODE (XEXP (XEXP (XEXP (dest, 0), 1), 1))
		      == CONST_INT);
	  offset = -INTVAL (XEXP (XEXP (XEXP (dest, 0), 1), 1));

	  gcc_assert (REGNO (XEXP (XEXP (dest, 0), 0)) == STACK_POINTER_REGNUM
		      && cfa_store.reg == dw_stack_pointer_regnum);

	  cfa_store.offset += offset;
	  if (cfa.reg == dw_stack_pointer_regnum)
	    cfa.offset = cfa_store.offset;

	  /* For POST_MODIFY the store happens before the SP update.  */
	  if (GET_CODE (XEXP (dest, 0)) == POST_MODIFY)
	    offset -= cfa_store.offset;
	  else
	    offset = -cfa_store.offset;
	  break;

	  /* Rule 11 */
	case PRE_INC:
	case PRE_DEC:
	case POST_DEC:
	  offset = GET_MODE_SIZE (GET_MODE (dest));
	  if (GET_CODE (XEXP (dest, 0)) == PRE_INC)
	    offset = -offset;

	  gcc_assert ((REGNO (XEXP (XEXP (dest, 0), 0))
		       == STACK_POINTER_REGNUM)
		      && cfa_store.reg == dw_stack_pointer_regnum);

	  cfa_store.offset += offset;

	  /* Rule 18: If stack is aligned, we will use FP as a
	     reference to represent the address of the stored
	     regiser.  */
	  if (fde
	      && fde->stack_realign
	      && src == hard_frame_pointer_rtx)
	    {
	      gcc_assert (cfa.reg != dw_frame_pointer_regnum);
	      cfa_store.offset = 0;
	    }

	  if (cfa.reg == dw_stack_pointer_regnum)
	    cfa.offset = cfa_store.offset;

	  /* For POST_DEC the store happens before the SP update.  */
	  if (GET_CODE (XEXP (dest, 0)) == POST_DEC)
	    offset += -cfa_store.offset;
	  else
	    offset = -cfa_store.offset;
	  break;

	  /* Rule 12 */
	  /* With an offset.  */
	case PLUS:
	case MINUS:
	case LO_SUM:
	  {
	    unsigned int regno;

	    gcc_assert (CONST_INT_P (XEXP (XEXP (dest, 0), 1))
			&& REG_P (XEXP (XEXP (dest, 0), 0)));
	    offset = INTVAL (XEXP (XEXP (dest, 0), 1));
	    if (GET_CODE (XEXP (dest, 0)) == MINUS)
	      offset = -offset;

	    regno = dwf_regno (XEXP (XEXP (dest, 0), 0));

	    /* The base may be the CFA register, the store register,
	       or a temporary; rebase the offset accordingly.  */
	    if (cfa.reg == regno)
	      offset -= cfa.offset;
	    else if (cfa_store.reg == regno)
	      offset -= cfa_store.offset;
	    else
	      {
		gcc_assert (cfa_temp.reg == regno);
		offset -= cfa_temp.offset;
	      }
	  }
	  break;

	  /* Rule 13 */
	  /* Without an offset.  */
	case REG:
	  {
	    unsigned int regno = dwf_regno (XEXP (dest, 0));

	    if (cfa.reg == regno)
	      offset = -cfa.offset;
	    else if (cfa_store.reg == regno)
	      offset = -cfa_store.offset;
	    else
	      {
		gcc_assert (cfa_temp.reg == regno);
		offset = -cfa_temp.offset;
	      }
	  }
	  break;

	  /* Rule 14 */
	case POST_INC:
	  gcc_assert (cfa_temp.reg == dwf_regno (XEXP (XEXP (dest, 0), 0)));
	  offset = -cfa_temp.offset;
	  cfa_temp.offset -= GET_MODE_SIZE (GET_MODE (dest));
	  break;

	default:
	  gcc_unreachable ();
	}

      /* Rule 17 */
      /* If the source operand of this MEM operation is a memory,
	 we only care how much stack grew.  */
      if (MEM_P (src))
	break;

      if (REG_P (src)
	  && REGNO (src) != STACK_POINTER_REGNUM
	  && REGNO (src) != HARD_FRAME_POINTER_REGNUM
	  && dwf_regno (src) == cfa.reg)
	{
	  /* We're storing the current CFA reg into the stack.  */

	  if (cfa.offset == 0)
	    {
              /* Rule 19 */
              /* If stack is aligned, putting CFA reg into stack means
		 we can no longer use reg + offset to represent CFA.
		 Here we use DW_CFA_def_cfa_expression instead.  The
		 result of this expression equals to the original CFA
		 value.  */
              if (fde
                  && fde->stack_realign
                  && cfa.indirect == 0
                  && cfa.reg != dw_frame_pointer_regnum)
                {
		  dw_cfa_location cfa_exp;

		  gcc_assert (fde->drap_reg == cfa.reg);

		  cfa_exp.indirect = 1;
		  cfa_exp.reg = dw_frame_pointer_regnum;
		  cfa_exp.base_offset = offset;
		  cfa_exp.offset = 0;

		  fde->drap_reg_saved = 1;

		  def_cfa_1 (&cfa_exp);
		  break;
                }

	      /* If the source register is exactly the CFA, assume
		 we're saving SP like any other register; this happens
		 on the ARM.  */
	      def_cfa_1 (&cfa);
	      queue_reg_save (stack_pointer_rtx, NULL_RTX, offset);
	      break;
	    }
	  else
	    {
	      /* Otherwise, we'll need to look in the stack to
		 calculate the CFA.  */
	      rtx x = XEXP (dest, 0);

	      if (!REG_P (x))
		x = XEXP (x, 0);
	      gcc_assert (REG_P (x));

	      cfa.reg = dwf_regno (x);
	      cfa.base_offset = offset;
	      cfa.indirect = 1;
	      def_cfa_1 (&cfa);
	      break;
	    }
	}

      def_cfa_1 (&cfa);

      span = NULL;
      if (REG_P (src))
	span = targetm.dwarf_register_span (src);
      if (!span)
	queue_reg_save (src, NULL_RTX, offset);
      else
	{
	  /* We have a PARALLEL describing where the contents of SRC live.
	     Queue register saves for each piece of the PARALLEL.  */
	  int par_index;
	  int limit;
	  HOST_WIDE_INT span_offset = offset;

	  gcc_assert (GET_CODE (span) == PARALLEL);

	  limit = XVECLEN (span, 0);
	  for (par_index = 0; par_index < limit; par_index++)
	    {
	      rtx elem = XVECEXP (span, 0, par_index);
	      queue_reg_save (elem, NULL_RTX, span_offset);
	      span_offset += GET_MODE_SIZE (GET_MODE (elem));
	    }
	}
      break;

    default:
      gcc_unreachable ();
    }
}
2004
/* Record call frame debugging information for INSN, which either
   sets SP or FP (adjusting how we calculate the frame address) or saves a
   register to the stack.  If INSN is NULL_RTX, initialize our state.

   If AFTER_P is false, we're being called before the insn is emitted,
   otherwise after.  Call instructions get invoked twice.

   REG_CFA_* notes on INSN take precedence; only when none is present is
   the insn pattern interpreted heuristically by
   dwarf2out_frame_debug_expr.  */

static void
dwarf2out_frame_debug (rtx insn, bool after_p)
{
  rtx note, n;
  bool handled_one = false;
  bool need_flush = false;

  /* Queued saves are only valid while we can still describe them
     relative to the current state; flush on anything that is not a
     plain insn or that clobbers a register holding a queued save.  */
  if (!NONJUMP_INSN_P (insn) || clobbers_queued_reg_save (insn))
    dwarf2out_flush_queued_reg_saves ();

  if (!RTX_FRAME_RELATED_P (insn))
    {
      /* ??? This should be done unconditionally since stack adjustments
	 matter if the stack pointer is not the CFA register anymore but
	 is still used to save registers.  */
      if (!ACCUMULATE_OUTGOING_ARGS)
	dwarf2out_notice_stack_adjust (insn, after_p);
      return;
    }

  any_cfis_emitted = false;

  for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
    switch (REG_NOTE_KIND (note))
      {
      case REG_FRAME_RELATED_EXPR:
	/* The note's expression replaces the insn pattern entirely.  */
	insn = XEXP (note, 0);
	goto do_frame_expr;

      case REG_CFA_DEF_CFA:
	dwarf2out_frame_debug_def_cfa (XEXP (note, 0));
	handled_one = true;
	break;

      case REG_CFA_ADJUST_CFA:
	/* A null note operand means "use the insn pattern itself"
	   (first element if it is a PARALLEL).  */
	n = XEXP (note, 0);
	if (n == NULL)
	  {
	    n = PATTERN (insn);
	    if (GET_CODE (n) == PARALLEL)
	      n = XVECEXP (n, 0, 0);
	  }
	dwarf2out_frame_debug_adjust_cfa (n);
	handled_one = true;
	break;

      case REG_CFA_OFFSET:
	n = XEXP (note, 0);
	if (n == NULL)
	  n = single_set (insn);
	dwarf2out_frame_debug_cfa_offset (n);
	handled_one = true;
	break;

      case REG_CFA_REGISTER:
	n = XEXP (note, 0);
	if (n == NULL)
	  {
	    n = PATTERN (insn);
	    if (GET_CODE (n) == PARALLEL)
	      n = XVECEXP (n, 0, 0);
	  }
	dwarf2out_frame_debug_cfa_register (n);
	handled_one = true;
	break;

      case REG_CFA_EXPRESSION:
	n = XEXP (note, 0);
	if (n == NULL)
	  n = single_set (insn);
	dwarf2out_frame_debug_cfa_expression (n);
	handled_one = true;
	break;

      case REG_CFA_RESTORE:
	n = XEXP (note, 0);
	if (n == NULL)
	  {
	    /* Default to the destination of the (first) SET.  */
	    n = PATTERN (insn);
	    if (GET_CODE (n) == PARALLEL)
	      n = XVECEXP (n, 0, 0);
	    n = XEXP (n, 0);
	  }
	dwarf2out_frame_debug_cfa_restore (n);
	handled_one = true;
	break;

      case REG_CFA_SET_VDRAP:
	n = XEXP (note, 0);
	if (REG_P (n))
	  {
	    dw_fde_ref fde = cfun->fde;
	    if (fde)
	      {
		gcc_assert (fde->vdrap_reg == INVALID_REGNUM);
		if (REG_P (n))
		  fde->vdrap_reg = dwf_regno (n);
	      }
	  }
	handled_one = true;
	break;

      case REG_CFA_WINDOW_SAVE:
	dwarf2out_frame_debug_cfa_window_save ();
	handled_one = true;
	break;

      case REG_CFA_FLUSH_QUEUE:
	/* The actual flush happens below.  */
	need_flush = true;
	handled_one = true;
	break;

      default:
	break;
      }

  if (handled_one)
    {
      /* Minimize the number of advances by emitting the entire queue
	 once anything is emitted.  */
      need_flush |= any_cfis_emitted;
    }
  else
    {
      /* No REG_CFA_* notes: fall back to pattern interpretation.  */
      insn = PATTERN (insn);
    do_frame_expr:
      dwarf2out_frame_debug_expr (insn);

      /* Check again.  A parallel can save and update the same register.
	 We could probably check just once, here, but this is safer than
	 removing the check at the start of the function.  */
      if (any_cfis_emitted || clobbers_queued_reg_save (insn))
	need_flush = true;
    }

  if (need_flush)
    dwarf2out_flush_queued_reg_saves ();
}
2151
2152 /* Examine CFI and return true if a cfi label and set_loc is needed
2153 beforehand. Even when generating CFI assembler instructions, we
2154 still have to add the cfi to the list so that lookup_cfa_1 works
2155 later on. When -g2 and above we even need to force emitting of
2156 CFI labels and add to list a DW_CFA_set_loc for convert_cfa_to_fb_loc_list
2157 purposes. If we're generating DWARF3 output we use DW_OP_call_frame_cfa
2158 and so don't use convert_cfa_to_fb_loc_list. */
2159
2160 static bool
2161 cfi_label_required_p (dw_cfi_ref cfi)
2162 {
2163 if (!dwarf2out_do_cfi_asm ())
2164 return true;
2165
2166 if (dwarf_version == 2
2167 && debug_info_level > DINFO_LEVEL_TERSE
2168 && (write_symbols == DWARF2_DEBUG
2169 || write_symbols == VMS_AND_DWARF2_DEBUG))
2170 {
2171 switch (cfi->dw_cfi_opc)
2172 {
2173 case DW_CFA_def_cfa_offset:
2174 case DW_CFA_def_cfa_offset_sf:
2175 case DW_CFA_def_cfa_register:
2176 case DW_CFA_def_cfa:
2177 case DW_CFA_def_cfa_sf:
2178 case DW_CFA_def_cfa_expression:
2179 case DW_CFA_restore_state:
2180 return true;
2181 default:
2182 return false;
2183 }
2184 }
2185 return false;
2186 }
2187
/* Walk the function, looking for NOTE_INSN_CFI notes.  Add the CFIs to the
   function's FDE, adding CFI labels and set_loc/advance_loc opcodes as
   necessary.  */
static void
add_cfis_to_fde (void)
{
  dw_fde_ref fde = cfun->fde;
  rtx insn, next;
  /* We always start with a function_begin label, so no set_loc is
     needed before the first CFI; FIRST therefore starts false and is
     raised only after a text-section switch.  */
  bool first = false;

  for (insn = get_insns (); insn; insn = next)
    {
      next = NEXT_INSN (insn);

      if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
	{
	  /* Don't attempt to advance_loc4 between labels
	     in different sections.  */
	  first = true;
	}

      if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_CFI)
	{
	  bool required = cfi_label_required_p (NOTE_CFI (insn));
	  /* Coalesce a run of consecutive CFI notes under one label;
	     a label is required if any CFI in the run needs one.
	     NEXT is left pointing just past the run.  */
	  while (next && NOTE_P (next) && NOTE_KIND (next) == NOTE_INSN_CFI)
	    {
	      required |= cfi_label_required_p (NOTE_CFI (next));
	      next = NEXT_INSN (next);
	    }
	  if (required)
	    {
	      int num = dwarf2out_cfi_label_num;
	      const char *label = dwarf2out_cfi_label ();
	      dw_cfi_ref xcfi;
	      rtx tmp;

	      /* Set the location counter to the new label.  */
	      xcfi = new_cfi ();
	      xcfi->dw_cfi_opc = (first ? DW_CFA_set_loc
				  : DW_CFA_advance_loc4);
	      xcfi->dw_cfi_oprnd1.dw_cfi_addr = label;
	      VEC_safe_push (dw_cfi_ref, gc, fde->dw_fde_cfi, xcfi);

	      /* Remember the label so final emits it at this point.  */
	      tmp = emit_note_before (NOTE_INSN_CFI_LABEL, insn);
	      NOTE_LABEL_NUMBER (tmp) = num;
	    }

	  /* Push every CFI in the run onto the FDE.  */
	  do
	    {
	      VEC_safe_push (dw_cfi_ref, gc, fde->dw_fde_cfi, NOTE_CFI (insn));
	      insn = NEXT_INSN (insn);
	    }
	  while (insn != next);
	  first = false;
	}
    }
}
2246
/* Scan the function and create the initial set of CFI notes by running
   the dwarf2out_frame_debug state machine over every insn.  CFI notes
   are emitted after CFI_INSN, which is normally the previous insn so
   that the CFI takes effect before the insn it describes.  */

static void
create_cfi_notes (void)
{
  rtx insn;

  for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
    {
      rtx pat;

      /* By default, emit any CFIs for this insn just before it.  */
      cfi_insn = PREV_INSN (insn);

      if (BARRIER_P (insn))
	{
	  dwarf2out_frame_debug (insn, false);
	  continue;
	}

      if (NOTE_P (insn))
	{
	  switch (NOTE_KIND (insn))
	    {
	    case NOTE_INSN_PROLOGUE_END:
	      dwarf2out_flush_queued_reg_saves ();
	      break;

	    case NOTE_INSN_EPILOGUE_BEG:
#if defined(HAVE_epilogue)
	      dwarf2out_cfi_begin_epilogue (insn);
#endif
	      break;

	    case NOTE_INSN_CFA_RESTORE_STATE:
	      cfi_insn = insn;
	      dwarf2out_frame_debug_restore_state ();
	      break;
	    }
	  continue;
	}

      if (!NONDEBUG_INSN_P (insn))
	continue;

      pat = PATTERN (insn);
      if (asm_noperands (pat) >= 0)
	{
	  dwarf2out_frame_debug (insn, false);
	  continue;
	}

      /* Element 0 of a SEQUENCE is the branch itself; process the
	 delay-slot insns (elements 1..n-1) individually.  */
      if (GET_CODE (pat) == SEQUENCE)
	{
	  int i, n = XVECLEN (pat, 0);
	  for (i = 1; i < n; ++i)
	    dwarf2out_frame_debug (XVECEXP (pat, 0, i), false);
	}

      /* Calls (and explicit flush requests) are also processed in the
	 "before" position; other insns only in the "after" position.  */
      if (CALL_P (insn)
	  || find_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL))
	dwarf2out_frame_debug (insn, false);

      /* Do not separate tablejump insns from their ADDR_DIFF_VEC.
	 Putting the note after the VEC should be ok.  */
      if (!tablejump_p (insn, NULL, &cfi_insn))
	cfi_insn = insn;

      dwarf2out_frame_debug (insn, true);
    }

  cfi_insn = NULL;
}
2319
/* Determine if we need to save and restore CFI information around the
   epilogue beginning at INSN (a NOTE_INSN_EPILOGUE_BEG note).  If we
   do need to save/restore, then emit the state save now, and insert a
   NOTE_INSN_CFA_RESTORE_STATE at the appropriate place in the stream
   so that the matching restore is emitted when it is reached.  */

static void
dwarf2out_cfi_begin_epilogue (rtx insn)
{
  bool saw_frp = false;
  rtx i;

  /* Scan forward to the return insn, noticing if there are possible
     frame related insns.  */
  for (i = NEXT_INSN (insn); i ; i = NEXT_INSN (i))
    {
      if (!INSN_P (i))
	continue;

      /* Look for both regular and sibcalls to end the block.  */
      if (returnjump_p (i))
	break;
      if (CALL_P (i) && SIBLING_CALL_P (i))
	break;

      if (GET_CODE (PATTERN (i)) == SEQUENCE)
	{
	  int idx;
	  rtx seq = PATTERN (i);

	  /* The return or sibcall may sit in a delay-slot SEQUENCE;
	     element 0 is the branch insn itself.  */
	  if (returnjump_p (XVECEXP (seq, 0, 0)))
	    break;
	  if (CALL_P (XVECEXP (seq, 0, 0))
	      && SIBLING_CALL_P (XVECEXP (seq, 0, 0)))
	    break;

	  /* Any frame-related insn inside the sequence counts too.  */
	  for (idx = 0; idx < XVECLEN (seq, 0); idx++)
	    if (RTX_FRAME_RELATED_P (XVECEXP (seq, 0, idx)))
	      saw_frp = true;
	}

      if (RTX_FRAME_RELATED_P (i))
	saw_frp = true;
    }

  /* If the port doesn't emit epilogue unwind info, we don't need a
     save/restore pair.  */
  if (!saw_frp)
    return;

  /* Otherwise, search forward to see if the return insn was the last
     basic block of the function.  If so, we don't need save/restore.  */
  gcc_assert (i != NULL);
  i = next_real_insn (i);
  if (i == NULL)
    return;

  /* Insert the restore before that next real insn in the stream, and before
     a potential NOTE_INSN_EPILOGUE_BEG -- we do need these notes to be
     properly nested.  This should be after any label or alignment.  This
     will be pushed into the CFI stream by the function below.  */
  while (1)
    {
      rtx p = PREV_INSN (i);
      if (!NOTE_P (p))
	break;
      if (NOTE_KIND (p) == NOTE_INSN_BASIC_BLOCK)
	break;
      i = p;
    }
  emit_note_before (NOTE_INSN_CFA_RESTORE_STATE, i);

  /* Arrange for a DW_CFA_remember_state to be emitted before the next
     CFI (the flag is consumed by the CFI-emission machinery).  */
  emit_cfa_remember = true;

  /* And emulate the state save.  */
  gcc_assert (!cfa_remember.in_use);
  cfa_remember = cfa;
  old_cfa_remember = old_cfa;
  cfa_remember.in_use = 1;
}
2399
/* A "subroutine" of dwarf2out_cfi_begin_epilogue.  Emit the restore
   required: a DW_CFA_restore_state CFI, plus restoration of the
   compile-time CFA tracking state saved by the begin-epilogue code.  */

static void
dwarf2out_frame_debug_restore_state (void)
{
  dw_cfi_ref cfi = new_cfi ();

  cfi->dw_cfi_opc = DW_CFA_restore_state;
  add_cfi (cfi);

  /* Mirror of the state save performed in dwarf2out_cfi_begin_epilogue.  */
  gcc_assert (cfa_remember.in_use);
  cfa = cfa_remember;
  old_cfa = old_cfa_remember;
  cfa_remember.in_use = 0;
}
2416 \f
/* Record the initial position of the return address.  RTL is
   INCOMING_RETURN_ADDR_RTX: a REG (RA in a register), a MEM of an
   sp-relative address (RA on the stack), or a PLUS of one of those
   with a constant offset.  */

static void
initial_return_save (rtx rtl)
{
  unsigned int reg = INVALID_REGNUM;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (rtl))
    {
    case REG:
      /* RA is in a register.  */
      reg = dwf_regno (rtl);
      break;

    case MEM:
      /* RA is on the stack.  The address must be sp-based.  */
      rtl = XEXP (rtl, 0);
      switch (GET_CODE (rtl))
	{
	case REG:
	  gcc_assert (REGNO (rtl) == STACK_POINTER_REGNUM);
	  offset = 0;
	  break;

	case PLUS:
	  gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
	  offset = INTVAL (XEXP (rtl, 1));
	  break;

	case MINUS:
	  gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
	  offset = -INTVAL (XEXP (rtl, 1));
	  break;

	default:
	  gcc_unreachable ();
	}

      break;

    case PLUS:
      /* The return address is at some offset from any value we can
	 actually load.  For instance, on the SPARC it is in %i7+8.  Just
	 ignore the offset for now; it doesn't matter for unwinding frames.  */
      gcc_assert (CONST_INT_P (XEXP (rtl, 1)));
      initial_return_save (XEXP (rtl, 0));
      return;

    default:
      gcc_unreachable ();
    }

  if (reg != DWARF_FRAME_RETURN_COLUMN)
    {
      /* The RA doesn't live in its return column.  Record the incoming
	 register (so later stores of it can be retargeted) and emit a
	 save rule for the return column.  */
      if (reg != INVALID_REGNUM)
        record_reg_saved_in_reg (rtl, pc_rtx);
      reg_save (DWARF_FRAME_RETURN_COLUMN, reg, offset - cfa.offset);
    }
}
2478
/* Annotate the function with NOTE_INSN_CFI notes to record the CFI
   state at each location within the function.  These notes will be
   emitted during pass_final.  Always returns 0 (no extra TODO flags).  */

static unsigned int
execute_dwarf2_frame (void)
{
  /* The first time we're called, compute the incoming frame state
     shared by every function; it is recorded in CIE_CFI_VEC.  */
  if (cie_cfi_vec == NULL)
    {
      dw_cfa_location loc;

      dw_stack_pointer_regnum = DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM);
      dw_frame_pointer_regnum = DWARF_FRAME_REGNUM (HARD_FRAME_POINTER_REGNUM);

      /* Direct CFI output into the CIE vector for the duration.  */
      add_cfi_vec = &cie_cfi_vec;

      memset (&old_cfa, 0, sizeof (old_cfa));
      old_cfa.reg = INVALID_REGNUM;

      /* On entry, the Canonical Frame Address is at SP.  */
      memset(&loc, 0, sizeof (loc));
      loc.reg = dw_stack_pointer_regnum;
      loc.offset = INCOMING_FRAME_SP_OFFSET;
      def_cfa_1 (&loc);

      if (targetm.debug_unwind_info () == UI_DWARF2
          || targetm_common.except_unwind_info (&global_options) == UI_DWARF2)
	{
	  initial_return_save (INCOMING_RETURN_ADDR_RTX);

	  /* For a few targets, we have the return address incoming into a
	     register, but choose a different return column.  This will result
	     in a DW_CFA_register for the return, and an entry in
	     regs_saved_in_regs to match.  If the target later stores that
	     return address register to the stack, we want to be able to emit
	     the DW_CFA_offset against the return column, not the intermediate
	     save register.  Save the contents of regs_saved_in_regs so that
	     we can re-initialize it at the start of each function.  */
	  switch (VEC_length (reg_saved_in_data, regs_saved_in_regs))
	    {
	    case 0:
	      break;
	    case 1:
	      cie_return_save = ggc_alloc_reg_saved_in_data ();
	      *cie_return_save = *VEC_index (reg_saved_in_data,
					     regs_saved_in_regs, 0);
	      regs_saved_in_regs = NULL;
	      break;
	    default:
	      gcc_unreachable ();
	    }
	}

      add_cfi_vec = NULL;
    }

  /* Set up state for generating call frame debug info.  */
  gcc_checking_assert (queued_reg_saves == NULL);
  gcc_checking_assert (regs_saved_in_regs == NULL);

  /* Re-create the incoming state: CFA at SP + INCOMING_FRAME_SP_OFFSET.  */
  memset (&cfa, 0, sizeof(cfa));
  cfa.reg = dw_stack_pointer_regnum;
  cfa.offset = INCOMING_FRAME_SP_OFFSET;

  old_cfa = cfa;
  cfa_store = cfa;

  memset (&cfa_temp, 0, sizeof(cfa_temp));
  cfa_temp.reg = INVALID_REGNUM;

  /* Re-seed regs_saved_in_regs with the CIE's return-address entry, if
     one was captured above.  */
  if (cie_return_save)
    VEC_safe_push (reg_saved_in_data, gc, regs_saved_in_regs, cie_return_save);

  dwarf2out_alloc_current_fde ();

  /* Do the work.  */
  create_cfi_notes ();
  add_cfis_to_fde ();

  /* Reset all function-specific information, particularly for GC.  */
  XDELETEVEC (barrier_args_size);
  barrier_args_size = NULL;
  regs_saved_in_regs = NULL;
  queued_reg_saves = NULL;

  return 0;
}
2567 \f
2568 /* Convert a DWARF call frame info. operation to its string name */
2569
2570 static const char *
2571 dwarf_cfi_name (unsigned int cfi_opc)
2572 {
2573 switch (cfi_opc)
2574 {
2575 case DW_CFA_advance_loc:
2576 return "DW_CFA_advance_loc";
2577 case DW_CFA_offset:
2578 return "DW_CFA_offset";
2579 case DW_CFA_restore:
2580 return "DW_CFA_restore";
2581 case DW_CFA_nop:
2582 return "DW_CFA_nop";
2583 case DW_CFA_set_loc:
2584 return "DW_CFA_set_loc";
2585 case DW_CFA_advance_loc1:
2586 return "DW_CFA_advance_loc1";
2587 case DW_CFA_advance_loc2:
2588 return "DW_CFA_advance_loc2";
2589 case DW_CFA_advance_loc4:
2590 return "DW_CFA_advance_loc4";
2591 case DW_CFA_offset_extended:
2592 return "DW_CFA_offset_extended";
2593 case DW_CFA_restore_extended:
2594 return "DW_CFA_restore_extended";
2595 case DW_CFA_undefined:
2596 return "DW_CFA_undefined";
2597 case DW_CFA_same_value:
2598 return "DW_CFA_same_value";
2599 case DW_CFA_register:
2600 return "DW_CFA_register";
2601 case DW_CFA_remember_state:
2602 return "DW_CFA_remember_state";
2603 case DW_CFA_restore_state:
2604 return "DW_CFA_restore_state";
2605 case DW_CFA_def_cfa:
2606 return "DW_CFA_def_cfa";
2607 case DW_CFA_def_cfa_register:
2608 return "DW_CFA_def_cfa_register";
2609 case DW_CFA_def_cfa_offset:
2610 return "DW_CFA_def_cfa_offset";
2611
2612 /* DWARF 3 */
2613 case DW_CFA_def_cfa_expression:
2614 return "DW_CFA_def_cfa_expression";
2615 case DW_CFA_expression:
2616 return "DW_CFA_expression";
2617 case DW_CFA_offset_extended_sf:
2618 return "DW_CFA_offset_extended_sf";
2619 case DW_CFA_def_cfa_sf:
2620 return "DW_CFA_def_cfa_sf";
2621 case DW_CFA_def_cfa_offset_sf:
2622 return "DW_CFA_def_cfa_offset_sf";
2623
2624 /* SGI/MIPS specific */
2625 case DW_CFA_MIPS_advance_loc8:
2626 return "DW_CFA_MIPS_advance_loc8";
2627
2628 /* GNU extensions */
2629 case DW_CFA_GNU_window_save:
2630 return "DW_CFA_GNU_window_save";
2631 case DW_CFA_GNU_args_size:
2632 return "DW_CFA_GNU_args_size";
2633 case DW_CFA_GNU_negative_offset_extended:
2634 return "DW_CFA_GNU_negative_offset_extended";
2635
2636 default:
2637 return "DW_CFA_<unknown>";
2638 }
2639 }
2640
2641 /* This routine will generate the correct assembly data for a location
2642 description based on a cfi entry with a complex address. */
2643
2644 static void
2645 output_cfa_loc (dw_cfi_ref cfi, int for_eh)
2646 {
2647 dw_loc_descr_ref loc;
2648 unsigned long size;
2649
2650 if (cfi->dw_cfi_opc == DW_CFA_expression)
2651 {
2652 unsigned r =
2653 DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
2654 dw2_asm_output_data (1, r, NULL);
2655 loc = cfi->dw_cfi_oprnd2.dw_cfi_loc;
2656 }
2657 else
2658 loc = cfi->dw_cfi_oprnd1.dw_cfi_loc;
2659
2660 /* Output the size of the block. */
2661 size = size_of_locs (loc);
2662 dw2_asm_output_data_uleb128 (size, NULL);
2663
2664 /* Now output the operations themselves. */
2665 output_loc_sequence (loc, for_eh);
2666 }
2667
2668 /* Similar, but used for .cfi_escape. */
2669
2670 static void
2671 output_cfa_loc_raw (dw_cfi_ref cfi)
2672 {
2673 dw_loc_descr_ref loc;
2674 unsigned long size;
2675
2676 if (cfi->dw_cfi_opc == DW_CFA_expression)
2677 {
2678 unsigned r =
2679 DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
2680 fprintf (asm_out_file, "%#x,", r);
2681 loc = cfi->dw_cfi_oprnd2.dw_cfi_loc;
2682 }
2683 else
2684 loc = cfi->dw_cfi_oprnd1.dw_cfi_loc;
2685
2686 /* Output the size of the block. */
2687 size = size_of_locs (loc);
2688 dw2_asm_output_data_uleb128_raw (size);
2689 fputc (',', asm_out_file);
2690
2691 /* Now output the operations themselves. */
2692 output_loc_sequence_raw (loc);
2693 }
2694
/* Output a Call Frame Information opcode and its operand(s) in binary
   form.  FDE supplies the current-label state used by the advance
   opcodes; FOR_EH selects the .eh_frame vs .debug_frame register
   mapping (via DWARF2_FRAME_REG_OUT) and address encoding.  */

void
output_cfi (dw_cfi_ref cfi, dw_fde_ref fde, int for_eh)
{
  unsigned long r;
  HOST_WIDE_INT off;

  /* The first three opcodes pack their operand into the low 6 bits of
     the opcode byte itself.  */
  if (cfi->dw_cfi_opc == DW_CFA_advance_loc)
    dw2_asm_output_data (1, (cfi->dw_cfi_opc
			     | (cfi->dw_cfi_oprnd1.dw_cfi_offset & 0x3f)),
			 "DW_CFA_advance_loc " HOST_WIDE_INT_PRINT_HEX,
			 ((unsigned HOST_WIDE_INT)
			  cfi->dw_cfi_oprnd1.dw_cfi_offset));
  else if (cfi->dw_cfi_opc == DW_CFA_offset)
    {
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
      dw2_asm_output_data (1, (cfi->dw_cfi_opc | (r & 0x3f)),
			   "DW_CFA_offset, column %#lx", r);
      /* The encoded offset is a multiple of the data alignment factor.  */
      off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
      dw2_asm_output_data_uleb128 (off, NULL);
    }
  else if (cfi->dw_cfi_opc == DW_CFA_restore)
    {
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
      dw2_asm_output_data (1, (cfi->dw_cfi_opc | (r & 0x3f)),
			   "DW_CFA_restore, column %#lx", r);
    }
  else
    {
      /* All remaining opcodes are a full opcode byte followed by
	 explicit operands.  */
      dw2_asm_output_data (1, cfi->dw_cfi_opc,
			   "%s", dwarf_cfi_name (cfi->dw_cfi_opc));

      switch (cfi->dw_cfi_opc)
	{
	case DW_CFA_set_loc:
	  /* For .eh_frame the address uses the assembler's preferred EH
	     encoding; for .debug_frame it is a plain address.  */
	  if (for_eh)
	    dw2_asm_output_encoded_addr_rtx (
		ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/1, /*global=*/0),
		gen_rtx_SYMBOL_REF (Pmode, cfi->dw_cfi_oprnd1.dw_cfi_addr),
		false, NULL);
	  else
	    dw2_asm_output_addr (DWARF2_ADDR_SIZE,
				 cfi->dw_cfi_oprnd1.dw_cfi_addr, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	/* The advances are emitted as a delta (of the given width) from
	   the FDE's current label, which is then updated.  */
	case DW_CFA_advance_loc1:
	  dw2_asm_output_delta (1, cfi->dw_cfi_oprnd1.dw_cfi_addr,
				fde->dw_fde_current_label, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	case DW_CFA_advance_loc2:
	  dw2_asm_output_delta (2, cfi->dw_cfi_oprnd1.dw_cfi_addr,
				fde->dw_fde_current_label, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	case DW_CFA_advance_loc4:
	  dw2_asm_output_delta (4, cfi->dw_cfi_oprnd1.dw_cfi_addr,
				fde->dw_fde_current_label, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	case DW_CFA_MIPS_advance_loc8:
	  dw2_asm_output_delta (8, cfi->dw_cfi_oprnd1.dw_cfi_addr,
				fde->dw_fde_current_label, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	case DW_CFA_offset_extended:
	  /* Register uleb128, then data-aligned offset uleb128.  */
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
	  dw2_asm_output_data_uleb128 (off, NULL);
	  break;

	case DW_CFA_def_cfa:
	  /* Register uleb128, then a non-factored offset uleb128.  */
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  dw2_asm_output_data_uleb128 (cfi->dw_cfi_oprnd2.dw_cfi_offset, NULL);
	  break;

	case DW_CFA_offset_extended_sf:
	  /* Signed variant: register uleb128, data-aligned offset sleb128.  */
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
	  dw2_asm_output_data_sleb128 (off, NULL);
	  break;

	case DW_CFA_def_cfa_sf:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
	  dw2_asm_output_data_sleb128 (off, NULL);
	  break;

	/* These four take a single register operand.  */
	case DW_CFA_restore_extended:
	case DW_CFA_undefined:
	case DW_CFA_same_value:
	case DW_CFA_def_cfa_register:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  break;

	case DW_CFA_register:
	  /* Two register operands: column, then the register holding it.  */
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd2.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  break;

	case DW_CFA_def_cfa_offset:
	case DW_CFA_GNU_args_size:
	  dw2_asm_output_data_uleb128 (cfi->dw_cfi_oprnd1.dw_cfi_offset, NULL);
	  break;

	case DW_CFA_def_cfa_offset_sf:
	  off = div_data_align (cfi->dw_cfi_oprnd1.dw_cfi_offset);
	  dw2_asm_output_data_sleb128 (off, NULL);
	  break;

	case DW_CFA_GNU_window_save:
	  /* No operands.  */
	  break;

	case DW_CFA_def_cfa_expression:
	case DW_CFA_expression:
	  /* The expression block is emitted by output_cfa_loc.  */
	  output_cfa_loc (cfi, for_eh);
	  break;

	case DW_CFA_GNU_negative_offset_extended:
	  /* Obsoleted by DW_CFA_offset_extended_sf.  */
	  gcc_unreachable ();

	default:
	  break;
	}
    }
}
2835
/* Similar, but do it via assembler directives instead.  F is either
   asm_out_file (real .cfi_* / .cfi_escape output) or a debug dump
   file, in which case opcodes with no directive form print a readable
   approximation instead.  */

void
output_cfi_directive (FILE *f, dw_cfi_ref cfi)
{
  unsigned long r, r2;

  switch (cfi->dw_cfi_opc)
    {
    case DW_CFA_advance_loc:
    case DW_CFA_advance_loc1:
    case DW_CFA_advance_loc2:
    case DW_CFA_advance_loc4:
    case DW_CFA_MIPS_advance_loc8:
    case DW_CFA_set_loc:
      /* Should only be created in a code path not followed when emitting
	 via directives.  The assembler is going to take care of this for
	 us.  But this routine is also used for debugging dumps, so
	 print something.  */
      gcc_assert (f != asm_out_file);
      fprintf (f, "\t.cfi_advance_loc\n");
      break;

    case DW_CFA_offset:
    case DW_CFA_offset_extended:
    case DW_CFA_offset_extended_sf:
      /* Register numbers always use the EH mapping here.  */
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_offset %lu, "HOST_WIDE_INT_PRINT_DEC"\n",
	       r, cfi->dw_cfi_oprnd2.dw_cfi_offset);
      break;

    case DW_CFA_restore:
    case DW_CFA_restore_extended:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_restore %lu\n", r);
      break;

    case DW_CFA_undefined:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_undefined %lu\n", r);
      break;

    case DW_CFA_same_value:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_same_value %lu\n", r);
      break;

    case DW_CFA_def_cfa:
    case DW_CFA_def_cfa_sf:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_def_cfa %lu, "HOST_WIDE_INT_PRINT_DEC"\n",
	       r, cfi->dw_cfi_oprnd2.dw_cfi_offset);
      break;

    case DW_CFA_def_cfa_register:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_def_cfa_register %lu\n", r);
      break;

    case DW_CFA_register:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      r2 = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd2.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_register %lu, %lu\n", r, r2);
      break;

    case DW_CFA_def_cfa_offset:
    case DW_CFA_def_cfa_offset_sf:
      fprintf (f, "\t.cfi_def_cfa_offset "
	       HOST_WIDE_INT_PRINT_DEC"\n",
	       cfi->dw_cfi_oprnd1.dw_cfi_offset);
      break;

    case DW_CFA_remember_state:
      fprintf (f, "\t.cfi_remember_state\n");
      break;
    case DW_CFA_restore_state:
      fprintf (f, "\t.cfi_restore_state\n");
      break;

    case DW_CFA_GNU_args_size:
      /* No directive exists for this opcode; emit a raw escape for the
	 assembler, with an optional human-readable comment.  */
      if (f == asm_out_file)
	{
	  fprintf (f, "\t.cfi_escape %#x,", DW_CFA_GNU_args_size);
	  dw2_asm_output_data_uleb128_raw (cfi->dw_cfi_oprnd1.dw_cfi_offset);
	  if (flag_debug_asm)
	    fprintf (f, "\t%s args_size "HOST_WIDE_INT_PRINT_DEC,
		     ASM_COMMENT_START, cfi->dw_cfi_oprnd1.dw_cfi_offset);
	  fputc ('\n', f);
	}
      else
	{
	  fprintf (f, "\t.cfi_GNU_args_size "HOST_WIDE_INT_PRINT_DEC "\n",
		   cfi->dw_cfi_oprnd1.dw_cfi_offset);
	}
      break;

    case DW_CFA_GNU_window_save:
      fprintf (f, "\t.cfi_window_save\n");
      break;

    case DW_CFA_def_cfa_expression:
      /* For a debug dump, elide the expression body.  */
      if (f != asm_out_file)
	{
	  fprintf (f, "\t.cfi_def_cfa_expression ...\n");
	  break;
	}
      /* FALLTHRU */
    case DW_CFA_expression:
      if (f != asm_out_file)
	{
	  fprintf (f, "\t.cfi_cfa_expression ...\n");
	  break;
	}
      /* Expressions have no directive either; escape the raw bytes.  */
      fprintf (f, "\t.cfi_escape %#x,", cfi->dw_cfi_opc);
      output_cfa_loc_raw (cfi);
      fputc ('\n', f);
      break;

    default:
      gcc_unreachable ();
    }
}
2958
/* Emit CFI as an assembler directive, when (and only when) we are
   emitting unwind info via .cfi_* directives; otherwise do nothing.  */
void
dwarf2out_emit_cfi (dw_cfi_ref cfi)
{
  if (dwarf2out_do_cfi_asm ())
    output_cfi_directive (asm_out_file, cfi);
}
2965
/* Output CFIs from VEC, up to index UPTO, to bring current FDE to the
   same state as after executing CFIs in CFI chain.  DO_CFI_ASM is
   true if .cfi_* directives shall be emitted, false otherwise.  If it
   is false, FDE and FOR_EH are the other arguments to pass to
   output_cfi.

   Rather than replaying every CFI verbatim, the prefix is coalesced:
   we remember only the most recent CFI affecting each register, the
   most recent CFA definition, and the most recent args_size, and emit
   that reduced set at the end (or at a point where coalescing cannot
   continue, e.g. DW_CFA_GNU_window_save).  */

void
output_cfis (cfi_vec vec, int upto, bool do_cfi_asm,
	     dw_fde_ref fde, bool for_eh)
{
  int ix;
  /* Scratch CFI used to merge separate CFA-register and CFA-offset
     settings into a single DW_CFA_def_cfa[_sf].  */
  struct dw_cfi_struct cfi_buf;
  dw_cfi_ref cfi2;
  /* Most recent CFIs seen for args_size, the CFA register, and the
     CFA offset, respectively.  */
  dw_cfi_ref cfi_args_size = NULL, cfi_cfa = NULL, cfi_cfa_offset = NULL;
  /* Per-register slots holding the last CFI that set each register's
     rule; grown on demand.  */
  VEC(dw_cfi_ref, heap) *regs = VEC_alloc (dw_cfi_ref, heap, 32);
  unsigned int len, idx;

  /* Iterate one slot past UPTO; the extra iteration (cfi == NULL,
     treated as DW_CFA_nop) triggers the final flush.  */
  for (ix = 0; ix < upto + 1; ix++)
    {
      dw_cfi_ref cfi = ix < upto ? VEC_index (dw_cfi_ref, vec, ix) : NULL;
      switch (cfi ? cfi->dw_cfi_opc : DW_CFA_nop)
	{
	case DW_CFA_advance_loc:
	case DW_CFA_advance_loc1:
	case DW_CFA_advance_loc2:
	case DW_CFA_advance_loc4:
	case DW_CFA_MIPS_advance_loc8:
	case DW_CFA_set_loc:
	  /* All advances should be ignored.  */
	  break;
	case DW_CFA_remember_state:
	  {
	    dw_cfi_ref args_size = cfi_args_size;

	    /* Skip everything between .cfi_remember_state and
	       .cfi_restore_state.  Only an args_size seen inside the
	       region survives past the restore.  */
	    ix++;
	    if (ix == upto)
	      goto flush_all;

	    for (; ix < upto; ix++)
	      {
		cfi2 = VEC_index (dw_cfi_ref, vec, ix);
		if (cfi2->dw_cfi_opc == DW_CFA_restore_state)
		  break;
		else if (cfi2->dw_cfi_opc == DW_CFA_GNU_args_size)
		  args_size = cfi2;
		else
		  /* Nested remember_state is not expected here.  */
		  gcc_assert (cfi2->dw_cfi_opc != DW_CFA_remember_state);
	      }

	    cfi_args_size = args_size;
	    break;
	  }
	case DW_CFA_GNU_args_size:
	  cfi_args_size = cfi;
	  break;
	case DW_CFA_GNU_window_save:
	  /* No compact summary is possible across a window save; flush
	     everything accumulated so far and emit this CFI verbatim.  */
	  goto flush_all;
	case DW_CFA_offset:
	case DW_CFA_offset_extended:
	case DW_CFA_offset_extended_sf:
	case DW_CFA_restore:
	case DW_CFA_restore_extended:
	case DW_CFA_undefined:
	case DW_CFA_same_value:
	case DW_CFA_register:
	case DW_CFA_val_offset:
	case DW_CFA_val_offset_sf:
	case DW_CFA_expression:
	case DW_CFA_val_expression:
	case DW_CFA_GNU_negative_offset_extended:
	  /* All of these set the rule for a single register; keep only
	     the most recent one per register.  */
	  if (VEC_length (dw_cfi_ref, regs)
	      <= cfi->dw_cfi_oprnd1.dw_cfi_reg_num)
	    VEC_safe_grow_cleared (dw_cfi_ref, heap, regs,
				   cfi->dw_cfi_oprnd1.dw_cfi_reg_num + 1);
	  VEC_replace (dw_cfi_ref, regs, cfi->dw_cfi_oprnd1.dw_cfi_reg_num,
		       cfi);
	  break;
	case DW_CFA_def_cfa:
	case DW_CFA_def_cfa_sf:
	case DW_CFA_def_cfa_expression:
	  /* These define both the CFA register and its offset.  */
	  cfi_cfa = cfi;
	  cfi_cfa_offset = cfi;
	  break;
	case DW_CFA_def_cfa_register:
	  cfi_cfa = cfi;
	  break;
	case DW_CFA_def_cfa_offset:
	case DW_CFA_def_cfa_offset_sf:
	  cfi_cfa_offset = cfi;
	  break;
	case DW_CFA_nop:
	  gcc_assert (cfi == NULL);
	flush_all:
	  /* Emit the most recent rule for each register.  Plain
	     restores can be dropped: the register is already in its
	     initial (CIE) state.  */
	  len = VEC_length (dw_cfi_ref, regs);
	  for (idx = 0; idx < len; idx++)
	    {
	      cfi2 = VEC_replace (dw_cfi_ref, regs, idx, NULL);
	      if (cfi2 != NULL
		  && cfi2->dw_cfi_opc != DW_CFA_restore
		  && cfi2->dw_cfi_opc != DW_CFA_restore_extended)
		{
		  if (do_cfi_asm)
		    output_cfi_directive (asm_out_file, cfi2);
		  else
		    output_cfi (cfi2, fde, for_eh);
		}
	    }
	  /* If the CFA register and offset were last set by different
	     CFIs, merge them into a single def_cfa in CFI_BUF.  */
	  if (cfi_cfa && cfi_cfa_offset && cfi_cfa_offset != cfi_cfa)
	    {
	      gcc_assert (cfi_cfa->dw_cfi_opc != DW_CFA_def_cfa_expression);
	      cfi_buf = *cfi_cfa;
	      switch (cfi_cfa_offset->dw_cfi_opc)
		{
		case DW_CFA_def_cfa_offset:
		  cfi_buf.dw_cfi_opc = DW_CFA_def_cfa;
		  cfi_buf.dw_cfi_oprnd2 = cfi_cfa_offset->dw_cfi_oprnd1;
		  break;
		case DW_CFA_def_cfa_offset_sf:
		  cfi_buf.dw_cfi_opc = DW_CFA_def_cfa_sf;
		  cfi_buf.dw_cfi_oprnd2 = cfi_cfa_offset->dw_cfi_oprnd1;
		  break;
		case DW_CFA_def_cfa:
		case DW_CFA_def_cfa_sf:
		  cfi_buf.dw_cfi_opc = cfi_cfa_offset->dw_cfi_opc;
		  cfi_buf.dw_cfi_oprnd2 = cfi_cfa_offset->dw_cfi_oprnd2;
		  break;
		default:
		  gcc_unreachable ();
		}
	      cfi_cfa = &cfi_buf;
	    }
	  else if (cfi_cfa_offset)
	    cfi_cfa = cfi_cfa_offset;
	  if (cfi_cfa)
	    {
	      if (do_cfi_asm)
		output_cfi_directive (asm_out_file, cfi_cfa);
	      else
		output_cfi (cfi_cfa, fde, for_eh);
	    }
	  cfi_cfa = NULL;
	  cfi_cfa_offset = NULL;
	  /* A zero args_size is the initial state; don't emit it.  */
	  if (cfi_args_size
	      && cfi_args_size->dw_cfi_oprnd1.dw_cfi_offset)
	    {
	      if (do_cfi_asm)
		output_cfi_directive (asm_out_file, cfi_args_size);
	      else
		output_cfi (cfi_args_size, fde, for_eh);
	    }
	  cfi_args_size = NULL;
	  if (cfi == NULL)
	    {
	      /* Final flush: we're done.  */
	      VEC_free (dw_cfi_ref, heap, regs);
	      return;
	    }
	  else if (do_cfi_asm)
	    output_cfi_directive (asm_out_file, cfi);
	  else
	    output_cfi (cfi, fde, for_eh);
	  break;
	default:
	  gcc_unreachable ();
	}
    }
}
3134 \f
3135
/* Cache the result of dwarf2out_do_cfi_asm across PCH (it is written
   only there; dwarf2out_do_frame also consults a positive value).
   This variable is tri-state, with 0 unset, >0 true, <0 false.  */
static GTY(()) signed char saved_do_cfi_asm = 0;
3139
3140 /* Decide whether we want to emit frame unwind information for the current
3141 translation unit. */
3142
3143 bool
3144 dwarf2out_do_frame (void)
3145 {
3146 /* We want to emit correct CFA location expressions or lists, so we
3147 have to return true if we're going to output debug info, even if
3148 we're not going to output frame or unwind info. */
3149 if (write_symbols == DWARF2_DEBUG || write_symbols == VMS_AND_DWARF2_DEBUG)
3150 return true;
3151
3152 if (saved_do_cfi_asm > 0)
3153 return true;
3154
3155 if (targetm.debug_unwind_info () == UI_DWARF2)
3156 return true;
3157
3158 if ((flag_unwind_tables || flag_exceptions)
3159 && targetm_common.except_unwind_info (&global_options) == UI_DWARF2)
3160 return true;
3161
3162 return false;
3163 }
3164
/* Decide whether to emit frame unwind via assembler directives
   (.cfi_*).  The answer is computed once and cached in
   saved_do_cfi_asm, which also survives PCH.  */

bool
dwarf2out_do_cfi_asm (void)
{
  int enc;

#ifdef MIPS_DEBUGGING_INFO
  /* CFI directives are never used with MIPS debugging info.  */
  return false;
#endif

  /* Return the cached answer if we've been here before.  */
  if (saved_do_cfi_asm != 0)
    return saved_do_cfi_asm > 0;

  /* Assume failure for a moment.  */
  saved_do_cfi_asm = -1;

  if (!flag_dwarf2_cfi_asm || !dwarf2out_do_frame ())
    return false;
  if (!HAVE_GAS_CFI_PERSONALITY_DIRECTIVE)
    return false;

  /* Make sure the personality encoding is one the assembler can support.
     In particular, aligned addresses can't be handled.  */
  enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/2,/*global=*/1);
  if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
    return false;
  enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/0,/*global=*/0);
  if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
    return false;

  /* If we can't get the assembler to emit only .debug_frame, and we don't need
     dwarf2 unwind info for exceptions, then emit .debug_frame by hand.  */
  if (!HAVE_GAS_CFI_SECTIONS_DIRECTIVE
      && !flag_unwind_tables && !flag_exceptions
      && targetm_common.except_unwind_info (&global_options) != UI_DWARF2)
    return false;

  /* Success!  */
  saved_do_cfi_asm = 1;
  return true;
}
3207
/* Gate for pass_dwarf2_frame: run only when the prologue is emitted as
   RTL (HAVE_prologue) and dwarf2 frame info is wanted.  */
static bool
gate_dwarf2_frame (void)
{
#ifndef HAVE_prologue
  /* Targets which still implement the prologue in assembler text
     cannot use the generic dwarf2 unwinding.  */
  return false;
#endif

  /* ??? What to do for UI_TARGET unwinding?  They might be able to benefit
     from the optimized shrink-wrapping annotations that we will compute.
     For now, only produce the CFI notes for dwarf2.  */
  return dwarf2out_do_frame ();
}
3222
/* Pass descriptor for the RTL pass that annotates the insn stream with
   NOTE_INSN_CFI notes (timed under TV_FINAL).  */
struct rtl_opt_pass pass_dwarf2_frame =
{
 {
  RTL_PASS,
  "dwarf2",			/* name */
  gate_dwarf2_frame,		/* gate */
  execute_dwarf2_frame,		/* execute */
  NULL,				/* sub */
  NULL,				/* next */
  0,				/* static_pass_number */
  TV_FINAL,			/* tv_id */
  0,				/* properties_required */
  0,				/* properties_provided */
  0,				/* properties_destroyed */
  0,				/* todo_flags_start */
  0				/* todo_flags_finish */
 }
};
3241
3242 #include "gt-dwarf2cfi.h"