dwarf2cfi: Populate CUR_ROW->REG_SAVE.
[gcc.git] / gcc / dwarf2cfi.c
1 /* Dwarf2 Call Frame Information helper routines.
2 Copyright (C) 1992, 1993, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
3 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
4 Free Software Foundation, Inc.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "version.h"
27 #include "flags.h"
28 #include "rtl.h"
29 #include "function.h"
30 #include "dwarf2.h"
31 #include "dwarf2out.h"
32 #include "dwarf2asm.h"
33 #include "ggc.h"
34 #include "tm_p.h"
35 #include "target.h"
36 #include "common/common-target.h"
37 #include "tree-pass.h"
38
39 #include "except.h" /* expand_builtin_dwarf_sp_column */
40 #include "expr.h" /* init_return_column_size */
41 #include "regs.h" /* expand_builtin_init_dwarf_reg_sizes */
42 #include "output.h" /* asm_out_file */
43 #include "debug.h" /* dwarf2out_do_frame, dwarf2out_do_cfi_asm */
44
45
46 /* ??? Poison these here until it can be done generically. They've been
47 totally replaced in this file; make sure it stays that way. */
48 #undef DWARF2_UNWIND_INFO
49 #undef DWARF2_FRAME_INFO
50 #if (GCC_VERSION >= 3000)
51 #pragma GCC poison DWARF2_UNWIND_INFO DWARF2_FRAME_INFO
52 #endif
53
54 #ifndef INCOMING_RETURN_ADDR_RTX
55 #define INCOMING_RETURN_ADDR_RTX (gcc_unreachable (), NULL_RTX)
56 #endif
57
58 /* Maximum size (in bytes) of an artificially generated label. */
59 #define MAX_ARTIFICIAL_LABEL_BYTES 30
60 \f
/* A collected description of an entire row of the abstract CFI table.  */
typedef struct GTY(()) dw_cfi_row_struct
{
  /* The expression that computes the CFA, expressed in two different ways.
     The CFA member for the simple cases, and the full CFI expression for
     the complex cases.  The latter will be a DW_CFA_def_cfa_expression.  */
  dw_cfa_location cfa;
  dw_cfi_ref cfa_cfi;

  /* The expressions for any register column that is saved.  Indexed by
     DWARF register column number; a null element means "not saved".  */
  cfi_vec reg_save;

  /* The value of any DW_CFA_GNU_args_size.  */
  HOST_WIDE_INT args_size;
} dw_cfi_row;

typedef dw_cfi_row *dw_cfi_row_ref;
78 \f
79 /* A vector of call frame insns for the CIE. */
80 cfi_vec cie_cfi_vec;
81
82 /* The state of the first row of the FDE table, which includes the
83 state provided by the CIE. */
84 static GTY(()) dw_cfi_row_ref cie_cfi_row;
85
86 static GTY(()) unsigned long dwarf2out_cfi_label_num;
87
88 /* The insn after which a new CFI note should be emitted. */
89 static rtx add_cfi_insn;
90
91 /* When non-null, add_cfi will add the CFI to this vector. */
92 static cfi_vec *add_cfi_vec;
93
94 /* True if remember_state should be emitted before following CFI directive. */
95 static bool emit_cfa_remember;
96
97 /* True if any CFI directives were emitted at the current insn. */
98 static bool any_cfis_emitted;
99
100 /* Short-hand for commonly used register numbers. */
101 static unsigned dw_stack_pointer_regnum;
102 static unsigned dw_frame_pointer_regnum;
103 \f
104
105 static void dwarf2out_cfi_begin_epilogue (rtx insn);
106 static void dwarf2out_frame_debug_restore_state (void);
107
108 \f
109 /* Hook used by __throw. */
110
111 rtx
112 expand_builtin_dwarf_sp_column (void)
113 {
114 unsigned int dwarf_regnum = DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM);
115 return GEN_INT (DWARF2_FRAME_REG_OUT (dwarf_regnum, 1));
116 }
117
118 /* MEM is a memory reference for the register size table, each element of
119 which has mode MODE. Initialize column C as a return address column. */
120
121 static void
122 init_return_column_size (enum machine_mode mode, rtx mem, unsigned int c)
123 {
124 HOST_WIDE_INT offset = c * GET_MODE_SIZE (mode);
125 HOST_WIDE_INT size = GET_MODE_SIZE (Pmode);
126 emit_move_insn (adjust_address (mem, mode, offset), GEN_INT (size));
127 }
128
/* Generate code to initialize the register size table.  ADDRESS is the
   tree for the base of the table; each byte-sized element holds the
   saved size of the corresponding DWARF register column.  */

void
expand_builtin_init_dwarf_reg_sizes (tree address)
{
  unsigned int i;
  enum machine_mode mode = TYPE_MODE (char_type_node);
  rtx addr = expand_normal (address);
  rtx mem = gen_rtx_MEM (BLKmode, addr);
  bool wrote_return_column = false;

  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    {
      /* Hard regno -> DWARF column -> externally visible column.  */
      unsigned int dnum = DWARF_FRAME_REGNUM (i);
      unsigned int rnum = DWARF2_FRAME_REG_OUT (dnum, 1);

      if (rnum < DWARF_FRAME_REGISTERS)
	{
	  HOST_WIDE_INT offset = rnum * GET_MODE_SIZE (mode);
	  enum machine_mode save_mode = reg_raw_mode[i];
	  HOST_WIDE_INT size;

	  /* If only part of the register is clobbered by calls, the
	     unwinder saves just the call-saved part; use that mode.  */
	  if (HARD_REGNO_CALL_PART_CLOBBERED (i, save_mode))
	    save_mode = choose_hard_reg_mode (i, 1, true);
	  if (dnum == DWARF_FRAME_RETURN_COLUMN)
	    {
	      if (save_mode == VOIDmode)
		continue;
	      wrote_return_column = true;
	    }
	  size = GET_MODE_SIZE (save_mode);
	  if (offset < 0)
	    continue;

	  emit_move_insn (adjust_address (mem, mode, offset),
			  gen_int_mode (size, mode));
	}
    }

  /* Make sure the return column always has a usable size.  */
  if (!wrote_return_column)
    init_return_column_size (mode, mem, DWARF_FRAME_RETURN_COLUMN);

#ifdef DWARF_ALT_FRAME_RETURN_COLUMN
  init_return_column_size (mode, mem, DWARF_ALT_FRAME_RETURN_COLUMN);
#endif

  /* Let the target record sizes for any extra columns it defines.  */
  targetm.init_dwarf_reg_sizes_extra (address);
}
177
178 /* Divide OFF by DWARF_CIE_DATA_ALIGNMENT, asserting no remainder. */
179
180 static inline HOST_WIDE_INT
181 div_data_align (HOST_WIDE_INT off)
182 {
183 HOST_WIDE_INT r = off / DWARF_CIE_DATA_ALIGNMENT;
184 gcc_assert (r * DWARF_CIE_DATA_ALIGNMENT == off);
185 return r;
186 }
187
188 /* Return true if we need a signed version of a given opcode
189 (e.g. DW_CFA_offset_extended_sf vs DW_CFA_offset_extended). */
190
191 static inline bool
192 need_data_align_sf_opcode (HOST_WIDE_INT off)
193 {
194 return DWARF_CIE_DATA_ALIGNMENT < 0 ? off > 0 : off < 0;
195 }
196
197 /* Return a pointer to a newly allocated Call Frame Instruction. */
198
199 static inline dw_cfi_ref
200 new_cfi (void)
201 {
202 dw_cfi_ref cfi = ggc_alloc_dw_cfi_node ();
203
204 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = 0;
205 cfi->dw_cfi_oprnd2.dw_cfi_reg_num = 0;
206
207 return cfi;
208 }
209
210 /* Return a newly allocated CFI row, with no defined data. */
211
212 static dw_cfi_row_ref
213 new_cfi_row (void)
214 {
215 dw_cfi_row_ref row = ggc_alloc_cleared_dw_cfi_row ();
216
217 row->cfa.reg = INVALID_REGNUM;
218
219 return row;
220 }
221
222 /* Return a copy of an existing CFI row. */
223
224 static dw_cfi_row_ref
225 copy_cfi_row (dw_cfi_row_ref src)
226 {
227 dw_cfi_row_ref dst = ggc_alloc_dw_cfi_row ();
228
229 *dst = *src;
230 dst->reg_save = VEC_copy (dw_cfi_ref, gc, src->reg_save);
231
232 return dst;
233 }
234
235 /* Free an allocated CFI row. */
236
237 static void
238 free_cfi_row (dw_cfi_row_ref row)
239 {
240 if (row != NULL)
241 {
242 VEC_free (dw_cfi_ref, gc, row->reg_save);
243 ggc_free (row);
244 }
245 }
246
247 /* Generate a new label for the CFI info to refer to. */
248
249 static char *
250 dwarf2out_cfi_label (void)
251 {
252 int num = dwarf2out_cfi_label_num++;
253 char label[20];
254
255 ASM_GENERATE_INTERNAL_LABEL (label, "LCFI", num);
256
257 return xstrdup (label);
258 }
259
260 /* Add CFI either to the current insn stream or to a vector, or both. */
261
262 static void
263 add_cfi (dw_cfi_ref cfi)
264 {
265 if (emit_cfa_remember)
266 {
267 dw_cfi_ref cfi_remember;
268
269 /* Emit the state save. */
270 emit_cfa_remember = false;
271 cfi_remember = new_cfi ();
272 cfi_remember->dw_cfi_opc = DW_CFA_remember_state;
273 add_cfi (cfi_remember);
274 }
275
276 any_cfis_emitted = true;
277
278 if (add_cfi_insn != NULL)
279 {
280 add_cfi_insn = emit_note_after (NOTE_INSN_CFI, add_cfi_insn);
281 NOTE_CFI (add_cfi_insn) = cfi;
282 }
283
284 if (add_cfi_vec != NULL)
285 VEC_safe_push (dw_cfi_ref, gc, *add_cfi_vec, cfi);
286 }
287
288 /* Perform ROW->REG_SAVE[COLUMN] = CFI. CFI may be null, indicating
289 that the register column is no longer saved. */
290
291 static void
292 update_row_reg_save (dw_cfi_row_ref row, unsigned column, dw_cfi_ref cfi)
293 {
294 if (VEC_length (dw_cfi_ref, row->reg_save) <= column)
295 VEC_safe_grow_cleared (dw_cfi_ref, gc, row->reg_save, column + 1);
296 VEC_replace (dw_cfi_ref, row->reg_save, column, cfi);
297 }
298
299 /* This function fills in aa dw_cfa_location structure from a dwarf location
300 descriptor sequence. */
301
302 static void
303 get_cfa_from_loc_descr (dw_cfa_location *cfa, struct dw_loc_descr_struct *loc)
304 {
305 struct dw_loc_descr_struct *ptr;
306 cfa->offset = 0;
307 cfa->base_offset = 0;
308 cfa->indirect = 0;
309 cfa->reg = -1;
310
311 for (ptr = loc; ptr != NULL; ptr = ptr->dw_loc_next)
312 {
313 enum dwarf_location_atom op = ptr->dw_loc_opc;
314
315 switch (op)
316 {
317 case DW_OP_reg0:
318 case DW_OP_reg1:
319 case DW_OP_reg2:
320 case DW_OP_reg3:
321 case DW_OP_reg4:
322 case DW_OP_reg5:
323 case DW_OP_reg6:
324 case DW_OP_reg7:
325 case DW_OP_reg8:
326 case DW_OP_reg9:
327 case DW_OP_reg10:
328 case DW_OP_reg11:
329 case DW_OP_reg12:
330 case DW_OP_reg13:
331 case DW_OP_reg14:
332 case DW_OP_reg15:
333 case DW_OP_reg16:
334 case DW_OP_reg17:
335 case DW_OP_reg18:
336 case DW_OP_reg19:
337 case DW_OP_reg20:
338 case DW_OP_reg21:
339 case DW_OP_reg22:
340 case DW_OP_reg23:
341 case DW_OP_reg24:
342 case DW_OP_reg25:
343 case DW_OP_reg26:
344 case DW_OP_reg27:
345 case DW_OP_reg28:
346 case DW_OP_reg29:
347 case DW_OP_reg30:
348 case DW_OP_reg31:
349 cfa->reg = op - DW_OP_reg0;
350 break;
351 case DW_OP_regx:
352 cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
353 break;
354 case DW_OP_breg0:
355 case DW_OP_breg1:
356 case DW_OP_breg2:
357 case DW_OP_breg3:
358 case DW_OP_breg4:
359 case DW_OP_breg5:
360 case DW_OP_breg6:
361 case DW_OP_breg7:
362 case DW_OP_breg8:
363 case DW_OP_breg9:
364 case DW_OP_breg10:
365 case DW_OP_breg11:
366 case DW_OP_breg12:
367 case DW_OP_breg13:
368 case DW_OP_breg14:
369 case DW_OP_breg15:
370 case DW_OP_breg16:
371 case DW_OP_breg17:
372 case DW_OP_breg18:
373 case DW_OP_breg19:
374 case DW_OP_breg20:
375 case DW_OP_breg21:
376 case DW_OP_breg22:
377 case DW_OP_breg23:
378 case DW_OP_breg24:
379 case DW_OP_breg25:
380 case DW_OP_breg26:
381 case DW_OP_breg27:
382 case DW_OP_breg28:
383 case DW_OP_breg29:
384 case DW_OP_breg30:
385 case DW_OP_breg31:
386 cfa->reg = op - DW_OP_breg0;
387 cfa->base_offset = ptr->dw_loc_oprnd1.v.val_int;
388 break;
389 case DW_OP_bregx:
390 cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
391 cfa->base_offset = ptr->dw_loc_oprnd2.v.val_int;
392 break;
393 case DW_OP_deref:
394 cfa->indirect = 1;
395 break;
396 case DW_OP_plus_uconst:
397 cfa->offset = ptr->dw_loc_oprnd1.v.val_unsigned;
398 break;
399 default:
400 gcc_unreachable ();
401 }
402 }
403 }
404
/* Find the previous value for the CFA, iteratively.  CFI is the opcode
   to interpret, *LOC will be updated as necessary, *REMEMBER is used for
   one level of remember/restore state processing.  */

void
lookup_cfa_1 (dw_cfi_ref cfi, dw_cfa_location *loc, dw_cfa_location *remember)
{
  switch (cfi->dw_cfi_opc)
    {
    case DW_CFA_def_cfa_offset:
    case DW_CFA_def_cfa_offset_sf:
      /* Only the offset changes; the CFA register is unchanged.  */
      loc->offset = cfi->dw_cfi_oprnd1.dw_cfi_offset;
      break;
    case DW_CFA_def_cfa_register:
      /* Only the register changes; the offset is unchanged.  */
      loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
      break;
    case DW_CFA_def_cfa:
    case DW_CFA_def_cfa_sf:
      loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
      loc->offset = cfi->dw_cfi_oprnd2.dw_cfi_offset;
      break;
    case DW_CFA_def_cfa_expression:
      /* Decode the location expression back into reg/offset form.  */
      get_cfa_from_loc_descr (loc, cfi->dw_cfi_oprnd1.dw_cfi_loc);
      break;

    case DW_CFA_remember_state:
      /* Only a single level of remember/restore is supported here.  */
      gcc_assert (!remember->in_use);
      *remember = *loc;
      remember->in_use = 1;
      break;
    case DW_CFA_restore_state:
      gcc_assert (remember->in_use);
      *loc = *remember;
      remember->in_use = 0;
      break;

    default:
      /* All other opcodes leave the CFA untouched.  */
      break;
    }
}
445
446 /* The current, i.e. most recently generated, row of the CFI table. */
447 static dw_cfi_row_ref cur_row;
448
/* The row state from a preceding DW_CFA_remember_state.  */
450 static dw_cfi_row_ref remember_row;
451
452 /* The register used for saving registers to the stack, and its offset
453 from the CFA. */
454 static dw_cfa_location cfa_store;
455
456 /* A temporary register holding an integral value used in adjusting SP
457 or setting up the store_reg. The "offset" field holds the integer
458 value, not an offset. */
459 static dw_cfa_location cfa_temp;
460
461 /* The (really) current value for DW_CFA_GNU_args_size. We delay actually
462 emitting this data, i.e. updating CUR_ROW, without async unwind. */
463 static HOST_WIDE_INT args_size;
464
465 /* Determine if two dw_cfa_location structures define the same data. */
466
467 bool
468 cfa_equal_p (const dw_cfa_location *loc1, const dw_cfa_location *loc2)
469 {
470 return (loc1->reg == loc2->reg
471 && loc1->offset == loc2->offset
472 && loc1->indirect == loc2->indirect
473 && (loc1->indirect == 0
474 || loc1->base_offset == loc2->base_offset));
475 }
476
/* This routine does the actual work.  The CFA is now calculated from
   the dw_cfa_location structure.  Emits the most compact DW_CFA_*
   opcode that transforms the current row's CFA into *LOC_P, and
   records *LOC_P as the new current CFA.  */

static void
def_cfa_1 (dw_cfa_location *loc_p)
{
  dw_cfi_ref cfi;
  dw_cfa_location loc = *loc_p;

  /* Keep the register-save base in sync when it tracks the CFA
     register and the CFA is a simple reg+offset.  */
  if (cfa_store.reg == loc.reg && loc.indirect == 0)
    cfa_store.offset = loc.offset;

  /* If nothing changed, no need to issue any call frame instructions.  */
  if (cfa_equal_p (&loc, &cur_row->cfa))
    return;

  cfi = new_cfi ();

  if (loc.reg == cur_row->cfa.reg && !loc.indirect && !cur_row->cfa.indirect)
    {
      /* Construct a "DW_CFA_def_cfa_offset <offset>" instruction, indicating
	 the CFA register did not change but the offset did.  The data
	 factoring for DW_CFA_def_cfa_offset_sf happens in output_cfi, or
	 in the assembler via the .cfi_def_cfa_offset directive.  */
      if (loc.offset < 0)
	cfi->dw_cfi_opc = DW_CFA_def_cfa_offset_sf;
      else
	cfi->dw_cfi_opc = DW_CFA_def_cfa_offset;
      cfi->dw_cfi_oprnd1.dw_cfi_offset = loc.offset;
    }

#ifndef MIPS_DEBUGGING_INFO  /* SGI dbx thinks this means no offset.  */
  else if (loc.offset == cur_row->cfa.offset
	   && cur_row->cfa.reg != INVALID_REGNUM
	   && !loc.indirect
	   && !cur_row->cfa.indirect)
    {
      /* Construct a "DW_CFA_def_cfa_register <register>" instruction,
	 indicating the CFA register has changed to <register> but the
	 offset has not changed.  */
      cfi->dw_cfi_opc = DW_CFA_def_cfa_register;
      cfi->dw_cfi_oprnd1.dw_cfi_reg_num = loc.reg;
    }
#endif

  else if (loc.indirect == 0)
    {
      /* Construct a "DW_CFA_def_cfa <register> <offset>" instruction,
	 indicating the CFA register has changed to <register> with
	 the specified offset.  The data factoring for DW_CFA_def_cfa_sf
	 happens in output_cfi, or in the assembler via the .cfi_def_cfa
	 directive.  */
      if (loc.offset < 0)
	cfi->dw_cfi_opc = DW_CFA_def_cfa_sf;
      else
	cfi->dw_cfi_opc = DW_CFA_def_cfa;
      cfi->dw_cfi_oprnd1.dw_cfi_reg_num = loc.reg;
      cfi->dw_cfi_oprnd2.dw_cfi_offset = loc.offset;
    }
  else
    {
      /* Construct a DW_CFA_def_cfa_expression instruction to
	 calculate the CFA using a full location expression since no
	 register-offset pair is available.  */
      struct dw_loc_descr_struct *loc_list;

      cfi->dw_cfi_opc = DW_CFA_def_cfa_expression;
      loc_list = build_cfa_loc (&loc, 0);
      cfi->dw_cfi_oprnd1.dw_cfi_loc = loc_list;

      /* Also record the expression form of the new CFA in the row.  */
      cur_row->cfa_cfi = cfi;
    }

  add_cfi (cfi);
  cur_row->cfa = loc;
}
553
554 /* Add the CFI for saving a register. REG is the CFA column number.
555 If SREG is -1, the register is saved at OFFSET from the CFA;
556 otherwise it is saved in SREG. */
557
558 static void
559 reg_save (unsigned int reg, unsigned int sreg, HOST_WIDE_INT offset)
560 {
561 dw_fde_ref fde = cfun ? cfun->fde : NULL;
562 dw_cfi_ref cfi = new_cfi ();
563
564 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
565
566 /* When stack is aligned, store REG using DW_CFA_expression with FP. */
567 if (fde
568 && fde->stack_realign
569 && sreg == INVALID_REGNUM)
570 {
571 cfi->dw_cfi_opc = DW_CFA_expression;
572 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
573 cfi->dw_cfi_oprnd2.dw_cfi_loc
574 = build_cfa_aligned_loc (&cur_row->cfa, offset,
575 fde->stack_realignment);
576 }
577 else if (sreg == INVALID_REGNUM)
578 {
579 if (need_data_align_sf_opcode (offset))
580 cfi->dw_cfi_opc = DW_CFA_offset_extended_sf;
581 else if (reg & ~0x3f)
582 cfi->dw_cfi_opc = DW_CFA_offset_extended;
583 else
584 cfi->dw_cfi_opc = DW_CFA_offset;
585 cfi->dw_cfi_oprnd2.dw_cfi_offset = offset;
586 }
587 else if (sreg == reg)
588 {
589 /* While we could emit something like DW_CFA_same_value or
590 DW_CFA_restore, we never expect to see something like that
591 in a prologue. This is more likely to be a bug. A backend
592 can always bypass this by using REG_CFA_RESTORE directly. */
593 gcc_unreachable ();
594 }
595 else
596 {
597 cfi->dw_cfi_opc = DW_CFA_register;
598 cfi->dw_cfi_oprnd2.dw_cfi_reg_num = sreg;
599 }
600
601 add_cfi (cfi);
602 update_row_reg_save (cur_row, reg, cfi);
603 }
604
/* Given a SET, calculate the amount of stack adjustment it contains.
   PATTERN is the SET rtx; CUR_ARGS_SIZE is the running args_size total
   and CUR_OFFSET the adjustment already accumulated from earlier SETs
   of the same compound insn.  Returns the additional adjustment (bytes
   pushed are positive when the stack grows downward).  */

static HOST_WIDE_INT
stack_adjust_offset (const_rtx pattern, HOST_WIDE_INT cur_args_size,
		     HOST_WIDE_INT cur_offset)
{
  const_rtx src = SET_SRC (pattern);
  const_rtx dest = SET_DEST (pattern);
  HOST_WIDE_INT offset = 0;
  enum rtx_code code;

  if (dest == stack_pointer_rtx)
    {
      code = GET_CODE (src);

      /* Assume (set (reg sp) (reg whatever)) sets args_size
	 level to 0.  */
      if (code == REG && src != stack_pointer_rtx)
	{
	  /* Return the delta that brings the total back to zero.  */
	  offset = -cur_args_size;
#ifndef STACK_GROWS_DOWNWARD
	  offset = -offset;
#endif
	  return offset - cur_offset;
	}

      if (! (code == PLUS || code == MINUS)
	  || XEXP (src, 0) != stack_pointer_rtx
	  || !CONST_INT_P (XEXP (src, 1)))
	return 0;

      /* (set (reg sp) (plus (reg sp) (const_int))) */
      offset = INTVAL (XEXP (src, 1));
      if (code == PLUS)
	offset = -offset;
      return offset;
    }

  /* A load through an auto-modified sp address also adjusts sp.  */
  if (MEM_P (src) && !MEM_P (dest))
    dest = src;
  if (MEM_P (dest))
    {
      /* (set (mem (pre_dec (reg sp))) (foo)) */
      src = XEXP (dest, 0);
      code = GET_CODE (src);

      switch (code)
	{
	case PRE_MODIFY:
	case POST_MODIFY:
	  if (XEXP (src, 0) == stack_pointer_rtx)
	    {
	      rtx val = XEXP (XEXP (src, 1), 1);
	      /* We handle only adjustments by constant amount.  */
	      gcc_assert (GET_CODE (XEXP (src, 1)) == PLUS
			  && CONST_INT_P (val));
	      offset = -INTVAL (val);
	      break;
	    }
	  return 0;

	case PRE_DEC:
	case POST_DEC:
	  if (XEXP (src, 0) == stack_pointer_rtx)
	    {
	      /* A push: the adjustment is the size of the pushed datum.  */
	      offset = GET_MODE_SIZE (GET_MODE (dest));
	      break;
	    }
	  return 0;

	case PRE_INC:
	case POST_INC:
	  if (XEXP (src, 0) == stack_pointer_rtx)
	    {
	      offset = -GET_MODE_SIZE (GET_MODE (dest));
	      break;
	    }
	  return 0;

	default:
	  return 0;
	}
    }
  else
    return 0;

  return offset;
}
694
/* Precomputed args_size for CODE_LABELs and BARRIERs preceding them,
   indexed by INSN_UID.  */
697
698 static HOST_WIDE_INT *barrier_args_size;
699
/* Helper function for compute_barrier_args_size.  Handle one insn.
   CUR_ARGS_SIZE is the args_size on entry to INSN; the updated value
   is returned.  Jump targets whose args_size becomes known are pushed
   onto *NEXT for a later scanning pass.  */

static HOST_WIDE_INT
compute_barrier_args_size_1 (rtx insn, HOST_WIDE_INT cur_args_size,
			     VEC (rtx, heap) **next)
{
  HOST_WIDE_INT offset = 0;
  int i;

  if (! RTX_FRAME_RELATED_P (insn))
    {
      /* Prologue/epilogue insns are accounted for elsewhere.  */
      if (prologue_epilogue_contains (insn))
	/* Nothing */;
      else if (GET_CODE (PATTERN (insn)) == SET)
	offset = stack_adjust_offset (PATTERN (insn), cur_args_size, 0);
      else if (GET_CODE (PATTERN (insn)) == PARALLEL
	       || GET_CODE (PATTERN (insn)) == SEQUENCE)
	{
	  /* There may be stack adjustments inside compound insns.  Search
	     for them.  */
	  for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
	    if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
	      offset += stack_adjust_offset (XVECEXP (PATTERN (insn), 0, i),
					     cur_args_size, offset);
	}
    }
  else
    {
      /* For a frame-related insn, the adjustment is described by its
	 REG_FRAME_RELATED_EXPR note, if any.  */
      rtx expr = find_reg_note (insn, REG_FRAME_RELATED_EXPR, NULL_RTX);

      if (expr)
	{
	  expr = XEXP (expr, 0);
	  if (GET_CODE (expr) == PARALLEL
	      || GET_CODE (expr) == SEQUENCE)
	    for (i = 1; i < XVECLEN (expr, 0); i++)
	      {
		rtx elem = XVECEXP (expr, 0, i);

		if (GET_CODE (elem) == SET && !RTX_FRAME_RELATED_P (elem))
		  offset += stack_adjust_offset (elem, cur_args_size, offset);
	      }
	}
    }

#ifndef STACK_GROWS_DOWNWARD
  offset = -offset;
#endif

  cur_args_size += offset;
  /* args_size can never go negative.  */
  if (cur_args_size < 0)
    cur_args_size = 0;

  if (JUMP_P (insn))
    {
      rtx dest = JUMP_LABEL (insn);

      if (dest)
	{
	  /* Record the args_size at the jump target and queue it for
	     scanning if it has not been visited yet (-1).  */
	  if (barrier_args_size [INSN_UID (dest)] < 0)
	    {
	      barrier_args_size [INSN_UID (dest)] = cur_args_size;
	      VEC_safe_push (rtx, heap, *next, dest);
	    }
	}
    }

  return cur_args_size;
}
769
/* Walk the whole function and compute args_size on BARRIERs.  Uses a
   worklist of reachable labels, propagating args_size forward from the
   function start; barrier_args_size[] entries start at -1 (unvisited).  */

static void
compute_barrier_args_size (void)
{
  int max_uid = get_max_uid (), i;
  rtx insn;
  VEC (rtx, heap) *worklist, *next, *tmp;

  barrier_args_size = XNEWVEC (HOST_WIDE_INT, max_uid);
  for (i = 0; i < max_uid; i++)
    barrier_args_size[i] = -1;

  worklist = VEC_alloc (rtx, heap, 20);
  next = VEC_alloc (rtx, heap, 20);
  insn = get_insns ();
  /* The function entry has args_size 0; seed the worklist with it.  */
  barrier_args_size[INSN_UID (insn)] = 0;
  VEC_quick_push (rtx, worklist, insn);
  for (;;)
    {
      while (!VEC_empty (rtx, worklist))
	{
	  rtx prev, body, first_insn;
	  HOST_WIDE_INT cur_args_size;

	  first_insn = insn = VEC_pop (rtx, worklist);
	  cur_args_size = barrier_args_size[INSN_UID (insn)];
	  /* A barrier just before this label gets the label's size.  */
	  prev = prev_nonnote_insn (insn);
	  if (prev && BARRIER_P (prev))
	    barrier_args_size[INSN_UID (prev)] = cur_args_size;

	  for (; insn; insn = NEXT_INSN (insn))
	    {
	      if (INSN_DELETED_P (insn) || NOTE_P (insn))
		continue;
	      if (BARRIER_P (insn))
		break;

	      if (LABEL_P (insn))
		{
		  if (insn == first_insn)
		    continue;
		  else if (barrier_args_size[INSN_UID (insn)] < 0)
		    {
		      /* Fall-through into an unvisited label: record
			 the current size and keep scanning.  */
		      barrier_args_size[INSN_UID (insn)] = cur_args_size;
		      continue;
		    }
		  else
		    {
		      /* The insns starting with this label have been
			 already scanned or are in the worklist.  */
		      break;
		    }
		}

	      body = PATTERN (insn);
	      if (GET_CODE (body) == SEQUENCE)
		{
		  /* Delay-slot sequence: insns annulled from the target
		     only affect the branch-taken path, tracked in
		     DEST_ARGS_SIZE; the rest affect fall-through.  */
		  HOST_WIDE_INT dest_args_size = cur_args_size;
		  for (i = 1; i < XVECLEN (body, 0); i++)
		    if (INSN_ANNULLED_BRANCH_P (XVECEXP (body, 0, 0))
			&& INSN_FROM_TARGET_P (XVECEXP (body, 0, i)))
		      dest_args_size
			= compute_barrier_args_size_1 (XVECEXP (body, 0, i),
						       dest_args_size, &next);
		    else
		      cur_args_size
			= compute_barrier_args_size_1 (XVECEXP (body, 0, i),
						       cur_args_size, &next);

		  if (INSN_ANNULLED_BRANCH_P (XVECEXP (body, 0, 0)))
		    compute_barrier_args_size_1 (XVECEXP (body, 0, 0),
						 dest_args_size, &next);
		  else
		    cur_args_size
		      = compute_barrier_args_size_1 (XVECEXP (body, 0, 0),
						     cur_args_size, &next);
		}
	      else
		cur_args_size
		  = compute_barrier_args_size_1 (insn, cur_args_size, &next);
	    }
	}

      if (VEC_empty (rtx, next))
	break;

      /* Swap WORKLIST with NEXT and truncate NEXT for next iteration.  */
      tmp = next;
      next = worklist;
      worklist = tmp;
      VEC_truncate (rtx, next, 0);
    }

  VEC_free (rtx, heap, worklist);
  VEC_free (rtx, heap, next);
}
867
868 /* Add a CFI to update the running total of the size of arguments
869 pushed onto the stack. */
870
871 static void
872 dwarf2out_args_size (HOST_WIDE_INT size)
873 {
874 dw_cfi_ref cfi;
875
876 if (size == cur_row->args_size)
877 return;
878
879 cur_row->args_size = size;
880
881 cfi = new_cfi ();
882 cfi->dw_cfi_opc = DW_CFA_GNU_args_size;
883 cfi->dw_cfi_oprnd1.dw_cfi_offset = size;
884 add_cfi (cfi);
885 }
886
/* Record a stack adjustment of OFFSET bytes.  Updates the CFA and the
   register-save base when they track the stack pointer, and emits a
   DW_CFA_GNU_args_size update for asynchronous unwind tables.  */

static void
dwarf2out_stack_adjust (HOST_WIDE_INT offset)
{
  dw_cfa_location loc = cur_row->cfa;

  if (loc.reg == dw_stack_pointer_regnum)
    loc.offset += offset;

  if (cfa_store.reg == dw_stack_pointer_regnum)
    cfa_store.offset += offset;

  /* ??? The assumption seems to be that if A_O_A, the only CFA adjustments
     involving the stack pointer are inside the prologue and marked as
     RTX_FRAME_RELATED_P.  That said, should we not verify this assumption
     by *asserting* A_O_A at this point?  Why else would we have a change
     to the stack pointer?  */
  if (ACCUMULATE_OUTGOING_ARGS)
    return;

#ifndef STACK_GROWS_DOWNWARD
  offset = -offset;
#endif

  args_size += offset;
  /* The pending-arguments total can never be negative.  */
  if (args_size < 0)
    args_size = 0;

  def_cfa_1 (&loc);
  if (flag_asynchronous_unwind_tables)
    dwarf2out_args_size (args_size);
}
920
/* Check INSN to see if it looks like a push or a stack adjustment, and
   make a note of it if it does.  EH uses this information to find out
   how much extra space it needs to pop off the stack.  AFTER_P is true
   when we are being called for the insn's post-call state.  */

static void
dwarf2out_notice_stack_adjust (rtx insn, bool after_p)
{
  HOST_WIDE_INT offset;
  int i;

  /* Don't handle epilogues at all.  Certainly it would be wrong to do so
     with this function.  Proper support would require all frame-related
     insns to be marked, and to be able to handle saving state around
     epilogues textually in the middle of the function.  */
  if (prologue_epilogue_contains (insn))
    return;

  /* If INSN is an instruction from target of an annulled branch, the
     effects are for the target only and so current argument size
     shouldn't change at all.  */
  if (final_sequence
      && INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))
      && INSN_FROM_TARGET_P (insn))
    return;

  /* If only calls can throw, and we have a frame pointer,
     save up adjustments until we see the CALL_INSN.  */
  if (!flag_asynchronous_unwind_tables
      && cur_row->cfa.reg != dw_stack_pointer_regnum)
    {
      if (CALL_P (insn) && !after_p)
	{
	  /* Extract the size of the args from the CALL rtx itself.  */
	  insn = PATTERN (insn);
	  if (GET_CODE (insn) == PARALLEL)
	    insn = XVECEXP (insn, 0, 0);
	  if (GET_CODE (insn) == SET)
	    insn = SET_SRC (insn);
	  gcc_assert (GET_CODE (insn) == CALL);
	  dwarf2out_args_size (INTVAL (XEXP (insn, 1)));
	}
      return;
    }

  if (CALL_P (insn) && !after_p)
    {
      /* Flush the accumulated total just before the call.  */
      if (!flag_asynchronous_unwind_tables)
	dwarf2out_args_size (args_size);
      return;
    }
  else if (BARRIER_P (insn))
    {
      /* Don't call compute_barrier_args_size () if the only
	 BARRIER is at the end of function.  */
      if (barrier_args_size == NULL && next_nonnote_insn (insn))
	compute_barrier_args_size ();
      if (barrier_args_size == NULL)
	offset = 0;
      else
	{
	  offset = barrier_args_size[INSN_UID (insn)];
	  if (offset < 0)
	    offset = 0;
	}

      /* Convert the absolute size at the barrier into a delta from
	 the current running total.  */
      offset -= args_size;
#ifndef STACK_GROWS_DOWNWARD
      offset = -offset;
#endif
    }
  else if (GET_CODE (PATTERN (insn)) == SET)
    offset = stack_adjust_offset (PATTERN (insn), args_size, 0);
  else if (GET_CODE (PATTERN (insn)) == PARALLEL
	   || GET_CODE (PATTERN (insn)) == SEQUENCE)
    {
      /* There may be stack adjustments inside compound insns.  Search
	 for them.  */
      for (offset = 0, i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
	if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
	  offset += stack_adjust_offset (XVECEXP (PATTERN (insn), 0, i),
					 args_size, offset);
    }
  else
    return;

  if (offset == 0)
    return;

  dwarf2out_stack_adjust (offset);
}
1011
/* We delay emitting a register save until either (a) we reach the end
   of the prologue or (b) the register is clobbered.  This clusters
   register saves so that there are fewer pc advances.  */

struct GTY(()) queued_reg_save {
  struct queued_reg_save *next;	/* Singly-linked list link.  */
  rtx reg;			/* The register being saved (or PC_RTX).  */
  HOST_WIDE_INT cfa_offset;	/* Save slot offset from the CFA.  */
  rtx saved_reg;		/* Register saved into, or NULL when the
				   save goes to memory at CFA_OFFSET.  */
};

/* Head of the list of pending register saves.  */
static GTY(()) struct queued_reg_save *queued_reg_saves;
1024
/* The caller's ORIG_REG is saved in SAVED_IN_REG.  */
typedef struct GTY(()) reg_saved_in_data {
  rtx orig_reg;		/* The register (or PC_RTX) whose value was saved.  */
  rtx saved_in_reg;	/* The register now holding that value.  */
} reg_saved_in_data;

DEF_VEC_O (reg_saved_in_data);
DEF_VEC_ALLOC_O (reg_saved_in_data, gc);

/* A set of registers saved in other registers.  This is implemented as
   a flat array because it normally contains zero or 1 entry, depending
   on the target.  IA-64 is the big spender here, using a maximum of
   5 entries.  */
static GTY(()) VEC(reg_saved_in_data, gc) *regs_saved_in_regs;

/* NOTE(review): presumably the register-save recorded by the CIE for the
   return address, if any -- confirm against the code that sets it.  */
static GTY(()) reg_saved_in_data *cie_return_save;
1041
1042 /* Short-hand inline for the very common D_F_R (REGNO (x)) operation. */
1043 /* ??? This ought to go into dwarf2out.h, except that dwarf2out.h is
1044 used in places where rtl is prohibited. */
1045
1046 static inline unsigned
1047 dwf_regno (const_rtx reg)
1048 {
1049 return DWARF_FRAME_REGNUM (REGNO (reg));
1050 }
1051
1052 /* Compare X and Y for equivalence. The inputs may be REGs or PC_RTX. */
1053
1054 static bool
1055 compare_reg_or_pc (rtx x, rtx y)
1056 {
1057 if (REG_P (x) && REG_P (y))
1058 return REGNO (x) == REGNO (y);
1059 return x == y;
1060 }
1061
1062 /* Record SRC as being saved in DEST. DEST may be null to delete an
1063 existing entry. SRC may be a register or PC_RTX. */
1064
1065 static void
1066 record_reg_saved_in_reg (rtx dest, rtx src)
1067 {
1068 reg_saved_in_data *elt;
1069 size_t i;
1070
1071 FOR_EACH_VEC_ELT (reg_saved_in_data, regs_saved_in_regs, i, elt)
1072 if (compare_reg_or_pc (elt->orig_reg, src))
1073 {
1074 if (dest == NULL)
1075 VEC_unordered_remove(reg_saved_in_data, regs_saved_in_regs, i);
1076 else
1077 elt->saved_in_reg = dest;
1078 return;
1079 }
1080
1081 if (dest == NULL)
1082 return;
1083
1084 elt = VEC_safe_push(reg_saved_in_data, gc, regs_saved_in_regs, NULL);
1085 elt->orig_reg = src;
1086 elt->saved_in_reg = dest;
1087 }
1088
1089 /* Add an entry to QUEUED_REG_SAVES saying that REG is now saved at
1090 SREG, or if SREG is NULL then it is saved at OFFSET to the CFA. */
1091
1092 static void
1093 queue_reg_save (rtx reg, rtx sreg, HOST_WIDE_INT offset)
1094 {
1095 struct queued_reg_save *q;
1096
1097 /* Duplicates waste space, but it's also necessary to remove them
1098 for correctness, since the queue gets output in reverse order. */
1099 for (q = queued_reg_saves; q != NULL; q = q->next)
1100 if (compare_reg_or_pc (q->reg, reg))
1101 break;
1102
1103 if (q == NULL)
1104 {
1105 q = ggc_alloc_queued_reg_save ();
1106 q->next = queued_reg_saves;
1107 queued_reg_saves = q;
1108 }
1109
1110 q->reg = reg;
1111 q->cfa_offset = offset;
1112 q->saved_reg = sreg;
1113 }
1114
1115 /* Output all the entries in QUEUED_REG_SAVES. */
1116
1117 static void
1118 dwarf2out_flush_queued_reg_saves (void)
1119 {
1120 struct queued_reg_save *q;
1121
1122 for (q = queued_reg_saves; q; q = q->next)
1123 {
1124 unsigned int reg, sreg;
1125
1126 record_reg_saved_in_reg (q->saved_reg, q->reg);
1127
1128 if (q->reg == pc_rtx)
1129 reg = DWARF_FRAME_RETURN_COLUMN;
1130 else
1131 reg = dwf_regno (q->reg);
1132 if (q->saved_reg)
1133 sreg = dwf_regno (q->saved_reg);
1134 else
1135 sreg = INVALID_REGNUM;
1136 reg_save (reg, sreg, q->cfa_offset);
1137 }
1138
1139 queued_reg_saves = NULL;
1140 }
1141
1142 /* Does INSN clobber any register which QUEUED_REG_SAVES lists a saved
1143 location for? Or, does it clobber a register which we've previously
1144 said that some other register is saved in, and for which we now
1145 have a new location for? */
1146
1147 static bool
1148 clobbers_queued_reg_save (const_rtx insn)
1149 {
1150 struct queued_reg_save *q;
1151
1152 for (q = queued_reg_saves; q; q = q->next)
1153 {
1154 size_t i;
1155 reg_saved_in_data *rir;
1156
1157 if (modified_in_p (q->reg, insn))
1158 return true;
1159
1160 FOR_EACH_VEC_ELT (reg_saved_in_data, regs_saved_in_regs, i, rir)
1161 if (compare_reg_or_pc (q->reg, rir->orig_reg)
1162 && modified_in_p (rir->saved_in_reg, insn))
1163 return true;
1164 }
1165
1166 return false;
1167 }
1168
1169 /* What register, if any, is currently saved in REG? */
1170
1171 static rtx
1172 reg_saved_in (rtx reg)
1173 {
1174 unsigned int regn = REGNO (reg);
1175 struct queued_reg_save *q;
1176 reg_saved_in_data *rir;
1177 size_t i;
1178
1179 for (q = queued_reg_saves; q; q = q->next)
1180 if (q->saved_reg && regn == REGNO (q->saved_reg))
1181 return q->reg;
1182
1183 FOR_EACH_VEC_ELT (reg_saved_in_data, regs_saved_in_regs, i, rir)
1184 if (regn == REGNO (rir->saved_in_reg))
1185 return rir->orig_reg;
1186
1187 return NULL_RTX;
1188 }
1189
1190 /* A subroutine of dwarf2out_frame_debug, process a REG_DEF_CFA note. */
1191
static void
dwarf2out_frame_debug_def_cfa (rtx pat)
{
  dw_cfa_location loc;

  memset (&loc, 0, sizeof (loc));

  /* PAT is either a bare register, a reg+const offset, or a MEM of
     one of those two forms; anything else is unsupported here.  */
  switch (GET_CODE (pat))
    {
    case PLUS:
      loc.reg = dwf_regno (XEXP (pat, 0));
      loc.offset = INTVAL (XEXP (pat, 1));
      break;

    case REG:
      loc.reg = dwf_regno (pat);
      break;

    case MEM:
      /* The CFA value lives in memory: record an indirect location,
	 peeling an optional reg+const address.  */
      loc.indirect = 1;
      pat = XEXP (pat, 0);
      if (GET_CODE (pat) == PLUS)
	{
	  loc.base_offset = INTVAL (XEXP (pat, 1));
	  pat = XEXP (pat, 0);
	}
      loc.reg = dwf_regno (pat);
      break;

    default:
      /* Recurse and define an expression.  */
      gcc_unreachable ();
    }

  def_cfa_1 (&loc);
}
1228
1229 /* A subroutine of dwarf2out_frame_debug, process a REG_ADJUST_CFA note. */
1230
static void
dwarf2out_frame_debug_adjust_cfa (rtx pat)
{
  dw_cfa_location loc = cur_row->cfa;
  rtx src, dest;

  gcc_assert (GET_CODE (pat) == SET);
  dest = XEXP (pat, 0);
  src = XEXP (pat, 1);

  switch (GET_CODE (src))
    {
    case PLUS:
      /* Moving the CFA base register forward by N means the CFA is
	 N closer to the new base, hence the subtraction.  */
      gcc_assert (dwf_regno (XEXP (src, 0)) == loc.reg);
      loc.offset -= INTVAL (XEXP (src, 1));
      break;

    case REG:
      /* Plain register copy: only the base register changes.  */
      break;

    default:
      gcc_unreachable ();
    }

  loc.reg = dwf_regno (dest);
  gcc_assert (loc.indirect == 0);

  def_cfa_1 (&loc);
}
1260
1261 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_OFFSET note. */
1262
1263 static void
1264 dwarf2out_frame_debug_cfa_offset (rtx set)
1265 {
1266 HOST_WIDE_INT offset;
1267 rtx src, addr, span;
1268 unsigned int sregno;
1269
1270 src = XEXP (set, 1);
1271 addr = XEXP (set, 0);
1272 gcc_assert (MEM_P (addr));
1273 addr = XEXP (addr, 0);
1274
1275 /* As documented, only consider extremely simple addresses. */
1276 switch (GET_CODE (addr))
1277 {
1278 case REG:
1279 gcc_assert (dwf_regno (addr) == cur_row->cfa.reg);
1280 offset = -cur_row->cfa.offset;
1281 break;
1282 case PLUS:
1283 gcc_assert (dwf_regno (XEXP (addr, 0)) == cur_row->cfa.reg);
1284 offset = INTVAL (XEXP (addr, 1)) - cur_row->cfa.offset;
1285 break;
1286 default:
1287 gcc_unreachable ();
1288 }
1289
1290 if (src == pc_rtx)
1291 {
1292 span = NULL;
1293 sregno = DWARF_FRAME_RETURN_COLUMN;
1294 }
1295 else
1296 {
1297 span = targetm.dwarf_register_span (src);
1298 sregno = dwf_regno (src);
1299 }
1300
1301 /* ??? We'd like to use queue_reg_save, but we need to come up with
1302 a different flushing heuristic for epilogues. */
1303 if (!span)
1304 reg_save (sregno, INVALID_REGNUM, offset);
1305 else
1306 {
1307 /* We have a PARALLEL describing where the contents of SRC live.
1308 Queue register saves for each piece of the PARALLEL. */
1309 int par_index;
1310 int limit;
1311 HOST_WIDE_INT span_offset = offset;
1312
1313 gcc_assert (GET_CODE (span) == PARALLEL);
1314
1315 limit = XVECLEN (span, 0);
1316 for (par_index = 0; par_index < limit; par_index++)
1317 {
1318 rtx elem = XVECEXP (span, 0, par_index);
1319
1320 sregno = dwf_regno (src);
1321 reg_save (sregno, INVALID_REGNUM, span_offset);
1322 span_offset += GET_MODE_SIZE (GET_MODE (elem));
1323 }
1324 }
1325 }
1326
1327 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_REGISTER note. */
1328
1329 static void
1330 dwarf2out_frame_debug_cfa_register (rtx set)
1331 {
1332 rtx src, dest;
1333 unsigned sregno, dregno;
1334
1335 src = XEXP (set, 1);
1336 dest = XEXP (set, 0);
1337
1338 record_reg_saved_in_reg (dest, src);
1339 if (src == pc_rtx)
1340 sregno = DWARF_FRAME_RETURN_COLUMN;
1341 else
1342 sregno = dwf_regno (src);
1343
1344 dregno = dwf_regno (dest);
1345
1346 /* ??? We'd like to use queue_reg_save, but we need to come up with
1347 a different flushing heuristic for epilogues. */
1348 reg_save (sregno, dregno, 0);
1349 }
1350
1351 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_EXPRESSION note. */
1352
static void
dwarf2out_frame_debug_cfa_expression (rtx set)
{
  rtx src, dest, span;
  dw_cfi_ref cfi = new_cfi ();
  unsigned regno;

  dest = SET_DEST (set);
  src = SET_SRC (set);

  gcc_assert (REG_P (src));
  gcc_assert (MEM_P (dest));

  /* Multi-register spans cannot be described by a single
     DW_CFA_expression.  */
  span = targetm.dwarf_register_span (src);
  gcc_assert (!span);

  regno = dwf_regno (src);

  /* Describe SRC's save slot with a full DWARF location expression
     built from DEST's address.  */
  cfi->dw_cfi_opc = DW_CFA_expression;
  cfi->dw_cfi_oprnd1.dw_cfi_reg_num = regno;
  cfi->dw_cfi_oprnd2.dw_cfi_loc
    = mem_loc_descriptor (XEXP (dest, 0), get_address_mode (dest),
			  GET_MODE (dest), VAR_INIT_STATUS_INITIALIZED);

  /* ??? We'd like to use queue_reg_save, were the interface different,
     and, as above, we could manage flushing for epilogues.  */
  add_cfi (cfi);
  update_row_reg_save (cur_row, regno, cfi);
}
1382
1383 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_RESTORE note. */
1384
1385 static void
1386 dwarf2out_frame_debug_cfa_restore (rtx reg)
1387 {
1388 dw_cfi_ref cfi = new_cfi ();
1389 unsigned int regno = dwf_regno (reg);
1390
1391 cfi->dw_cfi_opc = (regno & ~0x3f ? DW_CFA_restore_extended : DW_CFA_restore);
1392 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = regno;
1393
1394 add_cfi (cfi);
1395 update_row_reg_save (cur_row, regno, NULL);
1396 }
1397
1398 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_WINDOW_SAVE.
1399 ??? Perhaps we should note in the CIE where windows are saved (instead of
1400 assuming 0(cfa)) and what registers are in the window. */
1401
1402 static void
1403 dwarf2out_frame_debug_cfa_window_save (void)
1404 {
1405 dw_cfi_ref cfi = new_cfi ();
1406
1407 cfi->dw_cfi_opc = DW_CFA_GNU_window_save;
1408 add_cfi (cfi);
1409 }
1410
1411 /* Record call frame debugging information for an expression EXPR,
1412 which either sets SP or FP (adjusting how we calculate the frame
1413 address) or saves a register to the stack or another register.
1414 LABEL indicates the address of EXPR.
1415
1416 This function encodes a state machine mapping rtxes to actions on
1417 cfa, cfa_store, and cfa_temp.reg. We describe these rules so
1418 users need not read the source code.
1419
1420 The High-Level Picture
1421
1422 Changes in the register we use to calculate the CFA: Currently we
1423 assume that if you copy the CFA register into another register, we
1424 should take the other one as the new CFA register; this seems to
1425 work pretty well. If it's wrong for some target, it's simple
1426 enough not to set RTX_FRAME_RELATED_P on the insn in question.
1427
1428 Changes in the register we use for saving registers to the stack:
1429 This is usually SP, but not always. Again, we deduce that if you
1430 copy SP into another register (and SP is not the CFA register),
1431 then the new register is the one we will be using for register
1432 saves. This also seems to work.
1433
1434 Register saves: There's not much guesswork about this one; if
1435 RTX_FRAME_RELATED_P is set on an insn which modifies memory, it's a
1436 register save, and the register used to calculate the destination
1437 had better be the one we think we're using for this purpose.
1438 It's also assumed that a copy from a call-saved register to another
1439 register is saving that register if RTX_FRAME_RELATED_P is set on
1440 that instruction. If the copy is from a call-saved register to
1441 the *same* register, that means that the register is now the same
1442 value as in the caller.
1443
1444 Except: If the register being saved is the CFA register, and the
1445 offset is nonzero, we are saving the CFA, so we assume we have to
1446 use DW_CFA_def_cfa_expression. If the offset is 0, we assume that
1447 the intent is to save the value of SP from the previous frame.
1448
   In addition, if a register has previously been saved to a different
   register, subsequent actions on the copy are recorded against the
   original register (see reg_saved_in, used when processing a SET).
1451
1452 Invariants / Summaries of Rules
1453
1454 cfa current rule for calculating the CFA. It usually
1455 consists of a register and an offset. This is
1456 actually stored in cur_row->cfa, but abbreviated
1457 for the purposes of this documentation.
1458 cfa_store register used by prologue code to save things to the stack
1459 cfa_store.offset is the offset from the value of
1460 cfa_store.reg to the actual CFA
1461 cfa_temp register holding an integral value. cfa_temp.offset
1462 stores the value, which will be used to adjust the
1463 stack pointer. cfa_temp is also used like cfa_store,
1464 to track stores to the stack via fp or a temp reg.
1465
1466 Rules 1- 4: Setting a register's value to cfa.reg or an expression
1467 with cfa.reg as the first operand changes the cfa.reg and its
1468 cfa.offset. Rule 1 and 4 also set cfa_temp.reg and
1469 cfa_temp.offset.
1470
1471 Rules 6- 9: Set a non-cfa.reg register value to a constant or an
1472 expression yielding a constant. This sets cfa_temp.reg
1473 and cfa_temp.offset.
1474
1475 Rule 5: Create a new register cfa_store used to save items to the
1476 stack.
1477
1478 Rules 10-14: Save a register to the stack. Define offset as the
1479 difference of the original location and cfa_store's
1480 location (or cfa_temp's location if cfa_temp is used).
1481
1482 Rules 16-20: If AND operation happens on sp in prologue, we assume
1483 stack is realigned. We will use a group of DW_OP_XXX
1484 expressions to represent the location of the stored
1485 register instead of CFA+offset.
1486
1487 The Rules
1488
1489 "{a,b}" indicates a choice of a xor b.
1490 "<reg>:cfa.reg" indicates that <reg> must equal cfa.reg.
1491
1492 Rule 1:
1493 (set <reg1> <reg2>:cfa.reg)
1494 effects: cfa.reg = <reg1>
1495 cfa.offset unchanged
1496 cfa_temp.reg = <reg1>
1497 cfa_temp.offset = cfa.offset
1498
1499 Rule 2:
1500 (set sp ({minus,plus,losum} {sp,fp}:cfa.reg
1501 {<const_int>,<reg>:cfa_temp.reg}))
1502 effects: cfa.reg = sp if fp used
1503 cfa.offset += {+/- <const_int>, cfa_temp.offset} if cfa.reg==sp
1504 cfa_store.offset += {+/- <const_int>, cfa_temp.offset}
1505 if cfa_store.reg==sp
1506
1507 Rule 3:
1508 (set fp ({minus,plus,losum} <reg>:cfa.reg <const_int>))
1509 effects: cfa.reg = fp
1510 cfa_offset += +/- <const_int>
1511
1512 Rule 4:
1513 (set <reg1> ({plus,losum} <reg2>:cfa.reg <const_int>))
1514 constraints: <reg1> != fp
1515 <reg1> != sp
1516 effects: cfa.reg = <reg1>
1517 cfa_temp.reg = <reg1>
1518 cfa_temp.offset = cfa.offset
1519
1520 Rule 5:
1521 (set <reg1> (plus <reg2>:cfa_temp.reg sp:cfa.reg))
1522 constraints: <reg1> != fp
1523 <reg1> != sp
1524 effects: cfa_store.reg = <reg1>
1525 cfa_store.offset = cfa.offset - cfa_temp.offset
1526
1527 Rule 6:
1528 (set <reg> <const_int>)
1529 effects: cfa_temp.reg = <reg>
1530 cfa_temp.offset = <const_int>
1531
1532 Rule 7:
1533 (set <reg1>:cfa_temp.reg (ior <reg2>:cfa_temp.reg <const_int>))
1534 effects: cfa_temp.reg = <reg1>
1535 cfa_temp.offset |= <const_int>
1536
1537 Rule 8:
1538 (set <reg> (high <exp>))
1539 effects: none
1540
1541 Rule 9:
1542 (set <reg> (lo_sum <exp> <const_int>))
1543 effects: cfa_temp.reg = <reg>
1544 cfa_temp.offset = <const_int>
1545
1546 Rule 10:
1547 (set (mem ({pre,post}_modify sp:cfa_store (???? <reg1> <const_int>))) <reg2>)
1548 effects: cfa_store.offset -= <const_int>
1549 cfa.offset = cfa_store.offset if cfa.reg == sp
1550 cfa.reg = sp
1551 cfa.base_offset = -cfa_store.offset
1552
1553 Rule 11:
1554 (set (mem ({pre_inc,pre_dec,post_dec} sp:cfa_store.reg)) <reg>)
1555 effects: cfa_store.offset += -/+ mode_size(mem)
1556 cfa.offset = cfa_store.offset if cfa.reg == sp
1557 cfa.reg = sp
1558 cfa.base_offset = -cfa_store.offset
1559
1560 Rule 12:
1561 (set (mem ({minus,plus,losum} <reg1>:{cfa_store,cfa_temp} <const_int>))
1562
1563 <reg2>)
1564 effects: cfa.reg = <reg1>
1565 cfa.base_offset = -/+ <const_int> - {cfa_store,cfa_temp}.offset
1566
1567 Rule 13:
1568 (set (mem <reg1>:{cfa_store,cfa_temp}) <reg2>)
1569 effects: cfa.reg = <reg1>
1570 cfa.base_offset = -{cfa_store,cfa_temp}.offset
1571
1572 Rule 14:
1573 (set (mem (post_inc <reg1>:cfa_temp <const_int>)) <reg2>)
1574 effects: cfa.reg = <reg1>
1575 cfa.base_offset = -cfa_temp.offset
1576 cfa_temp.offset -= mode_size(mem)
1577
1578 Rule 15:
1579 (set <reg> {unspec, unspec_volatile})
1580 effects: target-dependent
1581
1582 Rule 16:
1583 (set sp (and: sp <const_int>))
1584 constraints: cfa_store.reg == sp
1585 effects: cfun->fde.stack_realign = 1
1586 cfa_store.offset = 0
1587 fde->drap_reg = cfa.reg if cfa.reg != sp and cfa.reg != fp
1588
1589 Rule 17:
1590 (set (mem ({pre_inc, pre_dec} sp)) (mem (plus (cfa.reg) (const_int))))
1591 effects: cfa_store.offset += -/+ mode_size(mem)
1592
1593 Rule 18:
1594 (set (mem ({pre_inc, pre_dec} sp)) fp)
1595 constraints: fde->stack_realign == 1
1596 effects: cfa_store.offset = 0
1597 cfa.reg != HARD_FRAME_POINTER_REGNUM
1598
1599 Rule 19:
1600 (set (mem ({pre_inc, pre_dec} sp)) cfa.reg)
1601 constraints: fde->stack_realign == 1
1602 && cfa.offset == 0
1603 && cfa.indirect == 0
1604 && cfa.reg != HARD_FRAME_POINTER_REGNUM
1605 effects: Use DW_CFA_def_cfa_expression to define cfa
1606 cfa.reg == fde->drap_reg */
1607
static void
dwarf2out_frame_debug_expr (rtx expr)
{
  dw_cfa_location cfa = cur_row->cfa;
  rtx src, dest, span;
  HOST_WIDE_INT offset;
  dw_fde_ref fde;

  /* If RTX_FRAME_RELATED_P is set on a PARALLEL, process each member of
     the PARALLEL independently. The first element is always processed if
     it is a SET. This is for backward compatibility. Other elements
     are processed only if they are SETs and the RTX_FRAME_RELATED_P
     flag is set in them. */
  if (GET_CODE (expr) == PARALLEL || GET_CODE (expr) == SEQUENCE)
    {
      int par_index;
      int limit = XVECLEN (expr, 0);
      rtx elem;

      /* PARALLELs have strict read-modify-write semantics, so we
	 ought to evaluate every rvalue before changing any lvalue.
	 It's cumbersome to do that in general, but there's an
	 easy approximation that is enough for all current users:
	 handle register saves before register assignments. */
      if (GET_CODE (expr) == PARALLEL)
	for (par_index = 0; par_index < limit; par_index++)
	  {
	    elem = XVECEXP (expr, 0, par_index);
	    if (GET_CODE (elem) == SET
		&& MEM_P (SET_DEST (elem))
		&& (RTX_FRAME_RELATED_P (elem) || par_index == 0))
	      dwarf2out_frame_debug_expr (elem);
	  }

      /* Second pass: everything that is not a save to memory.  */
      for (par_index = 0; par_index < limit; par_index++)
	{
	  elem = XVECEXP (expr, 0, par_index);
	  if (GET_CODE (elem) == SET
	      && (!MEM_P (SET_DEST (elem)) || GET_CODE (expr) == SEQUENCE)
	      && (RTX_FRAME_RELATED_P (elem) || par_index == 0))
	    dwarf2out_frame_debug_expr (elem);
	  else if (GET_CODE (elem) == SET
		   && par_index != 0
		   && !RTX_FRAME_RELATED_P (elem))
	    {
	      /* Stack adjustment combining might combine some post-prologue
		 stack adjustment into a prologue stack adjustment. */
	      HOST_WIDE_INT offset = stack_adjust_offset (elem, args_size, 0);

	      if (offset != 0)
		dwarf2out_stack_adjust (offset);
	    }
	}
      return;
    }

  gcc_assert (GET_CODE (expr) == SET);

  src = SET_SRC (expr);
  dest = SET_DEST (expr);

  /* If SRC is a copy of a register saved elsewhere, attribute the
     action to the original register.  */
  if (REG_P (src))
    {
      rtx rsi = reg_saved_in (src);
      if (rsi)
	src = rsi;
    }

  fde = cfun->fde;

  switch (GET_CODE (dest))
    {
    case REG:
      switch (GET_CODE (src))
	{
	  /* Setting FP from SP. */
	case REG:
	  if (cfa.reg == dwf_regno (src))
	    {
	      /* Rule 1 */
	      /* Update the CFA rule wrt SP or FP. Make sure src is
		 relative to the current CFA register.

		 We used to require that dest be either SP or FP, but the
		 ARM copies SP to a temporary register, and from there to
		 FP. So we just rely on the backends to only set
		 RTX_FRAME_RELATED_P on appropriate insns. */
	      cfa.reg = dwf_regno (dest);
	      cfa_temp.reg = cfa.reg;
	      cfa_temp.offset = cfa.offset;
	    }
	  else
	    {
	      /* Saving a register in a register. */
	      gcc_assert (!fixed_regs [REGNO (dest)]
			  /* For the SPARC and its register window. */
			  || (dwf_regno (src) == DWARF_FRAME_RETURN_COLUMN));

	      /* After stack is aligned, we can only save SP in FP
		 if drap register is used. In this case, we have
		 to restore stack pointer with the CFA value and we
		 don't generate this DWARF information. */
	      if (fde
		  && fde->stack_realign
		  && REGNO (src) == STACK_POINTER_REGNUM)
		gcc_assert (REGNO (dest) == HARD_FRAME_POINTER_REGNUM
			    && fde->drap_reg != INVALID_REGNUM
			    && cfa.reg != dwf_regno (src));
	      else
		queue_reg_save (src, dest, 0);
	    }
	  break;

	case PLUS:
	case MINUS:
	case LO_SUM:
	  if (dest == stack_pointer_rtx)
	    {
	      /* Rule 2 */
	      /* Adjusting SP. */
	      switch (GET_CODE (XEXP (src, 1)))
		{
		case CONST_INT:
		  offset = INTVAL (XEXP (src, 1));
		  break;
		case REG:
		  gcc_assert (dwf_regno (XEXP (src, 1)) == cfa_temp.reg);
		  offset = cfa_temp.offset;
		  break;
		default:
		  gcc_unreachable ();
		}

	      if (XEXP (src, 0) == hard_frame_pointer_rtx)
		{
		  /* Restoring SP from FP in the epilogue. */
		  gcc_assert (cfa.reg == dw_frame_pointer_regnum);
		  cfa.reg = dw_stack_pointer_regnum;
		}
	      else if (GET_CODE (src) == LO_SUM)
		/* Assume we've set the source reg of the LO_SUM from sp. */
		;
	      else
		gcc_assert (XEXP (src, 0) == stack_pointer_rtx);

	      if (GET_CODE (src) != MINUS)
		offset = -offset;
	      if (cfa.reg == dw_stack_pointer_regnum)
		cfa.offset += offset;
	      if (cfa_store.reg == dw_stack_pointer_regnum)
		cfa_store.offset += offset;
	    }
	  else if (dest == hard_frame_pointer_rtx)
	    {
	      /* Rule 3 */
	      /* Either setting the FP from an offset of the SP,
		 or adjusting the FP */
	      gcc_assert (frame_pointer_needed);

	      gcc_assert (REG_P (XEXP (src, 0))
			  && dwf_regno (XEXP (src, 0)) == cfa.reg
			  && CONST_INT_P (XEXP (src, 1)));
	      offset = INTVAL (XEXP (src, 1));
	      if (GET_CODE (src) != MINUS)
		offset = -offset;
	      cfa.offset += offset;
	      cfa.reg = dw_frame_pointer_regnum;
	    }
	  else
	    {
	      gcc_assert (GET_CODE (src) != MINUS);

	      /* Rule 4 */
	      if (REG_P (XEXP (src, 0))
		  && dwf_regno (XEXP (src, 0)) == cfa.reg
		  && CONST_INT_P (XEXP (src, 1)))
		{
		  /* Setting a temporary CFA register that will be copied
		     into the FP later on. */
		  offset = - INTVAL (XEXP (src, 1));
		  cfa.offset += offset;
		  cfa.reg = dwf_regno (dest);
		  /* Or used to save regs to the stack. */
		  cfa_temp.reg = cfa.reg;
		  cfa_temp.offset = cfa.offset;
		}

	      /* Rule 5 */
	      else if (REG_P (XEXP (src, 0))
		       && dwf_regno (XEXP (src, 0)) == cfa_temp.reg
		       && XEXP (src, 1) == stack_pointer_rtx)
		{
		  /* Setting a scratch register that we will use instead
		     of SP for saving registers to the stack. */
		  gcc_assert (cfa.reg == dw_stack_pointer_regnum);
		  cfa_store.reg = dwf_regno (dest);
		  cfa_store.offset = cfa.offset - cfa_temp.offset;
		}

	      /* Rule 9 */
	      else if (GET_CODE (src) == LO_SUM
		       && CONST_INT_P (XEXP (src, 1)))
		{
		  cfa_temp.reg = dwf_regno (dest);
		  cfa_temp.offset = INTVAL (XEXP (src, 1));
		}
	      else
		gcc_unreachable ();
	    }
	  break;

	  /* Rule 6 */
	case CONST_INT:
	  cfa_temp.reg = dwf_regno (dest);
	  cfa_temp.offset = INTVAL (src);
	  break;

	  /* Rule 7 */
	case IOR:
	  gcc_assert (REG_P (XEXP (src, 0))
		      && dwf_regno (XEXP (src, 0)) == cfa_temp.reg
		      && CONST_INT_P (XEXP (src, 1)));

	  cfa_temp.reg = dwf_regno (dest);
	  cfa_temp.offset |= INTVAL (XEXP (src, 1));
	  break;

	  /* Skip over HIGH, assuming it will be followed by a LO_SUM,
	     which will fill in all of the bits. */
	  /* Rule 8 */
	case HIGH:
	  break;

	  /* Rule 15 */
	case UNSPEC:
	case UNSPEC_VOLATILE:
	  /* All unspecs should be represented by REG_CFA_* notes. */
	  gcc_unreachable ();
	  return;

	  /* Rule 16 */
	case AND:
	  /* If this AND operation happens on stack pointer in prologue,
	     we assume the stack is realigned and we extract the
	     alignment. */
	  if (fde && XEXP (src, 0) == stack_pointer_rtx)
	    {
	      /* We interpret reg_save differently with stack_realign set.
		 Thus we must flush whatever we have queued first. */
	      dwarf2out_flush_queued_reg_saves ();

	      gcc_assert (cfa_store.reg == dwf_regno (XEXP (src, 0)));
	      fde->stack_realign = 1;
	      fde->stack_realignment = INTVAL (XEXP (src, 1));
	      cfa_store.offset = 0;

	      if (cfa.reg != dw_stack_pointer_regnum
		  && cfa.reg != dw_frame_pointer_regnum)
		fde->drap_reg = cfa.reg;
	    }
	  return;

	default:
	  gcc_unreachable ();
	}

      def_cfa_1 (&cfa);
      break;

    case MEM:

      /* Saving a register to the stack. Make sure dest is relative to the
	 CFA register. */
      switch (GET_CODE (XEXP (dest, 0)))
	{
	  /* Rule 10 */
	  /* With a push. */
	case PRE_MODIFY:
	case POST_MODIFY:
	  /* We can't handle variable size modifications. */
	  gcc_assert (GET_CODE (XEXP (XEXP (XEXP (dest, 0), 1), 1))
		      == CONST_INT);
	  offset = -INTVAL (XEXP (XEXP (XEXP (dest, 0), 1), 1));

	  gcc_assert (REGNO (XEXP (XEXP (dest, 0), 0)) == STACK_POINTER_REGNUM
		      && cfa_store.reg == dw_stack_pointer_regnum);

	  cfa_store.offset += offset;
	  if (cfa.reg == dw_stack_pointer_regnum)
	    cfa.offset = cfa_store.offset;

	  if (GET_CODE (XEXP (dest, 0)) == POST_MODIFY)
	    offset -= cfa_store.offset;
	  else
	    offset = -cfa_store.offset;
	  break;

	  /* Rule 11 */
	case PRE_INC:
	case PRE_DEC:
	case POST_DEC:
	  offset = GET_MODE_SIZE (GET_MODE (dest));
	  if (GET_CODE (XEXP (dest, 0)) == PRE_INC)
	    offset = -offset;

	  gcc_assert ((REGNO (XEXP (XEXP (dest, 0), 0))
		       == STACK_POINTER_REGNUM)
		      && cfa_store.reg == dw_stack_pointer_regnum);

	  cfa_store.offset += offset;

	  /* Rule 18: If stack is aligned, we will use FP as a
	     reference to represent the address of the stored
	     register. */
	  if (fde
	      && fde->stack_realign
	      && src == hard_frame_pointer_rtx)
	    {
	      gcc_assert (cfa.reg != dw_frame_pointer_regnum);
	      cfa_store.offset = 0;
	    }

	  if (cfa.reg == dw_stack_pointer_regnum)
	    cfa.offset = cfa_store.offset;

	  if (GET_CODE (XEXP (dest, 0)) == POST_DEC)
	    offset += -cfa_store.offset;
	  else
	    offset = -cfa_store.offset;
	  break;

	  /* Rule 12 */
	  /* With an offset. */
	case PLUS:
	case MINUS:
	case LO_SUM:
	  {
	    unsigned int regno;

	    gcc_assert (CONST_INT_P (XEXP (XEXP (dest, 0), 1))
			&& REG_P (XEXP (XEXP (dest, 0), 0)));
	    offset = INTVAL (XEXP (XEXP (dest, 0), 1));
	    if (GET_CODE (XEXP (dest, 0)) == MINUS)
	      offset = -offset;

	    regno = dwf_regno (XEXP (XEXP (dest, 0), 0));

	    if (cfa.reg == regno)
	      offset -= cfa.offset;
	    else if (cfa_store.reg == regno)
	      offset -= cfa_store.offset;
	    else
	      {
		gcc_assert (cfa_temp.reg == regno);
		offset -= cfa_temp.offset;
	      }
	  }
	  break;

	  /* Rule 13 */
	  /* Without an offset. */
	case REG:
	  {
	    unsigned int regno = dwf_regno (XEXP (dest, 0));

	    if (cfa.reg == regno)
	      offset = -cfa.offset;
	    else if (cfa_store.reg == regno)
	      offset = -cfa_store.offset;
	    else
	      {
		gcc_assert (cfa_temp.reg == regno);
		offset = -cfa_temp.offset;
	      }
	  }
	  break;

	  /* Rule 14 */
	case POST_INC:
	  gcc_assert (cfa_temp.reg == dwf_regno (XEXP (XEXP (dest, 0), 0)));
	  offset = -cfa_temp.offset;
	  cfa_temp.offset -= GET_MODE_SIZE (GET_MODE (dest));
	  break;

	default:
	  gcc_unreachable ();
	}

      /* Rule 17 */
      /* If the source operand of this MEM operation is a memory,
	 we only care how much stack grew. */
      if (MEM_P (src))
	break;

      if (REG_P (src)
	  && REGNO (src) != STACK_POINTER_REGNUM
	  && REGNO (src) != HARD_FRAME_POINTER_REGNUM
	  && dwf_regno (src) == cfa.reg)
	{
	  /* We're storing the current CFA reg into the stack. */

	  if (cfa.offset == 0)
	    {
	      /* Rule 19 */
	      /* If stack is aligned, putting CFA reg into stack means
		 we can no longer use reg + offset to represent CFA.
		 Here we use DW_CFA_def_cfa_expression instead. The
		 result of this expression equals to the original CFA
		 value. */
	      if (fde
		  && fde->stack_realign
		  && cfa.indirect == 0
		  && cfa.reg != dw_frame_pointer_regnum)
		{
		  dw_cfa_location cfa_exp;

		  gcc_assert (fde->drap_reg == cfa.reg);

		  cfa_exp.indirect = 1;
		  cfa_exp.reg = dw_frame_pointer_regnum;
		  cfa_exp.base_offset = offset;
		  cfa_exp.offset = 0;

		  fde->drap_reg_saved = 1;

		  def_cfa_1 (&cfa_exp);
		  break;
		}

	      /* If the source register is exactly the CFA, assume
		 we're saving SP like any other register; this happens
		 on the ARM. */
	      def_cfa_1 (&cfa);
	      queue_reg_save (stack_pointer_rtx, NULL_RTX, offset);
	      break;
	    }
	  else
	    {
	      /* Otherwise, we'll need to look in the stack to
		 calculate the CFA. */
	      rtx x = XEXP (dest, 0);

	      if (!REG_P (x))
		x = XEXP (x, 0);
	      gcc_assert (REG_P (x));

	      cfa.reg = dwf_regno (x);
	      cfa.base_offset = offset;
	      cfa.indirect = 1;
	      def_cfa_1 (&cfa);
	      break;
	    }
	}

      def_cfa_1 (&cfa);

      span = NULL;
      if (REG_P (src))
	span = targetm.dwarf_register_span (src);
      if (!span)
	queue_reg_save (src, NULL_RTX, offset);
      else
	{
	  /* We have a PARALLEL describing where the contents of SRC live.
	     Queue register saves for each piece of the PARALLEL. */
	  int par_index;
	  int limit;
	  HOST_WIDE_INT span_offset = offset;

	  gcc_assert (GET_CODE (span) == PARALLEL);

	  limit = XVECLEN (span, 0);
	  for (par_index = 0; par_index < limit; par_index++)
	    {
	      rtx elem = XVECEXP (span, 0, par_index);
	      queue_reg_save (elem, NULL_RTX, span_offset);
	      span_offset += GET_MODE_SIZE (GET_MODE (elem));
	    }
	}
      break;

    default:
      gcc_unreachable ();
    }
}
2093
2094 /* Record call frame debugging information for INSN, which either
2095 sets SP or FP (adjusting how we calculate the frame address) or saves a
2096 register to the stack. If INSN is NULL_RTX, initialize our state.
2097
2098 If AFTER_P is false, we're being called before the insn is emitted,
2099 otherwise after. Call instructions get invoked twice. */
2100
static void
dwarf2out_frame_debug (rtx insn, bool after_p)
{
  rtx note, n;
  bool handled_one = false;
  bool need_flush = false;

  /* Flush the pending queue if INSN is not a plain insn (jump, call,
     etc.) or if it clobbers a register the queue refers to.  */
  if (!NONJUMP_INSN_P (insn) || clobbers_queued_reg_save (insn))
    dwarf2out_flush_queued_reg_saves ();

  if (!RTX_FRAME_RELATED_P (insn))
    {
      /* ??? This should be done unconditionally since stack adjustments
	 matter if the stack pointer is not the CFA register anymore but
	 is still used to save registers. */
      if (!ACCUMULATE_OUTGOING_ARGS)
	dwarf2out_notice_stack_adjust (insn, after_p);
      return;
    }

  any_cfis_emitted = false;

  /* Dispatch on the REG_CFA_* notes attached to INSN.  For most note
     kinds a NULL note argument means "derive it from the insn's
     pattern, using the first element of a PARALLEL".  */
  for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
    switch (REG_NOTE_KIND (note))
      {
      case REG_FRAME_RELATED_EXPR:
	insn = XEXP (note, 0);
	goto do_frame_expr;

      case REG_CFA_DEF_CFA:
	dwarf2out_frame_debug_def_cfa (XEXP (note, 0));
	handled_one = true;
	break;

      case REG_CFA_ADJUST_CFA:
	n = XEXP (note, 0);
	if (n == NULL)
	  {
	    n = PATTERN (insn);
	    if (GET_CODE (n) == PARALLEL)
	      n = XVECEXP (n, 0, 0);
	  }
	dwarf2out_frame_debug_adjust_cfa (n);
	handled_one = true;
	break;

      case REG_CFA_OFFSET:
	n = XEXP (note, 0);
	if (n == NULL)
	  n = single_set (insn);
	dwarf2out_frame_debug_cfa_offset (n);
	handled_one = true;
	break;

      case REG_CFA_REGISTER:
	n = XEXP (note, 0);
	if (n == NULL)
	  {
	    n = PATTERN (insn);
	    if (GET_CODE (n) == PARALLEL)
	      n = XVECEXP (n, 0, 0);
	  }
	dwarf2out_frame_debug_cfa_register (n);
	handled_one = true;
	break;

      case REG_CFA_EXPRESSION:
	n = XEXP (note, 0);
	if (n == NULL)
	  n = single_set (insn);
	dwarf2out_frame_debug_cfa_expression (n);
	handled_one = true;
	break;

      case REG_CFA_RESTORE:
	n = XEXP (note, 0);
	if (n == NULL)
	  {
	    n = PATTERN (insn);
	    if (GET_CODE (n) == PARALLEL)
	      n = XVECEXP (n, 0, 0);
	    n = XEXP (n, 0);
	  }
	dwarf2out_frame_debug_cfa_restore (n);
	handled_one = true;
	break;

      case REG_CFA_SET_VDRAP:
	n = XEXP (note, 0);
	if (REG_P (n))
	  {
	    dw_fde_ref fde = cfun->fde;
	    if (fde)
	      {
		gcc_assert (fde->vdrap_reg == INVALID_REGNUM);
		if (REG_P (n))
		  fde->vdrap_reg = dwf_regno (n);
	      }
	  }
	handled_one = true;
	break;

      case REG_CFA_WINDOW_SAVE:
	dwarf2out_frame_debug_cfa_window_save ();
	handled_one = true;
	break;

      case REG_CFA_FLUSH_QUEUE:
	/* The actual flush happens below. */
	need_flush = true;
	handled_one = true;
	break;

      default:
	break;
      }

  if (handled_one)
    {
      /* Minimize the number of advances by emitting the entire queue
	 once anything is emitted. */
      need_flush |= any_cfis_emitted;
    }
  else
    {
      /* No REG_CFA_* note handled the insn: fall back to interpreting
	 the pattern itself via the rule-based state machine.  */
      insn = PATTERN (insn);
    do_frame_expr:
      dwarf2out_frame_debug_expr (insn);

      /* Check again. A parallel can save and update the same register.
	 We could probably check just once, here, but this is safer than
	 removing the check at the start of the function. */
      if (any_cfis_emitted || clobbers_queued_reg_save (insn))
	need_flush = true;
    }

  if (need_flush)
    dwarf2out_flush_queued_reg_saves ();
}
2240
2241 /* Examine CFI and return true if a cfi label and set_loc is needed
2242 beforehand. Even when generating CFI assembler instructions, we
2243 still have to add the cfi to the list so that lookup_cfa_1 works
2244 later on. When -g2 and above we even need to force emitting of
2245 CFI labels and add to list a DW_CFA_set_loc for convert_cfa_to_fb_loc_list
2246 purposes. If we're generating DWARF3 output we use DW_OP_call_frame_cfa
2247 and so don't use convert_cfa_to_fb_loc_list. */
2248
2249 static bool
2250 cfi_label_required_p (dw_cfi_ref cfi)
2251 {
2252 if (!dwarf2out_do_cfi_asm ())
2253 return true;
2254
2255 if (dwarf_version == 2
2256 && debug_info_level > DINFO_LEVEL_TERSE
2257 && (write_symbols == DWARF2_DEBUG
2258 || write_symbols == VMS_AND_DWARF2_DEBUG))
2259 {
2260 switch (cfi->dw_cfi_opc)
2261 {
2262 case DW_CFA_def_cfa_offset:
2263 case DW_CFA_def_cfa_offset_sf:
2264 case DW_CFA_def_cfa_register:
2265 case DW_CFA_def_cfa:
2266 case DW_CFA_def_cfa_sf:
2267 case DW_CFA_def_cfa_expression:
2268 case DW_CFA_restore_state:
2269 return true;
2270 default:
2271 return false;
2272 }
2273 }
2274 return false;
2275 }
2276
/* Walk the function, looking for NOTE_INSN_CFI notes.  Add the CFIs to the
   function's FDE, adding CFI labels and set_loc/advance_loc opcodes as
   necessary.  */
static void
add_cfis_to_fde (void)
{
  dw_fde_ref fde = cfun->fde;
  rtx insn, next;
  /* We always start with a function_begin label, so an advance_loc from
     it is always possible; FIRST becomes true only after a section
     switch, where a DW_CFA_set_loc must be used instead.  */
  bool first = false;

  for (insn = get_insns (); insn; insn = next)
    {
      next = NEXT_INSN (insn);

      if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
	{
	  /* Don't attempt to advance_loc4 between labels
	     in different sections.  */
	  first = true;
	}

      if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_CFI)
	{
	  /* Decide on a label once for the whole run of consecutive
	     CFI notes; a single label covers all of them.  */
	  bool required = cfi_label_required_p (NOTE_CFI (insn));
	  while (next && NOTE_P (next) && NOTE_KIND (next) == NOTE_INSN_CFI)
	    {
	      required |= cfi_label_required_p (NOTE_CFI (next));
	      next = NEXT_INSN (next);
	    }
	  if (required)
	    {
	      int num = dwarf2out_cfi_label_num;
	      const char *label = dwarf2out_cfi_label ();
	      dw_cfi_ref xcfi;
	      rtx tmp;

	      /* Set the location counter to the new label.  */
	      xcfi = new_cfi ();
	      xcfi->dw_cfi_opc = (first ? DW_CFA_set_loc
				  : DW_CFA_advance_loc4);
	      xcfi->dw_cfi_oprnd1.dw_cfi_addr = label;
	      VEC_safe_push (dw_cfi_ref, gc, fde->dw_fde_cfi, xcfi);

	      /* Leave a note carrying the label number in the insn
		 stream so the label itself can be emitted later.  */
	      tmp = emit_note_before (NOTE_INSN_CFI_LABEL, insn);
	      NOTE_LABEL_NUMBER (tmp) = num;
	    }

	  /* Move the entire run of CFI notes into the FDE.  */
	  do
	    {
	      VEC_safe_push (dw_cfi_ref, gc, fde->dw_fde_cfi, NOTE_CFI (insn));
	      insn = NEXT_INSN (insn);
	    }
	  while (insn != next);
	  first = false;
	}
    }
}
2335
/* Scan the function and create the initial set of CFI notes.  */

static void
create_cfi_notes (void)
{
  rtx insn;

  for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
    {
      rtx pat;

      /* By default any CFIs generated for INSN are placed just before it.  */
      add_cfi_insn = PREV_INSN (insn);

      if (BARRIER_P (insn))
	{
	  /* A barrier ends a block that cannot fall through; process it
	     as a non-frame-related point.  */
	  dwarf2out_frame_debug (insn, false);
	  continue;
	}

      if (NOTE_P (insn))
	{
	  switch (NOTE_KIND (insn))
	    {
	    case NOTE_INSN_PROLOGUE_END:
	      /* All queued register saves must be represented by the
		 time the prologue is done.  */
	      dwarf2out_flush_queued_reg_saves ();
	      break;

	    case NOTE_INSN_EPILOGUE_BEG:
#if defined(HAVE_epilogue)
	      /* Possibly set up a remember/restore pair around the
		 epilogue; see dwarf2out_cfi_begin_epilogue.  */
	      dwarf2out_cfi_begin_epilogue (insn);
#endif
	      break;

	    case NOTE_INSN_CFA_RESTORE_STATE:
	      /* Emit the DW_CFA_restore_state at this exact note.  */
	      add_cfi_insn = insn;
	      dwarf2out_frame_debug_restore_state ();
	      break;
	    }
	  continue;
	}

      if (!NONDEBUG_INSN_P (insn))
	continue;

      pat = PATTERN (insn);
      if (asm_noperands (pat) >= 0)
	{
	  /* Inline asm: handle it without treating it frame-related.  */
	  dwarf2out_frame_debug (insn, false);
	  continue;
	}

      if (GET_CODE (pat) == SEQUENCE)
	{
	  /* Process the inner insns of a SEQUENCE (element 0 is covered
	     via the containing insn below).  */
	  int i, n = XVECLEN (pat, 0);
	  for (i = 1; i < n; ++i)
	    dwarf2out_frame_debug (XVECEXP (pat, 0, i), false);
	}

      /* Calls and explicit flush requests are handled as flush points
	 before the insn itself is processed as frame-related.  */
      if (CALL_P (insn)
	  || find_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL))
	dwarf2out_frame_debug (insn, false);

      /* Do not separate tablejump insns from their ADDR_DIFF_VEC.
	 Putting the note after the VEC should be ok.  */
      if (!tablejump_p (insn, NULL, &add_cfi_insn))
	add_cfi_insn = insn;

      dwarf2out_frame_debug (insn, true);
    }

  add_cfi_insn = NULL;
}
2408
/* Determine if we need to save and restore CFI information around
   the epilogue beginning at INSN (a NOTE_INSN_EPILOGUE_BEG).  If we do
   need to save/restore, then emit the save now, and insert a
   NOTE_INSN_CFA_RESTORE_STATE at the appropriate place in the stream.  */

static void
dwarf2out_cfi_begin_epilogue (rtx insn)
{
  bool saw_frp = false;
  rtx i;

  /* Scan forward to the return insn, noticing if there are possible
     frame related insns.  */
  for (i = NEXT_INSN (insn); i ; i = NEXT_INSN (i))
    {
      if (!INSN_P (i))
	continue;

      /* Look for both regular and sibcalls to end the block.  */
      if (returnjump_p (i))
	break;
      if (CALL_P (i) && SIBLING_CALL_P (i))
	break;

      if (GET_CODE (PATTERN (i)) == SEQUENCE)
	{
	  int idx;
	  rtx seq = PATTERN (i);

	  /* Element 0 of a SEQUENCE may itself be the return or
	     sibcall that ends the block.  */
	  if (returnjump_p (XVECEXP (seq, 0, 0)))
	    break;
	  if (CALL_P (XVECEXP (seq, 0, 0))
	      && SIBLING_CALL_P (XVECEXP (seq, 0, 0)))
	    break;

	  /* Otherwise, check every element of the SEQUENCE for
	     frame-related insns.  */
	  for (idx = 0; idx < XVECLEN (seq, 0); idx++)
	    if (RTX_FRAME_RELATED_P (XVECEXP (seq, 0, idx)))
	      saw_frp = true;
	}

      if (RTX_FRAME_RELATED_P (i))
	saw_frp = true;
    }

  /* If the port doesn't emit epilogue unwind info, we don't need a
     save/restore pair.  */
  if (!saw_frp)
    return;

  /* Otherwise, search forward to see if the return insn was the last
     basic block of the function.  If so, we don't need save/restore.  */
  gcc_assert (i != NULL);
  i = next_real_insn (i);
  if (i == NULL)
    return;

  /* Insert the restore before that next real insn in the stream, and before
     a potential NOTE_INSN_EPILOGUE_BEG -- we do need these notes to be
     properly nested.  This should be after any label or alignment.  This
     will be pushed into the CFI stream by the function below.  */
  while (1)
    {
      rtx p = PREV_INSN (i);
      if (!NOTE_P (p))
	break;
      if (NOTE_KIND (p) == NOTE_INSN_BASIC_BLOCK)
	break;
      i = p;
    }
  emit_note_before (NOTE_INSN_CFA_RESTORE_STATE, i);

  /* Request that a DW_CFA_remember_state be emitted; the flag is
     consumed elsewhere when the next CFI is added.  */
  emit_cfa_remember = true;

  /* And emulate the state save.  */
  gcc_assert (remember_row == NULL);
  remember_row = copy_cfi_row (cur_row);
}
2486
/* A "subroutine" of dwarf2out_cfi_begin_epilogue.  Emit the restore
   required.  */

static void
dwarf2out_frame_debug_restore_state (void)
{
  dw_cfi_ref cfi = new_cfi ();

  /* Emit the DW_CFA_restore_state opcode itself.  */
  cfi->dw_cfi_opc = DW_CFA_restore_state;
  add_cfi (cfi);

  /* Switch back to the row saved by dwarf2out_cfi_begin_epilogue,
     discarding whatever state accumulated in between.  */
  gcc_assert (remember_row != NULL);
  free_cfi_row (cur_row);
  cur_row = remember_row;
  remember_row = NULL;
}
2503 \f
/* Record the initial position of the return address.  RTL is
   INCOMING_RETURN_ADDR_RTX.  May recurse once for a PLUS of a
   loadable value and a constant offset.  */

static void
initial_return_save (rtx rtl)
{
  unsigned int reg = INVALID_REGNUM;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (rtl))
    {
    case REG:
      /* RA is in a register.  */
      reg = dwf_regno (rtl);
      break;

    case MEM:
      /* RA is on the stack.  The address must be based on the
	 incoming stack pointer.  */
      rtl = XEXP (rtl, 0);
      switch (GET_CODE (rtl))
	{
	case REG:
	  gcc_assert (REGNO (rtl) == STACK_POINTER_REGNUM);
	  offset = 0;
	  break;

	case PLUS:
	  gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
	  offset = INTVAL (XEXP (rtl, 1));
	  break;

	case MINUS:
	  gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
	  offset = -INTVAL (XEXP (rtl, 1));
	  break;

	default:
	  gcc_unreachable ();
	}

      break;

    case PLUS:
      /* The return address is at some offset from any value we can
	 actually load.  For instance, on the SPARC it is in %i7+8.  Just
	 ignore the offset for now; it doesn't matter for unwinding frames.  */
      gcc_assert (CONST_INT_P (XEXP (rtl, 1)));
      initial_return_save (XEXP (rtl, 0));
      return;

    default:
      gcc_unreachable ();
    }

  if (reg != DWARF_FRAME_RETURN_COLUMN)
    {
      /* The RA arrives somewhere other than the return column: record
	 where it lives now, ...  */
      if (reg != INVALID_REGNUM)
	record_reg_saved_in_reg (rtl, pc_rtx);
      /* ... and describe the save against the return column, with the
	 offset made CFA-relative.  */
      reg_save (DWARF_FRAME_RETURN_COLUMN, reg, offset - cur_row->cfa.offset);
    }
}
2565
/* Annotate the function with NOTE_INSN_CFI notes to record the CFI
   state at each location within the function.  These notes will be
   emitted during pass_final.  */

static unsigned int
execute_dwarf2_frame (void)
{
  /* The first time we're called, compute the incoming frame state
     shared by all functions; this becomes the CIE row.  */
  if (cie_cfi_vec == NULL)
    {
      dw_cfa_location loc;

      dw_stack_pointer_regnum = DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM);
      dw_frame_pointer_regnum = DWARF_FRAME_REGNUM (HARD_FRAME_POINTER_REGNUM);

      add_cfi_vec = &cie_cfi_vec;
      cie_cfi_row = cur_row = new_cfi_row ();

      /* On entry, the Canonical Frame Address is at SP.  */
      memset(&loc, 0, sizeof (loc));
      loc.reg = dw_stack_pointer_regnum;
      loc.offset = INCOMING_FRAME_SP_OFFSET;
      def_cfa_1 (&loc);

      if (targetm.debug_unwind_info () == UI_DWARF2
          || targetm_common.except_unwind_info (&global_options) == UI_DWARF2)
	{
	  initial_return_save (INCOMING_RETURN_ADDR_RTX);

	  /* For a few targets, we have the return address incoming into a
	     register, but choose a different return column.  This will result
	     in a DW_CFA_register for the return, and an entry in
	     regs_saved_in_regs to match.  If the target later stores that
	     return address register to the stack, we want to be able to emit
	     the DW_CFA_offset against the return column, not the intermediate
	     save register.  Save the contents of regs_saved_in_regs so that
	     we can re-initialize it at the start of each function.  */
	  switch (VEC_length (reg_saved_in_data, regs_saved_in_regs))
	    {
	    case 0:
	      break;
	    case 1:
	      cie_return_save = ggc_alloc_reg_saved_in_data ();
	      *cie_return_save = *VEC_index (reg_saved_in_data,
					     regs_saved_in_regs, 0);
	      regs_saved_in_regs = NULL;
	      break;
	    default:
	      gcc_unreachable ();
	    }
	}

      add_cfi_vec = NULL;
    }

  /* Set up state for generating call frame debug info.  */
  gcc_checking_assert (queued_reg_saves == NULL);
  gcc_checking_assert (regs_saved_in_regs == NULL);

  /* Each function starts from a copy of the CIE row, plus any
     return-address register entry saved above.  */
  cur_row = copy_cfi_row (cie_cfi_row);
  if (cie_return_save)
    VEC_safe_push (reg_saved_in_data, gc, regs_saved_in_regs, cie_return_save);

  cfa_store = cur_row->cfa;
  args_size = 0;

  memset (&cfa_temp, 0, sizeof(cfa_temp));
  cfa_temp.reg = INVALID_REGNUM;

  dwarf2out_alloc_current_fde ();

  /* Do the work.  */
  create_cfi_notes ();
  add_cfis_to_fde ();

  /* Reset all function-specific information, particularly for GC.  */
  XDELETEVEC (barrier_args_size);
  barrier_args_size = NULL;
  regs_saved_in_regs = NULL;
  queued_reg_saves = NULL;

  free_cfi_row (cur_row);
  cur_row = NULL;

  return 0;
}
2652 \f
2653 /* Convert a DWARF call frame info. operation to its string name */
2654
2655 static const char *
2656 dwarf_cfi_name (unsigned int cfi_opc)
2657 {
2658 switch (cfi_opc)
2659 {
2660 case DW_CFA_advance_loc:
2661 return "DW_CFA_advance_loc";
2662 case DW_CFA_offset:
2663 return "DW_CFA_offset";
2664 case DW_CFA_restore:
2665 return "DW_CFA_restore";
2666 case DW_CFA_nop:
2667 return "DW_CFA_nop";
2668 case DW_CFA_set_loc:
2669 return "DW_CFA_set_loc";
2670 case DW_CFA_advance_loc1:
2671 return "DW_CFA_advance_loc1";
2672 case DW_CFA_advance_loc2:
2673 return "DW_CFA_advance_loc2";
2674 case DW_CFA_advance_loc4:
2675 return "DW_CFA_advance_loc4";
2676 case DW_CFA_offset_extended:
2677 return "DW_CFA_offset_extended";
2678 case DW_CFA_restore_extended:
2679 return "DW_CFA_restore_extended";
2680 case DW_CFA_undefined:
2681 return "DW_CFA_undefined";
2682 case DW_CFA_same_value:
2683 return "DW_CFA_same_value";
2684 case DW_CFA_register:
2685 return "DW_CFA_register";
2686 case DW_CFA_remember_state:
2687 return "DW_CFA_remember_state";
2688 case DW_CFA_restore_state:
2689 return "DW_CFA_restore_state";
2690 case DW_CFA_def_cfa:
2691 return "DW_CFA_def_cfa";
2692 case DW_CFA_def_cfa_register:
2693 return "DW_CFA_def_cfa_register";
2694 case DW_CFA_def_cfa_offset:
2695 return "DW_CFA_def_cfa_offset";
2696
2697 /* DWARF 3 */
2698 case DW_CFA_def_cfa_expression:
2699 return "DW_CFA_def_cfa_expression";
2700 case DW_CFA_expression:
2701 return "DW_CFA_expression";
2702 case DW_CFA_offset_extended_sf:
2703 return "DW_CFA_offset_extended_sf";
2704 case DW_CFA_def_cfa_sf:
2705 return "DW_CFA_def_cfa_sf";
2706 case DW_CFA_def_cfa_offset_sf:
2707 return "DW_CFA_def_cfa_offset_sf";
2708
2709 /* SGI/MIPS specific */
2710 case DW_CFA_MIPS_advance_loc8:
2711 return "DW_CFA_MIPS_advance_loc8";
2712
2713 /* GNU extensions */
2714 case DW_CFA_GNU_window_save:
2715 return "DW_CFA_GNU_window_save";
2716 case DW_CFA_GNU_args_size:
2717 return "DW_CFA_GNU_args_size";
2718 case DW_CFA_GNU_negative_offset_extended:
2719 return "DW_CFA_GNU_negative_offset_extended";
2720
2721 default:
2722 return "DW_CFA_<unknown>";
2723 }
2724 }
2725
/* This routine will generate the correct assembly data for a location
   description based on a cfi entry with a complex address.  */

static void
output_cfa_loc (dw_cfi_ref cfi, int for_eh)
{
  dw_loc_descr_ref loc;
  unsigned long size;

  if (cfi->dw_cfi_opc == DW_CFA_expression)
    {
      /* DW_CFA_expression carries the target register in operand 1
	 and the location expression in operand 2.  */
      unsigned r =
	DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
      dw2_asm_output_data (1, r, NULL);
      loc = cfi->dw_cfi_oprnd2.dw_cfi_loc;
    }
  else
    /* E.g. DW_CFA_def_cfa_expression: the expression is operand 1.  */
    loc = cfi->dw_cfi_oprnd1.dw_cfi_loc;

  /* Output the size of the block.  */
  size = size_of_locs (loc);
  dw2_asm_output_data_uleb128 (size, NULL);

  /* Now output the operations themselves.  */
  output_loc_sequence (loc, for_eh);
}
2752
/* Similar, but used for .cfi_escape: the operands are emitted as a
   comma-separated byte list on asm_out_file.  */

static void
output_cfa_loc_raw (dw_cfi_ref cfi)
{
  dw_loc_descr_ref loc;
  unsigned long size;

  if (cfi->dw_cfi_opc == DW_CFA_expression)
    {
      /* DW_CFA_expression: register number first, expression second.  */
      unsigned r =
	DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (asm_out_file, "%#x,", r);
      loc = cfi->dw_cfi_oprnd2.dw_cfi_loc;
    }
  else
    loc = cfi->dw_cfi_oprnd1.dw_cfi_loc;

  /* Output the size of the block.  */
  size = size_of_locs (loc);
  dw2_asm_output_data_uleb128_raw (size);
  fputc (',', asm_out_file);

  /* Now output the operations themselves.  */
  output_loc_sequence_raw (loc);
}
2779
/* Output a Call Frame Information opcode and its operand(s).  */

void
output_cfi (dw_cfi_ref cfi, dw_fde_ref fde, int for_eh)
{
  unsigned long r;
  HOST_WIDE_INT off;

  /* DW_CFA_advance_loc, DW_CFA_offset and DW_CFA_restore encode their
     first operand in the low 6 bits of the opcode byte itself.  */
  if (cfi->dw_cfi_opc == DW_CFA_advance_loc)
    dw2_asm_output_data (1, (cfi->dw_cfi_opc
			     | (cfi->dw_cfi_oprnd1.dw_cfi_offset & 0x3f)),
			 "DW_CFA_advance_loc " HOST_WIDE_INT_PRINT_HEX,
			 ((unsigned HOST_WIDE_INT)
			  cfi->dw_cfi_oprnd1.dw_cfi_offset));
  else if (cfi->dw_cfi_opc == DW_CFA_offset)
    {
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
      dw2_asm_output_data (1, (cfi->dw_cfi_opc | (r & 0x3f)),
			   "DW_CFA_offset, column %#lx", r);
      /* The offset operand is scaled by the data alignment factor.  */
      off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
      dw2_asm_output_data_uleb128 (off, NULL);
    }
  else if (cfi->dw_cfi_opc == DW_CFA_restore)
    {
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
      dw2_asm_output_data (1, (cfi->dw_cfi_opc | (r & 0x3f)),
			   "DW_CFA_restore, column %#lx", r);
    }
  else
    {
      /* All other opcodes occupy a full byte, followed by operands.  */
      dw2_asm_output_data (1, cfi->dw_cfi_opc,
			   "%s", dwarf_cfi_name (cfi->dw_cfi_opc));

      switch (cfi->dw_cfi_opc)
	{
	case DW_CFA_set_loc:
	  if (for_eh)
	    dw2_asm_output_encoded_addr_rtx (
		ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/1, /*global=*/0),
		gen_rtx_SYMBOL_REF (Pmode, cfi->dw_cfi_oprnd1.dw_cfi_addr),
		false, NULL);
	  else
	    dw2_asm_output_addr (DWARF2_ADDR_SIZE,
				 cfi->dw_cfi_oprnd1.dw_cfi_addr, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	/* Each advance emits a delta from the FDE's current label and
	   then records the new current label.  */
	case DW_CFA_advance_loc1:
	  dw2_asm_output_delta (1, cfi->dw_cfi_oprnd1.dw_cfi_addr,
				fde->dw_fde_current_label, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	case DW_CFA_advance_loc2:
	  dw2_asm_output_delta (2, cfi->dw_cfi_oprnd1.dw_cfi_addr,
				fde->dw_fde_current_label, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	case DW_CFA_advance_loc4:
	  dw2_asm_output_delta (4, cfi->dw_cfi_oprnd1.dw_cfi_addr,
				fde->dw_fde_current_label, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	case DW_CFA_MIPS_advance_loc8:
	  dw2_asm_output_delta (8, cfi->dw_cfi_oprnd1.dw_cfi_addr,
				fde->dw_fde_current_label, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	case DW_CFA_offset_extended:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
	  dw2_asm_output_data_uleb128 (off, NULL);
	  break;

	case DW_CFA_def_cfa:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  /* def_cfa's offset is not scaled by the data alignment.  */
	  dw2_asm_output_data_uleb128 (cfi->dw_cfi_oprnd2.dw_cfi_offset, NULL);
	  break;

	case DW_CFA_offset_extended_sf:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
	  dw2_asm_output_data_sleb128 (off, NULL);
	  break;

	case DW_CFA_def_cfa_sf:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
	  dw2_asm_output_data_sleb128 (off, NULL);
	  break;

	/* These take a single register operand.  */
	case DW_CFA_restore_extended:
	case DW_CFA_undefined:
	case DW_CFA_same_value:
	case DW_CFA_def_cfa_register:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  break;

	case DW_CFA_register:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd2.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  break;

	case DW_CFA_def_cfa_offset:
	case DW_CFA_GNU_args_size:
	  dw2_asm_output_data_uleb128 (cfi->dw_cfi_oprnd1.dw_cfi_offset, NULL);
	  break;

	case DW_CFA_def_cfa_offset_sf:
	  off = div_data_align (cfi->dw_cfi_oprnd1.dw_cfi_offset);
	  dw2_asm_output_data_sleb128 (off, NULL);
	  break;

	case DW_CFA_GNU_window_save:
	  /* No operands.  */
	  break;

	case DW_CFA_def_cfa_expression:
	case DW_CFA_expression:
	  output_cfa_loc (cfi, for_eh);
	  break;

	case DW_CFA_GNU_negative_offset_extended:
	  /* Obsoleted by DW_CFA_offset_extended_sf.  */
	  gcc_unreachable ();

	default:
	  break;
	}
    }
}
2920
/* Similar, but do it via assembler directives instead.  F is either
   asm_out_file (real output) or a dump stream for debugging; a few
   cases behave differently depending on which it is.  */

void
output_cfi_directive (FILE *f, dw_cfi_ref cfi)
{
  unsigned long r, r2;

  switch (cfi->dw_cfi_opc)
    {
    case DW_CFA_advance_loc:
    case DW_CFA_advance_loc1:
    case DW_CFA_advance_loc2:
    case DW_CFA_advance_loc4:
    case DW_CFA_MIPS_advance_loc8:
    case DW_CFA_set_loc:
      /* Should only be created in a code path not followed when emitting
	 via directives.  The assembler is going to take care of this for
	 us.  But this routines is also used for debugging dumps, so
	 print something.  */
      gcc_assert (f != asm_out_file);
      fprintf (f, "\t.cfi_advance_loc\n");
      break;

    case DW_CFA_offset:
    case DW_CFA_offset_extended:
    case DW_CFA_offset_extended_sf:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_offset %lu, "HOST_WIDE_INT_PRINT_DEC"\n",
	       r, cfi->dw_cfi_oprnd2.dw_cfi_offset);
      break;

    case DW_CFA_restore:
    case DW_CFA_restore_extended:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_restore %lu\n", r);
      break;

    case DW_CFA_undefined:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_undefined %lu\n", r);
      break;

    case DW_CFA_same_value:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_same_value %lu\n", r);
      break;

    case DW_CFA_def_cfa:
    case DW_CFA_def_cfa_sf:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_def_cfa %lu, "HOST_WIDE_INT_PRINT_DEC"\n",
	       r, cfi->dw_cfi_oprnd2.dw_cfi_offset);
      break;

    case DW_CFA_def_cfa_register:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_def_cfa_register %lu\n", r);
      break;

    case DW_CFA_register:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      r2 = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd2.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_register %lu, %lu\n", r, r2);
      break;

    case DW_CFA_def_cfa_offset:
    case DW_CFA_def_cfa_offset_sf:
      fprintf (f, "\t.cfi_def_cfa_offset "
	       HOST_WIDE_INT_PRINT_DEC"\n",
	       cfi->dw_cfi_oprnd1.dw_cfi_offset);
      break;

    case DW_CFA_remember_state:
      fprintf (f, "\t.cfi_remember_state\n");
      break;
    case DW_CFA_restore_state:
      fprintf (f, "\t.cfi_restore_state\n");
      break;

    case DW_CFA_GNU_args_size:
      /* There is no directive for this opcode; use .cfi_escape with the
	 raw encoding when actually emitting assembly.  */
      if (f == asm_out_file)
	{
	  fprintf (f, "\t.cfi_escape %#x,", DW_CFA_GNU_args_size);
	  dw2_asm_output_data_uleb128_raw (cfi->dw_cfi_oprnd1.dw_cfi_offset);
	  if (flag_debug_asm)
	    fprintf (f, "\t%s args_size "HOST_WIDE_INT_PRINT_DEC,
		     ASM_COMMENT_START, cfi->dw_cfi_oprnd1.dw_cfi_offset);
	  fputc ('\n', f);
	}
      else
	{
	  fprintf (f, "\t.cfi_GNU_args_size "HOST_WIDE_INT_PRINT_DEC "\n",
		   cfi->dw_cfi_oprnd1.dw_cfi_offset);
	}
      break;

    case DW_CFA_GNU_window_save:
      fprintf (f, "\t.cfi_window_save\n");
      break;

    case DW_CFA_def_cfa_expression:
      if (f != asm_out_file)
	{
	  fprintf (f, "\t.cfi_def_cfa_expression ...\n");
	  break;
	}
      /* FALLTHRU */
    case DW_CFA_expression:
      if (f != asm_out_file)
	{
	  fprintf (f, "\t.cfi_cfa_expression ...\n");
	  break;
	}
      /* Expressions are emitted raw via .cfi_escape.  */
      fprintf (f, "\t.cfi_escape %#x,", cfi->dw_cfi_opc);
      output_cfa_loc_raw (cfi);
      fputc ('\n', f);
      break;

    default:
      gcc_unreachable ();
    }
}
3043
3044 void
3045 dwarf2out_emit_cfi (dw_cfi_ref cfi)
3046 {
3047 if (dwarf2out_do_cfi_asm ())
3048 output_cfi_directive (asm_out_file, cfi);
3049 }
3050
/* Output CFIs from VEC, up to index UPTO, to bring current FDE to the
   same state as after executing CFIs in CFI chain.  DO_CFI_ASM is
   true if .cfi_* directives shall be emitted, false otherwise.  If it
   is false, FDE and FOR_EH are the other arguments to pass to
   output_cfi.

   Rather than replaying every CFI, this summarizes: only the latest
   rule per register, the latest CFA definition, and the latest
   args_size are emitted.  */

void
output_cfis (cfi_vec vec, int upto, bool do_cfi_asm,
	     dw_fde_ref fde, bool for_eh)
{
  int ix;
  struct dw_cfi_struct cfi_buf;
  dw_cfi_ref cfi2;
  dw_cfi_ref cfi_args_size = NULL, cfi_cfa = NULL, cfi_cfa_offset = NULL;
  /* Latest rule seen for each register, indexed by register number.  */
  VEC(dw_cfi_ref, heap) *regs = VEC_alloc (dw_cfi_ref, heap, 32);
  unsigned int len, idx;

  /* Iterate one position past UPTO; that extra iteration (cfi == NULL)
     forces a final flush of the accumulated state.  */
  for (ix = 0; ix < upto + 1; ix++)
    {
      dw_cfi_ref cfi = ix < upto ? VEC_index (dw_cfi_ref, vec, ix) : NULL;
      switch (cfi ? cfi->dw_cfi_opc : DW_CFA_nop)
	{
	case DW_CFA_advance_loc:
	case DW_CFA_advance_loc1:
	case DW_CFA_advance_loc2:
	case DW_CFA_advance_loc4:
	case DW_CFA_MIPS_advance_loc8:
	case DW_CFA_set_loc:
	  /* All advances should be ignored.  */
	  break;
	case DW_CFA_remember_state:
	  {
	    dw_cfi_ref args_size = cfi_args_size;

	    /* Skip everything between .cfi_remember_state and
	       .cfi_restore_state.  */
	    ix++;
	    if (ix == upto)
	      goto flush_all;

	    for (; ix < upto; ix++)
	      {
		cfi2 = VEC_index (dw_cfi_ref, vec, ix);
		if (cfi2->dw_cfi_opc == DW_CFA_restore_state)
		  break;
		else if (cfi2->dw_cfi_opc == DW_CFA_GNU_args_size)
		  /* args_size changes inside the skipped region still
		     take effect afterwards; track the last one.  */
		  args_size = cfi2;
		else
		  gcc_assert (cfi2->dw_cfi_opc != DW_CFA_remember_state);
	      }

	    cfi_args_size = args_size;
	    break;
	  }
	case DW_CFA_GNU_args_size:
	  cfi_args_size = cfi;
	  break;
	case DW_CFA_GNU_window_save:
	  /* A window save cannot be summarized; flush everything seen
	     so far and emit the opcode itself verbatim.  */
	  goto flush_all;
	case DW_CFA_offset:
	case DW_CFA_offset_extended:
	case DW_CFA_offset_extended_sf:
	case DW_CFA_restore:
	case DW_CFA_restore_extended:
	case DW_CFA_undefined:
	case DW_CFA_same_value:
	case DW_CFA_register:
	case DW_CFA_val_offset:
	case DW_CFA_val_offset_sf:
	case DW_CFA_expression:
	case DW_CFA_val_expression:
	case DW_CFA_GNU_negative_offset_extended:
	  /* Remember only the latest rule for each register; earlier
	     rules for the same register are superseded.  */
	  if (VEC_length (dw_cfi_ref, regs)
	      <= cfi->dw_cfi_oprnd1.dw_cfi_reg_num)
	    VEC_safe_grow_cleared (dw_cfi_ref, heap, regs,
				   cfi->dw_cfi_oprnd1.dw_cfi_reg_num + 1);
	  VEC_replace (dw_cfi_ref, regs, cfi->dw_cfi_oprnd1.dw_cfi_reg_num,
		       cfi);
	  break;
	case DW_CFA_def_cfa:
	case DW_CFA_def_cfa_sf:
	case DW_CFA_def_cfa_expression:
	  /* These define both CFA register and offset.  */
	  cfi_cfa = cfi;
	  cfi_cfa_offset = cfi;
	  break;
	case DW_CFA_def_cfa_register:
	  cfi_cfa = cfi;
	  break;
	case DW_CFA_def_cfa_offset:
	case DW_CFA_def_cfa_offset_sf:
	  cfi_cfa_offset = cfi;
	  break;
	case DW_CFA_nop:
	  gcc_assert (cfi == NULL);
	flush_all:
	  /* Emit the latest rule recorded for each register, except
	     plain restores, which need no re-statement.  */
	  len = VEC_length (dw_cfi_ref, regs);
	  for (idx = 0; idx < len; idx++)
	    {
	      cfi2 = VEC_replace (dw_cfi_ref, regs, idx, NULL);
	      if (cfi2 != NULL
		  && cfi2->dw_cfi_opc != DW_CFA_restore
		  && cfi2->dw_cfi_opc != DW_CFA_restore_extended)
		{
		  if (do_cfi_asm)
		    output_cfi_directive (asm_out_file, cfi2);
		  else
		    output_cfi (cfi2, fde, for_eh);
		}
	    }
	  /* When the CFA register and offset were defined by two
	     different CFIs, merge them into one DW_CFA_def_cfa (or
	     _sf) built in CFI_BUF.  */
	  if (cfi_cfa && cfi_cfa_offset && cfi_cfa_offset != cfi_cfa)
	    {
	      gcc_assert (cfi_cfa->dw_cfi_opc != DW_CFA_def_cfa_expression);
	      cfi_buf = *cfi_cfa;
	      switch (cfi_cfa_offset->dw_cfi_opc)
		{
		case DW_CFA_def_cfa_offset:
		  cfi_buf.dw_cfi_opc = DW_CFA_def_cfa;
		  cfi_buf.dw_cfi_oprnd2 = cfi_cfa_offset->dw_cfi_oprnd1;
		  break;
		case DW_CFA_def_cfa_offset_sf:
		  cfi_buf.dw_cfi_opc = DW_CFA_def_cfa_sf;
		  cfi_buf.dw_cfi_oprnd2 = cfi_cfa_offset->dw_cfi_oprnd1;
		  break;
		case DW_CFA_def_cfa:
		case DW_CFA_def_cfa_sf:
		  cfi_buf.dw_cfi_opc = cfi_cfa_offset->dw_cfi_opc;
		  cfi_buf.dw_cfi_oprnd2 = cfi_cfa_offset->dw_cfi_oprnd2;
		  break;
		default:
		  gcc_unreachable ();
		}
	      cfi_cfa = &cfi_buf;
	    }
	  else if (cfi_cfa_offset)
	    cfi_cfa = cfi_cfa_offset;
	  if (cfi_cfa)
	    {
	      if (do_cfi_asm)
		output_cfi_directive (asm_out_file, cfi_cfa);
	      else
		output_cfi (cfi_cfa, fde, for_eh);
	    }
	  cfi_cfa = NULL;
	  cfi_cfa_offset = NULL;
	  /* Emit the args_size rule only when its value is non-zero.  */
	  if (cfi_args_size
	      && cfi_args_size->dw_cfi_oprnd1.dw_cfi_offset)
	    {
	      if (do_cfi_asm)
		output_cfi_directive (asm_out_file, cfi_args_size);
	      else
		output_cfi (cfi_args_size, fde, for_eh);
	    }
	  cfi_args_size = NULL;
	  if (cfi == NULL)
	    {
	      /* Final flush: nothing left to do.  */
	      VEC_free (dw_cfi_ref, heap, regs);
	      return;
	    }
	  else if (do_cfi_asm)
	    output_cfi_directive (asm_out_file, cfi);
	  else
	    output_cfi (cfi, fde, for_eh);
	  break;
	default:
	  gcc_unreachable ();
	}
    }
}
3219 \f
3220
/* Cache of dwarf2out_do_cfi_asm's decision, preserved across PCH.
   This variable is tri-state, with 0 unset, >0 true, <0 false.  */
static GTY(()) signed char saved_do_cfi_asm = 0;
3224
3225 /* Decide whether we want to emit frame unwind information for the current
3226 translation unit. */
3227
3228 bool
3229 dwarf2out_do_frame (void)
3230 {
3231 /* We want to emit correct CFA location expressions or lists, so we
3232 have to return true if we're going to output debug info, even if
3233 we're not going to output frame or unwind info. */
3234 if (write_symbols == DWARF2_DEBUG || write_symbols == VMS_AND_DWARF2_DEBUG)
3235 return true;
3236
3237 if (saved_do_cfi_asm > 0)
3238 return true;
3239
3240 if (targetm.debug_unwind_info () == UI_DWARF2)
3241 return true;
3242
3243 if ((flag_unwind_tables || flag_exceptions)
3244 && targetm_common.except_unwind_info (&global_options) == UI_DWARF2)
3245 return true;
3246
3247 return false;
3248 }
3249
/* Decide whether to emit frame unwind via assembler directives.
   The answer is computed once and cached in saved_do_cfi_asm.  */

bool
dwarf2out_do_cfi_asm (void)
{
  int enc;

#ifdef MIPS_DEBUGGING_INFO
  return false;
#endif

  /* Return the cached decision if we've been here before.  */
  if (saved_do_cfi_asm != 0)
    return saved_do_cfi_asm > 0;

  /* Assume failure for a moment.  */
  saved_do_cfi_asm = -1;

  if (!flag_dwarf2_cfi_asm || !dwarf2out_do_frame ())
    return false;
  if (!HAVE_GAS_CFI_PERSONALITY_DIRECTIVE)
    return false;

  /* Make sure the personality encoding is one the assembler can support.
     In particular, aligned addresses can't be handled.  */
  enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/2,/*global=*/1);
  if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
    return false;
  enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/0,/*global=*/0);
  if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
    return false;

  /* If we can't get the assembler to emit only .debug_frame, and we don't need
     dwarf2 unwind info for exceptions, then emit .debug_frame by hand.  */
  if (!HAVE_GAS_CFI_SECTIONS_DIRECTIVE
      && !flag_unwind_tables && !flag_exceptions
      && targetm_common.except_unwind_info (&global_options) != UI_DWARF2)
    return false;

  /* Success!  */
  saved_do_cfi_asm = 1;
  return true;
}
3292
3293 static bool
3294 gate_dwarf2_frame (void)
3295 {
3296 #ifndef HAVE_prologue
3297 /* Targets which still implement the prologue in assembler text
3298 cannot use the generic dwarf2 unwinding. */
3299 return false;
3300 #endif
3301
3302 /* ??? What to do for UI_TARGET unwinding? They might be able to benefit
3303 from the optimized shrink-wrapping annotations that we will compute.
3304 For now, only produce the CFI notes for dwarf2. */
3305 return dwarf2out_do_frame ();
3306 }
3307
/* The RTL pass that creates the NOTE_INSN_CFI annotations; runs
   execute_dwarf2_frame when gate_dwarf2_frame allows it.  */
struct rtl_opt_pass pass_dwarf2_frame =
{
 {
  RTL_PASS,
  "dwarf2",				/* name */
  gate_dwarf2_frame,			/* gate */
  execute_dwarf2_frame,			/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_FINAL,				/* tv_id */
  0,					/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  0					/* todo_flags_finish */
 }
};
3326
3327 #include "gt-dwarf2cfi.h"