dwarf2cfi: Convert queued_reg_save to a VEC.
[gcc.git] / gcc / dwarf2cfi.c
1 /* Dwarf2 Call Frame Information helper routines.
2 Copyright (C) 1992, 1993, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
3 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
4 Free Software Foundation, Inc.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "version.h"
27 #include "flags.h"
28 #include "rtl.h"
29 #include "function.h"
30 #include "dwarf2.h"
31 #include "dwarf2out.h"
32 #include "dwarf2asm.h"
33 #include "ggc.h"
34 #include "tm_p.h"
35 #include "target.h"
36 #include "common/common-target.h"
37 #include "tree-pass.h"
38
39 #include "except.h" /* expand_builtin_dwarf_sp_column */
40 #include "expr.h" /* init_return_column_size */
41 #include "regs.h" /* expand_builtin_init_dwarf_reg_sizes */
42 #include "output.h" /* asm_out_file */
43 #include "debug.h" /* dwarf2out_do_frame, dwarf2out_do_cfi_asm */
44
45
46 /* ??? Poison these here until it can be done generically. They've been
47 totally replaced in this file; make sure it stays that way. */
48 #undef DWARF2_UNWIND_INFO
49 #undef DWARF2_FRAME_INFO
50 #if (GCC_VERSION >= 3000)
51 #pragma GCC poison DWARF2_UNWIND_INFO DWARF2_FRAME_INFO
52 #endif
53
54 #ifndef INCOMING_RETURN_ADDR_RTX
55 #define INCOMING_RETURN_ADDR_RTX (gcc_unreachable (), NULL_RTX)
56 #endif
57
58 /* Maximum size (in bytes) of an artificially generated label. */
59 #define MAX_ARTIFICIAL_LABEL_BYTES 30
60 \f
/* A collected description of an entire row of the abstract CFI table.  */
typedef struct GTY(()) dw_cfi_row_struct
{
  /* The expression that computes the CFA, expressed in two different ways.
     The CFA member for the simple cases, and the full CFI expression for
     the complex cases.  The latter will be a DW_CFA_def_cfa_expression.  */
  dw_cfa_location cfa;
  dw_cfi_ref cfa_cfi;

  /* The expressions for any register column that is saved.  Indexed by
     DWARF register column; a null element means the column is not saved.  */
  cfi_vec reg_save;

  /* The value of any DW_CFA_GNU_args_size.  */
  HOST_WIDE_INT args_size;
} dw_cfi_row;
76
77 \f
/* A vector of call frame insns for the CIE.  */
cfi_vec cie_cfi_vec;

/* The state of the first row of the FDE table, which includes the
   state provided by the CIE.  */
static GTY(()) dw_cfi_row *cie_cfi_row;

/* Counter used by dwarf2out_cfi_label to generate unique labels.  */
static GTY(()) unsigned long dwarf2out_cfi_label_num;

/* The insn after which a new CFI note should be emitted.  */
static rtx add_cfi_insn;

/* When non-null, add_cfi will add the CFI to this vector.  */
static cfi_vec *add_cfi_vec;

/* True if remember_state should be emitted before following CFI directive.  */
static bool emit_cfa_remember;

/* True if any CFI directives were emitted at the current insn.  */
static bool any_cfis_emitted;

/* Short-hand for commonly used register numbers.  */
static unsigned dw_stack_pointer_regnum;
static unsigned dw_frame_pointer_regnum;
102 \f
103
104 static void dwarf2out_cfi_begin_epilogue (rtx insn);
105 static void dwarf2out_frame_debug_restore_state (void);
106
107 \f
108 /* Hook used by __throw. */
109
110 rtx
111 expand_builtin_dwarf_sp_column (void)
112 {
113 unsigned int dwarf_regnum = DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM);
114 return GEN_INT (DWARF2_FRAME_REG_OUT (dwarf_regnum, 1));
115 }
116
117 /* MEM is a memory reference for the register size table, each element of
118 which has mode MODE. Initialize column C as a return address column. */
119
120 static void
121 init_return_column_size (enum machine_mode mode, rtx mem, unsigned int c)
122 {
123 HOST_WIDE_INT offset = c * GET_MODE_SIZE (mode);
124 HOST_WIDE_INT size = GET_MODE_SIZE (Pmode);
125 emit_move_insn (adjust_address (mem, mode, offset), GEN_INT (size));
126 }
127
/* Generate code to initialize the register size table.  ADDRESS is a tree
   whose value is the address of the table to fill in.  */

void
expand_builtin_init_dwarf_reg_sizes (tree address)
{
  unsigned int i;
  enum machine_mode mode = TYPE_MODE (char_type_node);
  rtx addr = expand_normal (address);
  rtx mem = gen_rtx_MEM (BLKmode, addr);
  bool wrote_return_column = false;

  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    {
      unsigned int dnum = DWARF_FRAME_REGNUM (i);
      unsigned int rnum = DWARF2_FRAME_REG_OUT (dnum, 1);

      if (rnum < DWARF_FRAME_REGISTERS)
	{
	  HOST_WIDE_INT offset = rnum * GET_MODE_SIZE (mode);
	  enum machine_mode save_mode = reg_raw_mode[i];
	  HOST_WIDE_INT size;

	  /* If the register is only partially call-clobbered, the part
	     that must be saved may be narrower than the raw mode.  */
	  if (HARD_REGNO_CALL_PART_CLOBBERED (i, save_mode))
	    save_mode = choose_hard_reg_mode (i, 1, true);
	  if (dnum == DWARF_FRAME_RETURN_COLUMN)
	    {
	      if (save_mode == VOIDmode)
		continue;
	      wrote_return_column = true;
	    }
	  size = GET_MODE_SIZE (save_mode);
	  if (offset < 0)
	    continue;

	  emit_move_insn (adjust_address (mem, mode, offset),
			  gen_int_mode (size, mode));
	}
    }

  /* Make sure the return address column always has a size, even if no
     hard register mapped onto it above.  */
  if (!wrote_return_column)
    init_return_column_size (mode, mem, DWARF_FRAME_RETURN_COLUMN);

#ifdef DWARF_ALT_FRAME_RETURN_COLUMN
  init_return_column_size (mode, mem, DWARF_ALT_FRAME_RETURN_COLUMN);
#endif

  /* Give the target a chance to record extra entries.  */
  targetm.init_dwarf_reg_sizes_extra (address);
}
176
177 /* Divide OFF by DWARF_CIE_DATA_ALIGNMENT, asserting no remainder. */
178
179 static inline HOST_WIDE_INT
180 div_data_align (HOST_WIDE_INT off)
181 {
182 HOST_WIDE_INT r = off / DWARF_CIE_DATA_ALIGNMENT;
183 gcc_assert (r * DWARF_CIE_DATA_ALIGNMENT == off);
184 return r;
185 }
186
187 /* Return true if we need a signed version of a given opcode
188 (e.g. DW_CFA_offset_extended_sf vs DW_CFA_offset_extended). */
189
190 static inline bool
191 need_data_align_sf_opcode (HOST_WIDE_INT off)
192 {
193 return DWARF_CIE_DATA_ALIGNMENT < 0 ? off > 0 : off < 0;
194 }
195
/* Return a pointer to a newly allocated Call Frame Instruction.  */

static inline dw_cfi_ref
new_cfi (void)
{
  dw_cfi_ref cfi = ggc_alloc_dw_cfi_node ();

  /* Clear both operands; callers fill in only the fields that the
     chosen opcode actually uses.  */
  cfi->dw_cfi_oprnd1.dw_cfi_reg_num = 0;
  cfi->dw_cfi_oprnd2.dw_cfi_reg_num = 0;

  return cfi;
}
208
/* Return a newly allocated CFI row, with no defined data.  */

static dw_cfi_row *
new_cfi_row (void)
{
  dw_cfi_row *row = ggc_alloc_cleared_dw_cfi_row ();

  /* INVALID_REGNUM marks the CFA as not yet defined; everything else
     is zeroed by the cleared allocation.  */
  row->cfa.reg = INVALID_REGNUM;

  return row;
}
220
/* Return a copy of an existing CFI row.  */

static dw_cfi_row *
copy_cfi_row (dw_cfi_row *src)
{
  dw_cfi_row *dst = ggc_alloc_dw_cfi_row ();

  *dst = *src;
  /* The register-save vector must be deep-copied, else the two rows
     would share (and mutate) the same storage.  */
  dst->reg_save = VEC_copy (dw_cfi_ref, gc, src->reg_save);

  return dst;
}
233
234 /* Free an allocated CFI row. */
235
236 static void
237 free_cfi_row (dw_cfi_row *row)
238 {
239 if (row != NULL)
240 {
241 VEC_free (dw_cfi_ref, gc, row->reg_save);
242 ggc_free (row);
243 }
244 }
245
/* Generate a new label for the CFI info to refer to.  The returned
   string is heap-allocated; the caller is responsible for freeing it.  */

static char *
dwarf2out_cfi_label (void)
{
  /* NOTE(review): the counter is unsigned long but is truncated to int
     here -- harmless unless more than INT_MAX labels are emitted.  */
  int num = dwarf2out_cfi_label_num++;
  char label[20];

  ASM_GENERATE_INTERNAL_LABEL (label, "LCFI", num);

  return xstrdup (label);
}
258
/* Add CFI either to the current insn stream or to a vector, or both.  */

static void
add_cfi (dw_cfi_ref cfi)
{
  if (emit_cfa_remember)
    {
      dw_cfi_ref cfi_remember;

      /* Emit the state save.  Clear the flag before the recursive call
	 below, so the recursion terminates and the remember_state is
	 emitted ahead of CFI.  */
      emit_cfa_remember = false;
      cfi_remember = new_cfi ();
      cfi_remember->dw_cfi_opc = DW_CFA_remember_state;
      add_cfi (cfi_remember);
    }

  any_cfis_emitted = true;

  /* Attach the CFI to the insn stream as a note, when requested.  */
  if (add_cfi_insn != NULL)
    {
      add_cfi_insn = emit_note_after (NOTE_INSN_CFI, add_cfi_insn);
      NOTE_CFI (add_cfi_insn) = cfi;
    }

  /* And/or collect it into a vector, when requested.  */
  if (add_cfi_vec != NULL)
    VEC_safe_push (dw_cfi_ref, gc, *add_cfi_vec, cfi);
}
286
/* Emit a DW_CFA_GNU_args_size opcode recording SIZE.  */

static void
add_cfi_args_size (HOST_WIDE_INT size)
{
  dw_cfi_ref cfi = new_cfi ();

  cfi->dw_cfi_opc = DW_CFA_GNU_args_size;
  cfi->dw_cfi_oprnd1.dw_cfi_offset = size;

  add_cfi (cfi);
}
297
/* Emit a restore opcode for register column REG.  */

static void
add_cfi_restore (unsigned reg)
{
  dw_cfi_ref cfi = new_cfi ();

  /* Columns above 63 do not fit in the 6-bit operand of the compact
     DW_CFA_restore form and need the extended opcode.  */
  cfi->dw_cfi_opc = (reg & ~0x3f ? DW_CFA_restore_extended : DW_CFA_restore);
  cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;

  add_cfi (cfi);
}
308
/* Perform ROW->REG_SAVE[COLUMN] = CFI.  CFI may be null, indicating
   that the register column is no longer saved.  */

static void
update_row_reg_save (dw_cfi_row *row, unsigned column, dw_cfi_ref cfi)
{
  /* Grow the vector on demand; new slots are zeroed, i.e. "not saved".  */
  if (VEC_length (dw_cfi_ref, row->reg_save) <= column)
    VEC_safe_grow_cleared (dw_cfi_ref, gc, row->reg_save, column + 1);
  VEC_replace (dw_cfi_ref, row->reg_save, column, cfi);
}
319
320 /* This function fills in aa dw_cfa_location structure from a dwarf location
321 descriptor sequence. */
322
323 static void
324 get_cfa_from_loc_descr (dw_cfa_location *cfa, struct dw_loc_descr_struct *loc)
325 {
326 struct dw_loc_descr_struct *ptr;
327 cfa->offset = 0;
328 cfa->base_offset = 0;
329 cfa->indirect = 0;
330 cfa->reg = -1;
331
332 for (ptr = loc; ptr != NULL; ptr = ptr->dw_loc_next)
333 {
334 enum dwarf_location_atom op = ptr->dw_loc_opc;
335
336 switch (op)
337 {
338 case DW_OP_reg0:
339 case DW_OP_reg1:
340 case DW_OP_reg2:
341 case DW_OP_reg3:
342 case DW_OP_reg4:
343 case DW_OP_reg5:
344 case DW_OP_reg6:
345 case DW_OP_reg7:
346 case DW_OP_reg8:
347 case DW_OP_reg9:
348 case DW_OP_reg10:
349 case DW_OP_reg11:
350 case DW_OP_reg12:
351 case DW_OP_reg13:
352 case DW_OP_reg14:
353 case DW_OP_reg15:
354 case DW_OP_reg16:
355 case DW_OP_reg17:
356 case DW_OP_reg18:
357 case DW_OP_reg19:
358 case DW_OP_reg20:
359 case DW_OP_reg21:
360 case DW_OP_reg22:
361 case DW_OP_reg23:
362 case DW_OP_reg24:
363 case DW_OP_reg25:
364 case DW_OP_reg26:
365 case DW_OP_reg27:
366 case DW_OP_reg28:
367 case DW_OP_reg29:
368 case DW_OP_reg30:
369 case DW_OP_reg31:
370 cfa->reg = op - DW_OP_reg0;
371 break;
372 case DW_OP_regx:
373 cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
374 break;
375 case DW_OP_breg0:
376 case DW_OP_breg1:
377 case DW_OP_breg2:
378 case DW_OP_breg3:
379 case DW_OP_breg4:
380 case DW_OP_breg5:
381 case DW_OP_breg6:
382 case DW_OP_breg7:
383 case DW_OP_breg8:
384 case DW_OP_breg9:
385 case DW_OP_breg10:
386 case DW_OP_breg11:
387 case DW_OP_breg12:
388 case DW_OP_breg13:
389 case DW_OP_breg14:
390 case DW_OP_breg15:
391 case DW_OP_breg16:
392 case DW_OP_breg17:
393 case DW_OP_breg18:
394 case DW_OP_breg19:
395 case DW_OP_breg20:
396 case DW_OP_breg21:
397 case DW_OP_breg22:
398 case DW_OP_breg23:
399 case DW_OP_breg24:
400 case DW_OP_breg25:
401 case DW_OP_breg26:
402 case DW_OP_breg27:
403 case DW_OP_breg28:
404 case DW_OP_breg29:
405 case DW_OP_breg30:
406 case DW_OP_breg31:
407 cfa->reg = op - DW_OP_breg0;
408 cfa->base_offset = ptr->dw_loc_oprnd1.v.val_int;
409 break;
410 case DW_OP_bregx:
411 cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
412 cfa->base_offset = ptr->dw_loc_oprnd2.v.val_int;
413 break;
414 case DW_OP_deref:
415 cfa->indirect = 1;
416 break;
417 case DW_OP_plus_uconst:
418 cfa->offset = ptr->dw_loc_oprnd1.v.val_unsigned;
419 break;
420 default:
421 gcc_unreachable ();
422 }
423 }
424 }
425
/* Find the previous value for the CFA, iteratively.  CFI is the opcode
   to interpret, *LOC will be updated as necessary, *REMEMBER is used for
   one level of remember/restore state processing.  */

void
lookup_cfa_1 (dw_cfi_ref cfi, dw_cfa_location *loc, dw_cfa_location *remember)
{
  switch (cfi->dw_cfi_opc)
    {
    case DW_CFA_def_cfa_offset:
    case DW_CFA_def_cfa_offset_sf:
      loc->offset = cfi->dw_cfi_oprnd1.dw_cfi_offset;
      break;
    case DW_CFA_def_cfa_register:
      loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
      break;
    case DW_CFA_def_cfa:
    case DW_CFA_def_cfa_sf:
      loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
      loc->offset = cfi->dw_cfi_oprnd2.dw_cfi_offset;
      break;
    case DW_CFA_def_cfa_expression:
      /* Decode the full location expression back into reg/offset form.  */
      get_cfa_from_loc_descr (loc, cfi->dw_cfi_oprnd1.dw_cfi_loc);
      break;

    case DW_CFA_remember_state:
      /* Only a single level of remember/restore is supported here.  */
      gcc_assert (!remember->in_use);
      *remember = *loc;
      remember->in_use = 1;
      break;
    case DW_CFA_restore_state:
      gcc_assert (remember->in_use);
      *loc = *remember;
      remember->in_use = 0;
      break;

    default:
      /* Other opcodes do not affect the CFA.  */
      break;
    }
}
466
/* The current, i.e. most recently generated, row of the CFI table.  */
static dw_cfi_row *cur_row;

/* The row state from a preceding DW_CFA_remember_state.  */
static dw_cfi_row *remember_row;

/* The register used for saving registers to the stack, and its offset
   from the CFA.  */
static dw_cfa_location cfa_store;

/* A temporary register holding an integral value used in adjusting SP
   or setting up the store_reg.  The "offset" field holds the integer
   value, not an offset.  */
static dw_cfa_location cfa_temp;

/* The (really) current value for DW_CFA_GNU_args_size.  We delay actually
   emitting this data, i.e. updating CUR_ROW, without async unwind.  */
static HOST_WIDE_INT args_size;
485
486 /* Determine if two dw_cfa_location structures define the same data. */
487
488 bool
489 cfa_equal_p (const dw_cfa_location *loc1, const dw_cfa_location *loc2)
490 {
491 return (loc1->reg == loc2->reg
492 && loc1->offset == loc2->offset
493 && loc1->indirect == loc2->indirect
494 && (loc1->indirect == 0
495 || loc1->base_offset == loc2->base_offset));
496 }
497
/* Determine if two CFI operands are identical.  T selects which member
   of the dw_cfi_oprnd union is active in both A and B.  */

static bool
cfi_oprnd_equal_p (enum dw_cfi_oprnd_type t, dw_cfi_oprnd *a, dw_cfi_oprnd *b)
{
  switch (t)
    {
    case dw_cfi_oprnd_unused:
      return true;
    case dw_cfi_oprnd_reg_num:
      return a->dw_cfi_reg_num == b->dw_cfi_reg_num;
    case dw_cfi_oprnd_offset:
      return a->dw_cfi_offset == b->dw_cfi_offset;
    case dw_cfi_oprnd_addr:
      /* Pointer equality short-circuits the string compare.
	 NOTE(review): assumes both addresses are non-null whenever the
	 pointers differ -- verify against producers of dw_cfi_addr.  */
      return (a->dw_cfi_addr == b->dw_cfi_addr
	      || strcmp (a->dw_cfi_addr, b->dw_cfi_addr) == 0);
    case dw_cfi_oprnd_loc:
      return loc_descr_equal_p (a->dw_cfi_loc, b->dw_cfi_loc);
    }
  gcc_unreachable ();
}
519
520 /* Determine if two CFI entries are identical. */
521
522 static bool
523 cfi_equal_p (dw_cfi_ref a, dw_cfi_ref b)
524 {
525 enum dwarf_call_frame_info opc;
526
527 /* Make things easier for our callers, including missing operands. */
528 if (a == b)
529 return true;
530 if (a == NULL || b == NULL)
531 return false;
532
533 /* Obviously, the opcodes must match. */
534 opc = a->dw_cfi_opc;
535 if (opc != b->dw_cfi_opc)
536 return false;
537
538 /* Compare the two operands, re-using the type of the operands as
539 already exposed elsewhere. */
540 return (cfi_oprnd_equal_p (dw_cfi_oprnd1_desc (opc),
541 &a->dw_cfi_oprnd1, &b->dw_cfi_oprnd1)
542 && cfi_oprnd_equal_p (dw_cfi_oprnd2_desc (opc),
543 &a->dw_cfi_oprnd2, &b->dw_cfi_oprnd2));
544 }
545
/* The CFA is now calculated from NEW_CFA.  Consider OLD_CFA in determining
   what opcode to emit.  Returns the CFI opcode to effect the change, or
   NULL if NEW_CFA == OLD_CFA.  The returned CFI is freshly allocated;
   the caller is responsible for emitting it.  */

static dw_cfi_ref
def_cfa_0 (dw_cfa_location *old_cfa, dw_cfa_location *new_cfa)
{
  dw_cfi_ref cfi;

  /* If nothing changed, no need to issue any call frame instructions.  */
  if (cfa_equal_p (old_cfa, new_cfa))
    return NULL;

  cfi = new_cfi ();

  if (new_cfa->reg == old_cfa->reg && !new_cfa->indirect && !old_cfa->indirect)
    {
      /* Construct a "DW_CFA_def_cfa_offset <offset>" instruction, indicating
	 the CFA register did not change but the offset did.  The data
	 factoring for DW_CFA_def_cfa_offset_sf happens in output_cfi, or
	 in the assembler via the .cfi_def_cfa_offset directive.  */
      if (new_cfa->offset < 0)
	cfi->dw_cfi_opc = DW_CFA_def_cfa_offset_sf;
      else
	cfi->dw_cfi_opc = DW_CFA_def_cfa_offset;
      cfi->dw_cfi_oprnd1.dw_cfi_offset = new_cfa->offset;
    }

#ifndef MIPS_DEBUGGING_INFO  /* SGI dbx thinks this means no offset.  */
  else if (new_cfa->offset == old_cfa->offset
	   && old_cfa->reg != INVALID_REGNUM
	   && !new_cfa->indirect
	   && !old_cfa->indirect)
    {
      /* Construct a "DW_CFA_def_cfa_register <register>" instruction,
	 indicating the CFA register has changed to <register> but the
	 offset has not changed.  */
      cfi->dw_cfi_opc = DW_CFA_def_cfa_register;
      cfi->dw_cfi_oprnd1.dw_cfi_reg_num = new_cfa->reg;
    }
#endif

  else if (new_cfa->indirect == 0)
    {
      /* Construct a "DW_CFA_def_cfa <register> <offset>" instruction,
	 indicating the CFA register has changed to <register> with
	 the specified offset.  The data factoring for DW_CFA_def_cfa_sf
	 happens in output_cfi, or in the assembler via the .cfi_def_cfa
	 directive.  */
      if (new_cfa->offset < 0)
	cfi->dw_cfi_opc = DW_CFA_def_cfa_sf;
      else
	cfi->dw_cfi_opc = DW_CFA_def_cfa;
      cfi->dw_cfi_oprnd1.dw_cfi_reg_num = new_cfa->reg;
      cfi->dw_cfi_oprnd2.dw_cfi_offset = new_cfa->offset;
    }
  else
    {
      /* Construct a DW_CFA_def_cfa_expression instruction to
	 calculate the CFA using a full location expression since no
	 register-offset pair is available.  */
      struct dw_loc_descr_struct *loc_list;

      cfi->dw_cfi_opc = DW_CFA_def_cfa_expression;
      loc_list = build_cfa_loc (new_cfa, 0);
      cfi->dw_cfi_oprnd1.dw_cfi_loc = loc_list;
    }

  return cfi;
}
616
/* Similarly, but take OLD_CFA from CUR_ROW, and update it after the fact.  */

static void
def_cfa_1 (dw_cfa_location *new_cfa)
{
  dw_cfi_ref cfi;

  /* Keep cfa_store in sync when it is based on the same register as
     the new CFA.  */
  if (cfa_store.reg == new_cfa->reg && new_cfa->indirect == 0)
    cfa_store.offset = new_cfa->offset;

  cfi = def_cfa_0 (&cur_row->cfa, new_cfa);
  if (cfi)
    {
      cur_row->cfa = *new_cfa;
      /* Keep the expression form cached on the row for later reuse.  */
      if (cfi->dw_cfi_opc == DW_CFA_def_cfa_expression)
	cur_row->cfa_cfi = cfi;

      add_cfi (cfi);
    }
}
637
/* Add the CFI for saving a register.  REG is the CFA column number.
   If SREG is INVALID_REGNUM, the register is saved at OFFSET from the
   CFA; otherwise it is saved in SREG.  */

static void
reg_save (unsigned int reg, unsigned int sreg, HOST_WIDE_INT offset)
{
  dw_fde_ref fde = cfun ? cfun->fde : NULL;
  dw_cfi_ref cfi = new_cfi ();

  cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;

  /* When stack is aligned, store REG using DW_CFA_expression with FP.  */
  if (fde
      && fde->stack_realign
      && sreg == INVALID_REGNUM)
    {
      cfi->dw_cfi_opc = DW_CFA_expression;
      cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
      cfi->dw_cfi_oprnd2.dw_cfi_loc
	= build_cfa_aligned_loc (&cur_row->cfa, offset,
				 fde->stack_realignment);
    }
  else if (sreg == INVALID_REGNUM)
    {
      /* Pick the most compact opcode that can still represent the
	 (factored) offset and the register column.  */
      if (need_data_align_sf_opcode (offset))
	cfi->dw_cfi_opc = DW_CFA_offset_extended_sf;
      else if (reg & ~0x3f)
	cfi->dw_cfi_opc = DW_CFA_offset_extended;
      else
	cfi->dw_cfi_opc = DW_CFA_offset;
      cfi->dw_cfi_oprnd2.dw_cfi_offset = offset;
    }
  else if (sreg == reg)
    {
      /* While we could emit something like DW_CFA_same_value or
	 DW_CFA_restore, we never expect to see something like that
	 in a prologue.  This is more likely to be a bug.  A backend
	 can always bypass this by using REG_CFA_RESTORE directly.  */
      gcc_unreachable ();
    }
  else
    {
      cfi->dw_cfi_opc = DW_CFA_register;
      cfi->dw_cfi_oprnd2.dw_cfi_reg_num = sreg;
    }

  add_cfi (cfi);
  /* Mirror the save into the current CFI row.  */
  update_row_reg_save (cur_row, reg, cfi);
}
688
/* Given a SET, calculate the amount of stack adjustment it contains.
   CUR_ARGS_SIZE is the args_size level in effect before PATTERN;
   CUR_OFFSET is the adjustment already accumulated from earlier SETs
   of the same compound insn.  */

static HOST_WIDE_INT
stack_adjust_offset (const_rtx pattern, HOST_WIDE_INT cur_args_size,
		     HOST_WIDE_INT cur_offset)
{
  const_rtx src = SET_SRC (pattern);
  const_rtx dest = SET_DEST (pattern);
  HOST_WIDE_INT offset = 0;
  enum rtx_code code;

  if (dest == stack_pointer_rtx)
    {
      code = GET_CODE (src);

      /* Assume (set (reg sp) (reg whatever)) sets args_size
	 level to 0.  */
      if (code == REG && src != stack_pointer_rtx)
	{
	  offset = -cur_args_size;
#ifndef STACK_GROWS_DOWNWARD
	  offset = -offset;
#endif
	  return offset - cur_offset;
	}

      /* Anything else must be a constant adjustment of SP.  */
      if (! (code == PLUS || code == MINUS)
	  || XEXP (src, 0) != stack_pointer_rtx
	  || !CONST_INT_P (XEXP (src, 1)))
	return 0;

      /* (set (reg sp) (plus (reg sp) (const_int))) */
      offset = INTVAL (XEXP (src, 1));
      if (code == PLUS)
	offset = -offset;
      return offset;
    }

  if (MEM_P (src) && !MEM_P (dest))
    dest = src;
  if (MEM_P (dest))
    {
      /* (set (mem (pre_dec (reg sp))) (foo)) */
      src = XEXP (dest, 0);
      code = GET_CODE (src);

      switch (code)
	{
	case PRE_MODIFY:
	case POST_MODIFY:
	  if (XEXP (src, 0) == stack_pointer_rtx)
	    {
	      rtx val = XEXP (XEXP (src, 1), 1);
	      /* We handle only adjustments by constant amount.  */
	      gcc_assert (GET_CODE (XEXP (src, 1)) == PLUS
			  && CONST_INT_P (val));
	      offset = -INTVAL (val);
	      break;
	    }
	  return 0;

	case PRE_DEC:
	case POST_DEC:
	  if (XEXP (src, 0) == stack_pointer_rtx)
	    {
	      offset = GET_MODE_SIZE (GET_MODE (dest));
	      break;
	    }
	  return 0;

	case PRE_INC:
	case POST_INC:
	  if (XEXP (src, 0) == stack_pointer_rtx)
	    {
	      offset = -GET_MODE_SIZE (GET_MODE (dest));
	      break;
	    }
	  return 0;

	default:
	  return 0;
	}
    }
  else
    return 0;

  return offset;
}
778
/* Precomputed args_size for CODE_LABELs and BARRIERs preceding them,
   indexed by INSN_UID.  */
781
782 static HOST_WIDE_INT *barrier_args_size;
783
/* Helper function for compute_barrier_args_size.  Handle one insn.
   Returns the args_size level after INSN; any jump targets newly
   discovered are recorded in barrier_args_size and pushed onto *NEXT
   for later scanning.  */

static HOST_WIDE_INT
compute_barrier_args_size_1 (rtx insn, HOST_WIDE_INT cur_args_size,
			     VEC (rtx, heap) **next)
{
  HOST_WIDE_INT offset = 0;
  int i;

  if (! RTX_FRAME_RELATED_P (insn))
    {
      if (prologue_epilogue_contains (insn))
	/* Nothing */;
      else if (GET_CODE (PATTERN (insn)) == SET)
	offset = stack_adjust_offset (PATTERN (insn), cur_args_size, 0);
      else if (GET_CODE (PATTERN (insn)) == PARALLEL
	       || GET_CODE (PATTERN (insn)) == SEQUENCE)
	{
	  /* There may be stack adjustments inside compound insns.  Search
	     for them.  */
	  for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
	    if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
	      offset += stack_adjust_offset (XVECEXP (PATTERN (insn), 0, i),
					     cur_args_size, offset);
	}
    }
  else
    {
      /* For frame-related insns, only the REG_FRAME_RELATED_EXPR note
	 (if any) is consulted, and only its non-frame-related SETs.  */
      rtx expr = find_reg_note (insn, REG_FRAME_RELATED_EXPR, NULL_RTX);

      if (expr)
	{
	  expr = XEXP (expr, 0);
	  if (GET_CODE (expr) == PARALLEL
	      || GET_CODE (expr) == SEQUENCE)
	    for (i = 1; i < XVECLEN (expr, 0); i++)
	      {
		rtx elem = XVECEXP (expr, 0, i);

		if (GET_CODE (elem) == SET && !RTX_FRAME_RELATED_P (elem))
		  offset += stack_adjust_offset (elem, cur_args_size, offset);
	      }
	}
    }

#ifndef STACK_GROWS_DOWNWARD
  offset = -offset;
#endif

  /* args_size never goes negative.  */
  cur_args_size += offset;
  if (cur_args_size < 0)
    cur_args_size = 0;

  if (JUMP_P (insn))
    {
      rtx dest = JUMP_LABEL (insn);

      if (dest)
	{
	  if (barrier_args_size [INSN_UID (dest)] < 0)
	    {
	      barrier_args_size [INSN_UID (dest)] = cur_args_size;
	      VEC_safe_push (rtx, heap, *next, dest);
	    }
	}
    }

  return cur_args_size;
}
853
/* Walk the whole function and compute args_size on BARRIERs.  Uses a
   two-vector worklist (WORKLIST drained while NEXT collects newly found
   jump targets), swapping them each round until no targets remain.  */

static void
compute_barrier_args_size (void)
{
  int max_uid = get_max_uid (), i;
  rtx insn;
  VEC (rtx, heap) *worklist, *next, *tmp;

  /* -1 marks an insn whose args_size has not been computed yet.  */
  barrier_args_size = XNEWVEC (HOST_WIDE_INT, max_uid);
  for (i = 0; i < max_uid; i++)
    barrier_args_size[i] = -1;

  worklist = VEC_alloc (rtx, heap, 20);
  next = VEC_alloc (rtx, heap, 20);
  insn = get_insns ();
  barrier_args_size[INSN_UID (insn)] = 0;
  VEC_quick_push (rtx, worklist, insn);
  for (;;)
    {
      while (!VEC_empty (rtx, worklist))
	{
	  rtx prev, body, first_insn;
	  HOST_WIDE_INT cur_args_size;

	  first_insn = insn = VEC_pop (rtx, worklist);
	  cur_args_size = barrier_args_size[INSN_UID (insn)];
	  prev = prev_nonnote_insn (insn);
	  if (prev && BARRIER_P (prev))
	    barrier_args_size[INSN_UID (prev)] = cur_args_size;

	  /* Scan forward from the popped insn, propagating args_size
	     until a BARRIER or an already-seen label stops us.  */
	  for (; insn; insn = NEXT_INSN (insn))
	    {
	      if (INSN_DELETED_P (insn) || NOTE_P (insn))
		continue;
	      if (BARRIER_P (insn))
		break;

	      if (LABEL_P (insn))
		{
		  if (insn == first_insn)
		    continue;
		  else if (barrier_args_size[INSN_UID (insn)] < 0)
		    {
		      barrier_args_size[INSN_UID (insn)] = cur_args_size;
		      continue;
		    }
		  else
		    {
		      /* The insns starting with this label have been
			 already scanned or are in the worklist.  */
		      break;
		    }
		}

	      body = PATTERN (insn);
	      if (GET_CODE (body) == SEQUENCE)
		{
		  /* Delay slots: insns from the target of an annulled
		     branch only affect the target's args_size.  */
		  HOST_WIDE_INT dest_args_size = cur_args_size;
		  for (i = 1; i < XVECLEN (body, 0); i++)
		    if (INSN_ANNULLED_BRANCH_P (XVECEXP (body, 0, 0))
			&& INSN_FROM_TARGET_P (XVECEXP (body, 0, i)))
		      dest_args_size
			= compute_barrier_args_size_1 (XVECEXP (body, 0, i),
						       dest_args_size, &next);
		    else
		      cur_args_size
			= compute_barrier_args_size_1 (XVECEXP (body, 0, i),
						       cur_args_size, &next);

		  if (INSN_ANNULLED_BRANCH_P (XVECEXP (body, 0, 0)))
		    compute_barrier_args_size_1 (XVECEXP (body, 0, 0),
						 dest_args_size, &next);
		  else
		    cur_args_size
		      = compute_barrier_args_size_1 (XVECEXP (body, 0, 0),
						     cur_args_size, &next);
		}
	      else
		cur_args_size
		  = compute_barrier_args_size_1 (insn, cur_args_size, &next);
	    }
	}

      if (VEC_empty (rtx, next))
	break;

      /* Swap WORKLIST with NEXT and truncate NEXT for next iteration.  */
      tmp = next;
      next = worklist;
      worklist = tmp;
      VEC_truncate (rtx, next, 0);
    }

  VEC_free (rtx, heap, worklist);
  VEC_free (rtx, heap, next);
}
951
952 /* Add a CFI to update the running total of the size of arguments
953 pushed onto the stack. */
954
955 static void
956 dwarf2out_args_size (HOST_WIDE_INT size)
957 {
958 if (size == cur_row->args_size)
959 return;
960
961 cur_row->args_size = size;
962 add_cfi_args_size (size);
963 }
964
/* Record a stack adjustment of OFFSET bytes.  */

static void
dwarf2out_stack_adjust (HOST_WIDE_INT offset)
{
  dw_cfa_location loc = cur_row->cfa;

  if (loc.reg == dw_stack_pointer_regnum)
    loc.offset += offset;

  if (cfa_store.reg == dw_stack_pointer_regnum)
    cfa_store.offset += offset;

  /* ??? The assumption seems to be that if A_O_A, the only CFA adjustments
     involving the stack pointer are inside the prologue and marked as
     RTX_FRAME_RELATED_P.  That said, should we not verify this assumption
     by *asserting* A_O_A at this point?  Why else would we have a change
     to the stack pointer?  */
  if (ACCUMULATE_OUTGOING_ARGS)
    return;

#ifndef STACK_GROWS_DOWNWARD
  offset = -offset;
#endif

  /* args_size never goes negative.  */
  args_size += offset;
  if (args_size < 0)
    args_size = 0;

  /* Re-emit the CFA definition with the adjusted offset.  */
  def_cfa_1 (&loc);
  if (flag_asynchronous_unwind_tables)
    dwarf2out_args_size (args_size);
}
998
/* Check INSN to see if it looks like a push or a stack adjustment, and
   make a note of it if it does.  EH uses this information to find out
   how much extra space it needs to pop off the stack.  AFTER_P is true
   when we are being called for the state after INSN.  */

static void
dwarf2out_notice_stack_adjust (rtx insn, bool after_p)
{
  HOST_WIDE_INT offset;
  int i;

  /* Don't handle epilogues at all.  Certainly it would be wrong to do so
     with this function.  Proper support would require all frame-related
     insns to be marked, and to be able to handle saving state around
     epilogues textually in the middle of the function.  */
  if (prologue_epilogue_contains (insn))
    return;

  /* If INSN is an instruction from target of an annulled branch, the
     effects are for the target only and so current argument size
     shouldn't change at all.  */
  if (final_sequence
      && INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))
      && INSN_FROM_TARGET_P (insn))
    return;

  /* If only calls can throw, and we have a frame pointer,
     save up adjustments until we see the CALL_INSN.  */
  if (!flag_asynchronous_unwind_tables
      && cur_row->cfa.reg != dw_stack_pointer_regnum)
    {
      if (CALL_P (insn) && !after_p)
	{
	  /* Extract the size of the args from the CALL rtx itself.  */
	  insn = PATTERN (insn);
	  if (GET_CODE (insn) == PARALLEL)
	    insn = XVECEXP (insn, 0, 0);
	  if (GET_CODE (insn) == SET)
	    insn = SET_SRC (insn);
	  gcc_assert (GET_CODE (insn) == CALL);
	  dwarf2out_args_size (INTVAL (XEXP (insn, 1)));
	}
      return;
    }

  if (CALL_P (insn) && !after_p)
    {
      /* Flush the delayed args_size total before the call.  */
      if (!flag_asynchronous_unwind_tables)
	dwarf2out_args_size (args_size);
      return;
    }
  else if (BARRIER_P (insn))
    {
      /* Don't call compute_barrier_args_size () if the only
	 BARRIER is at the end of function.  */
      if (barrier_args_size == NULL && next_nonnote_insn (insn))
	compute_barrier_args_size ();
      if (barrier_args_size == NULL)
	offset = 0;
      else
	{
	  offset = barrier_args_size[INSN_UID (insn)];
	  if (offset < 0)
	    offset = 0;
	}

      offset -= args_size;
#ifndef STACK_GROWS_DOWNWARD
      offset = -offset;
#endif
    }
  else if (GET_CODE (PATTERN (insn)) == SET)
    offset = stack_adjust_offset (PATTERN (insn), args_size, 0);
  else if (GET_CODE (PATTERN (insn)) == PARALLEL
	   || GET_CODE (PATTERN (insn)) == SEQUENCE)
    {
      /* There may be stack adjustments inside compound insns.  Search
	 for them.  */
      for (offset = 0, i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
	if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
	  offset += stack_adjust_offset (XVECEXP (PATTERN (insn), 0, i),
					 args_size, offset);
    }
  else
    return;

  if (offset == 0)
    return;

  dwarf2out_stack_adjust (offset);
}
1089
/* We delay emitting a register save until either (a) we reach the end
   of the prologue or (b) the register is clobbered.  This clusters
   register saves so that there are fewer pc advances.  */

typedef struct {
  /* The register being saved; may also be PC_RTX.  */
  rtx reg;
  /* If non-null, the register REG's value is saved into.  */
  rtx saved_reg;
  /* Otherwise, the CFA-relative offset at which REG is saved.  */
  HOST_WIDE_INT cfa_offset;
} queued_reg_save;

DEF_VEC_O (queued_reg_save);
DEF_VEC_ALLOC_O (queued_reg_save, heap);

/* The queue of delayed register saves, flushed in one batch.  */
static VEC(queued_reg_save, heap) *queued_reg_saves;
1104
/* The caller's ORIG_REG is saved in SAVED_IN_REG.  */
typedef struct GTY(()) reg_saved_in_data {
  rtx orig_reg;
  rtx saved_in_reg;
} reg_saved_in_data;

DEF_VEC_O (reg_saved_in_data);
DEF_VEC_ALLOC_O (reg_saved_in_data, gc);

/* A set of registers saved in other registers.  This is implemented as
   a flat array because it normally contains zero or 1 entry, depending
   on the target.  IA-64 is the big spender here, using a maximum of
   5 entries.  */
static GTY(()) VEC(reg_saved_in_data, gc) *regs_saved_in_regs;

/* CIE-level record of where the return address is saved; presumably
   set up when the CIE is built -- not visible in this chunk.  */
static GTY(()) reg_saved_in_data *cie_return_save;
1121
/* Short-hand inline for the very common D_F_R (REGNO (x)) operation.  */
/* ??? This ought to go into dwarf2out.h, except that dwarf2out.h is
   used in places where rtl is prohibited.  */

static inline unsigned
dwf_regno (const_rtx reg)
{
  /* Map a hard-register rtx to its DWARF frame column.  */
  return DWARF_FRAME_REGNUM (REGNO (reg));
}
1131
1132 /* Compare X and Y for equivalence. The inputs may be REGs or PC_RTX. */
1133
1134 static bool
1135 compare_reg_or_pc (rtx x, rtx y)
1136 {
1137 if (REG_P (x) && REG_P (y))
1138 return REGNO (x) == REGNO (y);
1139 return x == y;
1140 }
1141
/* Record SRC as being saved in DEST.  DEST may be null to delete an
   existing entry.  SRC may be a register or PC_RTX.  */

static void
record_reg_saved_in_reg (rtx dest, rtx src)
{
  reg_saved_in_data *elt;
  size_t i;

  /* If SRC already has an entry, update it in place, or drop it when
     DEST is null.  */
  FOR_EACH_VEC_ELT (reg_saved_in_data, regs_saved_in_regs, i, elt)
    if (compare_reg_or_pc (elt->orig_reg, src))
      {
	if (dest == NULL)
	  /* Ordering of this vector is irrelevant, so the cheap
	     swap-with-last removal is fine.  */
	  VEC_unordered_remove(reg_saved_in_data, regs_saved_in_regs, i);
	else
	  elt->saved_in_reg = dest;
	return;
      }

  /* Deleting an entry that was never recorded is a no-op.  */
  if (dest == NULL)
    return;

  elt = VEC_safe_push(reg_saved_in_data, gc, regs_saved_in_regs, NULL);
  elt->orig_reg = src;
  elt->saved_in_reg = dest;
}
1168
1169 /* Add an entry to QUEUED_REG_SAVES saying that REG is now saved at
1170 SREG, or if SREG is NULL then it is saved at OFFSET to the CFA. */
1171
1172 static void
1173 queue_reg_save (rtx reg, rtx sreg, HOST_WIDE_INT offset)
1174 {
1175 queued_reg_save *q;
1176 size_t i;
1177
1178 /* Duplicates waste space, but it's also necessary to remove them
1179 for correctness, since the queue gets output in reverse order. */
1180 FOR_EACH_VEC_ELT (queued_reg_save, queued_reg_saves, i, q)
1181 if (compare_reg_or_pc (q->reg, reg))
1182 goto found;
1183
1184 q = VEC_safe_push (queued_reg_save, heap, queued_reg_saves, NULL);
1185
1186 found:
1187 q->reg = reg;
1188 q->saved_reg = sreg;
1189 q->cfa_offset = offset;
1190 }
1191
/* Output all the entries in QUEUED_REG_SAVES.  */

static void
dwarf2out_flush_queued_reg_saves (void)
{
  queued_reg_save *q;
  size_t i;

  FOR_EACH_VEC_ELT (queued_reg_save, queued_reg_saves, i, q)
    {
      unsigned int reg, sreg;

      /* Remember register-to-register saves, so later uses of the
	 destination register can be rewritten to the original
	 (see reg_saved_in).  */
      record_reg_saved_in_reg (q->saved_reg, q->reg);

      /* PC_RTX stands for the return-address column.  */
      if (q->reg == pc_rtx)
	reg = DWARF_FRAME_RETURN_COLUMN;
      else
        reg = dwf_regno (q->reg);
      if (q->saved_reg)
	sreg = dwf_regno (q->saved_reg);
      else
	sreg = INVALID_REGNUM;
      reg_save (reg, sreg, q->cfa_offset);
    }

  /* The queue has been emitted; empty it.  */
  VEC_truncate (queued_reg_save, queued_reg_saves, 0);
}
1219
/* Does INSN clobber any register which QUEUED_REG_SAVES lists a saved
   location for?  Or, does it clobber a register which we've previously
   said that some other register is saved in, and for which we now
   have a new location for?  */

static bool
clobbers_queued_reg_save (const_rtx insn)
{
  queued_reg_save *q;
  size_t iq;

  FOR_EACH_VEC_ELT (queued_reg_save, queued_reg_saves, iq, q)
    {
      size_t ir;
      reg_saved_in_data *rir;

      /* The queued register itself is overwritten by INSN.  */
      if (modified_in_p (q->reg, insn))
	return true;

      /* Or INSN overwrites the register previously recorded as
	 holding the queued register's saved value.  */
      FOR_EACH_VEC_ELT (reg_saved_in_data, regs_saved_in_regs, ir, rir)
	if (compare_reg_or_pc (q->reg, rir->orig_reg)
	    && modified_in_p (rir->saved_in_reg, insn))
	  return true;
    }

  return false;
}
1247
/* What register, if any, is currently saved in REG?  */

static rtx
reg_saved_in (rtx reg)
{
  unsigned int regn = REGNO (reg);
  queued_reg_save *q;
  reg_saved_in_data *rir;
  size_t i;

  /* Check pending (queued, not yet emitted) saves first.  */
  FOR_EACH_VEC_ELT (queued_reg_save, queued_reg_saves, i, q)
    if (q->saved_reg && regn == REGNO (q->saved_reg))
      return q->reg;

  /* Then the already-recorded register-to-register saves.  */
  FOR_EACH_VEC_ELT (reg_saved_in_data, regs_saved_in_regs, i, rir)
    if (regn == REGNO (rir->saved_in_reg))
      return rir->orig_reg;

  return NULL_RTX;
}
1268
/* A subroutine of dwarf2out_frame_debug, process a REG_DEF_CFA note.  */

static void
dwarf2out_frame_debug_def_cfa (rtx pat)
{
  dw_cfa_location loc;

  memset (&loc, 0, sizeof (loc));

  /* PAT gives the new CFA rule directly: a register, a register plus a
     constant, or a MEM of either of those (an indirect CFA).  */
  switch (GET_CODE (pat))
    {
    case PLUS:
      loc.reg = dwf_regno (XEXP (pat, 0));
      loc.offset = INTVAL (XEXP (pat, 1));
      break;

    case REG:
      loc.reg = dwf_regno (pat);
      break;

    case MEM:
      loc.indirect = 1;
      pat = XEXP (pat, 0);
      if (GET_CODE (pat) == PLUS)
	{
	  loc.base_offset = INTVAL (XEXP (pat, 1));
	  pat = XEXP (pat, 0);
	}
      loc.reg = dwf_regno (pat);
      break;

    default:
      /* Recurse and define an expression.  */
      gcc_unreachable ();
    }

  def_cfa_1 (&loc);
}
1307
/* A subroutine of dwarf2out_frame_debug, process a REG_ADJUST_CFA note.  */

static void
dwarf2out_frame_debug_adjust_cfa (rtx pat)
{
  dw_cfa_location loc = cur_row->cfa;
  rtx src, dest;

  gcc_assert (GET_CODE (pat) == SET);
  dest = XEXP (pat, 0);
  src = XEXP (pat, 1);

  /* The insn either copies the current CFA register (REG) or adds a
     constant to it (PLUS); either way the rule is rebased onto DEST.  */
  switch (GET_CODE (src))
    {
    case PLUS:
      gcc_assert (dwf_regno (XEXP (src, 0)) == loc.reg);
      /* DEST moves by the addend, so the offset from DEST back to the
	 (unchanged) CFA shrinks by the same amount.  */
      loc.offset -= INTVAL (XEXP (src, 1));
      break;

    case REG:
      break;

    default:
      gcc_unreachable ();
    }

  loc.reg = dwf_regno (dest);
  gcc_assert (loc.indirect == 0);

  def_cfa_1 (&loc);
}
1339
1340 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_OFFSET note. */
1341
1342 static void
1343 dwarf2out_frame_debug_cfa_offset (rtx set)
1344 {
1345 HOST_WIDE_INT offset;
1346 rtx src, addr, span;
1347 unsigned int sregno;
1348
1349 src = XEXP (set, 1);
1350 addr = XEXP (set, 0);
1351 gcc_assert (MEM_P (addr));
1352 addr = XEXP (addr, 0);
1353
1354 /* As documented, only consider extremely simple addresses. */
1355 switch (GET_CODE (addr))
1356 {
1357 case REG:
1358 gcc_assert (dwf_regno (addr) == cur_row->cfa.reg);
1359 offset = -cur_row->cfa.offset;
1360 break;
1361 case PLUS:
1362 gcc_assert (dwf_regno (XEXP (addr, 0)) == cur_row->cfa.reg);
1363 offset = INTVAL (XEXP (addr, 1)) - cur_row->cfa.offset;
1364 break;
1365 default:
1366 gcc_unreachable ();
1367 }
1368
1369 if (src == pc_rtx)
1370 {
1371 span = NULL;
1372 sregno = DWARF_FRAME_RETURN_COLUMN;
1373 }
1374 else
1375 {
1376 span = targetm.dwarf_register_span (src);
1377 sregno = dwf_regno (src);
1378 }
1379
1380 /* ??? We'd like to use queue_reg_save, but we need to come up with
1381 a different flushing heuristic for epilogues. */
1382 if (!span)
1383 reg_save (sregno, INVALID_REGNUM, offset);
1384 else
1385 {
1386 /* We have a PARALLEL describing where the contents of SRC live.
1387 Queue register saves for each piece of the PARALLEL. */
1388 int par_index;
1389 int limit;
1390 HOST_WIDE_INT span_offset = offset;
1391
1392 gcc_assert (GET_CODE (span) == PARALLEL);
1393
1394 limit = XVECLEN (span, 0);
1395 for (par_index = 0; par_index < limit; par_index++)
1396 {
1397 rtx elem = XVECEXP (span, 0, par_index);
1398
1399 sregno = dwf_regno (src);
1400 reg_save (sregno, INVALID_REGNUM, span_offset);
1401 span_offset += GET_MODE_SIZE (GET_MODE (elem));
1402 }
1403 }
1404 }
1405
1406 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_REGISTER note. */
1407
1408 static void
1409 dwarf2out_frame_debug_cfa_register (rtx set)
1410 {
1411 rtx src, dest;
1412 unsigned sregno, dregno;
1413
1414 src = XEXP (set, 1);
1415 dest = XEXP (set, 0);
1416
1417 record_reg_saved_in_reg (dest, src);
1418 if (src == pc_rtx)
1419 sregno = DWARF_FRAME_RETURN_COLUMN;
1420 else
1421 sregno = dwf_regno (src);
1422
1423 dregno = dwf_regno (dest);
1424
1425 /* ??? We'd like to use queue_reg_save, but we need to come up with
1426 a different flushing heuristic for epilogues. */
1427 reg_save (sregno, dregno, 0);
1428 }
1429
/* A subroutine of dwarf2out_frame_debug, process a REG_CFA_EXPRESSION note.  */

static void
dwarf2out_frame_debug_cfa_expression (rtx set)
{
  rtx src, dest, span;
  dw_cfi_ref cfi = new_cfi ();
  unsigned regno;

  dest = SET_DEST (set);
  src = SET_SRC (set);

  gcc_assert (REG_P (src));
  gcc_assert (MEM_P (dest));

  /* A register that spans multiple pieces cannot be described by one
     DW_CFA_expression.  */
  span = targetm.dwarf_register_span (src);
  gcc_assert (!span);

  regno = dwf_regno (src);

  /* Describe SRC's save slot with a full DWARF location expression
     built from the MEM's address.  */
  cfi->dw_cfi_opc = DW_CFA_expression;
  cfi->dw_cfi_oprnd1.dw_cfi_reg_num = regno;
  cfi->dw_cfi_oprnd2.dw_cfi_loc
    = mem_loc_descriptor (XEXP (dest, 0), get_address_mode (dest),
			  GET_MODE (dest), VAR_INIT_STATUS_INITIALIZED);

  /* ??? We'd like to use queue_reg_save, were the interface different,
     and, as above, we could manage flushing for epilogues.  */
  add_cfi (cfi);
  update_row_reg_save (cur_row, regno, cfi);
}
1461
1462 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_RESTORE note. */
1463
1464 static void
1465 dwarf2out_frame_debug_cfa_restore (rtx reg)
1466 {
1467 unsigned int regno = dwf_regno (reg);
1468
1469 add_cfi_restore (regno);
1470 update_row_reg_save (cur_row, regno, NULL);
1471 }
1472
1473 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_WINDOW_SAVE.
1474 ??? Perhaps we should note in the CIE where windows are saved (instead of
1475 assuming 0(cfa)) and what registers are in the window. */
1476
1477 static void
1478 dwarf2out_frame_debug_cfa_window_save (void)
1479 {
1480 dw_cfi_ref cfi = new_cfi ();
1481
1482 cfi->dw_cfi_opc = DW_CFA_GNU_window_save;
1483 add_cfi (cfi);
1484 }
1485
1486 /* Record call frame debugging information for an expression EXPR,
1487 which either sets SP or FP (adjusting how we calculate the frame
1488 address) or saves a register to the stack or another register.
1489 LABEL indicates the address of EXPR.
1490
1491 This function encodes a state machine mapping rtxes to actions on
1492 cfa, cfa_store, and cfa_temp.reg. We describe these rules so
1493 users need not read the source code.
1494
1495 The High-Level Picture
1496
1497 Changes in the register we use to calculate the CFA: Currently we
1498 assume that if you copy the CFA register into another register, we
1499 should take the other one as the new CFA register; this seems to
1500 work pretty well. If it's wrong for some target, it's simple
1501 enough not to set RTX_FRAME_RELATED_P on the insn in question.
1502
1503 Changes in the register we use for saving registers to the stack:
1504 This is usually SP, but not always. Again, we deduce that if you
1505 copy SP into another register (and SP is not the CFA register),
1506 then the new register is the one we will be using for register
1507 saves. This also seems to work.
1508
1509 Register saves: There's not much guesswork about this one; if
1510 RTX_FRAME_RELATED_P is set on an insn which modifies memory, it's a
1511 register save, and the register used to calculate the destination
1512 had better be the one we think we're using for this purpose.
1513 It's also assumed that a copy from a call-saved register to another
1514 register is saving that register if RTX_FRAME_RELATED_P is set on
1515 that instruction. If the copy is from a call-saved register to
1516 the *same* register, that means that the register is now the same
1517 value as in the caller.
1518
1519 Except: If the register being saved is the CFA register, and the
1520 offset is nonzero, we are saving the CFA, so we assume we have to
1521 use DW_CFA_def_cfa_expression. If the offset is 0, we assume that
1522 the intent is to save the value of SP from the previous frame.
1523
   In addition, if a register has previously been saved to a different
   register, then uses of the destination register in a subsequent
   frame-related insn are treated as uses of the original register
   (see reg_saved_in).
1526
1527 Invariants / Summaries of Rules
1528
1529 cfa current rule for calculating the CFA. It usually
1530 consists of a register and an offset. This is
1531 actually stored in cur_row->cfa, but abbreviated
1532 for the purposes of this documentation.
1533 cfa_store register used by prologue code to save things to the stack
1534 cfa_store.offset is the offset from the value of
1535 cfa_store.reg to the actual CFA
1536 cfa_temp register holding an integral value. cfa_temp.offset
1537 stores the value, which will be used to adjust the
1538 stack pointer. cfa_temp is also used like cfa_store,
1539 to track stores to the stack via fp or a temp reg.
1540
1541 Rules 1- 4: Setting a register's value to cfa.reg or an expression
1542 with cfa.reg as the first operand changes the cfa.reg and its
1543 cfa.offset. Rule 1 and 4 also set cfa_temp.reg and
1544 cfa_temp.offset.
1545
1546 Rules 6- 9: Set a non-cfa.reg register value to a constant or an
1547 expression yielding a constant. This sets cfa_temp.reg
1548 and cfa_temp.offset.
1549
1550 Rule 5: Create a new register cfa_store used to save items to the
1551 stack.
1552
1553 Rules 10-14: Save a register to the stack. Define offset as the
1554 difference of the original location and cfa_store's
1555 location (or cfa_temp's location if cfa_temp is used).
1556
1557 Rules 16-20: If AND operation happens on sp in prologue, we assume
1558 stack is realigned. We will use a group of DW_OP_XXX
1559 expressions to represent the location of the stored
1560 register instead of CFA+offset.
1561
1562 The Rules
1563
1564 "{a,b}" indicates a choice of a xor b.
1565 "<reg>:cfa.reg" indicates that <reg> must equal cfa.reg.
1566
1567 Rule 1:
1568 (set <reg1> <reg2>:cfa.reg)
1569 effects: cfa.reg = <reg1>
1570 cfa.offset unchanged
1571 cfa_temp.reg = <reg1>
1572 cfa_temp.offset = cfa.offset
1573
1574 Rule 2:
1575 (set sp ({minus,plus,losum} {sp,fp}:cfa.reg
1576 {<const_int>,<reg>:cfa_temp.reg}))
1577 effects: cfa.reg = sp if fp used
1578 cfa.offset += {+/- <const_int>, cfa_temp.offset} if cfa.reg==sp
1579 cfa_store.offset += {+/- <const_int>, cfa_temp.offset}
1580 if cfa_store.reg==sp
1581
1582 Rule 3:
1583 (set fp ({minus,plus,losum} <reg>:cfa.reg <const_int>))
1584 effects: cfa.reg = fp
1585 cfa_offset += +/- <const_int>
1586
1587 Rule 4:
1588 (set <reg1> ({plus,losum} <reg2>:cfa.reg <const_int>))
1589 constraints: <reg1> != fp
1590 <reg1> != sp
1591 effects: cfa.reg = <reg1>
1592 cfa_temp.reg = <reg1>
1593 cfa_temp.offset = cfa.offset
1594
1595 Rule 5:
1596 (set <reg1> (plus <reg2>:cfa_temp.reg sp:cfa.reg))
1597 constraints: <reg1> != fp
1598 <reg1> != sp
1599 effects: cfa_store.reg = <reg1>
1600 cfa_store.offset = cfa.offset - cfa_temp.offset
1601
1602 Rule 6:
1603 (set <reg> <const_int>)
1604 effects: cfa_temp.reg = <reg>
1605 cfa_temp.offset = <const_int>
1606
1607 Rule 7:
1608 (set <reg1>:cfa_temp.reg (ior <reg2>:cfa_temp.reg <const_int>))
1609 effects: cfa_temp.reg = <reg1>
1610 cfa_temp.offset |= <const_int>
1611
1612 Rule 8:
1613 (set <reg> (high <exp>))
1614 effects: none
1615
1616 Rule 9:
1617 (set <reg> (lo_sum <exp> <const_int>))
1618 effects: cfa_temp.reg = <reg>
1619 cfa_temp.offset = <const_int>
1620
1621 Rule 10:
1622 (set (mem ({pre,post}_modify sp:cfa_store (???? <reg1> <const_int>))) <reg2>)
1623 effects: cfa_store.offset -= <const_int>
1624 cfa.offset = cfa_store.offset if cfa.reg == sp
1625 cfa.reg = sp
1626 cfa.base_offset = -cfa_store.offset
1627
1628 Rule 11:
1629 (set (mem ({pre_inc,pre_dec,post_dec} sp:cfa_store.reg)) <reg>)
1630 effects: cfa_store.offset += -/+ mode_size(mem)
1631 cfa.offset = cfa_store.offset if cfa.reg == sp
1632 cfa.reg = sp
1633 cfa.base_offset = -cfa_store.offset
1634
1635 Rule 12:
1636 (set (mem ({minus,plus,losum} <reg1>:{cfa_store,cfa_temp} <const_int>))
1637
1638 <reg2>)
1639 effects: cfa.reg = <reg1>
1640 cfa.base_offset = -/+ <const_int> - {cfa_store,cfa_temp}.offset
1641
1642 Rule 13:
1643 (set (mem <reg1>:{cfa_store,cfa_temp}) <reg2>)
1644 effects: cfa.reg = <reg1>
1645 cfa.base_offset = -{cfa_store,cfa_temp}.offset
1646
1647 Rule 14:
1648 (set (mem (post_inc <reg1>:cfa_temp <const_int>)) <reg2>)
1649 effects: cfa.reg = <reg1>
1650 cfa.base_offset = -cfa_temp.offset
1651 cfa_temp.offset -= mode_size(mem)
1652
1653 Rule 15:
1654 (set <reg> {unspec, unspec_volatile})
1655 effects: target-dependent
1656
1657 Rule 16:
1658 (set sp (and: sp <const_int>))
1659 constraints: cfa_store.reg == sp
1660 effects: cfun->fde.stack_realign = 1
1661 cfa_store.offset = 0
1662 fde->drap_reg = cfa.reg if cfa.reg != sp and cfa.reg != fp
1663
1664 Rule 17:
1665 (set (mem ({pre_inc, pre_dec} sp)) (mem (plus (cfa.reg) (const_int))))
1666 effects: cfa_store.offset += -/+ mode_size(mem)
1667
1668 Rule 18:
1669 (set (mem ({pre_inc, pre_dec} sp)) fp)
1670 constraints: fde->stack_realign == 1
1671 effects: cfa_store.offset = 0
1672 cfa.reg != HARD_FRAME_POINTER_REGNUM
1673
1674 Rule 19:
1675 (set (mem ({pre_inc, pre_dec} sp)) cfa.reg)
1676 constraints: fde->stack_realign == 1
1677 && cfa.offset == 0
1678 && cfa.indirect == 0
1679 && cfa.reg != HARD_FRAME_POINTER_REGNUM
1680 effects: Use DW_CFA_def_cfa_expression to define cfa
1681 cfa.reg == fde->drap_reg */
1682
static void
dwarf2out_frame_debug_expr (rtx expr)
{
  dw_cfa_location cfa = cur_row->cfa;
  rtx src, dest, span;
  HOST_WIDE_INT offset;
  dw_fde_ref fde;

  /* If RTX_FRAME_RELATED_P is set on a PARALLEL, process each member of
     the PARALLEL independently. The first element is always processed if
     it is a SET. This is for backward compatibility.   Other elements
     are processed only if they are SETs and the RTX_FRAME_RELATED_P
     flag is set in them. */
  if (GET_CODE (expr) == PARALLEL || GET_CODE (expr) == SEQUENCE)
    {
      int par_index;
      int limit = XVECLEN (expr, 0);
      rtx elem;

      /* PARALLELs have strict read-modify-write semantics, so we
	 ought to evaluate every rvalue before changing any lvalue.
	 It's cumbersome to do that in general, but there's an
	 easy approximation that is enough for all current users:
	 handle register saves before register assignments.  */
      if (GET_CODE (expr) == PARALLEL)
	for (par_index = 0; par_index < limit; par_index++)
	  {
	    elem = XVECEXP (expr, 0, par_index);
	    if (GET_CODE (elem) == SET
		&& MEM_P (SET_DEST (elem))
		&& (RTX_FRAME_RELATED_P (elem) || par_index == 0))
	      dwarf2out_frame_debug_expr (elem);
	  }

      for (par_index = 0; par_index < limit; par_index++)
	{
	  elem = XVECEXP (expr, 0, par_index);
	  if (GET_CODE (elem) == SET
	      && (!MEM_P (SET_DEST (elem)) || GET_CODE (expr) == SEQUENCE)
	      && (RTX_FRAME_RELATED_P (elem) || par_index == 0))
	    dwarf2out_frame_debug_expr (elem);
	  else if (GET_CODE (elem) == SET
		   && par_index != 0
		   && !RTX_FRAME_RELATED_P (elem))
	    {
	      /* Stack adjustment combining might combine some post-prologue
		 stack adjustment into a prologue stack adjustment.  */
	      HOST_WIDE_INT offset = stack_adjust_offset (elem, args_size, 0);

	      if (offset != 0)
		dwarf2out_stack_adjust (offset);
	    }
	}
      return;
    }

  gcc_assert (GET_CODE (expr) == SET);

  src = SET_SRC (expr);
  dest = SET_DEST (expr);

  /* If SRC was previously recorded as holding another register's saved
     value, substitute that original register.  */
  if (REG_P (src))
    {
      rtx rsi = reg_saved_in (src);
      if (rsi)
	src = rsi;
    }

  fde = cfun->fde;

  switch (GET_CODE (dest))
    {
    case REG:
      switch (GET_CODE (src))
	{
	  /* Setting FP from SP.  */
	case REG:
	  if (cfa.reg == dwf_regno (src))
	    {
	      /* Rule 1 */
	      /* Update the CFA rule wrt SP or FP.  Make sure src is
		 relative to the current CFA register.

		 We used to require that dest be either SP or FP, but the
		 ARM copies SP to a temporary register, and from there to
		 FP.  So we just rely on the backends to only set
		 RTX_FRAME_RELATED_P on appropriate insns.  */
	      cfa.reg = dwf_regno (dest);
	      cfa_temp.reg = cfa.reg;
	      cfa_temp.offset = cfa.offset;
	    }
	  else
	    {
	      /* Saving a register in a register.  */
	      gcc_assert (!fixed_regs [REGNO (dest)]
			  /* For the SPARC and its register window.  */
			  || (dwf_regno (src) == DWARF_FRAME_RETURN_COLUMN));

	      /* After stack is aligned, we can only save SP in FP
		 if drap register is used.  In this case, we have
		 to restore stack pointer with the CFA value and we
		 don't generate this DWARF information.  */
	      if (fde
		  && fde->stack_realign
		  && REGNO (src) == STACK_POINTER_REGNUM)
		gcc_assert (REGNO (dest) == HARD_FRAME_POINTER_REGNUM
			    && fde->drap_reg != INVALID_REGNUM
			    && cfa.reg != dwf_regno (src));
	      else
		queue_reg_save (src, dest, 0);
	    }
	  break;

	case PLUS:
	case MINUS:
	case LO_SUM:
	  if (dest == stack_pointer_rtx)
	    {
	      /* Rule 2 */
	      /* Adjusting SP.  */
	      switch (GET_CODE (XEXP (src, 1)))
		{
		case CONST_INT:
		  offset = INTVAL (XEXP (src, 1));
		  break;
		case REG:
		  /* The adjustment amount was previously loaded into
		     cfa_temp (see Rule 6/9).  */
		  gcc_assert (dwf_regno (XEXP (src, 1)) == cfa_temp.reg);
		  offset = cfa_temp.offset;
		  break;
		default:
		  gcc_unreachable ();
		}

	      if (XEXP (src, 0) == hard_frame_pointer_rtx)
		{
		  /* Restoring SP from FP in the epilogue.  */
		  gcc_assert (cfa.reg == dw_frame_pointer_regnum);
		  cfa.reg = dw_stack_pointer_regnum;
		}
	      else if (GET_CODE (src) == LO_SUM)
		/* Assume we've set the source reg of the LO_SUM from sp.  */
		;
	      else
		gcc_assert (XEXP (src, 0) == stack_pointer_rtx);

	      if (GET_CODE (src) != MINUS)
		offset = -offset;
	      if (cfa.reg == dw_stack_pointer_regnum)
		cfa.offset += offset;
	      if (cfa_store.reg == dw_stack_pointer_regnum)
		cfa_store.offset += offset;
	    }
	  else if (dest == hard_frame_pointer_rtx)
	    {
	      /* Rule 3 */
	      /* Either setting the FP from an offset of the SP,
		 or adjusting the FP */
	      gcc_assert (frame_pointer_needed);

	      gcc_assert (REG_P (XEXP (src, 0))
			  && dwf_regno (XEXP (src, 0)) == cfa.reg
			  && CONST_INT_P (XEXP (src, 1)));
	      offset = INTVAL (XEXP (src, 1));
	      if (GET_CODE (src) != MINUS)
		offset = -offset;
	      cfa.offset += offset;
	      cfa.reg = dw_frame_pointer_regnum;
	    }
	  else
	    {
	      gcc_assert (GET_CODE (src) != MINUS);

	      /* Rule 4 */
	      if (REG_P (XEXP (src, 0))
		  && dwf_regno (XEXP (src, 0)) == cfa.reg
		  && CONST_INT_P (XEXP (src, 1)))
		{
		  /* Setting a temporary CFA register that will be copied
		     into the FP later on.  */
		  offset = - INTVAL (XEXP (src, 1));
		  cfa.offset += offset;
		  cfa.reg = dwf_regno (dest);
		  /* Or used to save regs to the stack.  */
		  cfa_temp.reg = cfa.reg;
		  cfa_temp.offset = cfa.offset;
		}

	      /* Rule 5 */
	      else if (REG_P (XEXP (src, 0))
		       && dwf_regno (XEXP (src, 0)) == cfa_temp.reg
		       && XEXP (src, 1) == stack_pointer_rtx)
		{
		  /* Setting a scratch register that we will use instead
		     of SP for saving registers to the stack.  */
		  gcc_assert (cfa.reg == dw_stack_pointer_regnum);
		  cfa_store.reg = dwf_regno (dest);
		  cfa_store.offset = cfa.offset - cfa_temp.offset;
		}

	      /* Rule 9 */
	      else if (GET_CODE (src) == LO_SUM
		       && CONST_INT_P (XEXP (src, 1)))
		{
		  cfa_temp.reg = dwf_regno (dest);
		  cfa_temp.offset = INTVAL (XEXP (src, 1));
		}
	      else
		gcc_unreachable ();
	    }
	  break;

	  /* Rule 6 */
	case CONST_INT:
	  cfa_temp.reg = dwf_regno (dest);
	  cfa_temp.offset = INTVAL (src);
	  break;

	  /* Rule 7 */
	case IOR:
	  gcc_assert (REG_P (XEXP (src, 0))
		      && dwf_regno (XEXP (src, 0)) == cfa_temp.reg
		      && CONST_INT_P (XEXP (src, 1)));

	  cfa_temp.reg = dwf_regno (dest);
	  cfa_temp.offset |= INTVAL (XEXP (src, 1));
	  break;

	  /* Skip over HIGH, assuming it will be followed by a LO_SUM,
	     which will fill in all of the bits.  */
	  /* Rule 8 */
	case HIGH:
	  break;

	  /* Rule 15 */
	case UNSPEC:
	case UNSPEC_VOLATILE:
	  /* All unspecs should be represented by REG_CFA_* notes.  */
	  gcc_unreachable ();
	  return;

	  /* Rule 16 */
	case AND:
	  /* If this AND operation happens on stack pointer in prologue,
	     we assume the stack is realigned and we extract the
	     alignment.  */
	  if (fde && XEXP (src, 0) == stack_pointer_rtx)
	    {
	      /* We interpret reg_save differently with stack_realign set.
		 Thus we must flush whatever we have queued first.  */
	      dwarf2out_flush_queued_reg_saves ();

	      gcc_assert (cfa_store.reg == dwf_regno (XEXP (src, 0)));
	      fde->stack_realign = 1;
	      fde->stack_realignment = INTVAL (XEXP (src, 1));
	      cfa_store.offset = 0;

	      if (cfa.reg != dw_stack_pointer_regnum
		  && cfa.reg != dw_frame_pointer_regnum)
		fde->drap_reg = cfa.reg;
	    }
	  return;

	default:
	  gcc_unreachable ();
	}

      def_cfa_1 (&cfa);
      break;

    case MEM:

      /* Saving a register to the stack.  Make sure dest is relative to the
	 CFA register.  */
      switch (GET_CODE (XEXP (dest, 0)))
	{
	  /* Rule 10 */
	  /* With a push.  */
	case PRE_MODIFY:
	case POST_MODIFY:
	  /* We can't handle variable size modifications.  */
	  gcc_assert (GET_CODE (XEXP (XEXP (XEXP (dest, 0), 1), 1))
		      == CONST_INT);
	  offset = -INTVAL (XEXP (XEXP (XEXP (dest, 0), 1), 1));

	  gcc_assert (REGNO (XEXP (XEXP (dest, 0), 0)) == STACK_POINTER_REGNUM
		      && cfa_store.reg == dw_stack_pointer_regnum);

	  cfa_store.offset += offset;
	  if (cfa.reg == dw_stack_pointer_regnum)
	    cfa.offset = cfa_store.offset;

	  /* OFFSET becomes the CFA-relative address of the slot.  */
	  if (GET_CODE (XEXP (dest, 0)) == POST_MODIFY)
	    offset -= cfa_store.offset;
	  else
	    offset = -cfa_store.offset;
	  break;

	  /* Rule 11 */
	case PRE_INC:
	case PRE_DEC:
	case POST_DEC:
	  offset = GET_MODE_SIZE (GET_MODE (dest));
	  if (GET_CODE (XEXP (dest, 0)) == PRE_INC)
	    offset = -offset;

	  gcc_assert ((REGNO (XEXP (XEXP (dest, 0), 0))
		       == STACK_POINTER_REGNUM)
		      && cfa_store.reg == dw_stack_pointer_regnum);

	  cfa_store.offset += offset;

	  /* Rule 18: If stack is aligned, we will use FP as a
	     reference to represent the address of the stored
	     register.  */
	  if (fde
	      && fde->stack_realign
	      && src == hard_frame_pointer_rtx)
	    {
	      gcc_assert (cfa.reg != dw_frame_pointer_regnum);
	      cfa_store.offset = 0;
	    }

	  if (cfa.reg == dw_stack_pointer_regnum)
	    cfa.offset = cfa_store.offset;

	  if (GET_CODE (XEXP (dest, 0)) == POST_DEC)
	    offset += -cfa_store.offset;
	  else
	    offset = -cfa_store.offset;
	  break;

	  /* Rule 12 */
	  /* With an offset.  */
	case PLUS:
	case MINUS:
	case LO_SUM:
	  {
	    unsigned int regno;

	    gcc_assert (CONST_INT_P (XEXP (XEXP (dest, 0), 1))
			&& REG_P (XEXP (XEXP (dest, 0), 0)));
	    offset = INTVAL (XEXP (XEXP (dest, 0), 1));
	    if (GET_CODE (XEXP (dest, 0)) == MINUS)
	      offset = -offset;

	    regno = dwf_regno (XEXP (XEXP (dest, 0), 0));

	    /* Rebase the slot address to be CFA-relative, depending on
	       which of our tracked registers addresses it.  */
	    if (cfa.reg == regno)
	      offset -= cfa.offset;
	    else if (cfa_store.reg == regno)
	      offset -= cfa_store.offset;
	    else
	      {
		gcc_assert (cfa_temp.reg == regno);
		offset -= cfa_temp.offset;
	      }
	  }
	  break;

	  /* Rule 13 */
	  /* Without an offset.  */
	case REG:
	  {
	    unsigned int regno = dwf_regno (XEXP (dest, 0));

	    if (cfa.reg == regno)
	      offset = -cfa.offset;
	    else if (cfa_store.reg == regno)
	      offset = -cfa_store.offset;
	    else
	      {
		gcc_assert (cfa_temp.reg == regno);
		offset = -cfa_temp.offset;
	      }
	  }
	  break;

	  /* Rule 14 */
	case POST_INC:
	  gcc_assert (cfa_temp.reg == dwf_regno (XEXP (XEXP (dest, 0), 0)));
	  offset = -cfa_temp.offset;
	  cfa_temp.offset -= GET_MODE_SIZE (GET_MODE (dest));
	  break;

	default:
	  gcc_unreachable ();
	}

      /* Rule 17 */
      /* If the source operand of this MEM operation is a memory,
	 we only care how much stack grew.  */
      if (MEM_P (src))
	break;

      if (REG_P (src)
	  && REGNO (src) != STACK_POINTER_REGNUM
	  && REGNO (src) != HARD_FRAME_POINTER_REGNUM
	  && dwf_regno (src) == cfa.reg)
	{
	  /* We're storing the current CFA reg into the stack.  */

	  if (cfa.offset == 0)
	    {
	      /* Rule 19 */
	      /* If stack is aligned, putting CFA reg into stack means
		 we can no longer use reg + offset to represent CFA.
		 Here we use DW_CFA_def_cfa_expression instead.  The
		 result of this expression equals to the original CFA
		 value.  */
	      if (fde
		  && fde->stack_realign
		  && cfa.indirect == 0
		  && cfa.reg != dw_frame_pointer_regnum)
		{
		  dw_cfa_location cfa_exp;

		  gcc_assert (fde->drap_reg == cfa.reg);

		  cfa_exp.indirect = 1;
		  cfa_exp.reg = dw_frame_pointer_regnum;
		  cfa_exp.base_offset = offset;
		  cfa_exp.offset = 0;

		  fde->drap_reg_saved = 1;

		  def_cfa_1 (&cfa_exp);
		  break;
		}

	      /* If the source register is exactly the CFA, assume
		 we're saving SP like any other register; this happens
		 on the ARM.  */
	      def_cfa_1 (&cfa);
	      queue_reg_save (stack_pointer_rtx, NULL_RTX, offset);
	      break;
	    }
	  else
	    {
	      /* Otherwise, we'll need to look in the stack to
		 calculate the CFA.  */
	      rtx x = XEXP (dest, 0);

	      if (!REG_P (x))
		x = XEXP (x, 0);
	      gcc_assert (REG_P (x));

	      cfa.reg = dwf_regno (x);
	      cfa.base_offset = offset;
	      cfa.indirect = 1;
	      def_cfa_1 (&cfa);
	      break;
	    }
	}

      def_cfa_1 (&cfa);

      span = NULL;
      if (REG_P (src))
	span = targetm.dwarf_register_span (src);
      if (!span)
	queue_reg_save (src, NULL_RTX, offset);
      else
	{
	  /* We have a PARALLEL describing where the contents of SRC live.
	     Queue register saves for each piece of the PARALLEL.  */
	  int par_index;
	  int limit;
	  HOST_WIDE_INT span_offset = offset;

	  gcc_assert (GET_CODE (span) == PARALLEL);

	  limit = XVECLEN (span, 0);
	  for (par_index = 0; par_index < limit; par_index++)
	    {
	      rtx elem = XVECEXP (span, 0, par_index);
	      queue_reg_save (elem, NULL_RTX, span_offset);
	      span_offset += GET_MODE_SIZE (GET_MODE (elem));
	    }
	}
      break;

    default:
      gcc_unreachable ();
    }
}
2168
2169 /* Record call frame debugging information for INSN, which either
2170 sets SP or FP (adjusting how we calculate the frame address) or saves a
2171 register to the stack. If INSN is NULL_RTX, initialize our state.
2172
2173 If AFTER_P is false, we're being called before the insn is emitted,
2174 otherwise after. Call instructions get invoked twice. */
2175
2176 static void
2177 dwarf2out_frame_debug (rtx insn, bool after_p)
2178 {
2179 rtx note, n;
2180 bool handled_one = false;
2181 bool need_flush = false;
2182
2183 if (!NONJUMP_INSN_P (insn) || clobbers_queued_reg_save (insn))
2184 dwarf2out_flush_queued_reg_saves ();
2185
2186 if (!RTX_FRAME_RELATED_P (insn))
2187 {
2188 /* ??? This should be done unconditionally since stack adjustments
2189 matter if the stack pointer is not the CFA register anymore but
2190 is still used to save registers. */
2191 if (!ACCUMULATE_OUTGOING_ARGS)
2192 dwarf2out_notice_stack_adjust (insn, after_p);
2193 return;
2194 }
2195
2196 any_cfis_emitted = false;
2197
2198 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
2199 switch (REG_NOTE_KIND (note))
2200 {
2201 case REG_FRAME_RELATED_EXPR:
2202 insn = XEXP (note, 0);
2203 goto do_frame_expr;
2204
2205 case REG_CFA_DEF_CFA:
2206 dwarf2out_frame_debug_def_cfa (XEXP (note, 0));
2207 handled_one = true;
2208 break;
2209
2210 case REG_CFA_ADJUST_CFA:
2211 n = XEXP (note, 0);
2212 if (n == NULL)
2213 {
2214 n = PATTERN (insn);
2215 if (GET_CODE (n) == PARALLEL)
2216 n = XVECEXP (n, 0, 0);
2217 }
2218 dwarf2out_frame_debug_adjust_cfa (n);
2219 handled_one = true;
2220 break;
2221
2222 case REG_CFA_OFFSET:
2223 n = XEXP (note, 0);
2224 if (n == NULL)
2225 n = single_set (insn);
2226 dwarf2out_frame_debug_cfa_offset (n);
2227 handled_one = true;
2228 break;
2229
2230 case REG_CFA_REGISTER:
2231 n = XEXP (note, 0);
2232 if (n == NULL)
2233 {
2234 n = PATTERN (insn);
2235 if (GET_CODE (n) == PARALLEL)
2236 n = XVECEXP (n, 0, 0);
2237 }
2238 dwarf2out_frame_debug_cfa_register (n);
2239 handled_one = true;
2240 break;
2241
2242 case REG_CFA_EXPRESSION:
2243 n = XEXP (note, 0);
2244 if (n == NULL)
2245 n = single_set (insn);
2246 dwarf2out_frame_debug_cfa_expression (n);
2247 handled_one = true;
2248 break;
2249
2250 case REG_CFA_RESTORE:
2251 n = XEXP (note, 0);
2252 if (n == NULL)
2253 {
2254 n = PATTERN (insn);
2255 if (GET_CODE (n) == PARALLEL)
2256 n = XVECEXP (n, 0, 0);
2257 n = XEXP (n, 0);
2258 }
2259 dwarf2out_frame_debug_cfa_restore (n);
2260 handled_one = true;
2261 break;
2262
2263 case REG_CFA_SET_VDRAP:
2264 n = XEXP (note, 0);
2265 if (REG_P (n))
2266 {
2267 dw_fde_ref fde = cfun->fde;
2268 if (fde)
2269 {
2270 gcc_assert (fde->vdrap_reg == INVALID_REGNUM);
2271 if (REG_P (n))
2272 fde->vdrap_reg = dwf_regno (n);
2273 }
2274 }
2275 handled_one = true;
2276 break;
2277
2278 case REG_CFA_WINDOW_SAVE:
2279 dwarf2out_frame_debug_cfa_window_save ();
2280 handled_one = true;
2281 break;
2282
2283 case REG_CFA_FLUSH_QUEUE:
2284 /* The actual flush happens below. */
2285 need_flush = true;
2286 handled_one = true;
2287 break;
2288
2289 default:
2290 break;
2291 }
2292
2293 if (handled_one)
2294 {
2295 /* Minimize the number of advances by emitting the entire queue
2296 once anything is emitted. */
2297 need_flush |= any_cfis_emitted;
2298 }
2299 else
2300 {
2301 insn = PATTERN (insn);
2302 do_frame_expr:
2303 dwarf2out_frame_debug_expr (insn);
2304
2305 /* Check again. A parallel can save and update the same register.
2306 We could probably check just once, here, but this is safer than
2307 removing the check at the start of the function. */
2308 if (any_cfis_emitted || clobbers_queued_reg_save (insn))
2309 need_flush = true;
2310 }
2311
2312 if (need_flush)
2313 dwarf2out_flush_queued_reg_saves ();
2314 }
2315
2316 /* Emit CFI info to change the state from OLD_ROW to NEW_ROW. */
2317
2318 static void
2319 change_cfi_row (dw_cfi_row *old_row, dw_cfi_row *new_row)
2320 {
2321 size_t i, n_old, n_new, n_max;
2322 dw_cfi_ref cfi;
2323
2324 if (new_row->cfa_cfi && !cfi_equal_p (old_row->cfa_cfi, new_row->cfa_cfi))
2325 add_cfi (new_row->cfa_cfi);
2326 else
2327 {
2328 cfi = def_cfa_0 (&old_row->cfa, &new_row->cfa);
2329 if (cfi)
2330 add_cfi (cfi);
2331 }
2332
2333 if (old_row->args_size != new_row->args_size)
2334 add_cfi_args_size (new_row->args_size);
2335
2336 n_old = VEC_length (dw_cfi_ref, old_row->reg_save);
2337 n_new = VEC_length (dw_cfi_ref, new_row->reg_save);
2338 n_max = MAX (n_old, n_new);
2339
2340 for (i = 0; i < n_max; ++i)
2341 {
2342 dw_cfi_ref r_old = NULL, r_new = NULL;
2343
2344 if (i < n_old)
2345 r_old = VEC_index (dw_cfi_ref, old_row->reg_save, i);
2346 if (i < n_new)
2347 r_new = VEC_index (dw_cfi_ref, new_row->reg_save, i);
2348
2349 if (r_old == r_new)
2350 ;
2351 else if (r_new == NULL)
2352 add_cfi_restore (i);
2353 else if (!cfi_equal_p (r_old, r_new))
2354 add_cfi (r_new);
2355 }
2356 }
2357
2358 /* Examine CFI and return true if a cfi label and set_loc is needed
2359 beforehand. Even when generating CFI assembler instructions, we
2360 still have to add the cfi to the list so that lookup_cfa_1 works
2361 later on. When -g2 and above we even need to force emitting of
2362 CFI labels and add to list a DW_CFA_set_loc for convert_cfa_to_fb_loc_list
2363 purposes. If we're generating DWARF3 output we use DW_OP_call_frame_cfa
2364 and so don't use convert_cfa_to_fb_loc_list. */
2365
2366 static bool
2367 cfi_label_required_p (dw_cfi_ref cfi)
2368 {
2369 if (!dwarf2out_do_cfi_asm ())
2370 return true;
2371
2372 if (dwarf_version == 2
2373 && debug_info_level > DINFO_LEVEL_TERSE
2374 && (write_symbols == DWARF2_DEBUG
2375 || write_symbols == VMS_AND_DWARF2_DEBUG))
2376 {
2377 switch (cfi->dw_cfi_opc)
2378 {
2379 case DW_CFA_def_cfa_offset:
2380 case DW_CFA_def_cfa_offset_sf:
2381 case DW_CFA_def_cfa_register:
2382 case DW_CFA_def_cfa:
2383 case DW_CFA_def_cfa_sf:
2384 case DW_CFA_def_cfa_expression:
2385 case DW_CFA_restore_state:
2386 return true;
2387 default:
2388 return false;
2389 }
2390 }
2391 return false;
2392 }
2393
/* Walk the function, looking for NOTE_INSN_CFI notes.  Add the CFIs to the
   function's FDE, adding CFI labels and set_loc/advance_loc opcodes as
   necessary.  */
static void
add_cfis_to_fde (void)
{
  dw_fde_ref fde = cfun->fde;
  rtx insn, next;
  /* We always start with a function_begin label.  */
  bool first = false;

  for (insn = get_insns (); insn; insn = next)
    {
      next = NEXT_INSN (insn);

      if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
	{
	  /* Remember where the second-section CFIs begin within the
	     FDE's CFI vector.  */
	  fde->dw_fde_switch_cfi_index
	    = VEC_length (dw_cfi_ref, fde->dw_fde_cfi);
	  /* Don't attempt to advance_loc4 between labels
	     in different sections.  */
	  first = true;
	}

      if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_CFI)
	{
	  /* Coalesce a run of consecutive NOTE_INSN_CFI notes: one
	     label (if any) covers the whole run.  NEXT is advanced to
	     just past the run.  */
	  bool required = cfi_label_required_p (NOTE_CFI (insn));
	  while (next && NOTE_P (next) && NOTE_KIND (next) == NOTE_INSN_CFI)
	    {
	      required |= cfi_label_required_p (NOTE_CFI (next));
	      next = NEXT_INSN (next);
	    }
	  if (required)
	    {
	      /* Capture the label number before dwarf2out_cfi_label
		 advances it.  */
	      int num = dwarf2out_cfi_label_num;
	      const char *label = dwarf2out_cfi_label ();
	      dw_cfi_ref xcfi;
	      rtx tmp;

	      /* Set the location counter to the new label.  */
	      xcfi = new_cfi ();
	      /* After a section switch we must use set_loc rather than
		 a delta from a label in the other section.  */
	      xcfi->dw_cfi_opc = (first ? DW_CFA_set_loc
				  : DW_CFA_advance_loc4);
	      xcfi->dw_cfi_oprnd1.dw_cfi_addr = label;
	      VEC_safe_push (dw_cfi_ref, gc, fde->dw_fde_cfi, xcfi);

	      /* Emit the label note in the insn stream so final emits
		 the matching assembler label.  */
	      tmp = emit_note_before (NOTE_INSN_CFI_LABEL, insn);
	      NOTE_LABEL_NUMBER (tmp) = num;
	    }

	  /* Push every CFI of the run onto the FDE, in order.  */
	  do
	    {
	      VEC_safe_push (dw_cfi_ref, gc, fde->dw_fde_cfi, NOTE_CFI (insn));
	      insn = NEXT_INSN (insn);
	    }
	  while (insn != next);
	  first = false;
	}
    }
}
2454
/* Scan the function and create the initial set of CFI notes.
   CFIs generated while processing an insn are emitted as NOTE_INSN_CFI
   notes after ADD_CFI_INSN, which this loop keeps positioned just
   before (or, for most insns, at) the insn being processed.  */

static void
create_cfi_notes (void)
{
  rtx insn;

  for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
    {
      rtx pat;

      /* By default, notes for this insn go before it.  */
      add_cfi_insn = PREV_INSN (insn);

      if (BARRIER_P (insn))
	{
	  /* Barriers mark unreachable points; let frame_debug notice
	     the control-flow break (args_size resets, etc.).  */
	  dwarf2out_frame_debug (insn, false);
	  continue;
	}

      if (NOTE_P (insn))
	{
	  switch (NOTE_KIND (insn))
	    {
	    case NOTE_INSN_PROLOGUE_END:
	      /* All prologue saves must be visible at this point.  */
	      dwarf2out_flush_queued_reg_saves ();
	      break;

	    case NOTE_INSN_EPILOGUE_BEG:
#if defined(HAVE_epilogue)
	      dwarf2out_cfi_begin_epilogue (insn);
#endif
	      break;

	    case NOTE_INSN_CFA_RESTORE_STATE:
	      /* Emit the restore_state CFI at this note, not before it.  */
	      add_cfi_insn = insn;
	      dwarf2out_frame_debug_restore_state ();
	      break;

	    case NOTE_INSN_SWITCH_TEXT_SECTIONS:
	      /* In dwarf2out_switch_text_section, we'll begin a new FDE
		 for the portion of the function in the alternate text
		 section.  The row state at the very beginning of that
		 new FDE will be exactly the row state from the CIE.
		 Emit whatever CFIs are necessary to make CUR_ROW current.  */
	      add_cfi_insn = insn;
	      change_cfi_row (cie_cfi_row, cur_row);
	      break;
	    }
	  continue;
	}

      if (!NONDEBUG_INSN_P (insn))
	continue;

      pat = PATTERN (insn);
      if (asm_noperands (pat) >= 0)
	{
	  /* Inline asm: only stack-adjust tracking applies.  */
	  dwarf2out_frame_debug (insn, false);
	  continue;
	}

      if (GET_CODE (pat) == SEQUENCE)
	{
	  /* Delay slots: process each member of the sequence except the
	     branch itself (element 0).  */
	  int i, n = XVECLEN (pat, 0);
	  for (i = 1; i < n; ++i)
	    dwarf2out_frame_debug (XVECEXP (pat, 0, i), false);
	}

      /* Calls (and explicit flush requests) are processed both before
	 and after emission; this is the "before" half.  */
      if (CALL_P (insn)
	  || find_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL))
	dwarf2out_frame_debug (insn, false);

      /* Do not separate tablejump insns from their ADDR_DIFF_VEC.
	 Putting the note after the VEC should be ok.  */
      if (!tablejump_p (insn, NULL, &add_cfi_insn))
	add_cfi_insn = insn;

      dwarf2out_frame_debug (insn, true);
    }

  add_cfi_insn = NULL;
}
2537
/* Determine if we need to save and restore CFI information around this
   epilogue.  If SIBCALL is true, then this is a sibcall epilogue.  If
   we do need to save/restore, then emit the save now, and insert a
   NOTE_INSN_CFA_RESTORE_STATE at the appropriate place in the stream.  */

static void
dwarf2out_cfi_begin_epilogue (rtx insn)
{
  bool saw_frp = false;
  rtx i;

  /* Scan forward to the return insn, noticing if there are possible
     frame related insns.  */
  for (i = NEXT_INSN (insn); i ; i = NEXT_INSN (i))
    {
      if (!INSN_P (i))
	continue;

      /* Look for both regular and sibcalls to end the block.  */
      if (returnjump_p (i))
	break;
      if (CALL_P (i) && SIBLING_CALL_P (i))
	break;

      if (GET_CODE (PATTERN (i)) == SEQUENCE)
	{
	  /* Delay-slot sequence: the branch is element 0; check it for
	     return/sibcall, and every element for frame-relatedness.  */
	  int idx;
	  rtx seq = PATTERN (i);

	  if (returnjump_p (XVECEXP (seq, 0, 0)))
	    break;
	  if (CALL_P (XVECEXP (seq, 0, 0))
	      && SIBLING_CALL_P (XVECEXP (seq, 0, 0)))
	    break;

	  for (idx = 0; idx < XVECLEN (seq, 0); idx++)
	    if (RTX_FRAME_RELATED_P (XVECEXP (seq, 0, idx)))
	      saw_frp = true;
	}

      if (RTX_FRAME_RELATED_P (i))
	saw_frp = true;
    }

  /* If the port doesn't emit epilogue unwind info, we don't need a
     save/restore pair.  */
  if (!saw_frp)
    return;

  /* Otherwise, search forward to see if the return insn was the last
     basic block of the function.  If so, we don't need save/restore.  */
  gcc_assert (i != NULL);
  i = next_real_insn (i);
  if (i == NULL)
    return;

  /* Insert the restore before that next real insn in the stream, and before
     a potential NOTE_INSN_EPILOGUE_BEG -- we do need these notes to be
     properly nested.  This should be after any label or alignment.  This
     will be pushed into the CFI stream by the function below.  */
  while (1)
    {
      rtx p = PREV_INSN (i);
      if (!NOTE_P (p))
	break;
      if (NOTE_KIND (p) == NOTE_INSN_BASIC_BLOCK)
	break;
      i = p;
    }
  emit_note_before (NOTE_INSN_CFA_RESTORE_STATE, i);

  /* Have the next add_cfi emit a DW_CFA_remember_state first.  */
  emit_cfa_remember = true;

  /* And emulate the state save.  */
  gcc_assert (remember_row == NULL);
  remember_row = copy_cfi_row (cur_row);
}
2615
2616 /* A "subroutine" of dwarf2out_cfi_begin_epilogue. Emit the restore
2617 required. */
2618
2619 static void
2620 dwarf2out_frame_debug_restore_state (void)
2621 {
2622 dw_cfi_ref cfi = new_cfi ();
2623
2624 cfi->dw_cfi_opc = DW_CFA_restore_state;
2625 add_cfi (cfi);
2626
2627 gcc_assert (remember_row != NULL);
2628 free_cfi_row (cur_row);
2629 cur_row = remember_row;
2630 remember_row = NULL;
2631 }
2632 \f
2633 /* Record the initial position of the return address. RTL is
2634 INCOMING_RETURN_ADDR_RTX. */
2635
2636 static void
2637 initial_return_save (rtx rtl)
2638 {
2639 unsigned int reg = INVALID_REGNUM;
2640 HOST_WIDE_INT offset = 0;
2641
2642 switch (GET_CODE (rtl))
2643 {
2644 case REG:
2645 /* RA is in a register. */
2646 reg = dwf_regno (rtl);
2647 break;
2648
2649 case MEM:
2650 /* RA is on the stack. */
2651 rtl = XEXP (rtl, 0);
2652 switch (GET_CODE (rtl))
2653 {
2654 case REG:
2655 gcc_assert (REGNO (rtl) == STACK_POINTER_REGNUM);
2656 offset = 0;
2657 break;
2658
2659 case PLUS:
2660 gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
2661 offset = INTVAL (XEXP (rtl, 1));
2662 break;
2663
2664 case MINUS:
2665 gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
2666 offset = -INTVAL (XEXP (rtl, 1));
2667 break;
2668
2669 default:
2670 gcc_unreachable ();
2671 }
2672
2673 break;
2674
2675 case PLUS:
2676 /* The return address is at some offset from any value we can
2677 actually load. For instance, on the SPARC it is in %i7+8. Just
2678 ignore the offset for now; it doesn't matter for unwinding frames. */
2679 gcc_assert (CONST_INT_P (XEXP (rtl, 1)));
2680 initial_return_save (XEXP (rtl, 0));
2681 return;
2682
2683 default:
2684 gcc_unreachable ();
2685 }
2686
2687 if (reg != DWARF_FRAME_RETURN_COLUMN)
2688 {
2689 if (reg != INVALID_REGNUM)
2690 record_reg_saved_in_reg (rtl, pc_rtx);
2691 reg_save (DWARF_FRAME_RETURN_COLUMN, reg, offset - cur_row->cfa.offset);
2692 }
2693 }
2694
/* Annotate the function with NOTE_INSN_CFI notes to record the CFI
   state at each location within the function.  These notes will be
   emitted during pass_final.  Returns 0 (no TODO flags).  */

static unsigned int
execute_dwarf2_frame (void)
{
  /* The first time we're called, compute the incoming frame state.
     CIE_CFI_VEC, CIE_CFI_ROW and CIE_RETURN_SAVE persist across
     functions once initialized.  */
  if (cie_cfi_vec == NULL)
    {
      dw_cfa_location loc;

      dw_stack_pointer_regnum = DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM);
      dw_frame_pointer_regnum = DWARF_FRAME_REGNUM (HARD_FRAME_POINTER_REGNUM);

      /* CFIs generated below land in the CIE vector, not an FDE.  */
      add_cfi_vec = &cie_cfi_vec;
      cie_cfi_row = cur_row = new_cfi_row ();

      /* On entry, the Canonical Frame Address is at SP.  */
      memset(&loc, 0, sizeof (loc));
      loc.reg = dw_stack_pointer_regnum;
      loc.offset = INCOMING_FRAME_SP_OFFSET;
      def_cfa_1 (&loc);

      if (targetm.debug_unwind_info () == UI_DWARF2
          || targetm_common.except_unwind_info (&global_options) == UI_DWARF2)
	{
	  initial_return_save (INCOMING_RETURN_ADDR_RTX);

	  /* For a few targets, we have the return address incoming into a
	     register, but choose a different return column.  This will result
	     in a DW_CFA_register for the return, and an entry in
	     regs_saved_in_regs to match.  If the target later stores that
	     return address register to the stack, we want to be able to emit
	     the DW_CFA_offset against the return column, not the intermediate
	     save register.  Save the contents of regs_saved_in_regs so that
	     we can re-initialize it at the start of each function.  */
	  switch (VEC_length (reg_saved_in_data, regs_saved_in_regs))
	    {
	    case 0:
	      break;
	    case 1:
	      cie_return_save = ggc_alloc_reg_saved_in_data ();
	      *cie_return_save = *VEC_index (reg_saved_in_data,
					     regs_saved_in_regs, 0);
	      regs_saved_in_regs = NULL;
	      break;
	    default:
	      /* initial_return_save records at most one such entry.  */
	      gcc_unreachable ();
	    }
	}

      add_cfi_vec = NULL;
    }

  /* Set up state for generating call frame debug info.  */
  gcc_checking_assert (queued_reg_saves == NULL);
  gcc_checking_assert (regs_saved_in_regs == NULL);

  /* Each function starts from the CIE's row state.  */
  cur_row = copy_cfi_row (cie_cfi_row);
  if (cie_return_save)
    VEC_safe_push (reg_saved_in_data, gc, regs_saved_in_regs, cie_return_save);

  cfa_store = cur_row->cfa;
  args_size = 0;

  memset (&cfa_temp, 0, sizeof(cfa_temp));
  cfa_temp.reg = INVALID_REGNUM;

  dwarf2out_alloc_current_fde ();

  /* Do the work.  */
  create_cfi_notes ();
  add_cfis_to_fde ();

  /* Reset all function-specific information, particularly for GC.  */
  XDELETEVEC (barrier_args_size);
  barrier_args_size = NULL;
  regs_saved_in_regs = NULL;
  VEC_free (queued_reg_save, heap, queued_reg_saves);

  free_cfi_row (cur_row);
  cur_row = NULL;

  return 0;
}
2781 \f
2782 /* Convert a DWARF call frame info. operation to its string name */
2783
2784 static const char *
2785 dwarf_cfi_name (unsigned int cfi_opc)
2786 {
2787 switch (cfi_opc)
2788 {
2789 case DW_CFA_advance_loc:
2790 return "DW_CFA_advance_loc";
2791 case DW_CFA_offset:
2792 return "DW_CFA_offset";
2793 case DW_CFA_restore:
2794 return "DW_CFA_restore";
2795 case DW_CFA_nop:
2796 return "DW_CFA_nop";
2797 case DW_CFA_set_loc:
2798 return "DW_CFA_set_loc";
2799 case DW_CFA_advance_loc1:
2800 return "DW_CFA_advance_loc1";
2801 case DW_CFA_advance_loc2:
2802 return "DW_CFA_advance_loc2";
2803 case DW_CFA_advance_loc4:
2804 return "DW_CFA_advance_loc4";
2805 case DW_CFA_offset_extended:
2806 return "DW_CFA_offset_extended";
2807 case DW_CFA_restore_extended:
2808 return "DW_CFA_restore_extended";
2809 case DW_CFA_undefined:
2810 return "DW_CFA_undefined";
2811 case DW_CFA_same_value:
2812 return "DW_CFA_same_value";
2813 case DW_CFA_register:
2814 return "DW_CFA_register";
2815 case DW_CFA_remember_state:
2816 return "DW_CFA_remember_state";
2817 case DW_CFA_restore_state:
2818 return "DW_CFA_restore_state";
2819 case DW_CFA_def_cfa:
2820 return "DW_CFA_def_cfa";
2821 case DW_CFA_def_cfa_register:
2822 return "DW_CFA_def_cfa_register";
2823 case DW_CFA_def_cfa_offset:
2824 return "DW_CFA_def_cfa_offset";
2825
2826 /* DWARF 3 */
2827 case DW_CFA_def_cfa_expression:
2828 return "DW_CFA_def_cfa_expression";
2829 case DW_CFA_expression:
2830 return "DW_CFA_expression";
2831 case DW_CFA_offset_extended_sf:
2832 return "DW_CFA_offset_extended_sf";
2833 case DW_CFA_def_cfa_sf:
2834 return "DW_CFA_def_cfa_sf";
2835 case DW_CFA_def_cfa_offset_sf:
2836 return "DW_CFA_def_cfa_offset_sf";
2837
2838 /* SGI/MIPS specific */
2839 case DW_CFA_MIPS_advance_loc8:
2840 return "DW_CFA_MIPS_advance_loc8";
2841
2842 /* GNU extensions */
2843 case DW_CFA_GNU_window_save:
2844 return "DW_CFA_GNU_window_save";
2845 case DW_CFA_GNU_args_size:
2846 return "DW_CFA_GNU_args_size";
2847 case DW_CFA_GNU_negative_offset_extended:
2848 return "DW_CFA_GNU_negative_offset_extended";
2849
2850 default:
2851 return "DW_CFA_<unknown>";
2852 }
2853 }
2854
2855 /* This routine will generate the correct assembly data for a location
2856 description based on a cfi entry with a complex address. */
2857
2858 static void
2859 output_cfa_loc (dw_cfi_ref cfi, int for_eh)
2860 {
2861 dw_loc_descr_ref loc;
2862 unsigned long size;
2863
2864 if (cfi->dw_cfi_opc == DW_CFA_expression)
2865 {
2866 unsigned r =
2867 DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
2868 dw2_asm_output_data (1, r, NULL);
2869 loc = cfi->dw_cfi_oprnd2.dw_cfi_loc;
2870 }
2871 else
2872 loc = cfi->dw_cfi_oprnd1.dw_cfi_loc;
2873
2874 /* Output the size of the block. */
2875 size = size_of_locs (loc);
2876 dw2_asm_output_data_uleb128 (size, NULL);
2877
2878 /* Now output the operations themselves. */
2879 output_loc_sequence (loc, for_eh);
2880 }
2881
2882 /* Similar, but used for .cfi_escape. */
2883
2884 static void
2885 output_cfa_loc_raw (dw_cfi_ref cfi)
2886 {
2887 dw_loc_descr_ref loc;
2888 unsigned long size;
2889
2890 if (cfi->dw_cfi_opc == DW_CFA_expression)
2891 {
2892 unsigned r =
2893 DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
2894 fprintf (asm_out_file, "%#x,", r);
2895 loc = cfi->dw_cfi_oprnd2.dw_cfi_loc;
2896 }
2897 else
2898 loc = cfi->dw_cfi_oprnd1.dw_cfi_loc;
2899
2900 /* Output the size of the block. */
2901 size = size_of_locs (loc);
2902 dw2_asm_output_data_uleb128_raw (size);
2903 fputc (',', asm_out_file);
2904
2905 /* Now output the operations themselves. */
2906 output_loc_sequence_raw (loc);
2907 }
2908
/* Output a Call Frame Information opcode and its operand(s).
   FDE supplies (and tracks) the current label for advance_loc deltas;
   FOR_EH selects EH vs. debug register numbering.  */

void
output_cfi (dw_cfi_ref cfi, dw_fde_ref fde, int for_eh)
{
  unsigned long r;
  HOST_WIDE_INT off;

  /* The three opcodes whose low 6 bits encode an operand are handled
     first; everything else emits the opcode byte then ULEB/SLEB or
     address operands.  */
  if (cfi->dw_cfi_opc == DW_CFA_advance_loc)
    dw2_asm_output_data (1, (cfi->dw_cfi_opc
			     | (cfi->dw_cfi_oprnd1.dw_cfi_offset & 0x3f)),
			 "DW_CFA_advance_loc " HOST_WIDE_INT_PRINT_HEX,
			 ((unsigned HOST_WIDE_INT)
			  cfi->dw_cfi_oprnd1.dw_cfi_offset));
  else if (cfi->dw_cfi_opc == DW_CFA_offset)
    {
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
      dw2_asm_output_data (1, (cfi->dw_cfi_opc | (r & 0x3f)),
			   "DW_CFA_offset, column %#lx", r);
      off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
      dw2_asm_output_data_uleb128 (off, NULL);
    }
  else if (cfi->dw_cfi_opc == DW_CFA_restore)
    {
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
      dw2_asm_output_data (1, (cfi->dw_cfi_opc | (r & 0x3f)),
			   "DW_CFA_restore, column %#lx", r);
    }
  else
    {
      dw2_asm_output_data (1, cfi->dw_cfi_opc,
			   "%s", dwarf_cfi_name (cfi->dw_cfi_opc));

      switch (cfi->dw_cfi_opc)
	{
	case DW_CFA_set_loc:
	  if (for_eh)
	    dw2_asm_output_encoded_addr_rtx (
		ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/1, /*global=*/0),
		gen_rtx_SYMBOL_REF (Pmode, cfi->dw_cfi_oprnd1.dw_cfi_addr),
		false, NULL);
	  else
	    dw2_asm_output_addr (DWARF2_ADDR_SIZE,
				 cfi->dw_cfi_oprnd1.dw_cfi_addr, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	/* The advance_loc variants emit a delta against the FDE's
	   current label, then make the new label current.  */
	case DW_CFA_advance_loc1:
	  dw2_asm_output_delta (1, cfi->dw_cfi_oprnd1.dw_cfi_addr,
				fde->dw_fde_current_label, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	case DW_CFA_advance_loc2:
	  dw2_asm_output_delta (2, cfi->dw_cfi_oprnd1.dw_cfi_addr,
				fde->dw_fde_current_label, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	case DW_CFA_advance_loc4:
	  dw2_asm_output_delta (4, cfi->dw_cfi_oprnd1.dw_cfi_addr,
				fde->dw_fde_current_label, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	case DW_CFA_MIPS_advance_loc8:
	  dw2_asm_output_delta (8, cfi->dw_cfi_oprnd1.dw_cfi_addr,
				fde->dw_fde_current_label, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	case DW_CFA_offset_extended:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
	  dw2_asm_output_data_uleb128 (off, NULL);
	  break;

	case DW_CFA_def_cfa:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  dw2_asm_output_data_uleb128 (cfi->dw_cfi_oprnd2.dw_cfi_offset, NULL);
	  break;

	case DW_CFA_offset_extended_sf:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
	  dw2_asm_output_data_sleb128 (off, NULL);
	  break;

	case DW_CFA_def_cfa_sf:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
	  dw2_asm_output_data_sleb128 (off, NULL);
	  break;

	/* Single-register-operand opcodes.  */
	case DW_CFA_restore_extended:
	case DW_CFA_undefined:
	case DW_CFA_same_value:
	case DW_CFA_def_cfa_register:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  break;

	case DW_CFA_register:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd2.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  break;

	case DW_CFA_def_cfa_offset:
	case DW_CFA_GNU_args_size:
	  dw2_asm_output_data_uleb128 (cfi->dw_cfi_oprnd1.dw_cfi_offset, NULL);
	  break;

	case DW_CFA_def_cfa_offset_sf:
	  off = div_data_align (cfi->dw_cfi_oprnd1.dw_cfi_offset);
	  dw2_asm_output_data_sleb128 (off, NULL);
	  break;

	case DW_CFA_GNU_window_save:
	  /* No operands.  */
	  break;

	case DW_CFA_def_cfa_expression:
	case DW_CFA_expression:
	  output_cfa_loc (cfi, for_eh);
	  break;

	case DW_CFA_GNU_negative_offset_extended:
	  /* Obsoleted by DW_CFA_offset_extended_sf.  */
	  gcc_unreachable ();

	default:
	  break;
	}
    }
}
3049
/* Similar, but do it via assembler directives instead.  F is either
   asm_out_file (real output) or a dump file; some cases behave
   differently for dumps.  */

void
output_cfi_directive (FILE *f, dw_cfi_ref cfi)
{
  unsigned long r, r2;

  switch (cfi->dw_cfi_opc)
    {
    case DW_CFA_advance_loc:
    case DW_CFA_advance_loc1:
    case DW_CFA_advance_loc2:
    case DW_CFA_advance_loc4:
    case DW_CFA_MIPS_advance_loc8:
    case DW_CFA_set_loc:
      /* Should only be created in a code path not followed when emitting
	 via directives.  The assembler is going to take care of this for
	 us.  But this routines is also used for debugging dumps, so
	 print something.  */
      gcc_assert (f != asm_out_file);
      fprintf (f, "\t.cfi_advance_loc\n");
      break;

    case DW_CFA_offset:
    case DW_CFA_offset_extended:
    case DW_CFA_offset_extended_sf:
      /* Register numbers use EH numbering (second argument 1).  */
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_offset %lu, "HOST_WIDE_INT_PRINT_DEC"\n",
	       r, cfi->dw_cfi_oprnd2.dw_cfi_offset);
      break;

    case DW_CFA_restore:
    case DW_CFA_restore_extended:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_restore %lu\n", r);
      break;

    case DW_CFA_undefined:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_undefined %lu\n", r);
      break;

    case DW_CFA_same_value:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_same_value %lu\n", r);
      break;

    case DW_CFA_def_cfa:
    case DW_CFA_def_cfa_sf:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_def_cfa %lu, "HOST_WIDE_INT_PRINT_DEC"\n",
	       r, cfi->dw_cfi_oprnd2.dw_cfi_offset);
      break;

    case DW_CFA_def_cfa_register:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_def_cfa_register %lu\n", r);
      break;

    case DW_CFA_register:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      r2 = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd2.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_register %lu, %lu\n", r, r2);
      break;

    case DW_CFA_def_cfa_offset:
    case DW_CFA_def_cfa_offset_sf:
      fprintf (f, "\t.cfi_def_cfa_offset "
	       HOST_WIDE_INT_PRINT_DEC"\n",
	       cfi->dw_cfi_oprnd1.dw_cfi_offset);
      break;

    case DW_CFA_remember_state:
      fprintf (f, "\t.cfi_remember_state\n");
      break;
    case DW_CFA_restore_state:
      fprintf (f, "\t.cfi_restore_state\n");
      break;

    case DW_CFA_GNU_args_size:
      /* No directive exists; emit the raw opcode via .cfi_escape when
	 producing real assembly, a readable pseudo-directive for dumps.  */
      if (f == asm_out_file)
	{
	  fprintf (f, "\t.cfi_escape %#x,", DW_CFA_GNU_args_size);
	  dw2_asm_output_data_uleb128_raw (cfi->dw_cfi_oprnd1.dw_cfi_offset);
	  if (flag_debug_asm)
	    fprintf (f, "\t%s args_size "HOST_WIDE_INT_PRINT_DEC,
		     ASM_COMMENT_START, cfi->dw_cfi_oprnd1.dw_cfi_offset);
	  fputc ('\n', f);
	}
      else
	{
	  fprintf (f, "\t.cfi_GNU_args_size "HOST_WIDE_INT_PRINT_DEC "\n",
		   cfi->dw_cfi_oprnd1.dw_cfi_offset);
	}
      break;

    case DW_CFA_GNU_window_save:
      fprintf (f, "\t.cfi_window_save\n");
      break;

    case DW_CFA_def_cfa_expression:
      if (f != asm_out_file)
	{
	  fprintf (f, "\t.cfi_def_cfa_expression ...\n");
	  break;
	}
      /* FALLTHRU */
    case DW_CFA_expression:
      if (f != asm_out_file)
	{
	  fprintf (f, "\t.cfi_cfa_expression ...\n");
	  break;
	}
      /* Expressions have no directive either; escape the raw bytes.  */
      fprintf (f, "\t.cfi_escape %#x,", cfi->dw_cfi_opc);
      output_cfa_loc_raw (cfi);
      fputc ('\n', f);
      break;

    default:
      gcc_unreachable ();
    }
}
3172
3173 void
3174 dwarf2out_emit_cfi (dw_cfi_ref cfi)
3175 {
3176 if (dwarf2out_do_cfi_asm ())
3177 output_cfi_directive (asm_out_file, cfi);
3178 }
3179 \f
3180
/* Save the result of dwarf2out_do_cfi_asm across PCH (it is set in
   dwarf2out_do_cfi_asm and consulted both there and in
   dwarf2out_do_frame).
   This variable is tri-state, with 0 unset, >0 true, <0 false.  */
static GTY(()) signed char saved_do_cfi_asm = 0;
3184
3185 /* Decide whether we want to emit frame unwind information for the current
3186 translation unit. */
3187
3188 bool
3189 dwarf2out_do_frame (void)
3190 {
3191 /* We want to emit correct CFA location expressions or lists, so we
3192 have to return true if we're going to output debug info, even if
3193 we're not going to output frame or unwind info. */
3194 if (write_symbols == DWARF2_DEBUG || write_symbols == VMS_AND_DWARF2_DEBUG)
3195 return true;
3196
3197 if (saved_do_cfi_asm > 0)
3198 return true;
3199
3200 if (targetm.debug_unwind_info () == UI_DWARF2)
3201 return true;
3202
3203 if ((flag_unwind_tables || flag_exceptions)
3204 && targetm_common.except_unwind_info (&global_options) == UI_DWARF2)
3205 return true;
3206
3207 return false;
3208 }
3209
/* Decide whether to emit frame unwind via assembler directives.
   The decision is computed once and memoized in saved_do_cfi_asm;
   note that the early returns below leave the provisional "no" (-1)
   in place, making the negative answer sticky as well.  */

bool
dwarf2out_do_cfi_asm (void)
{
  int enc;

#ifdef MIPS_DEBUGGING_INFO
  return false;
#endif

  /* Return the memoized answer if we already decided.  */
  if (saved_do_cfi_asm != 0)
    return saved_do_cfi_asm > 0;

  /* Assume failure for a moment.  */
  saved_do_cfi_asm = -1;

  if (!flag_dwarf2_cfi_asm || !dwarf2out_do_frame ())
    return false;
  if (!HAVE_GAS_CFI_PERSONALITY_DIRECTIVE)
    return false;

  /* Make sure the personality encoding is one the assembler can support.
     In particular, aligned addresses can't be handled.  */
  enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/2,/*global=*/1);
  if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
    return false;
  enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/0,/*global=*/0);
  if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
    return false;

  /* If we can't get the assembler to emit only .debug_frame, and we don't need
     dwarf2 unwind info for exceptions, then emit .debug_frame by hand.  */
  if (!HAVE_GAS_CFI_SECTIONS_DIRECTIVE
      && !flag_unwind_tables && !flag_exceptions
      && targetm_common.except_unwind_info (&global_options) != UI_DWARF2)
    return false;

  /* Success!  */
  saved_do_cfi_asm = 1;
  return true;
}
3252
/* Gate for pass_dwarf2_frame: run the pass only when the target uses
   the generic prologue expansion and dwarf2 frame info is wanted.  */

static bool
gate_dwarf2_frame (void)
{
#ifndef HAVE_prologue
  /* Targets which still implement the prologue in assembler text
     cannot use the generic dwarf2 unwinding.  */
  return false;
#endif

  /* ??? What to do for UI_TARGET unwinding?  They might be able to benefit
     from the optimized shrink-wrapping annotations that we will compute.
     For now, only produce the CFI notes for dwarf2.  */
  return dwarf2out_do_frame ();
}
3267
/* RTL pass that annotates each function with NOTE_INSN_CFI notes via
   execute_dwarf2_frame; gated on gate_dwarf2_frame.  Timed under
   TV_FINAL since the work logically belongs to final output.  */

struct rtl_opt_pass pass_dwarf2_frame =
{
 {
  RTL_PASS,
  "dwarf2",			/* name */
  gate_dwarf2_frame,		/* gate */
  execute_dwarf2_frame,		/* execute */
  NULL,				/* sub */
  NULL,				/* next */
  0,				/* static_pass_number */
  TV_FINAL,			/* tv_id */
  0,				/* properties_required */
  0,				/* properties_provided */
  0,				/* properties_destroyed */
  0,				/* todo_flags_start */
  0				/* todo_flags_finish */
 }
};
3286
3287 #include "gt-dwarf2cfi.h"