dwarf2cfi: Handle return column save from CIE.
1 /* Dwarf2 Call Frame Information helper routines.
2 Copyright (C) 1992, 1993, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
3 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
4 Free Software Foundation, Inc.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "version.h"
27 #include "flags.h"
28 #include "rtl.h"
29 #include "function.h"
30 #include "dwarf2.h"
31 #include "dwarf2out.h"
32 #include "dwarf2asm.h"
33 #include "ggc.h"
34 #include "tm_p.h"
35 #include "target.h"
36 #include "common/common-target.h"
37 #include "tree-pass.h"
38
39 #include "except.h" /* expand_builtin_dwarf_sp_column */
40 #include "expr.h" /* init_return_column_size */
41 #include "regs.h" /* expand_builtin_init_dwarf_reg_sizes */
42 #include "output.h" /* asm_out_file */
43 #include "debug.h" /* dwarf2out_do_frame, dwarf2out_do_cfi_asm */
44
45
46 /* ??? Poison these here until it can be done generically. They've been
47 totally replaced in this file; make sure it stays that way. */
48 #undef DWARF2_UNWIND_INFO
49 #undef DWARF2_FRAME_INFO
50 #if (GCC_VERSION >= 3000)
51 #pragma GCC poison DWARF2_UNWIND_INFO DWARF2_FRAME_INFO
52 #endif
53
54 #ifndef INCOMING_RETURN_ADDR_RTX
55 #define INCOMING_RETURN_ADDR_RTX (gcc_unreachable (), NULL_RTX)
56 #endif
57
58 /* The size of the target's pointer type. */
59 #ifndef PTR_SIZE
60 #define PTR_SIZE (POINTER_SIZE / BITS_PER_UNIT)
61 #endif
62
63 /* Maximum size (in bytes) of an artificially generated label. */
64 #define MAX_ARTIFICIAL_LABEL_BYTES 30
65
66 /* The size of addresses as they appear in the Dwarf 2 data.
67 Some architectures use word addresses to refer to code locations,
68 but Dwarf 2 info always uses byte addresses. On such machines,
69 Dwarf 2 addresses need to be larger than the architecture's
70 pointers. */
71 #ifndef DWARF2_ADDR_SIZE
72 #define DWARF2_ADDR_SIZE (POINTER_SIZE / BITS_PER_UNIT)
73 #endif
74
75 /* The size in bytes of a DWARF field indicating an offset or length
76 relative to a debug info section, specified to be 4 bytes in the
77 DWARF-2 specification. The SGI/MIPS ABI defines it to be the same
78 as PTR_SIZE. */
79
80 #ifndef DWARF_OFFSET_SIZE
81 #define DWARF_OFFSET_SIZE 4
82 #endif
83
84 /* According to the (draft) DWARF 3 specification, the initial length
85 should either be 4 or 12 bytes. When it's 12 bytes, the first 4
86 bytes are 0xffffffff, followed by the length stored in the next 8
87 bytes.
88
89 However, the SGI/MIPS ABI uses an initial length which is equal to
90 DWARF_OFFSET_SIZE. It is defined (elsewhere) accordingly. */
91
92 #ifndef DWARF_INITIAL_LENGTH_SIZE
93 #define DWARF_INITIAL_LENGTH_SIZE (DWARF_OFFSET_SIZE == 4 ? 4 : 12)
94 #endif
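/* Illustrative example (assuming a little-endian target): a 32-bit
   DWARF section of length 0x52 begins with the 4 bytes 52 00 00 00,
   whereas the 64-bit format escapes the length field as
   ff ff ff ff 52 00 00 00 00 00 00 00.  */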
95
96 /* Round SIZE up to the nearest BOUNDARY. */
97 #define DWARF_ROUND(SIZE,BOUNDARY) \
98 ((((SIZE) + (BOUNDARY) - 1) / (BOUNDARY)) * (BOUNDARY))
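/* For example, DWARF_ROUND (9, 4) == 12 and DWARF_ROUND (8, 4) == 8.  */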
99
100 /* Offsets recorded in opcodes are a multiple of this alignment factor. */
101 #ifndef DWARF_CIE_DATA_ALIGNMENT
102 #ifdef STACK_GROWS_DOWNWARD
103 #define DWARF_CIE_DATA_ALIGNMENT (-((int) UNITS_PER_WORD))
104 #else
105 #define DWARF_CIE_DATA_ALIGNMENT ((int) UNITS_PER_WORD)
106 #endif
107 #endif
108
109 /* CIE identifier. */
110 #if HOST_BITS_PER_WIDE_INT >= 64
111 #define DWARF_CIE_ID \
112 (unsigned HOST_WIDE_INT) (DWARF_OFFSET_SIZE == 4 ? DW_CIE_ID : DW64_CIE_ID)
113 #else
114 #define DWARF_CIE_ID DW_CIE_ID
115 #endif
116
117 /* The DWARF 2 CFA column which tracks the return address. Normally this
118 is the column for PC, or the first column after all of the hard
119 registers. */
120 #ifndef DWARF_FRAME_RETURN_COLUMN
121 #ifdef PC_REGNUM
122 #define DWARF_FRAME_RETURN_COLUMN DWARF_FRAME_REGNUM (PC_REGNUM)
123 #else
124 #define DWARF_FRAME_RETURN_COLUMN DWARF_FRAME_REGISTERS
125 #endif
126 #endif
127
128 /* The mapping from gcc register number to DWARF 2 CFA column number. By
129 default, we just provide columns for all registers. */
130 #ifndef DWARF_FRAME_REGNUM
131 #define DWARF_FRAME_REGNUM(REG) DBX_REGISTER_NUMBER (REG)
132 #endif
133
134 /* Map register numbers held in the call frame info that gcc has
135 collected using DWARF_FRAME_REGNUM to those that should be output in
136 .debug_frame and .eh_frame. */
137 #ifndef DWARF2_FRAME_REG_OUT
138 #define DWARF2_FRAME_REG_OUT(REGNO, FOR_EH) (REGNO)
139 #endif
140 \f
141 /* A vector of call frame insns for the CIE. */
142 cfi_vec cie_cfi_vec;
143
144 static GTY(()) unsigned long dwarf2out_cfi_label_num;
145
146 /* The insn after which a new CFI note should be emitted. */
147 static rtx cfi_insn;
148
149 /* When non-null, add_cfi will add the CFI to this vector. */
150 static cfi_vec *add_cfi_vec;
151
152 /* True if a DW_CFA_remember_state should be emitted before the next CFI directive. */
153 static bool emit_cfa_remember;
154
155 /* True if any CFI directives were emitted at the current insn. */
156 static bool any_cfis_emitted;
157 \f
158
159 static void dwarf2out_cfi_begin_epilogue (rtx insn);
160 static void dwarf2out_frame_debug_restore_state (void);
161
162 \f
163 /* Hook used by __throw. */
164
165 rtx
166 expand_builtin_dwarf_sp_column (void)
167 {
168 unsigned int dwarf_regnum = DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM);
169 return GEN_INT (DWARF2_FRAME_REG_OUT (dwarf_regnum, 1));
170 }
171
172 /* MEM is a memory reference for the register size table, each element of
173 which has mode MODE. Initialize column C as a return address column. */
174
175 static void
176 init_return_column_size (enum machine_mode mode, rtx mem, unsigned int c)
177 {
178 HOST_WIDE_INT offset = c * GET_MODE_SIZE (mode);
179 HOST_WIDE_INT size = GET_MODE_SIZE (Pmode);
180 emit_move_insn (adjust_address (mem, mode, offset), gen_int_mode (size, mode));
181 }
182
183 /* Generate code to initialize the register size table. */
184
185 void
186 expand_builtin_init_dwarf_reg_sizes (tree address)
187 {
188 unsigned int i;
189 enum machine_mode mode = TYPE_MODE (char_type_node);
190 rtx addr = expand_normal (address);
191 rtx mem = gen_rtx_MEM (BLKmode, addr);
192 bool wrote_return_column = false;
193
194 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
195 {
196 int rnum = DWARF2_FRAME_REG_OUT (DWARF_FRAME_REGNUM (i), 1);
197
198 if (rnum < DWARF_FRAME_REGISTERS)
199 {
200 HOST_WIDE_INT offset = rnum * GET_MODE_SIZE (mode);
201 enum machine_mode save_mode = reg_raw_mode[i];
202 HOST_WIDE_INT size;
203
204 if (HARD_REGNO_CALL_PART_CLOBBERED (i, save_mode))
205 save_mode = choose_hard_reg_mode (i, 1, true);
206 if (DWARF_FRAME_REGNUM (i) == DWARF_FRAME_RETURN_COLUMN)
207 {
208 if (save_mode == VOIDmode)
209 continue;
210 wrote_return_column = true;
211 }
212 size = GET_MODE_SIZE (save_mode);
213 if (offset < 0)
214 continue;
215
216 emit_move_insn (adjust_address (mem, mode, offset),
217 gen_int_mode (size, mode));
218 }
219 }
220
221 if (!wrote_return_column)
222 init_return_column_size (mode, mem, DWARF_FRAME_RETURN_COLUMN);
223
224 #ifdef DWARF_ALT_FRAME_RETURN_COLUMN
225 init_return_column_size (mode, mem, DWARF_ALT_FRAME_RETURN_COLUMN);
226 #endif
227
228 targetm.init_dwarf_reg_sizes_extra (address);
229 }
230
231 /* Divide OFF by DWARF_CIE_DATA_ALIGNMENT, asserting no remainder. */
232
233 static inline HOST_WIDE_INT
234 div_data_align (HOST_WIDE_INT off)
235 {
236 HOST_WIDE_INT r = off / DWARF_CIE_DATA_ALIGNMENT;
237 gcc_assert (r * DWARF_CIE_DATA_ALIGNMENT == off);
238 return r;
239 }
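/* For example, with DWARF_CIE_DATA_ALIGNMENT == -8 (a downward-growing
   stack and 8-byte words), a register save at CFA offset -16 is factored
   to div_data_align (-16) == 2, which is the value that ends up encoded
   in a DW_CFA_offset opcode.  */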
240
241 /* Return true if we need a signed version of a given opcode
242 (e.g. DW_CFA_offset_extended_sf vs DW_CFA_offset_extended). */
243
244 static inline bool
245 need_data_align_sf_opcode (HOST_WIDE_INT off)
246 {
247 return DWARF_CIE_DATA_ALIGNMENT < 0 ? off > 0 : off < 0;
248 }
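/* Continuing the example above: with a data alignment of -8, a positive
   offset such as 8 factors to a negative value, which the unsigned
   operand of DW_CFA_offset cannot represent, so the signed
   DW_CFA_offset_extended_sf form must be used instead.  */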
249
250 /* Return a pointer to a newly allocated Call Frame Instruction. */
251
252 static inline dw_cfi_ref
253 new_cfi (void)
254 {
255 dw_cfi_ref cfi = ggc_alloc_dw_cfi_node ();
256
257 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = 0;
258 cfi->dw_cfi_oprnd2.dw_cfi_reg_num = 0;
259
260 return cfi;
261 }
262
263 /* Generate a new label for the CFI info to refer to. */
264
265 static char *
266 dwarf2out_cfi_label (void)
267 {
268 int num = dwarf2out_cfi_label_num++;
269 char label[20];
270
271 ASM_GENERATE_INTERNAL_LABEL (label, "LCFI", num);
272
273 return xstrdup (label);
274 }
275
276 /* Add CFI either to the current insn stream or to a vector, or both. */
277
278 static void
279 add_cfi (dw_cfi_ref cfi)
280 {
281 if (emit_cfa_remember)
282 {
283 dw_cfi_ref cfi_remember;
284
285 /* Emit the state save. */
286 emit_cfa_remember = false;
287 cfi_remember = new_cfi ();
288 cfi_remember->dw_cfi_opc = DW_CFA_remember_state;
289 add_cfi (cfi_remember);
290 }
291
292 any_cfis_emitted = true;
293 if (cfi_insn != NULL)
294 {
295 cfi_insn = emit_note_after (NOTE_INSN_CFI, cfi_insn);
296 NOTE_CFI (cfi_insn) = cfi;
297 }
298 if (add_cfi_vec != NULL)
299 VEC_safe_push (dw_cfi_ref, gc, *add_cfi_vec, cfi);
300 }
301
302 /* This function fills in a dw_cfa_location structure from a dwarf location
303 descriptor sequence. */
304
305 static void
306 get_cfa_from_loc_descr (dw_cfa_location *cfa, struct dw_loc_descr_struct *loc)
307 {
308 struct dw_loc_descr_struct *ptr;
309 cfa->offset = 0;
310 cfa->base_offset = 0;
311 cfa->indirect = 0;
312 cfa->reg = -1;
313
314 for (ptr = loc; ptr != NULL; ptr = ptr->dw_loc_next)
315 {
316 enum dwarf_location_atom op = ptr->dw_loc_opc;
317
318 switch (op)
319 {
320 case DW_OP_reg0:
321 case DW_OP_reg1:
322 case DW_OP_reg2:
323 case DW_OP_reg3:
324 case DW_OP_reg4:
325 case DW_OP_reg5:
326 case DW_OP_reg6:
327 case DW_OP_reg7:
328 case DW_OP_reg8:
329 case DW_OP_reg9:
330 case DW_OP_reg10:
331 case DW_OP_reg11:
332 case DW_OP_reg12:
333 case DW_OP_reg13:
334 case DW_OP_reg14:
335 case DW_OP_reg15:
336 case DW_OP_reg16:
337 case DW_OP_reg17:
338 case DW_OP_reg18:
339 case DW_OP_reg19:
340 case DW_OP_reg20:
341 case DW_OP_reg21:
342 case DW_OP_reg22:
343 case DW_OP_reg23:
344 case DW_OP_reg24:
345 case DW_OP_reg25:
346 case DW_OP_reg26:
347 case DW_OP_reg27:
348 case DW_OP_reg28:
349 case DW_OP_reg29:
350 case DW_OP_reg30:
351 case DW_OP_reg31:
352 cfa->reg = op - DW_OP_reg0;
353 break;
354 case DW_OP_regx:
355 cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
356 break;
357 case DW_OP_breg0:
358 case DW_OP_breg1:
359 case DW_OP_breg2:
360 case DW_OP_breg3:
361 case DW_OP_breg4:
362 case DW_OP_breg5:
363 case DW_OP_breg6:
364 case DW_OP_breg7:
365 case DW_OP_breg8:
366 case DW_OP_breg9:
367 case DW_OP_breg10:
368 case DW_OP_breg11:
369 case DW_OP_breg12:
370 case DW_OP_breg13:
371 case DW_OP_breg14:
372 case DW_OP_breg15:
373 case DW_OP_breg16:
374 case DW_OP_breg17:
375 case DW_OP_breg18:
376 case DW_OP_breg19:
377 case DW_OP_breg20:
378 case DW_OP_breg21:
379 case DW_OP_breg22:
380 case DW_OP_breg23:
381 case DW_OP_breg24:
382 case DW_OP_breg25:
383 case DW_OP_breg26:
384 case DW_OP_breg27:
385 case DW_OP_breg28:
386 case DW_OP_breg29:
387 case DW_OP_breg30:
388 case DW_OP_breg31:
389 cfa->reg = op - DW_OP_breg0;
390 cfa->base_offset = ptr->dw_loc_oprnd1.v.val_int;
391 break;
392 case DW_OP_bregx:
393 cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
394 cfa->base_offset = ptr->dw_loc_oprnd2.v.val_int;
395 break;
396 case DW_OP_deref:
397 cfa->indirect = 1;
398 break;
399 case DW_OP_plus_uconst:
400 cfa->offset = ptr->dw_loc_oprnd1.v.val_unsigned;
401 break;
402 default:
403 gcc_unreachable ();
404 }
405 }
406 }
407
408 /* Find the previous value for the CFA, iteratively. CFI is the opcode
409 to interpret, *LOC will be updated as necessary, *REMEMBER is used for
410 one level of remember/restore state processing. */
411
412 void
413 lookup_cfa_1 (dw_cfi_ref cfi, dw_cfa_location *loc, dw_cfa_location *remember)
414 {
415 switch (cfi->dw_cfi_opc)
416 {
417 case DW_CFA_def_cfa_offset:
418 case DW_CFA_def_cfa_offset_sf:
419 loc->offset = cfi->dw_cfi_oprnd1.dw_cfi_offset;
420 break;
421 case DW_CFA_def_cfa_register:
422 loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
423 break;
424 case DW_CFA_def_cfa:
425 case DW_CFA_def_cfa_sf:
426 loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
427 loc->offset = cfi->dw_cfi_oprnd2.dw_cfi_offset;
428 break;
429 case DW_CFA_def_cfa_expression:
430 get_cfa_from_loc_descr (loc, cfi->dw_cfi_oprnd1.dw_cfi_loc);
431 break;
432
433 case DW_CFA_remember_state:
434 gcc_assert (!remember->in_use);
435 *remember = *loc;
436 remember->in_use = 1;
437 break;
438 case DW_CFA_restore_state:
439 gcc_assert (remember->in_use);
440 *loc = *remember;
441 remember->in_use = 0;
442 break;
443
444 default:
445 break;
446 }
447 }
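/* For example (with illustrative register numbers): interpreting
   DW_CFA_def_cfa (7, 8) followed by DW_CFA_def_cfa_offset (16) leaves
   *LOC with reg == 7 and offset == 16; a later DW_CFA_restore_state
   rolls *LOC back to whatever was saved at the matching
   DW_CFA_remember_state.  */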
448
449 /* The current rule for calculating the DWARF2 canonical frame address. */
450 static dw_cfa_location cfa;
451
452 /* A copy of the CFA, for comparison purposes. */
453 static dw_cfa_location old_cfa;
454
455 /* The register used for saving registers to the stack, and its offset
456 from the CFA. */
457 static dw_cfa_location cfa_store;
458
459 /* The current save location around an epilogue. */
460 static dw_cfa_location cfa_remember;
461
462 /* Like cfa_remember, but a copy of old_cfa. */
463 static dw_cfa_location old_cfa_remember;
464
465 /* The running total of the size of arguments pushed onto the stack. */
466 static HOST_WIDE_INT args_size;
467
468 /* The last args_size we actually output. */
469 static HOST_WIDE_INT old_args_size;
470
471 /* Determine if two dw_cfa_location structures define the same data. */
472
473 bool
474 cfa_equal_p (const dw_cfa_location *loc1, const dw_cfa_location *loc2)
475 {
476 return (loc1->reg == loc2->reg
477 && loc1->offset == loc2->offset
478 && loc1->indirect == loc2->indirect
479 && (loc1->indirect == 0
480 || loc1->base_offset == loc2->base_offset));
481 }
482
483 /* This routine does the actual work. The CFA is now calculated from
484 the dw_cfa_location structure. */
485
486 static void
487 def_cfa_1 (dw_cfa_location *loc_p)
488 {
489 dw_cfi_ref cfi;
490 dw_cfa_location loc;
491
492 cfa = *loc_p;
493 loc = *loc_p;
494
495 if (cfa_store.reg == loc.reg && loc.indirect == 0)
496 cfa_store.offset = loc.offset;
497
498 loc.reg = DWARF_FRAME_REGNUM (loc.reg);
499
500 /* If nothing changed, no need to issue any call frame instructions. */
501 if (cfa_equal_p (&loc, &old_cfa))
502 return;
503
504 cfi = new_cfi ();
505
506 if (loc.reg == old_cfa.reg && !loc.indirect && !old_cfa.indirect)
507 {
508 /* Construct a "DW_CFA_def_cfa_offset <offset>" instruction, indicating
509 the CFA register did not change but the offset did. The data
510 factoring for DW_CFA_def_cfa_offset_sf happens in output_cfi, or
511 in the assembler via the .cfi_def_cfa_offset directive. */
512 if (loc.offset < 0)
513 cfi->dw_cfi_opc = DW_CFA_def_cfa_offset_sf;
514 else
515 cfi->dw_cfi_opc = DW_CFA_def_cfa_offset;
516 cfi->dw_cfi_oprnd1.dw_cfi_offset = loc.offset;
517 }
518
519 #ifndef MIPS_DEBUGGING_INFO /* SGI dbx thinks this means no offset. */
520 else if (loc.offset == old_cfa.offset
521 && old_cfa.reg != INVALID_REGNUM
522 && !loc.indirect
523 && !old_cfa.indirect)
524 {
525 /* Construct a "DW_CFA_def_cfa_register <register>" instruction,
526 indicating the CFA register has changed to <register> but the
527 offset has not changed. */
528 cfi->dw_cfi_opc = DW_CFA_def_cfa_register;
529 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = loc.reg;
530 }
531 #endif
532
533 else if (loc.indirect == 0)
534 {
535 /* Construct a "DW_CFA_def_cfa <register> <offset>" instruction,
536 indicating the CFA register has changed to <register> with
537 the specified offset. The data factoring for DW_CFA_def_cfa_sf
538 happens in output_cfi, or in the assembler via the .cfi_def_cfa
539 directive. */
540 if (loc.offset < 0)
541 cfi->dw_cfi_opc = DW_CFA_def_cfa_sf;
542 else
543 cfi->dw_cfi_opc = DW_CFA_def_cfa;
544 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = loc.reg;
545 cfi->dw_cfi_oprnd2.dw_cfi_offset = loc.offset;
546 }
547 else
548 {
549 /* Construct a DW_CFA_def_cfa_expression instruction to
550 calculate the CFA using a full location expression since no
551 register-offset pair is available. */
552 struct dw_loc_descr_struct *loc_list;
553
554 cfi->dw_cfi_opc = DW_CFA_def_cfa_expression;
555 loc_list = build_cfa_loc (&loc, 0);
556 cfi->dw_cfi_oprnd1.dw_cfi_loc = loc_list;
557 }
558
559 add_cfi (cfi);
560 old_cfa = loc;
561 }
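/* Illustrative cases for the opcode selection above: moving the CFA
   from sp+16 to sp+24 emits just DW_CFA_def_cfa_offset (24); moving it
   from sp+16 to fp+16 emits DW_CFA_def_cfa_register (fp) (except on
   targets defining MIPS_DEBUGGING_INFO, where that path is disabled);
   changing both register and offset emits DW_CFA_def_cfa; an indirect
   CFA requires the full DW_CFA_def_cfa_expression form.  */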
562
563 /* Add the CFI for saving a register. REG is the CFA column number.
564 If SREG is INVALID_REGNUM, the register is saved at OFFSET from the CFA;
565 otherwise it is saved in SREG. */
566
567 static void
568 reg_save (unsigned int reg, unsigned int sreg, HOST_WIDE_INT offset)
569 {
570 dw_fde_ref fde = cfun ? cfun->fde : NULL;
571 dw_cfi_ref cfi = new_cfi ();
572
573 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
574
575 /* When the stack has been realigned, store REG using DW_CFA_expression with FP. */
576 if (fde
577 && fde->stack_realign
578 && sreg == INVALID_REGNUM)
579 {
580 cfi->dw_cfi_opc = DW_CFA_expression;
581 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
582 cfi->dw_cfi_oprnd2.dw_cfi_loc
583 = build_cfa_aligned_loc (&cfa, offset, fde->stack_realignment);
584 }
585 else if (sreg == INVALID_REGNUM)
586 {
587 if (need_data_align_sf_opcode (offset))
588 cfi->dw_cfi_opc = DW_CFA_offset_extended_sf;
589 else if (reg & ~0x3f)
590 cfi->dw_cfi_opc = DW_CFA_offset_extended;
591 else
592 cfi->dw_cfi_opc = DW_CFA_offset;
593 cfi->dw_cfi_oprnd2.dw_cfi_offset = offset;
594 }
595 else if (sreg == reg)
596 cfi->dw_cfi_opc = DW_CFA_same_value;
597 else
598 {
599 cfi->dw_cfi_opc = DW_CFA_register;
600 cfi->dw_cfi_oprnd2.dw_cfi_reg_num = sreg;
601 }
602
603 add_cfi (cfi);
604 }
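/* For example, saving column 12 at CFA-16 with a data alignment of -8
   comes out as DW_CFA_offset (12, 2); a column above 0x3f would need
   DW_CFA_offset_extended instead; and a save into another register,
   say column 12 into column 1, becomes DW_CFA_register (12, 1).  */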
605
606 /* Given a SET, calculate the amount of stack adjustment it
607 contains. */
608
609 static HOST_WIDE_INT
610 stack_adjust_offset (const_rtx pattern, HOST_WIDE_INT cur_args_size,
611 HOST_WIDE_INT cur_offset)
612 {
613 const_rtx src = SET_SRC (pattern);
614 const_rtx dest = SET_DEST (pattern);
615 HOST_WIDE_INT offset = 0;
616 enum rtx_code code;
617
618 if (dest == stack_pointer_rtx)
619 {
620 code = GET_CODE (src);
621
622 /* Assume (set (reg sp) (reg whatever)) resets args_size
623 to 0. */
624 if (code == REG && src != stack_pointer_rtx)
625 {
626 offset = -cur_args_size;
627 #ifndef STACK_GROWS_DOWNWARD
628 offset = -offset;
629 #endif
630 return offset - cur_offset;
631 }
632
633 if (! (code == PLUS || code == MINUS)
634 || XEXP (src, 0) != stack_pointer_rtx
635 || !CONST_INT_P (XEXP (src, 1)))
636 return 0;
637
638 /* (set (reg sp) (plus (reg sp) (const_int))) */
639 offset = INTVAL (XEXP (src, 1));
640 if (code == PLUS)
641 offset = -offset;
642 return offset;
643 }
644
645 if (MEM_P (src) && !MEM_P (dest))
646 dest = src;
647 if (MEM_P (dest))
648 {
649 /* (set (mem (pre_dec (reg sp))) (foo)) */
650 src = XEXP (dest, 0);
651 code = GET_CODE (src);
652
653 switch (code)
654 {
655 case PRE_MODIFY:
656 case POST_MODIFY:
657 if (XEXP (src, 0) == stack_pointer_rtx)
658 {
659 rtx val = XEXP (XEXP (src, 1), 1);
660 /* We handle only adjustments by constant amount. */
661 gcc_assert (GET_CODE (XEXP (src, 1)) == PLUS
662 && CONST_INT_P (val));
663 offset = -INTVAL (val);
664 break;
665 }
666 return 0;
667
668 case PRE_DEC:
669 case POST_DEC:
670 if (XEXP (src, 0) == stack_pointer_rtx)
671 {
672 offset = GET_MODE_SIZE (GET_MODE (dest));
673 break;
674 }
675 return 0;
676
677 case PRE_INC:
678 case POST_INC:
679 if (XEXP (src, 0) == stack_pointer_rtx)
680 {
681 offset = -GET_MODE_SIZE (GET_MODE (dest));
682 break;
683 }
684 return 0;
685
686 default:
687 return 0;
688 }
689 }
690 else
691 return 0;
692
693 return offset;
694 }
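/* For example, (set (reg sp) (plus (reg sp) (const_int -64))) yields
   64, and (set (mem (pre_dec (reg sp))) (reg)) yields the mode size of
   the store: positive values mean the stack pointer moved toward the
   stack limit (callers flip the sign when the stack grows upward).  */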
695
696 /* Precomputed args_size for CODE_LABELs and BARRIERs preceding them,
697 indexed by INSN_UID. */
698
699 static HOST_WIDE_INT *barrier_args_size;
700
701 /* Helper function for compute_barrier_args_size. Handle one insn. */
702
703 static HOST_WIDE_INT
704 compute_barrier_args_size_1 (rtx insn, HOST_WIDE_INT cur_args_size,
705 VEC (rtx, heap) **next)
706 {
707 HOST_WIDE_INT offset = 0;
708 int i;
709
710 if (! RTX_FRAME_RELATED_P (insn))
711 {
712 if (prologue_epilogue_contains (insn))
713 /* Nothing */;
714 else if (GET_CODE (PATTERN (insn)) == SET)
715 offset = stack_adjust_offset (PATTERN (insn), cur_args_size, 0);
716 else if (GET_CODE (PATTERN (insn)) == PARALLEL
717 || GET_CODE (PATTERN (insn)) == SEQUENCE)
718 {
719 /* There may be stack adjustments inside compound insns. Search
720 for them. */
721 for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
722 if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
723 offset += stack_adjust_offset (XVECEXP (PATTERN (insn), 0, i),
724 cur_args_size, offset);
725 }
726 }
727 else
728 {
729 rtx expr = find_reg_note (insn, REG_FRAME_RELATED_EXPR, NULL_RTX);
730
731 if (expr)
732 {
733 expr = XEXP (expr, 0);
734 if (GET_CODE (expr) == PARALLEL
735 || GET_CODE (expr) == SEQUENCE)
736 for (i = 1; i < XVECLEN (expr, 0); i++)
737 {
738 rtx elem = XVECEXP (expr, 0, i);
739
740 if (GET_CODE (elem) == SET && !RTX_FRAME_RELATED_P (elem))
741 offset += stack_adjust_offset (elem, cur_args_size, offset);
742 }
743 }
744 }
745
746 #ifndef STACK_GROWS_DOWNWARD
747 offset = -offset;
748 #endif
749
750 cur_args_size += offset;
751 if (cur_args_size < 0)
752 cur_args_size = 0;
753
754 if (JUMP_P (insn))
755 {
756 rtx dest = JUMP_LABEL (insn);
757
758 if (dest)
759 {
760 if (barrier_args_size [INSN_UID (dest)] < 0)
761 {
762 barrier_args_size [INSN_UID (dest)] = cur_args_size;
763 VEC_safe_push (rtx, heap, *next, dest);
764 }
765 }
766 }
767
768 return cur_args_size;
769 }
770
771 /* Walk the whole function and compute args_size on BARRIERs. */
772
773 static void
774 compute_barrier_args_size (void)
775 {
776 int max_uid = get_max_uid (), i;
777 rtx insn;
778 VEC (rtx, heap) *worklist, *next, *tmp;
779
780 barrier_args_size = XNEWVEC (HOST_WIDE_INT, max_uid);
781 for (i = 0; i < max_uid; i++)
782 barrier_args_size[i] = -1;
783
784 worklist = VEC_alloc (rtx, heap, 20);
785 next = VEC_alloc (rtx, heap, 20);
786 insn = get_insns ();
787 barrier_args_size[INSN_UID (insn)] = 0;
788 VEC_quick_push (rtx, worklist, insn);
789 for (;;)
790 {
791 while (!VEC_empty (rtx, worklist))
792 {
793 rtx prev, body, first_insn;
794 HOST_WIDE_INT cur_args_size;
795
796 first_insn = insn = VEC_pop (rtx, worklist);
797 cur_args_size = barrier_args_size[INSN_UID (insn)];
798 prev = prev_nonnote_insn (insn);
799 if (prev && BARRIER_P (prev))
800 barrier_args_size[INSN_UID (prev)] = cur_args_size;
801
802 for (; insn; insn = NEXT_INSN (insn))
803 {
804 if (INSN_DELETED_P (insn) || NOTE_P (insn))
805 continue;
806 if (BARRIER_P (insn))
807 break;
808
809 if (LABEL_P (insn))
810 {
811 if (insn == first_insn)
812 continue;
813 else if (barrier_args_size[INSN_UID (insn)] < 0)
814 {
815 barrier_args_size[INSN_UID (insn)] = cur_args_size;
816 continue;
817 }
818 else
819 {
820 /* The insns starting with this label have been
821 already scanned or are in the worklist. */
822 break;
823 }
824 }
825
826 body = PATTERN (insn);
827 if (GET_CODE (body) == SEQUENCE)
828 {
829 HOST_WIDE_INT dest_args_size = cur_args_size;
830 for (i = 1; i < XVECLEN (body, 0); i++)
831 if (INSN_ANNULLED_BRANCH_P (XVECEXP (body, 0, 0))
832 && INSN_FROM_TARGET_P (XVECEXP (body, 0, i)))
833 dest_args_size
834 = compute_barrier_args_size_1 (XVECEXP (body, 0, i),
835 dest_args_size, &next);
836 else
837 cur_args_size
838 = compute_barrier_args_size_1 (XVECEXP (body, 0, i),
839 cur_args_size, &next);
840
841 if (INSN_ANNULLED_BRANCH_P (XVECEXP (body, 0, 0)))
842 compute_barrier_args_size_1 (XVECEXP (body, 0, 0),
843 dest_args_size, &next);
844 else
845 cur_args_size
846 = compute_barrier_args_size_1 (XVECEXP (body, 0, 0),
847 cur_args_size, &next);
848 }
849 else
850 cur_args_size
851 = compute_barrier_args_size_1 (insn, cur_args_size, &next);
852 }
853 }
854
855 if (VEC_empty (rtx, next))
856 break;
857
858 /* Swap WORKLIST with NEXT and truncate NEXT for next iteration. */
859 tmp = next;
860 next = worklist;
861 worklist = tmp;
862 VEC_truncate (rtx, next, 0);
863 }
864
865 VEC_free (rtx, heap, worklist);
866 VEC_free (rtx, heap, next);
867 }
868
869 /* Add a CFI to update the running total of the size of arguments
870 pushed onto the stack. */
871
872 static void
873 dwarf2out_args_size (HOST_WIDE_INT size)
874 {
875 dw_cfi_ref cfi;
876
877 if (size == old_args_size)
878 return;
879
880 old_args_size = size;
881
882 cfi = new_cfi ();
883 cfi->dw_cfi_opc = DW_CFA_GNU_args_size;
884 cfi->dw_cfi_oprnd1.dw_cfi_offset = size;
885 add_cfi (cfi);
886 }
887
888 /* Record a stack adjustment of OFFSET bytes. */
889
890 static void
891 dwarf2out_stack_adjust (HOST_WIDE_INT offset)
892 {
893 if (cfa.reg == STACK_POINTER_REGNUM)
894 cfa.offset += offset;
895
896 if (cfa_store.reg == STACK_POINTER_REGNUM)
897 cfa_store.offset += offset;
898
899 if (ACCUMULATE_OUTGOING_ARGS)
900 return;
901
902 #ifndef STACK_GROWS_DOWNWARD
903 offset = -offset;
904 #endif
905
906 args_size += offset;
907 if (args_size < 0)
908 args_size = 0;
909
910 def_cfa_1 (&cfa);
911 if (flag_asynchronous_unwind_tables)
912 dwarf2out_args_size (args_size);
913 }
914
915 /* Check INSN to see if it looks like a push or a stack adjustment, and
916 make a note of it if it does. EH uses this information to find out
917 how much extra space it needs to pop off the stack. */
918
919 static void
920 dwarf2out_notice_stack_adjust (rtx insn, bool after_p)
921 {
922 HOST_WIDE_INT offset;
923 int i;
924
925 /* Don't handle epilogues at all. Certainly it would be wrong to do so
926 with this function. Proper support would require all frame-related
927 insns to be marked, and to be able to handle saving state around
928 epilogues textually in the middle of the function. */
929 if (prologue_epilogue_contains (insn))
930 return;
931
932 /* If INSN is an instruction from the target of an annulled branch, the
933 effects are for the target only and so the current argument size
934 shouldn't change at all. */
935 if (final_sequence
936 && INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))
937 && INSN_FROM_TARGET_P (insn))
938 return;
939
940 /* If only calls can throw, and we have a frame pointer,
941 save up adjustments until we see the CALL_INSN. */
942 if (!flag_asynchronous_unwind_tables && cfa.reg != STACK_POINTER_REGNUM)
943 {
944 if (CALL_P (insn) && !after_p)
945 {
946 /* Extract the size of the args from the CALL rtx itself. */
947 insn = PATTERN (insn);
948 if (GET_CODE (insn) == PARALLEL)
949 insn = XVECEXP (insn, 0, 0);
950 if (GET_CODE (insn) == SET)
951 insn = SET_SRC (insn);
952 gcc_assert (GET_CODE (insn) == CALL);
953 dwarf2out_args_size (INTVAL (XEXP (insn, 1)));
954 }
955 return;
956 }
957
958 if (CALL_P (insn) && !after_p)
959 {
960 if (!flag_asynchronous_unwind_tables)
961 dwarf2out_args_size (args_size);
962 return;
963 }
964 else if (BARRIER_P (insn))
965 {
966 /* Don't call compute_barrier_args_size () if the only
967 BARRIER is at the end of function. */
968 if (barrier_args_size == NULL && next_nonnote_insn (insn))
969 compute_barrier_args_size ();
970 if (barrier_args_size == NULL)
971 offset = 0;
972 else
973 {
974 offset = barrier_args_size[INSN_UID (insn)];
975 if (offset < 0)
976 offset = 0;
977 }
978
979 offset -= args_size;
980 #ifndef STACK_GROWS_DOWNWARD
981 offset = -offset;
982 #endif
983 }
984 else if (GET_CODE (PATTERN (insn)) == SET)
985 offset = stack_adjust_offset (PATTERN (insn), args_size, 0);
986 else if (GET_CODE (PATTERN (insn)) == PARALLEL
987 || GET_CODE (PATTERN (insn)) == SEQUENCE)
988 {
989 /* There may be stack adjustments inside compound insns. Search
990 for them. */
991 for (offset = 0, i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
992 if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
993 offset += stack_adjust_offset (XVECEXP (PATTERN (insn), 0, i),
994 args_size, offset);
995 }
996 else
997 return;
998
999 if (offset == 0)
1000 return;
1001
1002 dwarf2out_stack_adjust (offset);
1003 }
1004
1005 /* We delay emitting a register save until either (a) we reach the end
1006 of the prologue or (b) the register is clobbered. This clusters
1007 register saves so that there are fewer pc advances. */
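/* For instance, three consecutive pushes in a prologue come out as a
   single DW_CFA_advance_loc followed by three DW_CFA_offset opcodes,
   rather than three separate advance/offset pairs.  */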
1008
1009 struct GTY(()) queued_reg_save {
1010 struct queued_reg_save *next;
1011 rtx reg;
1012 HOST_WIDE_INT cfa_offset;
1013 rtx saved_reg;
1014 };
1015
1016 static GTY(()) struct queued_reg_save *queued_reg_saves;
1017
1018 /* The caller's ORIG_REG is saved in SAVED_IN_REG. */
1019 typedef struct GTY(()) reg_saved_in_data {
1020 rtx orig_reg;
1021 rtx saved_in_reg;
1022 } reg_saved_in_data;
1023
1024 DEF_VEC_O (reg_saved_in_data);
1025 DEF_VEC_ALLOC_O (reg_saved_in_data, gc);
1026
1027 /* A set of registers saved in other registers. This is implemented as
1028 a flat array because it normally contains zero or one entry, depending
1029 on the target. IA-64 is the big spender here, using a maximum of
1030 5 entries. */
1031 static GTY(()) VEC(reg_saved_in_data, gc) *regs_saved_in_regs;
1032
1033 static GTY(()) reg_saved_in_data *cie_return_save;
1034
1035 /* Compare X and Y for equivalence. The inputs may be REGs or PC_RTX. */
1036
1037 static bool
1038 compare_reg_or_pc (rtx x, rtx y)
1039 {
1040 if (REG_P (x) && REG_P (y))
1041 return REGNO (x) == REGNO (y);
1042 return x == y;
1043 }
1044
1045 /* Record SRC as being saved in DEST. DEST may be null to delete an
1046 existing entry. SRC may be a register or PC_RTX. */
1047
1048 static void
1049 record_reg_saved_in_reg (rtx dest, rtx src)
1050 {
1051 reg_saved_in_data *elt;
1052 size_t i;
1053
1054 FOR_EACH_VEC_ELT (reg_saved_in_data, regs_saved_in_regs, i, elt)
1055 if (compare_reg_or_pc (elt->orig_reg, src))
1056 {
1057 if (dest == NULL)
1058 VEC_unordered_remove(reg_saved_in_data, regs_saved_in_regs, i);
1059 else
1060 elt->saved_in_reg = dest;
1061 return;
1062 }
1063
1064 if (dest == NULL)
1065 return;
1066
1067 elt = VEC_safe_push(reg_saved_in_data, gc, regs_saved_in_regs, NULL);
1068 elt->orig_reg = src;
1069 elt->saved_in_reg = dest;
1070 }
1071
1072 /* Add an entry to QUEUED_REG_SAVES saying that REG is now saved at
1073 SREG, or if SREG is NULL then it is saved at OFFSET to the CFA. */
1074
1075 static void
1076 queue_reg_save (rtx reg, rtx sreg, HOST_WIDE_INT offset)
1077 {
1078 struct queued_reg_save *q;
1079
1080 /* Duplicates waste space, but it's also necessary to remove them
1081 for correctness, since the queue gets output in reverse order. */
1082 for (q = queued_reg_saves; q != NULL; q = q->next)
1083 if (compare_reg_or_pc (q->reg, reg))
1084 break;
1085
1086 if (q == NULL)
1087 {
1088 q = ggc_alloc_queued_reg_save ();
1089 q->next = queued_reg_saves;
1090 queued_reg_saves = q;
1091 }
1092
1093 q->reg = reg;
1094 q->cfa_offset = offset;
1095 q->saved_reg = sreg;
1096 }
1097
1098 /* Output all the entries in QUEUED_REG_SAVES. */
1099
1100 static void
1101 dwarf2out_flush_queued_reg_saves (void)
1102 {
1103 struct queued_reg_save *q;
1104
1105 for (q = queued_reg_saves; q; q = q->next)
1106 {
1107 unsigned int reg, sreg;
1108
1109 record_reg_saved_in_reg (q->saved_reg, q->reg);
1110
1111 if (q->reg == pc_rtx)
1112 reg = DWARF_FRAME_RETURN_COLUMN;
1113 else
1114 reg = DWARF_FRAME_REGNUM (REGNO (q->reg));
1115 if (q->saved_reg)
1116 sreg = DWARF_FRAME_REGNUM (REGNO (q->saved_reg));
1117 else
1118 sreg = INVALID_REGNUM;
1119 reg_save (reg, sreg, q->cfa_offset);
1120 }
1121
1122 queued_reg_saves = NULL;
1123 }
1124
1125 /* Does INSN clobber any register for which QUEUED_REG_SAVES lists a
1126 saved location? Or does it clobber a register in which we have
1127 previously said some other register is saved, and for which we now
1128 have a new location? */
1129
1130 static bool
1131 clobbers_queued_reg_save (const_rtx insn)
1132 {
1133 struct queued_reg_save *q;
1134
1135 for (q = queued_reg_saves; q; q = q->next)
1136 {
1137 size_t i;
1138 reg_saved_in_data *rir;
1139
1140 if (modified_in_p (q->reg, insn))
1141 return true;
1142
1143 FOR_EACH_VEC_ELT (reg_saved_in_data, regs_saved_in_regs, i, rir)
1144 if (compare_reg_or_pc (q->reg, rir->orig_reg)
1145 && modified_in_p (rir->saved_in_reg, insn))
1146 return true;
1147 }
1148
1149 return false;
1150 }
1151
1152 /* What register, if any, is currently saved in REG? */
1153
1154 static rtx
1155 reg_saved_in (rtx reg)
1156 {
1157 unsigned int regn = REGNO (reg);
1158 struct queued_reg_save *q;
1159 reg_saved_in_data *rir;
1160 size_t i;
1161
1162 for (q = queued_reg_saves; q; q = q->next)
1163 if (q->saved_reg && regn == REGNO (q->saved_reg))
1164 return q->reg;
1165
1166 FOR_EACH_VEC_ELT (reg_saved_in_data, regs_saved_in_regs, i, rir)
1167 if (regn == REGNO (rir->saved_in_reg))
1168 return rir->orig_reg;
1169
1170 return NULL_RTX;
1171 }
1172
1173
1174 /* A temporary register holding an integral value used in adjusting SP
1175 or setting up the store_reg. The "offset" field holds the integer
1176 value, not an offset. */
1177 static dw_cfa_location cfa_temp;
1178
1179 /* A subroutine of dwarf2out_frame_debug, process a REG_DEF_CFA note. */
1180
1181 static void
1182 dwarf2out_frame_debug_def_cfa (rtx pat)
1183 {
1184 memset (&cfa, 0, sizeof (cfa));
1185
1186 switch (GET_CODE (pat))
1187 {
1188 case PLUS:
1189 cfa.reg = REGNO (XEXP (pat, 0));
1190 cfa.offset = INTVAL (XEXP (pat, 1));
1191 break;
1192
1193 case REG:
1194 cfa.reg = REGNO (pat);
1195 break;
1196
1197 case MEM:
1198 cfa.indirect = 1;
1199 pat = XEXP (pat, 0);
1200 if (GET_CODE (pat) == PLUS)
1201 {
1202 cfa.base_offset = INTVAL (XEXP (pat, 1));
1203 pat = XEXP (pat, 0);
1204 }
1205 cfa.reg = REGNO (pat);
1206 break;
1207
1208 default:
1209 /* Recurse and define an expression. */
1210 gcc_unreachable ();
1211 }
1212
1213 def_cfa_1 (&cfa);
1214 }
1215
1216 /* A subroutine of dwarf2out_frame_debug, process a REG_ADJUST_CFA note. */
1217
1218 static void
1219 dwarf2out_frame_debug_adjust_cfa (rtx pat)
1220 {
1221 rtx src, dest;
1222
1223 gcc_assert (GET_CODE (pat) == SET);
1224 dest = XEXP (pat, 0);
1225 src = XEXP (pat, 1);
1226
1227 switch (GET_CODE (src))
1228 {
1229 case PLUS:
1230 gcc_assert (REGNO (XEXP (src, 0)) == cfa.reg);
1231 cfa.offset -= INTVAL (XEXP (src, 1));
1232 break;
1233
1234 case REG:
1235 break;
1236
1237 default:
1238 gcc_unreachable ();
1239 }
1240
1241 cfa.reg = REGNO (dest);
1242 gcc_assert (cfa.indirect == 0);
1243
1244 def_cfa_1 (&cfa);
1245 }
1246
1247 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_OFFSET note. */
1248
1249 static void
1250 dwarf2out_frame_debug_cfa_offset (rtx set)
1251 {
1252 HOST_WIDE_INT offset;
1253 rtx src, addr, span;
1254 unsigned int sregno;
1255
1256 src = XEXP (set, 1);
1257 addr = XEXP (set, 0);
1258 gcc_assert (MEM_P (addr));
1259 addr = XEXP (addr, 0);
1260
1261 /* As documented, only consider extremely simple addresses. */
1262 switch (GET_CODE (addr))
1263 {
1264 case REG:
1265 gcc_assert (REGNO (addr) == cfa.reg);
1266 offset = -cfa.offset;
1267 break;
1268 case PLUS:
1269 gcc_assert (REGNO (XEXP (addr, 0)) == cfa.reg);
1270 offset = INTVAL (XEXP (addr, 1)) - cfa.offset;
1271 break;
1272 default:
1273 gcc_unreachable ();
1274 }
1275
1276 if (src == pc_rtx)
1277 {
1278 span = NULL;
1279 sregno = DWARF_FRAME_RETURN_COLUMN;
1280 }
1281 else
1282 {
1283 span = targetm.dwarf_register_span (src);
1284 sregno = DWARF_FRAME_REGNUM (REGNO (src));
1285 }
1286
1287 /* ??? We'd like to use queue_reg_save, but we need to come up with
1288 a different flushing heuristic for epilogues. */
1289 if (!span)
1290 reg_save (sregno, INVALID_REGNUM, offset);
1291 else
1292 {
1293 /* We have a PARALLEL describing where the contents of SRC live.
1294 Queue register saves for each piece of the PARALLEL. */
1295 int par_index;
1296 int limit;
1297 HOST_WIDE_INT span_offset = offset;
1298
1299 gcc_assert (GET_CODE (span) == PARALLEL);
1300
1301 limit = XVECLEN (span, 0);
1302 for (par_index = 0; par_index < limit; par_index++)
1303 {
1304 rtx elem = XVECEXP (span, 0, par_index);
1305
1306 sregno = DWARF_FRAME_REGNUM (REGNO (elem));
1307 reg_save (sregno, INVALID_REGNUM, span_offset);
1308 span_offset += GET_MODE_SIZE (GET_MODE (elem));
1309 }
1310 }
1311 }
1312
1313 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_REGISTER note. */
1314
1315 static void
1316 dwarf2out_frame_debug_cfa_register (rtx set)
1317 {
1318 rtx src, dest;
1319 unsigned sregno, dregno;
1320
1321 src = XEXP (set, 1);
1322 dest = XEXP (set, 0);
1323
1324 record_reg_saved_in_reg (dest, src);
1325 if (src == pc_rtx)
1326 sregno = DWARF_FRAME_RETURN_COLUMN;
1327 else
1328 sregno = DWARF_FRAME_REGNUM (REGNO (src));
1329
1330 dregno = DWARF_FRAME_REGNUM (REGNO (dest));
1331
1332 /* ??? We'd like to use queue_reg_save, but we need to come up with
1333 a different flushing heuristic for epilogues. */
1334 reg_save (sregno, dregno, 0);
1335 }
1336
1337 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_EXPRESSION note. */
1338
1339 static void
1340 dwarf2out_frame_debug_cfa_expression (rtx set)
1341 {
1342 rtx src, dest, span;
1343 dw_cfi_ref cfi = new_cfi ();
1344
1345 dest = SET_DEST (set);
1346 src = SET_SRC (set);
1347
1348 gcc_assert (REG_P (src));
1349 gcc_assert (MEM_P (dest));
1350
1351 span = targetm.dwarf_register_span (src);
1352 gcc_assert (!span);
1353
1354 cfi->dw_cfi_opc = DW_CFA_expression;
1355 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = DWARF_FRAME_REGNUM (REGNO (src));
1356 cfi->dw_cfi_oprnd2.dw_cfi_loc
1357 = mem_loc_descriptor (XEXP (dest, 0), get_address_mode (dest),
1358 GET_MODE (dest), VAR_INIT_STATUS_INITIALIZED);
1359
1360 /* ??? We'd like to use queue_reg_save, were the interface different,
1361 and, as above, we could manage flushing for epilogues. */
1362 add_cfi (cfi);
1363 }
1364
1365 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_RESTORE note. */
1366
1367 static void
1368 dwarf2out_frame_debug_cfa_restore (rtx reg)
1369 {
1370 dw_cfi_ref cfi = new_cfi ();
1371 unsigned int regno = DWARF_FRAME_REGNUM (REGNO (reg));
1372
1373 cfi->dw_cfi_opc = (regno & ~0x3f ? DW_CFA_restore_extended : DW_CFA_restore);
1374 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = regno;
1375
1376 add_cfi (cfi);
1377 }
1378
1379 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_WINDOW_SAVE.
1380 ??? Perhaps we should note in the CIE where windows are saved (instead of
1381 assuming 0(cfa)) and what registers are in the window. */
1382
1383 static void
1384 dwarf2out_frame_debug_cfa_window_save (void)
1385 {
1386 dw_cfi_ref cfi = new_cfi ();
1387
1388 cfi->dw_cfi_opc = DW_CFA_GNU_window_save;
1389 add_cfi (cfi);
1390 }
1391
1392 /* Record call frame debugging information for an expression EXPR,
1393 which either sets SP or FP (adjusting how we calculate the frame
1394 address) or saves a register to the stack or another register.
1396
1397 This function encodes a state machine mapping rtxes to actions on
1398 cfa, cfa_store, and cfa_temp.reg. We describe these rules so
1399 users need not read the source code.
1400
1401 The High-Level Picture
1402
1403 Changes in the register we use to calculate the CFA: Currently we
1404 assume that if you copy the CFA register into another register, we
1405 should take the other one as the new CFA register; this seems to
1406 work pretty well. If it's wrong for some target, it's simple
1407 enough not to set RTX_FRAME_RELATED_P on the insn in question.
1408
1409 Changes in the register we use for saving registers to the stack:
1410 This is usually SP, but not always. Again, we deduce that if you
1411 copy SP into another register (and SP is not the CFA register),
1412 then the new register is the one we will be using for register
1413 saves. This also seems to work.
1414
1415 Register saves: There's not much guesswork about this one; if
1416 RTX_FRAME_RELATED_P is set on an insn which modifies memory, it's a
1417 register save, and the register used to calculate the destination
1418 had better be the one we think we're using for this purpose.
1419 It's also assumed that a copy from a call-saved register to another
1420 register is saving that register if RTX_FRAME_RELATED_P is set on
1421 that instruction. If the copy is from a call-saved register to
1422 the *same* register, that means that the register is now the same
1423 value as in the caller.
1424
1425 Except: If the register being saved is the CFA register, and the
1426 offset is nonzero, we are saving the CFA, so we assume we have to
1427 use DW_CFA_def_cfa_expression. If the offset is 0, we assume that
1428 the intent is to save the value of SP from the previous frame.
1429
1430 In addition, if a register has previously been saved to a different
1431 register, a later copy from that register refers to the original (see reg_saved_in).
1432
1433 Invariants / Summaries of Rules
1434
1435 cfa current rule for calculating the CFA. It usually
1436 consists of a register and an offset.
1437 cfa_store register used by prologue code to save things to the stack
1438 cfa_store.offset is the offset from the value of
1439 cfa_store.reg to the actual CFA
1440 cfa_temp register holding an integral value. cfa_temp.offset
1441 stores the value, which will be used to adjust the
1442 stack pointer. cfa_temp is also used like cfa_store,
1443 to track stores to the stack via fp or a temp reg.
1444
1445 Rules 1- 4: Setting a register's value to cfa.reg or an expression
1446 with cfa.reg as the first operand changes the cfa.reg and its
1447 cfa.offset. Rule 1 and 4 also set cfa_temp.reg and
1448 cfa_temp.offset.
1449
1450 Rules 6- 9: Set a non-cfa.reg register value to a constant or an
1451 expression yielding a constant. This sets cfa_temp.reg
1452 and cfa_temp.offset.
1453
1454 Rule 5: Create a new register cfa_store used to save items to the
1455 stack.
1456
1457 Rules 10-14: Save a register to the stack. Define offset as the
1458 difference of the original location and cfa_store's
1459 location (or cfa_temp's location if cfa_temp is used).
1460
1461 Rules 16-20: If AND operation happens on sp in prologue, we assume
1462 stack is realigned. We will use a group of DW_OP_XXX
1463 expressions to represent the location of the stored
1464 register instead of CFA+offset.
1465
1466 The Rules
1467
1468 "{a,b}" indicates a choice of a xor b.
1469 "<reg>:cfa.reg" indicates that <reg> must equal cfa.reg.
1470
1471 Rule 1:
1472 (set <reg1> <reg2>:cfa.reg)
1473 effects: cfa.reg = <reg1>
1474 cfa.offset unchanged
1475 cfa_temp.reg = <reg1>
1476 cfa_temp.offset = cfa.offset
1477
1478 Rule 2:
1479 (set sp ({minus,plus,losum} {sp,fp}:cfa.reg
1480 {<const_int>,<reg>:cfa_temp.reg}))
1481 effects: cfa.reg = sp if fp used
1482 cfa.offset += {+/- <const_int>, cfa_temp.offset} if cfa.reg==sp
1483 cfa_store.offset += {+/- <const_int>, cfa_temp.offset}
1484 if cfa_store.reg==sp
1485
1486 Rule 3:
1487 (set fp ({minus,plus,losum} <reg>:cfa.reg <const_int>))
1488 effects: cfa.reg = fp
1489 cfa_offset += +/- <const_int>
1490
1491 Rule 4:
1492 (set <reg1> ({plus,losum} <reg2>:cfa.reg <const_int>))
1493 constraints: <reg1> != fp
1494 <reg1> != sp
1495 effects: cfa.reg = <reg1>
1496 cfa_temp.reg = <reg1>
1497 cfa_temp.offset = cfa.offset
1498
1499 Rule 5:
1500 (set <reg1> (plus <reg2>:cfa_temp.reg sp:cfa.reg))
1501 constraints: <reg1> != fp
1502 <reg1> != sp
1503 effects: cfa_store.reg = <reg1>
1504 cfa_store.offset = cfa.offset - cfa_temp.offset
1505
1506 Rule 6:
1507 (set <reg> <const_int>)
1508 effects: cfa_temp.reg = <reg>
1509 cfa_temp.offset = <const_int>
1510
1511 Rule 7:
1512 (set <reg1>:cfa_temp.reg (ior <reg2>:cfa_temp.reg <const_int>))
1513 effects: cfa_temp.reg = <reg1>
1514 cfa_temp.offset |= <const_int>
1515
1516 Rule 8:
1517 (set <reg> (high <exp>))
1518 effects: none
1519
1520 Rule 9:
1521 (set <reg> (lo_sum <exp> <const_int>))
1522 effects: cfa_temp.reg = <reg>
1523 cfa_temp.offset = <const_int>
1524
1525 Rule 10:
1526 (set (mem ({pre,post}_modify sp:cfa_store (???? <reg1> <const_int>))) <reg2>)
1527 effects: cfa_store.offset -= <const_int>
1528 cfa.offset = cfa_store.offset if cfa.reg == sp
1529 cfa.reg = sp
1530 cfa.base_offset = -cfa_store.offset
1531
1532 Rule 11:
1533 (set (mem ({pre_inc,pre_dec,post_dec} sp:cfa_store.reg)) <reg>)
1534 effects: cfa_store.offset += -/+ mode_size(mem)
1535 cfa.offset = cfa_store.offset if cfa.reg == sp
1536 cfa.reg = sp
1537 cfa.base_offset = -cfa_store.offset
1538
1539 Rule 12:
1540 (set (mem ({minus,plus,losum} <reg1>:{cfa_store,cfa_temp} <const_int>))
1541 <reg2>)
1543 effects: cfa.reg = <reg1>
1544 cfa.base_offset = -/+ <const_int> - {cfa_store,cfa_temp}.offset
1545
1546 Rule 13:
1547 (set (mem <reg1>:{cfa_store,cfa_temp}) <reg2>)
1548 effects: cfa.reg = <reg1>
1549 cfa.base_offset = -{cfa_store,cfa_temp}.offset
1550
1551 Rule 14:
1552 (set (mem (post_inc <reg1>:cfa_temp <const_int>)) <reg2>)
1553 effects: cfa.reg = <reg1>
1554 cfa.base_offset = -cfa_temp.offset
1555 cfa_temp.offset -= mode_size(mem)
1556
1557 Rule 15:
1558 (set <reg> {unspec, unspec_volatile})
1559 effects: target-dependent
1560
1561 Rule 16:
1562 (set sp (and: sp <const_int>))
1563 constraints: cfa_store.reg == sp
1564 effects: cfun->fde.stack_realign = 1
1565 cfa_store.offset = 0
1566 fde->drap_reg = cfa.reg if cfa.reg != sp and cfa.reg != fp
1567
1568 Rule 17:
1569 (set (mem ({pre_inc, pre_dec} sp)) (mem (plus (cfa.reg) (const_int))))
1570 effects: cfa_store.offset += -/+ mode_size(mem)
1571
1572 Rule 18:
1573 (set (mem ({pre_inc, pre_dec} sp)) fp)
1574 constraints: fde->stack_realign == 1
1575 effects: cfa_store.offset = 0
1576 cfa.reg != HARD_FRAME_POINTER_REGNUM
1577
1578 Rule 19:
1579 (set (mem ({pre_inc, pre_dec} sp)) cfa.reg)
1580 constraints: fde->stack_realign == 1
1581 && cfa.offset == 0
1582 && cfa.indirect == 0
1583 && cfa.reg != HARD_FRAME_POINTER_REGNUM
1584 effects: Use DW_CFA_def_cfa_expression to define cfa
1585 cfa.reg == fde->drap_reg */
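/* A worked example (illustrative; the exact RTL is target-dependent).
   A conventional frame-pointer prologue

     (set (mem (pre_dec (reg sp))) (reg fp))            Rule 11
     (set (reg fp) (reg sp))                            Rule 1
     (set (reg sp) (plus (reg sp) (const_int -16)))     Rule 2

   first bumps cfa_store.offset and queues an FP save (emitting
   DW_CFA_def_cfa_offset and, later, DW_CFA_offset), then switches
   cfa.reg to FP (DW_CFA_def_cfa_register), after which the final SP
   adjustment produces no new CFI because the CFA no longer tracks
   SP.  */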
1586
1587 static void
1588 dwarf2out_frame_debug_expr (rtx expr)
1589 {
1590 rtx src, dest, span;
1591 HOST_WIDE_INT offset;
1592 dw_fde_ref fde;
1593
1594 /* If RTX_FRAME_RELATED_P is set on a PARALLEL, process each member of
1595 the PARALLEL independently. The first element is always processed if
1596 it is a SET. This is for backward compatibility. Other elements
1597 are processed only if they are SETs and the RTX_FRAME_RELATED_P
1598 flag is set in them. */
1599 if (GET_CODE (expr) == PARALLEL || GET_CODE (expr) == SEQUENCE)
1600 {
1601 int par_index;
1602 int limit = XVECLEN (expr, 0);
1603 rtx elem;
1604
1605 /* PARALLELs have strict read-modify-write semantics, so we
1606 ought to evaluate every rvalue before changing any lvalue.
1607 It's cumbersome to do that in general, but there's an
1608 easy approximation that is enough for all current users:
1609 handle register saves before register assignments. */
1610 if (GET_CODE (expr) == PARALLEL)
1611 for (par_index = 0; par_index < limit; par_index++)
1612 {
1613 elem = XVECEXP (expr, 0, par_index);
1614 if (GET_CODE (elem) == SET
1615 && MEM_P (SET_DEST (elem))
1616 && (RTX_FRAME_RELATED_P (elem) || par_index == 0))
1617 dwarf2out_frame_debug_expr (elem);
1618 }
1619
1620 for (par_index = 0; par_index < limit; par_index++)
1621 {
1622 elem = XVECEXP (expr, 0, par_index);
1623 if (GET_CODE (elem) == SET
1624 && (!MEM_P (SET_DEST (elem)) || GET_CODE (expr) == SEQUENCE)
1625 && (RTX_FRAME_RELATED_P (elem) || par_index == 0))
1626 dwarf2out_frame_debug_expr (elem);
1627 else if (GET_CODE (elem) == SET
1628 && par_index != 0
1629 && !RTX_FRAME_RELATED_P (elem))
1630 {
1631 /* Stack adjustment combining might combine some post-prologue
1632 stack adjustment into a prologue stack adjustment. */
1633 HOST_WIDE_INT offset = stack_adjust_offset (elem, args_size, 0);
1634
1635 if (offset != 0)
1636 dwarf2out_stack_adjust (offset);
1637 }
1638 }
1639 return;
1640 }
1641
1642 gcc_assert (GET_CODE (expr) == SET);
1643
1644 src = SET_SRC (expr);
1645 dest = SET_DEST (expr);
1646
1647 if (REG_P (src))
1648 {
1649 rtx rsi = reg_saved_in (src);
1650 if (rsi)
1651 src = rsi;
1652 }
1653
1654 fde = cfun->fde;
1655
1656 switch (GET_CODE (dest))
1657 {
1658 case REG:
1659 switch (GET_CODE (src))
1660 {
1661 /* Setting FP from SP. */
1662 case REG:
1663 if (cfa.reg == (unsigned) REGNO (src))
1664 {
1665 /* Rule 1 */
1666 /* Update the CFA rule wrt SP or FP. Make sure src is
1667 relative to the current CFA register.
1668
1669 We used to require that dest be either SP or FP, but the
1670 ARM copies SP to a temporary register, and from there to
1671 FP. So we just rely on the backends to only set
1672 RTX_FRAME_RELATED_P on appropriate insns. */
1673 cfa.reg = REGNO (dest);
1674 cfa_temp.reg = cfa.reg;
1675 cfa_temp.offset = cfa.offset;
1676 }
1677 else
1678 {
1679 /* Saving a register in a register. */
1680 gcc_assert (!fixed_regs [REGNO (dest)]
1681 /* For the SPARC and its register window. */
1682 || (DWARF_FRAME_REGNUM (REGNO (src))
1683 == DWARF_FRAME_RETURN_COLUMN));
1684
1685 /* After stack is aligned, we can only save SP in FP
1686 if drap register is used. In this case, we have
1687 to restore stack pointer with the CFA value and we
1688 don't generate this DWARF information. */
1689 if (fde
1690 && fde->stack_realign
1691 && REGNO (src) == STACK_POINTER_REGNUM)
1692 gcc_assert (REGNO (dest) == HARD_FRAME_POINTER_REGNUM
1693 && fde->drap_reg != INVALID_REGNUM
1694 && cfa.reg != REGNO (src));
1695 else
1696 queue_reg_save (src, dest, 0);
1697 }
1698 break;
1699
1700 case PLUS:
1701 case MINUS:
1702 case LO_SUM:
1703 if (dest == stack_pointer_rtx)
1704 {
1705 /* Rule 2 */
1706 /* Adjusting SP. */
1707 switch (GET_CODE (XEXP (src, 1)))
1708 {
1709 case CONST_INT:
1710 offset = INTVAL (XEXP (src, 1));
1711 break;
1712 case REG:
1713 gcc_assert ((unsigned) REGNO (XEXP (src, 1))
1714 == cfa_temp.reg);
1715 offset = cfa_temp.offset;
1716 break;
1717 default:
1718 gcc_unreachable ();
1719 }
1720
1721 if (XEXP (src, 0) == hard_frame_pointer_rtx)
1722 {
1723 /* Restoring SP from FP in the epilogue. */
1724 gcc_assert (cfa.reg == (unsigned) HARD_FRAME_POINTER_REGNUM);
1725 cfa.reg = STACK_POINTER_REGNUM;
1726 }
1727 else if (GET_CODE (src) == LO_SUM)
1728 /* Assume we've set the source reg of the LO_SUM from sp. */
1729 ;
1730 else
1731 gcc_assert (XEXP (src, 0) == stack_pointer_rtx);
1732
1733 if (GET_CODE (src) != MINUS)
1734 offset = -offset;
1735 if (cfa.reg == STACK_POINTER_REGNUM)
1736 cfa.offset += offset;
1737 if (cfa_store.reg == STACK_POINTER_REGNUM)
1738 cfa_store.offset += offset;
1739 }
1740 else if (dest == hard_frame_pointer_rtx)
1741 {
1742 /* Rule 3 */
1743 /* Either setting the FP from an offset of the SP,
1744 or adjusting the FP */
1745 gcc_assert (frame_pointer_needed);
1746
1747 gcc_assert (REG_P (XEXP (src, 0))
1748 && (unsigned) REGNO (XEXP (src, 0)) == cfa.reg
1749 && CONST_INT_P (XEXP (src, 1)));
1750 offset = INTVAL (XEXP (src, 1));
1751 if (GET_CODE (src) != MINUS)
1752 offset = -offset;
1753 cfa.offset += offset;
1754 cfa.reg = HARD_FRAME_POINTER_REGNUM;
1755 }
1756 else
1757 {
1758 gcc_assert (GET_CODE (src) != MINUS);
1759
1760 /* Rule 4 */
1761 if (REG_P (XEXP (src, 0))
1762 && REGNO (XEXP (src, 0)) == cfa.reg
1763 && CONST_INT_P (XEXP (src, 1)))
1764 {
1765 /* Setting a temporary CFA register that will be copied
1766 into the FP later on. */
1767 offset = - INTVAL (XEXP (src, 1));
1768 cfa.offset += offset;
1769 cfa.reg = REGNO (dest);
1770 /* Or used to save regs to the stack. */
1771 cfa_temp.reg = cfa.reg;
1772 cfa_temp.offset = cfa.offset;
1773 }
1774
1775 /* Rule 5 */
1776 else if (REG_P (XEXP (src, 0))
1777 && REGNO (XEXP (src, 0)) == cfa_temp.reg
1778 && XEXP (src, 1) == stack_pointer_rtx)
1779 {
1780 /* Setting a scratch register that we will use instead
1781 of SP for saving registers to the stack. */
1782 gcc_assert (cfa.reg == STACK_POINTER_REGNUM);
1783 cfa_store.reg = REGNO (dest);
1784 cfa_store.offset = cfa.offset - cfa_temp.offset;
1785 }
1786
1787 /* Rule 9 */
1788 else if (GET_CODE (src) == LO_SUM
1789 && CONST_INT_P (XEXP (src, 1)))
1790 {
1791 cfa_temp.reg = REGNO (dest);
1792 cfa_temp.offset = INTVAL (XEXP (src, 1));
1793 }
1794 else
1795 gcc_unreachable ();
1796 }
1797 break;
1798
1799 /* Rule 6 */
1800 case CONST_INT:
1801 cfa_temp.reg = REGNO (dest);
1802 cfa_temp.offset = INTVAL (src);
1803 break;
1804
1805 /* Rule 7 */
1806 case IOR:
1807 gcc_assert (REG_P (XEXP (src, 0))
1808 && (unsigned) REGNO (XEXP (src, 0)) == cfa_temp.reg
1809 && CONST_INT_P (XEXP (src, 1)));
1810
1811 if ((unsigned) REGNO (dest) != cfa_temp.reg)
1812 cfa_temp.reg = REGNO (dest);
1813 cfa_temp.offset |= INTVAL (XEXP (src, 1));
1814 break;
1815
1816 /* Skip over HIGH, assuming it will be followed by a LO_SUM,
1817 which will fill in all of the bits. */
1818 /* Rule 8 */
1819 case HIGH:
1820 break;
1821
1822 /* Rule 15 */
1823 case UNSPEC:
1824 case UNSPEC_VOLATILE:
1825 /* All unspecs should be represented by REG_CFA_* notes. */
1826 gcc_unreachable ();
1827 return;
1828
1829 /* Rule 16 */
1830 case AND:
1831 /* If this AND operation happens on the stack pointer in the
1832 prologue, we assume the stack is realigned and we extract the
1833 alignment. */
1834 if (fde && XEXP (src, 0) == stack_pointer_rtx)
1835 {
1836 /* We interpret reg_save differently with stack_realign set.
1837 Thus we must flush whatever we have queued first. */
1838 dwarf2out_flush_queued_reg_saves ();
1839
1840 gcc_assert (cfa_store.reg == REGNO (XEXP (src, 0)));
1841 fde->stack_realign = 1;
1842 fde->stack_realignment = INTVAL (XEXP (src, 1));
1843 cfa_store.offset = 0;
1844
1845 if (cfa.reg != STACK_POINTER_REGNUM
1846 && cfa.reg != HARD_FRAME_POINTER_REGNUM)
1847 fde->drap_reg = cfa.reg;
1848 }
1849 return;
1850
1851 default:
1852 gcc_unreachable ();
1853 }
1854
1855 def_cfa_1 (&cfa);
1856 break;
1857
1858 case MEM:
1859
1860 /* Saving a register to the stack. Make sure dest is relative to the
1861 CFA register. */
1862 switch (GET_CODE (XEXP (dest, 0)))
1863 {
1864 /* Rule 10 */
1865 /* With a push. */
1866 case PRE_MODIFY:
1867 case POST_MODIFY:
1868 /* We can't handle variable size modifications. */
1869 gcc_assert (GET_CODE (XEXP (XEXP (XEXP (dest, 0), 1), 1))
1870 == CONST_INT);
1871 offset = -INTVAL (XEXP (XEXP (XEXP (dest, 0), 1), 1));
1872
1873 gcc_assert (REGNO (XEXP (XEXP (dest, 0), 0)) == STACK_POINTER_REGNUM
1874 && cfa_store.reg == STACK_POINTER_REGNUM);
1875
1876 cfa_store.offset += offset;
1877 if (cfa.reg == STACK_POINTER_REGNUM)
1878 cfa.offset = cfa_store.offset;
1879
1880 if (GET_CODE (XEXP (dest, 0)) == POST_MODIFY)
1881 offset -= cfa_store.offset;
1882 else
1883 offset = -cfa_store.offset;
1884 break;
1885
1886 /* Rule 11 */
1887 case PRE_INC:
1888 case PRE_DEC:
1889 case POST_DEC:
1890 offset = GET_MODE_SIZE (GET_MODE (dest));
1891 if (GET_CODE (XEXP (dest, 0)) == PRE_INC)
1892 offset = -offset;
1893
1894 gcc_assert ((REGNO (XEXP (XEXP (dest, 0), 0))
1895 == STACK_POINTER_REGNUM)
1896 && cfa_store.reg == STACK_POINTER_REGNUM);
1897
1898 cfa_store.offset += offset;
1899
1900 /* Rule 18: If the stack has been realigned, we will use FP as a
1901 reference to represent the address of the stored
1902 register. */
1903 if (fde
1904 && fde->stack_realign
1905 && src == hard_frame_pointer_rtx)
1906 {
1907 gcc_assert (cfa.reg != HARD_FRAME_POINTER_REGNUM);
1908 cfa_store.offset = 0;
1909 }
1910
1911 if (cfa.reg == STACK_POINTER_REGNUM)
1912 cfa.offset = cfa_store.offset;
1913
1914 if (GET_CODE (XEXP (dest, 0)) == POST_DEC)
1915 offset += -cfa_store.offset;
1916 else
1917 offset = -cfa_store.offset;
1918 break;
1919
1920 /* Rule 12 */
1921 /* With an offset. */
1922 case PLUS:
1923 case MINUS:
1924 case LO_SUM:
1925 {
1926 int regno;
1927
1928 gcc_assert (CONST_INT_P (XEXP (XEXP (dest, 0), 1))
1929 && REG_P (XEXP (XEXP (dest, 0), 0)));
1930 offset = INTVAL (XEXP (XEXP (dest, 0), 1));
1931 if (GET_CODE (XEXP (dest, 0)) == MINUS)
1932 offset = -offset;
1933
1934 regno = REGNO (XEXP (XEXP (dest, 0), 0));
1935
1936 if (cfa.reg == (unsigned) regno)
1937 offset -= cfa.offset;
1938 else if (cfa_store.reg == (unsigned) regno)
1939 offset -= cfa_store.offset;
1940 else
1941 {
1942 gcc_assert (cfa_temp.reg == (unsigned) regno);
1943 offset -= cfa_temp.offset;
1944 }
1945 }
1946 break;
1947
1948 /* Rule 13 */
1949 /* Without an offset. */
1950 case REG:
1951 {
1952 int regno = REGNO (XEXP (dest, 0));
1953
1954 if (cfa.reg == (unsigned) regno)
1955 offset = -cfa.offset;
1956 else if (cfa_store.reg == (unsigned) regno)
1957 offset = -cfa_store.offset;
1958 else
1959 {
1960 gcc_assert (cfa_temp.reg == (unsigned) regno);
1961 offset = -cfa_temp.offset;
1962 }
1963 }
1964 break;
1965
1966 /* Rule 14 */
1967 case POST_INC:
1968 gcc_assert (cfa_temp.reg
1969 == (unsigned) REGNO (XEXP (XEXP (dest, 0), 0)));
1970 offset = -cfa_temp.offset;
1971 cfa_temp.offset -= GET_MODE_SIZE (GET_MODE (dest));
1972 break;
1973
1974 default:
1975 gcc_unreachable ();
1976 }
1977
1978 /* Rule 17 */
1979 /* If the source operand of this MEM operation is also a memory
1980 reference, we only care how much the stack grew. */
1981 if (MEM_P (src))
1982 break;
1983
1984 if (REG_P (src)
1985 && REGNO (src) != STACK_POINTER_REGNUM
1986 && REGNO (src) != HARD_FRAME_POINTER_REGNUM
1987 && (unsigned) REGNO (src) == cfa.reg)
1988 {
1989 /* We're storing the current CFA reg into the stack. */
1990
1991 if (cfa.offset == 0)
1992 {
1993 /* Rule 19 */
1994 /* If the stack is realigned, putting the CFA reg into the stack
1995 means we can no longer use reg + offset to represent the CFA.
1996 Here we use DW_CFA_def_cfa_expression instead. The
1997 result of this expression equals the original CFA
1998 value. */
1999 if (fde
2000 && fde->stack_realign
2001 && cfa.indirect == 0
2002 && cfa.reg != HARD_FRAME_POINTER_REGNUM)
2003 {
2004 dw_cfa_location cfa_exp;
2005
2006 gcc_assert (fde->drap_reg == cfa.reg);
2007
2008 cfa_exp.indirect = 1;
2009 cfa_exp.reg = HARD_FRAME_POINTER_REGNUM;
2010 cfa_exp.base_offset = offset;
2011 cfa_exp.offset = 0;
2012
2013 fde->drap_reg_saved = 1;
2014
2015 def_cfa_1 (&cfa_exp);
2016 break;
2017 }
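/* The expression built above evaluates, roughly, as DW_OP_breg<FP>
   <offset>, DW_OP_deref: the unwinder reloads the original CFA value
   from the stack slot in which the DRAP register was just saved.  */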
2018
2019 /* If the source register is exactly the CFA, assume
2020 we're saving SP like any other register; this happens
2021 on the ARM. */
2022 def_cfa_1 (&cfa);
2023 queue_reg_save (stack_pointer_rtx, NULL_RTX, offset);
2024 break;
2025 }
2026 else
2027 {
2028 /* Otherwise, we'll need to look in the stack to
2029 calculate the CFA. */
2030 rtx x = XEXP (dest, 0);
2031
2032 if (!REG_P (x))
2033 x = XEXP (x, 0);
2034 gcc_assert (REG_P (x));
2035
2036 cfa.reg = REGNO (x);
2037 cfa.base_offset = offset;
2038 cfa.indirect = 1;
2039 def_cfa_1 (&cfa);
2040 break;
2041 }
2042 }
2043
2044 def_cfa_1 (&cfa);
2045
2046 span = NULL;
2047 if (REG_P (src))
2048 span = targetm.dwarf_register_span (src);
2049 if (!span)
2050 queue_reg_save (src, NULL_RTX, offset);
2051 else
2052 {
2053 /* We have a PARALLEL describing where the contents of SRC live.
2054 Queue register saves for each piece of the PARALLEL. */
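/* As an illustration: a hypothetical target whose
   dwarf_register_span hook splits a 64-bit register into two 32-bit
   DWARF registers would return (parallel [(reg:SI hi) (reg:SI lo)]),
   and the loop below then queues one save per piece, at OFFSET and
   OFFSET + 4.  */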
2055 int par_index;
2056 int limit;
2057 HOST_WIDE_INT span_offset = offset;
2058
2059 gcc_assert (GET_CODE (span) == PARALLEL);
2060
2061 limit = XVECLEN (span, 0);
2062 for (par_index = 0; par_index < limit; par_index++)
2063 {
2064 rtx elem = XVECEXP (span, 0, par_index);
2065 queue_reg_save (elem, NULL_RTX, span_offset);
2066 span_offset += GET_MODE_SIZE (GET_MODE (elem));
2067 }
2068 }
2069 break;
2070
2071 default:
2072 gcc_unreachable ();
2073 }
2074 }
2075
2076 /* Record call frame debugging information for INSN, which either
2077 sets SP or FP (adjusting how we calculate the frame address) or saves a
2078 register to the stack.
2079
2080 If AFTER_P is false, we're being called before the insn is emitted,
2081 otherwise after. Call instructions are processed twice, before and after. */
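/* As a sketch of the note handling below: a backend whose prologue
   insn is too complex to analyze directly can attach a simplified rtx
   in a REG_FRAME_RELATED_EXPR note, e.g.
     (expr_list:REG_FRAME_RELATED_EXPR (set (reg fp) (reg sp)) ...)
   and it is that rtx, rather than the insn pattern, that gets passed
   on to dwarf2out_frame_debug_expr.  */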
2082
2083 static void
2084 dwarf2out_frame_debug (rtx insn, bool after_p)
2085 {
2086 rtx note, n;
2087 bool handled_one = false;
2088 bool need_flush = false;
2089
2090 if (!NONJUMP_INSN_P (insn) || clobbers_queued_reg_save (insn))
2091 dwarf2out_flush_queued_reg_saves ();
2092
2093 if (!RTX_FRAME_RELATED_P (insn))
2094 {
2095 /* ??? This should be done unconditionally since stack adjustments
2096 matter if the stack pointer is not the CFA register anymore but
2097 is still used to save registers. */
2098 if (!ACCUMULATE_OUTGOING_ARGS)
2099 dwarf2out_notice_stack_adjust (insn, after_p);
2100 return;
2101 }
2102
2103 any_cfis_emitted = false;
2104
2105 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
2106 switch (REG_NOTE_KIND (note))
2107 {
2108 case REG_FRAME_RELATED_EXPR:
2109 insn = XEXP (note, 0);
2110 goto do_frame_expr;
2111
2112 case REG_CFA_DEF_CFA:
2113 dwarf2out_frame_debug_def_cfa (XEXP (note, 0));
2114 handled_one = true;
2115 break;
2116
2117 case REG_CFA_ADJUST_CFA:
2118 n = XEXP (note, 0);
2119 if (n == NULL)
2120 {
2121 n = PATTERN (insn);
2122 if (GET_CODE (n) == PARALLEL)
2123 n = XVECEXP (n, 0, 0);
2124 }
2125 dwarf2out_frame_debug_adjust_cfa (n);
2126 handled_one = true;
2127 break;
2128
2129 case REG_CFA_OFFSET:
2130 n = XEXP (note, 0);
2131 if (n == NULL)
2132 n = single_set (insn);
2133 dwarf2out_frame_debug_cfa_offset (n);
2134 handled_one = true;
2135 break;
2136
2137 case REG_CFA_REGISTER:
2138 n = XEXP (note, 0);
2139 if (n == NULL)
2140 {
2141 n = PATTERN (insn);
2142 if (GET_CODE (n) == PARALLEL)
2143 n = XVECEXP (n, 0, 0);
2144 }
2145 dwarf2out_frame_debug_cfa_register (n);
2146 handled_one = true;
2147 break;
2148
2149 case REG_CFA_EXPRESSION:
2150 n = XEXP (note, 0);
2151 if (n == NULL)
2152 n = single_set (insn);
2153 dwarf2out_frame_debug_cfa_expression (n);
2154 handled_one = true;
2155 break;
2156
2157 case REG_CFA_RESTORE:
2158 n = XEXP (note, 0);
2159 if (n == NULL)
2160 {
2161 n = PATTERN (insn);
2162 if (GET_CODE (n) == PARALLEL)
2163 n = XVECEXP (n, 0, 0);
2164 n = XEXP (n, 0);
2165 }
2166 dwarf2out_frame_debug_cfa_restore (n);
2167 handled_one = true;
2168 break;
2169
2170 case REG_CFA_SET_VDRAP:
2171 n = XEXP (note, 0);
2172 if (REG_P (n))
2173 {
2174 dw_fde_ref fde = cfun->fde;
2175 if (fde)
2176 {
2177 gcc_assert (fde->vdrap_reg == INVALID_REGNUM);
2178 fde->vdrap_reg = REGNO (n);
2180 }
2181 }
2182 handled_one = true;
2183 break;
2184
2185 case REG_CFA_WINDOW_SAVE:
2186 dwarf2out_frame_debug_cfa_window_save ();
2187 handled_one = true;
2188 break;
2189
2190 case REG_CFA_FLUSH_QUEUE:
2191 /* The actual flush happens below. */
2192 need_flush = true;
2193 handled_one = true;
2194 break;
2195
2196 default:
2197 break;
2198 }
2199
2200 if (handled_one)
2201 {
2202 /* Minimize the number of advances by emitting the entire queue
2203 once anything is emitted. */
2204 need_flush |= any_cfis_emitted;
2205 }
2206 else
2207 {
2208 insn = PATTERN (insn);
2209 do_frame_expr:
2210 dwarf2out_frame_debug_expr (insn);
2211
2212 /* Check again. A parallel can save and update the same register.
2213 We could probably check just once, here, but this is safer than
2214 removing the check at the start of the function. */
2215 if (any_cfis_emitted || clobbers_queued_reg_save (insn))
2216 need_flush = true;
2217 }
2218
2219 if (need_flush)
2220 dwarf2out_flush_queued_reg_saves ();
2221 }
2222
2223 /* Examine CFI and return true if a cfi label and set_loc are needed
2224 beforehand. Even when generating CFI assembler instructions, we
2225 still have to add the cfi to the list so that lookup_cfa_1 works
2226 later on. At -g2 and above we even need to force emission of CFI
2227 labels and add a DW_CFA_set_loc to the list, for
2228 convert_cfa_to_fb_loc_list purposes. When generating DWARF3 output
2229 we use DW_OP_call_frame_cfa and so don't use convert_cfa_to_fb_loc_list. */
2230
2231 static bool
2232 cfi_label_required_p (dw_cfi_ref cfi)
2233 {
2234 if (!dwarf2out_do_cfi_asm ())
2235 return true;
2236
2237 if (dwarf_version == 2
2238 && debug_info_level > DINFO_LEVEL_TERSE
2239 && (write_symbols == DWARF2_DEBUG
2240 || write_symbols == VMS_AND_DWARF2_DEBUG))
2241 {
2242 switch (cfi->dw_cfi_opc)
2243 {
2244 case DW_CFA_def_cfa_offset:
2245 case DW_CFA_def_cfa_offset_sf:
2246 case DW_CFA_def_cfa_register:
2247 case DW_CFA_def_cfa:
2248 case DW_CFA_def_cfa_sf:
2249 case DW_CFA_def_cfa_expression:
2250 case DW_CFA_restore_state:
2251 return true;
2252 default:
2253 return false;
2254 }
2255 }
2256 return false;
2257 }
2258
2259 /* Walk the function, looking for NOTE_INSN_CFI notes. Add the CFIs to the
2260 function's FDE, adding CFI labels and set_loc/advance_loc opcodes as
2261 necessary. */
2262 static void
2263 add_cfis_to_fde (void)
2264 {
2265 dw_fde_ref fde = cfun->fde;
2266 rtx insn, next;
2267 /* We start at the function_begin label; no set_loc is needed yet. */
2268 bool first = false;
2269
2270 for (insn = get_insns (); insn; insn = next)
2271 {
2272 next = NEXT_INSN (insn);
2273
2274 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
2275 {
2276 /* Don't attempt to advance_loc4 between labels
2277 in different sections. */
2278 first = true;
2279 }
2280
2281 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_CFI)
2282 {
2283 bool required = cfi_label_required_p (NOTE_CFI (insn));
2284 while (next && NOTE_P (next) && NOTE_KIND (next) == NOTE_INSN_CFI)
2285 {
2286 required |= cfi_label_required_p (NOTE_CFI (next));
2287 next = NEXT_INSN (next);
2288 }
2289 if (required)
2290 {
2291 int num = dwarf2out_cfi_label_num;
2292 const char *label = dwarf2out_cfi_label ();
2293 dw_cfi_ref xcfi;
2294 rtx tmp;
2295
2296 /* Set the location counter to the new label. */
2297 xcfi = new_cfi ();
2298 xcfi->dw_cfi_opc = (first ? DW_CFA_set_loc
2299 : DW_CFA_advance_loc4);
2300 xcfi->dw_cfi_oprnd1.dw_cfi_addr = label;
2301 VEC_safe_push (dw_cfi_ref, gc, fde->dw_fde_cfi, xcfi);
2302
2303 tmp = emit_note_before (NOTE_INSN_CFI_LABEL, insn);
2304 NOTE_LABEL_NUMBER (tmp) = num;
2305 }
2306
2307 do
2308 {
2309 VEC_safe_push (dw_cfi_ref, gc, fde->dw_fde_cfi, NOTE_CFI (insn));
2310 insn = NEXT_INSN (insn);
2311 }
2312 while (insn != next);
2313 first = false;
2314 }
2315 }
2316 }
2317
2318 /* Scan the function and create the initial set of CFI notes. */
2319
2320 static void
2321 create_cfi_notes (void)
2322 {
2323 rtx insn;
2324
2325 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
2326 {
2327 rtx pat;
2328
2329 cfi_insn = PREV_INSN (insn);
2330
2331 if (BARRIER_P (insn))
2332 {
2333 dwarf2out_frame_debug (insn, false);
2334 continue;
2335 }
2336
2337 if (NOTE_P (insn))
2338 {
2339 switch (NOTE_KIND (insn))
2340 {
2341 case NOTE_INSN_PROLOGUE_END:
2342 dwarf2out_flush_queued_reg_saves ();
2343 break;
2344
2345 case NOTE_INSN_EPILOGUE_BEG:
2346 #if defined(HAVE_epilogue)
2347 dwarf2out_cfi_begin_epilogue (insn);
2348 #endif
2349 break;
2350
2351 case NOTE_INSN_CFA_RESTORE_STATE:
2352 cfi_insn = insn;
2353 dwarf2out_frame_debug_restore_state ();
2354 break;
2355 }
2356 continue;
2357 }
2358
2359 if (!NONDEBUG_INSN_P (insn))
2360 continue;
2361
2362 pat = PATTERN (insn);
2363 if (asm_noperands (pat) >= 0)
2364 {
2365 dwarf2out_frame_debug (insn, false);
2366 continue;
2367 }
2368
2369 if (GET_CODE (pat) == SEQUENCE)
2370 {
2371 int i, n = XVECLEN (pat, 0);
2372 for (i = 1; i < n; ++i)
2373 dwarf2out_frame_debug (XVECEXP (pat, 0, i), false);
2374 }
2375
2376 if (CALL_P (insn)
2377 || find_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL))
2378 dwarf2out_frame_debug (insn, false);
2379
2380 /* Do not separate tablejump insns from their ADDR_DIFF_VEC.
2381 Putting the note after the VEC should be ok. */
2382 if (!tablejump_p (insn, NULL, &cfi_insn))
2383 cfi_insn = insn;
2384
2385 dwarf2out_frame_debug (insn, true);
2386 }
2387
2388 cfi_insn = NULL;
2389 }
2390
2391 /* Determine if we need to save and restore CFI information around this
2392 epilogue. If SIBCALL is true, then this is a sibcall epilogue. If
2393 we do need to save/restore, then emit the save now, and insert a
2394 NOTE_INSN_CFA_RESTORE_STATE at the appropriate place in the stream. */
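/* Assuming the port annotates its epilogue, the CFI produced around
   it pairs up as
     DW_CFA_remember_state (via emit_cfa_remember, set below)
     ... epilogue CFI ...
     DW_CFA_restore_state (from the NOTE_INSN_CFA_RESTORE_STATE note)
   so the unwind rows after the epilogue resume from the saved state.  */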
2395
2396 static void
2397 dwarf2out_cfi_begin_epilogue (rtx insn)
2398 {
2399 bool saw_frp = false;
2400 rtx i;
2401
2402 /* Scan forward to the return insn, noticing whether there are
2403 possible frame-related insns on the way. */
2404 for (i = NEXT_INSN (insn); i ; i = NEXT_INSN (i))
2405 {
2406 if (!INSN_P (i))
2407 continue;
2408
2409 /* Look for both regular and sibcalls to end the block. */
2410 if (returnjump_p (i))
2411 break;
2412 if (CALL_P (i) && SIBLING_CALL_P (i))
2413 break;
2414
2415 if (GET_CODE (PATTERN (i)) == SEQUENCE)
2416 {
2417 int idx;
2418 rtx seq = PATTERN (i);
2419
2420 if (returnjump_p (XVECEXP (seq, 0, 0)))
2421 break;
2422 if (CALL_P (XVECEXP (seq, 0, 0))
2423 && SIBLING_CALL_P (XVECEXP (seq, 0, 0)))
2424 break;
2425
2426 for (idx = 0; idx < XVECLEN (seq, 0); idx++)
2427 if (RTX_FRAME_RELATED_P (XVECEXP (seq, 0, idx)))
2428 saw_frp = true;
2429 }
2430
2431 if (RTX_FRAME_RELATED_P (i))
2432 saw_frp = true;
2433 }
2434
2435 /* If the port doesn't emit epilogue unwind info, we don't need a
2436 save/restore pair. */
2437 if (!saw_frp)
2438 return;
2439
2440 /* Otherwise, check whether any real insn follows the return insn.
2441 If the return ends the function, we don't need save/restore. */
2442 gcc_assert (i != NULL);
2443 i = next_real_insn (i);
2444 if (i == NULL)
2445 return;
2446
2447 /* Insert the restore before that next real insn in the stream, and before
2448 a potential NOTE_INSN_EPILOGUE_BEG -- we do need these notes to be
2449 properly nested. This should be after any label or alignment. This
2450 will be pushed into the CFI stream by the function below. */
2451 while (1)
2452 {
2453 rtx p = PREV_INSN (i);
2454 if (!NOTE_P (p))
2455 break;
2456 if (NOTE_KIND (p) == NOTE_INSN_BASIC_BLOCK)
2457 break;
2458 i = p;
2459 }
2460 emit_note_before (NOTE_INSN_CFA_RESTORE_STATE, i);
2461
2462 emit_cfa_remember = true;
2463
2464 /* And emulate the state save. */
2465 gcc_assert (!cfa_remember.in_use);
2466 cfa_remember = cfa;
2467 old_cfa_remember = old_cfa;
2468 cfa_remember.in_use = 1;
2469 }
2470
2471 /* A "subroutine" of dwarf2out_cfi_begin_epilogue. Emit the restore
2472 required. */
2473
2474 static void
2475 dwarf2out_frame_debug_restore_state (void)
2476 {
2477 dw_cfi_ref cfi = new_cfi ();
2478
2479 cfi->dw_cfi_opc = DW_CFA_restore_state;
2480 add_cfi (cfi);
2481
2482 gcc_assert (cfa_remember.in_use);
2483 cfa = cfa_remember;
2484 old_cfa = old_cfa_remember;
2485 cfa_remember.in_use = 0;
2486 }
2487 \f
2488 /* Record the initial position of the return address. RTL is
2489 INCOMING_RETURN_ADDR_RTX. */
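/* Two illustrative shapes from hypothetical targets: for a return
   address arriving in a link register, (reg lr), we emit a register
   rule mapping the return column to LR's DWARF number; for one pushed
   on the stack, (mem (plus (reg sp) (const_int 4))), we record the
   slot at offset 4 - cfa.offset from the CFA.  */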
2490
2491 static void
2492 initial_return_save (rtx rtl)
2493 {
2494 unsigned int reg = INVALID_REGNUM;
2495 HOST_WIDE_INT offset = 0;
2496
2497 switch (GET_CODE (rtl))
2498 {
2499 case REG:
2500 /* RA is in a register. */
2501 reg = DWARF_FRAME_REGNUM (REGNO (rtl));
2502 break;
2503
2504 case MEM:
2505 /* RA is on the stack. */
2506 rtl = XEXP (rtl, 0);
2507 switch (GET_CODE (rtl))
2508 {
2509 case REG:
2510 gcc_assert (REGNO (rtl) == STACK_POINTER_REGNUM);
2511 offset = 0;
2512 break;
2513
2514 case PLUS:
2515 gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
2516 offset = INTVAL (XEXP (rtl, 1));
2517 break;
2518
2519 case MINUS:
2520 gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
2521 offset = -INTVAL (XEXP (rtl, 1));
2522 break;
2523
2524 default:
2525 gcc_unreachable ();
2526 }
2527
2528 break;
2529
2530 case PLUS:
2531 /* The return address is at some offset from any value we can
2532 actually load. For instance, on the SPARC it is in %i7+8. Just
2533 ignore the offset for now; it doesn't matter for unwinding frames. */
2534 gcc_assert (CONST_INT_P (XEXP (rtl, 1)));
2535 initial_return_save (XEXP (rtl, 0));
2536 return;
2537
2538 default:
2539 gcc_unreachable ();
2540 }
2541
2542 if (reg != DWARF_FRAME_RETURN_COLUMN)
2543 {
2544 if (reg != INVALID_REGNUM)
2545 record_reg_saved_in_reg (rtl, pc_rtx);
2546 reg_save (DWARF_FRAME_RETURN_COLUMN, reg, offset - cfa.offset);
2547 }
2548 }
2549
2550 /* Annotate the function with NOTE_INSN_CFI notes to record the CFI
2551 state at each location within the function. These notes will be
2552 emitted during pass_final. */
2553
2554 static unsigned int
2555 execute_dwarf2_frame (void)
2556 {
2557 /* The first time we're called, compute the incoming frame state. */
2558 if (cie_cfi_vec == NULL)
2559 {
2560 dw_cfa_location loc;
2561
2562 add_cfi_vec = &cie_cfi_vec;
2563
2564 memset (&old_cfa, 0, sizeof (old_cfa));
2565 old_cfa.reg = INVALID_REGNUM;
2566
2567 /* On entry, the Canonical Frame Address is at SP. */
2568 memset (&loc, 0, sizeof (loc));
2569 loc.reg = STACK_POINTER_REGNUM;
2570 loc.offset = INCOMING_FRAME_SP_OFFSET;
2571 def_cfa_1 (&loc);
2572
2573 if (targetm.debug_unwind_info () == UI_DWARF2
2574 || targetm_common.except_unwind_info (&global_options) == UI_DWARF2)
2575 {
2576 initial_return_save (INCOMING_RETURN_ADDR_RTX);
2577
2578 /* For a few targets, we have the return address incoming into a
2579 register, but choose a different return column. This will result
2580 in a DW_CFA_register for the return, and an entry in
2581 regs_saved_in_regs to match. If the target later stores that
2582 return address register to the stack, we want to be able to emit
2583 the DW_CFA_offset against the return column, not the intermediate
2584 save register. Save the contents of regs_saved_in_regs so that
2585 we can re-initialize it at the start of each function. */
2586 switch (VEC_length (reg_saved_in_data, regs_saved_in_regs))
2587 {
2588 case 0:
2589 break;
2590 case 1:
2591 cie_return_save = ggc_alloc_reg_saved_in_data ();
2592 *cie_return_save = *VEC_index (reg_saved_in_data,
2593 regs_saved_in_regs, 0);
2594 regs_saved_in_regs = NULL;
2595 break;
2596 default:
2597 gcc_unreachable ();
2598 }
2599 }
2600
2601 add_cfi_vec = NULL;
2602 }
2603
2604 /* Set up state for generating call frame debug info. */
2605 gcc_checking_assert (queued_reg_saves == NULL);
2606 gcc_checking_assert (regs_saved_in_regs == NULL);
2607
2608 memset (&cfa, 0, sizeof (cfa));
2609 cfa.reg = STACK_POINTER_REGNUM;
2610 cfa.offset = INCOMING_FRAME_SP_OFFSET;
2611
2612 old_cfa = cfa;
2613 cfa_store = cfa;
2614
2615 memset (&cfa_temp, 0, sizeof (cfa_temp));
2616 cfa_temp.reg = INVALID_REGNUM;
2617
2618 if (cie_return_save)
2619 VEC_safe_push (reg_saved_in_data, gc, regs_saved_in_regs, cie_return_save);
2620
2621 dwarf2out_alloc_current_fde ();
2622
2623 /* Do the work. */
2624 create_cfi_notes ();
2625 add_cfis_to_fde ();
2626
2627 /* Reset all function-specific information, particularly for GC. */
2628 XDELETEVEC (barrier_args_size);
2629 barrier_args_size = NULL;
2630 regs_saved_in_regs = NULL;
2631 queued_reg_saves = NULL;
2632
2633 return 0;
2634 }
2635 \f
2636
2637 /* Save the result of dwarf2out_do_cfi_asm across PCH.
2638 This variable is tri-state, with 0 unset, >0 true, <0 false. */
2639 static GTY(()) signed char saved_do_cfi_asm = 0;
2640
2641 /* Decide whether we want to emit frame unwind information for the current
2642 translation unit. */
2643
2644 bool
2645 dwarf2out_do_frame (void)
2646 {
2647 /* We want to emit correct CFA location expressions or lists, so we
2648 have to return true if we're going to output debug info, even if
2649 we're not going to output frame or unwind info. */
2650 if (write_symbols == DWARF2_DEBUG || write_symbols == VMS_AND_DWARF2_DEBUG)
2651 return true;
2652
2653 if (saved_do_cfi_asm > 0)
2654 return true;
2655
2656 if (targetm.debug_unwind_info () == UI_DWARF2)
2657 return true;
2658
2659 if ((flag_unwind_tables || flag_exceptions)
2660 && targetm_common.except_unwind_info (&global_options) == UI_DWARF2)
2661 return true;
2662
2663 return false;
2664 }
2665
2666 /* Decide whether to emit frame unwind via assembler directives. */
2667
2668 bool
2669 dwarf2out_do_cfi_asm (void)
2670 {
2671 int enc;
2672
2673 #ifdef MIPS_DEBUGGING_INFO
2674 return false;
2675 #endif
2676
2677 if (saved_do_cfi_asm != 0)
2678 return saved_do_cfi_asm > 0;
2679
2680 /* Assume failure for a moment. */
2681 saved_do_cfi_asm = -1;
2682
2683 if (!flag_dwarf2_cfi_asm || !dwarf2out_do_frame ())
2684 return false;
2685 if (!HAVE_GAS_CFI_PERSONALITY_DIRECTIVE)
2686 return false;
2687
2688 /* Make sure the personality encoding is one the assembler can support.
2689 In particular, aligned addresses can't be handled. */
2690 enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/2,/*global=*/1);
2691 if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
2692 return false;
2693 enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/0,/*global=*/0);
2694 if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
2695 return false;
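/* E.g. DW_EH_PE_pcrel | DW_EH_PE_sdata4 (0x1b) passes both tests,
   since (0x1b & 0x70) == DW_EH_PE_pcrel, whereas DW_EH_PE_aligned
   (0x50) is rejected.  */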
2696
2697 /* If we can't get the assembler to emit only .debug_frame, and we don't need
2698 dwarf2 unwind info for exceptions, then emit .debug_frame by hand. */
2699 if (!HAVE_GAS_CFI_SECTIONS_DIRECTIVE
2700 && !flag_unwind_tables && !flag_exceptions
2701 && targetm_common.except_unwind_info (&global_options) != UI_DWARF2)
2702 return false;
2703
2704 /* Success! */
2705 saved_do_cfi_asm = 1;
2706 return true;
2707 }
2708
2709 static bool
2710 gate_dwarf2_frame (void)
2711 {
2712 #ifndef HAVE_prologue
2713 /* Targets which still implement the prologue in assembler text
2714 cannot use the generic dwarf2 unwinding. */
2715 return false;
2716 #endif
2717
2718 /* ??? What to do for UI_TARGET unwinding? Such targets might be able
2719 to benefit from the optimized shrink-wrapping annotations that we
2720 will compute. For now, only produce the CFI notes for dwarf2. */
2721 return dwarf2out_do_frame ();
2722 }
2723
2724 struct rtl_opt_pass pass_dwarf2_frame =
2725 {
2726 {
2727 RTL_PASS,
2728 "dwarf2", /* name */
2729 gate_dwarf2_frame, /* gate */
2730 execute_dwarf2_frame, /* execute */
2731 NULL, /* sub */
2732 NULL, /* next */
2733 0, /* static_pass_number */
2734 TV_FINAL, /* tv_id */
2735 0, /* properties_required */
2736 0, /* properties_provided */
2737 0, /* properties_destroyed */
2738 0, /* todo_flags_start */
2739 0 /* todo_flags_finish */
2740 }
2741 };
2742
2743 #include "gt-dwarf2cfi.h"