dwarf2cfi: Flush queued saves at the end of the prologue.
gcc/dwarf2cfi.c
1 /* Dwarf2 Call Frame Information helper routines.
2 Copyright (C) 1992, 1993, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
3 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
4 Free Software Foundation, Inc.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "version.h"
27 #include "flags.h"
28 #include "rtl.h"
29 #include "function.h"
30 #include "dwarf2.h"
31 #include "dwarf2out.h"
32 #include "dwarf2asm.h"
33 #include "ggc.h"
34 #include "tm_p.h"
35 #include "target.h"
36 #include "common/common-target.h"
37 #include "tree-pass.h"
38
39 #include "except.h" /* expand_builtin_dwarf_sp_column */
40 #include "expr.h" /* init_return_column_size */
41 #include "regs.h" /* expand_builtin_init_dwarf_reg_sizes */
42 #include "output.h" /* asm_out_file */
43 #include "debug.h" /* dwarf2out_do_frame, dwarf2out_do_cfi_asm */
44
45
46 /* ??? Poison these here until it can be done generically. They've been
47 totally replaced in this file; make sure it stays that way. */
48 #undef DWARF2_UNWIND_INFO
49 #undef DWARF2_FRAME_INFO
50 #if (GCC_VERSION >= 3000)
51 #pragma GCC poison DWARF2_UNWIND_INFO DWARF2_FRAME_INFO
52 #endif
53
54 #ifndef INCOMING_RETURN_ADDR_RTX
55 #define INCOMING_RETURN_ADDR_RTX (gcc_unreachable (), NULL_RTX)
56 #endif
57
58 /* The size of the target's pointer type. */
59 #ifndef PTR_SIZE
60 #define PTR_SIZE (POINTER_SIZE / BITS_PER_UNIT)
61 #endif
62
63 /* Maximum size (in bytes) of an artificially generated label. */
64 #define MAX_ARTIFICIAL_LABEL_BYTES 30
65
66 /* The size of addresses as they appear in the Dwarf 2 data.
67 Some architectures use word addresses to refer to code locations,
68 but Dwarf 2 info always uses byte addresses. On such machines,
69 Dwarf 2 addresses need to be larger than the architecture's
70 pointers. */
71 #ifndef DWARF2_ADDR_SIZE
72 #define DWARF2_ADDR_SIZE (POINTER_SIZE / BITS_PER_UNIT)
73 #endif
74
75 /* The size in bytes of a DWARF field indicating an offset or length
76 relative to a debug info section, specified to be 4 bytes in the
77 DWARF-2 specification. The SGI/MIPS ABI defines it to be the same
78 as PTR_SIZE. */
79
80 #ifndef DWARF_OFFSET_SIZE
81 #define DWARF_OFFSET_SIZE 4
82 #endif
83
84 /* According to the (draft) DWARF 3 specification, the initial length
85 should either be 4 or 12 bytes. When it's 12 bytes, the first 4
86 bytes are 0xffffffff, followed by the length stored in the next 8
87 bytes.
88
89 However, the SGI/MIPS ABI uses an initial length which is equal to
90 DWARF_OFFSET_SIZE. It is defined (elsewhere) accordingly. */
91
92 #ifndef DWARF_INITIAL_LENGTH_SIZE
93 #define DWARF_INITIAL_LENGTH_SIZE (DWARF_OFFSET_SIZE == 4 ? 4 : 12)
94 #endif
95
96 /* Round SIZE up to the nearest BOUNDARY. */
97 #define DWARF_ROUND(SIZE,BOUNDARY) \
98 ((((SIZE) + (BOUNDARY) - 1) / (BOUNDARY)) * (BOUNDARY))
99
100 /* Offsets recorded in opcodes are a multiple of this alignment factor. */
101 #ifndef DWARF_CIE_DATA_ALIGNMENT
102 #ifdef STACK_GROWS_DOWNWARD
103 #define DWARF_CIE_DATA_ALIGNMENT (-((int) UNITS_PER_WORD))
104 #else
105 #define DWARF_CIE_DATA_ALIGNMENT ((int) UNITS_PER_WORD)
106 #endif
107 #endif
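 
/* For example, on a hypothetical 64-bit target where the stack grows
   downward, the factor is -8: a register saved at CFA-16 is encoded in
   DW_CFA_offset with a factored offset of 2, and consumers multiply the
   encoded offset by the CIE data alignment factor to recover -16.  */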
108
109 /* CIE identifier. */
110 #if HOST_BITS_PER_WIDE_INT >= 64
111 #define DWARF_CIE_ID \
112 (unsigned HOST_WIDE_INT) (DWARF_OFFSET_SIZE == 4 ? DW_CIE_ID : DW64_CIE_ID)
113 #else
114 #define DWARF_CIE_ID DW_CIE_ID
115 #endif
116
117 /* The DWARF 2 CFA column which tracks the return address. Normally this
118 is the column for PC, or the first column after all of the hard
119 registers. */
120 #ifndef DWARF_FRAME_RETURN_COLUMN
121 #ifdef PC_REGNUM
122 #define DWARF_FRAME_RETURN_COLUMN DWARF_FRAME_REGNUM (PC_REGNUM)
123 #else
124 #define DWARF_FRAME_RETURN_COLUMN DWARF_FRAME_REGISTERS
125 #endif
126 #endif
127
128 /* The mapping from gcc register number to DWARF 2 CFA column number. By
129 default, we just provide columns for all registers. */
130 #ifndef DWARF_FRAME_REGNUM
131 #define DWARF_FRAME_REGNUM(REG) DBX_REGISTER_NUMBER (REG)
132 #endif
133
134 /* Map register numbers held in the call frame info that gcc has
135 collected using DWARF_FRAME_REGNUM to those that should be output in
136 .debug_frame and .eh_frame. */
137 #ifndef DWARF2_FRAME_REG_OUT
138 #define DWARF2_FRAME_REG_OUT(REGNO, FOR_EH) (REGNO)
139 #endif
140 \f
141 /* A vector of call frame insns for the CIE. */
142 cfi_vec cie_cfi_vec;
143
144 static GTY(()) unsigned long dwarf2out_cfi_label_num;
145
146 /* The insn after which a new CFI note should be emitted. */
147 static rtx cfi_insn;
148
 150 /* True if remember_state should be emitted before the following CFI directive.  */
150 static bool emit_cfa_remember;
151
152 /* True if any CFI directives were emitted at the current insn. */
153 static bool any_cfis_emitted;
154 \f
155
156 static void dwarf2out_cfi_begin_epilogue (rtx insn);
157 static void dwarf2out_frame_debug_restore_state (void);
158
159 \f
160 /* Hook used by __throw. */
161
162 rtx
163 expand_builtin_dwarf_sp_column (void)
164 {
165 unsigned int dwarf_regnum = DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM);
166 return GEN_INT (DWARF2_FRAME_REG_OUT (dwarf_regnum, 1));
167 }
168
169 /* MEM is a memory reference for the register size table, each element of
170 which has mode MODE. Initialize column C as a return address column. */
171
172 static void
173 init_return_column_size (enum machine_mode mode, rtx mem, unsigned int c)
174 {
175 HOST_WIDE_INT offset = c * GET_MODE_SIZE (mode);
176 HOST_WIDE_INT size = GET_MODE_SIZE (Pmode);
 177   emit_move_insn (adjust_address (mem, mode, offset), gen_int_mode (size, mode));
178 }
179
180 /* Generate code to initialize the register size table. */
181
182 void
183 expand_builtin_init_dwarf_reg_sizes (tree address)
184 {
185 unsigned int i;
186 enum machine_mode mode = TYPE_MODE (char_type_node);
187 rtx addr = expand_normal (address);
188 rtx mem = gen_rtx_MEM (BLKmode, addr);
189 bool wrote_return_column = false;
190
191 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
192 {
193 int rnum = DWARF2_FRAME_REG_OUT (DWARF_FRAME_REGNUM (i), 1);
194
195 if (rnum < DWARF_FRAME_REGISTERS)
196 {
197 HOST_WIDE_INT offset = rnum * GET_MODE_SIZE (mode);
198 enum machine_mode save_mode = reg_raw_mode[i];
199 HOST_WIDE_INT size;
200
201 if (HARD_REGNO_CALL_PART_CLOBBERED (i, save_mode))
202 save_mode = choose_hard_reg_mode (i, 1, true);
203 if (DWARF_FRAME_REGNUM (i) == DWARF_FRAME_RETURN_COLUMN)
204 {
205 if (save_mode == VOIDmode)
206 continue;
207 wrote_return_column = true;
208 }
209 size = GET_MODE_SIZE (save_mode);
210 if (offset < 0)
211 continue;
212
213 emit_move_insn (adjust_address (mem, mode, offset),
214 gen_int_mode (size, mode));
215 }
216 }
217
218 if (!wrote_return_column)
219 init_return_column_size (mode, mem, DWARF_FRAME_RETURN_COLUMN);
220
221 #ifdef DWARF_ALT_FRAME_RETURN_COLUMN
222 init_return_column_size (mode, mem, DWARF_ALT_FRAME_RETURN_COLUMN);
223 #endif
224
225 targetm.init_dwarf_reg_sizes_extra (address);
226 }
227
228 /* Divide OFF by DWARF_CIE_DATA_ALIGNMENT, asserting no remainder. */
229
230 static inline HOST_WIDE_INT
231 div_data_align (HOST_WIDE_INT off)
232 {
233 HOST_WIDE_INT r = off / DWARF_CIE_DATA_ALIGNMENT;
234 gcc_assert (r * DWARF_CIE_DATA_ALIGNMENT == off);
235 return r;
236 }
237
238 /* Return true if we need a signed version of a given opcode
239 (e.g. DW_CFA_offset_extended_sf vs DW_CFA_offset_extended). */
240
241 static inline bool
242 need_data_align_sf_opcode (HOST_WIDE_INT off)
243 {
244 return DWARF_CIE_DATA_ALIGNMENT < 0 ? off > 0 : off < 0;
245 }
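 
/* E.g. with DWARF_CIE_DATA_ALIGNMENT == -8, a save at CFA+8 would
   factor to -1, which the unsigned DW_CFA_offset_extended cannot
   encode; the signed DW_CFA_offset_extended_sf form is used instead.  */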
246
247 /* Return a pointer to a newly allocated Call Frame Instruction. */
248
249 static inline dw_cfi_ref
250 new_cfi (void)
251 {
252 dw_cfi_ref cfi = ggc_alloc_dw_cfi_node ();
253
254 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = 0;
255 cfi->dw_cfi_oprnd2.dw_cfi_reg_num = 0;
256
257 return cfi;
258 }
259
260 /* Generate a new label for the CFI info to refer to. */
261
262 static char *
263 dwarf2out_cfi_label (void)
264 {
265 int num = dwarf2out_cfi_label_num++;
266 char label[20];
267
268 ASM_GENERATE_INTERNAL_LABEL (label, "LCFI", num);
269
270 return xstrdup (label);
271 }
272
273 /* Add CFI to the current fde. */
274
275 static void
276 add_fde_cfi (dw_cfi_ref cfi)
277 {
278 if (emit_cfa_remember)
279 {
280 dw_cfi_ref cfi_remember;
281
282 /* Emit the state save. */
283 emit_cfa_remember = false;
284 cfi_remember = new_cfi ();
285 cfi_remember->dw_cfi_opc = DW_CFA_remember_state;
286 add_fde_cfi (cfi_remember);
287 }
288
289 any_cfis_emitted = true;
290 if (cfi_insn != NULL)
291 {
292 cfi_insn = emit_note_after (NOTE_INSN_CFI, cfi_insn);
293 NOTE_CFI (cfi_insn) = cfi;
294 }
295 else
296 {
297 dw_fde_ref fde = cfun->fde;
298 VEC_safe_push (dw_cfi_ref, gc, fde->dw_fde_cfi, cfi);
299 dwarf2out_emit_cfi (cfi);
300 }
301 }
302
303 static void
304 add_cie_cfi (dw_cfi_ref cfi)
305 {
306 VEC_safe_push (dw_cfi_ref, gc, cie_cfi_vec, cfi);
307 }
308
 309 /* This function fills in a dw_cfa_location structure from a dwarf location
310 descriptor sequence. */
311
312 static void
313 get_cfa_from_loc_descr (dw_cfa_location *cfa, struct dw_loc_descr_struct *loc)
314 {
315 struct dw_loc_descr_struct *ptr;
316 cfa->offset = 0;
317 cfa->base_offset = 0;
318 cfa->indirect = 0;
319 cfa->reg = -1;
320
321 for (ptr = loc; ptr != NULL; ptr = ptr->dw_loc_next)
322 {
323 enum dwarf_location_atom op = ptr->dw_loc_opc;
324
325 switch (op)
326 {
327 case DW_OP_reg0:
328 case DW_OP_reg1:
329 case DW_OP_reg2:
330 case DW_OP_reg3:
331 case DW_OP_reg4:
332 case DW_OP_reg5:
333 case DW_OP_reg6:
334 case DW_OP_reg7:
335 case DW_OP_reg8:
336 case DW_OP_reg9:
337 case DW_OP_reg10:
338 case DW_OP_reg11:
339 case DW_OP_reg12:
340 case DW_OP_reg13:
341 case DW_OP_reg14:
342 case DW_OP_reg15:
343 case DW_OP_reg16:
344 case DW_OP_reg17:
345 case DW_OP_reg18:
346 case DW_OP_reg19:
347 case DW_OP_reg20:
348 case DW_OP_reg21:
349 case DW_OP_reg22:
350 case DW_OP_reg23:
351 case DW_OP_reg24:
352 case DW_OP_reg25:
353 case DW_OP_reg26:
354 case DW_OP_reg27:
355 case DW_OP_reg28:
356 case DW_OP_reg29:
357 case DW_OP_reg30:
358 case DW_OP_reg31:
359 cfa->reg = op - DW_OP_reg0;
360 break;
361 case DW_OP_regx:
362 cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
363 break;
364 case DW_OP_breg0:
365 case DW_OP_breg1:
366 case DW_OP_breg2:
367 case DW_OP_breg3:
368 case DW_OP_breg4:
369 case DW_OP_breg5:
370 case DW_OP_breg6:
371 case DW_OP_breg7:
372 case DW_OP_breg8:
373 case DW_OP_breg9:
374 case DW_OP_breg10:
375 case DW_OP_breg11:
376 case DW_OP_breg12:
377 case DW_OP_breg13:
378 case DW_OP_breg14:
379 case DW_OP_breg15:
380 case DW_OP_breg16:
381 case DW_OP_breg17:
382 case DW_OP_breg18:
383 case DW_OP_breg19:
384 case DW_OP_breg20:
385 case DW_OP_breg21:
386 case DW_OP_breg22:
387 case DW_OP_breg23:
388 case DW_OP_breg24:
389 case DW_OP_breg25:
390 case DW_OP_breg26:
391 case DW_OP_breg27:
392 case DW_OP_breg28:
393 case DW_OP_breg29:
394 case DW_OP_breg30:
395 case DW_OP_breg31:
396 cfa->reg = op - DW_OP_breg0;
397 cfa->base_offset = ptr->dw_loc_oprnd1.v.val_int;
398 break;
399 case DW_OP_bregx:
400 cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
401 cfa->base_offset = ptr->dw_loc_oprnd2.v.val_int;
402 break;
403 case DW_OP_deref:
404 cfa->indirect = 1;
405 break;
406 case DW_OP_plus_uconst:
407 cfa->offset = ptr->dw_loc_oprnd1.v.val_unsigned;
408 break;
409 default:
410 gcc_unreachable ();
411 }
412 }
413 }
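 
/* For instance, the descriptor sequence
     DW_OP_breg7 8; DW_OP_deref; DW_OP_plus_uconst 16
   yields reg = 7, base_offset = 8, indirect = 1, offset = 16, i.e. the
   CFA is *(reg7 + 8) + 16.  This is the inverse of what build_cfa_loc
   produces for an indirect dw_cfa_location.  */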
414
415 /* Subroutine of lookup_cfa. */
416
417 void
418 lookup_cfa_1 (dw_cfi_ref cfi, dw_cfa_location *loc, dw_cfa_location *remember)
419 {
420 switch (cfi->dw_cfi_opc)
421 {
422 case DW_CFA_def_cfa_offset:
423 case DW_CFA_def_cfa_offset_sf:
424 loc->offset = cfi->dw_cfi_oprnd1.dw_cfi_offset;
425 break;
426 case DW_CFA_def_cfa_register:
427 loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
428 break;
429 case DW_CFA_def_cfa:
430 case DW_CFA_def_cfa_sf:
431 loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
432 loc->offset = cfi->dw_cfi_oprnd2.dw_cfi_offset;
433 break;
434 case DW_CFA_def_cfa_expression:
435 get_cfa_from_loc_descr (loc, cfi->dw_cfi_oprnd1.dw_cfi_loc);
436 break;
437
438 case DW_CFA_remember_state:
439 gcc_assert (!remember->in_use);
440 *remember = *loc;
441 remember->in_use = 1;
442 break;
443 case DW_CFA_restore_state:
444 gcc_assert (remember->in_use);
445 *loc = *remember;
446 remember->in_use = 0;
447 break;
448
449 default:
450 break;
451 }
452 }
453
454 /* Find the previous value for the CFA. */
455
456 static void
457 lookup_cfa (dw_cfa_location *loc)
458 {
459 int ix;
460 dw_cfi_ref cfi;
461 dw_fde_ref fde;
462 dw_cfa_location remember;
463
464 memset (loc, 0, sizeof (*loc));
465 loc->reg = INVALID_REGNUM;
466 remember = *loc;
467
468 FOR_EACH_VEC_ELT (dw_cfi_ref, cie_cfi_vec, ix, cfi)
469 lookup_cfa_1 (cfi, loc, &remember);
470
471 fde = cfun->fde;
472 if (fde)
473 FOR_EACH_VEC_ELT (dw_cfi_ref, fde->dw_fde_cfi, ix, cfi)
474 lookup_cfa_1 (cfi, loc, &remember);
475 }
476
477 /* The current rule for calculating the DWARF2 canonical frame address. */
478 static dw_cfa_location cfa;
479
480 /* A copy of the CFA, for comparison purposes. */
481 static dw_cfa_location old_cfa;
482
483 /* The register used for saving registers to the stack, and its offset
484 from the CFA. */
485 static dw_cfa_location cfa_store;
486
487 /* The current save location around an epilogue. */
488 static dw_cfa_location cfa_remember;
489
490 /* Like cfa_remember, but a copy of old_cfa. */
491 static dw_cfa_location old_cfa_remember;
492
493 /* The running total of the size of arguments pushed onto the stack. */
494 static HOST_WIDE_INT args_size;
495
496 /* The last args_size we actually output. */
497 static HOST_WIDE_INT old_args_size;
498
499 /* Determine if two dw_cfa_location structures define the same data. */
500
501 bool
502 cfa_equal_p (const dw_cfa_location *loc1, const dw_cfa_location *loc2)
503 {
504 return (loc1->reg == loc2->reg
505 && loc1->offset == loc2->offset
506 && loc1->indirect == loc2->indirect
507 && (loc1->indirect == 0
508 || loc1->base_offset == loc2->base_offset));
509 }
510
511 /* This routine does the actual work. The CFA is now calculated from
512 the dw_cfa_location structure. */
513
514 static void
515 def_cfa_1 (bool for_cie, dw_cfa_location *loc_p)
516 {
517 dw_cfi_ref cfi;
518 dw_cfa_location loc;
519
520 cfa = *loc_p;
521 loc = *loc_p;
522
523 if (cfa_store.reg == loc.reg && loc.indirect == 0)
524 cfa_store.offset = loc.offset;
525
526 loc.reg = DWARF_FRAME_REGNUM (loc.reg);
527
528 /* If nothing changed, no need to issue any call frame instructions. */
529 if (cfa_equal_p (&loc, &old_cfa))
530 return;
531
532 cfi = new_cfi ();
533
534 if (loc.reg == old_cfa.reg && !loc.indirect && !old_cfa.indirect)
535 {
536 /* Construct a "DW_CFA_def_cfa_offset <offset>" instruction, indicating
537 the CFA register did not change but the offset did. The data
538 factoring for DW_CFA_def_cfa_offset_sf happens in output_cfi, or
539 in the assembler via the .cfi_def_cfa_offset directive. */
540 if (loc.offset < 0)
541 cfi->dw_cfi_opc = DW_CFA_def_cfa_offset_sf;
542 else
543 cfi->dw_cfi_opc = DW_CFA_def_cfa_offset;
544 cfi->dw_cfi_oprnd1.dw_cfi_offset = loc.offset;
545 }
546
547 #ifndef MIPS_DEBUGGING_INFO /* SGI dbx thinks this means no offset. */
548 else if (loc.offset == old_cfa.offset
549 && old_cfa.reg != INVALID_REGNUM
550 && !loc.indirect
551 && !old_cfa.indirect)
552 {
553 /* Construct a "DW_CFA_def_cfa_register <register>" instruction,
554 indicating the CFA register has changed to <register> but the
555 offset has not changed. */
556 cfi->dw_cfi_opc = DW_CFA_def_cfa_register;
557 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = loc.reg;
558 }
559 #endif
560
561 else if (loc.indirect == 0)
562 {
563 /* Construct a "DW_CFA_def_cfa <register> <offset>" instruction,
564 indicating the CFA register has changed to <register> with
565 the specified offset. The data factoring for DW_CFA_def_cfa_sf
566 happens in output_cfi, or in the assembler via the .cfi_def_cfa
567 directive. */
568 if (loc.offset < 0)
569 cfi->dw_cfi_opc = DW_CFA_def_cfa_sf;
570 else
571 cfi->dw_cfi_opc = DW_CFA_def_cfa;
572 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = loc.reg;
573 cfi->dw_cfi_oprnd2.dw_cfi_offset = loc.offset;
574 }
575 else
576 {
577 /* Construct a DW_CFA_def_cfa_expression instruction to
578 calculate the CFA using a full location expression since no
579 register-offset pair is available. */
580 struct dw_loc_descr_struct *loc_list;
581
582 cfi->dw_cfi_opc = DW_CFA_def_cfa_expression;
583 loc_list = build_cfa_loc (&loc, 0);
584 cfi->dw_cfi_oprnd1.dw_cfi_loc = loc_list;
585 }
586
587 if (for_cie)
588 add_cie_cfi (cfi);
589 else
590 add_fde_cfi (cfi);
591 old_cfa = loc;
592 }
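 
/* To illustrate the opcode selection above: going from "sp + 16" to
   "sp + 24" emits just DW_CFA_def_cfa_offset 24; going from "sp + 16"
   to "fp + 16" emits DW_CFA_def_cfa_register (except with
   MIPS_DEBUGGING_INFO); changing both register and offset at once falls
   through to a full DW_CFA_def_cfa.  */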
593
594 /* Add the CFI for saving a register. REG is the CFA column number.
 595    If SREG is INVALID_REGNUM, the register is saved at OFFSET from the CFA;
596 otherwise it is saved in SREG. */
597
598 static void
599 reg_save (bool for_cie, unsigned int reg, unsigned int sreg,
600 HOST_WIDE_INT offset)
601 {
602 dw_fde_ref fde = for_cie ? NULL : cfun->fde;
603 dw_cfi_ref cfi = new_cfi ();
604
605 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
606
607 /* When stack is aligned, store REG using DW_CFA_expression with FP. */
608 if (fde
609 && fde->stack_realign
610 && sreg == INVALID_REGNUM)
611 {
612 cfi->dw_cfi_opc = DW_CFA_expression;
613 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
614 cfi->dw_cfi_oprnd2.dw_cfi_loc
615 = build_cfa_aligned_loc (&cfa, offset, fde->stack_realignment);
616 }
617 else if (sreg == INVALID_REGNUM)
618 {
619 if (need_data_align_sf_opcode (offset))
620 cfi->dw_cfi_opc = DW_CFA_offset_extended_sf;
621 else if (reg & ~0x3f)
622 cfi->dw_cfi_opc = DW_CFA_offset_extended;
623 else
624 cfi->dw_cfi_opc = DW_CFA_offset;
625 cfi->dw_cfi_oprnd2.dw_cfi_offset = offset;
626 }
627 else if (sreg == reg)
628 cfi->dw_cfi_opc = DW_CFA_same_value;
629 else
630 {
631 cfi->dw_cfi_opc = DW_CFA_register;
632 cfi->dw_cfi_oprnd2.dw_cfi_reg_num = sreg;
633 }
634
635 if (for_cie)
636 add_cie_cfi (cfi);
637 else
638 add_fde_cfi (cfi);
639 }
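 
/* For example, with a data alignment factor of -8, saving column 3 at
   CFA-24 emits DW_CFA_offset with the column number encoded in the low
   six bits of the opcode and a factored offset of 3.  Columns above 63
   need DW_CFA_offset_extended, and offsets whose factored value would
   be negative need DW_CFA_offset_extended_sf.  */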
640
641 /* Record the initial position of the return address. RTL is
642 INCOMING_RETURN_ADDR_RTX. */
643
644 static void
645 initial_return_save (rtx rtl)
646 {
647 unsigned int reg = INVALID_REGNUM;
648 HOST_WIDE_INT offset = 0;
649
650 switch (GET_CODE (rtl))
651 {
652 case REG:
653 /* RA is in a register. */
654 reg = DWARF_FRAME_REGNUM (REGNO (rtl));
655 break;
656
657 case MEM:
658 /* RA is on the stack. */
659 rtl = XEXP (rtl, 0);
660 switch (GET_CODE (rtl))
661 {
662 case REG:
663 gcc_assert (REGNO (rtl) == STACK_POINTER_REGNUM);
664 offset = 0;
665 break;
666
667 case PLUS:
668 gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
669 offset = INTVAL (XEXP (rtl, 1));
670 break;
671
672 case MINUS:
673 gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
674 offset = -INTVAL (XEXP (rtl, 1));
675 break;
676
677 default:
678 gcc_unreachable ();
679 }
680
681 break;
682
683 case PLUS:
684 /* The return address is at some offset from any value we can
685 actually load. For instance, on the SPARC it is in %i7+8. Just
686 ignore the offset for now; it doesn't matter for unwinding frames. */
687 gcc_assert (CONST_INT_P (XEXP (rtl, 1)));
688 initial_return_save (XEXP (rtl, 0));
689 return;
690
691 default:
692 gcc_unreachable ();
693 }
694
695 if (reg != DWARF_FRAME_RETURN_COLUMN)
696 reg_save (true, DWARF_FRAME_RETURN_COLUMN, reg, offset - cfa.offset);
697 }
698
699 /* Given a SET, calculate the amount of stack adjustment it
700 contains. */
701
702 static HOST_WIDE_INT
703 stack_adjust_offset (const_rtx pattern, HOST_WIDE_INT cur_args_size,
704 HOST_WIDE_INT cur_offset)
705 {
706 const_rtx src = SET_SRC (pattern);
707 const_rtx dest = SET_DEST (pattern);
708 HOST_WIDE_INT offset = 0;
709 enum rtx_code code;
710
711 if (dest == stack_pointer_rtx)
712 {
713 code = GET_CODE (src);
714
715 /* Assume (set (reg sp) (reg whatever)) sets args_size
716 level to 0. */
717 if (code == REG && src != stack_pointer_rtx)
718 {
719 offset = -cur_args_size;
720 #ifndef STACK_GROWS_DOWNWARD
721 offset = -offset;
722 #endif
723 return offset - cur_offset;
724 }
725
726 if (! (code == PLUS || code == MINUS)
727 || XEXP (src, 0) != stack_pointer_rtx
728 || !CONST_INT_P (XEXP (src, 1)))
729 return 0;
730
731 /* (set (reg sp) (plus (reg sp) (const_int))) */
732 offset = INTVAL (XEXP (src, 1));
733 if (code == PLUS)
734 offset = -offset;
735 return offset;
736 }
737
738 if (MEM_P (src) && !MEM_P (dest))
739 dest = src;
740 if (MEM_P (dest))
741 {
742 /* (set (mem (pre_dec (reg sp))) (foo)) */
743 src = XEXP (dest, 0);
744 code = GET_CODE (src);
745
746 switch (code)
747 {
748 case PRE_MODIFY:
749 case POST_MODIFY:
750 if (XEXP (src, 0) == stack_pointer_rtx)
751 {
752 rtx val = XEXP (XEXP (src, 1), 1);
753 /* We handle only adjustments by constant amount. */
754 gcc_assert (GET_CODE (XEXP (src, 1)) == PLUS
755 && CONST_INT_P (val));
756 offset = -INTVAL (val);
757 break;
758 }
759 return 0;
760
761 case PRE_DEC:
762 case POST_DEC:
763 if (XEXP (src, 0) == stack_pointer_rtx)
764 {
765 offset = GET_MODE_SIZE (GET_MODE (dest));
766 break;
767 }
768 return 0;
769
770 case PRE_INC:
771 case POST_INC:
772 if (XEXP (src, 0) == stack_pointer_rtx)
773 {
774 offset = -GET_MODE_SIZE (GET_MODE (dest));
775 break;
776 }
777 return 0;
778
779 default:
780 return 0;
781 }
782 }
783 else
784 return 0;
785
786 return offset;
787 }
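 
/* For example, (set (reg sp) (plus (reg sp) (const_int -16))) returns
   16 -- sixteen bytes pushed, on a target whose stack grows downward --
   and (set (mem (pre_dec (reg sp))) (reg <x>)) returns the mode size of
   the pushed value.  */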
788
 789 /* Precomputed args_size for CODE_LABELs and BARRIERs preceding them,
790 indexed by INSN_UID. */
791
792 static HOST_WIDE_INT *barrier_args_size;
793
794 /* Helper function for compute_barrier_args_size. Handle one insn. */
795
796 static HOST_WIDE_INT
797 compute_barrier_args_size_1 (rtx insn, HOST_WIDE_INT cur_args_size,
798 VEC (rtx, heap) **next)
799 {
800 HOST_WIDE_INT offset = 0;
801 int i;
802
803 if (! RTX_FRAME_RELATED_P (insn))
804 {
805 if (prologue_epilogue_contains (insn))
806 /* Nothing */;
807 else if (GET_CODE (PATTERN (insn)) == SET)
808 offset = stack_adjust_offset (PATTERN (insn), cur_args_size, 0);
809 else if (GET_CODE (PATTERN (insn)) == PARALLEL
810 || GET_CODE (PATTERN (insn)) == SEQUENCE)
811 {
812 /* There may be stack adjustments inside compound insns. Search
813 for them. */
814 for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
815 if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
816 offset += stack_adjust_offset (XVECEXP (PATTERN (insn), 0, i),
817 cur_args_size, offset);
818 }
819 }
820 else
821 {
822 rtx expr = find_reg_note (insn, REG_FRAME_RELATED_EXPR, NULL_RTX);
823
824 if (expr)
825 {
826 expr = XEXP (expr, 0);
827 if (GET_CODE (expr) == PARALLEL
828 || GET_CODE (expr) == SEQUENCE)
829 for (i = 1; i < XVECLEN (expr, 0); i++)
830 {
831 rtx elem = XVECEXP (expr, 0, i);
832
833 if (GET_CODE (elem) == SET && !RTX_FRAME_RELATED_P (elem))
834 offset += stack_adjust_offset (elem, cur_args_size, offset);
835 }
836 }
837 }
838
839 #ifndef STACK_GROWS_DOWNWARD
840 offset = -offset;
841 #endif
842
843 cur_args_size += offset;
844 if (cur_args_size < 0)
845 cur_args_size = 0;
846
847 if (JUMP_P (insn))
848 {
849 rtx dest = JUMP_LABEL (insn);
850
851 if (dest)
852 {
853 if (barrier_args_size [INSN_UID (dest)] < 0)
854 {
855 barrier_args_size [INSN_UID (dest)] = cur_args_size;
856 VEC_safe_push (rtx, heap, *next, dest);
857 }
858 }
859 }
860
861 return cur_args_size;
862 }
863
864 /* Walk the whole function and compute args_size on BARRIERs. */
865
866 static void
867 compute_barrier_args_size (void)
868 {
869 int max_uid = get_max_uid (), i;
870 rtx insn;
871 VEC (rtx, heap) *worklist, *next, *tmp;
872
873 barrier_args_size = XNEWVEC (HOST_WIDE_INT, max_uid);
874 for (i = 0; i < max_uid; i++)
875 barrier_args_size[i] = -1;
876
877 worklist = VEC_alloc (rtx, heap, 20);
878 next = VEC_alloc (rtx, heap, 20);
879 insn = get_insns ();
880 barrier_args_size[INSN_UID (insn)] = 0;
881 VEC_quick_push (rtx, worklist, insn);
882 for (;;)
883 {
884 while (!VEC_empty (rtx, worklist))
885 {
886 rtx prev, body, first_insn;
887 HOST_WIDE_INT cur_args_size;
888
889 first_insn = insn = VEC_pop (rtx, worklist);
890 cur_args_size = barrier_args_size[INSN_UID (insn)];
891 prev = prev_nonnote_insn (insn);
892 if (prev && BARRIER_P (prev))
893 barrier_args_size[INSN_UID (prev)] = cur_args_size;
894
895 for (; insn; insn = NEXT_INSN (insn))
896 {
897 if (INSN_DELETED_P (insn) || NOTE_P (insn))
898 continue;
899 if (BARRIER_P (insn))
900 break;
901
902 if (LABEL_P (insn))
903 {
904 if (insn == first_insn)
905 continue;
906 else if (barrier_args_size[INSN_UID (insn)] < 0)
907 {
908 barrier_args_size[INSN_UID (insn)] = cur_args_size;
909 continue;
910 }
911 else
912 {
913 /* The insns starting with this label have been
914 already scanned or are in the worklist. */
915 break;
916 }
917 }
918
919 body = PATTERN (insn);
920 if (GET_CODE (body) == SEQUENCE)
921 {
922 HOST_WIDE_INT dest_args_size = cur_args_size;
923 for (i = 1; i < XVECLEN (body, 0); i++)
924 if (INSN_ANNULLED_BRANCH_P (XVECEXP (body, 0, 0))
925 && INSN_FROM_TARGET_P (XVECEXP (body, 0, i)))
926 dest_args_size
927 = compute_barrier_args_size_1 (XVECEXP (body, 0, i),
928 dest_args_size, &next);
929 else
930 cur_args_size
931 = compute_barrier_args_size_1 (XVECEXP (body, 0, i),
932 cur_args_size, &next);
933
934 if (INSN_ANNULLED_BRANCH_P (XVECEXP (body, 0, 0)))
935 compute_barrier_args_size_1 (XVECEXP (body, 0, 0),
936 dest_args_size, &next);
937 else
938 cur_args_size
939 = compute_barrier_args_size_1 (XVECEXP (body, 0, 0),
940 cur_args_size, &next);
941 }
942 else
943 cur_args_size
944 = compute_barrier_args_size_1 (insn, cur_args_size, &next);
945 }
946 }
947
948 if (VEC_empty (rtx, next))
949 break;
950
951 /* Swap WORKLIST with NEXT and truncate NEXT for next iteration. */
952 tmp = next;
953 next = worklist;
954 worklist = tmp;
955 VEC_truncate (rtx, next, 0);
956 }
957
958 VEC_free (rtx, heap, worklist);
959 VEC_free (rtx, heap, next);
960 }
961
962 /* Add a CFI to update the running total of the size of arguments
963 pushed onto the stack. */
964
965 static void
966 dwarf2out_args_size (HOST_WIDE_INT size)
967 {
968 dw_cfi_ref cfi;
969
970 if (size == old_args_size)
971 return;
972
973 old_args_size = size;
974
975 cfi = new_cfi ();
976 cfi->dw_cfi_opc = DW_CFA_GNU_args_size;
977 cfi->dw_cfi_oprnd1.dw_cfi_offset = size;
978 add_fde_cfi (cfi);
979 }
980
981 /* Record a stack adjustment of OFFSET bytes. */
982
983 static void
984 dwarf2out_stack_adjust (HOST_WIDE_INT offset)
985 {
986 if (cfa.reg == STACK_POINTER_REGNUM)
987 cfa.offset += offset;
988
989 if (cfa_store.reg == STACK_POINTER_REGNUM)
990 cfa_store.offset += offset;
991
992 if (ACCUMULATE_OUTGOING_ARGS)
993 return;
994
995 #ifndef STACK_GROWS_DOWNWARD
996 offset = -offset;
997 #endif
998
999 args_size += offset;
1000 if (args_size < 0)
1001 args_size = 0;
1002
1003 def_cfa_1 (false, &cfa);
1004 if (flag_asynchronous_unwind_tables)
1005 dwarf2out_args_size (args_size);
1006 }
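 
/* E.g. a 16-byte push while the CFA is based on sp bumps cfa.offset and
   args_size by 16 and emits DW_CFA_def_cfa_offset (plus
   DW_CFA_GNU_args_size for -fasynchronous-unwind-tables); with
   ACCUMULATE_OUTGOING_ARGS only the internal offsets are updated and no
   CFI is issued here.  */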
1007
1008 /* Check INSN to see if it looks like a push or a stack adjustment, and
1009 make a note of it if it does. EH uses this information to find out
1010 how much extra space it needs to pop off the stack. */
1011
1012 static void
1013 dwarf2out_notice_stack_adjust (rtx insn, bool after_p)
1014 {
1015 HOST_WIDE_INT offset;
1016 int i;
1017
1018 /* Don't handle epilogues at all. Certainly it would be wrong to do so
1019 with this function. Proper support would require all frame-related
1020 insns to be marked, and to be able to handle saving state around
1021 epilogues textually in the middle of the function. */
1022 if (prologue_epilogue_contains (insn))
1023 return;
1024
1025 /* If INSN is an instruction from target of an annulled branch, the
1026 effects are for the target only and so current argument size
1027 shouldn't change at all. */
1028 if (final_sequence
1029 && INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))
1030 && INSN_FROM_TARGET_P (insn))
1031 return;
1032
1033 /* If only calls can throw, and we have a frame pointer,
1034 save up adjustments until we see the CALL_INSN. */
1035 if (!flag_asynchronous_unwind_tables && cfa.reg != STACK_POINTER_REGNUM)
1036 {
1037 if (CALL_P (insn) && !after_p)
1038 {
1039 /* Extract the size of the args from the CALL rtx itself. */
1040 insn = PATTERN (insn);
1041 if (GET_CODE (insn) == PARALLEL)
1042 insn = XVECEXP (insn, 0, 0);
1043 if (GET_CODE (insn) == SET)
1044 insn = SET_SRC (insn);
1045 gcc_assert (GET_CODE (insn) == CALL);
1046 dwarf2out_args_size (INTVAL (XEXP (insn, 1)));
1047 }
1048 return;
1049 }
1050
1051 if (CALL_P (insn) && !after_p)
1052 {
1053 if (!flag_asynchronous_unwind_tables)
1054 dwarf2out_args_size (args_size);
1055 return;
1056 }
1057 else if (BARRIER_P (insn))
1058 {
1059 /* Don't call compute_barrier_args_size () if the only
1060 BARRIER is at the end of function. */
1061 if (barrier_args_size == NULL && next_nonnote_insn (insn))
1062 compute_barrier_args_size ();
1063 if (barrier_args_size == NULL)
1064 offset = 0;
1065 else
1066 {
1067 offset = barrier_args_size[INSN_UID (insn)];
1068 if (offset < 0)
1069 offset = 0;
1070 }
1071
1072 offset -= args_size;
1073 #ifndef STACK_GROWS_DOWNWARD
1074 offset = -offset;
1075 #endif
1076 }
1077 else if (GET_CODE (PATTERN (insn)) == SET)
1078 offset = stack_adjust_offset (PATTERN (insn), args_size, 0);
1079 else if (GET_CODE (PATTERN (insn)) == PARALLEL
1080 || GET_CODE (PATTERN (insn)) == SEQUENCE)
1081 {
1082 /* There may be stack adjustments inside compound insns. Search
1083 for them. */
1084 for (offset = 0, i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
1085 if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
1086 offset += stack_adjust_offset (XVECEXP (PATTERN (insn), 0, i),
1087 args_size, offset);
1088 }
1089 else
1090 return;
1091
1092 if (offset == 0)
1093 return;
1094
1095 dwarf2out_stack_adjust (offset);
1096 }
1097
1098 /* We delay emitting a register save until either (a) we reach the end
1099 of the prologue or (b) the register is clobbered. This clusters
1100 register saves so that there are fewer pc advances. */
1101
1102 struct GTY(()) queued_reg_save {
1103 struct queued_reg_save *next;
1104 rtx reg;
1105 HOST_WIDE_INT cfa_offset;
1106 rtx saved_reg;
1107 };
1108
1109 static GTY(()) struct queued_reg_save *queued_reg_saves;
1110
1111 /* The caller's ORIG_REG is saved in SAVED_IN_REG. */
1112 typedef struct GTY(()) reg_saved_in_data {
1113 rtx orig_reg;
1114 rtx saved_in_reg;
1115 } reg_saved_in_data;
1116
1117 DEF_VEC_O (reg_saved_in_data);
1118 DEF_VEC_ALLOC_O (reg_saved_in_data, gc);
1119
1120 /* A set of registers saved in other registers. This is implemented as
1121 a flat array because it normally contains zero or 1 entry, depending
1122 on the target. IA-64 is the big spender here, using a maximum of
1123 5 entries. */
1124 static GTY(()) VEC(reg_saved_in_data, gc) *regs_saved_in_regs;
1125
1126 /* Compare X and Y for equivalence. The inputs may be REGs or PC_RTX. */
1127
1128 static bool
1129 compare_reg_or_pc (rtx x, rtx y)
1130 {
1131 if (REG_P (x) && REG_P (y))
1132 return REGNO (x) == REGNO (y);
1133 return x == y;
1134 }
1135
1136 /* Record SRC as being saved in DEST. DEST may be null to delete an
1137 existing entry. SRC may be a register or PC_RTX. */
1138
1139 static void
1140 record_reg_saved_in_reg (rtx dest, rtx src)
1141 {
1142 reg_saved_in_data *elt;
1143 size_t i;
1144
1145 FOR_EACH_VEC_ELT (reg_saved_in_data, regs_saved_in_regs, i, elt)
1146 if (compare_reg_or_pc (elt->orig_reg, src))
1147 {
1148 if (dest == NULL)
1149 	  VEC_unordered_remove (reg_saved_in_data, regs_saved_in_regs, i);
1150 else
1151 elt->saved_in_reg = dest;
1152 return;
1153 }
1154
1155 if (dest == NULL)
1156 return;
1157
1158   elt = VEC_safe_push (reg_saved_in_data, gc, regs_saved_in_regs, NULL);
1159 elt->orig_reg = src;
1160 elt->saved_in_reg = dest;
1161 }
1162
1163 /* Add an entry to QUEUED_REG_SAVES saying that REG is now saved at
1164 SREG, or if SREG is NULL then it is saved at OFFSET to the CFA. */
1165
1166 static void
1167 queue_reg_save (rtx reg, rtx sreg, HOST_WIDE_INT offset)
1168 {
1169 struct queued_reg_save *q;
1170
1171 /* Duplicates waste space, but it's also necessary to remove them
1172 for correctness, since the queue gets output in reverse
1173 order. */
1174 for (q = queued_reg_saves; q != NULL; q = q->next)
1175 if (REGNO (q->reg) == REGNO (reg))
1176 break;
1177
1178 if (q == NULL)
1179 {
1180 q = ggc_alloc_queued_reg_save ();
1181 q->next = queued_reg_saves;
1182 queued_reg_saves = q;
1183 }
1184
1185 q->reg = reg;
1186 q->cfa_offset = offset;
1187 q->saved_reg = sreg;
1188 }
1189
1190 /* Output all the entries in QUEUED_REG_SAVES. */
1191
1192 static void
1193 dwarf2out_flush_queued_reg_saves (void)
1194 {
1195 struct queued_reg_save *q;
1196
1197 for (q = queued_reg_saves; q; q = q->next)
1198 {
1199 unsigned int reg, sreg;
1200
1201 record_reg_saved_in_reg (q->saved_reg, q->reg);
1202
1203 reg = DWARF_FRAME_REGNUM (REGNO (q->reg));
1204 if (q->saved_reg)
1205 sreg = DWARF_FRAME_REGNUM (REGNO (q->saved_reg));
1206 else
1207 sreg = INVALID_REGNUM;
1208 reg_save (false, reg, sreg, q->cfa_offset);
1209 }
1210
1211 queued_reg_saves = NULL;
1212 }
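 
/* The point of the queue: a prologue with, say, three consecutive
   register pushes yields one DW_CFA_advance_loc followed by three
   DW_CFA_offset entries at the flush point, instead of an advance
   before every individual save.  */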
1213
1214 /* Does INSN clobber any register which QUEUED_REG_SAVES lists a saved
1215 location for? Or, does it clobber a register which we've previously
1216 said that some other register is saved in, and for which we now
1217 have a new location for? */
1218
1219 static bool
1220 clobbers_queued_reg_save (const_rtx insn)
1221 {
1222 struct queued_reg_save *q;
1223
1224 for (q = queued_reg_saves; q; q = q->next)
1225 {
1226 size_t i;
1227 reg_saved_in_data *rir;
1228
1229 if (modified_in_p (q->reg, insn))
1230 return true;
1231
1232 FOR_EACH_VEC_ELT (reg_saved_in_data, regs_saved_in_regs, i, rir)
1233 if (compare_reg_or_pc (q->reg, rir->orig_reg)
1234 && modified_in_p (rir->saved_in_reg, insn))
1235 return true;
1236 }
1237
1238 return false;
1239 }
1240
1241 /* What register, if any, is currently saved in REG? */
1242
1243 static rtx
1244 reg_saved_in (rtx reg)
1245 {
1246 unsigned int regn = REGNO (reg);
1247 struct queued_reg_save *q;
1248 reg_saved_in_data *rir;
1249 size_t i;
1250
1251 for (q = queued_reg_saves; q; q = q->next)
1252 if (q->saved_reg && regn == REGNO (q->saved_reg))
1253 return q->reg;
1254
1255 FOR_EACH_VEC_ELT (reg_saved_in_data, regs_saved_in_regs, i, rir)
1256 if (regn == REGNO (rir->saved_in_reg))
1257 return rir->orig_reg;
1258
1259 return NULL_RTX;
1260 }
1261
1262
1263 /* A temporary register holding an integral value used in adjusting SP
1264 or setting up the store_reg. The "offset" field holds the integer
1265 value, not an offset. */
1266 static dw_cfa_location cfa_temp;
1267
1268 /* A subroutine of dwarf2out_frame_debug, process a REG_DEF_CFA note. */
1269
1270 static void
1271 dwarf2out_frame_debug_def_cfa (rtx pat)
1272 {
1273 memset (&cfa, 0, sizeof (cfa));
1274
1275 switch (GET_CODE (pat))
1276 {
1277 case PLUS:
1278 cfa.reg = REGNO (XEXP (pat, 0));
1279 cfa.offset = INTVAL (XEXP (pat, 1));
1280 break;
1281
1282 case REG:
1283 cfa.reg = REGNO (pat);
1284 break;
1285
1286 case MEM:
1287 cfa.indirect = 1;
1288 pat = XEXP (pat, 0);
1289 if (GET_CODE (pat) == PLUS)
1290 {
1291 cfa.base_offset = INTVAL (XEXP (pat, 1));
1292 pat = XEXP (pat, 0);
1293 }
1294 cfa.reg = REGNO (pat);
1295 break;
1296
1297 default:
1298 /* Recurse and define an expression. */
1299 gcc_unreachable ();
1300 }
1301
1302 def_cfa_1 (false, &cfa);
1303 }
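 
/* Thus a note of the form (plus (reg fp) (const_int 16)) re-defines the
   CFA as fp + 16, while (mem (plus (reg sp) (const_int 8))) makes it
   indirect: the CFA is the value loaded from sp + 8.  */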
1304
1305 /* A subroutine of dwarf2out_frame_debug, process a REG_ADJUST_CFA note. */
1306
1307 static void
1308 dwarf2out_frame_debug_adjust_cfa (rtx pat)
1309 {
1310 rtx src, dest;
1311
1312 gcc_assert (GET_CODE (pat) == SET);
1313 dest = XEXP (pat, 0);
1314 src = XEXP (pat, 1);
1315
1316 switch (GET_CODE (src))
1317 {
1318 case PLUS:
1319 gcc_assert (REGNO (XEXP (src, 0)) == cfa.reg);
1320 cfa.offset -= INTVAL (XEXP (src, 1));
1321 break;
1322
1323 case REG:
1324 break;
1325
1326 default:
1327 gcc_unreachable ();
1328 }
1329
1330 cfa.reg = REGNO (dest);
1331 gcc_assert (cfa.indirect == 0);
1332
1333 def_cfa_1 (false, &cfa);
1334 }
1335
1336 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_OFFSET note. */
1337
1338 static void
1339 dwarf2out_frame_debug_cfa_offset (rtx set)
1340 {
1341 HOST_WIDE_INT offset;
1342 rtx src, addr, span;
1343 unsigned int sregno;
1344
1345 src = XEXP (set, 1);
1346 addr = XEXP (set, 0);
1347 gcc_assert (MEM_P (addr));
1348 addr = XEXP (addr, 0);
1349
1350 /* As documented, only consider extremely simple addresses. */
1351 switch (GET_CODE (addr))
1352 {
1353 case REG:
1354 gcc_assert (REGNO (addr) == cfa.reg);
1355 offset = -cfa.offset;
1356 break;
1357 case PLUS:
1358 gcc_assert (REGNO (XEXP (addr, 0)) == cfa.reg);
1359 offset = INTVAL (XEXP (addr, 1)) - cfa.offset;
1360 break;
1361 default:
1362 gcc_unreachable ();
1363 }
1364
1365 if (src == pc_rtx)
1366 {
1367 span = NULL;
1368 sregno = DWARF_FRAME_RETURN_COLUMN;
1369 }
1370 else
1371 {
1372 span = targetm.dwarf_register_span (src);
1373 sregno = DWARF_FRAME_REGNUM (REGNO (src));
1374 }
1375
1376 /* ??? We'd like to use queue_reg_save, but we need to come up with
1377 a different flushing heuristic for epilogues. */
1378 if (!span)
1379 reg_save (false, sregno, INVALID_REGNUM, offset);
1380 else
1381 {
1382 /* We have a PARALLEL describing where the contents of SRC live.
1383 Queue register saves for each piece of the PARALLEL. */
1384 int par_index;
1385 int limit;
1386 HOST_WIDE_INT span_offset = offset;
1387
1388 gcc_assert (GET_CODE (span) == PARALLEL);
1389
1390 limit = XVECLEN (span, 0);
1391 for (par_index = 0; par_index < limit; par_index++)
1392 {
1393 rtx elem = XVECEXP (span, 0, par_index);
1394
1395 	  sregno = DWARF_FRAME_REGNUM (REGNO (elem));
1396 reg_save (false, sregno, INVALID_REGNUM, span_offset);
1397 span_offset += GET_MODE_SIZE (GET_MODE (elem));
1398 }
1399 }
1400 }
1401
1402 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_REGISTER note. */
1403
1404 static void
1405 dwarf2out_frame_debug_cfa_register (rtx set)
1406 {
1407 rtx src, dest;
1408 unsigned sregno, dregno;
1409
1410 src = XEXP (set, 1);
1411 dest = XEXP (set, 0);
1412
1413 if (src == pc_rtx)
1414 sregno = DWARF_FRAME_RETURN_COLUMN;
1415 else
1416 {
1417 record_reg_saved_in_reg (dest, src);
1418 sregno = DWARF_FRAME_REGNUM (REGNO (src));
1419 }
1420
1421 dregno = DWARF_FRAME_REGNUM (REGNO (dest));
1422
1423 /* ??? We'd like to use queue_reg_save, but we need to come up with
1424 a different flushing heuristic for epilogues. */
1425 reg_save (false, sregno, dregno, 0);
1426 }
1427
1428 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_EXPRESSION note. */
1429
1430 static void
1431 dwarf2out_frame_debug_cfa_expression (rtx set)
1432 {
1433 rtx src, dest, span;
1434 dw_cfi_ref cfi = new_cfi ();
1435
1436 dest = SET_DEST (set);
1437 src = SET_SRC (set);
1438
1439 gcc_assert (REG_P (src));
1440 gcc_assert (MEM_P (dest));
1441
1442 span = targetm.dwarf_register_span (src);
1443 gcc_assert (!span);
1444
1445 cfi->dw_cfi_opc = DW_CFA_expression;
1446 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = DWARF_FRAME_REGNUM (REGNO (src));
1447 cfi->dw_cfi_oprnd2.dw_cfi_loc
1448 = mem_loc_descriptor (XEXP (dest, 0), get_address_mode (dest),
1449 GET_MODE (dest), VAR_INIT_STATUS_INITIALIZED);
1450
1451   /* ??? We'd like to use queue_reg_save if the interface were different;
1452      as above, we could then manage flushing for epilogues.  */
1453 add_fde_cfi (cfi);
1454 }
1455
1456 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_RESTORE note. */
1457
1458 static void
1459 dwarf2out_frame_debug_cfa_restore (rtx reg)
1460 {
1461 dw_cfi_ref cfi = new_cfi ();
1462 unsigned int regno = DWARF_FRAME_REGNUM (REGNO (reg));
1463
1464 cfi->dw_cfi_opc = (regno & ~0x3f ? DW_CFA_restore_extended : DW_CFA_restore);
1465 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = regno;
1466
1467 add_fde_cfi (cfi);
1468 }
1469
1470 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_WINDOW_SAVE.
1471 ??? Perhaps we should note in the CIE where windows are saved (instead of
1472 assuming 0(cfa)) and what registers are in the window. */
1473
1474 static void
1475 dwarf2out_frame_debug_cfa_window_save (void)
1476 {
1477 dw_cfi_ref cfi = new_cfi ();
1478
1479 cfi->dw_cfi_opc = DW_CFA_GNU_window_save;
1480 add_fde_cfi (cfi);
1481 }
1482
1483 /* Record call frame debugging information for an expression EXPR,
1484 which either sets SP or FP (adjusting how we calculate the frame
1485 address) or saves a register to the stack or another register.
1486 LABEL indicates the address of EXPR.
1487
1488 This function encodes a state machine mapping rtxes to actions on
1489 cfa, cfa_store, and cfa_temp.reg. We describe these rules so
1490 users need not read the source code.
1491
1492 The High-Level Picture
1493
1494 Changes in the register we use to calculate the CFA: Currently we
1495 assume that if you copy the CFA register into another register, we
1496 should take the other one as the new CFA register; this seems to
1497 work pretty well. If it's wrong for some target, it's simple
1498 enough not to set RTX_FRAME_RELATED_P on the insn in question.
1499
1500 Changes in the register we use for saving registers to the stack:
1501 This is usually SP, but not always. Again, we deduce that if you
1502 copy SP into another register (and SP is not the CFA register),
1503 then the new register is the one we will be using for register
1504 saves. This also seems to work.
1505
1506 Register saves: There's not much guesswork about this one; if
1507 RTX_FRAME_RELATED_P is set on an insn which modifies memory, it's a
1508 register save, and the register used to calculate the destination
1509 had better be the one we think we're using for this purpose.
1510 It's also assumed that a copy from a call-saved register to another
1511 register is saving that register if RTX_FRAME_RELATED_P is set on
1512 that instruction. If the copy is from a call-saved register to
1513 the *same* register, that means that the register is now the same
1514 value as in the caller.
1515
1516 Except: If the register being saved is the CFA register, and the
1517 offset is nonzero, we are saving the CFA, so we assume we have to
1518 use DW_CFA_def_cfa_expression. If the offset is 0, we assume that
1519 the intent is to save the value of SP from the previous frame.
1520
1521    In addition, if a register has previously been saved to a different
1522    register, a save of that second register is treated as a save of the original.
1523
1524 Invariants / Summaries of Rules
1525
1526 cfa current rule for calculating the CFA. It usually
1527 consists of a register and an offset.
1528 cfa_store register used by prologue code to save things to the stack
1529 cfa_store.offset is the offset from the value of
1530 cfa_store.reg to the actual CFA
1531 cfa_temp register holding an integral value. cfa_temp.offset
1532 stores the value, which will be used to adjust the
1533 stack pointer. cfa_temp is also used like cfa_store,
1534 to track stores to the stack via fp or a temp reg.
1535
1536 Rules 1- 4: Setting a register's value to cfa.reg or an expression
1537 with cfa.reg as the first operand changes the cfa.reg and its
1538 cfa.offset. Rule 1 and 4 also set cfa_temp.reg and
1539 cfa_temp.offset.
1540
1541 Rules 6- 9: Set a non-cfa.reg register value to a constant or an
1542 expression yielding a constant. This sets cfa_temp.reg
1543 and cfa_temp.offset.
1544
1545 Rule 5: Create a new register cfa_store used to save items to the
1546 stack.
1547
1548 Rules 10-14: Save a register to the stack. Define offset as the
1549 difference of the original location and cfa_store's
1550 location (or cfa_temp's location if cfa_temp is used).
1551
1552    Rules 16-20: If an AND operation happens on sp in the prologue, we
1553 		assume the stack is realigned.  We will use a group of
1554 		DW_OP_XXX expressions to represent the location of the
1555 		stored register instead of CFA+offset.
1556
1557 The Rules
1558
1559 "{a,b}" indicates a choice of a xor b.
1560 "<reg>:cfa.reg" indicates that <reg> must equal cfa.reg.
1561
1562 Rule 1:
1563 (set <reg1> <reg2>:cfa.reg)
1564 effects: cfa.reg = <reg1>
1565 cfa.offset unchanged
1566 cfa_temp.reg = <reg1>
1567 cfa_temp.offset = cfa.offset
1568
1569 Rule 2:
1570 (set sp ({minus,plus,losum} {sp,fp}:cfa.reg
1571 {<const_int>,<reg>:cfa_temp.reg}))
1572 effects: cfa.reg = sp if fp used
1573 cfa.offset += {+/- <const_int>, cfa_temp.offset} if cfa.reg==sp
1574 cfa_store.offset += {+/- <const_int>, cfa_temp.offset}
1575 if cfa_store.reg==sp
1576
1577 Rule 3:
1578 (set fp ({minus,plus,losum} <reg>:cfa.reg <const_int>))
1579 effects: cfa.reg = fp
1580 cfa_offset += +/- <const_int>
1581
1582 Rule 4:
1583 (set <reg1> ({plus,losum} <reg2>:cfa.reg <const_int>))
1584 constraints: <reg1> != fp
1585 <reg1> != sp
1586 effects: cfa.reg = <reg1>
1587 cfa_temp.reg = <reg1>
1588 cfa_temp.offset = cfa.offset
1589
1590 Rule 5:
1591 (set <reg1> (plus <reg2>:cfa_temp.reg sp:cfa.reg))
1592 constraints: <reg1> != fp
1593 <reg1> != sp
1594 effects: cfa_store.reg = <reg1>
1595 cfa_store.offset = cfa.offset - cfa_temp.offset
1596
1597 Rule 6:
1598 (set <reg> <const_int>)
1599 effects: cfa_temp.reg = <reg>
1600 cfa_temp.offset = <const_int>
1601
1602 Rule 7:
1603 (set <reg1>:cfa_temp.reg (ior <reg2>:cfa_temp.reg <const_int>))
1604 effects: cfa_temp.reg = <reg1>
1605 cfa_temp.offset |= <const_int>
1606
1607 Rule 8:
1608 (set <reg> (high <exp>))
1609 effects: none
1610
1611 Rule 9:
1612 (set <reg> (lo_sum <exp> <const_int>))
1613 effects: cfa_temp.reg = <reg>
1614 cfa_temp.offset = <const_int>
1615
1616 Rule 10:
1617 (set (mem ({pre,post}_modify sp:cfa_store (???? <reg1> <const_int>))) <reg2>)
1618 effects: cfa_store.offset -= <const_int>
1619 cfa.offset = cfa_store.offset if cfa.reg == sp
1620 cfa.reg = sp
1621 cfa.base_offset = -cfa_store.offset
1622
1623 Rule 11:
1624 (set (mem ({pre_inc,pre_dec,post_dec} sp:cfa_store.reg)) <reg>)
1625 effects: cfa_store.offset += -/+ mode_size(mem)
1626 cfa.offset = cfa_store.offset if cfa.reg == sp
1627 cfa.reg = sp
1628 cfa.base_offset = -cfa_store.offset
1629
1630 Rule 12:
1631    (set (mem ({minus,plus,losum} <reg1>:{cfa_store,cfa_temp} <const_int>))
1632         <reg2>)
1634 effects: cfa.reg = <reg1>
1635 cfa.base_offset = -/+ <const_int> - {cfa_store,cfa_temp}.offset
1636
1637 Rule 13:
1638 (set (mem <reg1>:{cfa_store,cfa_temp}) <reg2>)
1639 effects: cfa.reg = <reg1>
1640 cfa.base_offset = -{cfa_store,cfa_temp}.offset
1641
1642 Rule 14:
1643 (set (mem (post_inc <reg1>:cfa_temp <const_int>)) <reg2>)
1644 effects: cfa.reg = <reg1>
1645 cfa.base_offset = -cfa_temp.offset
1646 cfa_temp.offset -= mode_size(mem)
1647
1648 Rule 15:
1649 (set <reg> {unspec, unspec_volatile})
1650 effects: target-dependent
1651
1652 Rule 16:
1653 (set sp (and: sp <const_int>))
1654 constraints: cfa_store.reg == sp
1655 effects: cfun->fde.stack_realign = 1
1656 cfa_store.offset = 0
1657 fde->drap_reg = cfa.reg if cfa.reg != sp and cfa.reg != fp
1658
1659 Rule 17:
1660 (set (mem ({pre_inc, pre_dec} sp)) (mem (plus (cfa.reg) (const_int))))
1661 effects: cfa_store.offset += -/+ mode_size(mem)
1662
1663 Rule 18:
1664 (set (mem ({pre_inc, pre_dec} sp)) fp)
1665 constraints: fde->stack_realign == 1
1666 effects: cfa_store.offset = 0
1667 cfa.reg != HARD_FRAME_POINTER_REGNUM
1668
1669 Rule 19:
1670 (set (mem ({pre_inc, pre_dec} sp)) cfa.reg)
1671 constraints: fde->stack_realign == 1
1672 && cfa.offset == 0
1673 && cfa.indirect == 0
1674 && cfa.reg != HARD_FRAME_POINTER_REGNUM
1675 effects: Use DW_CFA_def_cfa_expression to define cfa
1676 cfa.reg == fde->drap_reg */
1677
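/* As a worked example, a conventional frame-pointer prologue such as

     (set (mem (pre_dec (reg sp))) (reg fp))	;; push fp
     (set (reg fp) (reg sp))			;; fp = sp

   hits Rule 11 for the push (a register save at the new top of stack)
   and then Rule 1 for the copy, which makes fp the CFA register from
   that point on.  */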
1678 static void
1679 dwarf2out_frame_debug_expr (rtx expr)
1680 {
1681 rtx src, dest, span;
1682 HOST_WIDE_INT offset;
1683 dw_fde_ref fde;
1684
1685 /* If RTX_FRAME_RELATED_P is set on a PARALLEL, process each member of
1686 the PARALLEL independently. The first element is always processed if
1687 it is a SET. This is for backward compatibility. Other elements
1688 are processed only if they are SETs and the RTX_FRAME_RELATED_P
1689 flag is set in them. */
1690 if (GET_CODE (expr) == PARALLEL || GET_CODE (expr) == SEQUENCE)
1691 {
1692 int par_index;
1693 int limit = XVECLEN (expr, 0);
1694 rtx elem;
1695
1696 /* PARALLELs have strict read-modify-write semantics, so we
1697 ought to evaluate every rvalue before changing any lvalue.
1698 It's cumbersome to do that in general, but there's an
1699 easy approximation that is enough for all current users:
1700 handle register saves before register assignments. */
1701 if (GET_CODE (expr) == PARALLEL)
1702 for (par_index = 0; par_index < limit; par_index++)
1703 {
1704 elem = XVECEXP (expr, 0, par_index);
1705 if (GET_CODE (elem) == SET
1706 && MEM_P (SET_DEST (elem))
1707 && (RTX_FRAME_RELATED_P (elem) || par_index == 0))
1708 dwarf2out_frame_debug_expr (elem);
1709 }
1710
1711 for (par_index = 0; par_index < limit; par_index++)
1712 {
1713 elem = XVECEXP (expr, 0, par_index);
1714 if (GET_CODE (elem) == SET
1715 && (!MEM_P (SET_DEST (elem)) || GET_CODE (expr) == SEQUENCE)
1716 && (RTX_FRAME_RELATED_P (elem) || par_index == 0))
1717 dwarf2out_frame_debug_expr (elem);
1718 else if (GET_CODE (elem) == SET
1719 && par_index != 0
1720 && !RTX_FRAME_RELATED_P (elem))
1721 {
1722 /* Stack adjustment combining might combine some post-prologue
1723 stack adjustment into a prologue stack adjustment. */
1724 HOST_WIDE_INT offset = stack_adjust_offset (elem, args_size, 0);
1725
1726 if (offset != 0)
1727 dwarf2out_stack_adjust (offset);
1728 }
1729 }
1730 return;
1731 }
1732
1733 gcc_assert (GET_CODE (expr) == SET);
1734
1735 src = SET_SRC (expr);
1736 dest = SET_DEST (expr);
1737
1738 if (REG_P (src))
1739 {
1740 rtx rsi = reg_saved_in (src);
1741 if (rsi)
1742 src = rsi;
1743 }
1744
1745 fde = cfun->fde;
1746
1747 switch (GET_CODE (dest))
1748 {
1749 case REG:
1750 switch (GET_CODE (src))
1751 {
1752 /* Setting FP from SP. */
1753 case REG:
1754 if (cfa.reg == (unsigned) REGNO (src))
1755 {
1756 /* Rule 1 */
1757 /* Update the CFA rule wrt SP or FP. Make sure src is
1758 relative to the current CFA register.
1759
1760 We used to require that dest be either SP or FP, but the
1761 ARM copies SP to a temporary register, and from there to
1762 FP. So we just rely on the backends to only set
1763 RTX_FRAME_RELATED_P on appropriate insns. */
1764 cfa.reg = REGNO (dest);
1765 cfa_temp.reg = cfa.reg;
1766 cfa_temp.offset = cfa.offset;
1767 }
1768 else
1769 {
1770 /* Saving a register in a register. */
1771 gcc_assert (!fixed_regs [REGNO (dest)]
1772 /* For the SPARC and its register window. */
1773 || (DWARF_FRAME_REGNUM (REGNO (src))
1774 == DWARF_FRAME_RETURN_COLUMN));
1775
1776 /* After stack is aligned, we can only save SP in FP
1777 if drap register is used. In this case, we have
1778 to restore stack pointer with the CFA value and we
1779 don't generate this DWARF information. */
1780 if (fde
1781 && fde->stack_realign
1782 && REGNO (src) == STACK_POINTER_REGNUM)
1783 gcc_assert (REGNO (dest) == HARD_FRAME_POINTER_REGNUM
1784 && fde->drap_reg != INVALID_REGNUM
1785 && cfa.reg != REGNO (src));
1786 else
1787 queue_reg_save (src, dest, 0);
1788 }
1789 break;
1790
1791 case PLUS:
1792 case MINUS:
1793 case LO_SUM:
1794 if (dest == stack_pointer_rtx)
1795 {
1796 /* Rule 2 */
1797 /* Adjusting SP. */
1798 switch (GET_CODE (XEXP (src, 1)))
1799 {
1800 case CONST_INT:
1801 offset = INTVAL (XEXP (src, 1));
1802 break;
1803 case REG:
1804 gcc_assert ((unsigned) REGNO (XEXP (src, 1))
1805 == cfa_temp.reg);
1806 offset = cfa_temp.offset;
1807 break;
1808 default:
1809 gcc_unreachable ();
1810 }
1811
1812 if (XEXP (src, 0) == hard_frame_pointer_rtx)
1813 {
1814 /* Restoring SP from FP in the epilogue. */
1815 gcc_assert (cfa.reg == (unsigned) HARD_FRAME_POINTER_REGNUM);
1816 cfa.reg = STACK_POINTER_REGNUM;
1817 }
1818 else if (GET_CODE (src) == LO_SUM)
1819 /* Assume we've set the source reg of the LO_SUM from sp. */
1820 ;
1821 else
1822 gcc_assert (XEXP (src, 0) == stack_pointer_rtx);
1823
1824 if (GET_CODE (src) != MINUS)
1825 offset = -offset;
1826 if (cfa.reg == STACK_POINTER_REGNUM)
1827 cfa.offset += offset;
1828 if (cfa_store.reg == STACK_POINTER_REGNUM)
1829 cfa_store.offset += offset;
1830 }
1831 else if (dest == hard_frame_pointer_rtx)
1832 {
1833 /* Rule 3 */
1834 /* Either setting the FP from an offset of the SP,
1835 or adjusting the FP */
1836 gcc_assert (frame_pointer_needed);
1837
1838 gcc_assert (REG_P (XEXP (src, 0))
1839 && (unsigned) REGNO (XEXP (src, 0)) == cfa.reg
1840 && CONST_INT_P (XEXP (src, 1)));
1841 offset = INTVAL (XEXP (src, 1));
1842 if (GET_CODE (src) != MINUS)
1843 offset = -offset;
1844 cfa.offset += offset;
1845 cfa.reg = HARD_FRAME_POINTER_REGNUM;
1846 }
1847 else
1848 {
1849 gcc_assert (GET_CODE (src) != MINUS);
1850
1851 /* Rule 4 */
1852 if (REG_P (XEXP (src, 0))
1853 && REGNO (XEXP (src, 0)) == cfa.reg
1854 && CONST_INT_P (XEXP (src, 1)))
1855 {
1856 /* Setting a temporary CFA register that will be copied
1857 into the FP later on. */
1858 offset = - INTVAL (XEXP (src, 1));
1859 cfa.offset += offset;
1860 cfa.reg = REGNO (dest);
1861 /* Or used to save regs to the stack. */
1862 cfa_temp.reg = cfa.reg;
1863 cfa_temp.offset = cfa.offset;
1864 }
1865
1866 /* Rule 5 */
1867 else if (REG_P (XEXP (src, 0))
1868 && REGNO (XEXP (src, 0)) == cfa_temp.reg
1869 && XEXP (src, 1) == stack_pointer_rtx)
1870 {
1871 /* Setting a scratch register that we will use instead
1872 of SP for saving registers to the stack. */
1873 gcc_assert (cfa.reg == STACK_POINTER_REGNUM);
1874 cfa_store.reg = REGNO (dest);
1875 cfa_store.offset = cfa.offset - cfa_temp.offset;
1876 }
1877
1878 /* Rule 9 */
1879 else if (GET_CODE (src) == LO_SUM
1880 && CONST_INT_P (XEXP (src, 1)))
1881 {
1882 cfa_temp.reg = REGNO (dest);
1883 cfa_temp.offset = INTVAL (XEXP (src, 1));
1884 }
1885 else
1886 gcc_unreachable ();
1887 }
1888 break;
1889
1890 /* Rule 6 */
1891 case CONST_INT:
1892 cfa_temp.reg = REGNO (dest);
1893 cfa_temp.offset = INTVAL (src);
1894 break;
1895
1896 /* Rule 7 */
1897 case IOR:
1898 gcc_assert (REG_P (XEXP (src, 0))
1899 && (unsigned) REGNO (XEXP (src, 0)) == cfa_temp.reg
1900 && CONST_INT_P (XEXP (src, 1)));
1901
1902 if ((unsigned) REGNO (dest) != cfa_temp.reg)
1903 cfa_temp.reg = REGNO (dest);
1904 cfa_temp.offset |= INTVAL (XEXP (src, 1));
1905 break;
1906
1907 /* Skip over HIGH, assuming it will be followed by a LO_SUM,
1908 which will fill in all of the bits. */
1909 /* Rule 8 */
1910 case HIGH:
1911 break;
1912
1913 /* Rule 15 */
1914 case UNSPEC:
1915 case UNSPEC_VOLATILE:
1916 /* All unspecs should be represented by REG_CFA_* notes. */
1917 gcc_unreachable ();
1918 return;
1919
1920 /* Rule 16 */
1921 case AND:
1922 	/* If this AND operation happens on the stack pointer in the
1923 	   prologue, we assume the stack is realigned and we extract the
1924 	   alignment.  */
1925 if (fde && XEXP (src, 0) == stack_pointer_rtx)
1926 {
1927 /* We interpret reg_save differently with stack_realign set.
1928 Thus we must flush whatever we have queued first. */
1929 dwarf2out_flush_queued_reg_saves ();
1930
1931 gcc_assert (cfa_store.reg == REGNO (XEXP (src, 0)));
1932 fde->stack_realign = 1;
1933 fde->stack_realignment = INTVAL (XEXP (src, 1));
1934 cfa_store.offset = 0;
1935
1936 if (cfa.reg != STACK_POINTER_REGNUM
1937 && cfa.reg != HARD_FRAME_POINTER_REGNUM)
1938 fde->drap_reg = cfa.reg;
1939 }
1940 return;
1941
1942 default:
1943 gcc_unreachable ();
1944 }
1945
1946 def_cfa_1 (false, &cfa);
1947 break;
1948
1949 case MEM:
1950
1951 /* Saving a register to the stack. Make sure dest is relative to the
1952 CFA register. */
1953 switch (GET_CODE (XEXP (dest, 0)))
1954 {
1955 /* Rule 10 */
1956 /* With a push. */
1957 case PRE_MODIFY:
1958 case POST_MODIFY:
1959 /* We can't handle variable size modifications. */
1960 gcc_assert (GET_CODE (XEXP (XEXP (XEXP (dest, 0), 1), 1))
1961 == CONST_INT);
1962 offset = -INTVAL (XEXP (XEXP (XEXP (dest, 0), 1), 1));
1963
1964 gcc_assert (REGNO (XEXP (XEXP (dest, 0), 0)) == STACK_POINTER_REGNUM
1965 && cfa_store.reg == STACK_POINTER_REGNUM);
1966
1967 cfa_store.offset += offset;
1968 if (cfa.reg == STACK_POINTER_REGNUM)
1969 cfa.offset = cfa_store.offset;
1970
1971 if (GET_CODE (XEXP (dest, 0)) == POST_MODIFY)
1972 offset -= cfa_store.offset;
1973 else
1974 offset = -cfa_store.offset;
1975 break;
1976
1977 /* Rule 11 */
1978 case PRE_INC:
1979 case PRE_DEC:
1980 case POST_DEC:
1981 offset = GET_MODE_SIZE (GET_MODE (dest));
1982 if (GET_CODE (XEXP (dest, 0)) == PRE_INC)
1983 offset = -offset;
1984
1985 gcc_assert ((REGNO (XEXP (XEXP (dest, 0), 0))
1986 == STACK_POINTER_REGNUM)
1987 && cfa_store.reg == STACK_POINTER_REGNUM);
1988
1989 cfa_store.offset += offset;
1990
1991 /* Rule 18: If the stack has been realigned, we will use FP as a
1992 reference to represent the address of the stored
1993 register.  */
1994 if (fde
1995 && fde->stack_realign
1996 && src == hard_frame_pointer_rtx)
1997 {
1998 gcc_assert (cfa.reg != HARD_FRAME_POINTER_REGNUM);
1999 cfa_store.offset = 0;
2000 }
2001
2002 if (cfa.reg == STACK_POINTER_REGNUM)
2003 cfa.offset = cfa_store.offset;
2004
2005 if (GET_CODE (XEXP (dest, 0)) == POST_DEC)
2006 offset -= cfa_store.offset;
2007 else
2008 offset = -cfa_store.offset;
2009 break;
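/* A typical Rule 11 match is a plain push (mode and register
   illustrative),

	(set (mem:SI (pre_dec:SI (reg:SI sp))) (reg:SI 3))

   which grows cfa_store.offset by 4 and leads to reg 3 being queued
   as saved at the new stack top.  */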
2010
2011 /* Rule 12 */
2012 /* With an offset. */
2013 case PLUS:
2014 case MINUS:
2015 case LO_SUM:
2016 {
2017 int regno;
2018
2019 gcc_assert (CONST_INT_P (XEXP (XEXP (dest, 0), 1))
2020 && REG_P (XEXP (XEXP (dest, 0), 0)));
2021 offset = INTVAL (XEXP (XEXP (dest, 0), 1));
2022 if (GET_CODE (XEXP (dest, 0)) == MINUS)
2023 offset = -offset;
2024
2025 regno = REGNO (XEXP (XEXP (dest, 0), 0));
2026
2027 if (cfa.reg == (unsigned) regno)
2028 offset -= cfa.offset;
2029 else if (cfa_store.reg == (unsigned) regno)
2030 offset -= cfa_store.offset;
2031 else
2032 {
2033 gcc_assert (cfa_temp.reg == (unsigned) regno);
2034 offset -= cfa_temp.offset;
2035 }
2036 }
2037 break;
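/* Rule 12 instead covers saves at a fixed offset from a tracked base
   (all operands illustrative),

	(set (mem:SI (plus:SI (reg:SI sp) (const_int 8))) (reg:SI 4))

   where the resulting CFA-relative slot is 8 minus the matching
   base's recorded offset.  */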
2038
2039 /* Rule 13 */
2040 /* Without an offset. */
2041 case REG:
2042 {
2043 int regno = REGNO (XEXP (dest, 0));
2044
2045 if (cfa.reg == (unsigned) regno)
2046 offset = -cfa.offset;
2047 else if (cfa_store.reg == (unsigned) regno)
2048 offset = -cfa_store.offset;
2049 else
2050 {
2051 gcc_assert (cfa_temp.reg == (unsigned) regno);
2052 offset = -cfa_temp.offset;
2053 }
2054 }
2055 break;
2056
2057 /* Rule 14 */
2058 case POST_INC:
2059 gcc_assert (cfa_temp.reg
2060 == (unsigned) REGNO (XEXP (XEXP (dest, 0), 0)));
2061 offset = -cfa_temp.offset;
2062 cfa_temp.offset -= GET_MODE_SIZE (GET_MODE (dest));
2063 break;
2064
2065 default:
2066 gcc_unreachable ();
2067 }
2068
2069 /* Rule 17 */
2070 /* If the source operand of this MEM operation is not a
2071 register, the source is typically the return address.  Here
2072 we only care how much the stack grew; we don't record a save.  */
2073 if (!REG_P (src))
2074 break;
2075
2076 if (REGNO (src) != STACK_POINTER_REGNUM
2077 && REGNO (src) != HARD_FRAME_POINTER_REGNUM
2078 && (unsigned) REGNO (src) == cfa.reg)
2079 {
2080 /* We're storing the current CFA reg into the stack. */
2081
2082 if (cfa.offset == 0)
2083 {
2084 /* Rule 19 */
2085 /* If the stack has been realigned, storing the CFA reg into
2086 the stack means we can no longer use reg + offset to
2087 represent the CFA.  Here we use DW_CFA_def_cfa_expression
2088 instead.  The result of this expression equals the original
2089 CFA value.  */
2090 if (fde
2091 && fde->stack_realign
2092 && cfa.indirect == 0
2093 && cfa.reg != HARD_FRAME_POINTER_REGNUM)
2094 {
2095 dw_cfa_location cfa_exp;
2096
2097 gcc_assert (fde->drap_reg == cfa.reg);
2098
2099 cfa_exp.indirect = 1;
2100 cfa_exp.reg = HARD_FRAME_POINTER_REGNUM;
2101 cfa_exp.base_offset = offset;
2102 cfa_exp.offset = 0;
2103
2104 fde->drap_reg_saved = 1;
2105
2106 def_cfa_1 (false, &cfa_exp);
2107 break;
2108 }
2109
2110 /* If the source register is exactly the CFA, assume
2111 we're saving SP like any other register; this happens
2112 on the ARM. */
2113 def_cfa_1 (false, &cfa);
2114 queue_reg_save (stack_pointer_rtx, NULL_RTX, offset);
2115 break;
2116 }
2117 else
2118 {
2119 /* Otherwise, we'll need to look in the stack to
2120 calculate the CFA. */
2121 rtx x = XEXP (dest, 0);
2122
2123 if (!REG_P (x))
2124 x = XEXP (x, 0);
2125 gcc_assert (REG_P (x));
2126
2127 cfa.reg = REGNO (x);
2128 cfa.base_offset = offset;
2129 cfa.indirect = 1;
2130 def_cfa_1 (false, &cfa);
2131 break;
2132 }
2133 }
2134
2135 def_cfa_1 (false, &cfa);
2136 {
2137 span = targetm.dwarf_register_span (src);
2138
2139 if (!span)
2140 queue_reg_save (src, NULL_RTX, offset);
2141 else
2142 {
2143 /* We have a PARALLEL describing where the contents of SRC
2144 live. Queue register saves for each piece of the
2145 PARALLEL. */
2146 int par_index;
2147 int limit;
2148 HOST_WIDE_INT span_offset = offset;
2149
2150 gcc_assert (GET_CODE (span) == PARALLEL);
2151
2152 limit = XVECLEN (span, 0);
2153 for (par_index = 0; par_index < limit; par_index++)
2154 {
2155 rtx elem = XVECEXP (span, 0, par_index);
2156
2157 queue_reg_save (elem, NULL_RTX, span_offset);
2158 span_offset += GET_MODE_SIZE (GET_MODE (elem));
2159 }
2160 }
2161 }
2162 break;
2163
2164 default:
2165 gcc_unreachable ();
2166 }
2167 }
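/* To see the rules cooperate, consider a conventional frame-pointer
   prologue on a hypothetical 32-bit target whose incoming CFA is
   sp + 4:

	(set (mem:SI (pre_dec:SI (reg:SI sp))) (reg:SI fp))	; Rule 11
	(set (reg:SI fp) (reg:SI sp))				; Rule 1
	(set (reg:SI sp) (plus:SI (reg:SI sp) (const_int -8)))	; Rule 2

   The push moves the CFA to sp + 8 and queues the old FP at CFA - 8;
   the copy retargets the CFA to fp + 8; the final adjustment leaves
   the CFA untouched since it no longer depends on SP.  */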
2168
2169 /* Record call frame debugging information for INSN, which either
2170 sets SP or FP (adjusting how we calculate the frame address) or saves a
2171 register to the stack.
2172
2173 If AFTER_P is false, we're being called before the insn is emitted,
2174 otherwise after.  Call insns are processed twice, once each way.  */
2175
2176 static void
2177 dwarf2out_frame_debug (rtx insn, bool after_p)
2178 {
2179 rtx note, n;
2180 bool handled_one = false;
2181 bool need_flush = false;
2182
2183 /* Remember where we are to insert notes. Do not separate tablejump
2184 insns from their ADDR_DIFF_VEC. Putting the note after the VEC
2185 should be ok. */
2186 if (after_p)
2187 {
2188 if (!tablejump_p (insn, NULL, &cfi_insn))
2189 cfi_insn = insn;
2190 }
2191 else
2192 cfi_insn = PREV_INSN (insn);
2193
2194 if (!NONJUMP_INSN_P (insn) || clobbers_queued_reg_save (insn))
2195 dwarf2out_flush_queued_reg_saves ();
2196
2197 if (!RTX_FRAME_RELATED_P (insn))
2198 {
2199 /* ??? This should be done unconditionally since stack adjustments
2200 matter if the stack pointer is not the CFA register anymore but
2201 is still used to save registers. */
2202 if (!ACCUMULATE_OUTGOING_ARGS)
2203 dwarf2out_notice_stack_adjust (insn, after_p);
2204 cfi_insn = NULL;
2205 return;
2206 }
2207
2208 any_cfis_emitted = false;
2209
2210 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
2211 switch (REG_NOTE_KIND (note))
2212 {
2213 case REG_FRAME_RELATED_EXPR:
2214 insn = XEXP (note, 0);
2215 goto do_frame_expr;
2216
2217 case REG_CFA_DEF_CFA:
2218 dwarf2out_frame_debug_def_cfa (XEXP (note, 0));
2219 handled_one = true;
2220 break;
2221
2222 case REG_CFA_ADJUST_CFA:
2223 n = XEXP (note, 0);
2224 if (n == NULL)
2225 {
2226 n = PATTERN (insn);
2227 if (GET_CODE (n) == PARALLEL)
2228 n = XVECEXP (n, 0, 0);
2229 }
2230 dwarf2out_frame_debug_adjust_cfa (n);
2231 handled_one = true;
2232 break;
2233
2234 case REG_CFA_OFFSET:
2235 n = XEXP (note, 0);
2236 if (n == NULL)
2237 n = single_set (insn);
2238 dwarf2out_frame_debug_cfa_offset (n);
2239 handled_one = true;
2240 break;
2241
2242 case REG_CFA_REGISTER:
2243 n = XEXP (note, 0);
2244 if (n == NULL)
2245 {
2246 n = PATTERN (insn);
2247 if (GET_CODE (n) == PARALLEL)
2248 n = XVECEXP (n, 0, 0);
2249 }
2250 dwarf2out_frame_debug_cfa_register (n);
2251 handled_one = true;
2252 break;
2253
2254 case REG_CFA_EXPRESSION:
2255 n = XEXP (note, 0);
2256 if (n == NULL)
2257 n = single_set (insn);
2258 dwarf2out_frame_debug_cfa_expression (n);
2259 handled_one = true;
2260 break;
2261
2262 case REG_CFA_RESTORE:
2263 n = XEXP (note, 0);
2264 if (n == NULL)
2265 {
2266 n = PATTERN (insn);
2267 if (GET_CODE (n) == PARALLEL)
2268 n = XVECEXP (n, 0, 0);
2269 n = XEXP (n, 0);
2270 }
2271 dwarf2out_frame_debug_cfa_restore (n);
2272 handled_one = true;
2273 break;
2274
2275 case REG_CFA_SET_VDRAP:
2276 n = XEXP (note, 0);
2277 if (REG_P (n))
2278 {
2279 dw_fde_ref fde = cfun->fde;
2280 if (fde)
2281 {
2282 gcc_assert (fde->vdrap_reg == INVALID_REGNUM);
2283 /* N was verified to be a REG above.  */
2284 fde->vdrap_reg = REGNO (n);
2285 }
2286 }
2287 handled_one = true;
2288 break;
2289
2290 case REG_CFA_WINDOW_SAVE:
2291 dwarf2out_frame_debug_cfa_window_save ();
2292 handled_one = true;
2293 break;
2294
2295 case REG_CFA_FLUSH_QUEUE:
2296 /* The actual flush happens below. */
2297 need_flush = true;
2298 handled_one = true;
2299 break;
2300
2301 default:
2302 break;
2303 }
2304
2305 if (handled_one)
2306 {
2307 /* Minimize the number of advances by emitting the entire queue
2308 once anything is emitted. */
2309 need_flush |= any_cfis_emitted;
2310 }
2311 else
2312 {
2313 insn = PATTERN (insn);
2314 do_frame_expr:
2315 dwarf2out_frame_debug_expr (insn);
2316
2317 /* Check again. A parallel can save and update the same register.
2318 We could probably check just once, here, but this is safer than
2319 removing the check at the start of the function. */
2320 if (any_cfis_emitted || clobbers_queued_reg_save (insn))
2321 need_flush = true;
2322 }
2323
2324 if (need_flush)
2325 dwarf2out_flush_queued_reg_saves ();
2326 cfi_insn = NULL;
2327 }
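/* A hedged sketch of the producer side: a backend marks a prologue
   insn frame-related and attaches one of the REG_CFA_* notes handled
   above (the helpers named here are real, the RTL is invented):

	insn = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
				       plus_constant (stack_pointer_rtx,
						      -16)));
	RTX_FRAME_RELATED_P (insn) = 1;
	add_reg_note (insn, REG_CFA_ADJUST_CFA, NULL_RTX);

   With a null note datum, the REG_CFA_ADJUST_CFA case falls back to
   the insn pattern itself, as coded above.  */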
2328
2329 /* Examine CFI and return true if a cfi label and set_loc is needed
2330 beforehand. Even when generating CFI assembler instructions, we
2331 still have to add the cfi to the list so that lookup_cfa works
2332 later on.  At -g2 and above we even need to force emission of CFI
2333 labels and add a DW_CFA_set_loc to the list, for the benefit of
2334 convert_cfa_to_fb_loc_list.  If we're generating DWARF3 output we
2335 use DW_OP_call_frame_cfa and so don't use convert_cfa_to_fb_loc_list.  */
2336
2337 static bool
2338 cfi_label_required_p (dw_cfi_ref cfi)
2339 {
2340 if (!dwarf2out_do_cfi_asm ())
2341 return true;
2342
2343 if (dwarf_version == 2
2344 && debug_info_level > DINFO_LEVEL_TERSE
2345 && (write_symbols == DWARF2_DEBUG
2346 || write_symbols == VMS_AND_DWARF2_DEBUG))
2347 {
2348 switch (cfi->dw_cfi_opc)
2349 {
2350 case DW_CFA_def_cfa_offset:
2351 case DW_CFA_def_cfa_offset_sf:
2352 case DW_CFA_def_cfa_register:
2353 case DW_CFA_def_cfa:
2354 case DW_CFA_def_cfa_sf:
2355 case DW_CFA_def_cfa_expression:
2356 case DW_CFA_restore_state:
2357 return true;
2358 default:
2359 return false;
2360 }
2361 }
2362 return false;
2363 }
2364
2365 /* Walk the function, looking for NOTE_INSN_CFI notes. Add the CFIs to the
2366 function's FDE, adding CFI labels and set_loc/advance_loc opcodes as
2367 necessary. */
2368 static void
2369 add_cfis_to_fde (void)
2370 {
2371 dw_fde_ref fde = cfun->fde;
2372 rtx insn, next;
2373 /* We always start with a function_begin label, so the first CFI can use an advance_loc.  */
2374 bool first = false;
2375
2376 for (insn = get_insns (); insn; insn = next)
2377 {
2378 next = NEXT_INSN (insn);
2379
2380 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
2381 {
2382 /* Don't attempt to advance_loc4 between labels
2383 in different sections. */
2384 first = true;
2385 }
2386
2387 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_CFI)
2388 {
2389 bool required = cfi_label_required_p (NOTE_CFI (insn));
2390 while (next && NOTE_P (next) && NOTE_KIND (next) == NOTE_INSN_CFI)
2391 {
2392 required |= cfi_label_required_p (NOTE_CFI (next));
2393 next = NEXT_INSN (next);
2394 }
2395 if (required)
2396 {
2397 int num = dwarf2out_cfi_label_num;
2398 const char *label = dwarf2out_cfi_label ();
2399 dw_cfi_ref xcfi;
2400 rtx tmp;
2401
2402 /* Set the location counter to the new label. */
2403 xcfi = new_cfi ();
2404 xcfi->dw_cfi_opc = (first ? DW_CFA_set_loc
2405 : DW_CFA_advance_loc4);
2406 xcfi->dw_cfi_oprnd1.dw_cfi_addr = label;
2407 VEC_safe_push (dw_cfi_ref, gc, fde->dw_fde_cfi, xcfi);
2408
2409 tmp = emit_note_before (NOTE_INSN_CFI_LABEL, insn);
2410 NOTE_LABEL_NUMBER (tmp) = num;
2411 }
2412
2413 do
2414 {
2415 VEC_safe_push (dw_cfi_ref, gc, fde->dw_fde_cfi, NOTE_CFI (insn));
2416 insn = NEXT_INSN (insn);
2417 }
2418 while (insn != next);
2419 first = false;
2420 }
2421 }
2422 }
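/* Schematically, a run of queued CFIs turns into one label note in
   the insn stream plus entries in the FDE's CFI vector (the label
   number is invented):

	insn stream:	NOTE_INSN_CFI_LABEL 3
	FDE vector:	DW_CFA_advance_loc4 "LCFI3",
			DW_CFA_def_cfa_offset 16, ...

   with DW_CFA_set_loc replacing the advance for the first CFI after
   a text section switch.  */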
2423
2424 /* Scan the function and create the initial set of CFI notes. */
2425
2426 static void
2427 create_cfi_notes (void)
2428 {
2429 rtx insn;
2430
2431 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
2432 {
2433 rtx pat;
2434
2435 if (BARRIER_P (insn))
2436 {
2437 dwarf2out_frame_debug (insn, false);
2438 continue;
2439 }
2440
2441 if (NOTE_P (insn))
2442 {
2443 switch (NOTE_KIND (insn))
2444 {
2445 case NOTE_INSN_PROLOGUE_END:
2446 cfi_insn = PREV_INSN (insn);
2447 dwarf2out_flush_queued_reg_saves ();
2448 cfi_insn = NULL;
2449 break;
2450
2451 case NOTE_INSN_EPILOGUE_BEG:
2452 #if defined(HAVE_epilogue)
2453 dwarf2out_cfi_begin_epilogue (insn);
2454 #endif
2455 break;
2456
2457 case NOTE_INSN_CFA_RESTORE_STATE:
2458 cfi_insn = insn;
2459 dwarf2out_frame_debug_restore_state ();
2460 cfi_insn = NULL;
2461 break;
2462 }
2463 continue;
2464 }
2465
2466 if (!NONDEBUG_INSN_P (insn))
2467 continue;
2468
2469 pat = PATTERN (insn);
2470 if (asm_noperands (pat) >= 0)
2471 {
2472 dwarf2out_frame_debug (insn, false);
2473 continue;
2474 }
2475
2476 if (GET_CODE (pat) == SEQUENCE)
2477 {
2478 int i, n = XVECLEN (pat, 0);
2479 for (i = 1; i < n; ++i)
2480 dwarf2out_frame_debug (XVECEXP (pat, 0, i), false);
2481 }
2482
2483 if (CALL_P (insn)
2484 || find_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL))
2485 dwarf2out_frame_debug (insn, false);
2486
2487 dwarf2out_frame_debug (insn, true);
2488 }
2489 }
2490
2491 /* Determine if we need to save and restore CFI information around this
2492 epilogue. If SIBCALL is true, then this is a sibcall epilogue. If
2493 we do need to save/restore, then emit the save now, and insert a
2494 NOTE_INSN_CFA_RESTORE_STATE at the appropriate place in the stream. */
2495
2496 static void
2497 dwarf2out_cfi_begin_epilogue (rtx insn)
2498 {
2499 bool saw_frp = false;
2500 rtx i;
2501
2502 /* Scan forward to the return insn, noticing if there are possible
2503 frame related insns. */
2504 for (i = NEXT_INSN (insn); i ; i = NEXT_INSN (i))
2505 {
2506 if (!INSN_P (i))
2507 continue;
2508
2509 /* Look for both regular and sibcalls to end the block. */
2510 if (returnjump_p (i))
2511 break;
2512 if (CALL_P (i) && SIBLING_CALL_P (i))
2513 break;
2514
2515 if (GET_CODE (PATTERN (i)) == SEQUENCE)
2516 {
2517 int idx;
2518 rtx seq = PATTERN (i);
2519
2520 if (returnjump_p (XVECEXP (seq, 0, 0)))
2521 break;
2522 if (CALL_P (XVECEXP (seq, 0, 0))
2523 && SIBLING_CALL_P (XVECEXP (seq, 0, 0)))
2524 break;
2525
2526 for (idx = 0; idx < XVECLEN (seq, 0); idx++)
2527 if (RTX_FRAME_RELATED_P (XVECEXP (seq, 0, idx)))
2528 saw_frp = true;
2529 }
2530
2531 if (RTX_FRAME_RELATED_P (i))
2532 saw_frp = true;
2533 }
2534
2535 /* If the port doesn't emit epilogue unwind info, we don't need a
2536 save/restore pair. */
2537 if (!saw_frp)
2538 return;
2539
2540 /* Otherwise, search forward to see whether the return insn was in the
2541 last basic block of the function.  If so, we don't need save/restore.  */
2542 gcc_assert (i != NULL);
2543 i = next_real_insn (i);
2544 if (i == NULL)
2545 return;
2546
2547 /* Insert the restore before that next real insn in the stream, and before
2548 a potential NOTE_INSN_EPILOGUE_BEG -- we do need these notes to be
2549 properly nested. This should be after any label or alignment. This
2550 will be pushed into the CFI stream by the function below. */
2551 while (1)
2552 {
2553 rtx p = PREV_INSN (i);
2554 if (!NOTE_P (p))
2555 break;
2556 if (NOTE_KIND (p) == NOTE_INSN_BASIC_BLOCK)
2557 break;
2558 i = p;
2559 }
2560 emit_note_before (NOTE_INSN_CFA_RESTORE_STATE, i);
2561
2562 emit_cfa_remember = true;
2563
2564 /* And emulate the state save. */
2565 gcc_assert (!cfa_remember.in_use);
2566 cfa_remember = cfa;
2567 old_cfa_remember = old_cfa;
2568 cfa_remember.in_use = 1;
2569 }
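/* With CFI assembler output, the emulated save and the restore note
   ultimately become a matched pair of directives, schematically:

	.cfi_remember_state
	  ... epilogue CFI, e.g. .cfi_def_cfa 7, 8 ...
	.cfi_restore_state

   so the instructions following the epilogue resume unwinding with
   the remembered state.  */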
2570
2571 /* A "subroutine" of dwarf2out_cfi_begin_epilogue. Emit the restore
2572 required. */
2573
2574 static void
2575 dwarf2out_frame_debug_restore_state (void)
2576 {
2577 dw_cfi_ref cfi = new_cfi ();
2578
2579 cfi->dw_cfi_opc = DW_CFA_restore_state;
2580 add_fde_cfi (cfi);
2581
2582 gcc_assert (cfa_remember.in_use);
2583 cfa = cfa_remember;
2584 old_cfa = old_cfa_remember;
2585 cfa_remember.in_use = 0;
2586 }
2587 \f
2588
2589 /* Annotate the function with NOTE_INSN_CFI notes to record the CFI
2590 state at each location within the function. These notes will be
2591 emitted during pass_final. */
2592
2593 static unsigned int
2594 execute_dwarf2_frame (void)
2595 {
2596 /* The first time we're called, compute the incoming frame state. */
2597 if (cie_cfi_vec == NULL)
2598 {
2599 dw_cfa_location loc;
2600
2601 memset (&old_cfa, 0, sizeof (old_cfa));
2602 old_cfa.reg = INVALID_REGNUM;
2603
2604 /* On entry, the Canonical Frame Address is at SP. */
2605 memset (&loc, 0, sizeof (loc));
2606 loc.reg = STACK_POINTER_REGNUM;
2607 loc.offset = INCOMING_FRAME_SP_OFFSET;
2608 def_cfa_1 (true, &loc);
2609
2610 if (targetm.debug_unwind_info () == UI_DWARF2
2611 || targetm_common.except_unwind_info (&global_options) == UI_DWARF2)
2612 initial_return_save (INCOMING_RETURN_ADDR_RTX);
2613 }
2614
2615 /* Set up state for generating call frame debug info. */
2616 lookup_cfa (&cfa);
2617 gcc_assert (cfa.reg
2618 == (unsigned long) DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM));
2619
2620 old_cfa = cfa;
2621 cfa.reg = STACK_POINTER_REGNUM;
2622 cfa_store = cfa;
2623 cfa_temp.reg = -1;
2624 cfa_temp.offset = 0;
2625
2626 dwarf2out_alloc_current_fde ();
2627
2628 /* Do the work. */
2629 create_cfi_notes ();
2630 add_cfis_to_fde ();
2631
2632 /* Reset all function-specific information, particularly for GC. */
2633 XDELETEVEC (barrier_args_size);
2634 barrier_args_size = NULL;
2635 regs_saved_in_regs = NULL;
2636 queued_reg_saves = NULL;
2637 args_size = old_args_size = 0;
2638
2639 return 0;
2640 }
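/* As a concrete (illustrative) case: on a 32-bit x86-like target the
   call insn pushes the return address, so INCOMING_FRAME_SP_OFFSET is
   4 and the initial state above amounts to DW_CFA_def_cfa (sp, 4)
   together with a record that the return address sits at CFA - 4.  */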
2641 \f
2642
2643 /* Save the result of dwarf2out_do_frame across PCH.
2644 This variable is tri-state, with 0 unset, >0 true, <0 false. */
2645 static GTY(()) signed char saved_do_cfi_asm = 0;
2646
2647 /* Decide whether we want to emit frame unwind information for the current
2648 translation unit. */
2649
2650 bool
2651 dwarf2out_do_frame (void)
2652 {
2653 /* We want to emit correct CFA location expressions or lists, so we
2654 have to return true if we're going to output debug info, even if
2655 we're not going to output frame or unwind info. */
2656 if (write_symbols == DWARF2_DEBUG || write_symbols == VMS_AND_DWARF2_DEBUG)
2657 return true;
2658
2659 if (saved_do_cfi_asm > 0)
2660 return true;
2661
2662 if (targetm.debug_unwind_info () == UI_DWARF2)
2663 return true;
2664
2665 if ((flag_unwind_tables || flag_exceptions)
2666 && targetm_common.except_unwind_info (&global_options) == UI_DWARF2)
2667 return true;
2668
2669 return false;
2670 }
2671
2672 /* Decide whether to emit frame unwind via assembler directives. */
2673
2674 bool
2675 dwarf2out_do_cfi_asm (void)
2676 {
2677 int enc;
2678
2679 #ifdef MIPS_DEBUGGING_INFO
2680 return false;
2681 #endif
2682
2683 if (saved_do_cfi_asm != 0)
2684 return saved_do_cfi_asm > 0;
2685
2686 /* Assume failure for a moment. */
2687 saved_do_cfi_asm = -1;
2688
2689 if (!flag_dwarf2_cfi_asm || !dwarf2out_do_frame ())
2690 return false;
2691 if (!HAVE_GAS_CFI_PERSONALITY_DIRECTIVE)
2692 return false;
2693
2694 /* Make sure the personality encoding is one the assembler can support.
2695 In particular, aligned addresses can't be handled. */
2696 enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/2, /*global=*/1);
2697 if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
2698 return false;
2699 enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/0, /*global=*/0);
2700 if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
2701 return false;
2702
2703 /* If we can't get the assembler to emit only .debug_frame, and we don't need
2704 dwarf2 unwind info for exceptions, then emit .debug_frame by hand. */
2705 if (!HAVE_GAS_CFI_SECTIONS_DIRECTIVE
2706 && !flag_unwind_tables && !flag_exceptions
2707 && targetm_common.except_unwind_info (&global_options) != UI_DWARF2)
2708 return false;
2709
2710 /* Success! */
2711 saved_do_cfi_asm = 1;
2712 return true;
2713 }
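/* Concretely, using the DW_EH_PE_* values from dwarf2.h: an encoding
   such as DW_EH_PE_pcrel | DW_EH_PE_sdata4 (0x1b) passes the checks
   above, whereas DW_EH_PE_aligned (0x50) is rejected, since gas has
   no way to emit aligned personality references.  */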
2714
2715 static bool
2716 gate_dwarf2_frame (void)
2717 {
2718 #ifndef HAVE_prologue
2719 /* Targets which still implement the prologue in assembler text
2720 cannot use the generic dwarf2 unwinding. */
2721 return false;
2722 #endif
2723
2724 /* ??? What to do for UI_TARGET unwinding? They might be able to benefit
2725 from the optimized shrink-wrapping annotations that we will compute.
2726 For now, only produce the CFI notes for dwarf2. */
2727 return dwarf2out_do_frame ();
2728 }
2729
2730 struct rtl_opt_pass pass_dwarf2_frame =
2731 {
2732 {
2733 RTL_PASS,
2734 "dwarf2", /* name */
2735 gate_dwarf2_frame, /* gate */
2736 execute_dwarf2_frame, /* execute */
2737 NULL, /* sub */
2738 NULL, /* next */
2739 0, /* static_pass_number */
2740 TV_FINAL, /* tv_id */
2741 0, /* properties_required */
2742 0, /* properties_provided */
2743 0, /* properties_destroyed */
2744 0, /* todo_flags_start */
2745 0 /* todo_flags_finish */
2746 }
2747 };
2748
2749 #include "gt-dwarf2cfi.h"