dwarf2out: Split CFI construction routines into a new file.
[gcc.git] / gcc / dwarf2cfi.c
1 /* Dwarf2 Call Frame Information helper routines.
2 Copyright (C) 1992, 1993, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
3 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
4 Free Software Foundation, Inc.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "version.h"
27 #include "flags.h"
28 #include "rtl.h"
29 #include "function.h"
30 #include "dwarf2.h"
31 #include "dwarf2out.h"
32 #include "dwarf2asm.h"
33 #include "ggc.h"
34 #include "tm_p.h"
35 #include "target.h"
36 #include "common/common-target.h"
37 #include "tree-pass.h"
38
39 #include "except.h" /* expand_builtin_dwarf_sp_column */
40 #include "expr.h" /* init_return_column_size */
41 #include "regs.h" /* expand_builtin_init_dwarf_reg_sizes */
42 #include "output.h" /* asm_out_file */
43 #include "debug.h" /* dwarf2out_do_frame, dwarf2out_do_cfi_asm */
44
45
/* ??? Poison these here until it can be done generically.  They've been
   totally replaced in this file; make sure it stays that way.  */
#undef DWARF2_UNWIND_INFO
#undef DWARF2_FRAME_INFO
#if (GCC_VERSION >= 3000)
 #pragma GCC poison DWARF2_UNWIND_INFO DWARF2_FRAME_INFO
#endif

/* If the target does not define INCOMING_RETURN_ADDR_RTX, any use of it
   is a bug; the comma expression aborts before yielding NULL_RTX.  */
#ifndef INCOMING_RETURN_ADDR_RTX
#define INCOMING_RETURN_ADDR_RTX  (gcc_unreachable (), NULL_RTX)
#endif

/* The size of the target's pointer type, in bytes.  */
#ifndef PTR_SIZE
#define PTR_SIZE (POINTER_SIZE / BITS_PER_UNIT)
#endif

/* Maximum size (in bytes) of an artificially generated label.  */
#define MAX_ARTIFICIAL_LABEL_BYTES	30

/* The size of addresses as they appear in the Dwarf 2 data.
   Some architectures use word addresses to refer to code locations,
   but Dwarf 2 info always uses byte addresses.  On such machines,
   Dwarf 2 addresses need to be larger than the architecture's
   pointers.  */
#ifndef DWARF2_ADDR_SIZE
#define DWARF2_ADDR_SIZE (POINTER_SIZE / BITS_PER_UNIT)
#endif

/* The size in bytes of a DWARF field indicating an offset or length
   relative to a debug info section, specified to be 4 bytes in the
   DWARF-2 specification.  The SGI/MIPS ABI defines it to be the same
   as PTR_SIZE.  */

#ifndef DWARF_OFFSET_SIZE
#define DWARF_OFFSET_SIZE 4
#endif

/* According to the (draft) DWARF 3 specification, the initial length
   should either be 4 or 12 bytes.  When it's 12 bytes, the first 4
   bytes are 0xffffffff, followed by the length stored in the next 8
   bytes.

   However, the SGI/MIPS ABI uses an initial length which is equal to
   DWARF_OFFSET_SIZE.  It is defined (elsewhere) accordingly.  */

#ifndef DWARF_INITIAL_LENGTH_SIZE
#define DWARF_INITIAL_LENGTH_SIZE (DWARF_OFFSET_SIZE == 4 ? 4 : 12)
#endif

/* Round SIZE up to the nearest BOUNDARY.  */
#define DWARF_ROUND(SIZE,BOUNDARY) \
  ((((SIZE) + (BOUNDARY) - 1) / (BOUNDARY)) * (BOUNDARY))

/* Offsets recorded in opcodes are a multiple of this alignment factor.
   Negative when the stack grows downward so that factored offsets of
   saved registers (below the CFA) come out positive.  */
#ifndef DWARF_CIE_DATA_ALIGNMENT
#ifdef STACK_GROWS_DOWNWARD
#define DWARF_CIE_DATA_ALIGNMENT (-((int) UNITS_PER_WORD))
#else
#define DWARF_CIE_DATA_ALIGNMENT ((int) UNITS_PER_WORD)
#endif
#endif

/* CIE identifier.  DW64_CIE_ID only fits when the host has 64-bit
   HOST_WIDE_INT.  */
#if HOST_BITS_PER_WIDE_INT >= 64
#define DWARF_CIE_ID \
  (unsigned HOST_WIDE_INT) (DWARF_OFFSET_SIZE == 4 ? DW_CIE_ID : DW64_CIE_ID)
#else
#define DWARF_CIE_ID DW_CIE_ID
#endif

/* The DWARF 2 CFA column which tracks the return address.  Normally this
   is the column for PC, or the first column after all of the hard
   registers.  */
#ifndef DWARF_FRAME_RETURN_COLUMN
#ifdef PC_REGNUM
#define DWARF_FRAME_RETURN_COLUMN	DWARF_FRAME_REGNUM (PC_REGNUM)
#else
#define DWARF_FRAME_RETURN_COLUMN	DWARF_FRAME_REGISTERS
#endif
#endif

/* The mapping from gcc register number to DWARF 2 CFA column number.  By
   default, we just provide columns for all registers.  */
#ifndef DWARF_FRAME_REGNUM
#define DWARF_FRAME_REGNUM(REG) DBX_REGISTER_NUMBER (REG)
#endif

/* Map register numbers held in the call frame info that gcc has
   collected using DWARF_FRAME_REGNUM to those that should be output in
   .debug_frame and .eh_frame.  */
#ifndef DWARF2_FRAME_REG_OUT
#define DWARF2_FRAME_REG_OUT(REGNO, FOR_EH) (REGNO)
#endif
140 \f
/* A vector of call frame insns for the CIE.  Shared by all FDEs and
   replayed first by lookup_cfa.  */
cfi_vec cie_cfi_vec;

/* Counter used by dwarf2out_cfi_label to generate unique "LCFI" labels;
   GC-rooted so it persists across per-function collections.  */
static GTY(()) unsigned long dwarf2out_cfi_label_num;
146 \f
147 /* Hook used by __throw. */
148
149 rtx
150 expand_builtin_dwarf_sp_column (void)
151 {
152 unsigned int dwarf_regnum = DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM);
153 return GEN_INT (DWARF2_FRAME_REG_OUT (dwarf_regnum, 1));
154 }
155
156 /* MEM is a memory reference for the register size table, each element of
157 which has mode MODE. Initialize column C as a return address column. */
158
159 static void
160 init_return_column_size (enum machine_mode mode, rtx mem, unsigned int c)
161 {
162 HOST_WIDE_INT offset = c * GET_MODE_SIZE (mode);
163 HOST_WIDE_INT size = GET_MODE_SIZE (Pmode);
164 emit_move_insn (adjust_address (mem, mode, offset), GEN_INT (size));
165 }
166
/* Generate code to initialize the register size table.  ADDRESS is the
   (tree) address of the table; each entry has the mode of char_type_node
   and holds the size in bytes of the save slot for the corresponding
   DWARF frame column.  Expands __builtin_init_dwarf_reg_size_table.  */

void
expand_builtin_init_dwarf_reg_sizes (tree address)
{
  unsigned int i;
  enum machine_mode mode = TYPE_MODE (char_type_node);
  rtx addr = expand_normal (address);
  rtx mem = gen_rtx_MEM (BLKmode, addr);
  bool wrote_return_column = false;

  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    {
      /* Map the gcc register number to its output frame column.  */
      int rnum = DWARF2_FRAME_REG_OUT (DWARF_FRAME_REGNUM (i), 1);

      if (rnum < DWARF_FRAME_REGISTERS)
	{
	  HOST_WIDE_INT offset = rnum * GET_MODE_SIZE (mode);
	  enum machine_mode save_mode = reg_raw_mode[i];
	  HOST_WIDE_INT size;

	  /* If only part of the register is preserved across calls, the
	     slot must be sized for the mode actually saved.  */
	  if (HARD_REGNO_CALL_PART_CLOBBERED (i, save_mode))
	    save_mode = choose_hard_reg_mode (i, 1, true);
	  if (DWARF_FRAME_REGNUM (i) == DWARF_FRAME_RETURN_COLUMN)
	    {
	      if (save_mode == VOIDmode)
		continue;
	      wrote_return_column = true;
	    }
	  size = GET_MODE_SIZE (save_mode);
	  /* A negative column offset means DWARF2_FRAME_REG_OUT mapped
	     this register to a column we must not write.  */
	  if (offset < 0)
	    continue;

	  emit_move_insn (adjust_address (mem, mode, offset),
			  gen_int_mode (size, mode));
	}
    }

  /* Make sure the return-address column always has a size, even if no
     hard register maps to it.  */
  if (!wrote_return_column)
    init_return_column_size (mode, mem, DWARF_FRAME_RETURN_COLUMN);

#ifdef DWARF_ALT_FRAME_RETURN_COLUMN
  init_return_column_size (mode, mem, DWARF_ALT_FRAME_RETURN_COLUMN);
#endif

  /* Let the target fill in any extra entries.  */
  targetm.init_dwarf_reg_sizes_extra (address);
}
214
215 /* Divide OFF by DWARF_CIE_DATA_ALIGNMENT, asserting no remainder. */
216
217 static inline HOST_WIDE_INT
218 div_data_align (HOST_WIDE_INT off)
219 {
220 HOST_WIDE_INT r = off / DWARF_CIE_DATA_ALIGNMENT;
221 gcc_assert (r * DWARF_CIE_DATA_ALIGNMENT == off);
222 return r;
223 }
224
225 /* Return true if we need a signed version of a given opcode
226 (e.g. DW_CFA_offset_extended_sf vs DW_CFA_offset_extended). */
227
228 static inline bool
229 need_data_align_sf_opcode (HOST_WIDE_INT off)
230 {
231 return DWARF_CIE_DATA_ALIGNMENT < 0 ? off > 0 : off < 0;
232 }
233
234 /* Return a pointer to a newly allocated Call Frame Instruction. */
235
236 static inline dw_cfi_ref
237 new_cfi (void)
238 {
239 dw_cfi_ref cfi = ggc_alloc_dw_cfi_node ();
240
241 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = 0;
242 cfi->dw_cfi_oprnd2.dw_cfi_reg_num = 0;
243
244 return cfi;
245 }
246
/* Add a Call Frame Instruction CFI to the instruction vector *VEC.  */

static inline void
add_cfi (cfi_vec *vec, dw_cfi_ref cfi)
{
  dw_fde_ref fde = current_fde ();

  /* When DRAP is used, CFA is defined with an expression.  Redefine
     CFA may lead to a different CFA value.   */
  /* ??? Of course, this heuristic fails when we're annotating epilogues,
     because of course we'll always want to redefine the CFA back to the
     stack pointer on the way out.  Where should we move this check?  */
  /* NOTE: this sanity check is deliberately disabled (the "0 &&") for
     the reason given above; the switch would abort on any CFA
     redefinition while a DRAP register is live.  */
  if (0 && fde && fde->drap_reg != INVALID_REGNUM)
    switch (cfi->dw_cfi_opc)
      {
      case DW_CFA_def_cfa_register:
      case DW_CFA_def_cfa_offset:
      case DW_CFA_def_cfa_offset_sf:
      case DW_CFA_def_cfa:
      case DW_CFA_def_cfa_sf:
	gcc_unreachable ();

      default:
	break;
      }

  VEC_safe_push (dw_cfi_ref, gc, *vec, cfi);
}
275
276 /* Generate a new label for the CFI info to refer to. FORCE is true
277 if a label needs to be output even when using .cfi_* directives. */
278
279 static char *
280 dwarf2out_cfi_label (bool force)
281 {
282 static char label[20];
283
284 if (!force && dwarf2out_do_cfi_asm ())
285 {
286 /* In this case, we will be emitting the asm directive instead of
287 the label, so just return a placeholder to keep the rest of the
288 interfaces happy. */
289 strcpy (label, "<do not output>");
290 }
291 else
292 {
293 int num = dwarf2out_cfi_label_num++;
294 ASM_GENERATE_INTERNAL_LABEL (label, "LCFI", num);
295 ASM_OUTPUT_DEBUG_LABEL (asm_out_file, "LCFI", num);
296 }
297
298 return label;
299 }
300
/* True if remember_state should be emitted before following CFI directive.
   Consumed (and cleared) lazily by add_fde_cfi, which emits the
   DW_CFA_remember_state just before the next instruction is added.  */
static bool emit_cfa_remember;

/* True if any CFI directives were emitted at the current insn.  */
static bool any_cfis_emitted;
306
/* Add CFI to the current fde at the PC value indicated by LABEL if specified,
   or to the CIE if LABEL is NULL.  An empty LABEL string means "generate a
   label here"; the placeholder "<do not output>" comes from
   dwarf2out_cfi_label when .cfi_* directives are in use.  May recurse once
   to flush a pending DW_CFA_remember_state.  */

static void
add_fde_cfi (const char *label, dw_cfi_ref cfi)
{
  cfi_vec *vec;

  if (cie_cfi_vec == NULL)
    cie_cfi_vec = VEC_alloc (dw_cfi_ref, gc, 20);

  /* Default destination: the CIE vector; switched to the FDE vector
     below when LABEL identifies a location in the function body.  */
  vec = &cie_cfi_vec;

  if (emit_cfa_remember)
    {
      dw_cfi_ref cfi_remember;

      /* Emit the state save.  Clear the flag first so the recursive
	 call does not loop.  */
      emit_cfa_remember = false;
      cfi_remember = new_cfi ();
      cfi_remember->dw_cfi_opc = DW_CFA_remember_state;
      add_fde_cfi (label, cfi_remember);
    }

  if (dwarf2out_do_cfi_asm ())
    {
      if (label)
	{
	  dw_fde_ref fde = current_fde ();

	  gcc_assert (fde != NULL);

	  /* We still have to add the cfi to the list so that lookup_cfa
	     works later on.  When -g2 and above we even need to force
	     emitting of CFI labels and add to list a DW_CFA_set_loc for
	     convert_cfa_to_fb_loc_list purposes.  If we're generating
	     DWARF3 output we use DW_OP_call_frame_cfa and so don't use
	     convert_cfa_to_fb_loc_list.  */
	  if (dwarf_version == 2
	      && debug_info_level > DINFO_LEVEL_TERSE
	      && (write_symbols == DWARF2_DEBUG
		  || write_symbols == VMS_AND_DWARF2_DEBUG))
	    {
	      switch (cfi->dw_cfi_opc)
		{
		/* Only CFA-changing (and restore_state) opcodes need a
		   precise location for the loc list.  */
		case DW_CFA_def_cfa_offset:
		case DW_CFA_def_cfa_offset_sf:
		case DW_CFA_def_cfa_register:
		case DW_CFA_def_cfa:
		case DW_CFA_def_cfa_sf:
		case DW_CFA_def_cfa_expression:
		case DW_CFA_restore_state:
		  if (*label == 0 || strcmp (label, "<do not output>") == 0)
		    label = dwarf2out_cfi_label (true);

		  if (fde->dw_fde_current_label == NULL
		      || strcmp (label, fde->dw_fde_current_label) != 0)
		    {
		      dw_cfi_ref xcfi;

		      label = xstrdup (label);

		      /* Set the location counter to the new label.  */
		      xcfi = new_cfi ();
		      /* It doesn't matter whether DW_CFA_set_loc
			 or DW_CFA_advance_loc4 is added here, those aren't
			 emitted into assembly, only looked up by
			 convert_cfa_to_fb_loc_list.  */
		      xcfi->dw_cfi_opc = DW_CFA_set_loc;
		      xcfi->dw_cfi_oprnd1.dw_cfi_addr = label;
		      add_cfi (&fde->dw_fde_cfi, xcfi);
		      fde->dw_fde_current_label = label;
		    }
		  break;
		default:
		  break;
		}
	    }

	  output_cfi_directive (cfi);

	  vec = &fde->dw_fde_cfi;
	  any_cfis_emitted = true;
	}
      /* ??? If this is a CFI for the CIE, we don't emit.  This
	 assumes that the standard CIE contents that the assembler
	 uses matches the standard CIE contents that the compiler
	 uses.  This is probably a bad assumption.  I'm not quite
	 sure how to address this for now.  */
    }
  else if (label)
    {
      dw_fde_ref fde = current_fde ();

      gcc_assert (fde != NULL);

      if (*label == 0)
	label = dwarf2out_cfi_label (false);

      if (fde->dw_fde_current_label == NULL
	  || strcmp (label, fde->dw_fde_current_label) != 0)
	{
	  dw_cfi_ref xcfi;

	  label = xstrdup (label);

	  /* Set the location counter to the new label.  */
	  xcfi = new_cfi ();
	  /* If we have a current label, advance from there, otherwise
	     set the location directly using set_loc.  */
	  xcfi->dw_cfi_opc = fde->dw_fde_current_label
			     ? DW_CFA_advance_loc4
			     : DW_CFA_set_loc;
	  xcfi->dw_cfi_oprnd1.dw_cfi_addr = label;
	  add_cfi (&fde->dw_fde_cfi, xcfi);

	  fde->dw_fde_current_label = label;
	}

      vec = &fde->dw_fde_cfi;
      any_cfis_emitted = true;
    }

  add_cfi (vec, cfi);
}
432
433 /* This function fills in aa dw_cfa_location structure from a dwarf location
434 descriptor sequence. */
435
436 static void
437 get_cfa_from_loc_descr (dw_cfa_location *cfa, struct dw_loc_descr_struct *loc)
438 {
439 struct dw_loc_descr_struct *ptr;
440 cfa->offset = 0;
441 cfa->base_offset = 0;
442 cfa->indirect = 0;
443 cfa->reg = -1;
444
445 for (ptr = loc; ptr != NULL; ptr = ptr->dw_loc_next)
446 {
447 enum dwarf_location_atom op = ptr->dw_loc_opc;
448
449 switch (op)
450 {
451 case DW_OP_reg0:
452 case DW_OP_reg1:
453 case DW_OP_reg2:
454 case DW_OP_reg3:
455 case DW_OP_reg4:
456 case DW_OP_reg5:
457 case DW_OP_reg6:
458 case DW_OP_reg7:
459 case DW_OP_reg8:
460 case DW_OP_reg9:
461 case DW_OP_reg10:
462 case DW_OP_reg11:
463 case DW_OP_reg12:
464 case DW_OP_reg13:
465 case DW_OP_reg14:
466 case DW_OP_reg15:
467 case DW_OP_reg16:
468 case DW_OP_reg17:
469 case DW_OP_reg18:
470 case DW_OP_reg19:
471 case DW_OP_reg20:
472 case DW_OP_reg21:
473 case DW_OP_reg22:
474 case DW_OP_reg23:
475 case DW_OP_reg24:
476 case DW_OP_reg25:
477 case DW_OP_reg26:
478 case DW_OP_reg27:
479 case DW_OP_reg28:
480 case DW_OP_reg29:
481 case DW_OP_reg30:
482 case DW_OP_reg31:
483 cfa->reg = op - DW_OP_reg0;
484 break;
485 case DW_OP_regx:
486 cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
487 break;
488 case DW_OP_breg0:
489 case DW_OP_breg1:
490 case DW_OP_breg2:
491 case DW_OP_breg3:
492 case DW_OP_breg4:
493 case DW_OP_breg5:
494 case DW_OP_breg6:
495 case DW_OP_breg7:
496 case DW_OP_breg8:
497 case DW_OP_breg9:
498 case DW_OP_breg10:
499 case DW_OP_breg11:
500 case DW_OP_breg12:
501 case DW_OP_breg13:
502 case DW_OP_breg14:
503 case DW_OP_breg15:
504 case DW_OP_breg16:
505 case DW_OP_breg17:
506 case DW_OP_breg18:
507 case DW_OP_breg19:
508 case DW_OP_breg20:
509 case DW_OP_breg21:
510 case DW_OP_breg22:
511 case DW_OP_breg23:
512 case DW_OP_breg24:
513 case DW_OP_breg25:
514 case DW_OP_breg26:
515 case DW_OP_breg27:
516 case DW_OP_breg28:
517 case DW_OP_breg29:
518 case DW_OP_breg30:
519 case DW_OP_breg31:
520 cfa->reg = op - DW_OP_breg0;
521 cfa->base_offset = ptr->dw_loc_oprnd1.v.val_int;
522 break;
523 case DW_OP_bregx:
524 cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
525 cfa->base_offset = ptr->dw_loc_oprnd2.v.val_int;
526 break;
527 case DW_OP_deref:
528 cfa->indirect = 1;
529 break;
530 case DW_OP_plus_uconst:
531 cfa->offset = ptr->dw_loc_oprnd1.v.val_unsigned;
532 break;
533 default:
534 gcc_unreachable ();
535 }
536 }
537 }
538
/* Subroutine of lookup_cfa.  Interpret one CFI instruction CFI,
   updating *LOC with its effect on the CFA rule.  *REMEMBER holds the
   single-level saved state for DW_CFA_remember_state /
   DW_CFA_restore_state pairs (nesting is not supported; asserted).
   Opcodes that do not affect the CFA are ignored.  */

void
lookup_cfa_1 (dw_cfi_ref cfi, dw_cfa_location *loc, dw_cfa_location *remember)
{
  switch (cfi->dw_cfi_opc)
    {
    case DW_CFA_def_cfa_offset:
    case DW_CFA_def_cfa_offset_sf:
      loc->offset = cfi->dw_cfi_oprnd1.dw_cfi_offset;
      break;
    case DW_CFA_def_cfa_register:
      loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
      break;
    case DW_CFA_def_cfa:
    case DW_CFA_def_cfa_sf:
      loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
      loc->offset = cfi->dw_cfi_oprnd2.dw_cfi_offset;
      break;
    case DW_CFA_def_cfa_expression:
      /* Decode the expression back into register/offset form.  */
      get_cfa_from_loc_descr (loc, cfi->dw_cfi_oprnd1.dw_cfi_loc);
      break;

    case DW_CFA_remember_state:
      gcc_assert (!remember->in_use);
      *remember = *loc;
      remember->in_use = 1;
      break;
    case DW_CFA_restore_state:
      gcc_assert (remember->in_use);
      *loc = *remember;
      remember->in_use = 0;
      break;

    default:
      break;
    }
}
577
/* Find the previous value for the CFA by replaying, in order, every CFI
   emitted so far: first the CIE instructions, then those of the current
   FDE (if any).  The result is stored in *LOC; if no instruction defined
   a CFA, *LOC is zeroed with reg = INVALID_REGNUM.  */

static void
lookup_cfa (dw_cfa_location *loc)
{
  int ix;
  dw_cfi_ref cfi;
  dw_fde_ref fde;
  dw_cfa_location remember;

  memset (loc, 0, sizeof (*loc));
  loc->reg = INVALID_REGNUM;
  /* Start with an empty remember-state slot.  */
  remember = *loc;

  FOR_EACH_VEC_ELT (dw_cfi_ref, cie_cfi_vec, ix, cfi)
    lookup_cfa_1 (cfi, loc, &remember);

  fde = current_fde ();
  if (fde)
    FOR_EACH_VEC_ELT (dw_cfi_ref, fde->dw_fde_cfi, ix, cfi)
      lookup_cfa_1 (cfi, loc, &remember);
}
600
/* The current rule for calculating the DWARF2 canonical frame address.  */
static dw_cfa_location cfa;

/* The register used for saving registers to the stack, and its offset
   from the CFA.  Tracked separately from CFA because register saves may
   be addressed from a different base (e.g. the stack pointer while the
   CFA is the frame pointer).  */
static dw_cfa_location cfa_store;

/* The current save location around an epilogue.  */
static dw_cfa_location cfa_remember;

/* The running total of the size of arguments pushed onto the stack.  */
static HOST_WIDE_INT args_size;

/* The last args_size we actually output (via DW_CFA_GNU_args_size).  */
static HOST_WIDE_INT old_args_size;
616
617 /* Determine if two dw_cfa_location structures define the same data. */
618
619 bool
620 cfa_equal_p (const dw_cfa_location *loc1, const dw_cfa_location *loc2)
621 {
622 return (loc1->reg == loc2->reg
623 && loc1->offset == loc2->offset
624 && loc1->indirect == loc2->indirect
625 && (loc1->indirect == 0
626 || loc1->base_offset == loc2->base_offset));
627 }
628
/* This routine does the actual work.  The CFA is now calculated from
   the dw_cfa_location structure *LOC_P.  Records the new rule in the
   global 'cfa' (and 'cfa_store' offset when based on the same register),
   then emits the cheapest CFI that transforms the previously recorded
   CFA rule into the new one, attached at LABEL.  Emits nothing when the
   rule is unchanged.  */

static void
def_cfa_1 (const char *label, dw_cfa_location *loc_p)
{
  dw_cfi_ref cfi;
  dw_cfa_location old_cfa, loc;

  cfa = *loc_p;
  loc = *loc_p;

  /* Keep the register-save base offset in sync when it tracks the same
     register as the CFA.  */
  if (cfa_store.reg == loc.reg && loc.indirect == 0)
    cfa_store.offset = loc.offset;

  /* Comparisons and emitted operands use DWARF column numbers, not gcc
     register numbers.  */
  loc.reg = DWARF_FRAME_REGNUM (loc.reg);
  lookup_cfa (&old_cfa);

  /* If nothing changed, no need to issue any call frame instructions.  */
  if (cfa_equal_p (&loc, &old_cfa))
    return;

  cfi = new_cfi ();

  if (loc.reg == old_cfa.reg && !loc.indirect && !old_cfa.indirect)
    {
      /* Construct a "DW_CFA_def_cfa_offset <offset>" instruction, indicating
	 the CFA register did not change but the offset did.  The data
	 factoring for DW_CFA_def_cfa_offset_sf happens in output_cfi, or
	 in the assembler via the .cfi_def_cfa_offset directive.  */
      if (loc.offset < 0)
	cfi->dw_cfi_opc = DW_CFA_def_cfa_offset_sf;
      else
	cfi->dw_cfi_opc = DW_CFA_def_cfa_offset;
      cfi->dw_cfi_oprnd1.dw_cfi_offset = loc.offset;
    }

#ifndef MIPS_DEBUGGING_INFO  /* SGI dbx thinks this means no offset.  */
  else if (loc.offset == old_cfa.offset
	   && old_cfa.reg != INVALID_REGNUM
	   && !loc.indirect
	   && !old_cfa.indirect)
    {
      /* Construct a "DW_CFA_def_cfa_register <register>" instruction,
	 indicating the CFA register has changed to <register> but the
	 offset has not changed.  */
      cfi->dw_cfi_opc = DW_CFA_def_cfa_register;
      cfi->dw_cfi_oprnd1.dw_cfi_reg_num = loc.reg;
    }
#endif

  else if (loc.indirect == 0)
    {
      /* Construct a "DW_CFA_def_cfa <register> <offset>" instruction,
	 indicating the CFA register has changed to <register> with
	 the specified offset.  The data factoring for DW_CFA_def_cfa_sf
	 happens in output_cfi, or in the assembler via the .cfi_def_cfa
	 directive.  */
      if (loc.offset < 0)
	cfi->dw_cfi_opc = DW_CFA_def_cfa_sf;
      else
	cfi->dw_cfi_opc = DW_CFA_def_cfa;
      cfi->dw_cfi_oprnd1.dw_cfi_reg_num = loc.reg;
      cfi->dw_cfi_oprnd2.dw_cfi_offset = loc.offset;
    }
  else
    {
      /* Construct a DW_CFA_def_cfa_expression instruction to
	 calculate the CFA using a full location expression since no
	 register-offset pair is available.  */
      struct dw_loc_descr_struct *loc_list;

      cfi->dw_cfi_opc = DW_CFA_def_cfa_expression;
      loc_list = build_cfa_loc (&loc, 0);
      cfi->dw_cfi_oprnd1.dw_cfi_loc = loc_list;
    }

  add_fde_cfi (label, cfi);
}
708
/* Add the CFI for saving a register.  REG is the CFA column number.
   LABEL is passed to add_fde_cfi.
   If SREG is INVALID_REGNUM, the register is saved at OFFSET from the CFA;
   otherwise it is saved in SREG (or, when SREG == REG, marked as holding
   its own entry value via DW_CFA_same_value).  */

static void
reg_save (const char *label, unsigned int reg, unsigned int sreg,
	  HOST_WIDE_INT offset)
{
  dw_cfi_ref cfi = new_cfi ();
  dw_fde_ref fde = current_fde ();

  cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;

  /* When stack is aligned, store REG using DW_CFA_expression with FP.  */
  if (fde
      && fde->stack_realign
      && sreg == INVALID_REGNUM)
    {
      cfi->dw_cfi_opc = DW_CFA_expression;
      cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
      cfi->dw_cfi_oprnd2.dw_cfi_loc
	= build_cfa_aligned_loc (&cfa, offset, fde->stack_realignment);
    }
  else if (sreg == INVALID_REGNUM)
    {
      /* Saved on the stack.  Pick the opcode: signed factored offsets
	 need the _sf form; columns above 0x3f don't fit in the 6-bit
	 operand of plain DW_CFA_offset.  */
      if (need_data_align_sf_opcode (offset))
	cfi->dw_cfi_opc = DW_CFA_offset_extended_sf;
      else if (reg & ~0x3f)
	cfi->dw_cfi_opc = DW_CFA_offset_extended;
      else
	cfi->dw_cfi_opc = DW_CFA_offset;
      cfi->dw_cfi_oprnd2.dw_cfi_offset = offset;
    }
  else if (sreg == reg)
    /* The register keeps its own value; no location needed.
       ??? We could emit a DW_CFA_same_value in this case.  */
    cfi->dw_cfi_opc = DW_CFA_same_value;
  else
    {
      /* Saved in another register.  */
      cfi->dw_cfi_opc = DW_CFA_register;
      cfi->dw_cfi_oprnd2.dw_cfi_reg_num = sreg;
    }

  add_fde_cfi (label, cfi);
}
753
/* Record the initial position of the return address.  RTL is
   INCOMING_RETURN_ADDR_RTX: either a REG (RA in a register), a MEM of
   the stack pointer possibly plus/minus a constant (RA on the stack),
   or a PLUS of one of those with a constant offset (which is ignored
   for unwinding purposes).  Emits a CFI only when the RA does not
   already live in its own return column.  */

static void
initial_return_save (rtx rtl)
{
  unsigned int reg = INVALID_REGNUM;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (rtl))
    {
    case REG:
      /* RA is in a register.  */
      reg = DWARF_FRAME_REGNUM (REGNO (rtl));
      break;

    case MEM:
      /* RA is on the stack.  */
      rtl = XEXP (rtl, 0);
      switch (GET_CODE (rtl))
	{
	case REG:
	  gcc_assert (REGNO (rtl) == STACK_POINTER_REGNUM);
	  offset = 0;
	  break;

	case PLUS:
	  gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
	  offset = INTVAL (XEXP (rtl, 1));
	  break;

	case MINUS:
	  gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
	  offset = -INTVAL (XEXP (rtl, 1));
	  break;

	default:
	  gcc_unreachable ();
	}

      break;

    case PLUS:
      /* The return address is at some offset from any value we can
	 actually load.  For instance, on the SPARC it is in %i7+8.  Just
	 ignore the offset for now; it doesn't matter for unwinding frames.  */
      gcc_assert (CONST_INT_P (XEXP (rtl, 1)));
      initial_return_save (XEXP (rtl, 0));
      return;

    default:
      gcc_unreachable ();
    }

  if (reg != DWARF_FRAME_RETURN_COLUMN)
    /* OFFSET is relative to the stack pointer; make it CFA-relative.  */
    reg_save (NULL, DWARF_FRAME_RETURN_COLUMN, reg, offset - cfa.offset);
}
811
/* Given a SET pattern, calculate the amount of stack adjustment it
   contains, in bytes of stack growth (positive = args pushed).
   CUR_ARGS_SIZE is the running args_size at this point; CUR_OFFSET is
   the adjustment already accumulated from earlier SETs of the same
   compound insn.  Both are only used for the "sp set from another
   register" case, which resets args_size to zero.  Returns 0 for
   patterns that do not touch the stack pointer.  */

static HOST_WIDE_INT
stack_adjust_offset (const_rtx pattern, HOST_WIDE_INT cur_args_size,
		     HOST_WIDE_INT cur_offset)
{
  const_rtx src = SET_SRC (pattern);
  const_rtx dest = SET_DEST (pattern);
  HOST_WIDE_INT offset = 0;
  enum rtx_code code;

  if (dest == stack_pointer_rtx)
    {
      code = GET_CODE (src);

      /* Assume (set (reg sp) (reg whatever)) sets args_size
	 level to 0.  */
      if (code == REG && src != stack_pointer_rtx)
	{
	  offset = -cur_args_size;
#ifndef STACK_GROWS_DOWNWARD
	  offset = -offset;
#endif
	  /* Subtract what this compound insn already adjusted.  */
	  return offset - cur_offset;
	}

      if (! (code == PLUS || code == MINUS)
	  || XEXP (src, 0) != stack_pointer_rtx
	  || !CONST_INT_P (XEXP (src, 1)))
	return 0;

      /* (set (reg sp) (plus (reg sp) (const_int))) */
      offset = INTVAL (XEXP (src, 1));
      if (code == PLUS)
	offset = -offset;
      return offset;
    }

  /* A pop reads through an autoinc address; treat it like the push.  */
  if (MEM_P (src) && !MEM_P (dest))
    dest = src;
  if (MEM_P (dest))
    {
      /* (set (mem (pre_dec (reg sp))) (foo)) */
      src = XEXP (dest, 0);
      code = GET_CODE (src);

      switch (code)
	{
	case PRE_MODIFY:
	case POST_MODIFY:
	  if (XEXP (src, 0) == stack_pointer_rtx)
	    {
	      rtx val = XEXP (XEXP (src, 1), 1);
	      /* We handle only adjustments by constant amount.  */
	      gcc_assert (GET_CODE (XEXP (src, 1)) == PLUS
			  && CONST_INT_P (val));
	      offset = -INTVAL (val);
	      break;
	    }
	  return 0;

	case PRE_DEC:
	case POST_DEC:
	  if (XEXP (src, 0) == stack_pointer_rtx)
	    {
	      offset = GET_MODE_SIZE (GET_MODE (dest));
	      break;
	    }
	  return 0;

	case PRE_INC:
	case POST_INC:
	  if (XEXP (src, 0) == stack_pointer_rtx)
	    {
	      offset = -GET_MODE_SIZE (GET_MODE (dest));
	      break;
	    }
	  return 0;

	default:
	  return 0;
	}
    }
  else
    return 0;

  return offset;
}
901
/* Precomputed args_size for CODE_LABELs and BARRIERs preceding them,
   indexed by INSN_UID.  */
904
905 static HOST_WIDE_INT *barrier_args_size;
906
/* Helper function for compute_barrier_args_size.  Handle one insn INSN:
   apply its stack adjustment to CUR_ARGS_SIZE and return the new value
   (clamped at zero).  When INSN is a jump whose target has not yet been
   assigned an args_size, record the value for the target and queue the
   target on *NEXT for a later pass.  */

static HOST_WIDE_INT
compute_barrier_args_size_1 (rtx insn, HOST_WIDE_INT cur_args_size,
			     VEC (rtx, heap) **next)
{
  HOST_WIDE_INT offset = 0;
  int i;

  if (! RTX_FRAME_RELATED_P (insn))
    {
      /* Prologue/epilogue insns are accounted for elsewhere.  */
      if (prologue_epilogue_contains (insn))
	/* Nothing */;
      else if (GET_CODE (PATTERN (insn)) == SET)
	offset = stack_adjust_offset (PATTERN (insn), cur_args_size, 0);
      else if (GET_CODE (PATTERN (insn)) == PARALLEL
	       || GET_CODE (PATTERN (insn)) == SEQUENCE)
	{
	  /* There may be stack adjustments inside compound insns.  Search
	     for them.  */
	  for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
	    if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
	      offset += stack_adjust_offset (XVECEXP (PATTERN (insn), 0, i),
					     cur_args_size, offset);
	}
    }
  else
    {
      /* For frame-related insns, honor the attached unwind expression
	 rather than the raw pattern.  */
      rtx expr = find_reg_note (insn, REG_FRAME_RELATED_EXPR, NULL_RTX);

      if (expr)
	{
	  expr = XEXP (expr, 0);
	  if (GET_CODE (expr) == PARALLEL
	      || GET_CODE (expr) == SEQUENCE)
	    for (i = 1; i < XVECLEN (expr, 0); i++)
	      {
		rtx elem = XVECEXP (expr, 0, i);

		if (GET_CODE (elem) == SET && !RTX_FRAME_RELATED_P (elem))
		  offset += stack_adjust_offset (elem, cur_args_size, offset);
	      }
	}
    }

#ifndef STACK_GROWS_DOWNWARD
  offset = -offset;
#endif

  cur_args_size += offset;
  if (cur_args_size < 0)
    cur_args_size = 0;

  if (JUMP_P (insn))
    {
      rtx dest = JUMP_LABEL (insn);

      if (dest)
	{
	  /* -1 marks "not yet visited"; see compute_barrier_args_size.  */
	  if (barrier_args_size [INSN_UID (dest)] < 0)
	    {
	      barrier_args_size [INSN_UID (dest)] = cur_args_size;
	      VEC_safe_push (rtx, heap, *next, dest);
	    }
	}
    }

  return cur_args_size;
}
976
/* Walk the whole function and compute args_size on BARRIERs.  Fills the
   file-global barrier_args_size array (indexed by INSN_UID, -1 meaning
   unknown) using a worklist of label starting points, propagating
   args_size along fallthrough and jump edges until a fixed point.  */

static void
compute_barrier_args_size (void)
{
  int max_uid = get_max_uid (), i;
  rtx insn;
  VEC (rtx, heap) *worklist, *next, *tmp;

  barrier_args_size = XNEWVEC (HOST_WIDE_INT, max_uid);
  for (i = 0; i < max_uid; i++)
    barrier_args_size[i] = -1;

  worklist = VEC_alloc (rtx, heap, 20);
  next = VEC_alloc (rtx, heap, 20);
  insn = get_insns ();
  /* args_size is zero on function entry.  */
  barrier_args_size[INSN_UID (insn)] = 0;
  VEC_quick_push (rtx, worklist, insn);
  for (;;)
    {
      while (!VEC_empty (rtx, worklist))
	{
	  rtx prev, body, first_insn;
	  HOST_WIDE_INT cur_args_size;

	  first_insn = insn = VEC_pop (rtx, worklist);
	  cur_args_size = barrier_args_size[INSN_UID (insn)];
	  /* A BARRIER just before a label shares the label's args_size.  */
	  prev = prev_nonnote_insn (insn);
	  if (prev && BARRIER_P (prev))
	    barrier_args_size[INSN_UID (prev)] = cur_args_size;

	  /* Scan forward from the work item until a BARRIER or an
	     already-processed label stops us.  */
	  for (; insn; insn = NEXT_INSN (insn))
	    {
	      if (INSN_DELETED_P (insn) || NOTE_P (insn))
		continue;
	      if (BARRIER_P (insn))
		break;

	      if (LABEL_P (insn))
		{
		  if (insn == first_insn)
		    continue;
		  else if (barrier_args_size[INSN_UID (insn)] < 0)
		    {
		      barrier_args_size[INSN_UID (insn)] = cur_args_size;
		      continue;
		    }
		  else
		    {
		      /* The insns starting with this label have been
			 already scanned or are in the worklist.  */
		      break;
		    }
		}

	      body = PATTERN (insn);
	      if (GET_CODE (body) == SEQUENCE)
		{
		  /* Delay slots: annulled-branch insns from the target
		     only affect the branch destination's args_size.  */
		  HOST_WIDE_INT dest_args_size = cur_args_size;
		  for (i = 1; i < XVECLEN (body, 0); i++)
		    if (INSN_ANNULLED_BRANCH_P (XVECEXP (body, 0, 0))
			&& INSN_FROM_TARGET_P (XVECEXP (body, 0, i)))
		      dest_args_size
			= compute_barrier_args_size_1 (XVECEXP (body, 0, i),
						       dest_args_size, &next);
		    else
		      cur_args_size
			= compute_barrier_args_size_1 (XVECEXP (body, 0, i),
						       cur_args_size, &next);

		  if (INSN_ANNULLED_BRANCH_P (XVECEXP (body, 0, 0)))
		    compute_barrier_args_size_1 (XVECEXP (body, 0, 0),
						 dest_args_size, &next);
		  else
		    cur_args_size
		      = compute_barrier_args_size_1 (XVECEXP (body, 0, 0),
						     cur_args_size, &next);
		}
	      else
		cur_args_size
		  = compute_barrier_args_size_1 (insn, cur_args_size, &next);
	    }
	}

      if (VEC_empty (rtx, next))
	break;

      /* Swap WORKLIST with NEXT and truncate NEXT for next iteration.  */
      tmp = next;
      next = worklist;
      worklist = tmp;
      VEC_truncate (rtx, next, 0);
    }

  VEC_free (rtx, heap, worklist);
  VEC_free (rtx, heap, next);
}
1074
1075 /* Add a CFI to update the running total of the size of arguments
1076 pushed onto the stack. */
1077
1078 static void
1079 dwarf2out_args_size (const char *label, HOST_WIDE_INT size)
1080 {
1081 dw_cfi_ref cfi;
1082
1083 if (size == old_args_size)
1084 return;
1085
1086 old_args_size = size;
1087
1088 cfi = new_cfi ();
1089 cfi->dw_cfi_opc = DW_CFA_GNU_args_size;
1090 cfi->dw_cfi_oprnd1.dw_cfi_offset = size;
1091 add_fde_cfi (label, cfi);
1092 }
1093
/* Record a stack adjustment of OFFSET bytes (positive = stack grows),
   attached at LABEL.  Updates the CFA and register-save base when they
   track the stack pointer, then — unless outgoing args are accumulated
   in the frame — maintains args_size and emits the corresponding CFIs.  */

static void
dwarf2out_stack_adjust (HOST_WIDE_INT offset, const char *label)
{
  if (cfa.reg == STACK_POINTER_REGNUM)
    cfa.offset += offset;

  if (cfa_store.reg == STACK_POINTER_REGNUM)
    cfa_store.offset += offset;

  /* With accumulated outgoing args there are no pushes to track.  */
  if (ACCUMULATE_OUTGOING_ARGS)
    return;

#ifndef STACK_GROWS_DOWNWARD
  offset = -offset;
#endif

  args_size += offset;
  if (args_size < 0)
    args_size = 0;

  def_cfa_1 (label, &cfa);
  if (flag_asynchronous_unwind_tables)
    dwarf2out_args_size (label, args_size);
}
1120
1121 /* Check INSN to see if it looks like a push or a stack adjustment, and
1122 make a note of it if it does. EH uses this information to find out
1123 how much extra space it needs to pop off the stack. */
1124
static void
dwarf2out_notice_stack_adjust (rtx insn, bool after_p)
{
  HOST_WIDE_INT offset;
  const char *label;
  int i;

  /* Don't handle epilogues at all.  Certainly it would be wrong to do so
     with this function.  Proper support would require all frame-related
     insns to be marked, and to be able to handle saving state around
     epilogues textually in the middle of the function.  */
  if (prologue_epilogue_contains (insn))
    return;

  /* If INSN is an instruction from target of an annulled branch, the
     effects are for the target only and so current argument size
     shouldn't change at all.  */
  if (final_sequence
      && INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))
      && INSN_FROM_TARGET_P (insn))
    return;

  /* If only calls can throw, and we have a frame pointer,
     save up adjustments until we see the CALL_INSN.  */
  if (!flag_asynchronous_unwind_tables && cfa.reg != STACK_POINTER_REGNUM)
    {
      if (CALL_P (insn) && !after_p)
	{
	  /* Extract the size of the args from the CALL rtx itself.
	     Peel PARALLEL and SET wrappers until the CALL is exposed;
	     its second operand is the argument-block size.  */
	  insn = PATTERN (insn);
	  if (GET_CODE (insn) == PARALLEL)
	    insn = XVECEXP (insn, 0, 0);
	  if (GET_CODE (insn) == SET)
	    insn = SET_SRC (insn);
	  gcc_assert (GET_CODE (insn) == CALL);
	  dwarf2out_args_size ("", INTVAL (XEXP (insn, 1)));
	}
      return;
    }

  if (CALL_P (insn) && !after_p)
    {
      if (!flag_asynchronous_unwind_tables)
	dwarf2out_args_size ("", args_size);
      return;
    }
  else if (BARRIER_P (insn))
    {
      /* Don't call compute_barrier_args_size () if the only
	 BARRIER is at the end of function.  */
      if (barrier_args_size == NULL && next_nonnote_insn (insn))
	compute_barrier_args_size ();
      if (barrier_args_size == NULL)
	offset = 0;
      else
	{
	  /* A negative entry means "unknown"; treat it as zero.  */
	  offset = barrier_args_size[INSN_UID (insn)];
	  if (offset < 0)
	    offset = 0;
	}

      /* Convert the absolute size at the barrier into a delta from
	 the current running total.  */
      offset -= args_size;
#ifndef STACK_GROWS_DOWNWARD
      offset = -offset;
#endif
    }
  else if (GET_CODE (PATTERN (insn)) == SET)
    offset = stack_adjust_offset (PATTERN (insn), args_size, 0);
  else if (GET_CODE (PATTERN (insn)) == PARALLEL
	   || GET_CODE (PATTERN (insn)) == SEQUENCE)
    {
      /* There may be stack adjustments inside compound insns.  Search
	 for them.  */
      for (offset = 0, i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
	if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
	  offset += stack_adjust_offset (XVECEXP (PATTERN (insn), 0, i),
					 args_size, offset);
    }
  else
    return;

  if (offset == 0)
    return;

  label = dwarf2out_cfi_label (false);
  dwarf2out_stack_adjust (offset, label);
}
1212
1213 /* We delay emitting a register save until either (a) we reach the end
1214 of the prologue or (b) the register is clobbered. This clusters
1215 register saves so that there are fewer pc advances. */
1216
struct GTY(()) queued_reg_save {
  struct queued_reg_save *next;	/* Singly-linked list, newest first.  */
  rtx reg;			/* Register being saved.  */
  HOST_WIDE_INT cfa_offset;	/* Save slot's offset from the CFA.  */
  rtx saved_reg;		/* If non-null, REG is saved here rather
				   than in a stack slot.  */
};

/* Head of the pending register-save queue; flushed by
   dwarf2out_flush_queued_reg_saves.  */
static GTY(()) struct queued_reg_save *queued_reg_saves;

/* The caller's ORIG_REG is saved in SAVED_IN_REG.  */
typedef struct GTY(()) reg_saved_in_data {
  rtx orig_reg;
  rtx saved_in_reg;
} reg_saved_in_data;

DEF_VEC_O (reg_saved_in_data);
DEF_VEC_ALLOC_O (reg_saved_in_data, gc);

/* A set of registers saved in other registers.  This is implemented as
   a flat array because it normally contains zero or 1 entry, depending
   on the target.  IA-64 is the big spender here, using a maximum of
   5 entries.  */
static GTY(()) VEC(reg_saved_in_data, gc) *regs_saved_in_regs;
1240
1241 /* Compare X and Y for equivalence. The inputs may be REGs or PC_RTX. */
1242
1243 static bool
1244 compare_reg_or_pc (rtx x, rtx y)
1245 {
1246 if (REG_P (x) && REG_P (y))
1247 return REGNO (x) == REGNO (y);
1248 return x == y;
1249 }
1250
1251 /* Record SRC as being saved in DEST. DEST may be null to delete an
1252 existing entry. SRC may be a register or PC_RTX. */
1253
static void
record_reg_saved_in_reg (rtx dest, rtx src)
{
  reg_saved_in_data *elt;
  size_t i;

  /* If SRC already has an entry, update it in place (or remove it
     when DEST is null).  Removal may reorder the vector, which is
     fine since lookups scan the whole thing.  */
  FOR_EACH_VEC_ELT (reg_saved_in_data, regs_saved_in_regs, i, elt)
    if (compare_reg_or_pc (elt->orig_reg, src))
      {
	if (dest == NULL)
	  VEC_unordered_remove(reg_saved_in_data, regs_saved_in_regs, i);
	else
	  elt->saved_in_reg = dest;
	return;
      }

  /* Deleting an entry that never existed is a no-op.  */
  if (dest == NULL)
    return;

  elt = VEC_safe_push(reg_saved_in_data, gc, regs_saved_in_regs, NULL);
  elt->orig_reg = src;
  elt->saved_in_reg = dest;
}
1277
/* Label attached to the most recently queued register save; emitted
   when the queue is flushed.  */
static const char *last_reg_save_label;
1279
1280 /* Add an entry to QUEUED_REG_SAVES saying that REG is now saved at
1281 SREG, or if SREG is NULL then it is saved at OFFSET to the CFA. */
1282
1283 static void
1284 queue_reg_save (const char *label, rtx reg, rtx sreg, HOST_WIDE_INT offset)
1285 {
1286 struct queued_reg_save *q;
1287
1288 /* Duplicates waste space, but it's also necessary to remove them
1289 for correctness, since the queue gets output in reverse
1290 order. */
1291 for (q = queued_reg_saves; q != NULL; q = q->next)
1292 if (REGNO (q->reg) == REGNO (reg))
1293 break;
1294
1295 if (q == NULL)
1296 {
1297 q = ggc_alloc_queued_reg_save ();
1298 q->next = queued_reg_saves;
1299 queued_reg_saves = q;
1300 }
1301
1302 q->reg = reg;
1303 q->cfa_offset = offset;
1304 q->saved_reg = sreg;
1305
1306 last_reg_save_label = label;
1307 }
1308
1309 /* Output all the entries in QUEUED_REG_SAVES. */
1310
1311 static void
1312 dwarf2out_flush_queued_reg_saves (void)
1313 {
1314 struct queued_reg_save *q;
1315
1316 for (q = queued_reg_saves; q; q = q->next)
1317 {
1318 unsigned int reg, sreg;
1319
1320 record_reg_saved_in_reg (q->saved_reg, q->reg);
1321
1322 reg = DWARF_FRAME_REGNUM (REGNO (q->reg));
1323 if (q->saved_reg)
1324 sreg = DWARF_FRAME_REGNUM (REGNO (q->saved_reg));
1325 else
1326 sreg = INVALID_REGNUM;
1327 reg_save (last_reg_save_label, reg, sreg, q->cfa_offset);
1328 }
1329
1330 queued_reg_saves = NULL;
1331 last_reg_save_label = NULL;
1332 }
1333
1334 /* Does INSN clobber any register which QUEUED_REG_SAVES lists a saved
1335 location for? Or, does it clobber a register which we've previously
1336 said that some other register is saved in, and for which we now
1337 have a new location for? */
1338
1339 static bool
1340 clobbers_queued_reg_save (const_rtx insn)
1341 {
1342 struct queued_reg_save *q;
1343
1344 for (q = queued_reg_saves; q; q = q->next)
1345 {
1346 size_t i;
1347 reg_saved_in_data *rir;
1348
1349 if (modified_in_p (q->reg, insn))
1350 return true;
1351
1352 FOR_EACH_VEC_ELT (reg_saved_in_data, regs_saved_in_regs, i, rir)
1353 if (compare_reg_or_pc (q->reg, rir->orig_reg)
1354 && modified_in_p (rir->saved_in_reg, insn))
1355 return true;
1356 }
1357
1358 return false;
1359 }
1360
1361 /* What register, if any, is currently saved in REG? */
1362
1363 static rtx
1364 reg_saved_in (rtx reg)
1365 {
1366 unsigned int regn = REGNO (reg);
1367 struct queued_reg_save *q;
1368 reg_saved_in_data *rir;
1369 size_t i;
1370
1371 for (q = queued_reg_saves; q; q = q->next)
1372 if (q->saved_reg && regn == REGNO (q->saved_reg))
1373 return q->reg;
1374
1375 FOR_EACH_VEC_ELT (reg_saved_in_data, regs_saved_in_regs, i, rir)
1376 if (regn == REGNO (rir->saved_in_reg))
1377 return rir->orig_reg;
1378
1379 return NULL_RTX;
1380 }
1381
1382
/* A temporary register holding an integral value used in adjusting SP
   or setting up the store_reg.  The "offset" field holds the integer
   value, not an offset; see the rules in dwarf2out_frame_debug_expr.  */
static dw_cfa_location cfa_temp;
1387
1388 /* A subroutine of dwarf2out_frame_debug, process a REG_DEF_CFA note. */
1389
1390 static void
1391 dwarf2out_frame_debug_def_cfa (rtx pat, const char *label)
1392 {
1393 memset (&cfa, 0, sizeof (cfa));
1394
1395 switch (GET_CODE (pat))
1396 {
1397 case PLUS:
1398 cfa.reg = REGNO (XEXP (pat, 0));
1399 cfa.offset = INTVAL (XEXP (pat, 1));
1400 break;
1401
1402 case REG:
1403 cfa.reg = REGNO (pat);
1404 break;
1405
1406 case MEM:
1407 cfa.indirect = 1;
1408 pat = XEXP (pat, 0);
1409 if (GET_CODE (pat) == PLUS)
1410 {
1411 cfa.base_offset = INTVAL (XEXP (pat, 1));
1412 pat = XEXP (pat, 0);
1413 }
1414 cfa.reg = REGNO (pat);
1415 break;
1416
1417 default:
1418 /* Recurse and define an expression. */
1419 gcc_unreachable ();
1420 }
1421
1422 def_cfa_1 (label, &cfa);
1423 }
1424
1425 /* A subroutine of dwarf2out_frame_debug, process a REG_ADJUST_CFA note. */
1426
1427 static void
1428 dwarf2out_frame_debug_adjust_cfa (rtx pat, const char *label)
1429 {
1430 rtx src, dest;
1431
1432 gcc_assert (GET_CODE (pat) == SET);
1433 dest = XEXP (pat, 0);
1434 src = XEXP (pat, 1);
1435
1436 switch (GET_CODE (src))
1437 {
1438 case PLUS:
1439 gcc_assert (REGNO (XEXP (src, 0)) == cfa.reg);
1440 cfa.offset -= INTVAL (XEXP (src, 1));
1441 break;
1442
1443 case REG:
1444 break;
1445
1446 default:
1447 gcc_unreachable ();
1448 }
1449
1450 cfa.reg = REGNO (dest);
1451 gcc_assert (cfa.indirect == 0);
1452
1453 def_cfa_1 (label, &cfa);
1454 }
1455
1456 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_OFFSET note. */
1457
1458 static void
1459 dwarf2out_frame_debug_cfa_offset (rtx set, const char *label)
1460 {
1461 HOST_WIDE_INT offset;
1462 rtx src, addr, span;
1463 unsigned int sregno;
1464
1465 src = XEXP (set, 1);
1466 addr = XEXP (set, 0);
1467 gcc_assert (MEM_P (addr));
1468 addr = XEXP (addr, 0);
1469
1470 /* As documented, only consider extremely simple addresses. */
1471 switch (GET_CODE (addr))
1472 {
1473 case REG:
1474 gcc_assert (REGNO (addr) == cfa.reg);
1475 offset = -cfa.offset;
1476 break;
1477 case PLUS:
1478 gcc_assert (REGNO (XEXP (addr, 0)) == cfa.reg);
1479 offset = INTVAL (XEXP (addr, 1)) - cfa.offset;
1480 break;
1481 default:
1482 gcc_unreachable ();
1483 }
1484
1485 if (src == pc_rtx)
1486 {
1487 span = NULL;
1488 sregno = DWARF_FRAME_RETURN_COLUMN;
1489 }
1490 else
1491 {
1492 span = targetm.dwarf_register_span (src);
1493 sregno = DWARF_FRAME_REGNUM (REGNO (src));
1494 }
1495
1496 /* ??? We'd like to use queue_reg_save, but we need to come up with
1497 a different flushing heuristic for epilogues. */
1498 if (!span)
1499 reg_save (label, sregno, INVALID_REGNUM, offset);
1500 else
1501 {
1502 /* We have a PARALLEL describing where the contents of SRC live.
1503 Queue register saves for each piece of the PARALLEL. */
1504 int par_index;
1505 int limit;
1506 HOST_WIDE_INT span_offset = offset;
1507
1508 gcc_assert (GET_CODE (span) == PARALLEL);
1509
1510 limit = XVECLEN (span, 0);
1511 for (par_index = 0; par_index < limit; par_index++)
1512 {
1513 rtx elem = XVECEXP (span, 0, par_index);
1514
1515 sregno = DWARF_FRAME_REGNUM (REGNO (src));
1516 reg_save (label, sregno, INVALID_REGNUM, span_offset);
1517 span_offset += GET_MODE_SIZE (GET_MODE (elem));
1518 }
1519 }
1520 }
1521
1522 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_REGISTER note. */
1523
1524 static void
1525 dwarf2out_frame_debug_cfa_register (rtx set, const char *label)
1526 {
1527 rtx src, dest;
1528 unsigned sregno, dregno;
1529
1530 src = XEXP (set, 1);
1531 dest = XEXP (set, 0);
1532
1533 if (src == pc_rtx)
1534 sregno = DWARF_FRAME_RETURN_COLUMN;
1535 else
1536 {
1537 record_reg_saved_in_reg (dest, src);
1538 sregno = DWARF_FRAME_REGNUM (REGNO (src));
1539 }
1540
1541 dregno = DWARF_FRAME_REGNUM (REGNO (dest));
1542
1543 /* ??? We'd like to use queue_reg_save, but we need to come up with
1544 a different flushing heuristic for epilogues. */
1545 reg_save (label, sregno, dregno, 0);
1546 }
1547
1548 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_EXPRESSION note. */
1549
1550 static void
1551 dwarf2out_frame_debug_cfa_expression (rtx set, const char *label)
1552 {
1553 rtx src, dest, span;
1554 dw_cfi_ref cfi = new_cfi ();
1555
1556 dest = SET_DEST (set);
1557 src = SET_SRC (set);
1558
1559 gcc_assert (REG_P (src));
1560 gcc_assert (MEM_P (dest));
1561
1562 span = targetm.dwarf_register_span (src);
1563 gcc_assert (!span);
1564
1565 cfi->dw_cfi_opc = DW_CFA_expression;
1566 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = DWARF_FRAME_REGNUM (REGNO (src));
1567 cfi->dw_cfi_oprnd2.dw_cfi_loc
1568 = mem_loc_descriptor (XEXP (dest, 0), get_address_mode (dest),
1569 GET_MODE (dest), VAR_INIT_STATUS_INITIALIZED);
1570
1571 /* ??? We'd like to use queue_reg_save, were the interface different,
1572 and, as above, we could manage flushing for epilogues. */
1573 add_fde_cfi (label, cfi);
1574 }
1575
1576 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_RESTORE note. */
1577
1578 static void
1579 dwarf2out_frame_debug_cfa_restore (rtx reg, const char *label)
1580 {
1581 dw_cfi_ref cfi = new_cfi ();
1582 unsigned int regno = DWARF_FRAME_REGNUM (REGNO (reg));
1583
1584 cfi->dw_cfi_opc = (regno & ~0x3f ? DW_CFA_restore_extended : DW_CFA_restore);
1585 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = regno;
1586
1587 add_fde_cfi (label, cfi);
1588 }
1589
1590 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_WINDOW_SAVE.
1591 ??? Perhaps we should note in the CIE where windows are saved (instead of
1592 assuming 0(cfa)) and what registers are in the window. */
1593
1594 static void
1595 dwarf2out_frame_debug_cfa_window_save (const char *label)
1596 {
1597 dw_cfi_ref cfi = new_cfi ();
1598
1599 cfi->dw_cfi_opc = DW_CFA_GNU_window_save;
1600 add_fde_cfi (label, cfi);
1601 }
1602
1603 /* Record call frame debugging information for an expression EXPR,
1604 which either sets SP or FP (adjusting how we calculate the frame
1605 address) or saves a register to the stack or another register.
1606 LABEL indicates the address of EXPR.
1607
1608 This function encodes a state machine mapping rtxes to actions on
1609 cfa, cfa_store, and cfa_temp.reg. We describe these rules so
1610 users need not read the source code.
1611
1612 The High-Level Picture
1613
1614 Changes in the register we use to calculate the CFA: Currently we
1615 assume that if you copy the CFA register into another register, we
1616 should take the other one as the new CFA register; this seems to
1617 work pretty well. If it's wrong for some target, it's simple
1618 enough not to set RTX_FRAME_RELATED_P on the insn in question.
1619
1620 Changes in the register we use for saving registers to the stack:
1621 This is usually SP, but not always. Again, we deduce that if you
1622 copy SP into another register (and SP is not the CFA register),
1623 then the new register is the one we will be using for register
1624 saves. This also seems to work.
1625
1626 Register saves: There's not much guesswork about this one; if
1627 RTX_FRAME_RELATED_P is set on an insn which modifies memory, it's a
1628 register save, and the register used to calculate the destination
1629 had better be the one we think we're using for this purpose.
1630 It's also assumed that a copy from a call-saved register to another
1631 register is saving that register if RTX_FRAME_RELATED_P is set on
1632 that instruction. If the copy is from a call-saved register to
1633 the *same* register, that means that the register is now the same
1634 value as in the caller.
1635
1636 Except: If the register being saved is the CFA register, and the
1637 offset is nonzero, we are saving the CFA, so we assume we have to
1638 use DW_CFA_def_cfa_expression. If the offset is 0, we assume that
1639 the intent is to save the value of SP from the previous frame.
1640
   In addition, if a register has previously been saved to a different
   register, subsequent uses of that destination register are resolved
   back to the original saved register (see reg_saved_in).
1643
1644 Invariants / Summaries of Rules
1645
1646 cfa current rule for calculating the CFA. It usually
1647 consists of a register and an offset.
1648 cfa_store register used by prologue code to save things to the stack
1649 cfa_store.offset is the offset from the value of
1650 cfa_store.reg to the actual CFA
1651 cfa_temp register holding an integral value. cfa_temp.offset
1652 stores the value, which will be used to adjust the
1653 stack pointer. cfa_temp is also used like cfa_store,
1654 to track stores to the stack via fp or a temp reg.
1655
1656 Rules 1- 4: Setting a register's value to cfa.reg or an expression
1657 with cfa.reg as the first operand changes the cfa.reg and its
1658 cfa.offset. Rule 1 and 4 also set cfa_temp.reg and
1659 cfa_temp.offset.
1660
1661 Rules 6- 9: Set a non-cfa.reg register value to a constant or an
1662 expression yielding a constant. This sets cfa_temp.reg
1663 and cfa_temp.offset.
1664
1665 Rule 5: Create a new register cfa_store used to save items to the
1666 stack.
1667
1668 Rules 10-14: Save a register to the stack. Define offset as the
1669 difference of the original location and cfa_store's
1670 location (or cfa_temp's location if cfa_temp is used).
1671
1672 Rules 16-20: If AND operation happens on sp in prologue, we assume
1673 stack is realigned. We will use a group of DW_OP_XXX
1674 expressions to represent the location of the stored
1675 register instead of CFA+offset.
1676
1677 The Rules
1678
1679 "{a,b}" indicates a choice of a xor b.
1680 "<reg>:cfa.reg" indicates that <reg> must equal cfa.reg.
1681
1682 Rule 1:
1683 (set <reg1> <reg2>:cfa.reg)
1684 effects: cfa.reg = <reg1>
1685 cfa.offset unchanged
1686 cfa_temp.reg = <reg1>
1687 cfa_temp.offset = cfa.offset
1688
1689 Rule 2:
1690 (set sp ({minus,plus,losum} {sp,fp}:cfa.reg
1691 {<const_int>,<reg>:cfa_temp.reg}))
1692 effects: cfa.reg = sp if fp used
1693 cfa.offset += {+/- <const_int>, cfa_temp.offset} if cfa.reg==sp
1694 cfa_store.offset += {+/- <const_int>, cfa_temp.offset}
1695 if cfa_store.reg==sp
1696
1697 Rule 3:
1698 (set fp ({minus,plus,losum} <reg>:cfa.reg <const_int>))
1699 effects: cfa.reg = fp
1700 cfa_offset += +/- <const_int>
1701
1702 Rule 4:
1703 (set <reg1> ({plus,losum} <reg2>:cfa.reg <const_int>))
1704 constraints: <reg1> != fp
1705 <reg1> != sp
1706 effects: cfa.reg = <reg1>
1707 cfa_temp.reg = <reg1>
1708 cfa_temp.offset = cfa.offset
1709
1710 Rule 5:
1711 (set <reg1> (plus <reg2>:cfa_temp.reg sp:cfa.reg))
1712 constraints: <reg1> != fp
1713 <reg1> != sp
1714 effects: cfa_store.reg = <reg1>
1715 cfa_store.offset = cfa.offset - cfa_temp.offset
1716
1717 Rule 6:
1718 (set <reg> <const_int>)
1719 effects: cfa_temp.reg = <reg>
1720 cfa_temp.offset = <const_int>
1721
1722 Rule 7:
1723 (set <reg1>:cfa_temp.reg (ior <reg2>:cfa_temp.reg <const_int>))
1724 effects: cfa_temp.reg = <reg1>
1725 cfa_temp.offset |= <const_int>
1726
1727 Rule 8:
1728 (set <reg> (high <exp>))
1729 effects: none
1730
1731 Rule 9:
1732 (set <reg> (lo_sum <exp> <const_int>))
1733 effects: cfa_temp.reg = <reg>
1734 cfa_temp.offset = <const_int>
1735
1736 Rule 10:
1737 (set (mem ({pre,post}_modify sp:cfa_store (???? <reg1> <const_int>))) <reg2>)
1738 effects: cfa_store.offset -= <const_int>
1739 cfa.offset = cfa_store.offset if cfa.reg == sp
1740 cfa.reg = sp
1741 cfa.base_offset = -cfa_store.offset
1742
1743 Rule 11:
1744 (set (mem ({pre_inc,pre_dec,post_dec} sp:cfa_store.reg)) <reg>)
1745 effects: cfa_store.offset += -/+ mode_size(mem)
1746 cfa.offset = cfa_store.offset if cfa.reg == sp
1747 cfa.reg = sp
1748 cfa.base_offset = -cfa_store.offset
1749
1750 Rule 12:
1751 (set (mem ({minus,plus,losum} <reg1>:{cfa_store,cfa_temp} <const_int>))
1752
1753 <reg2>)
1754 effects: cfa.reg = <reg1>
1755 cfa.base_offset = -/+ <const_int> - {cfa_store,cfa_temp}.offset
1756
1757 Rule 13:
1758 (set (mem <reg1>:{cfa_store,cfa_temp}) <reg2>)
1759 effects: cfa.reg = <reg1>
1760 cfa.base_offset = -{cfa_store,cfa_temp}.offset
1761
1762 Rule 14:
1763 (set (mem (post_inc <reg1>:cfa_temp <const_int>)) <reg2>)
1764 effects: cfa.reg = <reg1>
1765 cfa.base_offset = -cfa_temp.offset
1766 cfa_temp.offset -= mode_size(mem)
1767
1768 Rule 15:
1769 (set <reg> {unspec, unspec_volatile})
1770 effects: target-dependent
1771
1772 Rule 16:
1773 (set sp (and: sp <const_int>))
1774 constraints: cfa_store.reg == sp
1775 effects: current_fde.stack_realign = 1
1776 cfa_store.offset = 0
1777 fde->drap_reg = cfa.reg if cfa.reg != sp and cfa.reg != fp
1778
1779 Rule 17:
1780 (set (mem ({pre_inc, pre_dec} sp)) (mem (plus (cfa.reg) (const_int))))
1781 effects: cfa_store.offset += -/+ mode_size(mem)
1782
1783 Rule 18:
1784 (set (mem ({pre_inc, pre_dec} sp)) fp)
1785 constraints: fde->stack_realign == 1
1786 effects: cfa_store.offset = 0
1787 cfa.reg != HARD_FRAME_POINTER_REGNUM
1788
1789 Rule 19:
1790 (set (mem ({pre_inc, pre_dec} sp)) cfa.reg)
1791 constraints: fde->stack_realign == 1
1792 && cfa.offset == 0
1793 && cfa.indirect == 0
1794 && cfa.reg != HARD_FRAME_POINTER_REGNUM
1795 effects: Use DW_CFA_def_cfa_expression to define cfa
1796 cfa.reg == fde->drap_reg */
1797
static void
dwarf2out_frame_debug_expr (rtx expr, const char *label)
{
  rtx src, dest, span;
  HOST_WIDE_INT offset;
  dw_fde_ref fde;

  /* If RTX_FRAME_RELATED_P is set on a PARALLEL, process each member of
     the PARALLEL independently. The first element is always processed if
     it is a SET. This is for backward compatibility.   Other elements
     are processed only if they are SETs and the RTX_FRAME_RELATED_P
     flag is set in them. */
  if (GET_CODE (expr) == PARALLEL || GET_CODE (expr) == SEQUENCE)
    {
      int par_index;
      int limit = XVECLEN (expr, 0);
      rtx elem;

      /* PARALLELs have strict read-modify-write semantics, so we
	 ought to evaluate every rvalue before changing any lvalue.
	 It's cumbersome to do that in general, but there's an
	 easy approximation that is enough for all current users:
	 handle register saves before register assignments.  */
      if (GET_CODE (expr) == PARALLEL)
	for (par_index = 0; par_index < limit; par_index++)
	  {
	    elem = XVECEXP (expr, 0, par_index);
	    if (GET_CODE (elem) == SET
		&& MEM_P (SET_DEST (elem))
		&& (RTX_FRAME_RELATED_P (elem) || par_index == 0))
	      dwarf2out_frame_debug_expr (elem, label);
	  }

      /* Second pass: everything that was not a MEM store above.  */
      for (par_index = 0; par_index < limit; par_index++)
	{
	  elem = XVECEXP (expr, 0, par_index);
	  if (GET_CODE (elem) == SET
	      && (!MEM_P (SET_DEST (elem)) || GET_CODE (expr) == SEQUENCE)
	      && (RTX_FRAME_RELATED_P (elem) || par_index == 0))
	    dwarf2out_frame_debug_expr (elem, label);
	  else if (GET_CODE (elem) == SET
		   && par_index != 0
		   && !RTX_FRAME_RELATED_P (elem))
	    {
	      /* Stack adjustment combining might combine some post-prologue
		 stack adjustment into a prologue stack adjustment.  */
	      HOST_WIDE_INT offset = stack_adjust_offset (elem, args_size, 0);

	      if (offset != 0)
		dwarf2out_stack_adjust (offset, label);
	    }
	}
      return;
    }

  gcc_assert (GET_CODE (expr) == SET);

  src = SET_SRC (expr);
  dest = SET_DEST (expr);

  /* If SRC was previously copied into another register, resolve it
     back to the original register so the save is attributed to it.  */
  if (REG_P (src))
    {
      rtx rsi = reg_saved_in (src);
      if (rsi)
	src = rsi;
    }

  fde = current_fde ();

  switch (GET_CODE (dest))
    {
    case REG:
      switch (GET_CODE (src))
	{
	  /* Setting FP from SP.  */
	case REG:
	  if (cfa.reg == (unsigned) REGNO (src))
	    {
	      /* Rule 1 */
	      /* Update the CFA rule wrt SP or FP.  Make sure src is
		 relative to the current CFA register.

		 We used to require that dest be either SP or FP, but the
		 ARM copies SP to a temporary register, and from there to
		 FP.  So we just rely on the backends to only set
		 RTX_FRAME_RELATED_P on appropriate insns.  */
	      cfa.reg = REGNO (dest);
	      cfa_temp.reg = cfa.reg;
	      cfa_temp.offset = cfa.offset;
	    }
	  else
	    {
	      /* Saving a register in a register.  */
	      gcc_assert (!fixed_regs [REGNO (dest)]
			  /* For the SPARC and its register window.  */
			  || (DWARF_FRAME_REGNUM (REGNO (src))
			      == DWARF_FRAME_RETURN_COLUMN));

	      /* After stack is aligned, we can only save SP in FP
		 if drap register is used.  In this case, we have
		 to restore stack pointer with the CFA value and we
		 don't generate this DWARF information.  */
	      if (fde
		  && fde->stack_realign
		  && REGNO (src) == STACK_POINTER_REGNUM)
		gcc_assert (REGNO (dest) == HARD_FRAME_POINTER_REGNUM
			    && fde->drap_reg != INVALID_REGNUM
			    && cfa.reg != REGNO (src));
	      else
		queue_reg_save (label, src, dest, 0);
	    }
	  break;

	case PLUS:
	case MINUS:
	case LO_SUM:
	  if (dest == stack_pointer_rtx)
	    {
	      /* Rule 2 */
	      /* Adjusting SP.  */
	      switch (GET_CODE (XEXP (src, 1)))
		{
		case CONST_INT:
		  offset = INTVAL (XEXP (src, 1));
		  break;
		case REG:
		  /* The adjustment amount was previously loaded into
		     cfa_temp (Rule 6).  */
		  gcc_assert ((unsigned) REGNO (XEXP (src, 1))
			      == cfa_temp.reg);
		  offset = cfa_temp.offset;
		  break;
		default:
		  gcc_unreachable ();
		}

	      if (XEXP (src, 0) == hard_frame_pointer_rtx)
		{
		  /* Restoring SP from FP in the epilogue.  */
		  gcc_assert (cfa.reg == (unsigned) HARD_FRAME_POINTER_REGNUM);
		  cfa.reg = STACK_POINTER_REGNUM;
		}
	      else if (GET_CODE (src) == LO_SUM)
		/* Assume we've set the source reg of the LO_SUM from sp.  */
		;
	      else
		gcc_assert (XEXP (src, 0) == stack_pointer_rtx);

	      if (GET_CODE (src) != MINUS)
		offset = -offset;
	      if (cfa.reg == STACK_POINTER_REGNUM)
		cfa.offset += offset;
	      if (cfa_store.reg == STACK_POINTER_REGNUM)
		cfa_store.offset += offset;
	    }
	  else if (dest == hard_frame_pointer_rtx)
	    {
	      /* Rule 3 */
	      /* Either setting the FP from an offset of the SP,
		 or adjusting the FP */
	      gcc_assert (frame_pointer_needed);

	      gcc_assert (REG_P (XEXP (src, 0))
			  && (unsigned) REGNO (XEXP (src, 0)) == cfa.reg
			  && CONST_INT_P (XEXP (src, 1)));
	      offset = INTVAL (XEXP (src, 1));
	      if (GET_CODE (src) != MINUS)
		offset = -offset;
	      cfa.offset += offset;
	      cfa.reg = HARD_FRAME_POINTER_REGNUM;
	    }
	  else
	    {
	      gcc_assert (GET_CODE (src) != MINUS);

	      /* Rule 4 */
	      if (REG_P (XEXP (src, 0))
		  && REGNO (XEXP (src, 0)) == cfa.reg
		  && CONST_INT_P (XEXP (src, 1)))
		{
		  /* Setting a temporary CFA register that will be copied
		     into the FP later on.  */
		  offset = - INTVAL (XEXP (src, 1));
		  cfa.offset += offset;
		  cfa.reg = REGNO (dest);
		  /* Or used to save regs to the stack.  */
		  cfa_temp.reg = cfa.reg;
		  cfa_temp.offset = cfa.offset;
		}

	      /* Rule 5 */
	      else if (REG_P (XEXP (src, 0))
		       && REGNO (XEXP (src, 0)) == cfa_temp.reg
		       && XEXP (src, 1) == stack_pointer_rtx)
		{
		  /* Setting a scratch register that we will use instead
		     of SP for saving registers to the stack.  */
		  gcc_assert (cfa.reg == STACK_POINTER_REGNUM);
		  cfa_store.reg = REGNO (dest);
		  cfa_store.offset = cfa.offset - cfa_temp.offset;
		}

	      /* Rule 9 */
	      else if (GET_CODE (src) == LO_SUM
		       && CONST_INT_P (XEXP (src, 1)))
		{
		  cfa_temp.reg = REGNO (dest);
		  cfa_temp.offset = INTVAL (XEXP (src, 1));
		}
	      else
		gcc_unreachable ();
	    }
	  break;

	  /* Rule 6 */
	case CONST_INT:
	  cfa_temp.reg = REGNO (dest);
	  cfa_temp.offset = INTVAL (src);
	  break;

	  /* Rule 7 */
	case IOR:
	  gcc_assert (REG_P (XEXP (src, 0))
		      && (unsigned) REGNO (XEXP (src, 0)) == cfa_temp.reg
		      && CONST_INT_P (XEXP (src, 1)));

	  if ((unsigned) REGNO (dest) != cfa_temp.reg)
	    cfa_temp.reg = REGNO (dest);
	  cfa_temp.offset |= INTVAL (XEXP (src, 1));
	  break;

	  /* Skip over HIGH, assuming it will be followed by a LO_SUM,
	     which will fill in all of the bits.  */
	  /* Rule 8 */
	case HIGH:
	  break;

	  /* Rule 15 */
	case UNSPEC:
	case UNSPEC_VOLATILE:
	  /* Target-dependent; the hook emits whatever CFIs it needs.
	     Note we return here without calling def_cfa_1.  */
	  gcc_assert (targetm.dwarf_handle_frame_unspec);
	  targetm.dwarf_handle_frame_unspec (label, expr, XINT (src, 1));
	  return;

	  /* Rule 16 */
	case AND:
	  /* If this AND operation happens on stack pointer in prologue,
	     we assume the stack is realigned and we extract the
	     alignment.  */
	  if (fde && XEXP (src, 0) == stack_pointer_rtx)
	    {
	      /* We interpret reg_save differently with stack_realign set.
		 Thus we must flush whatever we have queued first.  */
	      dwarf2out_flush_queued_reg_saves ();

	      gcc_assert (cfa_store.reg == REGNO (XEXP (src, 0)));
	      fde->stack_realign = 1;
	      fde->stack_realignment = INTVAL (XEXP (src, 1));
	      cfa_store.offset = 0;

	      if (cfa.reg != STACK_POINTER_REGNUM
		  && cfa.reg != HARD_FRAME_POINTER_REGNUM)
		fde->drap_reg = cfa.reg;
	    }
	  /* Returns without calling def_cfa_1; the CFA rule is unchanged.  */
	  return;

	default:
	  gcc_unreachable ();
	}

      def_cfa_1 (label, &cfa);
      break;

    case MEM:

      /* Saving a register to the stack.  Make sure dest is relative to the
	 CFA register.  In every case below, OFFSET ends up as the save
	 slot's offset from the CFA.  */
      switch (GET_CODE (XEXP (dest, 0)))
	{
	  /* Rule 10 */
	  /* With a push.  */
	case PRE_MODIFY:
	case POST_MODIFY:
	  /* We can't handle variable size modifications.  */
	  gcc_assert (GET_CODE (XEXP (XEXP (XEXP (dest, 0), 1), 1))
		      == CONST_INT);
	  offset = -INTVAL (XEXP (XEXP (XEXP (dest, 0), 1), 1));

	  gcc_assert (REGNO (XEXP (XEXP (dest, 0), 0)) == STACK_POINTER_REGNUM
		      && cfa_store.reg == STACK_POINTER_REGNUM);

	  cfa_store.offset += offset;
	  if (cfa.reg == STACK_POINTER_REGNUM)
	    cfa.offset = cfa_store.offset;

	  if (GET_CODE (XEXP (dest, 0)) == POST_MODIFY)
	    offset -= cfa_store.offset;
	  else
	    offset = -cfa_store.offset;
	  break;

	  /* Rule 11 */
	case PRE_INC:
	case PRE_DEC:
	case POST_DEC:
	  offset = GET_MODE_SIZE (GET_MODE (dest));
	  if (GET_CODE (XEXP (dest, 0)) == PRE_INC)
	    offset = -offset;

	  gcc_assert ((REGNO (XEXP (XEXP (dest, 0), 0))
		       == STACK_POINTER_REGNUM)
		      && cfa_store.reg == STACK_POINTER_REGNUM);

	  cfa_store.offset += offset;

	  /* Rule 18: If stack is aligned, we will use FP as a
	     reference to represent the address of the stored
	     register.  */
	  if (fde
	      && fde->stack_realign
	      && src == hard_frame_pointer_rtx)
	    {
	      gcc_assert (cfa.reg != HARD_FRAME_POINTER_REGNUM);
	      cfa_store.offset = 0;
	    }

	  if (cfa.reg == STACK_POINTER_REGNUM)
	    cfa.offset = cfa_store.offset;

	  if (GET_CODE (XEXP (dest, 0)) == POST_DEC)
	    offset += -cfa_store.offset;
	  else
	    offset = -cfa_store.offset;
	  break;

	  /* Rule 12 */
	  /* With an offset.  */
	case PLUS:
	case MINUS:
	case LO_SUM:
	  {
	    int regno;

	    gcc_assert (CONST_INT_P (XEXP (XEXP (dest, 0), 1))
			&& REG_P (XEXP (XEXP (dest, 0), 0)));
	    offset = INTVAL (XEXP (XEXP (dest, 0), 1));
	    if (GET_CODE (XEXP (dest, 0)) == MINUS)
	      offset = -offset;

	    regno = REGNO (XEXP (XEXP (dest, 0), 0));

	    if (cfa.reg == (unsigned) regno)
	      offset -= cfa.offset;
	    else if (cfa_store.reg == (unsigned) regno)
	      offset -= cfa_store.offset;
	    else
	      {
		gcc_assert (cfa_temp.reg == (unsigned) regno);
		offset -= cfa_temp.offset;
	      }
	  }
	  break;

	  /* Rule 13 */
	  /* Without an offset.  */
	case REG:
	  {
	    int regno = REGNO (XEXP (dest, 0));

	    if (cfa.reg == (unsigned) regno)
	      offset = -cfa.offset;
	    else if (cfa_store.reg == (unsigned) regno)
	      offset = -cfa_store.offset;
	    else
	      {
		gcc_assert (cfa_temp.reg == (unsigned) regno);
		offset = -cfa_temp.offset;
	      }
	  }
	  break;

	  /* Rule 14 */
	case POST_INC:
	  gcc_assert (cfa_temp.reg
		      == (unsigned) REGNO (XEXP (XEXP (dest, 0), 0)));
	  offset = -cfa_temp.offset;
	  cfa_temp.offset -= GET_MODE_SIZE (GET_MODE (dest));
	  break;

	default:
	  gcc_unreachable ();
	}

      /* Rule 17 */
      /* If the source operand of this MEM operation is not a
	 register, basically the source is return address.  Here
	 we only care how much stack grew and we don't save it.  */
      if (!REG_P (src))
	break;

      if (REGNO (src) != STACK_POINTER_REGNUM
	  && REGNO (src) != HARD_FRAME_POINTER_REGNUM
	  && (unsigned) REGNO (src) == cfa.reg)
	{
	  /* We're storing the current CFA reg into the stack.  */

	  if (cfa.offset == 0)
	    {
	      /* Rule 19 */
	      /* If stack is aligned, putting CFA reg into stack means
		 we can no longer use reg + offset to represent CFA.
		 Here we use DW_CFA_def_cfa_expression instead.  The
		 result of this expression equals to the original CFA
		 value.  */
	      if (fde
		  && fde->stack_realign
		  && cfa.indirect == 0
		  && cfa.reg != HARD_FRAME_POINTER_REGNUM)
		{
		  dw_cfa_location cfa_exp;

		  gcc_assert (fde->drap_reg == cfa.reg);

		  cfa_exp.indirect = 1;
		  cfa_exp.reg = HARD_FRAME_POINTER_REGNUM;
		  cfa_exp.base_offset = offset;
		  cfa_exp.offset = 0;

		  fde->drap_reg_saved = 1;

		  def_cfa_1 (label, &cfa_exp);
		  break;
		}

	      /* If the source register is exactly the CFA, assume
		 we're saving SP like any other register; this happens
		 on the ARM.  */
	      def_cfa_1 (label, &cfa);
	      queue_reg_save (label, stack_pointer_rtx, NULL_RTX, offset);
	      break;
	    }
	  else
	    {
	      /* Otherwise, we'll need to look in the stack to
		 calculate the CFA.  */
	      rtx x = XEXP (dest, 0);

	      if (!REG_P (x))
		x = XEXP (x, 0);
	      gcc_assert (REG_P (x));

	      cfa.reg = REGNO (x);
	      cfa.base_offset = offset;
	      cfa.indirect = 1;
	      def_cfa_1 (label, &cfa);
	      break;
	    }
	}

      def_cfa_1 (label, &cfa);
      {
	span = targetm.dwarf_register_span (src);

	if (!span)
	  queue_reg_save (label, src, NULL_RTX, offset);
	else
	  {
	    /* We have a PARALLEL describing where the contents of SRC
	       live.  Queue register saves for each piece of the
	       PARALLEL.  */
	    int par_index;
	    int limit;
	    HOST_WIDE_INT span_offset = offset;

	    gcc_assert (GET_CODE (span) == PARALLEL);

	    limit = XVECLEN (span, 0);
	    for (par_index = 0; par_index < limit; par_index++)
	      {
		rtx elem = XVECEXP (span, 0, par_index);

		queue_reg_save (label, elem, NULL_RTX, span_offset);
		span_offset += GET_MODE_SIZE (GET_MODE (elem));
	      }
	  }
      }
      break;

    default:
      gcc_unreachable ();
    }
}
2288
2289 /* Record call frame debugging information for INSN, which either
2290 sets SP or FP (adjusting how we calculate the frame address) or saves a
2291 register to the stack. If INSN is NULL_RTX, initialize our state.
2292
2293 If AFTER_P is false, we're being called before the insn is emitted,
2294 otherwise after. Call instructions get invoked twice. */
2295
2296 void
2297 dwarf2out_frame_debug (rtx insn, bool after_p)
2298 {
2299 const char *label;
2300 rtx note, n;
2301 bool handled_one = false;
2302 bool need_flush = false;
2303
2304 if (!NONJUMP_INSN_P (insn) || clobbers_queued_reg_save (insn))
2305 dwarf2out_flush_queued_reg_saves ();
2306
2307 if (!RTX_FRAME_RELATED_P (insn))
2308 {
2309 /* ??? This should be done unconditionally since stack adjustments
2310 matter if the stack pointer is not the CFA register anymore but
2311 is still used to save registers. */
2312 if (!ACCUMULATE_OUTGOING_ARGS)
2313 dwarf2out_notice_stack_adjust (insn, after_p);
2314 return;
2315 }
2316
2317 label = dwarf2out_cfi_label (false);
2318 any_cfis_emitted = false;
2319
2320 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
2321 switch (REG_NOTE_KIND (note))
2322 {
2323 case REG_FRAME_RELATED_EXPR:
2324 insn = XEXP (note, 0);
2325 goto do_frame_expr;
2326
2327 case REG_CFA_DEF_CFA:
2328 dwarf2out_frame_debug_def_cfa (XEXP (note, 0), label);
2329 handled_one = true;
2330 break;
2331
2332 case REG_CFA_ADJUST_CFA:
2333 n = XEXP (note, 0);
2334 if (n == NULL)
2335 {
2336 n = PATTERN (insn);
2337 if (GET_CODE (n) == PARALLEL)
2338 n = XVECEXP (n, 0, 0);
2339 }
2340 dwarf2out_frame_debug_adjust_cfa (n, label);
2341 handled_one = true;
2342 break;
2343
2344 case REG_CFA_OFFSET:
2345 n = XEXP (note, 0);
2346 if (n == NULL)
2347 n = single_set (insn);
2348 dwarf2out_frame_debug_cfa_offset (n, label);
2349 handled_one = true;
2350 break;
2351
2352 case REG_CFA_REGISTER:
2353 n = XEXP (note, 0);
2354 if (n == NULL)
2355 {
2356 n = PATTERN (insn);
2357 if (GET_CODE (n) == PARALLEL)
2358 n = XVECEXP (n, 0, 0);
2359 }
2360 dwarf2out_frame_debug_cfa_register (n, label);
2361 handled_one = true;
2362 break;
2363
2364 case REG_CFA_EXPRESSION:
2365 n = XEXP (note, 0);
2366 if (n == NULL)
2367 n = single_set (insn);
2368 dwarf2out_frame_debug_cfa_expression (n, label);
2369 handled_one = true;
2370 break;
2371
2372 case REG_CFA_RESTORE:
2373 n = XEXP (note, 0);
2374 if (n == NULL)
2375 {
2376 n = PATTERN (insn);
2377 if (GET_CODE (n) == PARALLEL)
2378 n = XVECEXP (n, 0, 0);
2379 n = XEXP (n, 0);
2380 }
2381 dwarf2out_frame_debug_cfa_restore (n, label);
2382 handled_one = true;
2383 break;
2384
2385 case REG_CFA_SET_VDRAP:
2386 n = XEXP (note, 0);
2387 if (REG_P (n))
2388 {
2389 dw_fde_ref fde = current_fde ();
2390 if (fde)
2391 {
2392 gcc_assert (fde->vdrap_reg == INVALID_REGNUM);
2393 if (REG_P (n))
2394 fde->vdrap_reg = REGNO (n);
2395 }
2396 }
2397 handled_one = true;
2398 break;
2399
2400 case REG_CFA_WINDOW_SAVE:
2401 dwarf2out_frame_debug_cfa_window_save (label);
2402 handled_one = true;
2403 break;
2404
2405 case REG_CFA_FLUSH_QUEUE:
2406 /* The actual flush happens below. */
2407 need_flush = true;
2408 handled_one = true;
2409 break;
2410
2411 default:
2412 break;
2413 }
2414
2415 if (handled_one)
2416 {
2417 /* Minimize the number of advances by emitting the entire queue
2418 once anything is emitted. */
2419 need_flush |= any_cfis_emitted;
2420 }
2421 else
2422 {
2423 insn = PATTERN (insn);
2424 do_frame_expr:
2425 dwarf2out_frame_debug_expr (insn, label);
2426
2427 /* Check again. A parallel can save and update the same register.
2428 We could probably check just once, here, but this is safer than
2429 removing the check at the start of the function. */
2430 if (any_cfis_emitted || clobbers_queued_reg_save (insn))
2431 need_flush = true;
2432 }
2433
2434 if (need_flush)
2435 dwarf2out_flush_queued_reg_saves ();
2436 }
2437
2438 /* Called once at the start of final to initialize some data for the
2439 current function. */
2440 void
2441 dwarf2out_frame_debug_init (void)
2442 {
2443 /* Flush any queued register saves. */
2444 dwarf2out_flush_queued_reg_saves ();
2445
2446 /* Set up state for generating call frame debug info. */
2447 lookup_cfa (&cfa);
2448 gcc_assert (cfa.reg
2449 == (unsigned long)DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM));
2450
2451 cfa.reg = STACK_POINTER_REGNUM;
2452 cfa_store = cfa;
2453 cfa_temp.reg = -1;
2454 cfa_temp.offset = 0;
2455
2456 regs_saved_in_regs = NULL;
2457
2458 if (barrier_args_size)
2459 {
2460 XDELETEVEC (barrier_args_size);
2461 barrier_args_size = NULL;
2462 }
2463 }
2464
/* Determine if we need to save and restore CFI information around the
   epilogue beginning at INSN.  If we do need to save/restore, then emit
   the save now, and insert a NOTE_INSN_CFA_RESTORE_STATE at the
   appropriate place in the stream.  */

void
dwarf2out_cfi_begin_epilogue (rtx insn)
{
  bool saw_frp = false;
  rtx i;

  /* Scan forward to the return insn, noticing if there are possible
     frame related insns.  */
  for (i = NEXT_INSN (insn); i ; i = NEXT_INSN (i))
    {
      if (!INSN_P (i))
	continue;

      /* Look for both regular and sibcalls to end the block.  */
      if (returnjump_p (i))
	break;
      if (CALL_P (i) && SIBLING_CALL_P (i))
	break;

      /* A delay-slot SEQUENCE: the return/sibcall may be its first
	 element, and any element may be frame related.  */
      if (GET_CODE (PATTERN (i)) == SEQUENCE)
	{
	  int idx;
	  rtx seq = PATTERN (i);

	  if (returnjump_p (XVECEXP (seq, 0, 0)))
	    break;
	  if (CALL_P (XVECEXP (seq, 0, 0))
	      && SIBLING_CALL_P (XVECEXP (seq, 0, 0)))
	    break;

	  for (idx = 0; idx < XVECLEN (seq, 0); idx++)
	    if (RTX_FRAME_RELATED_P (XVECEXP (seq, 0, idx)))
	      saw_frp = true;
	}

      if (RTX_FRAME_RELATED_P (i))
	saw_frp = true;
    }

  /* If the epilogue contains no frame-related insns (e.g. the port does
     not emit epilogue unwind info), we don't need a save/restore pair.  */
  if (!saw_frp)
    return;

  /* Otherwise, search forward to see if the return insn was the last
     insn of the function.  If so, we don't need save/restore.  */
  gcc_assert (i != NULL);
  i = next_real_insn (i);
  if (i == NULL)
    return;

  /* Insert the restore before that next real insn in the stream, and before
     a potential NOTE_INSN_EPILOGUE_BEG -- we do need these notes to be
     properly nested.  This should be after any label or alignment.  This
     will be pushed into the CFI stream by the function below.  */
  while (1)
    {
      rtx p = PREV_INSN (i);
      if (!NOTE_P (p))
	break;
      if (NOTE_KIND (p) == NOTE_INSN_BASIC_BLOCK)
	break;
      i = p;
    }
  emit_note_before (NOTE_INSN_CFA_RESTORE_STATE, i);

  /* Ask for DW_CFA_remember_state before the next CFI is emitted.  */
  emit_cfa_remember = true;

  /* And emulate the state save.  */
  gcc_assert (!cfa_remember.in_use);
  cfa_remember = cfa;
  cfa_remember.in_use = 1;
}
2543
2544 /* A "subroutine" of dwarf2out_cfi_begin_epilogue. Emit the restore
2545 required. */
2546
2547 void
2548 dwarf2out_frame_debug_restore_state (void)
2549 {
2550 dw_cfi_ref cfi = new_cfi ();
2551 const char *label = dwarf2out_cfi_label (false);
2552
2553 cfi->dw_cfi_opc = DW_CFA_restore_state;
2554 add_fde_cfi (label, cfi);
2555
2556 gcc_assert (cfa_remember.in_use);
2557 cfa = cfa_remember;
2558 cfa_remember.in_use = 0;
2559 }
2560
2561 /* Run once per function. */
2562
2563 void
2564 dwarf2cfi_function_init (void)
2565 {
2566 args_size = old_args_size = 0;
2567 }
2568
2569 /* Run once. */
2570
2571 void
2572 dwarf2cfi_frame_init (void)
2573 {
2574 dw_cfa_location loc;
2575
2576 /* Generate the CFA instructions common to all FDE's. Do it now for the
2577 sake of lookup_cfa. */
2578
2579 /* On entry, the Canonical Frame Address is at SP. */
2580 memset(&loc, 0, sizeof (loc));
2581 loc.reg = STACK_POINTER_REGNUM;
2582 loc.offset = INCOMING_FRAME_SP_OFFSET;
2583 def_cfa_1 (NULL, &loc);
2584
2585 if (targetm.debug_unwind_info () == UI_DWARF2
2586 || targetm_common.except_unwind_info (&global_options) == UI_DWARF2)
2587 initial_return_save (INCOMING_RETURN_ADDR_RTX);
2588 }
2589 \f
2590
2591 /* Save the result of dwarf2out_do_frame across PCH. */
2592 static GTY(()) bool saved_do_cfi_asm = 0;
2593
2594 /* Decide whether we want to emit frame unwind information for the current
2595 translation unit. */
2596
2597 int
2598 dwarf2out_do_frame (void)
2599 {
2600 /* We want to emit correct CFA location expressions or lists, so we
2601 have to return true if we're going to output debug info, even if
2602 we're not going to output frame or unwind info. */
2603 if (write_symbols == DWARF2_DEBUG || write_symbols == VMS_AND_DWARF2_DEBUG)
2604 return true;
2605
2606 if (saved_do_cfi_asm)
2607 return true;
2608
2609 if (targetm.debug_unwind_info () == UI_DWARF2)
2610 return true;
2611
2612 if ((flag_unwind_tables || flag_exceptions)
2613 && targetm_common.except_unwind_info (&global_options) == UI_DWARF2)
2614 return true;
2615
2616 return false;
2617 }
2618
2619 /* Decide whether to emit frame unwind via assembler directives. */
2620
2621 int
2622 dwarf2out_do_cfi_asm (void)
2623 {
2624 int enc;
2625
2626 #ifdef MIPS_DEBUGGING_INFO
2627 return false;
2628 #endif
2629 if (saved_do_cfi_asm)
2630 return true;
2631 if (!flag_dwarf2_cfi_asm || !dwarf2out_do_frame ())
2632 return false;
2633 if (!HAVE_GAS_CFI_PERSONALITY_DIRECTIVE)
2634 return false;
2635
2636 /* Make sure the personality encoding is one the assembler can support.
2637 In particular, aligned addresses can't be handled. */
2638 enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/2,/*global=*/1);
2639 if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
2640 return false;
2641 enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/0,/*global=*/0);
2642 if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
2643 return false;
2644
2645 /* If we can't get the assembler to emit only .debug_frame, and we don't need
2646 dwarf2 unwind info for exceptions, then emit .debug_frame by hand. */
2647 if (!HAVE_GAS_CFI_SECTIONS_DIRECTIVE
2648 && !flag_unwind_tables && !flag_exceptions
2649 && targetm_common.except_unwind_info (&global_options) != UI_DWARF2)
2650 return false;
2651
2652 saved_do_cfi_asm = true;
2653 return true;
2654 }
2655
2656 #include "gt-dwarf2cfi.h"