decl.c (value_annotation_hasher::handle_cache_entry): Delete.
[gcc.git] / gcc / dwarf2cfi.c
1 /* Dwarf2 Call Frame Information helper routines.
2 Copyright (C) 1992-2015 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "tm.h"
24 #include "version.h"
25 #include "flags.h"
26 #include "rtl.h"
27 #include "alias.h"
28 #include "symtab.h"
29 #include "tree.h"
30 #include "stor-layout.h"
31 #include "hard-reg-set.h"
32 #include "function.h"
33 #include "cfgbuild.h"
34 #include "dwarf2.h"
35 #include "dwarf2out.h"
36 #include "dwarf2asm.h"
37 #include "tm_p.h"
38 #include "target.h"
39 #include "common/common-target.h"
40 #include "tree-pass.h"
41
42 #include "except.h" /* expand_builtin_dwarf_sp_column */
43 #include "insn-config.h"
44 #include "expmed.h"
45 #include "dojump.h"
46 #include "explow.h"
47 #include "calls.h"
48 #include "emit-rtl.h"
49 #include "varasm.h"
50 #include "stmt.h"
51 #include "expr.h" /* init_return_column_size */
52 #include "regs.h" /* expand_builtin_init_dwarf_reg_sizes */
53 #include "output.h" /* asm_out_file */
54 #include "debug.h" /* dwarf2out_do_frame, dwarf2out_do_cfi_asm */
55
56
57 /* ??? Poison these here until it can be done generically. They've been
58 totally replaced in this file; make sure it stays that way. */
59 #undef DWARF2_UNWIND_INFO
60 #undef DWARF2_FRAME_INFO
61 #if (GCC_VERSION >= 3000)
62 #pragma GCC poison DWARF2_UNWIND_INFO DWARF2_FRAME_INFO
63 #endif
64
65 #ifndef INCOMING_RETURN_ADDR_RTX
66 #define INCOMING_RETURN_ADDR_RTX (gcc_unreachable (), NULL_RTX)
67 #endif
68
69 /* Maximum size (in bytes) of an artificially generated label. */
70 #define MAX_ARTIFICIAL_LABEL_BYTES 30
71 \f
/* A collected description of an entire row of the abstract CFI table.  */
typedef struct GTY(()) dw_cfi_row_struct
{
  /* The expression that computes the CFA, expressed in two different ways.
     The CFA member for the simple cases, and the full CFI expression for
     the complex cases.  The latter will be a DW_CFA_cfa_expression.
     CFA_CFI is non-null only when the CFA required an expression;
     see def_cfa_1.  */
  dw_cfa_location cfa;
  dw_cfi_ref cfa_cfi;

  /* The expressions for any register column that is saved.  Indexed by
     DWARF column number; a null element means the column is not saved.  */
  cfi_vec reg_save;
} dw_cfi_row;
84
/* The caller's ORIG_REG is saved in SAVED_IN_REG.  */
typedef struct GTY(()) reg_saved_in_data_struct {
  /* The register (or PC_RTX) whose incoming value is being preserved.  */
  rtx orig_reg;
  /* The register currently holding that value.  */
  rtx saved_in_reg;
} reg_saved_in_data;
90
91
92 /* Since we no longer have a proper CFG, we're going to create a facsimile
93 of one on the fly while processing the frame-related insns.
94
95 We create dw_trace_info structures for each extended basic block beginning
96 and ending at a "save point". Save points are labels, barriers, certain
97 notes, and of course the beginning and end of the function.
98
99 As we encounter control transfer insns, we propagate the "current"
100 row state across the edges to the starts of traces. When checking is
101 enabled, we validate that we propagate the same data from all sources.
102
103 All traces are members of the TRACE_INFO array, in the order in which
104 they appear in the instruction stream.
105
106 All save points are present in the TRACE_INDEX hash, mapping the insn
107 starting a trace to the dw_trace_info describing the trace. */
108
/* One trace of the pseudo-CFG: an extended basic block running between
   two save points, as described in the commentary above.  */
typedef struct
{
  /* The insn that begins the trace.  */
  rtx_insn *head;

  /* The row state at the beginning and end of the trace.  */
  dw_cfi_row *beg_row, *end_row;

  /* Tracking for DW_CFA_GNU_args_size.  The "true" sizes are those we find
     while scanning insns.  However, the args_size value is irrelevant at
     any point except can_throw_internal_p insns.  Therefore the "delay"
     sizes the values that must actually be emitted for this trace.  */
  HOST_WIDE_INT beg_true_args_size, end_true_args_size;
  HOST_WIDE_INT beg_delay_args_size, end_delay_args_size;

  /* The first EH insn in the trace, where beg_delay_args_size must be set.  */
  rtx_insn *eh_head;

  /* The following variables contain data used in interpreting frame related
     expressions.  These are not part of the "real" row state as defined by
     Dwarf, but it seems like they need to be propagated into a trace in case
     frame related expressions have been sunk.  */
  /* ??? This seems fragile.  These variables are fragments of a larger
     expression.  If we do not keep the entire expression together, we risk
     not being able to put it together properly.  Consider forcing targets
     to generate self-contained expressions and dropping all of the magic
     interpretation code in this file.  Or at least refusing to shrink wrap
     any frame related insn that doesn't contain a complete expression.  */

  /* The register used for saving registers to the stack, and its offset
     from the CFA.  */
  dw_cfa_location cfa_store;

  /* A temporary register holding an integral value used in adjusting SP
     or setting up the store_reg.  The "offset" field holds the integer
     value, not an offset.  */
  dw_cfa_location cfa_temp;

  /* A set of registers saved in other registers.  This is the inverse of
     the row->reg_save info, if the entry is a DW_CFA_register.  This is
     implemented as a flat array because it normally contains zero or 1
     entry, depending on the target.  IA-64 is the big spender here, using
     a maximum of 5 entries.  */
  vec<reg_saved_in_data> regs_saved_in_regs;

  /* An identifier for this trace.  Used only for debugging dumps.  */
  unsigned id;

  /* True if this trace immediately follows NOTE_INSN_SWITCH_TEXT_SECTIONS.  */
  bool switch_sections;

  /* True if we've seen different values incoming to beg_true_args_size.  */
  bool args_size_undefined;
} dw_trace_info;
163
164
165 typedef dw_trace_info *dw_trace_info_ref;
166
167
168 /* Hashtable helpers. */
169
/* Hasher for TRACE_INDEX: traces are keyed by their head insn and are
   owned by the TRACE_INFO vector, hence the no-op remove policy.  */
struct trace_info_hasher : typed_noop_remove <dw_trace_info>
{
  typedef dw_trace_info *value_type;
  typedef dw_trace_info *compare_type;
  static inline hashval_t hash (const dw_trace_info *);
  static inline bool equal (const dw_trace_info *, const dw_trace_info *);
};
177
/* Hash a trace by the UID of its head insn.  */

inline hashval_t
trace_info_hasher::hash (const dw_trace_info *ti)
{
  return INSN_UID (ti->head);
}
183
/* Two traces are the same iff they start at the same insn.  */

inline bool
trace_info_hasher::equal (const dw_trace_info *a, const dw_trace_info *b)
{
  return a->head == b->head;
}
189
190
/* The variables making up the pseudo-cfg, as described above.  */

/* All traces, in the order they appear in the insn stream.  */
static vec<dw_trace_info> trace_info;
/* Traces whose incoming state changed and must be (re)scanned.  */
static vec<dw_trace_info_ref> trace_work_list;
/* Maps the head insn of a trace to its dw_trace_info.  */
static hash_table<trace_info_hasher> *trace_index;

/* A vector of call frame insns for the CIE.  */
cfi_vec cie_cfi_vec;

/* The state of the first row of the FDE table, which includes the
   state provided by the CIE.  */
static GTY(()) dw_cfi_row *cie_cfi_row;

static GTY(()) reg_saved_in_data *cie_return_save;

/* Counter feeding dwarf2out_cfi_label's LCFI label names.  */
static GTY(()) unsigned long dwarf2out_cfi_label_num;

/* The insn after which a new CFI note should be emitted.  */
static rtx_insn *add_cfi_insn;

/* When non-null, add_cfi will add the CFI to this vector.  */
static cfi_vec *add_cfi_vec;

/* The current instruction trace.  */
static dw_trace_info *cur_trace;

/* The current, i.e. most recently generated, row of the CFI table.  */
static dw_cfi_row *cur_row;

/* A copy of the current CFA, for use during the processing of a
   single insn.  */
static dw_cfa_location *cur_cfa;

/* We delay emitting a register save until either (a) we reach the end
   of the prologue or (b) the register is clobbered.  This clusters
   register saves so that there are fewer pc advances.  */

typedef struct {
  /* The register (or PC_RTX) being saved.  */
  rtx reg;
  /* Where it is saved: another register, or NULL for a CFA offset.  */
  rtx saved_reg;
  /* Offset from the CFA, used when SAVED_REG is NULL.  */
  HOST_WIDE_INT cfa_offset;
} queued_reg_save;


static vec<queued_reg_save> queued_reg_saves;

/* True if any CFI directives were emitted at the current insn.  */
static bool any_cfis_emitted;

/* Short-hand for commonly used register numbers.  */
static unsigned dw_stack_pointer_regnum;
static unsigned dw_frame_pointer_regnum;
242 \f
243 /* Hook used by __throw. */
244
245 rtx
246 expand_builtin_dwarf_sp_column (void)
247 {
248 unsigned int dwarf_regnum = DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM);
249 return GEN_INT (DWARF2_FRAME_REG_OUT (dwarf_regnum, 1));
250 }
251
252 /* MEM is a memory reference for the register size table, each element of
253 which has mode MODE. Initialize column C as a return address column. */
254
255 static void
256 init_return_column_size (machine_mode mode, rtx mem, unsigned int c)
257 {
258 HOST_WIDE_INT offset = c * GET_MODE_SIZE (mode);
259 HOST_WIDE_INT size = GET_MODE_SIZE (Pmode);
260 emit_move_insn (adjust_address (mem, mode, offset),
261 gen_int_mode (size, mode));
262 }
263
/* Datastructure used by expand_builtin_init_dwarf_reg_sizes and
   init_one_dwarf_reg_size to communicate on what has been done by the
   latter.  */

typedef struct
{
  /* Whether the dwarf return column was initialized.  */
  bool wrote_return_column;

  /* For each hard register REGNO, whether init_one_dwarf_reg_size
     was given REGNO to process already.  */
  bool processed_regno [FIRST_PSEUDO_REGISTER];

} init_one_dwarf_reg_state;
278
/* Helper for expand_builtin_init_dwarf_reg_sizes.  Generate code to
   initialize the dwarf register size table entry corresponding to register
   REGNO in REGMODE.  TABLE is the table base address, SLOTMODE is the mode to
   use for the size entry to initialize, and INIT_STATE is the communication
   datastructure conveying what we're doing to our caller.  */

static
void init_one_dwarf_reg_size (int regno, machine_mode regmode,
			      rtx table, machine_mode slotmode,
			      init_one_dwarf_reg_state *init_state)
{
  const unsigned int dnum = DWARF_FRAME_REGNUM (regno);
  const unsigned int rnum = DWARF2_FRAME_REG_OUT (dnum, 1);
  const unsigned int dcol = DWARF_REG_TO_UNWIND_COLUMN (rnum);

  const HOST_WIDE_INT slotoffset = dcol * GET_MODE_SIZE (slotmode);
  const HOST_WIDE_INT regsize = GET_MODE_SIZE (regmode);

  /* Mark REGNO as seen before any of the early returns below, so that a
     register already handled as part of a span is never processed twice.  */
  init_state->processed_regno[regno] = true;

  /* Columns past the table's extent have no slot to write.  */
  if (rnum >= DWARF_FRAME_REGISTERS)
    return;

  if (dnum == DWARF_FRAME_RETURN_COLUMN)
    {
      /* A VOIDmode return column carries no size information; leave
	 wrote_return_column unset so the caller supplies a default.  */
      if (regmode == VOIDmode)
	return;
      init_state->wrote_return_column = true;
    }

  /* Defensive check: never emit a store at a negative table offset.  */
  if (slotoffset < 0)
    return;

  emit_move_insn (adjust_address (table, slotmode, slotoffset),
		  gen_int_mode (regsize, slotmode));
}
315
/* Generate code to initialize the dwarf register size table located
   at the provided ADDRESS.  */

void
expand_builtin_init_dwarf_reg_sizes (tree address)
{
  unsigned int i;
  machine_mode mode = TYPE_MODE (char_type_node);
  rtx addr = expand_normal (address);
  rtx mem = gen_rtx_MEM (BLKmode, addr);

  init_one_dwarf_reg_state init_state;

  memset ((char *)&init_state, 0, sizeof (init_state));

  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    {
      machine_mode save_mode;
      rtx span;

      /* No point in processing a register multiple times.  This could happen
	 with register spans, e.g. when a reg is first processed as a piece of
	 a span, then as a register on its own later on.  */

      if (init_state.processed_regno[i])
	continue;

      save_mode = targetm.dwarf_frame_reg_mode (i);
      span = targetm.dwarf_register_span (gen_rtx_REG (save_mode, i));

      /* A register with no span is sized directly; otherwise size each
	 piece of the span individually.  */
      if (!span)
	init_one_dwarf_reg_size (i, save_mode, mem, mode, &init_state);
      else
	{
	  for (int si = 0; si < XVECLEN (span, 0); si++)
	    {
	      rtx reg = XVECEXP (span, 0, si);

	      init_one_dwarf_reg_size
		(REGNO (reg), GET_MODE (reg), mem, mode, &init_state);
	    }
	}
    }

  /* Fall back to a default size for the return column if no register
     supplied one above.  */
  if (!init_state.wrote_return_column)
    init_return_column_size (mode, mem, DWARF_FRAME_RETURN_COLUMN);

#ifdef DWARF_ALT_FRAME_RETURN_COLUMN
  init_return_column_size (mode, mem, DWARF_ALT_FRAME_RETURN_COLUMN);
#endif

  targetm.init_dwarf_reg_sizes_extra (address);
}
369
370 \f
371 static dw_trace_info *
372 get_trace_info (rtx_insn *insn)
373 {
374 dw_trace_info dummy;
375 dummy.head = insn;
376 return trace_index->find_with_hash (&dummy, INSN_UID (insn));
377 }
378
379 static bool
380 save_point_p (rtx_insn *insn)
381 {
382 /* Labels, except those that are really jump tables. */
383 if (LABEL_P (insn))
384 return inside_basic_block_p (insn);
385
386 /* We split traces at the prologue/epilogue notes because those
387 are points at which the unwind info is usually stable. This
388 makes it easier to find spots with identical unwind info so
389 that we can use remember/restore_state opcodes. */
390 if (NOTE_P (insn))
391 switch (NOTE_KIND (insn))
392 {
393 case NOTE_INSN_PROLOGUE_END:
394 case NOTE_INSN_EPILOGUE_BEG:
395 return true;
396 }
397
398 return false;
399 }
400
401 /* Divide OFF by DWARF_CIE_DATA_ALIGNMENT, asserting no remainder. */
402
403 static inline HOST_WIDE_INT
404 div_data_align (HOST_WIDE_INT off)
405 {
406 HOST_WIDE_INT r = off / DWARF_CIE_DATA_ALIGNMENT;
407 gcc_assert (r * DWARF_CIE_DATA_ALIGNMENT == off);
408 return r;
409 }
410
411 /* Return true if we need a signed version of a given opcode
412 (e.g. DW_CFA_offset_extended_sf vs DW_CFA_offset_extended). */
413
414 static inline bool
415 need_data_align_sf_opcode (HOST_WIDE_INT off)
416 {
417 return DWARF_CIE_DATA_ALIGNMENT < 0 ? off > 0 : off < 0;
418 }
419
420 /* Return a pointer to a newly allocated Call Frame Instruction. */
421
422 static inline dw_cfi_ref
423 new_cfi (void)
424 {
425 dw_cfi_ref cfi = ggc_alloc<dw_cfi_node> ();
426
427 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = 0;
428 cfi->dw_cfi_oprnd2.dw_cfi_reg_num = 0;
429
430 return cfi;
431 }
432
433 /* Return a newly allocated CFI row, with no defined data. */
434
435 static dw_cfi_row *
436 new_cfi_row (void)
437 {
438 dw_cfi_row *row = ggc_cleared_alloc<dw_cfi_row> ();
439
440 row->cfa.reg = INVALID_REGNUM;
441
442 return row;
443 }
444
445 /* Return a copy of an existing CFI row. */
446
447 static dw_cfi_row *
448 copy_cfi_row (dw_cfi_row *src)
449 {
450 dw_cfi_row *dst = ggc_alloc<dw_cfi_row> ();
451
452 *dst = *src;
453 dst->reg_save = vec_safe_copy (src->reg_save);
454
455 return dst;
456 }
457
458 /* Generate a new label for the CFI info to refer to. */
459
460 static char *
461 dwarf2out_cfi_label (void)
462 {
463 int num = dwarf2out_cfi_label_num++;
464 char label[20];
465
466 ASM_GENERATE_INTERNAL_LABEL (label, "LCFI", num);
467
468 return xstrdup (label);
469 }
470
471 /* Add CFI either to the current insn stream or to a vector, or both. */
472
473 static void
474 add_cfi (dw_cfi_ref cfi)
475 {
476 any_cfis_emitted = true;
477
478 if (add_cfi_insn != NULL)
479 {
480 add_cfi_insn = emit_note_after (NOTE_INSN_CFI, add_cfi_insn);
481 NOTE_CFI (add_cfi_insn) = cfi;
482 }
483
484 if (add_cfi_vec != NULL)
485 vec_safe_push (*add_cfi_vec, cfi);
486 }
487
488 static void
489 add_cfi_args_size (HOST_WIDE_INT size)
490 {
491 dw_cfi_ref cfi = new_cfi ();
492
493 /* While we can occasionally have args_size < 0 internally, this state
494 should not persist at a point we actually need an opcode. */
495 gcc_assert (size >= 0);
496
497 cfi->dw_cfi_opc = DW_CFA_GNU_args_size;
498 cfi->dw_cfi_oprnd1.dw_cfi_offset = size;
499
500 add_cfi (cfi);
501 }
502
503 static void
504 add_cfi_restore (unsigned reg)
505 {
506 dw_cfi_ref cfi = new_cfi ();
507
508 cfi->dw_cfi_opc = (reg & ~0x3f ? DW_CFA_restore_extended : DW_CFA_restore);
509 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
510
511 add_cfi (cfi);
512 }
513
514 /* Perform ROW->REG_SAVE[COLUMN] = CFI. CFI may be null, indicating
515 that the register column is no longer saved. */
516
517 static void
518 update_row_reg_save (dw_cfi_row *row, unsigned column, dw_cfi_ref cfi)
519 {
520 if (vec_safe_length (row->reg_save) <= column)
521 vec_safe_grow_cleared (row->reg_save, column + 1);
522 (*row->reg_save)[column] = cfi;
523 }
524
525 /* This function fills in aa dw_cfa_location structure from a dwarf location
526 descriptor sequence. */
527
528 static void
529 get_cfa_from_loc_descr (dw_cfa_location *cfa, struct dw_loc_descr_node *loc)
530 {
531 struct dw_loc_descr_node *ptr;
532 cfa->offset = 0;
533 cfa->base_offset = 0;
534 cfa->indirect = 0;
535 cfa->reg = -1;
536
537 for (ptr = loc; ptr != NULL; ptr = ptr->dw_loc_next)
538 {
539 enum dwarf_location_atom op = ptr->dw_loc_opc;
540
541 switch (op)
542 {
543 case DW_OP_reg0:
544 case DW_OP_reg1:
545 case DW_OP_reg2:
546 case DW_OP_reg3:
547 case DW_OP_reg4:
548 case DW_OP_reg5:
549 case DW_OP_reg6:
550 case DW_OP_reg7:
551 case DW_OP_reg8:
552 case DW_OP_reg9:
553 case DW_OP_reg10:
554 case DW_OP_reg11:
555 case DW_OP_reg12:
556 case DW_OP_reg13:
557 case DW_OP_reg14:
558 case DW_OP_reg15:
559 case DW_OP_reg16:
560 case DW_OP_reg17:
561 case DW_OP_reg18:
562 case DW_OP_reg19:
563 case DW_OP_reg20:
564 case DW_OP_reg21:
565 case DW_OP_reg22:
566 case DW_OP_reg23:
567 case DW_OP_reg24:
568 case DW_OP_reg25:
569 case DW_OP_reg26:
570 case DW_OP_reg27:
571 case DW_OP_reg28:
572 case DW_OP_reg29:
573 case DW_OP_reg30:
574 case DW_OP_reg31:
575 cfa->reg = op - DW_OP_reg0;
576 break;
577 case DW_OP_regx:
578 cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
579 break;
580 case DW_OP_breg0:
581 case DW_OP_breg1:
582 case DW_OP_breg2:
583 case DW_OP_breg3:
584 case DW_OP_breg4:
585 case DW_OP_breg5:
586 case DW_OP_breg6:
587 case DW_OP_breg7:
588 case DW_OP_breg8:
589 case DW_OP_breg9:
590 case DW_OP_breg10:
591 case DW_OP_breg11:
592 case DW_OP_breg12:
593 case DW_OP_breg13:
594 case DW_OP_breg14:
595 case DW_OP_breg15:
596 case DW_OP_breg16:
597 case DW_OP_breg17:
598 case DW_OP_breg18:
599 case DW_OP_breg19:
600 case DW_OP_breg20:
601 case DW_OP_breg21:
602 case DW_OP_breg22:
603 case DW_OP_breg23:
604 case DW_OP_breg24:
605 case DW_OP_breg25:
606 case DW_OP_breg26:
607 case DW_OP_breg27:
608 case DW_OP_breg28:
609 case DW_OP_breg29:
610 case DW_OP_breg30:
611 case DW_OP_breg31:
612 cfa->reg = op - DW_OP_breg0;
613 cfa->base_offset = ptr->dw_loc_oprnd1.v.val_int;
614 break;
615 case DW_OP_bregx:
616 cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
617 cfa->base_offset = ptr->dw_loc_oprnd2.v.val_int;
618 break;
619 case DW_OP_deref:
620 cfa->indirect = 1;
621 break;
622 case DW_OP_plus_uconst:
623 cfa->offset = ptr->dw_loc_oprnd1.v.val_unsigned;
624 break;
625 default:
626 gcc_unreachable ();
627 }
628 }
629 }
630
/* Find the previous value for the CFA, iteratively.  CFI is the opcode
   to interpret, *LOC will be updated as necessary, *REMEMBER is used for
   one level of remember/restore state processing.  */

void
lookup_cfa_1 (dw_cfi_ref cfi, dw_cfa_location *loc, dw_cfa_location *remember)
{
  switch (cfi->dw_cfi_opc)
    {
    case DW_CFA_def_cfa_offset:
    case DW_CFA_def_cfa_offset_sf:
      /* Only the offset changes; the CFA register is preserved.  */
      loc->offset = cfi->dw_cfi_oprnd1.dw_cfi_offset;
      break;
    case DW_CFA_def_cfa_register:
      /* Only the register changes; the offset is preserved.  */
      loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
      break;
    case DW_CFA_def_cfa:
    case DW_CFA_def_cfa_sf:
      loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
      loc->offset = cfi->dw_cfi_oprnd2.dw_cfi_offset;
      break;
    case DW_CFA_def_cfa_expression:
      /* Decode the location expression back into register/offset form.  */
      get_cfa_from_loc_descr (loc, cfi->dw_cfi_oprnd1.dw_cfi_loc);
      break;

    case DW_CFA_remember_state:
      /* Only a single level of remember/restore is supported here.  */
      gcc_assert (!remember->in_use);
      *remember = *loc;
      remember->in_use = 1;
      break;
    case DW_CFA_restore_state:
      gcc_assert (remember->in_use);
      *loc = *remember;
      remember->in_use = 0;
      break;

    default:
      /* Other opcodes do not affect the CFA.  */
      break;
    }
}
671
672 /* Determine if two dw_cfa_location structures define the same data. */
673
674 bool
675 cfa_equal_p (const dw_cfa_location *loc1, const dw_cfa_location *loc2)
676 {
677 return (loc1->reg == loc2->reg
678 && loc1->offset == loc2->offset
679 && loc1->indirect == loc2->indirect
680 && (loc1->indirect == 0
681 || loc1->base_offset == loc2->base_offset));
682 }
683
684 /* Determine if two CFI operands are identical. */
685
686 static bool
687 cfi_oprnd_equal_p (enum dw_cfi_oprnd_type t, dw_cfi_oprnd *a, dw_cfi_oprnd *b)
688 {
689 switch (t)
690 {
691 case dw_cfi_oprnd_unused:
692 return true;
693 case dw_cfi_oprnd_reg_num:
694 return a->dw_cfi_reg_num == b->dw_cfi_reg_num;
695 case dw_cfi_oprnd_offset:
696 return a->dw_cfi_offset == b->dw_cfi_offset;
697 case dw_cfi_oprnd_addr:
698 return (a->dw_cfi_addr == b->dw_cfi_addr
699 || strcmp (a->dw_cfi_addr, b->dw_cfi_addr) == 0);
700 case dw_cfi_oprnd_loc:
701 return loc_descr_equal_p (a->dw_cfi_loc, b->dw_cfi_loc);
702 }
703 gcc_unreachable ();
704 }
705
706 /* Determine if two CFI entries are identical. */
707
708 static bool
709 cfi_equal_p (dw_cfi_ref a, dw_cfi_ref b)
710 {
711 enum dwarf_call_frame_info opc;
712
713 /* Make things easier for our callers, including missing operands. */
714 if (a == b)
715 return true;
716 if (a == NULL || b == NULL)
717 return false;
718
719 /* Obviously, the opcodes must match. */
720 opc = a->dw_cfi_opc;
721 if (opc != b->dw_cfi_opc)
722 return false;
723
724 /* Compare the two operands, re-using the type of the operands as
725 already exposed elsewhere. */
726 return (cfi_oprnd_equal_p (dw_cfi_oprnd1_desc (opc),
727 &a->dw_cfi_oprnd1, &b->dw_cfi_oprnd1)
728 && cfi_oprnd_equal_p (dw_cfi_oprnd2_desc (opc),
729 &a->dw_cfi_oprnd2, &b->dw_cfi_oprnd2));
730 }
731
732 /* Determine if two CFI_ROW structures are identical. */
733
734 static bool
735 cfi_row_equal_p (dw_cfi_row *a, dw_cfi_row *b)
736 {
737 size_t i, n_a, n_b, n_max;
738
739 if (a->cfa_cfi)
740 {
741 if (!cfi_equal_p (a->cfa_cfi, b->cfa_cfi))
742 return false;
743 }
744 else if (!cfa_equal_p (&a->cfa, &b->cfa))
745 return false;
746
747 n_a = vec_safe_length (a->reg_save);
748 n_b = vec_safe_length (b->reg_save);
749 n_max = MAX (n_a, n_b);
750
751 for (i = 0; i < n_max; ++i)
752 {
753 dw_cfi_ref r_a = NULL, r_b = NULL;
754
755 if (i < n_a)
756 r_a = (*a->reg_save)[i];
757 if (i < n_b)
758 r_b = (*b->reg_save)[i];
759
760 if (!cfi_equal_p (r_a, r_b))
761 return false;
762 }
763
764 return true;
765 }
766
/* The CFA is now calculated from NEW_CFA.  Consider OLD_CFA in determining
   what opcode to emit.  Returns the CFI opcode to effect the change, or
   NULL if NEW_CFA == OLD_CFA.  */

static dw_cfi_ref
def_cfa_0 (dw_cfa_location *old_cfa, dw_cfa_location *new_cfa)
{
  dw_cfi_ref cfi;

  /* If nothing changed, no need to issue any call frame instructions.  */
  if (cfa_equal_p (old_cfa, new_cfa))
    return NULL;

  cfi = new_cfi ();

  /* The branches below pick the smallest opcode that can express the
     transition, from cheapest (offset only) to most general
     (full expression).  */
  if (new_cfa->reg == old_cfa->reg && !new_cfa->indirect && !old_cfa->indirect)
    {
      /* Construct a "DW_CFA_def_cfa_offset <offset>" instruction, indicating
	 the CFA register did not change but the offset did.  The data
	 factoring for DW_CFA_def_cfa_offset_sf happens in output_cfi, or
	 in the assembler via the .cfi_def_cfa_offset directive.  */
      if (new_cfa->offset < 0)
	cfi->dw_cfi_opc = DW_CFA_def_cfa_offset_sf;
      else
	cfi->dw_cfi_opc = DW_CFA_def_cfa_offset;
      cfi->dw_cfi_oprnd1.dw_cfi_offset = new_cfa->offset;
    }
  else if (new_cfa->offset == old_cfa->offset
	   && old_cfa->reg != INVALID_REGNUM
	   && !new_cfa->indirect
	   && !old_cfa->indirect)
    {
      /* Construct a "DW_CFA_def_cfa_register <register>" instruction,
	 indicating the CFA register has changed to <register> but the
	 offset has not changed.  */
      cfi->dw_cfi_opc = DW_CFA_def_cfa_register;
      cfi->dw_cfi_oprnd1.dw_cfi_reg_num = new_cfa->reg;
    }
  else if (new_cfa->indirect == 0)
    {
      /* Construct a "DW_CFA_def_cfa <register> <offset>" instruction,
	 indicating the CFA register has changed to <register> with
	 the specified offset.  The data factoring for DW_CFA_def_cfa_sf
	 happens in output_cfi, or in the assembler via the .cfi_def_cfa
	 directive.  */
      if (new_cfa->offset < 0)
	cfi->dw_cfi_opc = DW_CFA_def_cfa_sf;
      else
	cfi->dw_cfi_opc = DW_CFA_def_cfa;
      cfi->dw_cfi_oprnd1.dw_cfi_reg_num = new_cfa->reg;
      cfi->dw_cfi_oprnd2.dw_cfi_offset = new_cfa->offset;
    }
  else
    {
      /* Construct a DW_CFA_def_cfa_expression instruction to
	 calculate the CFA using a full location expression since no
	 register-offset pair is available.  */
      struct dw_loc_descr_node *loc_list;

      cfi->dw_cfi_opc = DW_CFA_def_cfa_expression;
      loc_list = build_cfa_loc (new_cfa, 0);
      cfi->dw_cfi_oprnd1.dw_cfi_loc = loc_list;
    }

  return cfi;
}
833
834 /* Similarly, but take OLD_CFA from CUR_ROW, and update it after the fact. */
835
836 static void
837 def_cfa_1 (dw_cfa_location *new_cfa)
838 {
839 dw_cfi_ref cfi;
840
841 if (cur_trace->cfa_store.reg == new_cfa->reg && new_cfa->indirect == 0)
842 cur_trace->cfa_store.offset = new_cfa->offset;
843
844 cfi = def_cfa_0 (&cur_row->cfa, new_cfa);
845 if (cfi)
846 {
847 cur_row->cfa = *new_cfa;
848 cur_row->cfa_cfi = (cfi->dw_cfi_opc == DW_CFA_def_cfa_expression
849 ? cfi : NULL);
850
851 add_cfi (cfi);
852 }
853 }
854
/* Add the CFI for saving a register.  REG is the CFA column number.
   If SREG is INVALID_REGNUM, the register is saved at OFFSET from the CFA;
   otherwise it is saved in SREG.  */

static void
reg_save (unsigned int reg, unsigned int sreg, HOST_WIDE_INT offset)
{
  dw_fde_ref fde = cfun ? cfun->fde : NULL;
  dw_cfi_ref cfi = new_cfi ();

  cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;

  /* When stack is aligned, store REG using DW_CFA_expression with FP.  */
  if (fde
      && fde->stack_realign
      && sreg == INVALID_REGNUM)
    {
      cfi->dw_cfi_opc = DW_CFA_expression;
      cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
      cfi->dw_cfi_oprnd2.dw_cfi_loc
	= build_cfa_aligned_loc (&cur_row->cfa, offset,
				 fde->stack_realignment);
    }
  else if (sreg == INVALID_REGNUM)
    {
      /* Saved at an offset from the CFA.  Pick the smallest opcode:
	 the signed form when the factored offset would be negative,
	 the extended form when REG does not fit in the 6 opcode bits,
	 and plain DW_CFA_offset otherwise.  */
      if (need_data_align_sf_opcode (offset))
	cfi->dw_cfi_opc = DW_CFA_offset_extended_sf;
      else if (reg & ~0x3f)
	cfi->dw_cfi_opc = DW_CFA_offset_extended;
      else
	cfi->dw_cfi_opc = DW_CFA_offset;
      cfi->dw_cfi_oprnd2.dw_cfi_offset = offset;
    }
  else if (sreg == reg)
    {
      /* While we could emit something like DW_CFA_same_value or
	 DW_CFA_restore, we never expect to see something like that
	 in a prologue.  This is more likely to be a bug.  A backend
	 can always bypass this by using REG_CFA_RESTORE directly.  */
      gcc_unreachable ();
    }
  else
    {
      /* Saved in another register.  */
      cfi->dw_cfi_opc = DW_CFA_register;
      cfi->dw_cfi_oprnd2.dw_cfi_reg_num = sreg;
    }

  add_cfi (cfi);
  update_row_reg_save (cur_row, reg, cfi);
}
905
906 /* A subroutine of scan_trace. Check INSN for a REG_ARGS_SIZE note
907 and adjust data structures to match. */
908
909 static void
910 notice_args_size (rtx_insn *insn)
911 {
912 HOST_WIDE_INT args_size, delta;
913 rtx note;
914
915 note = find_reg_note (insn, REG_ARGS_SIZE, NULL);
916 if (note == NULL)
917 return;
918
919 args_size = INTVAL (XEXP (note, 0));
920 delta = args_size - cur_trace->end_true_args_size;
921 if (delta == 0)
922 return;
923
924 cur_trace->end_true_args_size = args_size;
925
926 /* If the CFA is computed off the stack pointer, then we must adjust
927 the computation of the CFA as well. */
928 if (cur_cfa->reg == dw_stack_pointer_regnum)
929 {
930 gcc_assert (!cur_cfa->indirect);
931
932 /* Convert a change in args_size (always a positive in the
933 direction of stack growth) to a change in stack pointer. */
934 if (!STACK_GROWS_DOWNWARD)
935 delta = -delta;
936
937 cur_cfa->offset += delta;
938 }
939 }
940
941 /* A subroutine of scan_trace. INSN is can_throw_internal. Update the
942 data within the trace related to EH insns and args_size. */
943
944 static void
945 notice_eh_throw (rtx_insn *insn)
946 {
947 HOST_WIDE_INT args_size;
948
949 args_size = cur_trace->end_true_args_size;
950 if (cur_trace->eh_head == NULL)
951 {
952 cur_trace->eh_head = insn;
953 cur_trace->beg_delay_args_size = args_size;
954 cur_trace->end_delay_args_size = args_size;
955 }
956 else if (cur_trace->end_delay_args_size != args_size)
957 {
958 cur_trace->end_delay_args_size = args_size;
959
960 /* ??? If the CFA is the stack pointer, search backward for the last
961 CFI note and insert there. Given that the stack changed for the
962 args_size change, there *must* be such a note in between here and
963 the last eh insn. */
964 add_cfi_args_size (args_size);
965 }
966 }
967
968 /* Short-hand inline for the very common D_F_R (REGNO (x)) operation. */
969 /* ??? This ought to go into dwarf2out.h, except that dwarf2out.h is
970 used in places where rtl is prohibited. */
971
972 static inline unsigned
973 dwf_regno (const_rtx reg)
974 {
975 gcc_assert (REGNO (reg) < FIRST_PSEUDO_REGISTER);
976 return DWARF_FRAME_REGNUM (REGNO (reg));
977 }
978
979 /* Compare X and Y for equivalence. The inputs may be REGs or PC_RTX. */
980
981 static bool
982 compare_reg_or_pc (rtx x, rtx y)
983 {
984 if (REG_P (x) && REG_P (y))
985 return REGNO (x) == REGNO (y);
986 return x == y;
987 }
988
/* Record SRC as being saved in DEST.  DEST may be null to delete an
   existing entry.  SRC may be a register or PC_RTX.  */

static void
record_reg_saved_in_reg (rtx dest, rtx src)
{
  reg_saved_in_data *elt;
  size_t i;

  /* If SRC already has an entry, update or delete it in place.  */
  FOR_EACH_VEC_ELT (cur_trace->regs_saved_in_regs, i, elt)
    if (compare_reg_or_pc (elt->orig_reg, src))
      {
	if (dest == NULL)
	  /* Removal by swapping in the last element; we return
	     immediately, so the disturbed iteration order is moot.  */
	  cur_trace->regs_saved_in_regs.unordered_remove (i);
	else
	  elt->saved_in_reg = dest;
	return;
      }

  /* A delete request for an absent entry is a no-op.  */
  if (dest == NULL)
    return;

  reg_saved_in_data e = {src, dest};
  cur_trace->regs_saved_in_regs.safe_push (e);
}
1014
1015 /* Add an entry to QUEUED_REG_SAVES saying that REG is now saved at
1016 SREG, or if SREG is NULL then it is saved at OFFSET to the CFA. */
1017
1018 static void
1019 queue_reg_save (rtx reg, rtx sreg, HOST_WIDE_INT offset)
1020 {
1021 queued_reg_save *q;
1022 queued_reg_save e = {reg, sreg, offset};
1023 size_t i;
1024
1025 /* Duplicates waste space, but it's also necessary to remove them
1026 for correctness, since the queue gets output in reverse order. */
1027 FOR_EACH_VEC_ELT (queued_reg_saves, i, q)
1028 if (compare_reg_or_pc (q->reg, reg))
1029 {
1030 *q = e;
1031 return;
1032 }
1033
1034 queued_reg_saves.safe_push (e);
1035 }
1036
1037 /* Output all the entries in QUEUED_REG_SAVES. */
1038
1039 static void
1040 dwarf2out_flush_queued_reg_saves (void)
1041 {
1042 queued_reg_save *q;
1043 size_t i;
1044
1045 FOR_EACH_VEC_ELT (queued_reg_saves, i, q)
1046 {
1047 unsigned int reg, sreg;
1048
1049 record_reg_saved_in_reg (q->saved_reg, q->reg);
1050
1051 if (q->reg == pc_rtx)
1052 reg = DWARF_FRAME_RETURN_COLUMN;
1053 else
1054 reg = dwf_regno (q->reg);
1055 if (q->saved_reg)
1056 sreg = dwf_regno (q->saved_reg);
1057 else
1058 sreg = INVALID_REGNUM;
1059 reg_save (reg, sreg, q->cfa_offset);
1060 }
1061
1062 queued_reg_saves.truncate (0);
1063 }
1064
1065 /* Does INSN clobber any register which QUEUED_REG_SAVES lists a saved
1066 location for? Or, does it clobber a register which we've previously
1067 said that some other register is saved in, and for which we now
1068 have a new location for? */
1069
1070 static bool
1071 clobbers_queued_reg_save (const_rtx insn)
1072 {
1073 queued_reg_save *q;
1074 size_t iq;
1075
1076 FOR_EACH_VEC_ELT (queued_reg_saves, iq, q)
1077 {
1078 size_t ir;
1079 reg_saved_in_data *rir;
1080
1081 if (modified_in_p (q->reg, insn))
1082 return true;
1083
1084 FOR_EACH_VEC_ELT (cur_trace->regs_saved_in_regs, ir, rir)
1085 if (compare_reg_or_pc (q->reg, rir->orig_reg)
1086 && modified_in_p (rir->saved_in_reg, insn))
1087 return true;
1088 }
1089
1090 return false;
1091 }
1092
1093 /* What register, if any, is currently saved in REG? */
1094
1095 static rtx
1096 reg_saved_in (rtx reg)
1097 {
1098 unsigned int regn = REGNO (reg);
1099 queued_reg_save *q;
1100 reg_saved_in_data *rir;
1101 size_t i;
1102
1103 FOR_EACH_VEC_ELT (queued_reg_saves, i, q)
1104 if (q->saved_reg && regn == REGNO (q->saved_reg))
1105 return q->reg;
1106
1107 FOR_EACH_VEC_ELT (cur_trace->regs_saved_in_regs, i, rir)
1108 if (regn == REGNO (rir->saved_in_reg))
1109 return rir->orig_reg;
1110
1111 return NULL_RTX;
1112 }
1113
1114 /* A subroutine of dwarf2out_frame_debug, process a REG_DEF_CFA note. */
1115
static void
dwarf2out_frame_debug_def_cfa (rtx pat)
{
  /* Start from a clean slate: the note fully describes the new CFA.  */
  memset (cur_cfa, 0, sizeof (*cur_cfa));

  /* Peel an outer constant displacement: (plus BASE CONST_INT).  */
  if (GET_CODE (pat) == PLUS)
    {
      cur_cfa->offset = INTVAL (XEXP (pat, 1));
      pat = XEXP (pat, 0);
    }
  /* A MEM means the CFA is loaded from memory, i.e. the rule becomes
     indirect: CFA = *(REG + base_offset) + offset.  */
  if (MEM_P (pat))
    {
      cur_cfa->indirect = 1;
      pat = XEXP (pat, 0);
      if (GET_CODE (pat) == PLUS)
	{
	  cur_cfa->base_offset = INTVAL (XEXP (pat, 1));
	  pat = XEXP (pat, 0);
	}
    }
  /* ??? If this fails, we could be calling into the _loc functions to
     define a full expression.  So far no port does that.  */
  gcc_assert (REG_P (pat));
  cur_cfa->reg = dwf_regno (pat);
}
1141
1142 /* A subroutine of dwarf2out_frame_debug, process a REG_ADJUST_CFA note. */
1143
1144 static void
1145 dwarf2out_frame_debug_adjust_cfa (rtx pat)
1146 {
1147 rtx src, dest;
1148
1149 gcc_assert (GET_CODE (pat) == SET);
1150 dest = XEXP (pat, 0);
1151 src = XEXP (pat, 1);
1152
1153 switch (GET_CODE (src))
1154 {
1155 case PLUS:
1156 gcc_assert (dwf_regno (XEXP (src, 0)) == cur_cfa->reg);
1157 cur_cfa->offset -= INTVAL (XEXP (src, 1));
1158 break;
1159
1160 case REG:
1161 break;
1162
1163 default:
1164 gcc_unreachable ();
1165 }
1166
1167 cur_cfa->reg = dwf_regno (dest);
1168 gcc_assert (cur_cfa->indirect == 0);
1169 }
1170
1171 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_OFFSET note. */
1172
1173 static void
1174 dwarf2out_frame_debug_cfa_offset (rtx set)
1175 {
1176 HOST_WIDE_INT offset;
1177 rtx src, addr, span;
1178 unsigned int sregno;
1179
1180 src = XEXP (set, 1);
1181 addr = XEXP (set, 0);
1182 gcc_assert (MEM_P (addr));
1183 addr = XEXP (addr, 0);
1184
1185 /* As documented, only consider extremely simple addresses. */
1186 switch (GET_CODE (addr))
1187 {
1188 case REG:
1189 gcc_assert (dwf_regno (addr) == cur_cfa->reg);
1190 offset = -cur_cfa->offset;
1191 break;
1192 case PLUS:
1193 gcc_assert (dwf_regno (XEXP (addr, 0)) == cur_cfa->reg);
1194 offset = INTVAL (XEXP (addr, 1)) - cur_cfa->offset;
1195 break;
1196 default:
1197 gcc_unreachable ();
1198 }
1199
1200 if (src == pc_rtx)
1201 {
1202 span = NULL;
1203 sregno = DWARF_FRAME_RETURN_COLUMN;
1204 }
1205 else
1206 {
1207 span = targetm.dwarf_register_span (src);
1208 sregno = dwf_regno (src);
1209 }
1210
1211 /* ??? We'd like to use queue_reg_save, but we need to come up with
1212 a different flushing heuristic for epilogues. */
1213 if (!span)
1214 reg_save (sregno, INVALID_REGNUM, offset);
1215 else
1216 {
1217 /* We have a PARALLEL describing where the contents of SRC live.
1218 Adjust the offset for each piece of the PARALLEL. */
1219 HOST_WIDE_INT span_offset = offset;
1220
1221 gcc_assert (GET_CODE (span) == PARALLEL);
1222
1223 const int par_len = XVECLEN (span, 0);
1224 for (int par_index = 0; par_index < par_len; par_index++)
1225 {
1226 rtx elem = XVECEXP (span, 0, par_index);
1227 sregno = dwf_regno (src);
1228 reg_save (sregno, INVALID_REGNUM, span_offset);
1229 span_offset += GET_MODE_SIZE (GET_MODE (elem));
1230 }
1231 }
1232 }
1233
1234 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_REGISTER note. */
1235
1236 static void
1237 dwarf2out_frame_debug_cfa_register (rtx set)
1238 {
1239 rtx src, dest;
1240 unsigned sregno, dregno;
1241
1242 src = XEXP (set, 1);
1243 dest = XEXP (set, 0);
1244
1245 record_reg_saved_in_reg (dest, src);
1246 if (src == pc_rtx)
1247 sregno = DWARF_FRAME_RETURN_COLUMN;
1248 else
1249 sregno = dwf_regno (src);
1250
1251 dregno = dwf_regno (dest);
1252
1253 /* ??? We'd like to use queue_reg_save, but we need to come up with
1254 a different flushing heuristic for epilogues. */
1255 reg_save (sregno, dregno, 0);
1256 }
1257
1258 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_EXPRESSION note. */
1259
static void
dwarf2out_frame_debug_cfa_expression (rtx set)
{
  rtx src, dest, span;
  dw_cfi_ref cfi = new_cfi ();
  unsigned regno;

  dest = SET_DEST (set);
  src = SET_SRC (set);

  /* The note must describe a register saved at a location given by an
     arbitrary address expression.  */
  gcc_assert (REG_P (src));
  gcc_assert (MEM_P (dest));

  /* Multi-piece (spanning) registers are not supported here.  */
  span = targetm.dwarf_register_span (src);
  gcc_assert (!span);

  regno = dwf_regno (src);

  /* Emit DW_CFA_expression: REGNO is saved at the location computed by
     the DWARF location expression built from DEST's address.  */
  cfi->dw_cfi_opc = DW_CFA_expression;
  cfi->dw_cfi_oprnd1.dw_cfi_reg_num = regno;
  cfi->dw_cfi_oprnd2.dw_cfi_loc
    = mem_loc_descriptor (XEXP (dest, 0), get_address_mode (dest),
			  GET_MODE (dest), VAR_INIT_STATUS_INITIALIZED);

  /* ??? We'd like to use queue_reg_save, were the interface different,
     and, as above, we could manage flushing for epilogues.  */
  add_cfi (cfi);
  update_row_reg_save (cur_row, regno, cfi);
}
1289
1290 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_RESTORE note. */
1291
1292 static void
1293 dwarf2out_frame_debug_cfa_restore (rtx reg)
1294 {
1295 gcc_assert (REG_P (reg));
1296
1297 rtx span = targetm.dwarf_register_span (reg);
1298 if (!span)
1299 {
1300 unsigned int regno = dwf_regno (reg);
1301 add_cfi_restore (regno);
1302 update_row_reg_save (cur_row, regno, NULL);
1303 }
1304 else
1305 {
1306 /* We have a PARALLEL describing where the contents of REG live.
1307 Restore the register for each piece of the PARALLEL. */
1308 gcc_assert (GET_CODE (span) == PARALLEL);
1309
1310 const int par_len = XVECLEN (span, 0);
1311 for (int par_index = 0; par_index < par_len; par_index++)
1312 {
1313 reg = XVECEXP (span, 0, par_index);
1314 gcc_assert (REG_P (reg));
1315 unsigned int regno = dwf_regno (reg);
1316 add_cfi_restore (regno);
1317 update_row_reg_save (cur_row, regno, NULL);
1318 }
1319 }
1320 }
1321
1322 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_WINDOW_SAVE.
1323 ??? Perhaps we should note in the CIE where windows are saved (instead of
1324 assuming 0(cfa)) and what registers are in the window. */
1325
static void
dwarf2out_frame_debug_cfa_window_save (void)
{
  /* DW_CFA_GNU_window_save takes no operands, so emitting the bare
     opcode is all that's needed.  */
  dw_cfi_ref cfi = new_cfi ();

  cfi->dw_cfi_opc = DW_CFA_GNU_window_save;
  add_cfi (cfi);
}
1334
1335 /* Record call frame debugging information for an expression EXPR,
1336 which either sets SP or FP (adjusting how we calculate the frame
1337 address) or saves a register to the stack or another register.
1338 LABEL indicates the address of EXPR.
1339
1340 This function encodes a state machine mapping rtxes to actions on
1341 cfa, cfa_store, and cfa_temp.reg. We describe these rules so
1342 users need not read the source code.
1343
1344 The High-Level Picture
1345
1346 Changes in the register we use to calculate the CFA: Currently we
1347 assume that if you copy the CFA register into another register, we
1348 should take the other one as the new CFA register; this seems to
1349 work pretty well. If it's wrong for some target, it's simple
1350 enough not to set RTX_FRAME_RELATED_P on the insn in question.
1351
1352 Changes in the register we use for saving registers to the stack:
1353 This is usually SP, but not always. Again, we deduce that if you
1354 copy SP into another register (and SP is not the CFA register),
1355 then the new register is the one we will be using for register
1356 saves. This also seems to work.
1357
1358 Register saves: There's not much guesswork about this one; if
1359 RTX_FRAME_RELATED_P is set on an insn which modifies memory, it's a
1360 register save, and the register used to calculate the destination
1361 had better be the one we think we're using for this purpose.
1362 It's also assumed that a copy from a call-saved register to another
1363 register is saving that register if RTX_FRAME_RELATED_P is set on
1364 that instruction. If the copy is from a call-saved register to
1365 the *same* register, that means that the register is now the same
1366 value as in the caller.
1367
1368 Except: If the register being saved is the CFA register, and the
1369 offset is nonzero, we are saving the CFA, so we assume we have to
1370 use DW_CFA_def_cfa_expression. If the offset is 0, we assume that
1371 the intent is to save the value of SP from the previous frame.
1372
   In addition, if a register has previously been saved to a different
   register, later references to the saving register are resolved back
   to the originally-saved register (see reg_saved_in).
1375
1376 Invariants / Summaries of Rules
1377
1378 cfa current rule for calculating the CFA. It usually
1379 consists of a register and an offset. This is
1380 actually stored in *cur_cfa, but abbreviated
1381 for the purposes of this documentation.
1382 cfa_store register used by prologue code to save things to the stack
1383 cfa_store.offset is the offset from the value of
1384 cfa_store.reg to the actual CFA
1385 cfa_temp register holding an integral value. cfa_temp.offset
1386 stores the value, which will be used to adjust the
1387 stack pointer. cfa_temp is also used like cfa_store,
1388 to track stores to the stack via fp or a temp reg.
1389
1390 Rules 1- 4: Setting a register's value to cfa.reg or an expression
1391 with cfa.reg as the first operand changes the cfa.reg and its
1392 cfa.offset. Rule 1 and 4 also set cfa_temp.reg and
1393 cfa_temp.offset.
1394
1395 Rules 6- 9: Set a non-cfa.reg register value to a constant or an
1396 expression yielding a constant. This sets cfa_temp.reg
1397 and cfa_temp.offset.
1398
1399 Rule 5: Create a new register cfa_store used to save items to the
1400 stack.
1401
1402 Rules 10-14: Save a register to the stack. Define offset as the
1403 difference of the original location and cfa_store's
1404 location (or cfa_temp's location if cfa_temp is used).
1405
1406 Rules 16-20: If AND operation happens on sp in prologue, we assume
1407 stack is realigned. We will use a group of DW_OP_XXX
1408 expressions to represent the location of the stored
1409 register instead of CFA+offset.
1410
1411 The Rules
1412
1413 "{a,b}" indicates a choice of a xor b.
1414 "<reg>:cfa.reg" indicates that <reg> must equal cfa.reg.
1415
1416 Rule 1:
1417 (set <reg1> <reg2>:cfa.reg)
1418 effects: cfa.reg = <reg1>
1419 cfa.offset unchanged
1420 cfa_temp.reg = <reg1>
1421 cfa_temp.offset = cfa.offset
1422
1423 Rule 2:
1424 (set sp ({minus,plus,losum} {sp,fp}:cfa.reg
1425 {<const_int>,<reg>:cfa_temp.reg}))
1426 effects: cfa.reg = sp if fp used
1427 cfa.offset += {+/- <const_int>, cfa_temp.offset} if cfa.reg==sp
1428 cfa_store.offset += {+/- <const_int>, cfa_temp.offset}
1429 if cfa_store.reg==sp
1430
1431 Rule 3:
1432 (set fp ({minus,plus,losum} <reg>:cfa.reg <const_int>))
1433 effects: cfa.reg = fp
1434 cfa_offset += +/- <const_int>
1435
1436 Rule 4:
1437 (set <reg1> ({plus,losum} <reg2>:cfa.reg <const_int>))
1438 constraints: <reg1> != fp
1439 <reg1> != sp
1440 effects: cfa.reg = <reg1>
1441 cfa_temp.reg = <reg1>
1442 cfa_temp.offset = cfa.offset
1443
1444 Rule 5:
1445 (set <reg1> (plus <reg2>:cfa_temp.reg sp:cfa.reg))
1446 constraints: <reg1> != fp
1447 <reg1> != sp
1448 effects: cfa_store.reg = <reg1>
1449 cfa_store.offset = cfa.offset - cfa_temp.offset
1450
1451 Rule 6:
1452 (set <reg> <const_int>)
1453 effects: cfa_temp.reg = <reg>
1454 cfa_temp.offset = <const_int>
1455
1456 Rule 7:
1457 (set <reg1>:cfa_temp.reg (ior <reg2>:cfa_temp.reg <const_int>))
1458 effects: cfa_temp.reg = <reg1>
1459 cfa_temp.offset |= <const_int>
1460
1461 Rule 8:
1462 (set <reg> (high <exp>))
1463 effects: none
1464
1465 Rule 9:
1466 (set <reg> (lo_sum <exp> <const_int>))
1467 effects: cfa_temp.reg = <reg>
1468 cfa_temp.offset = <const_int>
1469
1470 Rule 10:
1471 (set (mem ({pre,post}_modify sp:cfa_store (???? <reg1> <const_int>))) <reg2>)
1472 effects: cfa_store.offset -= <const_int>
1473 cfa.offset = cfa_store.offset if cfa.reg == sp
1474 cfa.reg = sp
1475 cfa.base_offset = -cfa_store.offset
1476
1477 Rule 11:
1478 (set (mem ({pre_inc,pre_dec,post_dec} sp:cfa_store.reg)) <reg>)
1479 effects: cfa_store.offset += -/+ mode_size(mem)
1480 cfa.offset = cfa_store.offset if cfa.reg == sp
1481 cfa.reg = sp
1482 cfa.base_offset = -cfa_store.offset
1483
1484 Rule 12:
1485 (set (mem ({minus,plus,losum} <reg1>:{cfa_store,cfa_temp} <const_int>))
1486
1487 <reg2>)
1488 effects: cfa.reg = <reg1>
1489 cfa.base_offset = -/+ <const_int> - {cfa_store,cfa_temp}.offset
1490
1491 Rule 13:
1492 (set (mem <reg1>:{cfa_store,cfa_temp}) <reg2>)
1493 effects: cfa.reg = <reg1>
1494 cfa.base_offset = -{cfa_store,cfa_temp}.offset
1495
1496 Rule 14:
1497 (set (mem (post_inc <reg1>:cfa_temp <const_int>)) <reg2>)
1498 effects: cfa.reg = <reg1>
1499 cfa.base_offset = -cfa_temp.offset
1500 cfa_temp.offset -= mode_size(mem)
1501
1502 Rule 15:
1503 (set <reg> {unspec, unspec_volatile})
1504 effects: target-dependent
1505
1506 Rule 16:
1507 (set sp (and: sp <const_int>))
1508 constraints: cfa_store.reg == sp
1509 effects: cfun->fde.stack_realign = 1
1510 cfa_store.offset = 0
1511 fde->drap_reg = cfa.reg if cfa.reg != sp and cfa.reg != fp
1512
1513 Rule 17:
1514 (set (mem ({pre_inc, pre_dec} sp)) (mem (plus (cfa.reg) (const_int))))
1515 effects: cfa_store.offset += -/+ mode_size(mem)
1516
1517 Rule 18:
1518 (set (mem ({pre_inc, pre_dec} sp)) fp)
1519 constraints: fde->stack_realign == 1
1520 effects: cfa_store.offset = 0
1521 cfa.reg != HARD_FRAME_POINTER_REGNUM
1522
1523 Rule 19:
1524 (set (mem ({pre_inc, pre_dec} sp)) cfa.reg)
1525 constraints: fde->stack_realign == 1
1526 && cfa.offset == 0
1527 && cfa.indirect == 0
1528 && cfa.reg != HARD_FRAME_POINTER_REGNUM
1529 effects: Use DW_CFA_def_cfa_expression to define cfa
1530 cfa.reg == fde->drap_reg */
1531
static void
dwarf2out_frame_debug_expr (rtx expr)
{
  rtx src, dest, span;
  HOST_WIDE_INT offset;
  dw_fde_ref fde;

  /* If RTX_FRAME_RELATED_P is set on a PARALLEL, process each member of
     the PARALLEL independently. The first element is always processed if
     it is a SET. This is for backward compatibility.   Other elements
     are processed only if they are SETs and the RTX_FRAME_RELATED_P
     flag is set in them. */
  if (GET_CODE (expr) == PARALLEL || GET_CODE (expr) == SEQUENCE)
    {
      int par_index;
      int limit = XVECLEN (expr, 0);
      rtx elem;

      /* PARALLELs have strict read-modify-write semantics, so we
	 ought to evaluate every rvalue before changing any lvalue.
	 It's cumbersome to do that in general, but there's an
	 easy approximation that is enough for all current users:
	 handle register saves before register assignments.  */
      if (GET_CODE (expr) == PARALLEL)
	for (par_index = 0; par_index < limit; par_index++)
	  {
	    elem = XVECEXP (expr, 0, par_index);
	    if (GET_CODE (elem) == SET
		&& MEM_P (SET_DEST (elem))
		&& (RTX_FRAME_RELATED_P (elem) || par_index == 0))
	      dwarf2out_frame_debug_expr (elem);
	  }

      for (par_index = 0; par_index < limit; par_index++)
	{
	  elem = XVECEXP (expr, 0, par_index);
	  if (GET_CODE (elem) == SET
	      && (!MEM_P (SET_DEST (elem)) || GET_CODE (expr) == SEQUENCE)
	      && (RTX_FRAME_RELATED_P (elem) || par_index == 0))
	    dwarf2out_frame_debug_expr (elem);
	}
      return;
    }

  gcc_assert (GET_CODE (expr) == SET);

  src = SET_SRC (expr);
  dest = SET_DEST (expr);

  /* If SRC was previously saved in another register, treat this as a
     use of the originally-saved register.  */
  if (REG_P (src))
    {
      rtx rsi = reg_saved_in (src);
      if (rsi)
	src = rsi;
    }

  fde = cfun->fde;

  switch (GET_CODE (dest))
    {
    case REG:
      switch (GET_CODE (src))
	{
	  /* Setting FP from SP.  */
	case REG:
	  if (cur_cfa->reg == dwf_regno (src))
	    {
	      /* Rule 1 */
	      /* Update the CFA rule wrt SP or FP.  Make sure src is
		 relative to the current CFA register.

		 We used to require that dest be either SP or FP, but the
		 ARM copies SP to a temporary register, and from there to
		 FP.  So we just rely on the backends to only set
		 RTX_FRAME_RELATED_P on appropriate insns.  */
	      cur_cfa->reg = dwf_regno (dest);
	      cur_trace->cfa_temp.reg = cur_cfa->reg;
	      cur_trace->cfa_temp.offset = cur_cfa->offset;
	    }
	  else
	    {
	      /* Saving a register in a register.  */
	      gcc_assert (!fixed_regs [REGNO (dest)]
			  /* For the SPARC and its register window.  */
			  || (dwf_regno (src) == DWARF_FRAME_RETURN_COLUMN));

	      /* After stack is aligned, we can only save SP in FP
		 if drap register is used.  In this case, we have
		 to restore stack pointer with the CFA value and we
		 don't generate this DWARF information.  */
	      if (fde
		  && fde->stack_realign
		  && REGNO (src) == STACK_POINTER_REGNUM)
		gcc_assert (REGNO (dest) == HARD_FRAME_POINTER_REGNUM
			    && fde->drap_reg != INVALID_REGNUM
			    && cur_cfa->reg != dwf_regno (src));
	      else
		queue_reg_save (src, dest, 0);
	    }
	  break;

	case PLUS:
	case MINUS:
	case LO_SUM:
	  if (dest == stack_pointer_rtx)
	    {
	      /* Rule 2 */
	      /* Adjusting SP.  */
	      switch (GET_CODE (XEXP (src, 1)))
		{
		case CONST_INT:
		  offset = INTVAL (XEXP (src, 1));
		  break;
		case REG:
		  gcc_assert (dwf_regno (XEXP (src, 1))
			      == cur_trace->cfa_temp.reg);
		  offset = cur_trace->cfa_temp.offset;
		  break;
		default:
		  gcc_unreachable ();
		}

	      if (XEXP (src, 0) == hard_frame_pointer_rtx)
		{
		  /* Restoring SP from FP in the epilogue.  */
		  gcc_assert (cur_cfa->reg == dw_frame_pointer_regnum);
		  cur_cfa->reg = dw_stack_pointer_regnum;
		}
	      else if (GET_CODE (src) == LO_SUM)
		/* Assume we've set the source reg of the LO_SUM from sp.  */
		;
	      else
		gcc_assert (XEXP (src, 0) == stack_pointer_rtx);

	      if (GET_CODE (src) != MINUS)
		offset = -offset;
	      if (cur_cfa->reg == dw_stack_pointer_regnum)
		cur_cfa->offset += offset;
	      if (cur_trace->cfa_store.reg == dw_stack_pointer_regnum)
		cur_trace->cfa_store.offset += offset;
	    }
	  else if (dest == hard_frame_pointer_rtx)
	    {
	      /* Rule 3 */
	      /* Either setting the FP from an offset of the SP,
		 or adjusting the FP */
	      gcc_assert (frame_pointer_needed);

	      gcc_assert (REG_P (XEXP (src, 0))
			  && dwf_regno (XEXP (src, 0)) == cur_cfa->reg
			  && CONST_INT_P (XEXP (src, 1)));
	      offset = INTVAL (XEXP (src, 1));
	      if (GET_CODE (src) != MINUS)
		offset = -offset;
	      cur_cfa->offset += offset;
	      cur_cfa->reg = dw_frame_pointer_regnum;
	    }
	  else
	    {
	      gcc_assert (GET_CODE (src) != MINUS);

	      /* Rule 4 */
	      if (REG_P (XEXP (src, 0))
		  && dwf_regno (XEXP (src, 0)) == cur_cfa->reg
		  && CONST_INT_P (XEXP (src, 1)))
		{
		  /* Setting a temporary CFA register that will be copied
		     into the FP later on.  */
		  offset = - INTVAL (XEXP (src, 1));
		  cur_cfa->offset += offset;
		  cur_cfa->reg = dwf_regno (dest);
		  /* Or used to save regs to the stack.  */
		  cur_trace->cfa_temp.reg = cur_cfa->reg;
		  cur_trace->cfa_temp.offset = cur_cfa->offset;
		}

	      /* Rule 5 */
	      else if (REG_P (XEXP (src, 0))
		       && dwf_regno (XEXP (src, 0)) == cur_trace->cfa_temp.reg
		       && XEXP (src, 1) == stack_pointer_rtx)
		{
		  /* Setting a scratch register that we will use instead
		     of SP for saving registers to the stack.  */
		  gcc_assert (cur_cfa->reg == dw_stack_pointer_regnum);
		  cur_trace->cfa_store.reg = dwf_regno (dest);
		  cur_trace->cfa_store.offset
		    = cur_cfa->offset - cur_trace->cfa_temp.offset;
		}

	      /* Rule 9 */
	      else if (GET_CODE (src) == LO_SUM
		       && CONST_INT_P (XEXP (src, 1)))
		{
		  cur_trace->cfa_temp.reg = dwf_regno (dest);
		  cur_trace->cfa_temp.offset = INTVAL (XEXP (src, 1));
		}
	      else
		gcc_unreachable ();
	    }
	  break;

	  /* Rule 6 */
	case CONST_INT:
	  cur_trace->cfa_temp.reg = dwf_regno (dest);
	  cur_trace->cfa_temp.offset = INTVAL (src);
	  break;

	  /* Rule 7 */
	case IOR:
	  gcc_assert (REG_P (XEXP (src, 0))
		      && dwf_regno (XEXP (src, 0)) == cur_trace->cfa_temp.reg
		      && CONST_INT_P (XEXP (src, 1)));

	  cur_trace->cfa_temp.reg = dwf_regno (dest);
	  cur_trace->cfa_temp.offset |= INTVAL (XEXP (src, 1));
	  break;

	  /* Skip over HIGH, assuming it will be followed by a LO_SUM,
	     which will fill in all of the bits.  */
	  /* Rule 8 */
	case HIGH:
	  break;

	  /* Rule 15 */
	case UNSPEC:
	case UNSPEC_VOLATILE:
	  /* All unspecs should be represented by REG_CFA_* notes.  */
	  gcc_unreachable ();
	  return;

	  /* Rule 16 */
	case AND:
	  /* If this AND operation happens on stack pointer in prologue,
	     we assume the stack is realigned and we extract the
	     alignment.  */
	  if (fde && XEXP (src, 0) == stack_pointer_rtx)
	    {
	      /* We interpret reg_save differently with stack_realign set.
		 Thus we must flush whatever we have queued first.  */
	      dwarf2out_flush_queued_reg_saves ();

	      gcc_assert (cur_trace->cfa_store.reg
			  == dwf_regno (XEXP (src, 0)));
	      fde->stack_realign = 1;
	      fde->stack_realignment = INTVAL (XEXP (src, 1));
	      cur_trace->cfa_store.offset = 0;

	      if (cur_cfa->reg != dw_stack_pointer_regnum
		  && cur_cfa->reg != dw_frame_pointer_regnum)
		fde->drap_reg = cur_cfa->reg;
	    }
	  return;

	default:
	  gcc_unreachable ();
	}
      break;

    case MEM:

      /* Saving a register to the stack.  Make sure dest is relative to the
	 CFA register.  */
      switch (GET_CODE (XEXP (dest, 0)))
	{
	  /* Rule 10 */
	  /* With a push.  */
	case PRE_MODIFY:
	case POST_MODIFY:
	  /* We can't handle variable size modifications.  */
	  gcc_assert (GET_CODE (XEXP (XEXP (XEXP (dest, 0), 1), 1))
		      == CONST_INT);
	  offset = -INTVAL (XEXP (XEXP (XEXP (dest, 0), 1), 1));

	  gcc_assert (REGNO (XEXP (XEXP (dest, 0), 0)) == STACK_POINTER_REGNUM
		      && cur_trace->cfa_store.reg == dw_stack_pointer_regnum);

	  cur_trace->cfa_store.offset += offset;
	  if (cur_cfa->reg == dw_stack_pointer_regnum)
	    cur_cfa->offset = cur_trace->cfa_store.offset;

	  if (GET_CODE (XEXP (dest, 0)) == POST_MODIFY)
	    offset -= cur_trace->cfa_store.offset;
	  else
	    offset = -cur_trace->cfa_store.offset;
	  break;

	  /* Rule 11 */
	case PRE_INC:
	case PRE_DEC:
	case POST_DEC:
	  offset = GET_MODE_SIZE (GET_MODE (dest));
	  if (GET_CODE (XEXP (dest, 0)) == PRE_INC)
	    offset = -offset;

	  gcc_assert ((REGNO (XEXP (XEXP (dest, 0), 0))
		       == STACK_POINTER_REGNUM)
		      && cur_trace->cfa_store.reg == dw_stack_pointer_regnum);

	  cur_trace->cfa_store.offset += offset;

	  /* Rule 18: If stack is aligned, we will use FP as a
	     reference to represent the address of the stored
	     register.  */
	  if (fde
	      && fde->stack_realign
	      && REG_P (src)
	      && REGNO (src) == HARD_FRAME_POINTER_REGNUM)
	    {
	      gcc_assert (cur_cfa->reg != dw_frame_pointer_regnum);
	      cur_trace->cfa_store.offset = 0;
	    }

	  if (cur_cfa->reg == dw_stack_pointer_regnum)
	    cur_cfa->offset = cur_trace->cfa_store.offset;

	  if (GET_CODE (XEXP (dest, 0)) == POST_DEC)
	    offset += -cur_trace->cfa_store.offset;
	  else
	    offset = -cur_trace->cfa_store.offset;
	  break;

	  /* Rule 12 */
	  /* With an offset.  */
	case PLUS:
	case MINUS:
	case LO_SUM:
	  {
	    unsigned int regno;

	    gcc_assert (CONST_INT_P (XEXP (XEXP (dest, 0), 1))
			&& REG_P (XEXP (XEXP (dest, 0), 0)));
	    offset = INTVAL (XEXP (XEXP (dest, 0), 1));
	    if (GET_CODE (XEXP (dest, 0)) == MINUS)
	      offset = -offset;

	    regno = dwf_regno (XEXP (XEXP (dest, 0), 0));

	    if (cur_cfa->reg == regno)
	      offset -= cur_cfa->offset;
	    else if (cur_trace->cfa_store.reg == regno)
	      offset -= cur_trace->cfa_store.offset;
	    else
	      {
		gcc_assert (cur_trace->cfa_temp.reg == regno);
		offset -= cur_trace->cfa_temp.offset;
	      }
	  }
	  break;

	  /* Rule 13 */
	  /* Without an offset.  */
	case REG:
	  {
	    unsigned int regno = dwf_regno (XEXP (dest, 0));

	    if (cur_cfa->reg == regno)
	      offset = -cur_cfa->offset;
	    else if (cur_trace->cfa_store.reg == regno)
	      offset = -cur_trace->cfa_store.offset;
	    else
	      {
		gcc_assert (cur_trace->cfa_temp.reg == regno);
		offset = -cur_trace->cfa_temp.offset;
	      }
	  }
	  break;

	  /* Rule 14 */
	case POST_INC:
	  gcc_assert (cur_trace->cfa_temp.reg
		      == dwf_regno (XEXP (XEXP (dest, 0), 0)));
	  offset = -cur_trace->cfa_temp.offset;
	  cur_trace->cfa_temp.offset -= GET_MODE_SIZE (GET_MODE (dest));
	  break;

	default:
	  gcc_unreachable ();
	}

      /* Rule 17 */
      /* If the source operand of this MEM operation is a memory,
	 we only care how much stack grew.  */
      if (MEM_P (src))
	break;

      if (REG_P (src)
	  && REGNO (src) != STACK_POINTER_REGNUM
	  && REGNO (src) != HARD_FRAME_POINTER_REGNUM
	  && dwf_regno (src) == cur_cfa->reg)
	{
	  /* We're storing the current CFA reg into the stack.  */

	  if (cur_cfa->offset == 0)
	    {
	      /* Rule 19 */
	      /* If stack is aligned, putting CFA reg into stack means
		 we can no longer use reg + offset to represent CFA.
		 Here we use DW_CFA_def_cfa_expression instead.  The
		 result of this expression equals to the original CFA
		 value.  */
	      if (fde
		  && fde->stack_realign
		  && cur_cfa->indirect == 0
		  && cur_cfa->reg != dw_frame_pointer_regnum)
		{
		  gcc_assert (fde->drap_reg == cur_cfa->reg);

		  cur_cfa->indirect = 1;
		  cur_cfa->reg = dw_frame_pointer_regnum;
		  cur_cfa->base_offset = offset;
		  cur_cfa->offset = 0;

		  fde->drap_reg_saved = 1;
		  break;
		}

	      /* If the source register is exactly the CFA, assume
		 we're saving SP like any other register; this happens
		 on the ARM.  */
	      queue_reg_save (stack_pointer_rtx, NULL_RTX, offset);
	      break;
	    }
	  else
	    {
	      /* Otherwise, we'll need to look in the stack to
		 calculate the CFA.  */
	      rtx x = XEXP (dest, 0);

	      if (!REG_P (x))
		x = XEXP (x, 0);
	      gcc_assert (REG_P (x));

	      cur_cfa->reg = dwf_regno (x);
	      cur_cfa->base_offset = offset;
	      cur_cfa->indirect = 1;
	      break;
	    }
	}

      if (REG_P (src))
	span = targetm.dwarf_register_span (src);
      else
	span = NULL;

      if (!span)
	queue_reg_save (src, NULL_RTX, offset);
      else
	{
	  /* We have a PARALLEL describing where the contents of SRC live.
	     Queue register saves for each piece of the PARALLEL.  */
	  HOST_WIDE_INT span_offset = offset;

	  gcc_assert (GET_CODE (span) == PARALLEL);

	  const int par_len = XVECLEN (span, 0);
	  for (int par_index = 0; par_index < par_len; par_index++)
	    {
	      rtx elem = XVECEXP (span, 0, par_index);
	      queue_reg_save (elem, NULL_RTX, span_offset);
	      span_offset += GET_MODE_SIZE (GET_MODE (elem));
	    }
	}
      break;

    default:
      gcc_unreachable ();
    }
}
2000
2001 /* Record call frame debugging information for INSN, which either sets
2002 SP or FP (adjusting how we calculate the frame address) or saves a
2003 register to the stack. */
2004
2005 static void
2006 dwarf2out_frame_debug (rtx_insn *insn)
2007 {
2008 rtx note, n, pat;
2009 bool handled_one = false;
2010
2011 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
2012 switch (REG_NOTE_KIND (note))
2013 {
2014 case REG_FRAME_RELATED_EXPR:
2015 pat = XEXP (note, 0);
2016 goto do_frame_expr;
2017
2018 case REG_CFA_DEF_CFA:
2019 dwarf2out_frame_debug_def_cfa (XEXP (note, 0));
2020 handled_one = true;
2021 break;
2022
2023 case REG_CFA_ADJUST_CFA:
2024 n = XEXP (note, 0);
2025 if (n == NULL)
2026 {
2027 n = PATTERN (insn);
2028 if (GET_CODE (n) == PARALLEL)
2029 n = XVECEXP (n, 0, 0);
2030 }
2031 dwarf2out_frame_debug_adjust_cfa (n);
2032 handled_one = true;
2033 break;
2034
2035 case REG_CFA_OFFSET:
2036 n = XEXP (note, 0);
2037 if (n == NULL)
2038 n = single_set (insn);
2039 dwarf2out_frame_debug_cfa_offset (n);
2040 handled_one = true;
2041 break;
2042
2043 case REG_CFA_REGISTER:
2044 n = XEXP (note, 0);
2045 if (n == NULL)
2046 {
2047 n = PATTERN (insn);
2048 if (GET_CODE (n) == PARALLEL)
2049 n = XVECEXP (n, 0, 0);
2050 }
2051 dwarf2out_frame_debug_cfa_register (n);
2052 handled_one = true;
2053 break;
2054
2055 case REG_CFA_EXPRESSION:
2056 n = XEXP (note, 0);
2057 if (n == NULL)
2058 n = single_set (insn);
2059 dwarf2out_frame_debug_cfa_expression (n);
2060 handled_one = true;
2061 break;
2062
2063 case REG_CFA_RESTORE:
2064 n = XEXP (note, 0);
2065 if (n == NULL)
2066 {
2067 n = PATTERN (insn);
2068 if (GET_CODE (n) == PARALLEL)
2069 n = XVECEXP (n, 0, 0);
2070 n = XEXP (n, 0);
2071 }
2072 dwarf2out_frame_debug_cfa_restore (n);
2073 handled_one = true;
2074 break;
2075
2076 case REG_CFA_SET_VDRAP:
2077 n = XEXP (note, 0);
2078 if (REG_P (n))
2079 {
2080 dw_fde_ref fde = cfun->fde;
2081 if (fde)
2082 {
2083 gcc_assert (fde->vdrap_reg == INVALID_REGNUM);
2084 if (REG_P (n))
2085 fde->vdrap_reg = dwf_regno (n);
2086 }
2087 }
2088 handled_one = true;
2089 break;
2090
2091 case REG_CFA_WINDOW_SAVE:
2092 dwarf2out_frame_debug_cfa_window_save ();
2093 handled_one = true;
2094 break;
2095
2096 case REG_CFA_FLUSH_QUEUE:
2097 /* The actual flush happens elsewhere. */
2098 handled_one = true;
2099 break;
2100
2101 default:
2102 break;
2103 }
2104
2105 if (!handled_one)
2106 {
2107 pat = PATTERN (insn);
2108 do_frame_expr:
2109 dwarf2out_frame_debug_expr (pat);
2110
2111 /* Check again. A parallel can save and update the same register.
2112 We could probably check just once, here, but this is safer than
2113 removing the check at the start of the function. */
2114 if (clobbers_queued_reg_save (pat))
2115 dwarf2out_flush_queued_reg_saves ();
2116 }
2117 }
2118
2119 /* Emit CFI info to change the state from OLD_ROW to NEW_ROW. */
2120
2121 static void
2122 change_cfi_row (dw_cfi_row *old_row, dw_cfi_row *new_row)
2123 {
2124 size_t i, n_old, n_new, n_max;
2125 dw_cfi_ref cfi;
2126
2127 if (new_row->cfa_cfi && !cfi_equal_p (old_row->cfa_cfi, new_row->cfa_cfi))
2128 add_cfi (new_row->cfa_cfi);
2129 else
2130 {
2131 cfi = def_cfa_0 (&old_row->cfa, &new_row->cfa);
2132 if (cfi)
2133 add_cfi (cfi);
2134 }
2135
2136 n_old = vec_safe_length (old_row->reg_save);
2137 n_new = vec_safe_length (new_row->reg_save);
2138 n_max = MAX (n_old, n_new);
2139
2140 for (i = 0; i < n_max; ++i)
2141 {
2142 dw_cfi_ref r_old = NULL, r_new = NULL;
2143
2144 if (i < n_old)
2145 r_old = (*old_row->reg_save)[i];
2146 if (i < n_new)
2147 r_new = (*new_row->reg_save)[i];
2148
2149 if (r_old == r_new)
2150 ;
2151 else if (r_new == NULL)
2152 add_cfi_restore (i);
2153 else if (!cfi_equal_p (r_old, r_new))
2154 add_cfi (r_new);
2155 }
2156 }
2157
2158 /* Examine CFI and return true if a cfi label and set_loc is needed
2159 beforehand. Even when generating CFI assembler instructions, we
2160 still have to add the cfi to the list so that lookup_cfa_1 works
2161 later on. When -g2 and above we even need to force emitting of
2162 CFI labels and add to list a DW_CFA_set_loc for convert_cfa_to_fb_loc_list
2163 purposes. If we're generating DWARF3 output we use DW_OP_call_frame_cfa
2164 and so don't use convert_cfa_to_fb_loc_list. */
2165
2166 static bool
2167 cfi_label_required_p (dw_cfi_ref cfi)
2168 {
2169 if (!dwarf2out_do_cfi_asm ())
2170 return true;
2171
2172 if (dwarf_version == 2
2173 && debug_info_level > DINFO_LEVEL_TERSE
2174 && (write_symbols == DWARF2_DEBUG
2175 || write_symbols == VMS_AND_DWARF2_DEBUG))
2176 {
2177 switch (cfi->dw_cfi_opc)
2178 {
2179 case DW_CFA_def_cfa_offset:
2180 case DW_CFA_def_cfa_offset_sf:
2181 case DW_CFA_def_cfa_register:
2182 case DW_CFA_def_cfa:
2183 case DW_CFA_def_cfa_sf:
2184 case DW_CFA_def_cfa_expression:
2185 case DW_CFA_restore_state:
2186 return true;
2187 default:
2188 return false;
2189 }
2190 }
2191 return false;
2192 }
2193
2194 /* Walk the function, looking for NOTE_INSN_CFI notes. Add the CFIs to the
2195 function's FDE, adding CFI labels and set_loc/advance_loc opcodes as
2196 necessary. */
static void
add_cfis_to_fde (void)
{
  dw_fde_ref fde = cfun->fde;
  rtx_insn *insn, *next;
  /* We always start with a function_begin label.  */
  bool first = false;

  for (insn = get_insns (); insn; insn = next)
    {
      next = NEXT_INSN (insn);

      if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
	{
	  /* Remember where the second-section CFIs begin within the FDE.  */
	  fde->dw_fde_switch_cfi_index = vec_safe_length (fde->dw_fde_cfi);
	  /* Don't attempt to advance_loc4 between labels
	     in different sections.  */
	  first = true;
	}

      if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_CFI)
	{
	  /* Scan forward over the whole run of consecutive CFI notes
	     (skipping inactive insns), stopping at the next active insn
	     or section switch.  A label is required for the run if any
	     note in it requires one.  */
	  bool required = cfi_label_required_p (NOTE_CFI (insn));
	  while (next)
	    if (NOTE_P (next) && NOTE_KIND (next) == NOTE_INSN_CFI)
	      {
		required |= cfi_label_required_p (NOTE_CFI (next));
		next = NEXT_INSN (next);
	      }
	    else if (active_insn_p (next)
		     || (NOTE_P (next) && (NOTE_KIND (next)
					   == NOTE_INSN_SWITCH_TEXT_SECTIONS)))
	      break;
	    else
	      next = NEXT_INSN (next);
	  if (required)
	    {
	      int num = dwarf2out_cfi_label_num;
	      const char *label = dwarf2out_cfi_label ();
	      dw_cfi_ref xcfi;

	      /* Set the location counter to the new label.  Use set_loc
		 for the first CFI after a section switch, since we
		 cannot advance across sections; otherwise a 4-byte
		 advance from the previous label suffices.  */
	      xcfi = new_cfi ();
	      xcfi->dw_cfi_opc = (first ? DW_CFA_set_loc
				  : DW_CFA_advance_loc4);
	      xcfi->dw_cfi_oprnd1.dw_cfi_addr = label;
	      vec_safe_push (fde->dw_fde_cfi, xcfi);

	      /* Emit the label itself into the insn stream so final ()
		 outputs it at the right spot.  */
	      rtx_note *tmp = emit_note_before (NOTE_INSN_CFI_LABEL, insn);
	      NOTE_LABEL_NUMBER (tmp) = num;
	    }

	  /* Append every CFI in the run to the FDE, in order.  */
	  do
	    {
	      if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_CFI)
		vec_safe_push (fde->dw_fde_cfi, NOTE_CFI (insn));
	      insn = NEXT_INSN (insn);
	    }
	  while (insn != next);
	  first = false;
	}
    }
}
2260
2261 /* If LABEL is the start of a trace, then initialize the state of that
2262 trace from CUR_TRACE and CUR_ROW. */
2263
static void
maybe_record_trace_start (rtx_insn *start, rtx_insn *origin)
{
  dw_trace_info *ti;
  HOST_WIDE_INT args_size;

  /* START must be the head of a trace recorded by create_pseudo_cfg.  */
  ti = get_trace_info (start);
  gcc_assert (ti != NULL);

  if (dump_file)
    {
      fprintf (dump_file, "  saw edge from trace %u to %u (via %s %d)\n",
	       cur_trace->id, ti->id,
	       (origin ? rtx_name[(int) GET_CODE (origin)] : "fallthru"),
	       (origin ? INSN_UID (origin) : 0));
    }

  args_size = cur_trace->end_true_args_size;
  if (ti->beg_row == NULL)
    {
      /* This is the first time we've encountered this trace.  Propagate
	 state across the edge and push the trace onto the work list.  */
      ti->beg_row = copy_cfi_row (cur_row);
      ti->beg_true_args_size = args_size;

      ti->cfa_store = cur_trace->cfa_store;
      ti->cfa_temp = cur_trace->cfa_temp;
      ti->regs_saved_in_regs = cur_trace->regs_saved_in_regs.copy ();

      trace_work_list.safe_push (ti);

      if (dump_file)
	fprintf (dump_file, "\tpush trace %u to worklist\n", ti->id);
    }
  else
    {

      /* We ought to have the same state incoming to a given trace no
	 matter how we arrive at the trace.  Anything else means we've
	 got some kind of optimization error.  */
      gcc_checking_assert (cfi_row_equal_p (cur_row, ti->beg_row));

      /* The args_size is allowed to conflict if it isn't actually used.  */
      if (ti->beg_true_args_size != args_size)
	ti->args_size_undefined = true;
    }
}
2311
2312 /* Similarly, but handle the args_size and CFA reset across EH
2313 and non-local goto edges. */
2314
static void
maybe_record_trace_start_abnormal (rtx_insn *start, rtx_insn *origin)
{
  HOST_WIDE_INT save_args_size, delta;
  dw_cfa_location save_cfa;

  /* Fast path: with no pending args_size there is nothing to undo at
     the abnormal edge, so record the trace start directly.  */
  save_args_size = cur_trace->end_true_args_size;
  if (save_args_size == 0)
    {
      maybe_record_trace_start (start, origin);
      return;
    }

  /* Temporarily reset args_size to zero for the destination of the
     abnormal edge, adjusting a stack-pointer-based CFA to match, then
     restore the current trace's state afterward.  */
  delta = -save_args_size;
  cur_trace->end_true_args_size = 0;

  save_cfa = cur_row->cfa;
  if (cur_row->cfa.reg == dw_stack_pointer_regnum)
    {
      /* Convert a change in args_size (always a positive in the
	 direction of stack growth) to a change in stack pointer.  */
      if (!STACK_GROWS_DOWNWARD)
	delta = -delta;

      cur_row->cfa.offset += delta;
    }

  maybe_record_trace_start (start, origin);

  /* Undo the temporary adjustment; the current trace continues with
     its original state.  */
  cur_trace->end_true_args_size = save_args_size;
  cur_row->cfa = save_cfa;
}
2347
2348 /* Propagate CUR_TRACE state to the destinations implied by INSN. */
2349 /* ??? Sadly, this is in large part a duplicate of make_edges. */
2350
static void
create_trace_edges (rtx_insn *insn)
{
  rtx tmp;
  int i, n;

  if (JUMP_P (insn))
    {
      rtx_jump_table_data *table;

      /* Non-local gotos are handled below via the CALL_P path of the
	 containing call; the jump itself has no local destinations.  */
      if (find_reg_note (insn, REG_NON_LOCAL_GOTO, NULL_RTX))
	return;

      if (tablejump_p (insn, NULL, &table))
	{
	  /* Edges to every case label of the jump table.  */
	  rtvec vec = table->get_labels ();

	  n = GET_NUM_ELEM (vec);
	  for (i = 0; i < n; ++i)
	    {
	      rtx_insn *lab = as_a <rtx_insn *> (XEXP (RTVEC_ELT (vec, i), 0));
	      maybe_record_trace_start (lab, insn);
	    }
	}
      else if (computed_jump_p (insn))
	{
	  /* A computed jump may target any label whose address was
	     taken.  */
	  for (rtx_insn_list *lab = forced_labels; lab; lab = lab->next ())
	    maybe_record_trace_start (lab->insn (), insn);
	}
      else if (returnjump_p (insn))
	/* Returns leave the function; no intra-function edge.  */
	;
      else if ((tmp = extract_asm_operands (PATTERN (insn))) != NULL)
	{
	  /* asm goto: edges to each label operand.  */
	  n = ASM_OPERANDS_LABEL_LENGTH (tmp);
	  for (i = 0; i < n; ++i)
	    {
	      rtx_insn *lab =
		as_a <rtx_insn *> (XEXP (ASM_OPERANDS_LABEL (tmp, i), 0));
	      maybe_record_trace_start (lab, insn);
	    }
	}
      else
	{
	  /* A simple (conditional) jump to its label.  */
	  rtx_insn *lab = JUMP_LABEL_AS_INSN (insn);
	  gcc_assert (lab != NULL);
	  maybe_record_trace_start (lab, insn);
	}
    }
  else if (CALL_P (insn))
    {
      /* Sibling calls don't have edges inside this function.  */
      if (SIBLING_CALL_P (insn))
	return;

      /* Process non-local goto edges.  */
      if (can_nonlocal_goto (insn))
	for (rtx_insn_list *lab = nonlocal_goto_handler_labels;
	     lab;
	     lab = lab->next ())
	  maybe_record_trace_start_abnormal (lab->insn (), insn);
    }
  else if (rtx_sequence *seq = dyn_cast <rtx_sequence *> (PATTERN (insn)))
    {
      /* Delay-slot sequence: recurse on each member insn.  */
      int i, n = seq->len ();
      for (i = 0; i < n; ++i)
	create_trace_edges (seq->insn (i));
      return;
    }

  /* Process EH edges.  */
  if (CALL_P (insn) || cfun->can_throw_non_call_exceptions)
    {
      eh_landing_pad lp = get_eh_landing_pad_from_rtx (insn);
      if (lp)
	maybe_record_trace_start_abnormal (lp->landing_pad, insn);
    }
}
2428
2429 /* A subroutine of scan_trace. Do what needs to be done "after" INSN. */
2430
static void
scan_insn_after (rtx_insn *insn)
{
  /* Record the unwind effects of INSN itself: any frame-related
     pattern/notes first, then any REG_ARGS_SIZE adjustment.  */
  if (RTX_FRAME_RELATED_P (insn))
    dwarf2out_frame_debug (insn);
  notice_args_size (insn);
}
2438
2439 /* Scan the trace beginning at INSN and create the CFI notes for the
2440 instructions therein. */
2441
static void
scan_trace (dw_trace_info *trace)
{
  rtx_insn *prev, *insn = trace->head;
  dw_cfa_location this_cfa;

  if (dump_file)
    fprintf (dump_file, "Processing trace %u : start at %s %d\n",
	     trace->id, rtx_name[(int) GET_CODE (insn)],
	     INSN_UID (insn));

  /* The trace's end state starts as a copy of its (already-computed)
     beginning state and is mutated in place as we scan.  */
  trace->end_row = copy_cfi_row (trace->beg_row);
  trace->end_true_args_size = trace->beg_true_args_size;

  cur_trace = trace;
  cur_row = trace->end_row;

  /* Work on a local copy of the CFA so that annulled-branch handling
     below can temporarily redirect cur_cfa without losing it.  */
  this_cfa = cur_row->cfa;
  cur_cfa = &this_cfa;

  for (prev = insn, insn = NEXT_INSN (insn);
       insn;
       prev = insn, insn = NEXT_INSN (insn))
    {
      rtx_insn *control;

      /* Do everything that happens "before" the insn.  */
      add_cfi_insn = prev;

      /* Notice the end of a trace.  */
      if (BARRIER_P (insn))
	{
	  /* Don't bother saving the unneeded queued registers at all.  */
	  queued_reg_saves.truncate (0);
	  break;
	}
      if (save_point_p (insn))
	{
	  /* Propagate across fallthru edges.  */
	  dwarf2out_flush_queued_reg_saves ();
	  maybe_record_trace_start (insn, NULL);
	  break;
	}

      if (DEBUG_INSN_P (insn) || !inside_basic_block_p (insn))
	continue;

      /* Handle all changes to the row state.  Sequences require special
	 handling for the positioning of the notes.  */
      if (rtx_sequence *pat = dyn_cast <rtx_sequence *> (PATTERN (insn)))
	{
	  rtx_insn *elt;
	  int i, n = pat->len ();

	  control = pat->insn (0);
	  if (can_throw_internal (control))
	    notice_eh_throw (control);
	  dwarf2out_flush_queued_reg_saves ();

	  if (JUMP_P (control) && INSN_ANNULLED_BRANCH_P (control))
	    {
	      /* ??? Hopefully multiple delay slots are not annulled.  */
	      gcc_assert (n == 2);
	      gcc_assert (!RTX_FRAME_RELATED_P (control));
	      gcc_assert (!find_reg_note (control, REG_ARGS_SIZE, NULL));

	      elt = pat->insn (1);

	      if (INSN_FROM_TARGET_P (elt))
		{
		  HOST_WIDE_INT restore_args_size;
		  cfi_vec save_row_reg_save;

		  /* If ELT is an instruction from target of an annulled
		     branch, the effects are for the target only and so
		     the args_size and CFA along the current path
		     shouldn't change.  */
		  add_cfi_insn = NULL;
		  restore_args_size = cur_trace->end_true_args_size;
		  cur_cfa = &cur_row->cfa;
		  save_row_reg_save = vec_safe_copy (cur_row->reg_save);

		  scan_insn_after (elt);

		  /* ??? Should we instead save the entire row state?  */
		  gcc_assert (!queued_reg_saves.length ());

		  create_trace_edges (control);

		  /* Restore the current-path state that ELT's effects
		     temporarily clobbered above.  */
		  cur_trace->end_true_args_size = restore_args_size;
		  cur_row->cfa = this_cfa;
		  cur_row->reg_save = save_row_reg_save;
		  cur_cfa = &this_cfa;
		}
	      else
		{
		  /* If ELT is a annulled branch-taken instruction (i.e.
		     executed only when branch is not taken), the args_size
		     and CFA should not change through the jump.  */
		  create_trace_edges (control);

		  /* Update and continue with the trace.  */
		  add_cfi_insn = insn;
		  scan_insn_after (elt);
		  def_cfa_1 (&this_cfa);
		}
	      continue;
	    }

	  /* The insns in the delay slot should all be considered to happen
	     "before" a call insn.  Consider a call with a stack pointer
	     adjustment in the delay slot.  The backtrace from the callee
	     should include the sp adjustment.  Unfortunately, that leaves
	     us with an unavoidable unwinding error exactly at the call insn
	     itself.  For jump insns we'd prefer to avoid this error by
	     placing the notes after the sequence.  */
	  if (JUMP_P (control))
	    add_cfi_insn = insn;

	  for (i = 1; i < n; ++i)
	    {
	      elt = pat->insn (i);
	      scan_insn_after (elt);
	    }

	  /* Make sure any register saves are visible at the jump target.  */
	  dwarf2out_flush_queued_reg_saves ();
	  any_cfis_emitted = false;

	  /* However, if there is some adjustment on the call itself, e.g.
	     a call_pop, that action should be considered to happen after
	     the call returns.  */
	  add_cfi_insn = insn;
	  scan_insn_after (control);
	}
      else
	{
	  /* Flush data before calls and jumps, and of course if necessary.  */
	  if (can_throw_internal (insn))
	    {
	      notice_eh_throw (insn);
	      dwarf2out_flush_queued_reg_saves ();
	    }
	  else if (!NONJUMP_INSN_P (insn)
		   || clobbers_queued_reg_save (insn)
		   || find_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL))
	    dwarf2out_flush_queued_reg_saves ();
	  any_cfis_emitted = false;

	  add_cfi_insn = insn;
	  scan_insn_after (insn);
	  control = insn;
	}

      /* Between frame-related-p and args_size we might have otherwise
	 emitted two cfa adjustments.  Do it now.  */
      def_cfa_1 (&this_cfa);

      /* Minimize the number of advances by emitting the entire queue
	 once anything is emitted.  */
      if (any_cfis_emitted
	  || find_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL))
	dwarf2out_flush_queued_reg_saves ();

      /* Note that a test for control_flow_insn_p does exactly the
	 same tests as are done to actually create the edges.  So
	 always call the routine and let it not create edges for
	 non-control-flow insns.  */
      create_trace_edges (control);
    }

  /* Clear the per-trace globals now that this trace is finished.  */
  add_cfi_insn = NULL;
  cur_row = NULL;
  cur_trace = NULL;
  cur_cfa = NULL;
}
2618
2619 /* Scan the function and create the initial set of CFI notes. */
2620
static void
create_cfi_notes (void)
{
  dw_trace_info *ti;

  /* Both worklists are populated and drained within this pass and must
     be empty on entry.  */
  gcc_checking_assert (!queued_reg_saves.exists ());
  gcc_checking_assert (!trace_work_list.exists ());

  /* Always begin at the entry trace.  */
  ti = &trace_info[0];
  scan_trace (ti);

  /* Scanning a trace pushes newly-reached traces onto the work list
     (see maybe_record_trace_start); drain it until fixpoint.  */
  while (!trace_work_list.is_empty ())
    {
      ti = trace_work_list.pop ();
      scan_trace (ti);
    }

  queued_reg_saves.release ();
  trace_work_list.release ();
}
2642
2643 /* Return the insn before the first NOTE_INSN_CFI after START. */
2644
2645 static rtx_insn *
2646 before_next_cfi_note (rtx_insn *start)
2647 {
2648 rtx_insn *prev = start;
2649 while (start)
2650 {
2651 if (NOTE_P (start) && NOTE_KIND (start) == NOTE_INSN_CFI)
2652 return prev;
2653 prev = start;
2654 start = NEXT_INSN (start);
2655 }
2656 gcc_unreachable ();
2657 }
2658
2659 /* Insert CFI notes between traces to properly change state between them. */
2660
static void
connect_traces (void)
{
  unsigned i, n = trace_info.length ();
  dw_trace_info *prev_ti, *ti;

  /* ??? Ideally, we should have both queued and processed every trace.
     However the current representation of constant pools on various targets
     is indistinguishable from unreachable code.  Assume for the moment that
     we can simply skip over such traces.  */
  /* ??? Consider creating a DATA_INSN rtx code to indicate that
     these are not "real" instructions, and should not be considered.
     This could be generically useful for tablejump data as well.  */
  /* Remove all unprocessed traces from the list.  */
  for (i = n - 1; i > 0; --i)
    {
      ti = &trace_info[i];
      if (ti->beg_row == NULL)
	{
	  trace_info.ordered_remove (i);
	  n -= 1;
	}
      else
	gcc_assert (ti->end_row != NULL);
    }

  /* Work from the end back to the beginning.  This lets us easily insert
     remember/restore_state notes in the correct order wrt other notes.  */
  prev_ti = &trace_info[n - 1];
  for (i = n - 1; i > 0; --i)
    {
      dw_cfi_row *old_row;

      ti = prev_ti;
      prev_ti = &trace_info[i - 1];

      /* Fixup CFIs are inserted at the head of trace TI.  */
      add_cfi_insn = ti->head;

      /* In dwarf2out_switch_text_section, we'll begin a new FDE
	 for the portion of the function in the alternate text
	 section.  The row state at the very beginning of that
	 new FDE will be exactly the row state from the CIE.  */
      if (ti->switch_sections)
	old_row = cie_cfi_row;
      else
	{
	  old_row = prev_ti->end_row;
	  /* If there's no change from the previous end state, fine.  */
	  if (cfi_row_equal_p (old_row, ti->beg_row))
	    ;
	  /* Otherwise check for the common case of sharing state with
	     the beginning of an epilogue, but not the end.  Insert
	     remember/restore opcodes in that case.  */
	  else if (cfi_row_equal_p (prev_ti->beg_row, ti->beg_row))
	    {
	      dw_cfi_ref cfi;

	      /* Note that if we blindly insert the remember at the
		 start of the trace, we can wind up increasing the
		 size of the unwind info due to extra advance opcodes.
		 Instead, put the remember immediately before the next
		 state change.  We know there must be one, because the
		 state at the beginning and head of the trace differ.  */
	      add_cfi_insn = before_next_cfi_note (prev_ti->head);
	      cfi = new_cfi ();
	      cfi->dw_cfi_opc = DW_CFA_remember_state;
	      add_cfi (cfi);

	      add_cfi_insn = ti->head;
	      cfi = new_cfi ();
	      cfi->dw_cfi_opc = DW_CFA_restore_state;
	      add_cfi (cfi);

	      /* After the restore, the effective previous state is the
		 remembered one; change_cfi_row below emits any residual
		 difference (normally none).  */
	      old_row = prev_ti->beg_row;
	    }
	  /* Otherwise, we'll simply change state from the previous end.  */
	}

      change_cfi_row (old_row, ti->beg_row);

      if (dump_file && add_cfi_insn != ti->head)
	{
	  rtx_insn *note;

	  fprintf (dump_file, "Fixup between trace %u and %u:\n",
		   prev_ti->id, ti->id);

	  note = ti->head;
	  do
	    {
	      note = NEXT_INSN (note);
	      gcc_assert (NOTE_P (note) && NOTE_KIND (note) == NOTE_INSN_CFI);
	      output_cfi_directive (dump_file, NOTE_CFI (note));
	    }
	  while (note != add_cfi_insn);
	}
    }

  /* Connect args_size between traces that have can_throw_internal insns.  */
  if (cfun->eh->lp_array)
    {
      HOST_WIDE_INT prev_args_size = 0;

      for (i = 0; i < n; ++i)
	{
	  ti = &trace_info[i];

	  /* A new FDE (after a section switch) starts over at zero.  */
	  if (ti->switch_sections)
	    prev_args_size = 0;
	  if (ti->eh_head == NULL)
	    continue;
	  gcc_assert (!ti->args_size_undefined);

	  if (ti->beg_delay_args_size != prev_args_size)
	    {
	      /* ??? Search back to previous CFI note.  */
	      add_cfi_insn = PREV_INSN (ti->eh_head);
	      add_cfi_args_size (ti->beg_delay_args_size);
	    }

	  prev_args_size = ti->end_delay_args_size;
	}
    }
}
2785
2786 /* Set up the pseudo-cfg of instruction traces, as described at the
2787 block comment at the top of the file. */
2788
2789 static void
2790 create_pseudo_cfg (void)
2791 {
2792 bool saw_barrier, switch_sections;
2793 dw_trace_info ti;
2794 rtx_insn *insn;
2795 unsigned i;
2796
2797 /* The first trace begins at the start of the function,
2798 and begins with the CIE row state. */
2799 trace_info.create (16);
2800 memset (&ti, 0, sizeof (ti));
2801 ti.head = get_insns ();
2802 ti.beg_row = cie_cfi_row;
2803 ti.cfa_store = cie_cfi_row->cfa;
2804 ti.cfa_temp.reg = INVALID_REGNUM;
2805 trace_info.quick_push (ti);
2806
2807 if (cie_return_save)
2808 ti.regs_saved_in_regs.safe_push (*cie_return_save);
2809
2810 /* Walk all the insns, collecting start of trace locations. */
2811 saw_barrier = false;
2812 switch_sections = false;
2813 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
2814 {
2815 if (BARRIER_P (insn))
2816 saw_barrier = true;
2817 else if (NOTE_P (insn)
2818 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
2819 {
2820 /* We should have just seen a barrier. */
2821 gcc_assert (saw_barrier);
2822 switch_sections = true;
2823 }
2824 /* Watch out for save_point notes between basic blocks.
2825 In particular, a note after a barrier. Do not record these,
2826 delaying trace creation until the label. */
2827 else if (save_point_p (insn)
2828 && (LABEL_P (insn) || !saw_barrier))
2829 {
2830 memset (&ti, 0, sizeof (ti));
2831 ti.head = insn;
2832 ti.switch_sections = switch_sections;
2833 ti.id = trace_info.length ();
2834 trace_info.safe_push (ti);
2835
2836 saw_barrier = false;
2837 switch_sections = false;
2838 }
2839 }
2840
2841 /* Create the trace index after we've finished building trace_info,
2842 avoiding stale pointer problems due to reallocation. */
2843 trace_index
2844 = new hash_table<trace_info_hasher> (trace_info.length ());
2845 dw_trace_info *tp;
2846 FOR_EACH_VEC_ELT (trace_info, i, tp)
2847 {
2848 dw_trace_info **slot;
2849
2850 if (dump_file)
2851 fprintf (dump_file, "Creating trace %u : start at %s %d%s\n", tp->id,
2852 rtx_name[(int) GET_CODE (tp->head)], INSN_UID (tp->head),
2853 tp->switch_sections ? " (section switch)" : "");
2854
2855 slot = trace_index->find_slot_with_hash (tp, INSN_UID (tp->head), INSERT);
2856 gcc_assert (*slot == NULL);
2857 *slot = tp;
2858 }
2859 }
2860
2861 /* Record the initial position of the return address. RTL is
2862 INCOMING_RETURN_ADDR_RTX. */
2863
static void
initial_return_save (rtx rtl)
{
  unsigned int reg = INVALID_REGNUM;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (rtl))
    {
    case REG:
      /* RA is in a register.  */
      reg = dwf_regno (rtl);
      break;

    case MEM:
      /* RA is on the stack.  Decompose the address, which must be
	 sp, sp+const, or sp-const.  */
      rtl = XEXP (rtl, 0);
      switch (GET_CODE (rtl))
	{
	case REG:
	  gcc_assert (REGNO (rtl) == STACK_POINTER_REGNUM);
	  offset = 0;
	  break;

	case PLUS:
	  gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
	  offset = INTVAL (XEXP (rtl, 1));
	  break;

	case MINUS:
	  gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
	  offset = -INTVAL (XEXP (rtl, 1));
	  break;

	default:
	  gcc_unreachable ();
	}

      break;

    case PLUS:
      /* The return address is at some offset from any value we can
	 actually load.  For instance, on the SPARC it is in %i7+8.  Just
	 ignore the offset for now; it doesn't matter for unwinding frames.  */
      gcc_assert (CONST_INT_P (XEXP (rtl, 1)));
      initial_return_save (XEXP (rtl, 0));
      return;

    default:
      gcc_unreachable ();
    }

  /* Record how to find the return address when it doesn't already live
     in the DWARF return column: either saved in another register, or
     at an offset relative to the incoming CFA.  */
  if (reg != DWARF_FRAME_RETURN_COLUMN)
    {
      if (reg != INVALID_REGNUM)
	record_reg_saved_in_reg (rtl, pc_rtx);
      reg_save (DWARF_FRAME_RETURN_COLUMN, reg, offset - cur_row->cfa.offset);
    }
}
2922
/* Compute the CIE's initial state -- the CFA at the incoming stack
   pointer and the location of the return address -- recording it in
   cie_cfi_vec / cie_cfi_row for reuse by every FDE.  Called once per
   translation unit (see execute_dwarf2_frame).  */

static void
create_cie_data (void)
{
  dw_cfa_location loc;
  dw_trace_info cie_trace;

  dw_stack_pointer_regnum = DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM);

  /* Use a scratch trace so that reg_save et al. have a cur_trace to
     record into while building the CIE rows.  */
  memset (&cie_trace, 0, sizeof (cie_trace));
  cur_trace = &cie_trace;

  add_cfi_vec = &cie_cfi_vec;
  cie_cfi_row = cur_row = new_cfi_row ();

  /* On entry, the Canonical Frame Address is at SP.  */
  memset (&loc, 0, sizeof (loc));
  loc.reg = dw_stack_pointer_regnum;
  loc.offset = INCOMING_FRAME_SP_OFFSET;
  def_cfa_1 (&loc);

  if (targetm.debug_unwind_info () == UI_DWARF2
      || targetm_common.except_unwind_info (&global_options) == UI_DWARF2)
    {
      initial_return_save (INCOMING_RETURN_ADDR_RTX);

      /* For a few targets, we have the return address incoming into a
	 register, but choose a different return column.  This will result
	 in a DW_CFA_register for the return, and an entry in
	 regs_saved_in_regs to match.  If the target later stores that
	 return address register to the stack, we want to be able to emit
	 the DW_CFA_offset against the return column, not the intermediate
	 save register.  Save the contents of regs_saved_in_regs so that
	 we can re-initialize it at the start of each function.  */
      switch (cie_trace.regs_saved_in_regs.length ())
	{
	case 0:
	  break;
	case 1:
	  cie_return_save = ggc_alloc<reg_saved_in_data> ();
	  *cie_return_save = cie_trace.regs_saved_in_regs[0];
	  cie_trace.regs_saved_in_regs.release ();
	  break;
	default:
	  gcc_unreachable ();
	}
    }

  /* Tear down the scratch recording state.  */
  add_cfi_vec = NULL;
  cur_row = NULL;
  cur_trace = NULL;
}
2974
2975 /* Annotate the function with NOTE_INSN_CFI notes to record the CFI
2976 state at each location within the function. These notes will be
2977 emitted during pass_final. */
2978
static unsigned int
execute_dwarf2_frame (void)
{
  /* Different HARD_FRAME_POINTER_REGNUM might coexist in the same file.  */
  dw_frame_pointer_regnum = DWARF_FRAME_REGNUM (HARD_FRAME_POINTER_REGNUM);

  /* The first time we're called, compute the incoming frame state.  */
  if (cie_cfi_vec == NULL)
    create_cie_data ();

  dwarf2out_alloc_current_fde ();

  create_pseudo_cfg ();

  /* Do the work.  */
  create_cfi_notes ();
  connect_traces ();
  add_cfis_to_fde ();

  /* Free all the data we allocated.  */
  {
    size_t i;
    dw_trace_info *ti;

    /* Each trace owns a copy of the saved-in-regs vector (see
       maybe_record_trace_start); release them individually.  */
    FOR_EACH_VEC_ELT (trace_info, i, ti)
      ti->regs_saved_in_regs.release ();
  }
  trace_info.release ();

  delete trace_index;
  trace_index = NULL;

  return 0;
}
3013 \f
3014 /* Convert a DWARF call frame info. operation to its string name */
3015
3016 static const char *
3017 dwarf_cfi_name (unsigned int cfi_opc)
3018 {
3019 const char *name = get_DW_CFA_name (cfi_opc);
3020
3021 if (name != NULL)
3022 return name;
3023
3024 return "DW_CFA_<unknown>";
3025 }
3026
3027 /* This routine will generate the correct assembly data for a location
3028 description based on a cfi entry with a complex address. */
3029
3030 static void
3031 output_cfa_loc (dw_cfi_ref cfi, int for_eh)
3032 {
3033 dw_loc_descr_ref loc;
3034 unsigned long size;
3035
3036 if (cfi->dw_cfi_opc == DW_CFA_expression)
3037 {
3038 unsigned r =
3039 DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3040 dw2_asm_output_data (1, r, NULL);
3041 loc = cfi->dw_cfi_oprnd2.dw_cfi_loc;
3042 }
3043 else
3044 loc = cfi->dw_cfi_oprnd1.dw_cfi_loc;
3045
3046 /* Output the size of the block. */
3047 size = size_of_locs (loc);
3048 dw2_asm_output_data_uleb128 (size, NULL);
3049
3050 /* Now output the operations themselves. */
3051 output_loc_sequence (loc, for_eh);
3052 }
3053
3054 /* Similar, but used for .cfi_escape. */
3055
3056 static void
3057 output_cfa_loc_raw (dw_cfi_ref cfi)
3058 {
3059 dw_loc_descr_ref loc;
3060 unsigned long size;
3061
3062 if (cfi->dw_cfi_opc == DW_CFA_expression)
3063 {
3064 unsigned r =
3065 DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3066 fprintf (asm_out_file, "%#x,", r);
3067 loc = cfi->dw_cfi_oprnd2.dw_cfi_loc;
3068 }
3069 else
3070 loc = cfi->dw_cfi_oprnd1.dw_cfi_loc;
3071
3072 /* Output the size of the block. */
3073 size = size_of_locs (loc);
3074 dw2_asm_output_data_uleb128_raw (size);
3075 fputc (',', asm_out_file);
3076
3077 /* Now output the operations themselves. */
3078 output_loc_sequence_raw (loc);
3079 }
3080
3081 /* Output a Call Frame Information opcode and its operand(s). */
3082
void
output_cfi (dw_cfi_ref cfi, dw_fde_ref fde, int for_eh)
{
  unsigned long r;
  HOST_WIDE_INT off;

  /* The three "primary" opcodes encode their operand in the low 6 bits
     of the opcode byte itself; handle those first.  */
  if (cfi->dw_cfi_opc == DW_CFA_advance_loc)
    dw2_asm_output_data (1, (cfi->dw_cfi_opc
			     | (cfi->dw_cfi_oprnd1.dw_cfi_offset & 0x3f)),
			 "DW_CFA_advance_loc " HOST_WIDE_INT_PRINT_HEX,
			 ((unsigned HOST_WIDE_INT)
			  cfi->dw_cfi_oprnd1.dw_cfi_offset));
  else if (cfi->dw_cfi_opc == DW_CFA_offset)
    {
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
      dw2_asm_output_data (1, (cfi->dw_cfi_opc | (r & 0x3f)),
			   "DW_CFA_offset, column %#lx", r);
      off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
      dw2_asm_output_data_uleb128 (off, NULL);
    }
  else if (cfi->dw_cfi_opc == DW_CFA_restore)
    {
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
      dw2_asm_output_data (1, (cfi->dw_cfi_opc | (r & 0x3f)),
			   "DW_CFA_restore, column %#lx", r);
    }
  else
    {
      /* All remaining opcodes are a full byte followed by operands.  */
      dw2_asm_output_data (1, cfi->dw_cfi_opc,
			   "%s", dwarf_cfi_name (cfi->dw_cfi_opc));

      switch (cfi->dw_cfi_opc)
	{
	case DW_CFA_set_loc:
	  if (for_eh)
	    dw2_asm_output_encoded_addr_rtx (
		ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/1, /*global=*/0),
		gen_rtx_SYMBOL_REF (Pmode, cfi->dw_cfi_oprnd1.dw_cfi_addr),
		false, NULL);
	  else
	    dw2_asm_output_addr (DWARF2_ADDR_SIZE,
				 cfi->dw_cfi_oprnd1.dw_cfi_addr, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	/* The advance_locN opcodes take an N-byte delta from the label
	   of the previous location CFI, which each also updates.  */
	case DW_CFA_advance_loc1:
	  dw2_asm_output_delta (1, cfi->dw_cfi_oprnd1.dw_cfi_addr,
				fde->dw_fde_current_label, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	case DW_CFA_advance_loc2:
	  dw2_asm_output_delta (2, cfi->dw_cfi_oprnd1.dw_cfi_addr,
				fde->dw_fde_current_label, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	case DW_CFA_advance_loc4:
	  dw2_asm_output_delta (4, cfi->dw_cfi_oprnd1.dw_cfi_addr,
				fde->dw_fde_current_label, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	case DW_CFA_MIPS_advance_loc8:
	  dw2_asm_output_delta (8, cfi->dw_cfi_oprnd1.dw_cfi_addr,
				fde->dw_fde_current_label, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	case DW_CFA_offset_extended:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
	  dw2_asm_output_data_uleb128 (off, NULL);
	  break;

	case DW_CFA_def_cfa:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  dw2_asm_output_data_uleb128 (cfi->dw_cfi_oprnd2.dw_cfi_offset, NULL);
	  break;

	case DW_CFA_offset_extended_sf:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
	  dw2_asm_output_data_sleb128 (off, NULL);
	  break;

	case DW_CFA_def_cfa_sf:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
	  dw2_asm_output_data_sleb128 (off, NULL);
	  break;

	/* These four all take a single register operand.  */
	case DW_CFA_restore_extended:
	case DW_CFA_undefined:
	case DW_CFA_same_value:
	case DW_CFA_def_cfa_register:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  break;

	case DW_CFA_register:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd2.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  break;

	case DW_CFA_def_cfa_offset:
	case DW_CFA_GNU_args_size:
	  dw2_asm_output_data_uleb128 (cfi->dw_cfi_oprnd1.dw_cfi_offset, NULL);
	  break;

	case DW_CFA_def_cfa_offset_sf:
	  off = div_data_align (cfi->dw_cfi_oprnd1.dw_cfi_offset);
	  dw2_asm_output_data_sleb128 (off, NULL);
	  break;

	case DW_CFA_GNU_window_save:
	  /* No operands.  */
	  break;

	case DW_CFA_def_cfa_expression:
	case DW_CFA_expression:
	  output_cfa_loc (cfi, for_eh);
	  break;

	case DW_CFA_GNU_negative_offset_extended:
	  /* Obsoleted by DW_CFA_offset_extended_sf.  */
	  gcc_unreachable ();

	default:
	  break;
	}
    }
}
3221
3222 /* Similar, but do it via assembler directives instead. */
3223
/* Output CFI entry CFI to stream F as an assembler directive (or, when F
   is a dump file rather than asm_out_file, as a human-readable line for
   debugging dumps).  Register numbers are translated to their unwind
   (EH) numbering via DWARF2_FRAME_REG_OUT.  */

void
output_cfi_directive (FILE *f, dw_cfi_ref cfi)
{
  unsigned long r, r2;

  switch (cfi->dw_cfi_opc)
    {
    case DW_CFA_advance_loc:
    case DW_CFA_advance_loc1:
    case DW_CFA_advance_loc2:
    case DW_CFA_advance_loc4:
    case DW_CFA_MIPS_advance_loc8:
    case DW_CFA_set_loc:
      /* Should only be created in a code path not followed when emitting
	 via directives.  The assembler is going to take care of this for
	 us.  But this routine is also used for debugging dumps, so
	 print something.  */
      gcc_assert (f != asm_out_file);
      fprintf (f, "\t.cfi_advance_loc\n");
      break;

    case DW_CFA_offset:
    case DW_CFA_offset_extended:
    case DW_CFA_offset_extended_sf:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_offset %lu, " HOST_WIDE_INT_PRINT_DEC"\n",
	       r, cfi->dw_cfi_oprnd2.dw_cfi_offset);
      break;

    case DW_CFA_restore:
    case DW_CFA_restore_extended:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_restore %lu\n", r);
      break;

    case DW_CFA_undefined:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_undefined %lu\n", r);
      break;

    case DW_CFA_same_value:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_same_value %lu\n", r);
      break;

    case DW_CFA_def_cfa:
    case DW_CFA_def_cfa_sf:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_def_cfa %lu, " HOST_WIDE_INT_PRINT_DEC"\n",
	       r, cfi->dw_cfi_oprnd2.dw_cfi_offset);
      break;

    case DW_CFA_def_cfa_register:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_def_cfa_register %lu\n", r);
      break;

    case DW_CFA_register:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      r2 = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd2.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_register %lu, %lu\n", r, r2);
      break;

    case DW_CFA_def_cfa_offset:
    case DW_CFA_def_cfa_offset_sf:
      fprintf (f, "\t.cfi_def_cfa_offset "
	       HOST_WIDE_INT_PRINT_DEC"\n",
	       cfi->dw_cfi_oprnd1.dw_cfi_offset);
      break;

    case DW_CFA_remember_state:
      fprintf (f, "\t.cfi_remember_state\n");
      break;
    case DW_CFA_restore_state:
      fprintf (f, "\t.cfi_restore_state\n");
      break;

    case DW_CFA_GNU_args_size:
      /* There is no args_size directive; when emitting real assembly,
	 encode the opcode by hand via .cfi_escape.  Dumps get a made-up
	 .cfi_GNU_args_size line instead.  */
      if (f == asm_out_file)
	{
	  fprintf (f, "\t.cfi_escape %#x,", DW_CFA_GNU_args_size);
	  dw2_asm_output_data_uleb128_raw (cfi->dw_cfi_oprnd1.dw_cfi_offset);
	  if (flag_debug_asm)
	    fprintf (f, "\t%s args_size " HOST_WIDE_INT_PRINT_DEC,
		     ASM_COMMENT_START, cfi->dw_cfi_oprnd1.dw_cfi_offset);
	  fputc ('\n', f);
	}
      else
	{
	  fprintf (f, "\t.cfi_GNU_args_size " HOST_WIDE_INT_PRINT_DEC "\n",
		   cfi->dw_cfi_oprnd1.dw_cfi_offset);
	}
      break;

    case DW_CFA_GNU_window_save:
      fprintf (f, "\t.cfi_window_save\n");
      break;

    case DW_CFA_def_cfa_expression:
      if (f != asm_out_file)
	{
	  fprintf (f, "\t.cfi_def_cfa_expression ...\n");
	  break;
	}
      /* FALLTHRU */
    case DW_CFA_expression:
      if (f != asm_out_file)
	{
	  fprintf (f, "\t.cfi_cfa_expression ...\n");
	  break;
	}
      /* No directive for DWARF expressions either; escape the raw
	 opcode and location-expression bytes.  */
      fprintf (f, "\t.cfi_escape %#x,", cfi->dw_cfi_opc);
      output_cfa_loc_raw (cfi);
      fputc ('\n', f);
      break;

    default:
      gcc_unreachable ();
    }
}
3344
3345 void
3346 dwarf2out_emit_cfi (dw_cfi_ref cfi)
3347 {
3348 if (dwarf2out_do_cfi_asm ())
3349 output_cfi_directive (asm_out_file, cfi);
3350 }
3351
3352 static void
3353 dump_cfi_row (FILE *f, dw_cfi_row *row)
3354 {
3355 dw_cfi_ref cfi;
3356 unsigned i;
3357
3358 cfi = row->cfa_cfi;
3359 if (!cfi)
3360 {
3361 dw_cfa_location dummy;
3362 memset (&dummy, 0, sizeof (dummy));
3363 dummy.reg = INVALID_REGNUM;
3364 cfi = def_cfa_0 (&dummy, &row->cfa);
3365 }
3366 output_cfi_directive (f, cfi);
3367
3368 FOR_EACH_VEC_SAFE_ELT (row->reg_save, i, cfi)
3369 if (cfi)
3370 output_cfi_directive (f, cfi);
3371 }
3372
/* Entry point for use from the debugger: dump ROW to stderr.  The
   preceding declaration exists only to silence -Wmissing-declarations.  */
void debug_cfi_row (dw_cfi_row *row);

void
debug_cfi_row (dw_cfi_row *row)
{
  dump_cfi_row (stderr, row);
}
3380 \f
3381
/* Save the result of dwarf2out_do_frame across PCH.
   This variable is tri-state, with 0 unset, >0 true, <0 false.
   The GTY marker makes it survive precompiled-header save/restore.  */
static GTY(()) signed char saved_do_cfi_asm = 0;
3385
3386 /* Decide whether we want to emit frame unwind information for the current
3387 translation unit. */
3388
3389 bool
3390 dwarf2out_do_frame (void)
3391 {
3392 /* We want to emit correct CFA location expressions or lists, so we
3393 have to return true if we're going to output debug info, even if
3394 we're not going to output frame or unwind info. */
3395 if (write_symbols == DWARF2_DEBUG || write_symbols == VMS_AND_DWARF2_DEBUG)
3396 return true;
3397
3398 if (saved_do_cfi_asm > 0)
3399 return true;
3400
3401 if (targetm.debug_unwind_info () == UI_DWARF2)
3402 return true;
3403
3404 if ((flag_unwind_tables || flag_exceptions)
3405 && targetm_common.except_unwind_info (&global_options) == UI_DWARF2)
3406 return true;
3407
3408 return false;
3409 }
3410
3411 /* Decide whether to emit frame unwind via assembler directives. */
3412
/* Decide (and memoize in saved_do_cfi_asm) whether frame unwind info is
   emitted via assembler .cfi_* directives.  Returns true only when the
   option, the assembler capabilities, and the EH data encodings all
   permit it.  */
bool
dwarf2out_do_cfi_asm (void)
{
  int enc;

  /* Memoized answer from an earlier call (possibly restored from PCH).  */
  if (saved_do_cfi_asm != 0)
    return saved_do_cfi_asm > 0;

  /* Assume failure for a moment.  This lets every early "return false"
     below be cached without further bookkeeping.  */
  saved_do_cfi_asm = -1;

  if (!flag_dwarf2_cfi_asm || !dwarf2out_do_frame ())
    return false;
  if (!HAVE_GAS_CFI_PERSONALITY_DIRECTIVE)
    return false;

  /* Make sure the personality encoding is one the assembler can support.
     In particular, aligned addresses can't be handled.  */
  enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/2,/*global=*/1);
  if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
    return false;
  enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/0,/*global=*/0);
  if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
    return false;

  /* If we can't get the assembler to emit only .debug_frame, and we don't need
     dwarf2 unwind info for exceptions, then emit .debug_frame by hand.  */
  if (!HAVE_GAS_CFI_SECTIONS_DIRECTIVE
      && !flag_unwind_tables && !flag_exceptions
      && targetm_common.except_unwind_info (&global_options) != UI_DWARF2)
    return false;

  /* Success!  Cache the positive answer.  */
  saved_do_cfi_asm = 1;
  return true;
}
3449
namespace {

/* Descriptor for the RTL pass that computes the CFI notes; timed under
   TV_FINAL since it runs as part of final code emission.  */
const pass_data pass_data_dwarf2_frame =
{
  RTL_PASS, /* type */
  "dwarf2", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_FINAL, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

/* The pass object itself; all real work is delegated to
   execute_dwarf2_frame.  */
class pass_dwarf2_frame : public rtl_opt_pass
{
public:
  pass_dwarf2_frame (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_dwarf2_frame, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *);
  virtual unsigned int execute (function *) { return execute_dwarf2_frame (); }

}; // class pass_dwarf2_frame

/* Gate: run only on targets with an RTL prologue and only when dwarf2
   frame info is wanted.  */
bool
pass_dwarf2_frame::gate (function *)
{
#ifndef HAVE_prologue
  /* Targets which still implement the prologue in assembler text
     cannot use the generic dwarf2 unwinding.  */
  return false;
#endif

  /* ??? What to do for UI_TARGET unwinding?  They might be able to benefit
     from the optimized shrink-wrapping annotations that we will compute.
     For now, only produce the CFI notes for dwarf2.  */
  return dwarf2out_do_frame ();
}

} // anon namespace
3494
3495 rtl_opt_pass *
3496 make_pass_dwarf2_frame (gcc::context *ctxt)
3497 {
3498 return new pass_dwarf2_frame (ctxt);
3499 }
3500
3501 #include "gt-dwarf2cfi.h"