[binutils-gdb.git] / gdb / solib-svr4.c
1 /* Handle SVR4 shared libraries for GDB, the GNU Debugger.
2
3 Copyright (C) 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000,
4 2001, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
5 Free Software Foundation, Inc.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23
24 #include "elf/external.h"
25 #include "elf/common.h"
26 #include "elf/mips.h"
27
28 #include "symtab.h"
29 #include "bfd.h"
30 #include "symfile.h"
31 #include "objfiles.h"
32 #include "gdbcore.h"
33 #include "target.h"
34 #include "inferior.h"
35 #include "regcache.h"
36 #include "gdbthread.h"
37 #include "observer.h"
38
39 #include "gdb_assert.h"
40
41 #include "solist.h"
42 #include "solib.h"
43 #include "solib-svr4.h"
44
45 #include "bfd-target.h"
46 #include "elf-bfd.h"
47 #include "exec.h"
48 #include "auxv.h"
49 #include "exceptions.h"
50
51 static struct link_map_offsets *svr4_fetch_link_map_offsets (void);
52 static int svr4_have_link_map_offsets (void);
53 static void svr4_relocate_main_executable (void);
54
55 /* Link map info to include in an allocated so_list entry */
56
57 struct lm_info
58 {
59 /* Pointer to copy of link map from inferior. The type is char *
60 rather than void *, so that we may use byte offsets to find the
61 various fields without the need for a cast. */
62 gdb_byte *lm;
63
64 /* Amount by which addresses in the binary should be relocated to
65 match the inferior. This could most often be taken directly
66 from lm, but when prelinking is involved and the prelink base
67 address changes, we may need a different offset; we want to
68 warn about the difference and compute it only once. */
69 CORE_ADDR l_addr;
70
71 /* The target location of lm. */
72 CORE_ADDR lm_addr;
73 };
74
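/* For reference only: the inferior-side structures that the byte offsets
   below index into look roughly like the SVR4/glibc <link.h> declarations
   sketched here.  This is an illustrative sketch, not the authoritative
   layout; the real offsets for a given target always come from
   svr4_fetch_link_map_offsets ().

   struct r_debug
   {
     int r_version;            // Interface version of the debug protocol.
     struct link_map *r_map;   // Head of the chain of loaded objects.
     ElfW(Addr) r_brk;         // Address the linker calls at map changes.
     enum { RT_CONSISTENT, RT_ADD, RT_DELETE } r_state;
     ElfW(Addr) r_ldbase;      // Base address of the dynamic linker.
   };

   struct link_map
   {
     ElfW(Addr) l_addr;        // Load displacement of this object.
     char *l_name;             // Absolute file name, in inferior memory.
     ElfW(Dyn) *l_ld;          // This object's dynamic section.
     struct link_map *l_next, *l_prev;
   };  */
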
75 /* On SVR4 systems, a list of symbols in the dynamic linker where
76 GDB can try to place a breakpoint to monitor shared library
77 events.
78
79 If none of these symbols are found, or other errors occur, then
80 SVR4 systems will fall back to using one of the symbols in
81 bkpt_names below as the "startup mapping complete" breakpoint address. */
82
83 static char *solib_break_names[] =
84 {
85 "r_debug_state",
86 "_r_debug_state",
87 "_dl_debug_state",
88 "rtld_db_dlactivity",
89 "__dl_rtld_db_dlactivity",
90 "_rtld_debug_state",
91
92 NULL
93 };
94
95 static char *bkpt_names[] =
96 {
97 "_start",
98 "__start",
99 "main",
100 NULL
101 };
102
103 static char *main_name_list[] =
104 {
105 "main_$main",
106 NULL
107 };
108
109 /* Return non-zero if GDB_SO_NAME and INFERIOR_SO_NAME represent
110 the same shared library. */
111
112 static int
113 svr4_same_1 (const char *gdb_so_name, const char *inferior_so_name)
114 {
115 if (strcmp (gdb_so_name, inferior_so_name) == 0)
116 return 1;
117
118 /* On Solaris, when starting the inferior we think that the dynamic linker
119 is /usr/lib/ld.so.1, but later on the table of loaded shared libraries
120 contains /lib/ld.so.1. Sometimes one file is a link to the other, and
121 sometimes they merely have identical content without being linked to
122 each other. We don't restrict this check to Solaris, but the chances
123 of running into this situation elsewhere are very low. */
124 if (strcmp (gdb_so_name, "/usr/lib/ld.so.1") == 0
125 && strcmp (inferior_so_name, "/lib/ld.so.1") == 0)
126 return 1;
127
128 /* Similarly, we observed the same issue with sparc64, but with
129 different locations. */
130 if (strcmp (gdb_so_name, "/usr/lib/sparcv9/ld.so.1") == 0
131 && strcmp (inferior_so_name, "/lib/sparcv9/ld.so.1") == 0)
132 return 1;
133
134 return 0;
135 }
136
137 static int
138 svr4_same (struct so_list *gdb, struct so_list *inferior)
139 {
140 return (svr4_same_1 (gdb->so_original_name, inferior->so_original_name));
141 }
142
143 /* link map access functions */
144
145 static CORE_ADDR
146 LM_ADDR_FROM_LINK_MAP (struct so_list *so)
147 {
148 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
149 struct type *ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
150
151 return extract_typed_address (so->lm_info->lm + lmo->l_addr_offset,
152 ptr_type);
153 }
154
155 static int
156 HAS_LM_DYNAMIC_FROM_LINK_MAP (void)
157 {
158 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
159
160 return lmo->l_ld_offset >= 0;
161 }
162
163 static CORE_ADDR
164 LM_DYNAMIC_FROM_LINK_MAP (struct so_list *so)
165 {
166 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
167 struct type *ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
168
169 return extract_typed_address (so->lm_info->lm + lmo->l_ld_offset,
170 ptr_type);
171 }
172
173 static CORE_ADDR
174 LM_ADDR_CHECK (struct so_list *so, bfd *abfd)
175 {
176 if (so->lm_info->l_addr == (CORE_ADDR)-1)
177 {
178 struct bfd_section *dyninfo_sect;
179 CORE_ADDR l_addr, l_dynaddr, dynaddr;
180
181 l_addr = LM_ADDR_FROM_LINK_MAP (so);
182
183 if (! abfd || ! HAS_LM_DYNAMIC_FROM_LINK_MAP ())
184 goto set_addr;
185
186 l_dynaddr = LM_DYNAMIC_FROM_LINK_MAP (so);
187
188 dyninfo_sect = bfd_get_section_by_name (abfd, ".dynamic");
189 if (dyninfo_sect == NULL)
190 goto set_addr;
191
192 dynaddr = bfd_section_vma (abfd, dyninfo_sect);
193
194 if (dynaddr + l_addr != l_dynaddr)
195 {
196 CORE_ADDR align = 0x1000;
197 CORE_ADDR minpagesize = align;
198
199 if (bfd_get_flavour (abfd) == bfd_target_elf_flavour)
200 {
201 Elf_Internal_Ehdr *ehdr = elf_tdata (abfd)->elf_header;
202 Elf_Internal_Phdr *phdr = elf_tdata (abfd)->phdr;
203 int i;
204
205 align = 1;
206
207 for (i = 0; i < ehdr->e_phnum; i++)
208 if (phdr[i].p_type == PT_LOAD && phdr[i].p_align > align)
209 align = phdr[i].p_align;
210
211 minpagesize = get_elf_backend_data (abfd)->minpagesize;
212 }
213
214 /* Turn it into a mask. */
215 align--;
216
217 /* If the changes match the alignment requirements, we
218 assume we're using a core file that was generated by the
219 same binary, just prelinked with a different base offset.
220 If it doesn't match, we may have a different binary, the
221 same binary with the dynamic table loaded at an unrelated
222 location, or anything, really. To avoid regressions,
223 don't adjust the base offset in the latter case, although
224 odds are that, if things really changed, debugging won't
225 quite work.
226
227 One might expect the stricter condition
228 ((l_addr & align) == 0 && ((l_dynaddr - dynaddr) & align) == 0)
229 but the one below is relaxed for PPC. The PPC kernel supports
230 either 4k or 64k page sizes. To be prepared for 64k pages,
231 PPC ELF files are built using an alignment requirement of 64k.
232 However, when running on a kernel supporting 4k pages, the memory
233 mapping of the library may not actually happen on a 64k boundary!
234
235 (In the usual case where (l_addr & align) == 0, this check is
236 equivalent to the possibly expected check above.)
237
238 Even on PPC it must be zero-aligned at least for MINPAGESIZE. */
239
240 if ((l_addr & (minpagesize - 1)) == 0
241 && (l_addr & align) == ((l_dynaddr - dynaddr) & align))
242 {
243 l_addr = l_dynaddr - dynaddr;
244
245 if (info_verbose)
246 printf_unfiltered (_("Using PIC (Position Independent Code) "
247 "prelink displacement %s for \"%s\".\n"),
248 paddress (target_gdbarch, l_addr),
249 so->so_name);
250 }
251 else
252 warning (_(".dynamic section for \"%s\" "
253 "is not at the expected address "
254 "(wrong library or version mismatch?)"), so->so_name);
255 }
256
257 set_addr:
258 so->lm_info->l_addr = l_addr;
259 }
260
261 return so->lm_info->l_addr;
262 }
263
264 static CORE_ADDR
265 LM_NEXT (struct so_list *so)
266 {
267 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
268 struct type *ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
269
270 return extract_typed_address (so->lm_info->lm + lmo->l_next_offset,
271 ptr_type);
272 }
273
274 static CORE_ADDR
275 LM_PREV (struct so_list *so)
276 {
277 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
278 struct type *ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
279
280 return extract_typed_address (so->lm_info->lm + lmo->l_prev_offset,
281 ptr_type);
282 }
283
284 static CORE_ADDR
285 LM_NAME (struct so_list *so)
286 {
287 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
288 struct type *ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
289
290 return extract_typed_address (so->lm_info->lm + lmo->l_name_offset,
291 ptr_type);
292 }
293
294 static int
295 IGNORE_FIRST_LINK_MAP_ENTRY (struct so_list *so)
296 {
297 /* Assume that everything is a library if the dynamic loader was loaded
298 late by a static executable. */
299 if (exec_bfd && bfd_get_section_by_name (exec_bfd, ".dynamic") == NULL)
300 return 0;
301
302 return LM_PREV (so) == 0;
303 }
304
305 /* Per pspace SVR4 specific data. */
306
307 struct svr4_info
308 {
309 CORE_ADDR debug_base; /* Base of dynamic linker structures */
310
311 /* Validity flag for debug_loader_offset. */
312 int debug_loader_offset_p;
313
314 /* Load address for the dynamic linker, inferred. */
315 CORE_ADDR debug_loader_offset;
316
317 /* Name of the dynamic linker, valid if debug_loader_offset_p. */
318 char *debug_loader_name;
319
320 /* Load map address for the main executable. */
321 CORE_ADDR main_lm_addr;
322
323 CORE_ADDR interp_text_sect_low;
324 CORE_ADDR interp_text_sect_high;
325 CORE_ADDR interp_plt_sect_low;
326 CORE_ADDR interp_plt_sect_high;
327 };
328
329 /* Per-program-space data key. */
330 static const struct program_space_data *solib_svr4_pspace_data;
331
332 static void
333 svr4_pspace_data_cleanup (struct program_space *pspace, void *arg)
334 {
335 struct svr4_info *info;
336
337 info = program_space_data (pspace, solib_svr4_pspace_data);
338 xfree (info);
339 }
340
341 /* Get the current svr4 data. If none is found yet, add it now. This
342 function always returns a valid object. */
343
344 static struct svr4_info *
345 get_svr4_info (void)
346 {
347 struct svr4_info *info;
348
349 info = program_space_data (current_program_space, solib_svr4_pspace_data);
350 if (info != NULL)
351 return info;
352
353 info = XZALLOC (struct svr4_info);
354 set_program_space_data (current_program_space, solib_svr4_pspace_data, info);
355 return info;
356 }
357
358 /* Local function prototypes */
359
360 static int match_main (char *);
361
362 static CORE_ADDR bfd_lookup_symbol (bfd *, char *);
363
364 /*
365
366 LOCAL FUNCTION
367
368 bfd_lookup_symbol -- lookup the value for a specific symbol
369
370 SYNOPSIS
371
372 CORE_ADDR bfd_lookup_symbol (bfd *abfd, char *symname)
373
374 DESCRIPTION
375
376 An expensive way to look up the value of a single symbol for
377 BFDs that are only temporary anyway. This is used by the
378 shared library support to find the address of the debugger
379 notification routine in the shared library.
380
381 The returned symbol may be in a code or data section; functions
382 will normally be in a code section, but may be in a data section
383 if this architecture uses function descriptors.
384
385 Note that 0 is specifically allowed as an error return (no
386 such symbol).
387 */
388
389 static CORE_ADDR
390 bfd_lookup_symbol (bfd *abfd, char *symname)
391 {
392 long storage_needed;
393 asymbol *sym;
394 asymbol **symbol_table;
395 unsigned int number_of_symbols;
396 unsigned int i;
397 struct cleanup *back_to;
398 CORE_ADDR symaddr = 0;
399
400 storage_needed = bfd_get_symtab_upper_bound (abfd);
401
402 if (storage_needed > 0)
403 {
404 symbol_table = (asymbol **) xmalloc (storage_needed);
405 back_to = make_cleanup (xfree, symbol_table);
406 number_of_symbols = bfd_canonicalize_symtab (abfd, symbol_table);
407
408 for (i = 0; i < number_of_symbols; i++)
409 {
410 sym = *symbol_table++;
411 if (strcmp (sym->name, symname) == 0
412 && (sym->section->flags & (SEC_CODE | SEC_DATA)) != 0)
413 {
414 /* BFD symbols are section relative. */
415 symaddr = sym->value + sym->section->vma;
416 break;
417 }
418 }
419 do_cleanups (back_to);
420 }
421
422 if (symaddr)
423 return symaddr;
424
425 /* On FreeBSD, the dynamic linker is stripped by default. So we'll
426 have to check the dynamic symbol table too. */
427
428 storage_needed = bfd_get_dynamic_symtab_upper_bound (abfd);
429
430 if (storage_needed > 0)
431 {
432 symbol_table = (asymbol **) xmalloc (storage_needed);
433 back_to = make_cleanup (xfree, symbol_table);
434 number_of_symbols = bfd_canonicalize_dynamic_symtab (abfd, symbol_table);
435
436 for (i = 0; i < number_of_symbols; i++)
437 {
438 sym = *symbol_table++;
439
440 if (strcmp (sym->name, symname) == 0
441 && (sym->section->flags & (SEC_CODE | SEC_DATA)) != 0)
442 {
443 /* BFD symbols are section relative. */
444 symaddr = sym->value + sym->section->vma;
445 break;
446 }
447 }
448 do_cleanups (back_to);
449 }
450
451 return symaddr;
452 }
453
454
455 /* Read program header TYPE from inferior memory. The header is found
456 by scanning the OS auxiliary vector.
457
458 If TYPE == -1, return the program headers instead of the contents of
459 one program header.
460
461 Return a pointer to allocated memory holding the program header contents,
462 or NULL on failure. If successful, and unless P_SECT_SIZE is NULL, the
463 size of those contents is returned in *P_SECT_SIZE. Likewise, the target
464 architecture size (32-bit or 64-bit) is returned in *P_ARCH_SIZE. */
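/* For reference, the auxiliary vector entries consulted below have the
   usual ELF/Linux meanings (an illustrative note, not GDB-specific):

     AT_PHDR   address of the executable's program headers in the inferior,
     AT_PHENT  size in bytes of one program header entry,
     AT_PHNUM  number of program header entries.

   Comparing AT_PHENT against sizeof (Elf32_External_Phdr) and
   sizeof (Elf64_External_Phdr) is what reveals whether the inferior
   is 32-bit or 64-bit.  */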
465
466 static gdb_byte *
467 read_program_header (int type, int *p_sect_size, int *p_arch_size)
468 {
469 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch);
470 CORE_ADDR at_phdr, at_phent, at_phnum;
471 int arch_size, sect_size;
472 CORE_ADDR sect_addr;
473 gdb_byte *buf;
474
475 /* Get required auxv elements from target. */
476 if (target_auxv_search (&current_target, AT_PHDR, &at_phdr) <= 0)
477 return 0;
478 if (target_auxv_search (&current_target, AT_PHENT, &at_phent) <= 0)
479 return 0;
480 if (target_auxv_search (&current_target, AT_PHNUM, &at_phnum) <= 0)
481 return 0;
482 if (!at_phdr || !at_phnum)
483 return 0;
484
485 /* Determine ELF architecture type. */
486 if (at_phent == sizeof (Elf32_External_Phdr))
487 arch_size = 32;
488 else if (at_phent == sizeof (Elf64_External_Phdr))
489 arch_size = 64;
490 else
491 return 0;
492
493 /* Find the requested segment. */
494 if (type == -1)
495 {
496 sect_addr = at_phdr;
497 sect_size = at_phent * at_phnum;
498 }
499 else if (arch_size == 32)
500 {
501 Elf32_External_Phdr phdr;
502 int i;
503
504 /* Search for requested PHDR. */
505 for (i = 0; i < at_phnum; i++)
506 {
507 if (target_read_memory (at_phdr + i * sizeof (phdr),
508 (gdb_byte *)&phdr, sizeof (phdr)))
509 return 0;
510
511 if (extract_unsigned_integer ((gdb_byte *)phdr.p_type,
512 4, byte_order) == type)
513 break;
514 }
515
516 if (i == at_phnum)
517 return 0;
518
519 /* Retrieve address and size. */
520 sect_addr = extract_unsigned_integer ((gdb_byte *)phdr.p_vaddr,
521 4, byte_order);
522 sect_size = extract_unsigned_integer ((gdb_byte *)phdr.p_memsz,
523 4, byte_order);
524 }
525 else
526 {
527 Elf64_External_Phdr phdr;
528 int i;
529
530 /* Search for requested PHDR. */
531 for (i = 0; i < at_phnum; i++)
532 {
533 if (target_read_memory (at_phdr + i * sizeof (phdr),
534 (gdb_byte *)&phdr, sizeof (phdr)))
535 return 0;
536
537 if (extract_unsigned_integer ((gdb_byte *)phdr.p_type,
538 4, byte_order) == type)
539 break;
540 }
541
542 if (i == at_phnum)
543 return 0;
544
545 /* Retrieve address and size. */
546 sect_addr = extract_unsigned_integer ((gdb_byte *)phdr.p_vaddr,
547 8, byte_order);
548 sect_size = extract_unsigned_integer ((gdb_byte *)phdr.p_memsz,
549 8, byte_order);
550 }
551
552 /* Read in requested program header. */
553 buf = xmalloc (sect_size);
554 if (target_read_memory (sect_addr, buf, sect_size))
555 {
556 xfree (buf);
557 return NULL;
558 }
559
560 if (p_arch_size)
561 *p_arch_size = arch_size;
562 if (p_sect_size)
563 *p_sect_size = sect_size;
564
565 return buf;
566 }
567
568
569 /* Return program interpreter string. */
570 static gdb_byte *
571 find_program_interpreter (void)
572 {
573 gdb_byte *buf = NULL;
574
575 /* If we have an exec_bfd, use its section table. */
576 if (exec_bfd
577 && bfd_get_flavour (exec_bfd) == bfd_target_elf_flavour)
578 {
579 struct bfd_section *interp_sect;
580
581 interp_sect = bfd_get_section_by_name (exec_bfd, ".interp");
582 if (interp_sect != NULL)
583 {
584 int sect_size = bfd_section_size (exec_bfd, interp_sect);
585
586 buf = xmalloc (sect_size);
587 bfd_get_section_contents (exec_bfd, interp_sect, buf, 0, sect_size);
588 }
589 }
590
591 /* If we didn't find it, use the target auxiliary vector. */
592 if (!buf)
593 buf = read_program_header (PT_INTERP, NULL, NULL);
594
595 return buf;
596 }
597
598
599 /* Scan for DYNTAG in .dynamic section of ABFD. If DYNTAG is found, 1 is
600 returned and the corresponding PTR is set. */
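/* For illustration only: each .dynamic entry parsed below is an ELF Dyn
   record, a tag plus a value/pointer union, laid out roughly as in the
   ELF specification (the 32-bit form uses 4-byte fields):

   typedef struct
   {
     Elf64_Sxword d_tag;        // DT_NULL, DT_DEBUG, DT_MIPS_RLD_MAP, ...
     union
     {
       Elf64_Xword d_val;       // Integer value.
       Elf64_Addr d_ptr;        // Virtual address.
     } d_un;
   } Elf64_Dyn;

   scan_dyntag walks these records until it hits DT_NULL or the requested
   tag, then optionally re-reads the value from inferior memory, since the
   runtime loader may have filled it in.  */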
601
602 static int
603 scan_dyntag (int dyntag, bfd *abfd, CORE_ADDR *ptr)
604 {
605 int arch_size, step, sect_size;
606 long dyn_tag;
607 CORE_ADDR dyn_ptr, dyn_addr;
608 gdb_byte *bufend, *bufstart, *buf;
609 Elf32_External_Dyn *x_dynp_32;
610 Elf64_External_Dyn *x_dynp_64;
611 struct bfd_section *sect;
612 struct target_section *target_section;
613
614 if (abfd == NULL)
615 return 0;
616
617 if (bfd_get_flavour (abfd) != bfd_target_elf_flavour)
618 return 0;
619
620 arch_size = bfd_get_arch_size (abfd);
621 if (arch_size == -1)
622 return 0;
623
624 /* Find the start address of the .dynamic section. */
625 sect = bfd_get_section_by_name (abfd, ".dynamic");
626 if (sect == NULL)
627 return 0;
628
629 for (target_section = current_target_sections->sections;
630 target_section < current_target_sections->sections_end;
631 target_section++)
632 if (sect == target_section->the_bfd_section)
633 break;
634 if (target_section < current_target_sections->sections_end)
635 dyn_addr = target_section->addr;
636 else
637 {
638 /* ABFD may come from OBJFILE acting only as a symbol file without being
639 loaded into the target (see add_symbol_file_command). In this case,
640 fall back to the file VMA address, without the possibility of
641 having the section relocated to its actual in-memory address. */
642
643 dyn_addr = bfd_section_vma (abfd, sect);
644 }
645
646 /* Read in .dynamic from the BFD. We will get the actual value
647 from memory later. */
648 sect_size = bfd_section_size (abfd, sect);
649 buf = bufstart = alloca (sect_size);
650 if (!bfd_get_section_contents (abfd, sect,
651 buf, 0, sect_size))
652 return 0;
653
654 /* Iterate over BUF and scan for DYNTAG. If found, set PTR and return. */
655 step = (arch_size == 32) ? sizeof (Elf32_External_Dyn)
656 : sizeof (Elf64_External_Dyn);
657 for (bufend = buf + sect_size;
658 buf < bufend;
659 buf += step)
660 {
661 if (arch_size == 32)
662 {
663 x_dynp_32 = (Elf32_External_Dyn *) buf;
664 dyn_tag = bfd_h_get_32 (abfd, (bfd_byte *) x_dynp_32->d_tag);
665 dyn_ptr = bfd_h_get_32 (abfd, (bfd_byte *) x_dynp_32->d_un.d_ptr);
666 }
667 else
668 {
669 x_dynp_64 = (Elf64_External_Dyn *) buf;
670 dyn_tag = bfd_h_get_64 (abfd, (bfd_byte *) x_dynp_64->d_tag);
671 dyn_ptr = bfd_h_get_64 (abfd, (bfd_byte *) x_dynp_64->d_un.d_ptr);
672 }
673 if (dyn_tag == DT_NULL)
674 return 0;
675 if (dyn_tag == dyntag)
676 {
677 /* If requested, try to read the runtime value of this .dynamic
678 entry. */
679 if (ptr)
680 {
681 struct type *ptr_type;
682 gdb_byte ptr_buf[8];
683 CORE_ADDR ptr_addr;
684
685 ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
686 ptr_addr = dyn_addr + (buf - bufstart) + arch_size / 8;
687 if (target_read_memory (ptr_addr, ptr_buf, arch_size / 8) == 0)
688 dyn_ptr = extract_typed_address (ptr_buf, ptr_type);
689 *ptr = dyn_ptr;
690 }
691 return 1;
692 }
693 }
694
695 return 0;
696 }
697
698 /* Scan for DYNTAG in .dynamic section of the target's main executable,
699 found by consulting the OS auxiliary vector. If DYNTAG is found, 1 is
700 returned and the corresponding PTR is set. */
701
702 static int
703 scan_dyntag_auxv (int dyntag, CORE_ADDR *ptr)
704 {
705 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch);
706 int sect_size, arch_size, step;
707 long dyn_tag;
708 CORE_ADDR dyn_ptr;
709 gdb_byte *bufend, *bufstart, *buf;
710
711 /* Read in .dynamic section. */
712 buf = bufstart = read_program_header (PT_DYNAMIC, &sect_size, &arch_size);
713 if (!buf)
714 return 0;
715
716 /* Iterate over BUF and scan for DYNTAG. If found, set PTR and return. */
717 step = (arch_size == 32) ? sizeof (Elf32_External_Dyn)
718 : sizeof (Elf64_External_Dyn);
719 for (bufend = buf + sect_size;
720 buf < bufend;
721 buf += step)
722 {
723 if (arch_size == 32)
724 {
725 Elf32_External_Dyn *dynp = (Elf32_External_Dyn *) buf;
726 dyn_tag = extract_unsigned_integer ((gdb_byte *) dynp->d_tag,
727 4, byte_order);
728 dyn_ptr = extract_unsigned_integer ((gdb_byte *) dynp->d_un.d_ptr,
729 4, byte_order);
730 }
731 else
732 {
733 Elf64_External_Dyn *dynp = (Elf64_External_Dyn *) buf;
734 dyn_tag = extract_unsigned_integer ((gdb_byte *) dynp->d_tag,
735 8, byte_order);
736 dyn_ptr = extract_unsigned_integer ((gdb_byte *) dynp->d_un.d_ptr,
737 8, byte_order);
738 }
739 if (dyn_tag == DT_NULL)
740 break;
741
742 if (dyn_tag == dyntag)
743 {
744 if (ptr)
745 *ptr = dyn_ptr;
746
747 xfree (bufstart);
748 return 1;
749 }
750 }
751
752 xfree (bufstart);
753 return 0;
754 }
755
756
757 /*
758
759 LOCAL FUNCTION
760
761 elf_locate_base -- locate the base address of dynamic linker structs
762 for SVR4 elf targets.
763
764 SYNOPSIS
765
766 CORE_ADDR elf_locate_base (void)
767
768 DESCRIPTION
769
770 For SVR4 elf targets the address of the dynamic linker's runtime
771 structure is contained within the dynamic info section in the
772 executable file. The dynamic section is also mapped into the
773 inferior address space. Because the runtime loader fills in the
774 real address before starting the inferior, we have to read in the
775 dynamic info section from the inferior address space.
776 If there are any errors while trying to find the address, we
777 silently return 0, otherwise the found address is returned.
778
779 */
780
781 static CORE_ADDR
782 elf_locate_base (void)
783 {
784 struct minimal_symbol *msymbol;
785 CORE_ADDR dyn_ptr;
786
787 /* Look for DT_MIPS_RLD_MAP first. MIPS executables use this
788 instead of DT_DEBUG, although they sometimes contain an unused
789 DT_DEBUG. */
790 if (scan_dyntag (DT_MIPS_RLD_MAP, exec_bfd, &dyn_ptr)
791 || scan_dyntag_auxv (DT_MIPS_RLD_MAP, &dyn_ptr))
792 {
793 struct type *ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
794 gdb_byte *pbuf;
795 int pbuf_size = TYPE_LENGTH (ptr_type);
796 pbuf = alloca (pbuf_size);
797 /* DT_MIPS_RLD_MAP contains a pointer to the address
798 of the dynamic link structure. */
799 if (target_read_memory (dyn_ptr, pbuf, pbuf_size))
800 return 0;
801 return extract_typed_address (pbuf, ptr_type);
802 }
803
804 /* Find DT_DEBUG. */
805 if (scan_dyntag (DT_DEBUG, exec_bfd, &dyn_ptr)
806 || scan_dyntag_auxv (DT_DEBUG, &dyn_ptr))
807 return dyn_ptr;
808
809 /* This may be a static executable. Look for the symbol
810 conventionally named _r_debug, as a last resort. */
811 msymbol = lookup_minimal_symbol ("_r_debug", NULL, symfile_objfile);
812 if (msymbol != NULL)
813 return SYMBOL_VALUE_ADDRESS (msymbol);
814
815 /* DT_DEBUG entry not found. */
816 return 0;
817 }
818
819 /*
820
821 LOCAL FUNCTION
822
823 locate_base -- locate the base address of dynamic linker structs
824
825 SYNOPSIS
826
827 CORE_ADDR locate_base (struct svr4_info *)
828
829 DESCRIPTION
830
831 For both the SunOS and SVR4 shared library implementations, if the
832 inferior executable has been linked dynamically, there is a single
833 address somewhere in the inferior's data space which is the key to
834 locating all of the dynamic linker's runtime structures. This
835 address is the value of the debug base symbol. The job of this
836 function is to find and return that address, or to return 0 if there
837 is no such address (the executable is statically linked for example).
838
839 For SunOS, the job is almost trivial, since the dynamic linker and
840 all of its structures are statically linked to the executable at
841 link time. Thus the symbol for the address we are looking for has
842 already been added to the minimal symbol table for the executable's
843 objfile at the time the symbol file's symbols were read, and all we
844 have to do is look it up there. Note that we explicitly do NOT want
845 to find the copies in the shared library.
846
847 The SVR4 version is a bit more complicated because the address
848 is contained somewhere in the dynamic info section. We have to go
849 to a lot more work to discover the address of the debug base symbol.
850 Because of this complexity, we cache the value we find and return that
851 value on subsequent invocations. Note there is no copy in the
852 executable symbol tables.
853
854 */
855
856 static CORE_ADDR
857 locate_base (struct svr4_info *info)
858 {
859 /* Check to see if we have a currently valid address, and if so, avoid
860 doing all this work again and just return the cached address. If
861 we have no cached address, try to locate it in the dynamic info
862 section for ELF executables. There's no point in doing any of this
863 though if we don't have some link map offsets to work with. */
864
865 if (info->debug_base == 0 && svr4_have_link_map_offsets ())
866 info->debug_base = elf_locate_base ();
867 return info->debug_base;
868 }
869
870 /* Find the first element in the inferior's dynamic link map, and
871 return its address in the inferior. Return zero if the address
872 could not be determined.
873
874 FIXME: Perhaps we should validate the info somehow, perhaps by
875 checking r_version for a known version number, or r_state for
876 RT_CONSISTENT. */
877
878 static CORE_ADDR
879 solib_svr4_r_map (struct svr4_info *info)
880 {
881 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
882 struct type *ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
883 CORE_ADDR addr = 0;
884 volatile struct gdb_exception ex;
885
886 TRY_CATCH (ex, RETURN_MASK_ERROR)
887 {
888 addr = read_memory_typed_address (info->debug_base + lmo->r_map_offset,
889 ptr_type);
890 }
891 exception_print (gdb_stderr, ex);
892 return addr;
893 }
894
895 /* Find r_brk from the inferior's debug base. */
896
897 static CORE_ADDR
898 solib_svr4_r_brk (struct svr4_info *info)
899 {
900 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
901 struct type *ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
902
903 return read_memory_typed_address (info->debug_base + lmo->r_brk_offset,
904 ptr_type);
905 }
906
907 /* Find the link map for the dynamic linker (if it is not in the
908 normal list of loaded shared objects). */
909
910 static CORE_ADDR
911 solib_svr4_r_ldsomap (struct svr4_info *info)
912 {
913 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
914 struct type *ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
915 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch);
916 ULONGEST version;
917
918 /* Check version, and return zero if `struct r_debug' doesn't have
919 the r_ldsomap member. */
920 version
921 = read_memory_unsigned_integer (info->debug_base + lmo->r_version_offset,
922 lmo->r_version_size, byte_order);
923 if (version < 2 || lmo->r_ldsomap_offset == -1)
924 return 0;
925
926 return read_memory_typed_address (info->debug_base + lmo->r_ldsomap_offset,
927 ptr_type);
928 }
929
930 /* On Solaris systems with some versions of the dynamic linker,
931 ld.so's l_name pointer points to the SONAME in the string table
932 rather than into writable memory. So that GDB can find shared
933 libraries when loading a core file generated by gcore, ensure that
934 memory areas containing the l_name string are saved in the core
935 file. */
936
937 static int
938 svr4_keep_data_in_core (CORE_ADDR vaddr, unsigned long size)
939 {
940 struct svr4_info *info;
941 CORE_ADDR ldsomap;
942 struct so_list *new;
943 struct cleanup *old_chain;
944 struct link_map_offsets *lmo;
945 CORE_ADDR lm_name;
946
947 info = get_svr4_info ();
948
949 info->debug_base = 0;
950 locate_base (info);
951 if (!info->debug_base)
952 return 0;
953
954 ldsomap = solib_svr4_r_ldsomap (info);
955 if (!ldsomap)
956 return 0;
957
958 lmo = svr4_fetch_link_map_offsets ();
959 new = XZALLOC (struct so_list);
960 old_chain = make_cleanup (xfree, new);
961 new->lm_info = xmalloc (sizeof (struct lm_info));
962 make_cleanup (xfree, new->lm_info);
963 new->lm_info->l_addr = (CORE_ADDR)-1;
964 new->lm_info->lm_addr = ldsomap;
965 new->lm_info->lm = xzalloc (lmo->link_map_size);
966 make_cleanup (xfree, new->lm_info->lm);
967 read_memory (ldsomap, new->lm_info->lm, lmo->link_map_size);
968 lm_name = LM_NAME (new);
969 do_cleanups (old_chain);
970
971 return (lm_name >= vaddr && lm_name < vaddr + size);
972 }
973
974 /*
975
976 LOCAL FUNCTION
977
978 open_symbol_file_object
979
980 SYNOPSIS
981
982 int open_symbol_file_object (void *from_ttyp)
983
984 DESCRIPTION
985
986 If no open symbol file, attempt to locate and open the main symbol
987 file. On SVR4 systems, this is the first link map entry. If its
988 name is here, we can open it. Useful when attaching to a process
989 without first loading its symbol file.
990
991 If FROM_TTYP dereferences to a non-zero integer, allow messages to
992 be printed. This parameter is a pointer rather than an int because
993 open_symbol_file_object() is called via catch_errors() and
994 catch_errors() requires a pointer argument. */
995
996 static int
997 open_symbol_file_object (void *from_ttyp)
998 {
999 CORE_ADDR lm, l_name;
1000 char *filename;
1001 int errcode;
1002 int from_tty = *(int *)from_ttyp;
1003 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
1004 struct type *ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
1005 int l_name_size = TYPE_LENGTH (ptr_type);
1006 gdb_byte *l_name_buf = xmalloc (l_name_size);
1007 struct cleanup *cleanups = make_cleanup (xfree, l_name_buf);
1008 struct svr4_info *info = get_svr4_info ();
1009
1010 if (symfile_objfile)
1011 if (!query (_("Attempt to reload symbols from process? ")))
1012 return 0;
1013
1014 /* Always locate the debug struct, in case it has moved. */
1015 info->debug_base = 0;
1016 if (locate_base (info) == 0)
1017 return 0; /* failed somehow... */
1018
1019 /* First link map member should be the executable. */
1020 lm = solib_svr4_r_map (info);
1021 if (lm == 0)
1022 return 0; /* failed somehow... */
1023
1024 /* Read address of name from target memory to GDB. */
1025 read_memory (lm + lmo->l_name_offset, l_name_buf, l_name_size);
1026
1027 /* Convert the address to host format. */
1028 l_name = extract_typed_address (l_name_buf, ptr_type);
1029
1030 /* Free l_name_buf. */
1031 do_cleanups (cleanups);
1032
1033 if (l_name == 0)
1034 return 0; /* No filename. */
1035
1036 /* Now fetch the filename from target memory. */
1037 target_read_string (l_name, &filename, SO_NAME_MAX_PATH_SIZE - 1, &errcode);
1038 make_cleanup (xfree, filename);
1039
1040 if (errcode)
1041 {
1042 warning (_("failed to read exec filename from attached file: %s"),
1043 safe_strerror (errcode));
1044 return 0;
1045 }
1046
1047 /* Have a pathname: read the symbol file. */
1048 symbol_file_add_main (filename, from_tty);
1049
1050 return 1;
1051 }
1052
1053 /* If no shared library information is available from the dynamic
1054 linker, build a fallback list from other sources. */
1055
1056 static struct so_list *
1057 svr4_default_sos (void)
1058 {
1059 struct svr4_info *info = get_svr4_info ();
1060
1061 struct so_list *head = NULL;
1062 struct so_list **link_ptr = &head;
1063
1064 if (info->debug_loader_offset_p)
1065 {
1066 struct so_list *new = XZALLOC (struct so_list);
1067
1068 new->lm_info = xmalloc (sizeof (struct lm_info));
1069
1070 /* Nothing will ever check the cached copy of the link
1071 map if we set l_addr. */
1072 new->lm_info->l_addr = info->debug_loader_offset;
1073 new->lm_info->lm_addr = 0;
1074 new->lm_info->lm = NULL;
1075
1076 strncpy (new->so_name, info->debug_loader_name,
1077 SO_NAME_MAX_PATH_SIZE - 1);
1078 new->so_name[SO_NAME_MAX_PATH_SIZE - 1] = '\0';
1079 strcpy (new->so_original_name, new->so_name);
1080
1081 *link_ptr = new;
1082 link_ptr = &new->next;
1083 }
1084
1085 return head;
1086 }
1087
1088 /* LOCAL FUNCTION
1089
1090 current_sos -- build a list of currently loaded shared objects
1091
1092 SYNOPSIS
1093
1094 struct so_list *current_sos ()
1095
1096 DESCRIPTION
1097
1098 Build a list of `struct so_list' objects describing the shared
1099 objects currently loaded in the inferior. This list does not
1100 include an entry for the main executable file.
1101
1102 Note that we only gather information directly available from the
1103 inferior --- we don't examine any of the shared library files
1104 themselves. The declaration of `struct so_list' says which fields
1105 we provide values for. */
1106
1107 static struct so_list *
1108 svr4_current_sos (void)
1109 {
1110 CORE_ADDR lm, prev_lm;
1111 struct so_list *head = 0;
1112 struct so_list **link_ptr = &head;
1113 CORE_ADDR ldsomap = 0;
1114 struct svr4_info *info;
1115
1116 info = get_svr4_info ();
1117
1118 /* Always locate the debug struct, in case it has moved. */
1119 info->debug_base = 0;
1120 locate_base (info);
1121
1122 /* If we can't find the dynamic linker's base structure, this
1123 must not be a dynamically linked executable. Hmm. */
1124 if (! info->debug_base)
1125 return svr4_default_sos ();
1126
1127 /* Walk the inferior's link map list, and build our list of
1128 `struct so_list' nodes. */
1129 prev_lm = 0;
1130 lm = solib_svr4_r_map (info);
1131
1132 while (lm)
1133 {
1134 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
1135 struct so_list *new = XZALLOC (struct so_list);
1136 struct cleanup *old_chain = make_cleanup (xfree, new);
1137 CORE_ADDR next_lm;
1138
1139 new->lm_info = xmalloc (sizeof (struct lm_info));
1140 make_cleanup (xfree, new->lm_info);
1141
1142 new->lm_info->l_addr = (CORE_ADDR)-1;
1143 new->lm_info->lm_addr = lm;
1144 new->lm_info->lm = xzalloc (lmo->link_map_size);
1145 make_cleanup (xfree, new->lm_info->lm);
1146
1147 read_memory (lm, new->lm_info->lm, lmo->link_map_size);
1148
1149 next_lm = LM_NEXT (new);
1150
1151 if (LM_PREV (new) != prev_lm)
1152 {
1153 warning (_("Corrupted shared library list"));
1154 free_so (new);
1155 next_lm = 0;
1156 }
1157
1158 /* For SVR4 versions, the first entry in the link map is for the
1159 inferior executable, so we must ignore it. For some versions of
1160 SVR4, it has no name. For others (Solaris 2.3 for example), it
1161 does have a name, so we can no longer use a missing name to
1162 decide when to ignore it. */
1163 else if (IGNORE_FIRST_LINK_MAP_ENTRY (new) && ldsomap == 0)
1164 {
1165 info->main_lm_addr = new->lm_info->lm_addr;
1166 free_so (new);
1167 }
1168 else
1169 {
1170 int errcode;
1171 char *buffer;
1172
1173 /* Extract this shared object's name. */
1174 target_read_string (LM_NAME (new), &buffer,
1175 SO_NAME_MAX_PATH_SIZE - 1, &errcode);
1176 if (errcode != 0)
1177 warning (_("Can't read pathname for load map: %s."),
1178 safe_strerror (errcode));
1179 else
1180 {
1181 strncpy (new->so_name, buffer, SO_NAME_MAX_PATH_SIZE - 1);
1182 new->so_name[SO_NAME_MAX_PATH_SIZE - 1] = '\0';
1183 strcpy (new->so_original_name, new->so_name);
1184 }
1185 xfree (buffer);
1186
1187 /* If this entry has no name, or its name matches the name
1188 for the main executable, don't include it in the list. */
1189 if (! new->so_name[0]
1190 || match_main (new->so_name))
1191 free_so (new);
1192 else
1193 {
1194 new->next = 0;
1195 *link_ptr = new;
1196 link_ptr = &new->next;
1197 }
1198 }
1199
1200 prev_lm = lm;
1201 lm = next_lm;
1202
1203 /* On Solaris, the dynamic linker is not in the normal list of
1204 shared objects, so make sure we pick it up too. Having
1205 symbol information for the dynamic linker is quite crucial
1206 for skipping dynamic linker resolver code. */
1207 if (lm == 0 && ldsomap == 0)
1208 {
1209 lm = ldsomap = solib_svr4_r_ldsomap (info);
1210 prev_lm = 0;
1211 }
1212
1213 discard_cleanups (old_chain);
1214 }
1215
1216 if (head == NULL)
1217 return svr4_default_sos ();
1218
1219 return head;
1220 }
1221
1222 /* Get the address of the link_map for a given OBJFILE. */
1223
1224 CORE_ADDR
1225 svr4_fetch_objfile_link_map (struct objfile *objfile)
1226 {
1227 struct so_list *so;
1228 struct svr4_info *info = get_svr4_info ();
1229
1230 /* Cause svr4_current_sos() to be run if it hasn't been already. */
1231 if (info->main_lm_addr == 0)
1232 solib_add (NULL, 0, &current_target, auto_solib_add);
1233
1234 /* svr4_current_sos() will set main_lm_addr for the main executable. */
1235 if (objfile == symfile_objfile)
1236 return info->main_lm_addr;
1237
1238 /* The other link map addresses may be found by examining the list
1239 of shared libraries. */
1240 for (so = master_so_list (); so; so = so->next)
1241 if (so->objfile == objfile)
1242 return so->lm_info->lm_addr;
1243
1244 /* Not found! */
1245 return 0;
1246 }
1247
1248 /* On some systems, the only way to recognize the link map entry for
1249 the main executable file is by looking at its name. Return
1250 non-zero iff SONAME matches one of the known main executable names. */
1251
1252 static int
1253 match_main (char *soname)
1254 {
1255 char **mainp;
1256
1257 for (mainp = main_name_list; *mainp != NULL; mainp++)
1258 {
1259 if (strcmp (soname, *mainp) == 0)
1260 return (1);
1261 }
1262
1263 return (0);
1264 }
1265
1266 /* Return 1 if PC lies in the dynamic symbol resolution code of the
1267 SVR4 run time loader. */
1268
1269 int
1270 svr4_in_dynsym_resolve_code (CORE_ADDR pc)
1271 {
1272 struct svr4_info *info = get_svr4_info ();
1273
1274 return ((pc >= info->interp_text_sect_low
1275 && pc < info->interp_text_sect_high)
1276 || (pc >= info->interp_plt_sect_low
1277 && pc < info->interp_plt_sect_high)
1278 || in_plt_section (pc, NULL));
1279 }
1280
1281 /* Given an executable's ABFD and target, compute the entry-point
1282 address. */
1283
1284 static CORE_ADDR
1285 exec_entry_point (struct bfd *abfd, struct target_ops *targ)
1286 {
1287 /* KevinB wrote ... for most targets, the address returned by
1288 bfd_get_start_address() is the entry point for the start
1289 function. But, for some targets, bfd_get_start_address() returns
1290 the address of a function descriptor from which the entry point
1291 address may be extracted. This address is extracted by
1292 gdbarch_convert_from_func_ptr_addr(). The method
1293 gdbarch_convert_from_func_ptr_addr() is the merely the identify
1294 function for targets which don't use function descriptors. */
1295 return gdbarch_convert_from_func_ptr_addr (target_gdbarch,
1296 bfd_get_start_address (abfd),
1297 targ);
1298 }
1299
1300 /*
1301
1302 LOCAL FUNCTION
1303
1304 enable_break -- arrange for dynamic linker to hit breakpoint
1305
1306 SYNOPSIS
1307
1308 int enable_break (void)
1309
1310 DESCRIPTION
1311
1312 Both the SunOS and the SVR4 dynamic linkers have, as part of their
1313 debugger interface, support for arranging for the inferior to hit
1314 a breakpoint after mapping in the shared libraries. This function
1315 enables that breakpoint.
1316
1317 For SunOS, there is a special flag location (in_debugger) which we
1318 set to 1. When the dynamic linker sees this flag set, it will set
1319 a breakpoint at a location known only to itself, after saving the
1320 original contents of that place and the breakpoint address itself,
1321 in its own internal structures. When we resume the inferior, it
1322 will eventually take a SIGTRAP when it runs into the breakpoint.
1323 We handle this (in a different place) by restoring the contents of
1324 the breakpointed location (which is only known after it stops),
1325 chasing around to locate the shared libraries that have been
1326 loaded, then resuming.
1327
1328 For SVR4, the debugger interface structure contains a member (r_brk)
1329 which is statically initialized at the time the shared library is
1330 built, to the offset of a function (_r_debug_state) which is
1331 guaranteed to be called once before mapping in a library, and again when
1332 the mapping is complete. At the time we are examining this member,
1333 it contains only the unrelocated offset of the function, so we have
1334 to do our own relocation. Later, when the dynamic linker actually
1335 runs, it relocates r_brk to be the actual address of _r_debug_state().
1336
1337 The debugger interface structure also contains an enumeration which
1338 is set to either RT_ADD or RT_DELETE prior to changing the mapping,
1339 depending upon whether or not the library is being mapped or unmapped,
1340 and then set to RT_CONSISTENT after the library is mapped/unmapped.
1341 */
1342
1343 static int
1344 enable_break (struct svr4_info *info, int from_tty)
1345 {
1346 struct minimal_symbol *msymbol;
1347 char **bkpt_namep;
1348 asection *interp_sect;
1349 gdb_byte *interp_name;
1350 CORE_ADDR sym_addr;
1351
1352 info->interp_text_sect_low = info->interp_text_sect_high = 0;
1353 info->interp_plt_sect_low = info->interp_plt_sect_high = 0;
1354
1355 /* If we already have a shared library list in the target, and
1356 r_debug contains r_brk, set the breakpoint there - this should
1357 mean r_brk has already been relocated. Assume the dynamic linker
1358 is the object containing r_brk. */
1359
1360 solib_add (NULL, from_tty, &current_target, auto_solib_add);
1361 sym_addr = 0;
1362 if (info->debug_base && solib_svr4_r_map (info) != 0)
1363 sym_addr = solib_svr4_r_brk (info);
1364
1365 if (sym_addr != 0)
1366 {
1367 struct obj_section *os;
1368
1369 sym_addr = gdbarch_addr_bits_remove
1370 (target_gdbarch, gdbarch_convert_from_func_ptr_addr (target_gdbarch,
1371 sym_addr,
1372 &current_target));
1373
1374 /* On at least some versions of Solaris there's a dynamic relocation
1375 on _r_debug.r_brk and SYM_ADDR may not be relocated yet, e.g., if
1376 we get control before the dynamic linker has self-relocated.
1377 Check if SYM_ADDR is in a known section; if it is, assume we can
1378 trust its value. This is just a heuristic though, it could go away
1379 or be replaced if it's getting in the way.
1380
1381 On ARM we need to know whether the ISA of rtld_db_dlactivity (or
1382 however it's spelled in your particular system) is ARM or Thumb.
1383 That knowledge is encoded in the address, if it's Thumb the low bit
1384 is 1. However, we've stripped that info above and it's not clear
1385 what all the consequences are of passing a non-addr_bits_remove'd
1386 address to create_solib_event_breakpoint. The call to
1387 find_pc_section verifies we know about the address and have some
1388 hope of computing the right kind of breakpoint to use (via
1389 symbol info). It does mean that GDB needs to be pointed at a
1390 non-stripped version of the dynamic linker in order to obtain
1391 information it already knows about. Sigh. */
1392
1393 os = find_pc_section (sym_addr);
1394 if (os != NULL)
1395 {
1396 /* Record the relocated start and end address of the dynamic linker
1397 text and plt section for svr4_in_dynsym_resolve_code. */
1398 bfd *tmp_bfd;
1399 CORE_ADDR load_addr;
1400
1401 tmp_bfd = os->objfile->obfd;
1402 load_addr = ANOFFSET (os->objfile->section_offsets,
1403 os->objfile->sect_index_text);
1404
1405 interp_sect = bfd_get_section_by_name (tmp_bfd, ".text");
1406 if (interp_sect)
1407 {
1408 info->interp_text_sect_low =
1409 bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
1410 info->interp_text_sect_high =
1411 info->interp_text_sect_low
1412 + bfd_section_size (tmp_bfd, interp_sect);
1413 }
1414 interp_sect = bfd_get_section_by_name (tmp_bfd, ".plt");
1415 if (interp_sect)
1416 {
1417 info->interp_plt_sect_low =
1418 bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
1419 info->interp_plt_sect_high =
1420 info->interp_plt_sect_low
1421 + bfd_section_size (tmp_bfd, interp_sect);
1422 }
1423
1424 create_solib_event_breakpoint (target_gdbarch, sym_addr);
1425 return 1;
1426 }
1427 }
1428
1429 /* Find the program interpreter; if not found, warn the user and drop
1430 into the old breakpoint at symbol code. */
1431 interp_name = find_program_interpreter ();
1432 if (interp_name)
1433 {
1434 CORE_ADDR load_addr = 0;
1435 int load_addr_found = 0;
1436 int loader_found_in_list = 0;
1437 struct so_list *so;
1438 bfd *tmp_bfd = NULL;
1439 struct target_ops *tmp_bfd_target;
1440 volatile struct gdb_exception ex;
1441
1442 sym_addr = 0;
1443
1444 /* Now we need to figure out where the dynamic linker was
1445 loaded so that we can load its symbols and place a breakpoint
1446 in the dynamic linker itself.
1447
1448 This address is stored on the stack. However, I've been unable
1449 to find any magic formula to find it for Solaris (appears to
1450 be trivial on GNU/Linux). Therefore, we have to try an alternate
1451 mechanism to find the dynamic linker's base address. */
1452
1453 TRY_CATCH (ex, RETURN_MASK_ALL)
1454 {
1455 tmp_bfd = solib_bfd_open (interp_name);
1456 }
1457 if (tmp_bfd == NULL)
1458 goto bkpt_at_symbol;
1459
1460 /* Now convert the TMP_BFD into a target. That way target, as
1461 well as BFD operations can be used. Note that closing the
1462 target will also close the underlying bfd. */
1463 tmp_bfd_target = target_bfd_reopen (tmp_bfd);
1464
1465 /* On a running target, we can get the dynamic linker's base
1466 address from the shared library table. */
1467 so = master_so_list ();
1468 while (so)
1469 {
1470 if (svr4_same_1 (interp_name, so->so_original_name))
1471 {
1472 load_addr_found = 1;
1473 loader_found_in_list = 1;
1474 load_addr = LM_ADDR_CHECK (so, tmp_bfd);
1475 break;
1476 }
1477 so = so->next;
1478 }
1479
1480 /* If we were not able to find the base address of the loader
1481 from our so_list, then try using the AT_BASE auxiliary entry. */
1482 if (!load_addr_found)
1483 if (target_auxv_search (&current_target, AT_BASE, &load_addr) > 0)
1484 {
1485 int addr_bit = gdbarch_addr_bit (target_gdbarch);
1486
1487 /* Ensure LOAD_ADDR has the proper sign in its possible upper bits so
1488 that `+ load_addr' overflows CORE_ADDR width instead of creating
1489 invalid addresses like 0x101234567 for 32bit inferiors on 64bit
1490 GDB. */
1491
1492 if (addr_bit < (sizeof (CORE_ADDR) * HOST_CHAR_BIT))
1493 {
1494 CORE_ADDR space_size = (CORE_ADDR) 1 << addr_bit;
1495 CORE_ADDR tmp_entry_point = exec_entry_point (tmp_bfd,
1496 tmp_bfd_target);
1497
1498 gdb_assert (load_addr < space_size);
1499
1500 /* TMP_ENTRY_POINT exceeding SPACE_SIZE would be for prelinked
1501 64bit ld.so with 32bit executable; it should not happen. */
1502
1503 if (tmp_entry_point < space_size
1504 && tmp_entry_point + load_addr >= space_size)
1505 load_addr -= space_size;
1506 }
1507
1508 load_addr_found = 1;
1509 }
1510
1511 /* Otherwise we find the dynamic linker's base address by examining
1512 the current pc (which should point at the entry point for the
1513 dynamic linker) and subtracting the offset of the entry point.
1514
1515 This is more fragile than the previous approaches, but is a good
1516 fallback method because it has actually been working well in
1517 most cases. */
1518 if (!load_addr_found)
1519 {
1520 struct regcache *regcache
1521 = get_thread_arch_regcache (inferior_ptid, target_gdbarch);
1522 load_addr = (regcache_read_pc (regcache)
1523 - exec_entry_point (tmp_bfd, tmp_bfd_target));
1524 }
1525
1526 if (!loader_found_in_list)
1527 {
1528 info->debug_loader_name = xstrdup (interp_name);
1529 info->debug_loader_offset_p = 1;
1530 info->debug_loader_offset = load_addr;
1531 solib_add (NULL, from_tty, &current_target, auto_solib_add);
1532 }
1533
1534 /* Record the relocated start and end address of the dynamic linker
1535 text and plt section for svr4_in_dynsym_resolve_code. */
1536 interp_sect = bfd_get_section_by_name (tmp_bfd, ".text");
1537 if (interp_sect)
1538 {
1539 info->interp_text_sect_low =
1540 bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
1541 info->interp_text_sect_high =
1542 info->interp_text_sect_low
1543 + bfd_section_size (tmp_bfd, interp_sect);
1544 }
1545 interp_sect = bfd_get_section_by_name (tmp_bfd, ".plt");
1546 if (interp_sect)
1547 {
1548 info->interp_plt_sect_low =
1549 bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
1550 info->interp_plt_sect_high =
1551 info->interp_plt_sect_low
1552 + bfd_section_size (tmp_bfd, interp_sect);
1553 }
1554
1555 /* Now try to set a breakpoint in the dynamic linker. */
1556 for (bkpt_namep = solib_break_names; *bkpt_namep != NULL; bkpt_namep++)
1557 {
1558 sym_addr = bfd_lookup_symbol (tmp_bfd, *bkpt_namep);
1559 if (sym_addr != 0)
1560 break;
1561 }
1562
1563 if (sym_addr != 0)
1564 /* Convert 'sym_addr' from a function pointer to an address.
1565 Because we pass tmp_bfd_target instead of the current
1566 target, this will always produce an unrelocated value. */
1567 sym_addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch,
1568 sym_addr,
1569 tmp_bfd_target);
1570
1571 /* We're done with both the temporary bfd and target. Remember,
1572 closing the target closes the underlying bfd. */
1573 target_close (tmp_bfd_target, 0);
1574
1575 if (sym_addr != 0)
1576 {
1577 create_solib_event_breakpoint (target_gdbarch, load_addr + sym_addr);
1578 xfree (interp_name);
1579 return 1;
1580 }
1581
1582 /* For whatever reason we couldn't set a breakpoint in the dynamic
1583 linker. Warn and drop into the old code. */
1584 bkpt_at_symbol:
1585 xfree (interp_name);
1586 warning (_("Unable to find dynamic linker breakpoint function.\n"
1587 "GDB will be unable to debug shared library initializers\n"
1588 "and track explicitly loaded dynamic code."));
1589 }
1590
1591 /* Scan through the lists of symbols, trying to look up the symbol and
1592 set a breakpoint there. Terminate the loop when/if we succeed. */
1593
1594 for (bkpt_namep = solib_break_names; *bkpt_namep != NULL; bkpt_namep++)
1595 {
1596 msymbol = lookup_minimal_symbol (*bkpt_namep, NULL, symfile_objfile);
1597 if ((msymbol != NULL) && (SYMBOL_VALUE_ADDRESS (msymbol) != 0))
1598 {
1599 sym_addr = SYMBOL_VALUE_ADDRESS (msymbol);
1600 sym_addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch,
1601 sym_addr,
1602 &current_target);
1603 create_solib_event_breakpoint (target_gdbarch, sym_addr);
1604 return 1;
1605 }
1606 }
1607
1608 for (bkpt_namep = bkpt_names; *bkpt_namep != NULL; bkpt_namep++)
1609 {
1610 msymbol = lookup_minimal_symbol (*bkpt_namep, NULL, symfile_objfile);
1611 if ((msymbol != NULL) && (SYMBOL_VALUE_ADDRESS (msymbol) != 0))
1612 {
1613 sym_addr = SYMBOL_VALUE_ADDRESS (msymbol);
1614 sym_addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch,
1615 sym_addr,
1616 &current_target);
1617 create_solib_event_breakpoint (target_gdbarch, sym_addr);
1618 return 1;
1619 }
1620 }
1621 return 0;
1622 }
1623
1624 /*
1625
1626 LOCAL FUNCTION
1627
1628 special_symbol_handling -- additional shared library symbol handling
1629
1630 SYNOPSIS
1631
1632 void special_symbol_handling ()
1633
1634 DESCRIPTION
1635
1636 Once the symbols from a shared object have been loaded in the usual
1637 way, we are called to do any system specific symbol handling that
1638 is needed.
1639
1640 For SunOS4, this consisted of grunging around in the dynamic
1641 linker's structures to find symbol definitions for "common" symbols
1642 and adding them to the minimal symbol table for the runtime common
1643 objfile.
1644
1645 However, for SVR4, there's nothing to do.
1646
1647 */
1648
1649 static void
1650 svr4_special_symbol_handling (void)
1651 {
1652 svr4_relocate_main_executable ();
1653 }
1654
1655 /* Read the ELF program headers from ABFD. Return the contents and
1656 set *PHDRS_SIZE to the size of the program headers. */
1657
1658 static gdb_byte *
1659 read_program_headers_from_bfd (bfd *abfd, int *phdrs_size)
1660 {
1661 Elf_Internal_Ehdr *ehdr;
1662 gdb_byte *buf;
1663
1664 ehdr = elf_elfheader (abfd);
1665
1666 *phdrs_size = ehdr->e_phnum * ehdr->e_phentsize;
1667 if (*phdrs_size == 0)
1668 return NULL;
1669
1670 buf = xmalloc (*phdrs_size);
1671 if (bfd_seek (abfd, ehdr->e_phoff, SEEK_SET) != 0
1672 || bfd_bread (buf, *phdrs_size, abfd) != *phdrs_size)
1673 {
1674 xfree (buf);
1675 return NULL;
1676 }
1677
1678 return buf;
1679 }
1680
1681 /* Return 1 and fill *DISPLACEMENTP with detected PIE offset of inferior
1682 exec_bfd. Otherwise return 0.
1683
1684 We relocate all of the sections by the same amount. This
1685 behavior is mandated by recent editions of the System V ABI.
1686 According to the System V Application Binary Interface,
1687 Edition 4.1, page 5-5:
1688
1689 ... Though the system chooses virtual addresses for
1690 individual processes, it maintains the segments' relative
1691 positions. Because position-independent code uses relative
1692 addressing between segments, the difference between
1693 virtual addresses in memory must match the difference
1694 between virtual addresses in the file. The difference
1695 between the virtual address of any segment in memory and
1696 the corresponding virtual address in the file is thus a
1697 single constant value for any one executable or shared
1698 object in a given process. This difference is the base
1699 address. One use of the base address is to relocate the
1700 memory image of the program during dynamic linking.
1701
1702 The same language also appears in Edition 4.0 of the System V
1703 ABI and is left unspecified in some of the earlier editions.
1704
1705 Decide if the objfile needs to be relocated. As indicated above, we will
1706 only be here when execution is stopped. But during attachment the PC can
1707 be at an arbitrary address, so regcache_read_pc can be misleading (contrary
1708 to the auxv AT_ENTRY value). Moreover, for an executable with an interpreter
1709 section, regcache_read_pc would point to the interpreter, not the main executable.
1710
1711 So, to summarize, relocations are necessary when the start address obtained
1712 from the executable is different from the address in the auxv AT_ENTRY entry.
1713
1714 [ The astute reader will note that we also test to make sure that
1715 the executable in question has the DYNAMIC flag set. It is my
1716 opinion that this test is unnecessary (undesirable even). It
1717 was added to avoid inadvertent relocation of an executable
1718 whose e_type member in the ELF header is not ET_DYN. There may
1719 be a time in the future when it is desirable to do relocations
1720 on other types of files as well in which case this condition
1721 should either be removed or modified to accommodate the new file
1722 type. - Kevin, Nov 2000. ] */
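/* A purely illustrative example with hypothetical numbers: if the PIE's
   ELF header records e_entry == 0x530 and the kernel reports
   AT_ENTRY == 0x555555554530 in the auxiliary vector, then

     displacement = 0x555555554530 - 0x530 = 0x555555554000

   which is page-aligned, so every section offset of the main executable
   is shifted by 0x555555554000 before its symbols are used.  */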
1723
1724 static int
1725 svr4_exec_displacement (CORE_ADDR *displacementp)
1726 {
1727 /* ENTRY_POINT is a possible function descriptor - before
1728 a call to gdbarch_convert_from_func_ptr_addr. */
1729 CORE_ADDR entry_point, displacement;
1730
1731 if (exec_bfd == NULL)
1732 return 0;
1733
1734 /* An executable without the DYNAMIC flag set is ET_EXEC rather than ET_DYN
1735 and is never displaced. Both shared libraries being executed themselves
1736 and PIE (Position Independent Executable) executables are ET_DYN. */
1737
1738 if ((bfd_get_file_flags (exec_bfd) & DYNAMIC) == 0)
1739 return 0;
1740
1741 if (target_auxv_search (&current_target, AT_ENTRY, &entry_point) <= 0)
1742 return 0;
1743
1744 displacement = entry_point - bfd_get_start_address (exec_bfd);
1745
1746 /* Verify the DISPLACEMENT candidate complies with the required page
1747 alignment. It is cheaper than the program headers comparison below. */
1748
1749 if (bfd_get_flavour (exec_bfd) == bfd_target_elf_flavour)
1750 {
1751 const struct elf_backend_data *elf = get_elf_backend_data (exec_bfd);
1752
1753 /* p_align of PT_LOAD segments does not specify any alignment but
1754 only congruency of addresses:
1755 p_offset % p_align == p_vaddr % p_align
1756 The kernel is free to load the executable with a lower alignment. */
1757
1758 if ((displacement & (elf->minpagesize - 1)) != 0)
1759 return 0;
1760 }
1761
1762 /* Verify that the auxiliary vector describes the same file as exec_bfd, by
1763 comparing their program headers. If the program headers in the auxiliary
1764 vector do not match the program headers in the executable, then we are
1765 looking at a different file than the one used by the kernel - for
1766 instance, "gdb program" connected to "gdbserver :PORT ld.so program". */
1767
1768 if (bfd_get_flavour (exec_bfd) == bfd_target_elf_flavour)
1769 {
1770 /* Be optimistic and clear OK only if GDB was able to verify the headers
1771 really do not match. */
1772 int phdrs_size, phdrs2_size, ok = 1;
1773 gdb_byte *buf, *buf2;
1774
1775 buf = read_program_header (-1, &phdrs_size, NULL);
1776 buf2 = read_program_headers_from_bfd (exec_bfd, &phdrs2_size);
1777 if (buf != NULL && buf2 != NULL
1778 && (phdrs_size != phdrs2_size
1779 || memcmp (buf, buf2, phdrs_size) != 0))
1780 ok = 0;
1781
1782 xfree (buf);
1783 xfree (buf2);
1784
1785 if (!ok)
1786 return 0;
1787 }
1788
1789 if (info_verbose)
1790 {
1791       /* This may be printed repeatedly, as there is no easy way to check
1792          whether the executable's symbols/file have already been relocated
1793          by DISPLACEMENT.  */
1794
1795 printf_unfiltered (_("Using PIE (Position Independent Executable) "
1796 "displacement %s for \"%s\".\n"),
1797 paddress (target_gdbarch, displacement),
1798 bfd_get_filename (exec_bfd));
1799 }
1800
1801 *displacementp = displacement;
1802 return 1;
1803 }
1804
1805 /* Relocate the main executable. This function should be called upon
1806 stopping the inferior process at the entry point to the program.
1807 The entry point from BFD is compared to the AT_ENTRY of AUXV and if they are
1808 different, the main executable is relocated by the proper amount. */
1809
1810 static void
1811 svr4_relocate_main_executable (void)
1812 {
1813 CORE_ADDR displacement;
1814
1815 if (symfile_objfile)
1816 {
1817 int i;
1818
1819       /* A remote target may have already set specific offsets via `qOffsets',
1820          which should be preferred.  */
1821
1822 for (i = 0; i < symfile_objfile->num_sections; i++)
1823 if (ANOFFSET (symfile_objfile->section_offsets, i) != 0)
1824 return;
1825 }
1826
1827 if (! svr4_exec_displacement (&displacement))
1828 return;
1829
1830   /* Even a DISPLACEMENT of 0 is a valid new difference between in-memory
1831      and in-file addresses.  */
1832
1833 if (symfile_objfile)
1834 {
1835 struct section_offsets *new_offsets;
1836 int i;
1837
1838 new_offsets = alloca (symfile_objfile->num_sections
1839 * sizeof (*new_offsets));
1840
1841 for (i = 0; i < symfile_objfile->num_sections; i++)
1842 new_offsets->offsets[i] = displacement;
1843
1844 objfile_relocate (symfile_objfile, new_offsets);
1845 }
1846 else if (exec_bfd)
1847 {
1848 asection *asect;
1849
1850 for (asect = exec_bfd->sections; asect != NULL; asect = asect->next)
1851 exec_set_section_address (bfd_get_filename (exec_bfd), asect->index,
1852 (bfd_section_vma (exec_bfd, asect)
1853 + displacement));
1854 }
1855 }
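
/* Worked example with hypothetical numbers: once a displacement of
   0x555555554000 has been recorded in every element of the section offset
   table above, a function whose link-time address is 0x730 is reported by
   GDB at 0x555555554730, and the same constant applies to every other
   section of the main executable.  */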
1856
1857 /*
1858
1859 GLOBAL FUNCTION
1860
1861 svr4_solib_create_inferior_hook -- shared library startup support
1862
1863 SYNOPSIS
1864
1865 void svr4_solib_create_inferior_hook (int from_tty)
1866
1867 DESCRIPTION
1868
1869 When gdb starts up the inferior, it nurses it along (through the
1870     shell) until it is ready to execute its first instruction.  At this
1871 point, this function gets called via expansion of the macro
1872 SOLIB_CREATE_INFERIOR_HOOK.
1873
1874 For SunOS executables, this first instruction is typically the
1875 one at "_start", or a similar text label, regardless of whether
1876 the executable is statically or dynamically linked. The runtime
1877 startup code takes care of dynamically linking in any shared
1878 libraries, once gdb allows the inferior to continue.
1879
1880 For SVR4 executables, this first instruction is either the first
1881 instruction in the dynamic linker (for dynamically linked
1882 executables) or the instruction at "start" for statically linked
1883 executables. For dynamically linked executables, the system
1884 first exec's /lib/libc.so.N, which contains the dynamic linker,
1885 and starts it running. The dynamic linker maps in any needed
1886 shared libraries, maps in the actual user executable, and then
1887 jumps to "start" in the user executable.
1888
1889 For both SunOS shared libraries, and SVR4 shared libraries, we
1890 can arrange to cooperate with the dynamic linker to discover the
1891 names of shared libraries that are dynamically linked, and the
1892 base addresses to which they are linked.
1893
1894 This function is responsible for discovering those names and
1895 addresses, and saving sufficient information about them to allow
1896 their symbols to be read at a later time.
1897
1898 FIXME
1899
1900 Between enable_break() and disable_break(), this code does not
1901 properly handle hitting breakpoints which the user might have
1902 set in the startup code or in the dynamic linker itself. Proper
1903 handling will probably have to wait until the implementation is
1904 changed to use the "breakpoint handler function" method.
1905
1906     Also, what if the child has exit()ed?  We must exit the loop somehow.
1907 */
1908
1909 static void
1910 svr4_solib_create_inferior_hook (int from_tty)
1911 {
1912 #if defined(_SCO_DS)
1913 struct inferior *inf;
1914 struct thread_info *tp;
1915 #endif /* defined(_SCO_DS) */
1916 struct svr4_info *info;
1917
1918 info = get_svr4_info ();
1919
1920 /* Relocate the main executable if necessary. */
1921 if (current_inferior ()->attach_flag == 0)
1922 svr4_relocate_main_executable ();
1923
1924 if (!svr4_have_link_map_offsets ())
1925 return;
1926
1927 if (!enable_break (info, from_tty))
1928 return;
1929
1930 #if defined(_SCO_DS)
1931 /* SCO needs the loop below, other systems should be using the
1932 special shared library breakpoints and the shared library breakpoint
1933 service routine.
1934
1935 Now run the target. It will eventually hit the breakpoint, at
1936 which point all of the libraries will have been mapped in and we
1937 can go groveling around in the dynamic linker structures to find
1938 out what we need to know about them. */
1939
1940 inf = current_inferior ();
1941 tp = inferior_thread ();
1942
1943 clear_proceed_status ();
1944 inf->stop_soon = STOP_QUIETLY;
1945 tp->stop_signal = TARGET_SIGNAL_0;
1946 do
1947 {
1948 target_resume (pid_to_ptid (-1), 0, tp->stop_signal);
1949 wait_for_inferior (0);
1950 }
1951 while (tp->stop_signal != TARGET_SIGNAL_TRAP);
1952 inf->stop_soon = NO_STOP_QUIETLY;
1953 #endif /* defined(_SCO_DS) */
1954 }
1955
1956 static void
1957 svr4_clear_solib (void)
1958 {
1959 struct svr4_info *info;
1960
1961 info = get_svr4_info ();
1962 info->debug_base = 0;
1963 info->debug_loader_offset_p = 0;
1964 info->debug_loader_offset = 0;
1965 xfree (info->debug_loader_name);
1966 info->debug_loader_name = NULL;
1967 }
1968
1969 static void
1970 svr4_free_so (struct so_list *so)
1971 {
1972 xfree (so->lm_info->lm);
1973 xfree (so->lm_info);
1974 }
1975
1976
1977 /* Clear any bits of ADDR that wouldn't fit in a target-format
1978 data pointer. "Data pointer" here refers to whatever sort of
1979 address the dynamic linker uses to manage its sections. At the
1980 moment, we don't support shared libraries on any processors where
1981 code and data pointers are different sizes.
1982
1983 This isn't really the right solution. What we really need here is
1984 a way to do arithmetic on CORE_ADDR values that respects the
1985 natural pointer/address correspondence. (For example, on the MIPS,
1986 converting a 32-bit pointer to a 64-bit CORE_ADDR requires you to
1987 sign-extend the value. There, simply truncating the bits above
1988 gdbarch_ptr_bit, as we do below, is no good.) This should probably
1989 be a new gdbarch method or something. */
1990 static CORE_ADDR
1991 svr4_truncate_ptr (CORE_ADDR addr)
1992 {
1993 if (gdbarch_ptr_bit (target_gdbarch) == sizeof (CORE_ADDR) * 8)
1994 /* We don't need to truncate anything, and the bit twiddling below
1995 will fail due to overflow problems. */
1996 return addr;
1997 else
1998 return addr & (((CORE_ADDR) 1 << gdbarch_ptr_bit (target_gdbarch)) - 1);
1999 }
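
/* For example (hypothetical values): with gdbarch_ptr_bit == 32 and a
   64-bit CORE_ADDR, an ADDR of 0xffffffff80001234 read from the inferior's
   link map is truncated to 0x80001234; when the two widths are equal the
   value is returned unchanged, avoiding the problematic full-width shift
   mentioned in the comment above.  */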
2000
2001
2002 static void
2003 svr4_relocate_section_addresses (struct so_list *so,
2004 struct target_section *sec)
2005 {
2006 sec->addr = svr4_truncate_ptr (sec->addr + LM_ADDR_CHECK (so,
2007 sec->bfd));
2008 sec->endaddr = svr4_truncate_ptr (sec->endaddr + LM_ADDR_CHECK (so,
2009 sec->bfd));
2010 }
2011 \f
2012
2013 /* Architecture-specific operations. */
2014
2015 /* Per-architecture data key. */
2016 static struct gdbarch_data *solib_svr4_data;
2017
2018 struct solib_svr4_ops
2019 {
2020 /* Return a description of the layout of `struct link_map'. */
2021 struct link_map_offsets *(*fetch_link_map_offsets)(void);
2022 };
2023
2024 /* Return a default for the architecture-specific operations. */
2025
2026 static void *
2027 solib_svr4_init (struct obstack *obstack)
2028 {
2029 struct solib_svr4_ops *ops;
2030
2031 ops = OBSTACK_ZALLOC (obstack, struct solib_svr4_ops);
2032 ops->fetch_link_map_offsets = NULL;
2033 return ops;
2034 }
2035
2036 /* Set the architecture-specific `struct link_map_offsets' fetcher for
2037 GDBARCH to FLMO. Also, install SVR4 solib_ops into GDBARCH. */
2038
2039 void
2040 set_solib_svr4_fetch_link_map_offsets (struct gdbarch *gdbarch,
2041 struct link_map_offsets *(*flmo) (void))
2042 {
2043 struct solib_svr4_ops *ops = gdbarch_data (gdbarch, solib_svr4_data);
2044
2045 ops->fetch_link_map_offsets = flmo;
2046
2047 set_solib_ops (gdbarch, &svr4_so_ops);
2048 }
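
/* A typical caller is an architecture's gdbarch initialization in its
   *-tdep.c file; a minimal sketch (surrounding tdep code omitted):

     set_solib_svr4_fetch_link_map_offsets
       (gdbarch, svr4_ilp32_fetch_link_map_offsets);

   which records the fetcher and also installs svr4_so_ops for GDBARCH.  */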
2049
2050 /* Fetch a link_map_offsets structure using the architecture-specific
2051 `struct link_map_offsets' fetcher. */
2052
2053 static struct link_map_offsets *
2054 svr4_fetch_link_map_offsets (void)
2055 {
2056 struct solib_svr4_ops *ops = gdbarch_data (target_gdbarch, solib_svr4_data);
2057
2058 gdb_assert (ops->fetch_link_map_offsets);
2059 return ops->fetch_link_map_offsets ();
2060 }
2061
2062 /* Return 1 if a link map offset fetcher has been defined, 0 otherwise. */
2063
2064 static int
2065 svr4_have_link_map_offsets (void)
2066 {
2067 struct solib_svr4_ops *ops = gdbarch_data (target_gdbarch, solib_svr4_data);
2068 return (ops->fetch_link_map_offsets != NULL);
2069 }
2070 \f
2071
2072 /* Most OSes that have SVR4-style ELF dynamic libraries define a
2073    `struct r_debug' and a `struct link_map' that are binary compatible
2074    with the original SVR4 implementation.  */
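
/* As a rough sketch (field names follow the common <link.h> declarations;
   exact types vary by OS), the offsets built below correspond to layouts
   along the lines of:

     struct r_debug  { int r_version; struct link_map *r_map;
                       ElfW(Addr) r_brk; ... };
     struct link_map { ElfW(Addr) l_addr; char *l_name; ElfW(Dyn) *l_ld;
                       struct link_map *l_next, *l_prev; ... };

   so that, for instance, l_next_offset is 12 with 4-byte pointers and 24
   with 8-byte pointers.  */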
2075
2076 /* Fetch (and possibly build) an appropriate `struct link_map_offsets'
2077 for an ILP32 SVR4 system. */
2078
2079 struct link_map_offsets *
2080 svr4_ilp32_fetch_link_map_offsets (void)
2081 {
2082 static struct link_map_offsets lmo;
2083 static struct link_map_offsets *lmp = NULL;
2084
2085 if (lmp == NULL)
2086 {
2087 lmp = &lmo;
2088
2089 lmo.r_version_offset = 0;
2090 lmo.r_version_size = 4;
2091 lmo.r_map_offset = 4;
2092 lmo.r_brk_offset = 8;
2093 lmo.r_ldsomap_offset = 20;
2094
2095 /* Everything we need is in the first 20 bytes. */
2096 lmo.link_map_size = 20;
2097 lmo.l_addr_offset = 0;
2098 lmo.l_name_offset = 4;
2099 lmo.l_ld_offset = 8;
2100 lmo.l_next_offset = 12;
2101 lmo.l_prev_offset = 16;
2102 }
2103
2104 return lmp;
2105 }
2106
2107 /* Fetch (and possibly build) an appropriate `struct link_map_offsets'
2108 for an LP64 SVR4 system. */
2109
2110 struct link_map_offsets *
2111 svr4_lp64_fetch_link_map_offsets (void)
2112 {
2113 static struct link_map_offsets lmo;
2114 static struct link_map_offsets *lmp = NULL;
2115
2116 if (lmp == NULL)
2117 {
2118 lmp = &lmo;
2119
2120 lmo.r_version_offset = 0;
2121 lmo.r_version_size = 4;
2122 lmo.r_map_offset = 8;
2123 lmo.r_brk_offset = 16;
2124 lmo.r_ldsomap_offset = 40;
2125
2126 /* Everything we need is in the first 40 bytes. */
2127 lmo.link_map_size = 40;
2128 lmo.l_addr_offset = 0;
2129 lmo.l_name_offset = 8;
2130 lmo.l_ld_offset = 16;
2131 lmo.l_next_offset = 24;
2132 lmo.l_prev_offset = 32;
2133 }
2134
2135 return lmp;
2136 }
2137 \f
2138
2139 struct target_so_ops svr4_so_ops;
2140
2141 /* Look up a global symbol for ELF DSOs linked with -Bsymbolic.  Those DSOs
2142    have a different rule for symbol lookup: the lookup begins here in the DSO,
2143    not in the main executable.  */
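
/* For instance, a shared object built along the lines of
   "gcc -shared -Wl,-Bsymbolic -o libfoo.so foo.c" (a hypothetical command
   line) typically ends up with the DT_SYMBOLIC dynamic tag that scan_dyntag
   checks for below; newer linkers may express the same request via the
   DF_SYMBOLIC flag instead.  */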
2144
2145 static struct symbol *
2146 elf_lookup_lib_symbol (const struct objfile *objfile,
2147 const char *name,
2148 const domain_enum domain)
2149 {
2150 bfd *abfd;
2151
2152 if (objfile == symfile_objfile)
2153 abfd = exec_bfd;
2154 else
2155 {
2156 /* OBJFILE should have been passed as the non-debug one. */
2157 gdb_assert (objfile->separate_debug_objfile_backlink == NULL);
2158
2159 abfd = objfile->obfd;
2160 }
2161
2162 if (abfd == NULL || scan_dyntag (DT_SYMBOLIC, abfd, NULL) != 1)
2163 return NULL;
2164
2165 return lookup_global_symbol_from_objfile (objfile, name, domain);
2166 }
2167
2168 extern initialize_file_ftype _initialize_svr4_solib; /* -Wmissing-prototypes */
2169
2170 void
2171 _initialize_svr4_solib (void)
2172 {
2173 solib_svr4_data = gdbarch_data_register_pre_init (solib_svr4_init);
2174 solib_svr4_pspace_data
2175 = register_program_space_data_with_cleanup (svr4_pspace_data_cleanup);
2176
2177 svr4_so_ops.relocate_section_addresses = svr4_relocate_section_addresses;
2178 svr4_so_ops.free_so = svr4_free_so;
2179 svr4_so_ops.clear_solib = svr4_clear_solib;
2180 svr4_so_ops.solib_create_inferior_hook = svr4_solib_create_inferior_hook;
2181 svr4_so_ops.special_symbol_handling = svr4_special_symbol_handling;
2182 svr4_so_ops.current_sos = svr4_current_sos;
2183 svr4_so_ops.open_symbol_file_object = open_symbol_file_object;
2184 svr4_so_ops.in_dynsym_resolve_code = svr4_in_dynsym_resolve_code;
2185 svr4_so_ops.bfd_open = solib_bfd_open;
2186 svr4_so_ops.lookup_lib_global_symbol = elf_lookup_lib_symbol;
2187 svr4_so_ops.same = svr4_same;
2188 svr4_so_ops.keep_data_in_core = svr4_keep_data_in_core;
2189 }