1 /* Handle SVR4 shared libraries for GDB, the GNU Debugger.
2
3 Copyright (C) 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000,
4 2001, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
5 Free Software Foundation, Inc.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23
24 #include "elf/external.h"
25 #include "elf/common.h"
26 #include "elf/mips.h"
27
28 #include "symtab.h"
29 #include "bfd.h"
30 #include "symfile.h"
31 #include "objfiles.h"
32 #include "gdbcore.h"
33 #include "target.h"
34 #include "inferior.h"
35 #include "regcache.h"
36 #include "gdbthread.h"
37 #include "observer.h"
38
39 #include "gdb_assert.h"
40
41 #include "solist.h"
42 #include "solib.h"
43 #include "solib-svr4.h"
44
45 #include "bfd-target.h"
46 #include "elf-bfd.h"
47 #include "exec.h"
48 #include "auxv.h"
49 #include "exceptions.h"
50
51 static struct link_map_offsets *svr4_fetch_link_map_offsets (void);
52 static int svr4_have_link_map_offsets (void);
53 static void svr4_relocate_main_executable (void);
54
55 /* Link map info to include in an allocated so_list entry */
56
57 struct lm_info
58 {
59 /* Pointer to copy of link map from inferior. The type is char *
60 rather than void *, so that we may use byte offsets to find the
61 various fields without the need for a cast. */
62 gdb_byte *lm;
63
64 /* Amount by which addresses in the binary should be relocated to
65 match the inferior. This could most often be taken directly
66 from lm, but when prelinking is involved and the prelink base
67 address changes, we may need a different offset; we want to
68 warn about the difference and compute it only once. */
69 CORE_ADDR l_addr;
70
71 /* The target location of lm. */
72 CORE_ADDR lm_addr;
73 };
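/* For reference only (an editorial sketch, not part of this file): on
   glibc-based SVR4 targets the public part of the inferior's
   `struct link_map', as declared in <link.h>, looks roughly like:

       struct link_map
       {
         ElfW(Addr) l_addr;                 -- load displacement of the object
         char *l_name;                      -- absolute file name it was found in
         ElfW(Dyn) *l_ld;                   -- address of its .dynamic section
         struct link_map *l_next, *l_prev;  -- chain of loaded objects
       };

   The lm_info->lm buffer above caches a raw copy of one such entry; the
   accessor functions below read its fields at the byte offsets supplied
   by svr4_fetch_link_map_offsets.  */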
74
75 /* On SVR4 systems, a list of symbols in the dynamic linker where
76 GDB can try to place a breakpoint to monitor shared library
77 events.
78
79 If none of these symbols are found, or other errors occur, then
80 SVR4 systems will fall back to using a symbol as the "startup
81 mapping complete" breakpoint address. */
82
83 static char *solib_break_names[] =
84 {
85 "r_debug_state",
86 "_r_debug_state",
87 "_dl_debug_state",
88 "rtld_db_dlactivity",
89 "__dl_rtld_db_dlactivity",
90 "_rtld_debug_state",
91
92 NULL
93 };
94
95 static char *bkpt_names[] =
96 {
97 "_start",
98 "__start",
99 "main",
100 NULL
101 };
102
103 static char *main_name_list[] =
104 {
105 "main_$main",
106 NULL
107 };
108
109 /* Return non-zero if GDB_SO_NAME and INFERIOR_SO_NAME represent
110 the same shared library. */
111
112 static int
113 svr4_same_1 (const char *gdb_so_name, const char *inferior_so_name)
114 {
115 if (strcmp (gdb_so_name, inferior_so_name) == 0)
116 return 1;
117
118 /* On Solaris, when starting the inferior we think that the dynamic linker is
119 /usr/lib/ld.so.1, but later on, the table of loaded shared libraries
120 contains /lib/ld.so.1. Sometimes one file is a link to the other, and
121 sometimes they have identical content but are not linked to each
122 other. We don't restrict this check to Solaris, but the chances
123 of running into this situation elsewhere are very low. */
124 if (strcmp (gdb_so_name, "/usr/lib/ld.so.1") == 0
125 && strcmp (inferior_so_name, "/lib/ld.so.1") == 0)
126 return 1;
127
128 /* Similarly, we observed the same issue with sparc64, but with
129 different locations. */
130 if (strcmp (gdb_so_name, "/usr/lib/sparcv9/ld.so.1") == 0
131 && strcmp (inferior_so_name, "/lib/sparcv9/ld.so.1") == 0)
132 return 1;
133
134 return 0;
135 }
136
137 static int
138 svr4_same (struct so_list *gdb, struct so_list *inferior)
139 {
140 return (svr4_same_1 (gdb->so_original_name, inferior->so_original_name));
141 }
142
143 /* link map access functions */
144
145 static CORE_ADDR
146 LM_ADDR_FROM_LINK_MAP (struct so_list *so)
147 {
148 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
149 struct type *ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
150
151 return extract_typed_address (so->lm_info->lm + lmo->l_addr_offset,
152 ptr_type);
153 }
154
155 static int
156 HAS_LM_DYNAMIC_FROM_LINK_MAP (void)
157 {
158 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
159
160 return lmo->l_ld_offset >= 0;
161 }
162
163 static CORE_ADDR
164 LM_DYNAMIC_FROM_LINK_MAP (struct so_list *so)
165 {
166 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
167 struct type *ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
168
169 return extract_typed_address (so->lm_info->lm + lmo->l_ld_offset,
170 ptr_type);
171 }
172
173 static CORE_ADDR
174 LM_ADDR_CHECK (struct so_list *so, bfd *abfd)
175 {
176 if (so->lm_info->l_addr == (CORE_ADDR)-1)
177 {
178 struct bfd_section *dyninfo_sect;
179 CORE_ADDR l_addr, l_dynaddr, dynaddr;
180
181 l_addr = LM_ADDR_FROM_LINK_MAP (so);
182
183 if (! abfd || ! HAS_LM_DYNAMIC_FROM_LINK_MAP ())
184 goto set_addr;
185
186 l_dynaddr = LM_DYNAMIC_FROM_LINK_MAP (so);
187
188 dyninfo_sect = bfd_get_section_by_name (abfd, ".dynamic");
189 if (dyninfo_sect == NULL)
190 goto set_addr;
191
192 dynaddr = bfd_section_vma (abfd, dyninfo_sect);
193
194 if (dynaddr + l_addr != l_dynaddr)
195 {
196 CORE_ADDR align = 0x1000;
197 CORE_ADDR minpagesize = align;
198
199 if (bfd_get_flavour (abfd) == bfd_target_elf_flavour)
200 {
201 Elf_Internal_Ehdr *ehdr = elf_tdata (abfd)->elf_header;
202 Elf_Internal_Phdr *phdr = elf_tdata (abfd)->phdr;
203 int i;
204
205 align = 1;
206
207 for (i = 0; i < ehdr->e_phnum; i++)
208 if (phdr[i].p_type == PT_LOAD && phdr[i].p_align > align)
209 align = phdr[i].p_align;
210
211 minpagesize = get_elf_backend_data (abfd)->minpagesize;
212 }
213
214 /* Turn it into a mask. */
215 align--;
216
217 /* If the changes match the alignment requirements, we
218 assume we're using a core file that was generated by the
219 same binary, just prelinked with a different base offset.
220 If it doesn't match, we may have a different binary, the
221 same binary with the dynamic table loaded at an unrelated
222 location, or anything, really. To avoid regressions,
223 don't adjust the base offset in the latter case, although
224 odds are that, if things really changed, debugging won't
225 quite work.
226
227 One might instead expect the condition
228 ((l_addr & align) == 0 && ((l_dynaddr - dynaddr) & align) == 0)
229 but the one below is relaxed for PPC. The PPC kernel supports
230 either 4k or 64k page sizes. To be prepared for 64k pages,
231 PPC ELF files are built using an alignment requirement of 64k.
232 However, when running on a kernel supporting 4k pages, the memory
233 mapping of the library may not actually happen on a 64k boundary!
234
235 (In the usual case where (l_addr & align) == 0, this check is
236 equivalent to the possibly expected check above.)
237
238 Even on PPC it must be zero-aligned at least for MINPAGESIZE. */
239
240 if ((l_addr & (minpagesize - 1)) == 0
241 && (l_addr & align) == ((l_dynaddr - dynaddr) & align))
242 {
243 l_addr = l_dynaddr - dynaddr;
244
245 if (info_verbose)
246 {
247 warning (_(".dynamic section for \"%s\" "
248 "is not at the expected address"), so->so_name);
249 warning (_("difference appears to be caused by prelink, "
250 "adjusting expectations"));
251 }
252 }
253 else
254 warning (_(".dynamic section for \"%s\" "
255 "is not at the expected address "
256 "(wrong library or version mismatch?)"), so->so_name);
257 }
258
259 set_addr:
260 so->lm_info->l_addr = l_addr;
261 }
262
263 return so->lm_info->l_addr;
264 }
265
266 static CORE_ADDR
267 LM_NEXT (struct so_list *so)
268 {
269 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
270 struct type *ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
271
272 return extract_typed_address (so->lm_info->lm + lmo->l_next_offset,
273 ptr_type);
274 }
275
276 static CORE_ADDR
277 LM_NAME (struct so_list *so)
278 {
279 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
280 struct type *ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
281
282 return extract_typed_address (so->lm_info->lm + lmo->l_name_offset,
283 ptr_type);
284 }
285
286 static int
287 IGNORE_FIRST_LINK_MAP_ENTRY (struct so_list *so)
288 {
289 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
290 struct type *ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
291
292 /* Assume that everything is a library if the dynamic loader was loaded
293 late by a static executable. */
294 if (exec_bfd && bfd_get_section_by_name (exec_bfd, ".dynamic") == NULL)
295 return 0;
296
297 return extract_typed_address (so->lm_info->lm + lmo->l_prev_offset,
298 ptr_type) == 0;
299 }
300
301 /* Per pspace SVR4 specific data. */
302
303 struct svr4_info
304 {
305 CORE_ADDR debug_base; /* Base of dynamic linker structures */
306
307 /* Validity flag for debug_loader_offset. */
308 int debug_loader_offset_p;
309
310 /* Load address for the dynamic linker, inferred. */
311 CORE_ADDR debug_loader_offset;
312
313 /* Name of the dynamic linker, valid if debug_loader_offset_p. */
314 char *debug_loader_name;
315
316 /* Load map address for the main executable. */
317 CORE_ADDR main_lm_addr;
318
319 CORE_ADDR interp_text_sect_low;
320 CORE_ADDR interp_text_sect_high;
321 CORE_ADDR interp_plt_sect_low;
322 CORE_ADDR interp_plt_sect_high;
323 };
324
325 /* Per-program-space data key. */
326 static const struct program_space_data *solib_svr4_pspace_data;
327
328 static void
329 svr4_pspace_data_cleanup (struct program_space *pspace, void *arg)
330 {
331 struct svr4_info *info;
332
333 info = program_space_data (pspace, solib_svr4_pspace_data);
334 xfree (info);
335 }
336
337 /* Get the current svr4 data. If none is found yet, add it now. This
338 function always returns a valid object. */
339
340 static struct svr4_info *
341 get_svr4_info (void)
342 {
343 struct svr4_info *info;
344
345 info = program_space_data (current_program_space, solib_svr4_pspace_data);
346 if (info != NULL)
347 return info;
348
349 info = XZALLOC (struct svr4_info);
350 set_program_space_data (current_program_space, solib_svr4_pspace_data, info);
351 return info;
352 }
353
354 /* Local function prototypes */
355
356 static int match_main (char *);
357
358 static CORE_ADDR bfd_lookup_symbol (bfd *, char *);
359
360 /*
361
362 LOCAL FUNCTION
363
364 bfd_lookup_symbol -- lookup the value for a specific symbol
365
366 SYNOPSIS
367
368 CORE_ADDR bfd_lookup_symbol (bfd *abfd, char *symname)
369
370 DESCRIPTION
371
372 An expensive way to lookup the value of a single symbol for
373 bfd's that are only temporary anyway. This is used by the
374 shared library support to find the address of the debugger
375 notification routine in the shared library.
376
377 The returned symbol may be in a code or data section; functions
378 will normally be in a code section, but may be in a data section
379 if this architecture uses function descriptors.
380
381 Note that 0 is specifically allowed as an error return (no
382 such symbol).
383 */
384
385 static CORE_ADDR
386 bfd_lookup_symbol (bfd *abfd, char *symname)
387 {
388 long storage_needed;
389 asymbol *sym;
390 asymbol **symbol_table;
391 unsigned int number_of_symbols;
392 unsigned int i;
393 struct cleanup *back_to;
394 CORE_ADDR symaddr = 0;
395
396 storage_needed = bfd_get_symtab_upper_bound (abfd);
397
398 if (storage_needed > 0)
399 {
400 symbol_table = (asymbol **) xmalloc (storage_needed);
401 back_to = make_cleanup (xfree, symbol_table);
402 number_of_symbols = bfd_canonicalize_symtab (abfd, symbol_table);
403
404 for (i = 0; i < number_of_symbols; i++)
405 {
406 sym = *symbol_table++;
407 if (strcmp (sym->name, symname) == 0
408 && (sym->section->flags & (SEC_CODE | SEC_DATA)) != 0)
409 {
410 /* BFD symbols are section relative. */
411 symaddr = sym->value + sym->section->vma;
412 break;
413 }
414 }
415 do_cleanups (back_to);
416 }
417
418 if (symaddr)
419 return symaddr;
420
421 /* On FreeBSD, the dynamic linker is stripped by default. So we'll
422 have to check the dynamic string table too. */
423
424 storage_needed = bfd_get_dynamic_symtab_upper_bound (abfd);
425
426 if (storage_needed > 0)
427 {
428 symbol_table = (asymbol **) xmalloc (storage_needed);
429 back_to = make_cleanup (xfree, symbol_table);
430 number_of_symbols = bfd_canonicalize_dynamic_symtab (abfd, symbol_table);
431
432 for (i = 0; i < number_of_symbols; i++)
433 {
434 sym = *symbol_table++;
435
436 if (strcmp (sym->name, symname) == 0
437 && (sym->section->flags & (SEC_CODE | SEC_DATA)) != 0)
438 {
439 /* BFD symbols are section relative. */
440 symaddr = sym->value + sym->section->vma;
441 break;
442 }
443 }
444 do_cleanups (back_to);
445 }
446
447 return symaddr;
448 }
449
450
451 /* Read program header TYPE from inferior memory. The header is found
452 by scanning the OS auxiliary vector.
453
454 If TYPE == -1, return the program headers instead of the contents of
455 one program header.
456
457 Return a pointer to allocated memory holding the program header contents,
458 or NULL on failure. If successful, and unless P_SECT_SIZE is NULL, the
459 size of those contents is returned in *P_SECT_SIZE. Likewise, the target
460 architecture size (32-bit or 64-bit) is returned in *P_ARCH_SIZE. */
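/* Editorial note (not from the original sources): AT_PHENT is the size of a
   single program header entry as seen by the inferior, so a value equal to
   sizeof (Elf32_External_Phdr) (32 bytes) indicates a 32-bit inferior and a
   value equal to sizeof (Elf64_External_Phdr) (56 bytes) a 64-bit one; that
   is the test read_program_header performs below.  */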
461
462 static gdb_byte *
463 read_program_header (int type, int *p_sect_size, int *p_arch_size)
464 {
465 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch);
466 CORE_ADDR at_phdr, at_phent, at_phnum;
467 int arch_size, sect_size;
468 CORE_ADDR sect_addr;
469 gdb_byte *buf;
470
471 /* Get required auxv elements from target. */
472 if (target_auxv_search (&current_target, AT_PHDR, &at_phdr) <= 0)
473 return 0;
474 if (target_auxv_search (&current_target, AT_PHENT, &at_phent) <= 0)
475 return 0;
476 if (target_auxv_search (&current_target, AT_PHNUM, &at_phnum) <= 0)
477 return 0;
478 if (!at_phdr || !at_phnum)
479 return 0;
480
481 /* Determine ELF architecture type. */
482 if (at_phent == sizeof (Elf32_External_Phdr))
483 arch_size = 32;
484 else if (at_phent == sizeof (Elf64_External_Phdr))
485 arch_size = 64;
486 else
487 return 0;
488
489 /* Find the requested segment. */
490 if (type == -1)
491 {
492 sect_addr = at_phdr;
493 sect_size = at_phent * at_phnum;
494 }
495 else if (arch_size == 32)
496 {
497 Elf32_External_Phdr phdr;
498 int i;
499
500 /* Search for requested PHDR. */
501 for (i = 0; i < at_phnum; i++)
502 {
503 if (target_read_memory (at_phdr + i * sizeof (phdr),
504 (gdb_byte *)&phdr, sizeof (phdr)))
505 return 0;
506
507 if (extract_unsigned_integer ((gdb_byte *)phdr.p_type,
508 4, byte_order) == type)
509 break;
510 }
511
512 if (i == at_phnum)
513 return 0;
514
515 /* Retrieve address and size. */
516 sect_addr = extract_unsigned_integer ((gdb_byte *)phdr.p_vaddr,
517 4, byte_order);
518 sect_size = extract_unsigned_integer ((gdb_byte *)phdr.p_memsz,
519 4, byte_order);
520 }
521 else
522 {
523 Elf64_External_Phdr phdr;
524 int i;
525
526 /* Search for requested PHDR. */
527 for (i = 0; i < at_phnum; i++)
528 {
529 if (target_read_memory (at_phdr + i * sizeof (phdr),
530 (gdb_byte *)&phdr, sizeof (phdr)))
531 return 0;
532
533 if (extract_unsigned_integer ((gdb_byte *)phdr.p_type,
534 4, byte_order) == type)
535 break;
536 }
537
538 if (i == at_phnum)
539 return 0;
540
541 /* Retrieve address and size. */
542 sect_addr = extract_unsigned_integer ((gdb_byte *)phdr.p_vaddr,
543 8, byte_order);
544 sect_size = extract_unsigned_integer ((gdb_byte *)phdr.p_memsz,
545 8, byte_order);
546 }
547
548 /* Read in requested program header. */
549 buf = xmalloc (sect_size);
550 if (target_read_memory (sect_addr, buf, sect_size))
551 {
552 xfree (buf);
553 return NULL;
554 }
555
556 if (p_arch_size)
557 *p_arch_size = arch_size;
558 if (p_sect_size)
559 *p_sect_size = sect_size;
560
561 return buf;
562 }
563
564
565 /* Return program interpreter string. */
566 static gdb_byte *
567 find_program_interpreter (void)
568 {
569 gdb_byte *buf = NULL;
570
571 /* If we have an exec_bfd, use its section table. */
572 if (exec_bfd
573 && bfd_get_flavour (exec_bfd) == bfd_target_elf_flavour)
574 {
575 struct bfd_section *interp_sect;
576
577 interp_sect = bfd_get_section_by_name (exec_bfd, ".interp");
578 if (interp_sect != NULL)
579 {
580 CORE_ADDR sect_addr = bfd_section_vma (exec_bfd, interp_sect);
581 int sect_size = bfd_section_size (exec_bfd, interp_sect);
582
583 buf = xmalloc (sect_size);
584 bfd_get_section_contents (exec_bfd, interp_sect, buf, 0, sect_size);
585 }
586 }
587
588 /* If we didn't find it, use the target auxiliary vector. */
589 if (!buf)
590 buf = read_program_header (PT_INTERP, NULL, NULL);
591
592 return buf;
593 }
594
595
596 /* Scan for DYNTAG in .dynamic section of ABFD. If DYNTAG is found 1 is
597 returned and the corresponding PTR is set. */
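/* For reference only (an editorial sketch, not part of this file): each
   .dynamic entry scanned below is an ElfNN_Dyn record of roughly this shape:

       struct
       {
         ElfNN_Sword d_tag;       -- entry type, e.g. DT_DEBUG or DT_NULL
         union
         {
           ElfNN_Word d_val;      -- integer value
           ElfNN_Addr d_ptr;      -- program virtual address
         } d_un;
       };

   scan_dyntag walks these records in their external (on-disk byte order)
   form and stops at DT_NULL, which terminates the table.  */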
598
599 static int
600 scan_dyntag (int dyntag, bfd *abfd, CORE_ADDR *ptr)
601 {
602 int arch_size, step, sect_size;
603 long dyn_tag;
604 CORE_ADDR dyn_ptr, dyn_addr;
605 gdb_byte *bufend, *bufstart, *buf;
606 Elf32_External_Dyn *x_dynp_32;
607 Elf64_External_Dyn *x_dynp_64;
608 struct bfd_section *sect;
609 struct target_section *target_section;
610
611 if (abfd == NULL)
612 return 0;
613
614 if (bfd_get_flavour (abfd) != bfd_target_elf_flavour)
615 return 0;
616
617 arch_size = bfd_get_arch_size (abfd);
618 if (arch_size == -1)
619 return 0;
620
621 /* Find the start address of the .dynamic section. */
622 sect = bfd_get_section_by_name (abfd, ".dynamic");
623 if (sect == NULL)
624 return 0;
625
626 for (target_section = current_target_sections->sections;
627 target_section < current_target_sections->sections_end;
628 target_section++)
629 if (sect == target_section->the_bfd_section)
630 break;
631 if (target_section < current_target_sections->sections_end)
632 dyn_addr = target_section->addr;
633 else
634 {
635 /* ABFD may come from OBJFILE acting only as a symbol file without being
636 loaded into the target (see add_symbol_file_command). In this case, we
637 fall back to the file VMA address without the possibility of
638 having the section relocated to its actual in-memory address. */
639
640 dyn_addr = bfd_section_vma (abfd, sect);
641 }
642
643 /* Read in .dynamic from the BFD. We will get the actual value
644 from memory later. */
645 sect_size = bfd_section_size (abfd, sect);
646 buf = bufstart = alloca (sect_size);
647 if (!bfd_get_section_contents (abfd, sect,
648 buf, 0, sect_size))
649 return 0;
650
651 /* Iterate over BUF and scan for DYNTAG. If found, set PTR and return. */
652 step = (arch_size == 32) ? sizeof (Elf32_External_Dyn)
653 : sizeof (Elf64_External_Dyn);
654 for (bufend = buf + sect_size;
655 buf < bufend;
656 buf += step)
657 {
658 if (arch_size == 32)
659 {
660 x_dynp_32 = (Elf32_External_Dyn *) buf;
661 dyn_tag = bfd_h_get_32 (abfd, (bfd_byte *) x_dynp_32->d_tag);
662 dyn_ptr = bfd_h_get_32 (abfd, (bfd_byte *) x_dynp_32->d_un.d_ptr);
663 }
664 else
665 {
666 x_dynp_64 = (Elf64_External_Dyn *) buf;
667 dyn_tag = bfd_h_get_64 (abfd, (bfd_byte *) x_dynp_64->d_tag);
668 dyn_ptr = bfd_h_get_64 (abfd, (bfd_byte *) x_dynp_64->d_un.d_ptr);
669 }
670 if (dyn_tag == DT_NULL)
671 return 0;
672 if (dyn_tag == dyntag)
673 {
674 /* If requested, try to read the runtime value of this .dynamic
675 entry. */
676 if (ptr)
677 {
678 struct type *ptr_type;
679 gdb_byte ptr_buf[8];
680 CORE_ADDR ptr_addr;
681
682 ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
683 ptr_addr = dyn_addr + (buf - bufstart) + arch_size / 8;
684 if (target_read_memory (ptr_addr, ptr_buf, arch_size / 8) == 0)
685 dyn_ptr = extract_typed_address (ptr_buf, ptr_type);
686 *ptr = dyn_ptr;
687 }
688 return 1;
689 }
690 }
691
692 return 0;
693 }
694
695 /* Scan for DYNTAG in .dynamic section of the target's main executable,
696 found by consulting the OS auxiliary vector. If DYNTAG is found 1 is
697 returned and the corresponding PTR is set. */
698
699 static int
700 scan_dyntag_auxv (int dyntag, CORE_ADDR *ptr)
701 {
702 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch);
703 int sect_size, arch_size, step;
704 long dyn_tag;
705 CORE_ADDR dyn_ptr;
706 gdb_byte *bufend, *bufstart, *buf;
707
708 /* Read in .dynamic section. */
709 buf = bufstart = read_program_header (PT_DYNAMIC, &sect_size, &arch_size);
710 if (!buf)
711 return 0;
712
713 /* Iterate over BUF and scan for DYNTAG. If found, set PTR and return. */
714 step = (arch_size == 32) ? sizeof (Elf32_External_Dyn)
715 : sizeof (Elf64_External_Dyn);
716 for (bufend = buf + sect_size;
717 buf < bufend;
718 buf += step)
719 {
720 if (arch_size == 32)
721 {
722 Elf32_External_Dyn *dynp = (Elf32_External_Dyn *) buf;
723 dyn_tag = extract_unsigned_integer ((gdb_byte *) dynp->d_tag,
724 4, byte_order);
725 dyn_ptr = extract_unsigned_integer ((gdb_byte *) dynp->d_un.d_ptr,
726 4, byte_order);
727 }
728 else
729 {
730 Elf64_External_Dyn *dynp = (Elf64_External_Dyn *) buf;
731 dyn_tag = extract_unsigned_integer ((gdb_byte *) dynp->d_tag,
732 8, byte_order);
733 dyn_ptr = extract_unsigned_integer ((gdb_byte *) dynp->d_un.d_ptr,
734 8, byte_order);
735 }
736 if (dyn_tag == DT_NULL)
737 break;
738
739 if (dyn_tag == dyntag)
740 {
741 if (ptr)
742 *ptr = dyn_ptr;
743
744 xfree (bufstart);
745 return 1;
746 }
747 }
748
749 xfree (bufstart);
750 return 0;
751 }
752
753
754 /*
755
756 LOCAL FUNCTION
757
758 elf_locate_base -- locate the base address of dynamic linker structs
759 for SVR4 elf targets.
760
761 SYNOPSIS
762
763 CORE_ADDR elf_locate_base (void)
764
765 DESCRIPTION
766
767 For SVR4 elf targets the address of the dynamic linker's runtime
768 structure is contained within the dynamic info section in the
769 executable file. The dynamic section is also mapped into the
770 inferior address space. Because the runtime loader fills in the
771 real address before starting the inferior, we have to read in the
772 dynamic info section from the inferior address space.
773 If there are any errors while trying to find the address, we
774 silently return 0, otherwise the found address is returned.
775
776 */
777
778 static CORE_ADDR
779 elf_locate_base (void)
780 {
781 struct minimal_symbol *msymbol;
782 CORE_ADDR dyn_ptr;
783
784 /* Look for DT_MIPS_RLD_MAP first. MIPS executables use this
785 instead of DT_DEBUG, although they sometimes contain an unused
786 DT_DEBUG. */
787 if (scan_dyntag (DT_MIPS_RLD_MAP, exec_bfd, &dyn_ptr)
788 || scan_dyntag_auxv (DT_MIPS_RLD_MAP, &dyn_ptr))
789 {
790 struct type *ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
791 gdb_byte *pbuf;
792 int pbuf_size = TYPE_LENGTH (ptr_type);
793 pbuf = alloca (pbuf_size);
794 /* DT_MIPS_RLD_MAP contains a pointer to the address
795 of the dynamic link structure. */
796 if (target_read_memory (dyn_ptr, pbuf, pbuf_size))
797 return 0;
798 return extract_typed_address (pbuf, ptr_type);
799 }
800
801 /* Find DT_DEBUG. */
802 if (scan_dyntag (DT_DEBUG, exec_bfd, &dyn_ptr)
803 || scan_dyntag_auxv (DT_DEBUG, &dyn_ptr))
804 return dyn_ptr;
805
806 /* This may be a static executable. Look for the symbol
807 conventionally named _r_debug, as a last resort. */
808 msymbol = lookup_minimal_symbol ("_r_debug", NULL, symfile_objfile);
809 if (msymbol != NULL)
810 return SYMBOL_VALUE_ADDRESS (msymbol);
811
812 /* DT_DEBUG entry not found. */
813 return 0;
814 }
815
816 /*
817
818 LOCAL FUNCTION
819
820 locate_base -- locate the base address of dynamic linker structs
821
822 SYNOPSIS
823
824 CORE_ADDR locate_base (struct svr4_info *)
825
826 DESCRIPTION
827
828 For both the SunOS and SVR4 shared library implementations, if the
829 inferior executable has been linked dynamically, there is a single
830 address somewhere in the inferior's data space which is the key to
831 locating all of the dynamic linker's runtime structures. This
832 address is the value of the debug base symbol. The job of this
833 function is to find and return that address, or to return 0 if there
834 is no such address (the executable is statically linked for example).
835
836 For SunOS, the job is almost trivial, since the dynamic linker and
837 all of its structures are statically linked to the executable at
838 link time. Thus the symbol for the address we are looking for has
839 already been added to the minimal symbol table for the executable's
840 objfile at the time the symbol file's symbols were read, and all we
841 have to do is look it up there. Note that we explicitly do NOT want
842 to find the copies in the shared library.
843
844 The SVR4 version is a bit more complicated because the address
845 is contained somewhere in the dynamic info section. We have to go
846 to a lot more work to discover the address of the debug base symbol.
847 Because of this complexity, we cache the value we find and return that
848 value on subsequent invocations. Note there is no copy in the
849 executable symbol tables.
850
851 */
852
853 static CORE_ADDR
854 locate_base (struct svr4_info *info)
855 {
856 /* Check to see if we have a currently valid address, and if so, avoid
857 doing all this work again and just return the cached address. If
858 we have no cached address, try to locate it in the dynamic info
859 section for ELF executables. There's no point in doing any of this
860 though if we don't have some link map offsets to work with. */
861
862 if (info->debug_base == 0 && svr4_have_link_map_offsets ())
863 info->debug_base = elf_locate_base ();
864 return info->debug_base;
865 }
866
867 /* Find the first element in the inferior's dynamic link map, and
868 return its address in the inferior.
869
870 FIXME: Perhaps we should validate the info somehow, perhaps by
871 checking r_version for a known version number, or r_state for
872 RT_CONSISTENT. */
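/* For reference only (an editorial sketch, not part of this file): the
   r_version, r_map, r_brk and r_ldsomap offsets used below index into the
   inferior's `struct r_debug', which on glibc-based systems (<link.h>)
   looks roughly like:

       struct r_debug
       {
         int r_version;             -- debug interface version number
         struct link_map *r_map;    -- head of the chain of loaded objects
         ElfW(Addr) r_brk;          -- address of the debugger hook function
         enum { RT_CONSISTENT, RT_ADD, RT_DELETE } r_state;
         ElfW(Addr) r_ldbase;       -- base address of the dynamic linker
       };

   r_ldsomap is an extension (present on Solaris) that is absent from this
   layout, hence the r_version and offset checks in solib_svr4_r_ldsomap.  */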
873
874 static CORE_ADDR
875 solib_svr4_r_map (struct svr4_info *info)
876 {
877 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
878 struct type *ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
879 CORE_ADDR addr = 0;
880 volatile struct gdb_exception ex;
881
882 TRY_CATCH (ex, RETURN_MASK_ERROR)
883 {
884 addr = read_memory_typed_address (info->debug_base + lmo->r_map_offset,
885 ptr_type);
886 }
887 exception_print (gdb_stderr, ex);
888 return addr;
889 }
890
891 /* Find r_brk from the inferior's debug base. */
892
893 static CORE_ADDR
894 solib_svr4_r_brk (struct svr4_info *info)
895 {
896 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
897 struct type *ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
898
899 return read_memory_typed_address (info->debug_base + lmo->r_brk_offset,
900 ptr_type);
901 }
902
903 /* Find the link map for the dynamic linker (if it is not in the
904 normal list of loaded shared objects). */
905
906 static CORE_ADDR
907 solib_svr4_r_ldsomap (struct svr4_info *info)
908 {
909 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
910 struct type *ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
911 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch);
912 ULONGEST version;
913
914 /* Check version, and return zero if `struct r_debug' doesn't have
915 the r_ldsomap member. */
916 version
917 = read_memory_unsigned_integer (info->debug_base + lmo->r_version_offset,
918 lmo->r_version_size, byte_order);
919 if (version < 2 || lmo->r_ldsomap_offset == -1)
920 return 0;
921
922 return read_memory_typed_address (info->debug_base + lmo->r_ldsomap_offset,
923 ptr_type);
924 }
925
926 /* On Solaris systems with some versions of the dynamic linker,
927 ld.so's l_name pointer points to the SONAME in the string table
928 rather than into writable memory. So that GDB can find shared
929 libraries when loading a core file generated by gcore, ensure that
930 memory areas containing the l_name string are saved in the core
931 file. */
932
933 static int
934 svr4_keep_data_in_core (CORE_ADDR vaddr, unsigned long size)
935 {
936 struct svr4_info *info;
937 CORE_ADDR ldsomap;
938 struct so_list *new;
939 struct cleanup *old_chain;
940 struct link_map_offsets *lmo;
941 CORE_ADDR lm_name;
942
943 info = get_svr4_info ();
944
945 info->debug_base = 0;
946 locate_base (info);
947 if (!info->debug_base)
948 return 0;
949
950 ldsomap = solib_svr4_r_ldsomap (info);
951 if (!ldsomap)
952 return 0;
953
954 lmo = svr4_fetch_link_map_offsets ();
955 new = XZALLOC (struct so_list);
956 old_chain = make_cleanup (xfree, new);
957 new->lm_info = xmalloc (sizeof (struct lm_info));
958 make_cleanup (xfree, new->lm_info);
959 new->lm_info->l_addr = (CORE_ADDR)-1;
960 new->lm_info->lm_addr = ldsomap;
961 new->lm_info->lm = xzalloc (lmo->link_map_size);
962 make_cleanup (xfree, new->lm_info->lm);
963 read_memory (ldsomap, new->lm_info->lm, lmo->link_map_size);
964 lm_name = LM_NAME (new);
965 do_cleanups (old_chain);
966
967 return (lm_name >= vaddr && lm_name < vaddr + size);
968 }
969
970 /*
971
972 LOCAL FUNCTION
973
974 open_symbol_file_object
975
976 SYNOPSIS
977
978 int open_symbol_file_object (void *from_ttyp)
979
980 DESCRIPTION
981
982 If no open symbol file, attempt to locate and open the main symbol
983 file. On SVR4 systems, this is the first link map entry. If its
984 name is here, we can open it. Useful when attaching to a process
985 without first loading its symbol file.
986
987 If FROM_TTYP dereferences to a non-zero integer, allow messages to
988 be printed. This parameter is a pointer rather than an int because
989 open_symbol_file_object() is called via catch_errors() and
990 catch_errors() requires a pointer argument. */
991
992 static int
993 open_symbol_file_object (void *from_ttyp)
994 {
995 CORE_ADDR lm, l_name;
996 char *filename;
997 int errcode;
998 int from_tty = *(int *)from_ttyp;
999 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
1000 struct type *ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
1001 int l_name_size = TYPE_LENGTH (ptr_type);
1002 gdb_byte *l_name_buf = xmalloc (l_name_size);
1003 struct cleanup *cleanups = make_cleanup (xfree, l_name_buf);
1004 struct svr4_info *info = get_svr4_info ();
1005
1006 if (symfile_objfile)
1007 if (!query (_("Attempt to reload symbols from process? ")))
1008 return 0;
1009
1010 /* Always locate the debug struct, in case it has moved. */
1011 info->debug_base = 0;
1012 if (locate_base (info) == 0)
1013 return 0; /* failed somehow... */
1014
1015 /* First link map member should be the executable. */
1016 lm = solib_svr4_r_map (info);
1017 if (lm == 0)
1018 return 0; /* failed somehow... */
1019
1020 /* Read address of name from target memory to GDB. */
1021 read_memory (lm + lmo->l_name_offset, l_name_buf, l_name_size);
1022
1023 /* Convert the address to host format. */
1024 l_name = extract_typed_address (l_name_buf, ptr_type);
1025
1026 /* Free l_name_buf. */
1027 do_cleanups (cleanups);
1028
1029 if (l_name == 0)
1030 return 0; /* No filename. */
1031
1032 /* Now fetch the filename from target memory. */
1033 target_read_string (l_name, &filename, SO_NAME_MAX_PATH_SIZE - 1, &errcode);
1034 make_cleanup (xfree, filename);
1035
1036 if (errcode)
1037 {
1038 warning (_("failed to read exec filename from attached file: %s"),
1039 safe_strerror (errcode));
1040 return 0;
1041 }
1042
1043 /* Have a pathname: read the symbol file. */
1044 symbol_file_add_main (filename, from_tty);
1045
1046 return 1;
1047 }
1048
1049 /* If no shared library information is available from the dynamic
1050 linker, build a fallback list from other sources. */
1051
1052 static struct so_list *
1053 svr4_default_sos (void)
1054 {
1055 struct svr4_info *info = get_svr4_info ();
1056
1057 struct so_list *head = NULL;
1058 struct so_list **link_ptr = &head;
1059
1060 if (info->debug_loader_offset_p)
1061 {
1062 struct so_list *new = XZALLOC (struct so_list);
1063
1064 new->lm_info = xmalloc (sizeof (struct lm_info));
1065
1066 /* Nothing will ever check the cached copy of the link
1067 map if we set l_addr. */
1068 new->lm_info->l_addr = info->debug_loader_offset;
1069 new->lm_info->lm_addr = 0;
1070 new->lm_info->lm = NULL;
1071
1072 strncpy (new->so_name, info->debug_loader_name,
1073 SO_NAME_MAX_PATH_SIZE - 1);
1074 new->so_name[SO_NAME_MAX_PATH_SIZE - 1] = '\0';
1075 strcpy (new->so_original_name, new->so_name);
1076
1077 *link_ptr = new;
1078 link_ptr = &new->next;
1079 }
1080
1081 return head;
1082 }
1083
1084 /* LOCAL FUNCTION
1085
1086 current_sos -- build a list of currently loaded shared objects
1087
1088 SYNOPSIS
1089
1090 struct so_list *current_sos ()
1091
1092 DESCRIPTION
1093
1094 Build a list of `struct so_list' objects describing the shared
1095 objects currently loaded in the inferior. This list does not
1096 include an entry for the main executable file.
1097
1098 Note that we only gather information directly available from the
1099 inferior --- we don't examine any of the shared library files
1100 themselves. The declaration of `struct so_list' says which fields
1101 we provide values for. */
1102
1103 static struct so_list *
1104 svr4_current_sos (void)
1105 {
1106 CORE_ADDR lm;
1107 struct so_list *head = 0;
1108 struct so_list **link_ptr = &head;
1109 CORE_ADDR ldsomap = 0;
1110 struct svr4_info *info;
1111
1112 info = get_svr4_info ();
1113
1114 /* Always locate the debug struct, in case it has moved. */
1115 info->debug_base = 0;
1116 locate_base (info);
1117
1118 /* If we can't find the dynamic linker's base structure, this
1119 must not be a dynamically linked executable. Hmm. */
1120 if (! info->debug_base)
1121 return svr4_default_sos ();
1122
1123 /* Walk the inferior's link map list, and build our list of
1124 `struct so_list' nodes. */
1125 lm = solib_svr4_r_map (info);
1126
1127 while (lm)
1128 {
1129 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
1130 struct so_list *new = XZALLOC (struct so_list);
1131 struct cleanup *old_chain = make_cleanup (xfree, new);
1132
1133 new->lm_info = xmalloc (sizeof (struct lm_info));
1134 make_cleanup (xfree, new->lm_info);
1135
1136 new->lm_info->l_addr = (CORE_ADDR)-1;
1137 new->lm_info->lm_addr = lm;
1138 new->lm_info->lm = xzalloc (lmo->link_map_size);
1139 make_cleanup (xfree, new->lm_info->lm);
1140
1141 read_memory (lm, new->lm_info->lm, lmo->link_map_size);
1142
1143 lm = LM_NEXT (new);
1144
1145 /* For SVR4 versions, the first entry in the link map is for the
1146 inferior executable, so we must ignore it. For some versions of
1147 SVR4, it has no name. For others (Solaris 2.3 for example), it
1148 does have a name, so we can no longer use a missing name to
1149 decide when to ignore it. */
1150 if (IGNORE_FIRST_LINK_MAP_ENTRY (new) && ldsomap == 0)
1151 {
1152 info->main_lm_addr = new->lm_info->lm_addr;
1153 free_so (new);
1154 }
1155 else
1156 {
1157 int errcode;
1158 char *buffer;
1159
1160 /* Extract this shared object's name. */
1161 target_read_string (LM_NAME (new), &buffer,
1162 SO_NAME_MAX_PATH_SIZE - 1, &errcode);
1163 if (errcode != 0)
1164 warning (_("Can't read pathname for load map: %s."),
1165 safe_strerror (errcode));
1166 else
1167 {
1168 strncpy (new->so_name, buffer, SO_NAME_MAX_PATH_SIZE - 1);
1169 new->so_name[SO_NAME_MAX_PATH_SIZE - 1] = '\0';
1170 strcpy (new->so_original_name, new->so_name);
1171 }
1172 xfree (buffer);
1173
1174 /* If this entry has no name, or its name matches the name
1175 for the main executable, don't include it in the list. */
1176 if (! new->so_name[0]
1177 || match_main (new->so_name))
1178 free_so (new);
1179 else
1180 {
1181 new->next = 0;
1182 *link_ptr = new;
1183 link_ptr = &new->next;
1184 }
1185 }
1186
1187 /* On Solaris, the dynamic linker is not in the normal list of
1188 shared objects, so make sure we pick it up too. Having
1189 symbol information for the dynamic linker is quite crucial
1190 for skipping dynamic linker resolver code. */
1191 if (lm == 0 && ldsomap == 0)
1192 lm = ldsomap = solib_svr4_r_ldsomap (info);
1193
1194 discard_cleanups (old_chain);
1195 }
1196
1197 if (head == NULL)
1198 return svr4_default_sos ();
1199
1200 return head;
1201 }
1202
1203 /* Get the address of the link_map for a given OBJFILE. */
1204
1205 CORE_ADDR
1206 svr4_fetch_objfile_link_map (struct objfile *objfile)
1207 {
1208 struct so_list *so;
1209 struct svr4_info *info = get_svr4_info ();
1210
1211 /* Cause svr4_current_sos() to be run if it hasn't been already. */
1212 if (info->main_lm_addr == 0)
1213 solib_add (NULL, 0, &current_target, auto_solib_add);
1214
1215 /* svr4_current_sos() will set main_lm_addr for the main executable. */
1216 if (objfile == symfile_objfile)
1217 return info->main_lm_addr;
1218
1219 /* The other link map addresses may be found by examining the list
1220 of shared libraries. */
1221 for (so = master_so_list (); so; so = so->next)
1222 if (so->objfile == objfile)
1223 return so->lm_info->lm_addr;
1224
1225 /* Not found! */
1226 return 0;
1227 }
1228
1229 /* On some systems, the only way to recognize the link map entry for
1230 the main executable file is by looking at its name. Return
1231 non-zero iff SONAME matches one of the known main executable names. */
1232
1233 static int
1234 match_main (char *soname)
1235 {
1236 char **mainp;
1237
1238 for (mainp = main_name_list; *mainp != NULL; mainp++)
1239 {
1240 if (strcmp (soname, *mainp) == 0)
1241 return (1);
1242 }
1243
1244 return (0);
1245 }
1246
1247 /* Return 1 if PC lies in the dynamic symbol resolution code of the
1248 SVR4 run time loader. */
1249
1250 int
1251 svr4_in_dynsym_resolve_code (CORE_ADDR pc)
1252 {
1253 struct svr4_info *info = get_svr4_info ();
1254
1255 return ((pc >= info->interp_text_sect_low
1256 && pc < info->interp_text_sect_high)
1257 || (pc >= info->interp_plt_sect_low
1258 && pc < info->interp_plt_sect_high)
1259 || in_plt_section (pc, NULL));
1260 }
1261
1262 /* Given an executable's ABFD and target, compute the entry-point
1263 address. */
1264
1265 static CORE_ADDR
1266 exec_entry_point (struct bfd *abfd, struct target_ops *targ)
1267 {
1268 /* KevinB wrote ... for most targets, the address returned by
1269 bfd_get_start_address() is the entry point for the start
1270 function. But, for some targets, bfd_get_start_address() returns
1271 the address of a function descriptor from which the entry point
1272 address may be extracted. This address is extracted by
1273 gdbarch_convert_from_func_ptr_addr(). The method
1274 gdbarch_convert_from_func_ptr_addr() is merely the identity
1275 function for targets which don't use function descriptors. */
1276 return gdbarch_convert_from_func_ptr_addr (target_gdbarch,
1277 bfd_get_start_address (abfd),
1278 targ);
1279 }
1280
1281 /*
1282
1283 LOCAL FUNCTION
1284
1285 enable_break -- arrange for dynamic linker to hit breakpoint
1286
1287 SYNOPSIS
1288
1289 int enable_break (struct svr4_info *info, int from_tty)
1290
1291 DESCRIPTION
1292
1293 Both the SunOS and the SVR4 dynamic linkers have, as part of their
1294 debugger interface, support for arranging for the inferior to hit
1295 a breakpoint after mapping in the shared libraries. This function
1296 enables that breakpoint.
1297
1298 For SunOS, there is a special flag location (in_debugger) which we
1299 set to 1. When the dynamic linker sees this flag set, it will set
1300 a breakpoint at a location known only to itself, after saving the
1301 original contents of that place and the breakpoint address itself,
1302 in its own internal structures. When we resume the inferior, it
1303 will eventually take a SIGTRAP when it runs into the breakpoint.
1304 We handle this (in a different place) by restoring the contents of
1305 the breakpointed location (which is only known after it stops),
1306 chasing around to locate the shared libraries that have been
1307 loaded, then resuming.
1308
1309 For SVR4, the debugger interface structure contains a member (r_brk)
1310 which is statically initialized at the time the shared library is
1311 built, to the offset of a function (_r_debug_state) which is guaran-
1312 teed to be called once before mapping in a library, and again when
1313 the mapping is complete. At the time we are examining this member,
1314 it contains only the unrelocated offset of the function, so we have
1315 to do our own relocation. Later, when the dynamic linker actually
1316 runs, it relocates r_brk to be the actual address of _r_debug_state().
1317
1318 The debugger interface structure also contains an enumeration which
1319 is set to either RT_ADD or RT_DELETE prior to changing the mapping,
1320 depending upon whether or not the library is being mapped or unmapped,
1321 and then set to RT_CONSISTENT after the library is mapped/unmapped.
1322 */
1323
1324 static int
1325 enable_break (struct svr4_info *info, int from_tty)
1326 {
1327 struct minimal_symbol *msymbol;
1328 char **bkpt_namep;
1329 asection *interp_sect;
1330 gdb_byte *interp_name;
1331 CORE_ADDR sym_addr;
1332
1333 info->interp_text_sect_low = info->interp_text_sect_high = 0;
1334 info->interp_plt_sect_low = info->interp_plt_sect_high = 0;
1335
1336 /* If we already have a shared library list in the target, and
1337 r_debug contains r_brk, set the breakpoint there - this should
1338 mean r_brk has already been relocated. Assume the dynamic linker
1339 is the object containing r_brk. */
1340
1341 solib_add (NULL, from_tty, &current_target, auto_solib_add);
1342 sym_addr = 0;
1343 if (info->debug_base && solib_svr4_r_map (info) != 0)
1344 sym_addr = solib_svr4_r_brk (info);
1345
1346 if (sym_addr != 0)
1347 {
1348 struct obj_section *os;
1349
1350 sym_addr = gdbarch_addr_bits_remove
1351 (target_gdbarch, gdbarch_convert_from_func_ptr_addr (target_gdbarch,
1352 sym_addr,
1353 &current_target));
1354
1355 /* On at least some versions of Solaris there's a dynamic relocation
1356 on _r_debug.r_brk and SYM_ADDR may not be relocated yet, e.g., if
1357 we get control before the dynamic linker has self-relocated.
1358 Check if SYM_ADDR is in a known section, if it is assume we can
1359 trust its value. This is just a heuristic though, it could go away
1360 or be replaced if it's getting in the way.
1361
1362 On ARM we need to know whether the ISA of rtld_db_dlactivity (or
1363 however it's spelled in your particular system) is ARM or Thumb.
1364 That knowledge is encoded in the address; if it's Thumb, the low bit
1365 is 1. However, we've stripped that info above and it's not clear
1366 what all the consequences are of passing a non-addr_bits_remove'd
1367 address to create_solib_event_breakpoint. The call to
1368 find_pc_section verifies we know about the address and have some
1369 hope of computing the right kind of breakpoint to use (via
1370 symbol info). It does mean that GDB needs to be pointed at a
1371 non-stripped version of the dynamic linker in order to obtain
1372 information it already knows about. Sigh. */
1373
1374 os = find_pc_section (sym_addr);
1375 if (os != NULL)
1376 {
1377 /* Record the relocated start and end address of the dynamic linker
1378 text and plt section for svr4_in_dynsym_resolve_code. */
1379 bfd *tmp_bfd;
1380 CORE_ADDR load_addr;
1381
1382 tmp_bfd = os->objfile->obfd;
1383 load_addr = ANOFFSET (os->objfile->section_offsets,
1384 os->objfile->sect_index_text);
1385
1386 interp_sect = bfd_get_section_by_name (tmp_bfd, ".text");
1387 if (interp_sect)
1388 {
1389 info->interp_text_sect_low =
1390 bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
1391 info->interp_text_sect_high =
1392 info->interp_text_sect_low
1393 + bfd_section_size (tmp_bfd, interp_sect);
1394 }
1395 interp_sect = bfd_get_section_by_name (tmp_bfd, ".plt");
1396 if (interp_sect)
1397 {
1398 info->interp_plt_sect_low =
1399 bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
1400 info->interp_plt_sect_high =
1401 info->interp_plt_sect_low
1402 + bfd_section_size (tmp_bfd, interp_sect);
1403 }
1404
1405 create_solib_event_breakpoint (target_gdbarch, sym_addr);
1406 return 1;
1407 }
1408 }
1409
1410 /* Find the program interpreter; if not found, warn the user and drop
1411 into the old breakpoint at symbol code. */
1412 interp_name = find_program_interpreter ();
1413 if (interp_name)
1414 {
1415 CORE_ADDR load_addr = 0;
1416 int load_addr_found = 0;
1417 int loader_found_in_list = 0;
1418 struct so_list *so;
1419 bfd *tmp_bfd = NULL;
1420 struct target_ops *tmp_bfd_target;
1421 volatile struct gdb_exception ex;
1422
1423 sym_addr = 0;
1424
1425 /* Now we need to figure out where the dynamic linker was
1426 loaded so that we can load its symbols and place a breakpoint
1427 in the dynamic linker itself.
1428
1429 This address is stored on the stack. However, I've been unable
1430 to find any magic formula to find it for Solaris (appears to
1431 be trivial on GNU/Linux). Therefore, we have to try an alternate
1432 mechanism to find the dynamic linker's base address. */
1433
1434 TRY_CATCH (ex, RETURN_MASK_ALL)
1435 {
1436 tmp_bfd = solib_bfd_open (interp_name);
1437 }
1438 if (tmp_bfd == NULL)
1439 goto bkpt_at_symbol;
1440
1441 /* Now convert the TMP_BFD into a target. That way target, as
1442 well as BFD operations can be used. Note that closing the
1443 target will also close the underlying bfd. */
1444 tmp_bfd_target = target_bfd_reopen (tmp_bfd);
1445
1446 /* On a running target, we can get the dynamic linker's base
1447 address from the shared library table. */
1448 so = master_so_list ();
1449 while (so)
1450 {
1451 if (svr4_same_1 (interp_name, so->so_original_name))
1452 {
1453 load_addr_found = 1;
1454 loader_found_in_list = 1;
1455 load_addr = LM_ADDR_CHECK (so, tmp_bfd);
1456 break;
1457 }
1458 so = so->next;
1459 }
1460
1461 /* If we were not able to find the base address of the loader
1462 from our so_list, then try using the AT_BASE auxiliary entry. */
1463 if (!load_addr_found)
1464 if (target_auxv_search (&current_target, AT_BASE, &load_addr) > 0)
1465 {
1466 int addr_bit = gdbarch_addr_bit (target_gdbarch);
1467
1468 /* Ensure LOAD_ADDR has proper sign in its possible upper bits so
1469 that `+ load_addr' will overflow CORE_ADDR width not creating
1470 invalid addresses like 0x101234567 for 32bit inferiors on 64bit
1471 GDB. */
1472
1473 if (addr_bit < (sizeof (CORE_ADDR) * HOST_CHAR_BIT))
1474 {
1475 CORE_ADDR space_size = (CORE_ADDR) 1 << addr_bit;
1476 CORE_ADDR tmp_entry_point = exec_entry_point (tmp_bfd,
1477 tmp_bfd_target);
1478
1479 gdb_assert (load_addr < space_size);
1480
1481 /* TMP_ENTRY_POINT exceeding SPACE_SIZE would be for prelinked
1482 64bit ld.so with 32bit executable, it should not happen. */
1483
1484 if (tmp_entry_point < space_size
1485 && tmp_entry_point + load_addr >= space_size)
1486 load_addr -= space_size;
1487 }
1488
1489 load_addr_found = 1;
1490 }
1491
1492 /* Otherwise we find the dynamic linker's base address by examining
1493 the current pc (which should point at the entry point for the
1494 dynamic linker) and subtracting the offset of the entry point.
1495
1496 This is more fragile than the previous approaches, but is a good
1497 fallback method because it has actually been working well in
1498 most cases. */
1499 if (!load_addr_found)
1500 {
1501 struct regcache *regcache
1502 = get_thread_arch_regcache (inferior_ptid, target_gdbarch);
1503 load_addr = (regcache_read_pc (regcache)
1504 - exec_entry_point (tmp_bfd, tmp_bfd_target));
1505 }
1506
1507 if (!loader_found_in_list)
1508 {
1509 info->debug_loader_name = xstrdup (interp_name);
1510 info->debug_loader_offset_p = 1;
1511 info->debug_loader_offset = load_addr;
1512 solib_add (NULL, from_tty, &current_target, auto_solib_add);
1513 }
1514
1515 /* Record the relocated start and end address of the dynamic linker
1516 text and plt section for svr4_in_dynsym_resolve_code. */
1517 interp_sect = bfd_get_section_by_name (tmp_bfd, ".text");
1518 if (interp_sect)
1519 {
1520 info->interp_text_sect_low =
1521 bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
1522 info->interp_text_sect_high =
1523 info->interp_text_sect_low
1524 + bfd_section_size (tmp_bfd, interp_sect);
1525 }
1526 interp_sect = bfd_get_section_by_name (tmp_bfd, ".plt");
1527 if (interp_sect)
1528 {
1529 info->interp_plt_sect_low =
1530 bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
1531 info->interp_plt_sect_high =
1532 info->interp_plt_sect_low
1533 + bfd_section_size (tmp_bfd, interp_sect);
1534 }
1535
1536 /* Now try to set a breakpoint in the dynamic linker. */
1537 for (bkpt_namep = solib_break_names; *bkpt_namep != NULL; bkpt_namep++)
1538 {
1539 sym_addr = bfd_lookup_symbol (tmp_bfd, *bkpt_namep);
1540 if (sym_addr != 0)
1541 break;
1542 }
1543
1544 if (sym_addr != 0)
1545 /* Convert 'sym_addr' from a function pointer to an address.
1546 Because we pass tmp_bfd_target instead of the current
1547 target, this will always produce an unrelocated value. */
1548 sym_addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch,
1549 sym_addr,
1550 tmp_bfd_target);
1551
1552 /* We're done with both the temporary bfd and target. Remember,
1553 closing the target closes the underlying bfd. */
1554 target_close (tmp_bfd_target, 0);
1555
1556 if (sym_addr != 0)
1557 {
1558 create_solib_event_breakpoint (target_gdbarch, load_addr + sym_addr);
1559 xfree (interp_name);
1560 return 1;
1561 }
1562
1563 /* For whatever reason we couldn't set a breakpoint in the dynamic
1564 linker. Warn and drop into the old code. */
1565 bkpt_at_symbol:
1566 xfree (interp_name);
1567 warning (_("Unable to find dynamic linker breakpoint function.\n"
1568 "GDB will be unable to debug shared library initializers\n"
1569 "and track explicitly loaded dynamic code."));
1570 }
1571
1572 /* Scan through the lists of symbols, trying to look up the symbol and
1573 set a breakpoint there. Terminate loop when we/if we succeed. */
1574
1575 for (bkpt_namep = solib_break_names; *bkpt_namep != NULL; bkpt_namep++)
1576 {
1577 msymbol = lookup_minimal_symbol (*bkpt_namep, NULL, symfile_objfile);
1578 if ((msymbol != NULL) && (SYMBOL_VALUE_ADDRESS (msymbol) != 0))
1579 {
1580 sym_addr = SYMBOL_VALUE_ADDRESS (msymbol);
1581 sym_addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch,
1582 sym_addr,
1583 &current_target);
1584 create_solib_event_breakpoint (target_gdbarch, sym_addr);
1585 return 1;
1586 }
1587 }
1588
1589 for (bkpt_namep = bkpt_names; *bkpt_namep != NULL; bkpt_namep++)
1590 {
1591 msymbol = lookup_minimal_symbol (*bkpt_namep, NULL, symfile_objfile);
1592 if ((msymbol != NULL) && (SYMBOL_VALUE_ADDRESS (msymbol) != 0))
1593 {
1594 sym_addr = SYMBOL_VALUE_ADDRESS (msymbol);
1595 sym_addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch,
1596 sym_addr,
1597 &current_target);
1598 create_solib_event_breakpoint (target_gdbarch, sym_addr);
1599 return 1;
1600 }
1601 }
1602 return 0;
1603 }
1604
1605 /*
1606
1607 LOCAL FUNCTION
1608
1609 special_symbol_handling -- additional shared library symbol handling
1610
1611 SYNOPSIS
1612
1613 void special_symbol_handling ()
1614
1615 DESCRIPTION
1616
1617 Once the symbols from a shared object have been loaded in the usual
1618 way, we are called to do any system specific symbol handling that
1619 is needed.
1620
1621 For SunOS4, this consisted of grunging around in the dynamic
1622 linkers structures to find symbol definitions for "common" symbols
1623 and adding them to the minimal symbol table for the runtime common
1624 objfile.
1625
1626 However, for SVR4, there's nothing to do.
1627
1628 */
1629
1630 static void
1631 svr4_special_symbol_handling (void)
1632 {
1633 svr4_relocate_main_executable ();
1634 }
1635
1636 /* Read the ELF program headers from ABFD. Return the contents and
1637 set *PHDRS_SIZE to the size of the program headers. */
1638
1639 static gdb_byte *
1640 read_program_headers_from_bfd (bfd *abfd, int *phdrs_size)
1641 {
1642 Elf_Internal_Ehdr *ehdr;
1643 gdb_byte *buf;
1644
1645 ehdr = elf_elfheader (abfd);
1646
1647 *phdrs_size = ehdr->e_phnum * ehdr->e_phentsize;
1648 if (*phdrs_size == 0)
1649 return NULL;
1650
1651 buf = xmalloc (*phdrs_size);
1652 if (bfd_seek (abfd, ehdr->e_phoff, SEEK_SET) != 0
1653 || bfd_bread (buf, *phdrs_size, abfd) != *phdrs_size)
1654 {
1655 xfree (buf);
1656 return NULL;
1657 }
1658
1659 return buf;
1660 }
1661
1662 /* We relocate all of the sections by the same amount. This
1663 behavior is mandated by recent editions of the System V ABI.
1664 According to the System V Application Binary Interface,
1665 Edition 4.1, page 5-5:
1666
1667 ... Though the system chooses virtual addresses for
1668 individual processes, it maintains the segments' relative
1669 positions. Because position-independent code uses relative
1670 addressing between segments, the difference between
1671 virtual addresses in memory must match the difference
1672 between virtual addresses in the file. The difference
1673 between the virtual address of any segment in memory and
1674 the corresponding virtual address in the file is thus a
1675 single constant value for any one executable or shared
1676 object in a given process. This difference is the base
1677 address. One use of the base address is to relocate the
1678 memory image of the program during dynamic linking.
1679
1680 The same language also appears in Edition 4.0 of the System V
1681 ABI and is left unspecified in some of the earlier editions.
1682
1683 Decide if the objfile needs to be relocated. As indicated above, we will
1684 only be here when execution is stopped. But during attachment the PC can be
1685 at an arbitrary address, so regcache_read_pc can be misleading (unlike the
1686 auxv AT_ENTRY value). Moreover, for an executable with an interpreter section,
1687 regcache_read_pc would point into the interpreter and not the main executable.
1688
1689 So, to summarize, relocations are necessary when the start address obtained
1690 from the executable is different from the address in auxv AT_ENTRY entry.
1691
1692 [ The astute reader will note that we also test to make sure that
1693 the executable in question has the DYNAMIC flag set. It is my
1694 opinion that this test is unnecessary (undesirable even). It
1695 was added to avoid inadvertent relocation of an executable
1696 whose e_type member in the ELF header is not ET_DYN. There may
1697 be a time in the future when it is desirable to do relocations
1698 on other types of files as well in which case this condition
1699 should either be removed or modified to accommodate the new file
1700 type. - Kevin, Nov 2000. ] */
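/* Illustrative example (hypothetical, editorial): a PIE whose ELF header
   records a start address of 0x670, but whose auxv AT_ENTRY entry reports
   0x7ffff7dd9670, yields a displacement of 0x7ffff7dd9000; that candidate
   is then checked for page alignment and against the program headers in
   svr4_exec_displacement below.  */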
1701
1702 static CORE_ADDR
1703 svr4_exec_displacement (void)
1704 {
1705 /* ENTRY_POINT is a possible function descriptor - before
1706 a call to gdbarch_convert_from_func_ptr_addr. */
1707 CORE_ADDR entry_point, displacement;
1708
1709 if (exec_bfd == NULL)
1710 return 0;
1711
1712 /* Therefore for ELF it is ET_EXEC and not ET_DYN. Both shared libraries
1713 being executed themselves and PIE (Position Independent Executable)
1714 executables are ET_DYN. */
1715
1716 if ((bfd_get_file_flags (exec_bfd) & DYNAMIC) == 0)
1717 return 0;
1718
1719 if (target_auxv_search (&current_target, AT_ENTRY, &entry_point) <= 0)
1720 return 0;
1721
1722 displacement = entry_point - bfd_get_start_address (exec_bfd);
1723
1724 /* Verify the DISPLACEMENT candidate complies with the required page
1725 alignment. It is cheaper than the program headers comparison below. */
1726
1727 if (bfd_get_flavour (exec_bfd) == bfd_target_elf_flavour)
1728 {
1729 const struct elf_backend_data *elf = get_elf_backend_data (exec_bfd);
1730
1731 /* p_align of PT_LOAD segments does not specify any alignment but
1732 only congruency of addresses:
1733 p_offset % p_align == p_vaddr % p_align
1734 The kernel is free to load the executable with a lower alignment. */
1735
1736 if ((displacement & (elf->minpagesize - 1)) != 0)
1737 return 0;
1738 }
1739
1740 /* Verify that the auxiliary vector describes the same file as exec_bfd, by
1741 comparing their program headers. If the program headers in the auxiliary
1742 vector do not match the program headers in the executable, then we are
1743 looking at a different file than the one used by the kernel - for
1744 instance, "gdb program" connected to "gdbserver :PORT ld.so program". */
1745
1746 if (bfd_get_flavour (exec_bfd) == bfd_target_elf_flavour)
1747 {
1748 /* Be optimistic and clear OK only if GDB was able to verify the headers
1749 really do not match. */
1750 int phdrs_size, phdrs2_size, ok = 1;
1751 gdb_byte *buf, *buf2;
1752
1753 buf = read_program_header (-1, &phdrs_size, NULL);
1754 buf2 = read_program_headers_from_bfd (exec_bfd, &phdrs2_size);
1755 if (buf != NULL && buf2 != NULL
1756 && (phdrs_size != phdrs2_size
1757 || memcmp (buf, buf2, phdrs_size) != 0))
1758 ok = 0;
1759
1760 xfree (buf);
1761 xfree (buf2);
1762
1763 if (!ok)
1764 return 0;
1765 }
1766
1767 return displacement;
1768 }
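
 /* A minimal worked example of the computation above, using hypothetical
    values for a PIE on a 64-bit GNU/Linux system:

      bfd_get_start_address (exec_bfd)   == 0x530             (link-time entry)
      AT_ENTRY from the auxiliary vector == 0x555555554530    (run-time entry)

      displacement = 0x555555554530 - 0x530 = 0x555555554000

    which is page aligned and, provided the program headers read from the
    inferior match those in exec_bfd, is returned as the displacement to
    apply to the main executable.  */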
1769
1770 /* Relocate the main executable. This function should be called upon
1771 stopping the inferior process at the entry point to the program.
1772 The entry point from BFD is compared to the auxv AT_ENTRY value and, if they
1773 differ, the main executable is relocated by the appropriate amount. */
1774
1775 static void
1776 svr4_relocate_main_executable (void)
1777 {
1778 CORE_ADDR displacement = svr4_exec_displacement ();
1779
1780 /* Even if DISPLACEMENT is 0, still apply the relocation: it is the new
1781 difference between in-memory and in-file addresses, and a previous call to
1782 this function could already have relocated the executable to an improper address. */
1783
1784 if (symfile_objfile)
1785 {
1786 struct section_offsets *new_offsets;
1787 int i;
1788
1789 new_offsets = alloca (symfile_objfile->num_sections
1790 * sizeof (*new_offsets));
1791
1792 for (i = 0; i < symfile_objfile->num_sections; i++)
1793 new_offsets->offsets[i] = displacement;
1794
1795 objfile_relocate (symfile_objfile, new_offsets);
1796 }
1797 else if (exec_bfd)
1798 {
1799 asection *asect;
1800
1801 for (asect = exec_bfd->sections; asect != NULL; asect = asect->next)
1802 exec_set_section_address (bfd_get_filename (exec_bfd), asect->index,
1803 (bfd_section_vma (exec_bfd, asect)
1804 + displacement));
1805 }
1806 }
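
 /* The effect of the relocation above, sketched with a hypothetical
    displacement of 0x555555554000: every entry of NEW_OFFSETS is set to
    that value, so a symbol whose file address is 0x730 is subsequently
    reported at 0x555555554730; in the symbol-less branch the same shift
    is applied directly to each exec section's start address.  */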
1807
1808 /*
1809
1810 GLOBAL FUNCTION
1811
1812 svr4_solib_create_inferior_hook -- shared library startup support
1813
1814 SYNOPSIS
1815
1816 void svr4_solib_create_inferior_hook (int from_tty)
1817
1818 DESCRIPTION
1819
1820 When gdb starts up the inferior, it nurses it along (through the
1821 shell) until it is ready to execute its first instruction. At this
1822 point, this function gets called via expansion of the macro
1823 SOLIB_CREATE_INFERIOR_HOOK.
1824
1825 For SunOS executables, this first instruction is typically the
1826 one at "_start", or a similar text label, regardless of whether
1827 the executable is statically or dynamically linked. The runtime
1828 startup code takes care of dynamically linking in any shared
1829 libraries, once gdb allows the inferior to continue.
1830
1831 For SVR4 executables, this first instruction is either the first
1832 instruction in the dynamic linker (for dynamically linked
1833 executables) or the instruction at "start" for statically linked
1834 executables. For dynamically linked executables, the system
1835 first exec's /lib/libc.so.N, which contains the dynamic linker,
1836 and starts it running. The dynamic linker maps in any needed
1837 shared libraries, maps in the actual user executable, and then
1838 jumps to "start" in the user executable.
1839
1840 For both SunOS shared libraries, and SVR4 shared libraries, we
1841 can arrange to cooperate with the dynamic linker to discover the
1842 names of shared libraries that are dynamically linked, and the
1843 base addresses to which they are linked.
1844
1845 This function is responsible for discovering those names and
1846 addresses, and saving sufficient information about them to allow
1847 their symbols to be read at a later time.
1848
1849 FIXME
1850
1851 Between enable_break() and disable_break(), this code does not
1852 properly handle hitting breakpoints which the user might have
1853 set in the startup code or in the dynamic linker itself. Proper
1854 handling will probably have to wait until the implementation is
1855 changed to use the "breakpoint handler function" method.
1856
1857 Also, what if child has exit()ed? Must exit loop somehow.
1858 */
1859
1860 static void
1861 svr4_solib_create_inferior_hook (int from_tty)
1862 {
1863 struct inferior *inf;
1864 struct thread_info *tp;
1865 struct svr4_info *info;
1866
1867 info = get_svr4_info ();
1868
1869 /* Relocate the main executable if necessary. */
1870 if (current_inferior ()->attach_flag == 0)
1871 svr4_relocate_main_executable ();
1872
1873 if (!svr4_have_link_map_offsets ())
1874 return;
1875
1876 if (!enable_break (info, from_tty))
1877 return;
1878
1879 #if defined(_SCO_DS)
1880 /* SCO needs the loop below, other systems should be using the
1881 special shared library breakpoints and the shared library breakpoint
1882 service routine.
1883
1884 Now run the target. It will eventually hit the breakpoint, at
1885 which point all of the libraries will have been mapped in and we
1886 can go groveling around in the dynamic linker structures to find
1887 out what we need to know about them. */
1888
1889 inf = current_inferior ();
1890 tp = inferior_thread ();
1891
1892 clear_proceed_status ();
1893 inf->stop_soon = STOP_QUIETLY;
1894 tp->stop_signal = TARGET_SIGNAL_0;
1895 do
1896 {
1897 target_resume (pid_to_ptid (-1), 0, tp->stop_signal);
1898 wait_for_inferior (0);
1899 }
1900 while (tp->stop_signal != TARGET_SIGNAL_TRAP);
1901 inf->stop_soon = NO_STOP_QUIETLY;
1902 #endif /* defined(_SCO_DS) */
1903 }
1904
1905 static void
1906 svr4_clear_solib (void)
1907 {
1908 struct svr4_info *info;
1909
1910 info = get_svr4_info ();
1911 info->debug_base = 0;
1912 info->debug_loader_offset_p = 0;
1913 info->debug_loader_offset = 0;
1914 xfree (info->debug_loader_name);
1915 info->debug_loader_name = NULL;
1916 }
1917
1918 static void
1919 svr4_free_so (struct so_list *so)
1920 {
1921 xfree (so->lm_info->lm);
1922 xfree (so->lm_info);
1923 }
1924
1925
1926 /* Clear any bits of ADDR that wouldn't fit in a target-format
1927 data pointer. "Data pointer" here refers to whatever sort of
1928 address the dynamic linker uses to manage its sections. At the
1929 moment, we don't support shared libraries on any processors where
1930 code and data pointers are different sizes.
1931
1932 This isn't really the right solution. What we really need here is
1933 a way to do arithmetic on CORE_ADDR values that respects the
1934 natural pointer/address correspondence. (For example, on the MIPS,
1935 converting a 32-bit pointer to a 64-bit CORE_ADDR requires you to
1936 sign-extend the value. There, simply truncating the bits above
1937 gdbarch_ptr_bit, as we do below, is no good.) This should probably
1938 be a new gdbarch method or something. */
1939 static CORE_ADDR
1940 svr4_truncate_ptr (CORE_ADDR addr)
1941 {
1942 if (gdbarch_ptr_bit (target_gdbarch) == sizeof (CORE_ADDR) * 8)
1943 /* We don't need to truncate anything, and the bit twiddling below
1944 will fail due to overflow problems. */
1945 return addr;
1946 else
1947 return addr & (((CORE_ADDR) 1 << gdbarch_ptr_bit (target_gdbarch)) - 1);
1948 }
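
 /* A worked example: on a target where gdbarch_ptr_bit is 32 while
    CORE_ADDR is 64 bits wide, the mask is ((CORE_ADDR) 1 << 32) - 1
    == 0xffffffff, so svr4_truncate_ptr (0xffffffff80001234) yields
    0x80001234.  When the pointer width already equals the CORE_ADDR
    width, the address is returned unchanged.  */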
1949
1950
1951 static void
1952 svr4_relocate_section_addresses (struct so_list *so,
1953 struct target_section *sec)
1954 {
1955 sec->addr = svr4_truncate_ptr (sec->addr + LM_ADDR_CHECK (so,
1956 sec->bfd));
1957 sec->endaddr = svr4_truncate_ptr (sec->endaddr + LM_ADDR_CHECK (so,
1958 sec->bfd));
1959 }
1960 \f
1961
1962 /* Architecture-specific operations. */
1963
1964 /* Per-architecture data key. */
1965 static struct gdbarch_data *solib_svr4_data;
1966
1967 struct solib_svr4_ops
1968 {
1969 /* Return a description of the layout of `struct link_map'. */
1970 struct link_map_offsets *(*fetch_link_map_offsets)(void);
1971 };
1972
1973 /* Return a default for the architecture-specific operations. */
1974
1975 static void *
1976 solib_svr4_init (struct obstack *obstack)
1977 {
1978 struct solib_svr4_ops *ops;
1979
1980 ops = OBSTACK_ZALLOC (obstack, struct solib_svr4_ops);
1981 ops->fetch_link_map_offsets = NULL;
1982 return ops;
1983 }
1984
1985 /* Set the architecture-specific `struct link_map_offsets' fetcher for
1986 GDBARCH to FLMO. Also, install SVR4 solib_ops into GDBARCH. */
1987
1988 void
1989 set_solib_svr4_fetch_link_map_offsets (struct gdbarch *gdbarch,
1990 struct link_map_offsets *(*flmo) (void))
1991 {
1992 struct solib_svr4_ops *ops = gdbarch_data (gdbarch, solib_svr4_data);
1993
1994 ops->fetch_link_map_offsets = flmo;
1995
1996 set_solib_ops (gdbarch, &svr4_so_ops);
1997 }
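
 /* Typical usage, sketched after the way an architecture's *-tdep.c
    gdbarch initialization installs SVR4 shared library support (the
    function name below is illustrative, not an existing one):

      static void
      example_svr4_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
      {
        set_solib_svr4_fetch_link_map_offsets
          (gdbarch, svr4_ilp32_fetch_link_map_offsets);
      }

    This both records the link_map_offsets fetcher for the architecture and
    installs svr4_so_ops as its shared library operations.  */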
1998
1999 /* Fetch a link_map_offsets structure using the architecture-specific
2000 `struct link_map_offsets' fetcher. */
2001
2002 static struct link_map_offsets *
2003 svr4_fetch_link_map_offsets (void)
2004 {
2005 struct solib_svr4_ops *ops = gdbarch_data (target_gdbarch, solib_svr4_data);
2006
2007 gdb_assert (ops->fetch_link_map_offsets);
2008 return ops->fetch_link_map_offsets ();
2009 }
2010
2011 /* Return 1 if a link map offset fetcher has been defined, 0 otherwise. */
2012
2013 static int
2014 svr4_have_link_map_offsets (void)
2015 {
2016 struct solib_svr4_ops *ops = gdbarch_data (target_gdbarch, solib_svr4_data);
2017 return (ops->fetch_link_map_offsets != NULL);
2018 }
2019 \f
2020
2021 /* Most OSes that have SVR4-style ELF dynamic libraries define a
2022 `struct r_debug' and a `struct link_map' that are binary compatible
2023 with the original SVR4 implementation. */
2024
2025 /* Fetch (and possibly build) an appropriate `struct link_map_offsets'
2026 for an ILP32 SVR4 system. */
2027
2028 struct link_map_offsets *
2029 svr4_ilp32_fetch_link_map_offsets (void)
2030 {
2031 static struct link_map_offsets lmo;
2032 static struct link_map_offsets *lmp = NULL;
2033
2034 if (lmp == NULL)
2035 {
2036 lmp = &lmo;
2037
2038 lmo.r_version_offset = 0;
2039 lmo.r_version_size = 4;
2040 lmo.r_map_offset = 4;
2041 lmo.r_brk_offset = 8;
2042 lmo.r_ldsomap_offset = 20;
2043
2044 /* Everything we need is in the first 20 bytes. */
2045 lmo.link_map_size = 20;
2046 lmo.l_addr_offset = 0;
2047 lmo.l_name_offset = 4;
2048 lmo.l_ld_offset = 8;
2049 lmo.l_next_offset = 12;
2050 lmo.l_prev_offset = 16;
2051 }
2052
2053 return lmp;
2054 }
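
 /* The ILP32 offsets above correspond to the traditional SVR4 layout,
    sketched here assuming 4-byte pointers (the r_ldsomap member is an
    assumption based on the Solaris extension of `struct r_debug'):

      struct r_debug {
        int              r_version;     offset 0
        struct link_map *r_map;         offset 4
        Elf32_Addr       r_brk;         offset 8
        ...
        struct link_map *r_ldsomap;     offset 20
      };

      struct link_map {
        Elf32_Addr       l_addr;        offset 0
        char            *l_name;        offset 4
        Elf32_Dyn       *l_ld;          offset 8
        struct link_map *l_next;        offset 12
        struct link_map *l_prev;        offset 16
      };  */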
2055
2056 /* Fetch (and possibly build) an appropriate `struct link_map_offsets'
2057 for an LP64 SVR4 system. */
2058
2059 struct link_map_offsets *
2060 svr4_lp64_fetch_link_map_offsets (void)
2061 {
2062 static struct link_map_offsets lmo;
2063 static struct link_map_offsets *lmp = NULL;
2064
2065 if (lmp == NULL)
2066 {
2067 lmp = &lmo;
2068
2069 lmo.r_version_offset = 0;
2070 lmo.r_version_size = 4;
2071 lmo.r_map_offset = 8;
2072 lmo.r_brk_offset = 16;
2073 lmo.r_ldsomap_offset = 40;
2074
2075 /* Everything we need is in the first 40 bytes. */
2076 lmo.link_map_size = 40;
2077 lmo.l_addr_offset = 0;
2078 lmo.l_name_offset = 8;
2079 lmo.l_ld_offset = 16;
2080 lmo.l_next_offset = 24;
2081 lmo.l_prev_offset = 32;
2082 }
2083
2084 return lmp;
2085 }
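
 /* With 8-byte pointers the same layout yields the LP64 offsets used above
    (assuming the Solaris-style member order sketched for ILP32): r_map at 8,
    r_brk at 16 and r_ldsomap at 40 within `struct r_debug', and l_name at 8,
    l_ld at 16, l_next at 24, l_prev at 32 within `struct link_map', giving
    the 40-byte link_map prefix that GDB reads.  */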
2086 \f
2087
2088 struct target_so_ops svr4_so_ops;
2089
2090 /* Look up a global symbol for ELF DSOs linked with -Bsymbolic. Such DSOs
2091 follow a different rule for symbol lookup: the lookup begins here in the
2092 DSO, not in the main executable. */
2093
2094 static struct symbol *
2095 elf_lookup_lib_symbol (const struct objfile *objfile,
2096 const char *name,
2097 const domain_enum domain)
2098 {
2099 bfd *abfd;
2100
2101 if (objfile == symfile_objfile)
2102 abfd = exec_bfd;
2103 else
2104 {
2105 /* OBJFILE should have been passed as the non-debug one. */
2106 gdb_assert (objfile->separate_debug_objfile_backlink == NULL);
2107
2108 abfd = objfile->obfd;
2109 }
2110
2111 if (abfd == NULL || scan_dyntag (DT_SYMBOLIC, abfd, NULL) != 1)
2112 return NULL;
2113
2114 return lookup_global_symbol_from_objfile (objfile, name, domain);
2115 }
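
 /* For reference, a DSO that takes this path is typically produced with
    something like (hypothetical file names):

      gcc -shared -fPIC -Wl,-Bsymbolic -o libfoo.so foo.c

    which usually makes the linker emit a DT_SYMBOLIC dynamic tag (some
    linkers may additionally, or instead, set DF_SYMBOLIC in DT_FLAGS);
    "readelf -d libfoo.so" then shows the SYMBOLIC entry that scan_dyntag
    looks for above.  */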
2116
2117 extern initialize_file_ftype _initialize_svr4_solib; /* -Wmissing-prototypes */
2118
2119 void
2120 _initialize_svr4_solib (void)
2121 {
2122 solib_svr4_data = gdbarch_data_register_pre_init (solib_svr4_init);
2123 solib_svr4_pspace_data
2124 = register_program_space_data_with_cleanup (svr4_pspace_data_cleanup);
2125
2126 svr4_so_ops.relocate_section_addresses = svr4_relocate_section_addresses;
2127 svr4_so_ops.free_so = svr4_free_so;
2128 svr4_so_ops.clear_solib = svr4_clear_solib;
2129 svr4_so_ops.solib_create_inferior_hook = svr4_solib_create_inferior_hook;
2130 svr4_so_ops.special_symbol_handling = svr4_special_symbol_handling;
2131 svr4_so_ops.current_sos = svr4_current_sos;
2132 svr4_so_ops.open_symbol_file_object = open_symbol_file_object;
2133 svr4_so_ops.in_dynsym_resolve_code = svr4_in_dynsym_resolve_code;
2134 svr4_so_ops.bfd_open = solib_bfd_open;
2135 svr4_so_ops.lookup_lib_global_symbol = elf_lookup_lib_symbol;
2136 svr4_so_ops.same = svr4_same;
2137 svr4_so_ops.keep_data_in_core = svr4_keep_data_in_core;
2138 }