X86-64: Allow copy relocs for building PIE
[binutils-gdb.git] / bfd / elf64-x86-64.c
1 /* X86-64 specific support for ELF
2 Copyright (C) 2000-2014 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka <jh@suse.cz>.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
21
22 #include "sysdep.h"
23 #include "bfd.h"
24 #include "bfdlink.h"
25 #include "libbfd.h"
26 #include "elf-bfd.h"
27 #include "elf-nacl.h"
28 #include "bfd_stdint.h"
29 #include "objalloc.h"
30 #include "hashtab.h"
31 #include "dwarf2.h"
32 #include "libiberty.h"
33
34 #include "elf/x86-64.h"
35
36 #ifdef CORE_HEADER
37 #include <stdarg.h>
38 #include CORE_HEADER
39 #endif
40
41 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
42 #define MINUS_ONE (~ (bfd_vma) 0)
43
44 /* Since 32-bit and 64-bit x86-64 encode the relocation type in an
45 identical manner, we use ELF32_R_TYPE instead of ELF64_R_TYPE to get
46 the relocation type. We also use ELF_ST_TYPE instead of ELF64_ST_TYPE
47 since they are the same. */
48
49 #define ABI_64_P(abfd) \
50 (get_elf_backend_data (abfd)->s->elfclass == ELFCLASS64)
51
52 /* The relocation "howto" table. Order of fields:
53 type, rightshift, size, bitsize, pc_relative, bitpos, complain_on_overflow,
54 special_function, name, partial_inplace, src_mask, dst_mask, pcrel_offset. */
55 static reloc_howto_type x86_64_elf_howto_table[] =
56 {
57 HOWTO(R_X86_64_NONE, 0, 0, 0, FALSE, 0, complain_overflow_dont,
58 bfd_elf_generic_reloc, "R_X86_64_NONE", FALSE, 0x00000000, 0x00000000,
59 FALSE),
60 HOWTO(R_X86_64_64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
61 bfd_elf_generic_reloc, "R_X86_64_64", FALSE, MINUS_ONE, MINUS_ONE,
62 FALSE),
63 HOWTO(R_X86_64_PC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
64 bfd_elf_generic_reloc, "R_X86_64_PC32", FALSE, 0xffffffff, 0xffffffff,
65 TRUE),
66 HOWTO(R_X86_64_GOT32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
67 bfd_elf_generic_reloc, "R_X86_64_GOT32", FALSE, 0xffffffff, 0xffffffff,
68 FALSE),
69 HOWTO(R_X86_64_PLT32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
70 bfd_elf_generic_reloc, "R_X86_64_PLT32", FALSE, 0xffffffff, 0xffffffff,
71 TRUE),
72 HOWTO(R_X86_64_COPY, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
73 bfd_elf_generic_reloc, "R_X86_64_COPY", FALSE, 0xffffffff, 0xffffffff,
74 FALSE),
75 HOWTO(R_X86_64_GLOB_DAT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
76 bfd_elf_generic_reloc, "R_X86_64_GLOB_DAT", FALSE, MINUS_ONE,
77 MINUS_ONE, FALSE),
78 HOWTO(R_X86_64_JUMP_SLOT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
79 bfd_elf_generic_reloc, "R_X86_64_JUMP_SLOT", FALSE, MINUS_ONE,
80 MINUS_ONE, FALSE),
81 HOWTO(R_X86_64_RELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
82 bfd_elf_generic_reloc, "R_X86_64_RELATIVE", FALSE, MINUS_ONE,
83 MINUS_ONE, FALSE),
84 HOWTO(R_X86_64_GOTPCREL, 0, 2, 32, TRUE, 0, complain_overflow_signed,
85 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL", FALSE, 0xffffffff,
86 0xffffffff, TRUE),
87 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
88 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
89 FALSE),
90 HOWTO(R_X86_64_32S, 0, 2, 32, FALSE, 0, complain_overflow_signed,
91 bfd_elf_generic_reloc, "R_X86_64_32S", FALSE, 0xffffffff, 0xffffffff,
92 FALSE),
93 HOWTO(R_X86_64_16, 0, 1, 16, FALSE, 0, complain_overflow_bitfield,
94 bfd_elf_generic_reloc, "R_X86_64_16", FALSE, 0xffff, 0xffff, FALSE),
95 HOWTO(R_X86_64_PC16,0, 1, 16, TRUE, 0, complain_overflow_bitfield,
96 bfd_elf_generic_reloc, "R_X86_64_PC16", FALSE, 0xffff, 0xffff, TRUE),
97 HOWTO(R_X86_64_8, 0, 0, 8, FALSE, 0, complain_overflow_bitfield,
98 bfd_elf_generic_reloc, "R_X86_64_8", FALSE, 0xff, 0xff, FALSE),
99 HOWTO(R_X86_64_PC8, 0, 0, 8, TRUE, 0, complain_overflow_signed,
100 bfd_elf_generic_reloc, "R_X86_64_PC8", FALSE, 0xff, 0xff, TRUE),
101 HOWTO(R_X86_64_DTPMOD64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
102 bfd_elf_generic_reloc, "R_X86_64_DTPMOD64", FALSE, MINUS_ONE,
103 MINUS_ONE, FALSE),
104 HOWTO(R_X86_64_DTPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
105 bfd_elf_generic_reloc, "R_X86_64_DTPOFF64", FALSE, MINUS_ONE,
106 MINUS_ONE, FALSE),
107 HOWTO(R_X86_64_TPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
108 bfd_elf_generic_reloc, "R_X86_64_TPOFF64", FALSE, MINUS_ONE,
109 MINUS_ONE, FALSE),
110 HOWTO(R_X86_64_TLSGD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
111 bfd_elf_generic_reloc, "R_X86_64_TLSGD", FALSE, 0xffffffff,
112 0xffffffff, TRUE),
113 HOWTO(R_X86_64_TLSLD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
114 bfd_elf_generic_reloc, "R_X86_64_TLSLD", FALSE, 0xffffffff,
115 0xffffffff, TRUE),
116 HOWTO(R_X86_64_DTPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
117 bfd_elf_generic_reloc, "R_X86_64_DTPOFF32", FALSE, 0xffffffff,
118 0xffffffff, FALSE),
119 HOWTO(R_X86_64_GOTTPOFF, 0, 2, 32, TRUE, 0, complain_overflow_signed,
120 bfd_elf_generic_reloc, "R_X86_64_GOTTPOFF", FALSE, 0xffffffff,
121 0xffffffff, TRUE),
122 HOWTO(R_X86_64_TPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
123 bfd_elf_generic_reloc, "R_X86_64_TPOFF32", FALSE, 0xffffffff,
124 0xffffffff, FALSE),
125 HOWTO(R_X86_64_PC64, 0, 4, 64, TRUE, 0, complain_overflow_bitfield,
126 bfd_elf_generic_reloc, "R_X86_64_PC64", FALSE, MINUS_ONE, MINUS_ONE,
127 TRUE),
128 HOWTO(R_X86_64_GOTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
129 bfd_elf_generic_reloc, "R_X86_64_GOTOFF64",
130 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
131 HOWTO(R_X86_64_GOTPC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
132 bfd_elf_generic_reloc, "R_X86_64_GOTPC32",
133 FALSE, 0xffffffff, 0xffffffff, TRUE),
134 HOWTO(R_X86_64_GOT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
135 bfd_elf_generic_reloc, "R_X86_64_GOT64", FALSE, MINUS_ONE, MINUS_ONE,
136 FALSE),
137 HOWTO(R_X86_64_GOTPCREL64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
138 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL64", FALSE, MINUS_ONE,
139 MINUS_ONE, TRUE),
140 HOWTO(R_X86_64_GOTPC64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
141 bfd_elf_generic_reloc, "R_X86_64_GOTPC64",
142 FALSE, MINUS_ONE, MINUS_ONE, TRUE),
143 HOWTO(R_X86_64_GOTPLT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
144 bfd_elf_generic_reloc, "R_X86_64_GOTPLT64", FALSE, MINUS_ONE,
145 MINUS_ONE, FALSE),
146 HOWTO(R_X86_64_PLTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
147 bfd_elf_generic_reloc, "R_X86_64_PLTOFF64", FALSE, MINUS_ONE,
148 MINUS_ONE, FALSE),
149 HOWTO(R_X86_64_SIZE32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
150 bfd_elf_generic_reloc, "R_X86_64_SIZE32", FALSE, 0xffffffff, 0xffffffff,
151 FALSE),
152 HOWTO(R_X86_64_SIZE64, 0, 4, 64, FALSE, 0, complain_overflow_unsigned,
153 bfd_elf_generic_reloc, "R_X86_64_SIZE64", FALSE, MINUS_ONE, MINUS_ONE,
154 FALSE),
155 HOWTO(R_X86_64_GOTPC32_TLSDESC, 0, 2, 32, TRUE, 0,
156 complain_overflow_bitfield, bfd_elf_generic_reloc,
157 "R_X86_64_GOTPC32_TLSDESC",
158 FALSE, 0xffffffff, 0xffffffff, TRUE),
159 HOWTO(R_X86_64_TLSDESC_CALL, 0, 0, 0, FALSE, 0,
160 complain_overflow_dont, bfd_elf_generic_reloc,
161 "R_X86_64_TLSDESC_CALL",
162 FALSE, 0, 0, FALSE),
163 HOWTO(R_X86_64_TLSDESC, 0, 4, 64, FALSE, 0,
164 complain_overflow_bitfield, bfd_elf_generic_reloc,
165 "R_X86_64_TLSDESC",
166 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
167 HOWTO(R_X86_64_IRELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
168 bfd_elf_generic_reloc, "R_X86_64_IRELATIVE", FALSE, MINUS_ONE,
169 MINUS_ONE, FALSE),
170 HOWTO(R_X86_64_RELATIVE64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
171 bfd_elf_generic_reloc, "R_X86_64_RELATIVE64", FALSE, MINUS_ONE,
172 MINUS_ONE, FALSE),
173 HOWTO(R_X86_64_PC32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
174 bfd_elf_generic_reloc, "R_X86_64_PC32_BND", FALSE, 0xffffffff, 0xffffffff,
175 TRUE),
176 HOWTO(R_X86_64_PLT32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
177 bfd_elf_generic_reloc, "R_X86_64_PLT32_BND", FALSE, 0xffffffff, 0xffffffff,
178 TRUE),
179
180 /* We have a gap in the reloc numbers here.
181 R_X86_64_standard counts the number up to this point, and
182 R_X86_64_vt_offset is the value to subtract from a reloc type of
183 R_X86_64_GNU_VT* to form an index into this table. */
184 #define R_X86_64_standard (R_X86_64_PLT32_BND + 1)
185 #define R_X86_64_vt_offset (R_X86_64_GNU_VTINHERIT - R_X86_64_standard)
186
187 /* GNU extension to record C++ vtable hierarchy. */
188 HOWTO (R_X86_64_GNU_VTINHERIT, 0, 4, 0, FALSE, 0, complain_overflow_dont,
189 NULL, "R_X86_64_GNU_VTINHERIT", FALSE, 0, 0, FALSE),
190
191 /* GNU extension to record C++ vtable member usage. */
192 HOWTO (R_X86_64_GNU_VTENTRY, 0, 4, 0, FALSE, 0, complain_overflow_dont,
193 _bfd_elf_rel_vtable_reloc_fn, "R_X86_64_GNU_VTENTRY", FALSE, 0, 0,
194 FALSE),
195
196 /* Use complain_overflow_bitfield on R_X86_64_32 for x32. */
197 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
198 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
199 FALSE)
200 };
201
202 #define IS_X86_64_PCREL_TYPE(TYPE) \
203 ( ((TYPE) == R_X86_64_PC8) \
204 || ((TYPE) == R_X86_64_PC16) \
205 || ((TYPE) == R_X86_64_PC32) \
206 || ((TYPE) == R_X86_64_PC32_BND) \
207 || ((TYPE) == R_X86_64_PC64))
208
209 /* Map BFD relocs to the x86_64 elf relocs. */
210 struct elf_reloc_map
211 {
212 bfd_reloc_code_real_type bfd_reloc_val;
213 unsigned char elf_reloc_val;
214 };
215
216 static const struct elf_reloc_map x86_64_reloc_map[] =
217 {
218 { BFD_RELOC_NONE, R_X86_64_NONE, },
219 { BFD_RELOC_64, R_X86_64_64, },
220 { BFD_RELOC_32_PCREL, R_X86_64_PC32, },
221 { BFD_RELOC_X86_64_GOT32, R_X86_64_GOT32,},
222 { BFD_RELOC_X86_64_PLT32, R_X86_64_PLT32,},
223 { BFD_RELOC_X86_64_COPY, R_X86_64_COPY, },
224 { BFD_RELOC_X86_64_GLOB_DAT, R_X86_64_GLOB_DAT, },
225 { BFD_RELOC_X86_64_JUMP_SLOT, R_X86_64_JUMP_SLOT, },
226 { BFD_RELOC_X86_64_RELATIVE, R_X86_64_RELATIVE, },
227 { BFD_RELOC_X86_64_GOTPCREL, R_X86_64_GOTPCREL, },
228 { BFD_RELOC_32, R_X86_64_32, },
229 { BFD_RELOC_X86_64_32S, R_X86_64_32S, },
230 { BFD_RELOC_16, R_X86_64_16, },
231 { BFD_RELOC_16_PCREL, R_X86_64_PC16, },
232 { BFD_RELOC_8, R_X86_64_8, },
233 { BFD_RELOC_8_PCREL, R_X86_64_PC8, },
234 { BFD_RELOC_X86_64_DTPMOD64, R_X86_64_DTPMOD64, },
235 { BFD_RELOC_X86_64_DTPOFF64, R_X86_64_DTPOFF64, },
236 { BFD_RELOC_X86_64_TPOFF64, R_X86_64_TPOFF64, },
237 { BFD_RELOC_X86_64_TLSGD, R_X86_64_TLSGD, },
238 { BFD_RELOC_X86_64_TLSLD, R_X86_64_TLSLD, },
239 { BFD_RELOC_X86_64_DTPOFF32, R_X86_64_DTPOFF32, },
240 { BFD_RELOC_X86_64_GOTTPOFF, R_X86_64_GOTTPOFF, },
241 { BFD_RELOC_X86_64_TPOFF32, R_X86_64_TPOFF32, },
242 { BFD_RELOC_64_PCREL, R_X86_64_PC64, },
243 { BFD_RELOC_X86_64_GOTOFF64, R_X86_64_GOTOFF64, },
244 { BFD_RELOC_X86_64_GOTPC32, R_X86_64_GOTPC32, },
245 { BFD_RELOC_X86_64_GOT64, R_X86_64_GOT64, },
246 { BFD_RELOC_X86_64_GOTPCREL64,R_X86_64_GOTPCREL64, },
247 { BFD_RELOC_X86_64_GOTPC64, R_X86_64_GOTPC64, },
248 { BFD_RELOC_X86_64_GOTPLT64, R_X86_64_GOTPLT64, },
249 { BFD_RELOC_X86_64_PLTOFF64, R_X86_64_PLTOFF64, },
250 { BFD_RELOC_SIZE32, R_X86_64_SIZE32, },
251 { BFD_RELOC_SIZE64, R_X86_64_SIZE64, },
252 { BFD_RELOC_X86_64_GOTPC32_TLSDESC, R_X86_64_GOTPC32_TLSDESC, },
253 { BFD_RELOC_X86_64_TLSDESC_CALL, R_X86_64_TLSDESC_CALL, },
254 { BFD_RELOC_X86_64_TLSDESC, R_X86_64_TLSDESC, },
255 { BFD_RELOC_X86_64_IRELATIVE, R_X86_64_IRELATIVE, },
256 { BFD_RELOC_X86_64_PC32_BND, R_X86_64_PC32_BND,},
257 { BFD_RELOC_X86_64_PLT32_BND, R_X86_64_PLT32_BND,},
258 { BFD_RELOC_VTABLE_INHERIT, R_X86_64_GNU_VTINHERIT, },
259 { BFD_RELOC_VTABLE_ENTRY, R_X86_64_GNU_VTENTRY, },
260 };
261
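/* (Descriptive note.)  Map an ELF relocation number onto an entry in the
   howto table above: the standard relocs run up to R_X86_64_PLT32_BND,
   the GNU VT* relocs sit past the gap and are reached by subtracting
   R_X86_64_vt_offset, and the extra R_X86_64_32 entry at the end of the
   table is used for x32, where bitfield rather than unsigned overflow
   checking is wanted.  */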
262 static reloc_howto_type *
263 elf_x86_64_rtype_to_howto (bfd *abfd, unsigned r_type)
264 {
265 unsigned i;
266
267 if (r_type == (unsigned int) R_X86_64_32)
268 {
269 if (ABI_64_P (abfd))
270 i = r_type;
271 else
272 i = ARRAY_SIZE (x86_64_elf_howto_table) - 1;
273 }
274 else if (r_type < (unsigned int) R_X86_64_GNU_VTINHERIT
275 || r_type >= (unsigned int) R_X86_64_max)
276 {
277 if (r_type >= (unsigned int) R_X86_64_standard)
278 {
279 (*_bfd_error_handler) (_("%B: invalid relocation type %d"),
280 abfd, (int) r_type);
281 r_type = R_X86_64_NONE;
282 }
283 i = r_type;
284 }
285 else
286 i = r_type - (unsigned int) R_X86_64_vt_offset;
287 BFD_ASSERT (x86_64_elf_howto_table[i].type == r_type);
288 return &x86_64_elf_howto_table[i];
289 }
290
291 /* Given a BFD reloc type, return a HOWTO structure. */
292 static reloc_howto_type *
293 elf_x86_64_reloc_type_lookup (bfd *abfd,
294 bfd_reloc_code_real_type code)
295 {
296 unsigned int i;
297
298 for (i = 0; i < sizeof (x86_64_reloc_map) / sizeof (struct elf_reloc_map);
299 i++)
300 {
301 if (x86_64_reloc_map[i].bfd_reloc_val == code)
302 return elf_x86_64_rtype_to_howto (abfd,
303 x86_64_reloc_map[i].elf_reloc_val);
304 }
305 return 0;
306 }
307
308 static reloc_howto_type *
309 elf_x86_64_reloc_name_lookup (bfd *abfd,
310 const char *r_name)
311 {
312 unsigned int i;
313
314 if (!ABI_64_P (abfd) && strcasecmp (r_name, "R_X86_64_32") == 0)
315 {
316 /* Get x32 R_X86_64_32. */
317 reloc_howto_type *reloc
318 = &x86_64_elf_howto_table[ARRAY_SIZE (x86_64_elf_howto_table) - 1];
319 BFD_ASSERT (reloc->type == (unsigned int) R_X86_64_32);
320 return reloc;
321 }
322
323 for (i = 0; i < ARRAY_SIZE (x86_64_elf_howto_table); i++)
324 if (x86_64_elf_howto_table[i].name != NULL
325 && strcasecmp (x86_64_elf_howto_table[i].name, r_name) == 0)
326 return &x86_64_elf_howto_table[i];
327
328 return NULL;
329 }
330
331 /* Given an x86_64 ELF reloc type, fill in an arelent structure. */
332
333 static void
334 elf_x86_64_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *cache_ptr,
335 Elf_Internal_Rela *dst)
336 {
337 unsigned r_type;
338
339 r_type = ELF32_R_TYPE (dst->r_info);
340 cache_ptr->howto = elf_x86_64_rtype_to_howto (abfd, r_type);
341 BFD_ASSERT (r_type == cache_ptr->howto->type);
342 }
343 \f
344 /* Support for core dump NOTE sections. */
345 static bfd_boolean
346 elf_x86_64_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
347 {
348 int offset;
349 size_t size;
350
351 switch (note->descsz)
352 {
353 default:
354 return FALSE;
355
356 case 296: /* sizeof(struct elf_prstatus) on Linux/x32 */
357 /* pr_cursig */
358 elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);
359
360 /* pr_pid */
361 elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);
362
363 /* pr_reg */
364 offset = 72;
365 size = 216;
366
367 break;
368
369 case 336: /* sizeof(struct elf_prstatus) on Linux/x86_64 */
370 /* pr_cursig */
371 elf_tdata (abfd)->core->signal
372 = bfd_get_16 (abfd, note->descdata + 12);
373
374 /* pr_pid */
375 elf_tdata (abfd)->core->lwpid
376 = bfd_get_32 (abfd, note->descdata + 32);
377
378 /* pr_reg */
379 offset = 112;
380 size = 216;
381
382 break;
383 }
384
385 /* Make a ".reg/999" section. */
386 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
387 size, note->descpos + offset);
388 }
389
390 static bfd_boolean
391 elf_x86_64_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
392 {
393 switch (note->descsz)
394 {
395 default:
396 return FALSE;
397
398 case 124: /* sizeof(struct elf_prpsinfo) on Linux/x32 */
399 elf_tdata (abfd)->core->pid
400 = bfd_get_32 (abfd, note->descdata + 12);
401 elf_tdata (abfd)->core->program
402 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
403 elf_tdata (abfd)->core->command
404 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
405 break;
406
407 case 136: /* sizeof(struct elf_prpsinfo) on Linux/x86_64 */
408 elf_tdata (abfd)->core->pid
409 = bfd_get_32 (abfd, note->descdata + 24);
410 elf_tdata (abfd)->core->program
411 = _bfd_elfcore_strndup (abfd, note->descdata + 40, 16);
412 elf_tdata (abfd)->core->command
413 = _bfd_elfcore_strndup (abfd, note->descdata + 56, 80);
414 }
415
416 /* Note that for some reason, a spurious space is tacked
417 onto the end of the args in some implementations (at least
418 one, anyway), so strip it off if it exists. */
419
420 {
421 char *command = elf_tdata (abfd)->core->command;
422 int n = strlen (command);
423
424 if (0 < n && command[n - 1] == ' ')
425 command[n - 1] = '\0';
426 }
427
428 return TRUE;
429 }
430
431 #ifdef CORE_HEADER
432 static char *
433 elf_x86_64_write_core_note (bfd *abfd, char *buf, int *bufsiz,
434 int note_type, ...)
435 {
436 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
437 va_list ap;
438 const char *fname, *psargs;
439 long pid;
440 int cursig;
441 const void *gregs;
442
443 switch (note_type)
444 {
445 default:
446 return NULL;
447
448 case NT_PRPSINFO:
449 va_start (ap, note_type);
450 fname = va_arg (ap, const char *);
451 psargs = va_arg (ap, const char *);
452 va_end (ap);
453
454 if (bed->s->elfclass == ELFCLASS32)
455 {
456 prpsinfo32_t data;
457 memset (&data, 0, sizeof (data));
458 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
459 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
460 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
461 &data, sizeof (data));
462 }
463 else
464 {
465 prpsinfo64_t data;
466 memset (&data, 0, sizeof (data));
467 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
468 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
469 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
470 &data, sizeof (data));
471 }
472 /* NOTREACHED */
473
474 case NT_PRSTATUS:
475 va_start (ap, note_type);
476 pid = va_arg (ap, long);
477 cursig = va_arg (ap, int);
478 gregs = va_arg (ap, const void *);
479 va_end (ap);
480
481 if (bed->s->elfclass == ELFCLASS32)
482 {
483 if (bed->elf_machine_code == EM_X86_64)
484 {
485 prstatusx32_t prstat;
486 memset (&prstat, 0, sizeof (prstat));
487 prstat.pr_pid = pid;
488 prstat.pr_cursig = cursig;
489 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
490 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
491 &prstat, sizeof (prstat));
492 }
493 else
494 {
495 prstatus32_t prstat;
496 memset (&prstat, 0, sizeof (prstat));
497 prstat.pr_pid = pid;
498 prstat.pr_cursig = cursig;
499 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
500 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
501 &prstat, sizeof (prstat));
502 }
503 }
504 else
505 {
506 prstatus64_t prstat;
507 memset (&prstat, 0, sizeof (prstat));
508 prstat.pr_pid = pid;
509 prstat.pr_cursig = cursig;
510 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
511 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
512 &prstat, sizeof (prstat));
513 }
514 }
515 /* NOTREACHED */
516 }
517 #endif
518 \f
519 /* Functions for the x86-64 ELF linker. */
520
521 /* The name of the dynamic interpreter. This is put in the .interp
522 section. */
523
524 #define ELF64_DYNAMIC_INTERPRETER "/lib/ld64.so.1"
525 #define ELF32_DYNAMIC_INTERPRETER "/lib/ldx32.so.1"
526
527 /* If ELIMINATE_COPY_RELOCS is non-zero, the linker will try to avoid
528 copying dynamic variables from a shared lib into an app's dynbss
529 section, and instead use a dynamic relocation to point into the
530 shared lib. */
531 #define ELIMINATE_COPY_RELOCS 1
532
533 /* The size in bytes of an entry in the global offset table. */
534
535 #define GOT_ENTRY_SIZE 8
536
537 /* The size in bytes of an entry in the procedure linkage table. */
538
539 #define PLT_ENTRY_SIZE 16
540
541 /* The first entry in a procedure linkage table looks like this. See the
542 SVR4 ABI i386 supplement and the x86-64 ABI to see how this works. */
543
544 static const bfd_byte elf_x86_64_plt0_entry[PLT_ENTRY_SIZE] =
545 {
546 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
547 0xff, 0x25, 16, 0, 0, 0, /* jmpq *GOT+16(%rip) */
548 0x0f, 0x1f, 0x40, 0x00 /* nopl 0(%rax) */
549 };
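/* (Descriptive note.)  GOT+8 and GOT+16 above are the second and third
   entries of the global offset table (GOT[1] and GOT[2]).  The dynamic
   linker fills them in at run time with its bookkeeping pointer and the
   address of its lazy-resolution routine, so PLT0 pushes the former and
   jumps to the latter.  */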
550
551 /* Subsequent entries in a procedure linkage table look like this. */
552
553 static const bfd_byte elf_x86_64_plt_entry[PLT_ENTRY_SIZE] =
554 {
555 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
556 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
557 0x68, /* pushq immediate */
558 0, 0, 0, 0, /* replaced with index into relocation table. */
559 0xe9, /* jmp relative */
560 0, 0, 0, 0 /* replaced with offset to start of .plt0. */
561 };
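/* (Descriptive note on lazy binding.)  A symbol's GOT slot initially
   points back at the "pushq" inside its own PLT entry (offset 6, see
   plt_lazy_offset below), so the first call pushes the relocation index
   and jumps to PLT0, which enters the dynamic linker's resolver; the
   resolver then rewrites the GOT slot with the real function address so
   that later calls go straight through the initial "jmpq".  */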
562
563 /* The first entry in a procedure linkage table with BND relocations
564 looks like this. */
565
566 static const bfd_byte elf_x86_64_bnd_plt0_entry[PLT_ENTRY_SIZE] =
567 {
568 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
569 0xf2, 0xff, 0x25, 16, 0, 0, 0, /* bnd jmpq *GOT+16(%rip) */
570 0x0f, 0x1f, 0 /* nopl (%rax) */
571 };
572
573 /* Subsequent entries for legacy branches in a procedure linkage table
574 with BND relocations look like this. */
575
576 static const bfd_byte elf_x86_64_legacy_plt_entry[PLT_ENTRY_SIZE] =
577 {
578 0x68, 0, 0, 0, 0, /* pushq immediate */
579 0xe9, 0, 0, 0, 0, /* jmpq relative */
580 0x66, 0x0f, 0x1f, 0x44, 0, 0 /* nopw (%rax,%rax,1) */
581 };
582
583 /* Subsequent entries for branches with BND prefix in a procedure linkage
584 table with BND relocations look like this. */
585
586 static const bfd_byte elf_x86_64_bnd_plt_entry[PLT_ENTRY_SIZE] =
587 {
588 0x68, 0, 0, 0, 0, /* pushq immediate */
589 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
590 0x0f, 0x1f, 0x44, 0, 0 /* nopl 0(%rax,%rax,1) */
591 };
592
593 /* Entries for legacy branches in the second procedure linkage table
594 look like this. */
595
596 static const bfd_byte elf_x86_64_legacy_plt2_entry[8] =
597 {
598 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
599 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
600 0x66, 0x90 /* xchg %ax,%ax */
601 };
602
603 /* Entries for branches with BND prefix in the second procedure linkage
604 table look like this. */
605
606 static const bfd_byte elf_x86_64_bnd_plt2_entry[8] =
607 {
608 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
609 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
610 0x90 /* nop */
611 };
612
613 /* .eh_frame covering the .plt section. */
614
615 static const bfd_byte elf_x86_64_eh_frame_plt[] =
616 {
617 #define PLT_CIE_LENGTH 20
618 #define PLT_FDE_LENGTH 36
619 #define PLT_FDE_START_OFFSET 4 + PLT_CIE_LENGTH + 8
620 #define PLT_FDE_LEN_OFFSET 4 + PLT_CIE_LENGTH + 12
621 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
622 0, 0, 0, 0, /* CIE ID */
623 1, /* CIE version */
624 'z', 'R', 0, /* Augmentation string */
625 1, /* Code alignment factor */
626 0x78, /* Data alignment factor */
627 16, /* Return address column */
628 1, /* Augmentation size */
629 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
630 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
631 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
632 DW_CFA_nop, DW_CFA_nop,
633
634 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
635 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
636 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
637 0, 0, 0, 0, /* .plt size goes here */
638 0, /* Augmentation size */
639 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
640 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
641 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
642 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
643 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
644 11, /* Block length */
645 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
646 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
647 DW_OP_lit15, DW_OP_and, DW_OP_lit11, DW_OP_ge,
648 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
649 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
650 };
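/* (Descriptive note.)  The DW_CFA_def_cfa_expression above computes
   rsp + 8 + (((rip & 15) >= 11) << 3): within a 16-byte PLT entry the
   CFA is rsp + 8, plus a further 8 once the entry's "pushq" (which ends
   at offset 11) has executed.  */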
651
652 /* Architecture-specific backend data for x86-64. */
653
654 struct elf_x86_64_backend_data
655 {
656 /* Templates for the initial PLT entry and for subsequent entries. */
657 const bfd_byte *plt0_entry;
658 const bfd_byte *plt_entry;
659 unsigned int plt_entry_size; /* Size of each PLT entry. */
660
661 /* Offsets into plt0_entry that are to be replaced with GOT[1] and GOT[2]. */
662 unsigned int plt0_got1_offset;
663 unsigned int plt0_got2_offset;
664
665 /* Offset of the end of the PC-relative instruction containing
666 plt0_got2_offset. */
667 unsigned int plt0_got2_insn_end;
668
669 /* Offsets into plt_entry that are to be replaced with... */
670 unsigned int plt_got_offset; /* ... address of this symbol in .got. */
671 unsigned int plt_reloc_offset; /* ... offset into relocation table. */
672 unsigned int plt_plt_offset; /* ... offset to start of .plt. */
673
674 /* Length of the PC-relative instruction containing plt_got_offset. */
675 unsigned int plt_got_insn_size;
676
677 /* Offset of the end of the PC-relative jump to plt0_entry. */
678 unsigned int plt_plt_insn_end;
679
680 /* Offset into plt_entry where the initial value of the GOT entry points. */
681 unsigned int plt_lazy_offset;
682
683 /* .eh_frame covering the .plt section. */
684 const bfd_byte *eh_frame_plt;
685 unsigned int eh_frame_plt_size;
686 };
687
688 #define get_elf_x86_64_arch_data(bed) \
689 ((const struct elf_x86_64_backend_data *) (bed)->arch_data)
690
691 #define get_elf_x86_64_backend_data(abfd) \
692 get_elf_x86_64_arch_data (get_elf_backend_data (abfd))
693
694 #define GET_PLT_ENTRY_SIZE(abfd) \
695 get_elf_x86_64_backend_data (abfd)->plt_entry_size
696
697 /* These are the standard parameters. */
698 static const struct elf_x86_64_backend_data elf_x86_64_arch_bed =
699 {
700 elf_x86_64_plt0_entry, /* plt0_entry */
701 elf_x86_64_plt_entry, /* plt_entry */
702 sizeof (elf_x86_64_plt_entry), /* plt_entry_size */
703 2, /* plt0_got1_offset */
704 8, /* plt0_got2_offset */
705 12, /* plt0_got2_insn_end */
706 2, /* plt_got_offset */
707 7, /* plt_reloc_offset */
708 12, /* plt_plt_offset */
709 6, /* plt_got_insn_size */
710 PLT_ENTRY_SIZE, /* plt_plt_insn_end */
711 6, /* plt_lazy_offset */
712 elf_x86_64_eh_frame_plt, /* eh_frame_plt */
713 sizeof (elf_x86_64_eh_frame_plt), /* eh_frame_plt_size */
714 };
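/* (Descriptive note.)  The offsets above follow directly from
   elf_x86_64_plt0_entry and elf_x86_64_plt_entry: in PLT0 the GOT[1]
   and GOT[2] displacements sit after the two-byte opcodes (offsets 2
   and 8), while in a regular entry the GOT displacement follows the
   "ff 25" jmp (offset 2), the relocation index follows the "68" push
   (offset 7) and the branch back to PLT0 follows the "e9" jmp
   (offset 12).  */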
715
716 static const struct elf_x86_64_backend_data elf_x86_64_bnd_arch_bed =
717 {
718 elf_x86_64_bnd_plt0_entry, /* plt0_entry */
719 elf_x86_64_bnd_plt_entry, /* plt_entry */
720 sizeof (elf_x86_64_bnd_plt_entry), /* plt_entry_size */
721 2, /* plt0_got1_offset */
722 1+8, /* plt0_got2_offset */
723 1+12, /* plt0_got2_insn_end */
724 1+2, /* plt_got_offset */
725 1, /* plt_reloc_offset */
726 7, /* plt_plt_offset */
727 1+6, /* plt_got_insn_size */
728 11, /* plt_plt_insn_end */
729 0, /* plt_lazy_offset */
730 elf_x86_64_eh_frame_plt, /* eh_frame_plt */
731 sizeof (elf_x86_64_eh_frame_plt), /* eh_frame_plt_size */
732 };
733
734 #define elf_backend_arch_data &elf_x86_64_arch_bed
735
736 /* x86-64 ELF linker hash entry. */
737
738 struct elf_x86_64_link_hash_entry
739 {
740 struct elf_link_hash_entry elf;
741
742 /* Track dynamic relocs copied for this symbol. */
743 struct elf_dyn_relocs *dyn_relocs;
744
745 #define GOT_UNKNOWN 0
746 #define GOT_NORMAL 1
747 #define GOT_TLS_GD 2
748 #define GOT_TLS_IE 3
749 #define GOT_TLS_GDESC 4
750 #define GOT_TLS_GD_BOTH_P(type) \
751 ((type) == (GOT_TLS_GD | GOT_TLS_GDESC))
752 #define GOT_TLS_GD_P(type) \
753 ((type) == GOT_TLS_GD || GOT_TLS_GD_BOTH_P (type))
754 #define GOT_TLS_GDESC_P(type) \
755 ((type) == GOT_TLS_GDESC || GOT_TLS_GD_BOTH_P (type))
756 #define GOT_TLS_GD_ANY_P(type) \
757 (GOT_TLS_GD_P (type) || GOT_TLS_GDESC_P (type))
758 unsigned char tls_type;
759
760 /* TRUE if symbol has at least one BND relocation. */
761 bfd_boolean has_bnd_reloc;
762
763 /* Information about the GOT PLT entry. Filled when there are both
764 GOT and PLT relocations against the same function. */
765 union gotplt_union plt_got;
766
767 /* Information about the second PLT entry. Filled when has_bnd_reloc is
768 set. */
769 union gotplt_union plt_bnd;
770
771 /* Offset of the GOTPLT entry reserved for the TLS descriptor,
772 starting at the end of the jump table. */
773 bfd_vma tlsdesc_got;
774 };
775
776 #define elf_x86_64_hash_entry(ent) \
777 ((struct elf_x86_64_link_hash_entry *)(ent))
778
779 struct elf_x86_64_obj_tdata
780 {
781 struct elf_obj_tdata root;
782
783 /* tls_type for each local got entry. */
784 char *local_got_tls_type;
785
786 /* GOTPLT entries for TLS descriptors. */
787 bfd_vma *local_tlsdesc_gotent;
788 };
789
790 #define elf_x86_64_tdata(abfd) \
791 ((struct elf_x86_64_obj_tdata *) (abfd)->tdata.any)
792
793 #define elf_x86_64_local_got_tls_type(abfd) \
794 (elf_x86_64_tdata (abfd)->local_got_tls_type)
795
796 #define elf_x86_64_local_tlsdesc_gotent(abfd) \
797 (elf_x86_64_tdata (abfd)->local_tlsdesc_gotent)
798
799 #define is_x86_64_elf(bfd) \
800 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
801 && elf_tdata (bfd) != NULL \
802 && elf_object_id (bfd) == X86_64_ELF_DATA)
803
804 static bfd_boolean
805 elf_x86_64_mkobject (bfd *abfd)
806 {
807 return bfd_elf_allocate_object (abfd, sizeof (struct elf_x86_64_obj_tdata),
808 X86_64_ELF_DATA);
809 }
810
811 /* x86-64 ELF linker hash table. */
812
813 struct elf_x86_64_link_hash_table
814 {
815 struct elf_link_hash_table elf;
816
817 /* Short-cuts to get to dynamic linker sections. */
818 asection *sdynbss;
819 asection *srelbss;
820 asection *plt_eh_frame;
821 asection *plt_bnd;
822 asection *plt_got;
823
824 union
825 {
826 bfd_signed_vma refcount;
827 bfd_vma offset;
828 } tls_ld_got;
829
830 /* The amount of space used by the jump slots in the GOT. */
831 bfd_vma sgotplt_jump_table_size;
832
833 /* Small local sym cache. */
834 struct sym_cache sym_cache;
835
836 bfd_vma (*r_info) (bfd_vma, bfd_vma);
837 bfd_vma (*r_sym) (bfd_vma);
838 unsigned int pointer_r_type;
839 const char *dynamic_interpreter;
840 int dynamic_interpreter_size;
841
842 /* _TLS_MODULE_BASE_ symbol. */
843 struct bfd_link_hash_entry *tls_module_base;
844
845 /* Used by local STT_GNU_IFUNC symbols. */
846 htab_t loc_hash_table;
847 void * loc_hash_memory;
848
849 /* The offset into splt of the PLT entry for the TLS descriptor
850 resolver. Special values are 0, if not necessary (or not found
851 to be necessary yet), and -1 if needed but not determined
852 yet. */
853 bfd_vma tlsdesc_plt;
854 /* The offset into sgot of the GOT entry used by the PLT entry
855 above. */
856 bfd_vma tlsdesc_got;
857
858 /* The index of the next R_X86_64_JUMP_SLOT entry in .rela.plt. */
859 bfd_vma next_jump_slot_index;
860 /* The index of the next R_X86_64_IRELATIVE entry in .rela.plt. */
861 bfd_vma next_irelative_index;
862 };
863
864 /* Get the x86-64 ELF linker hash table from a link_info structure. */
865
866 #define elf_x86_64_hash_table(p) \
867 (elf_hash_table_id ((struct elf_link_hash_table *) ((p)->hash)) \
868 == X86_64_ELF_DATA ? ((struct elf_x86_64_link_hash_table *) ((p)->hash)) : NULL)
869
870 #define elf_x86_64_compute_jump_table_size(htab) \
871 ((htab)->elf.srelplt->reloc_count * GOT_ENTRY_SIZE)
872
873 /* Create an entry in an x86-64 ELF linker hash table. */
874
875 static struct bfd_hash_entry *
876 elf_x86_64_link_hash_newfunc (struct bfd_hash_entry *entry,
877 struct bfd_hash_table *table,
878 const char *string)
879 {
880 /* Allocate the structure if it has not already been allocated by a
881 subclass. */
882 if (entry == NULL)
883 {
884 entry = (struct bfd_hash_entry *)
885 bfd_hash_allocate (table,
886 sizeof (struct elf_x86_64_link_hash_entry));
887 if (entry == NULL)
888 return entry;
889 }
890
891 /* Call the allocation method of the superclass. */
892 entry = _bfd_elf_link_hash_newfunc (entry, table, string);
893 if (entry != NULL)
894 {
895 struct elf_x86_64_link_hash_entry *eh;
896
897 eh = (struct elf_x86_64_link_hash_entry *) entry;
898 eh->dyn_relocs = NULL;
899 eh->tls_type = GOT_UNKNOWN;
900 eh->has_bnd_reloc = FALSE;
901 eh->plt_bnd.offset = (bfd_vma) -1;
902 eh->plt_got.offset = (bfd_vma) -1;
903 eh->tlsdesc_got = (bfd_vma) -1;
904 }
905
906 return entry;
907 }
908
909 /* Compute a hash of a local hash entry. We use elf_link_hash_entry
910 for local symbols so that we can handle local STT_GNU_IFUNC symbols
911 as global symbols. We reuse indx and dynstr_index for the local
912 symbol hash since they aren't used by global symbols in this backend. */
913
914 static hashval_t
915 elf_x86_64_local_htab_hash (const void *ptr)
916 {
917 struct elf_link_hash_entry *h
918 = (struct elf_link_hash_entry *) ptr;
919 return ELF_LOCAL_SYMBOL_HASH (h->indx, h->dynstr_index);
920 }
921
922 /* Compare local hash entries. */
923
924 static int
925 elf_x86_64_local_htab_eq (const void *ptr1, const void *ptr2)
926 {
927 struct elf_link_hash_entry *h1
928 = (struct elf_link_hash_entry *) ptr1;
929 struct elf_link_hash_entry *h2
930 = (struct elf_link_hash_entry *) ptr2;
931
932 return h1->indx == h2->indx && h1->dynstr_index == h2->dynstr_index;
933 }
934
935 /* Find and/or create a hash entry for a local symbol. */
936
937 static struct elf_link_hash_entry *
938 elf_x86_64_get_local_sym_hash (struct elf_x86_64_link_hash_table *htab,
939 bfd *abfd, const Elf_Internal_Rela *rel,
940 bfd_boolean create)
941 {
942 struct elf_x86_64_link_hash_entry e, *ret;
943 asection *sec = abfd->sections;
944 hashval_t h = ELF_LOCAL_SYMBOL_HASH (sec->id,
945 htab->r_sym (rel->r_info));
946 void **slot;
947
948 e.elf.indx = sec->id;
949 e.elf.dynstr_index = htab->r_sym (rel->r_info);
950 slot = htab_find_slot_with_hash (htab->loc_hash_table, &e, h,
951 create ? INSERT : NO_INSERT);
952
953 if (!slot)
954 return NULL;
955
956 if (*slot)
957 {
958 ret = (struct elf_x86_64_link_hash_entry *) *slot;
959 return &ret->elf;
960 }
961
962 ret = (struct elf_x86_64_link_hash_entry *)
963 objalloc_alloc ((struct objalloc *) htab->loc_hash_memory,
964 sizeof (struct elf_x86_64_link_hash_entry));
965 if (ret)
966 {
967 memset (ret, 0, sizeof (*ret));
968 ret->elf.indx = sec->id;
969 ret->elf.dynstr_index = htab->r_sym (rel->r_info);
970 ret->elf.dynindx = -1;
971 ret->plt_got.offset = (bfd_vma) -1;
972 *slot = ret;
973 }
974 return &ret->elf;
975 }
976
977 /* Destroy an X86-64 ELF linker hash table. */
978
979 static void
980 elf_x86_64_link_hash_table_free (bfd *obfd)
981 {
982 struct elf_x86_64_link_hash_table *htab
983 = (struct elf_x86_64_link_hash_table *) obfd->link.hash;
984
985 if (htab->loc_hash_table)
986 htab_delete (htab->loc_hash_table);
987 if (htab->loc_hash_memory)
988 objalloc_free ((struct objalloc *) htab->loc_hash_memory);
989 _bfd_elf_link_hash_table_free (obfd);
990 }
991
992 /* Create an X86-64 ELF linker hash table. */
993
994 static struct bfd_link_hash_table *
995 elf_x86_64_link_hash_table_create (bfd *abfd)
996 {
997 struct elf_x86_64_link_hash_table *ret;
998 bfd_size_type amt = sizeof (struct elf_x86_64_link_hash_table);
999
1000 ret = (struct elf_x86_64_link_hash_table *) bfd_zmalloc (amt);
1001 if (ret == NULL)
1002 return NULL;
1003
1004 if (!_bfd_elf_link_hash_table_init (&ret->elf, abfd,
1005 elf_x86_64_link_hash_newfunc,
1006 sizeof (struct elf_x86_64_link_hash_entry),
1007 X86_64_ELF_DATA))
1008 {
1009 free (ret);
1010 return NULL;
1011 }
1012
1013 if (ABI_64_P (abfd))
1014 {
1015 ret->r_info = elf64_r_info;
1016 ret->r_sym = elf64_r_sym;
1017 ret->pointer_r_type = R_X86_64_64;
1018 ret->dynamic_interpreter = ELF64_DYNAMIC_INTERPRETER;
1019 ret->dynamic_interpreter_size = sizeof ELF64_DYNAMIC_INTERPRETER;
1020 }
1021 else
1022 {
1023 ret->r_info = elf32_r_info;
1024 ret->r_sym = elf32_r_sym;
1025 ret->pointer_r_type = R_X86_64_32;
1026 ret->dynamic_interpreter = ELF32_DYNAMIC_INTERPRETER;
1027 ret->dynamic_interpreter_size = sizeof ELF32_DYNAMIC_INTERPRETER;
1028 }
1029
1030 ret->loc_hash_table = htab_try_create (1024,
1031 elf_x86_64_local_htab_hash,
1032 elf_x86_64_local_htab_eq,
1033 NULL);
1034 ret->loc_hash_memory = objalloc_create ();
1035 if (!ret->loc_hash_table || !ret->loc_hash_memory)
1036 {
1037 elf_x86_64_link_hash_table_free (abfd);
1038 return NULL;
1039 }
1040 ret->elf.root.hash_table_free = elf_x86_64_link_hash_table_free;
1041
1042 return &ret->elf.root;
1043 }
1044
1045 /* Create .plt, .rela.plt, .got, .got.plt, .rela.got, .dynbss, and
1046 .rela.bss sections in DYNOBJ, and set up shortcuts to them in our
1047 hash table. */
1048
1049 static bfd_boolean
1050 elf_x86_64_create_dynamic_sections (bfd *dynobj,
1051 struct bfd_link_info *info)
1052 {
1053 struct elf_x86_64_link_hash_table *htab;
1054
1055 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
1056 return FALSE;
1057
1058 htab = elf_x86_64_hash_table (info);
1059 if (htab == NULL)
1060 return FALSE;
1061
1062 htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
1063 if (!htab->sdynbss)
1064 abort ();
1065
1066 if (info->executable)
1067 {
1068 /* Always allow copy relocs for building executables. */
1069 asection *s;
1070 s = bfd_get_linker_section (dynobj, ".rela.bss");
1071 if (s == NULL)
1072 {
1073 const struct elf_backend_data *bed = get_elf_backend_data (dynobj);
1074 s = bfd_make_section_anyway_with_flags (dynobj,
1075 ".rela.bss",
1076 (bed->dynamic_sec_flags
1077 | SEC_READONLY));
1078 if (s == NULL
1079 || ! bfd_set_section_alignment (dynobj, s,
1080 bed->s->log_file_align))
1081 return FALSE;
1082 }
1083 htab->srelbss = s;
1084 }
1085
1086 if (!info->no_ld_generated_unwind_info
1087 && htab->plt_eh_frame == NULL
1088 && htab->elf.splt != NULL)
1089 {
1090 flagword flags = (SEC_ALLOC | SEC_LOAD | SEC_READONLY
1091 | SEC_HAS_CONTENTS | SEC_IN_MEMORY
1092 | SEC_LINKER_CREATED);
1093 htab->plt_eh_frame
1094 = bfd_make_section_anyway_with_flags (dynobj, ".eh_frame", flags);
1095 if (htab->plt_eh_frame == NULL
1096 || !bfd_set_section_alignment (dynobj, htab->plt_eh_frame, 3))
1097 return FALSE;
1098 }
1099 return TRUE;
1100 }
1101
1102 /* Copy the extra info we tack onto an elf_link_hash_entry. */
1103
1104 static void
1105 elf_x86_64_copy_indirect_symbol (struct bfd_link_info *info,
1106 struct elf_link_hash_entry *dir,
1107 struct elf_link_hash_entry *ind)
1108 {
1109 struct elf_x86_64_link_hash_entry *edir, *eind;
1110
1111 edir = (struct elf_x86_64_link_hash_entry *) dir;
1112 eind = (struct elf_x86_64_link_hash_entry *) ind;
1113
1114 if (!edir->has_bnd_reloc)
1115 edir->has_bnd_reloc = eind->has_bnd_reloc;
1116
1117 if (eind->dyn_relocs != NULL)
1118 {
1119 if (edir->dyn_relocs != NULL)
1120 {
1121 struct elf_dyn_relocs **pp;
1122 struct elf_dyn_relocs *p;
1123
1124 /* Add reloc counts against the indirect sym to the direct sym
1125 list. Merge any entries against the same section. */
1126 for (pp = &eind->dyn_relocs; (p = *pp) != NULL; )
1127 {
1128 struct elf_dyn_relocs *q;
1129
1130 for (q = edir->dyn_relocs; q != NULL; q = q->next)
1131 if (q->sec == p->sec)
1132 {
1133 q->pc_count += p->pc_count;
1134 q->count += p->count;
1135 *pp = p->next;
1136 break;
1137 }
1138 if (q == NULL)
1139 pp = &p->next;
1140 }
1141 *pp = edir->dyn_relocs;
1142 }
1143
1144 edir->dyn_relocs = eind->dyn_relocs;
1145 eind->dyn_relocs = NULL;
1146 }
1147
1148 if (ind->root.type == bfd_link_hash_indirect
1149 && dir->got.refcount <= 0)
1150 {
1151 edir->tls_type = eind->tls_type;
1152 eind->tls_type = GOT_UNKNOWN;
1153 }
1154
1155 if (ELIMINATE_COPY_RELOCS
1156 && ind->root.type != bfd_link_hash_indirect
1157 && dir->dynamic_adjusted)
1158 {
1159 /* If called to transfer flags for a weakdef during processing
1160 of elf_adjust_dynamic_symbol, don't copy non_got_ref.
1161 We clear it ourselves for ELIMINATE_COPY_RELOCS. */
1162 dir->ref_dynamic |= ind->ref_dynamic;
1163 dir->ref_regular |= ind->ref_regular;
1164 dir->ref_regular_nonweak |= ind->ref_regular_nonweak;
1165 dir->needs_plt |= ind->needs_plt;
1166 dir->pointer_equality_needed |= ind->pointer_equality_needed;
1167 }
1168 else
1169 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
1170 }
1171
1172 static bfd_boolean
1173 elf64_x86_64_elf_object_p (bfd *abfd)
1174 {
1175 /* Set the right machine number for an x86-64 elf64 file. */
1176 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64);
1177 return TRUE;
1178 }
1179
1180 static bfd_boolean
1181 elf32_x86_64_elf_object_p (bfd *abfd)
1182 {
1183 /* Set the right machine number for an x86-64 elf32 file. */
1184 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32);
1185 return TRUE;
1186 }
1187
1188 /* Return TRUE if the TLS access code sequence supports transition
1189 from R_TYPE. */
1190
1191 static bfd_boolean
1192 elf_x86_64_check_tls_transition (bfd *abfd,
1193 struct bfd_link_info *info,
1194 asection *sec,
1195 bfd_byte *contents,
1196 Elf_Internal_Shdr *symtab_hdr,
1197 struct elf_link_hash_entry **sym_hashes,
1198 unsigned int r_type,
1199 const Elf_Internal_Rela *rel,
1200 const Elf_Internal_Rela *relend)
1201 {
1202 unsigned int val;
1203 unsigned long r_symndx;
1204 bfd_boolean largepic = FALSE;
1205 struct elf_link_hash_entry *h;
1206 bfd_vma offset;
1207 struct elf_x86_64_link_hash_table *htab;
1208
1209 /* Get the section contents. */
1210 if (contents == NULL)
1211 {
1212 if (elf_section_data (sec)->this_hdr.contents != NULL)
1213 contents = elf_section_data (sec)->this_hdr.contents;
1214 else
1215 {
1216 /* FIXME: How to better handle error condition? */
1217 if (!bfd_malloc_and_get_section (abfd, sec, &contents))
1218 return FALSE;
1219
1220 /* Cache the section contents for elf_link_input_bfd. */
1221 elf_section_data (sec)->this_hdr.contents = contents;
1222 }
1223 }
1224
1225 htab = elf_x86_64_hash_table (info);
1226 offset = rel->r_offset;
1227 switch (r_type)
1228 {
1229 case R_X86_64_TLSGD:
1230 case R_X86_64_TLSLD:
1231 if ((rel + 1) >= relend)
1232 return FALSE;
1233
1234 if (r_type == R_X86_64_TLSGD)
1235 {
1236 /* Check transition from GD access model. For 64bit, only
1237 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1238 .word 0x6666; rex64; call __tls_get_addr
1239 can transition to a different access model. For 32bit, only
1240 leaq foo@tlsgd(%rip), %rdi
1241 .word 0x6666; rex64; call __tls_get_addr
1242 can transition to a different access model. For largepic
1243 we also support:
1244 leaq foo@tlsgd(%rip), %rdi
1245 movabsq $__tls_get_addr@pltoff, %rax
1246 addq %rbx, %rax
1247 call *%rax. */
1248
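/* (Descriptive note.)  The byte patterns below encode the prefixed
   instructions shown in the comment above: 0x66 0x66 0x48 0xe8 is the
   two data16 prefixes plus rex64 and the call opcode, and 0x66 0x48
   0x8d 0x3d is data16, rex64 and the opcode/ModRM bytes of
   leaq disp32(%rip), %rdi.  */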
1249 static const unsigned char call[] = { 0x66, 0x66, 0x48, 0xe8 };
1250 static const unsigned char leaq[] = { 0x66, 0x48, 0x8d, 0x3d };
1251
1252 if ((offset + 12) > sec->size)
1253 return FALSE;
1254
1255 if (memcmp (contents + offset + 4, call, 4) != 0)
1256 {
1257 if (!ABI_64_P (abfd)
1258 || (offset + 19) > sec->size
1259 || offset < 3
1260 || memcmp (contents + offset - 3, leaq + 1, 3) != 0
1261 || memcmp (contents + offset + 4, "\x48\xb8", 2) != 0
1262 || memcmp (contents + offset + 14, "\x48\x01\xd8\xff\xd0", 5)
1263 != 0)
1264 return FALSE;
1265 largepic = TRUE;
1266 }
1267 else if (ABI_64_P (abfd))
1268 {
1269 if (offset < 4
1270 || memcmp (contents + offset - 4, leaq, 4) != 0)
1271 return FALSE;
1272 }
1273 else
1274 {
1275 if (offset < 3
1276 || memcmp (contents + offset - 3, leaq + 1, 3) != 0)
1277 return FALSE;
1278 }
1279 }
1280 else
1281 {
1282 /* Check transition from LD access model. Only
1283 leaq foo@tlsld(%rip), %rdi;
1284 call __tls_get_addr
1285 can transition to a different access model. For largepic
1286 we also support:
1287 leaq foo@tlsld(%rip), %rdi
1288 movabsq $__tls_get_addr@pltoff, %rax
1289 addq %rbx, %rax
1290 call *%rax. */
1291
1292 static const unsigned char lea[] = { 0x48, 0x8d, 0x3d };
1293
1294 if (offset < 3 || (offset + 9) > sec->size)
1295 return FALSE;
1296
1297 if (memcmp (contents + offset - 3, lea, 3) != 0)
1298 return FALSE;
1299
1300 if (0xe8 != *(contents + offset + 4))
1301 {
1302 if (!ABI_64_P (abfd)
1303 || (offset + 19) > sec->size
1304 || memcmp (contents + offset + 4, "\x48\xb8", 2) != 0
1305 || memcmp (contents + offset + 14, "\x48\x01\xd8\xff\xd0", 5)
1306 != 0)
1307 return FALSE;
1308 largepic = TRUE;
1309 }
1310 }
1311
1312 r_symndx = htab->r_sym (rel[1].r_info);
1313 if (r_symndx < symtab_hdr->sh_info)
1314 return FALSE;
1315
1316 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1317 /* Use strncmp to check __tls_get_addr since __tls_get_addr
1318 may be versioned. */
1319 return (h != NULL
1320 && h->root.root.string != NULL
1321 && (largepic
1322 ? ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PLTOFF64
1323 : (ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PC32
1324 || ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PLT32))
1325 && (strncmp (h->root.root.string,
1326 "__tls_get_addr", 14) == 0));
1327
1328 case R_X86_64_GOTTPOFF:
1329 /* Check transition from IE access model:
1330 mov foo@gottpoff(%rip), %reg
1331 add foo@gottpoff(%rip), %reg
1332 */
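/* (Descriptive note.)  The checks below verify a REX.W prefix of 0x48
   or 0x4c where one is required (x32 may omit it or use 0x44), an
   opcode byte of 0x8b (mov) or 0x03 (add), and a ModRM byte with
   (val & 0xc7) == 5, i.e. RIP-relative addressing.  */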
1333
1334 /* Check REX prefix first. */
1335 if (offset >= 3 && (offset + 4) <= sec->size)
1336 {
1337 val = bfd_get_8 (abfd, contents + offset - 3);
1338 if (val != 0x48 && val != 0x4c)
1339 {
1340 /* X32 may have 0x44 REX prefix or no REX prefix. */
1341 if (ABI_64_P (abfd))
1342 return FALSE;
1343 }
1344 }
1345 else
1346 {
1347 /* X32 may not have any REX prefix. */
1348 if (ABI_64_P (abfd))
1349 return FALSE;
1350 if (offset < 2 || (offset + 3) > sec->size)
1351 return FALSE;
1352 }
1353
1354 val = bfd_get_8 (abfd, contents + offset - 2);
1355 if (val != 0x8b && val != 0x03)
1356 return FALSE;
1357
1358 val = bfd_get_8 (abfd, contents + offset - 1);
1359 return (val & 0xc7) == 5;
1360
1361 case R_X86_64_GOTPC32_TLSDESC:
1362 /* Check transition from GDesc access model:
1363 leaq x@tlsdesc(%rip), %rax
1364
1365 Make sure it's a leaq adding rip to a 32-bit offset
1366 into any register, although it's probably almost always
1367 going to be rax. */
1368
1369 if (offset < 3 || (offset + 4) > sec->size)
1370 return FALSE;
1371
1372 val = bfd_get_8 (abfd, contents + offset - 3);
1373 if ((val & 0xfb) != 0x48)
1374 return FALSE;
1375
1376 if (bfd_get_8 (abfd, contents + offset - 2) != 0x8d)
1377 return FALSE;
1378
1379 val = bfd_get_8 (abfd, contents + offset - 1);
1380 return (val & 0xc7) == 0x05;
1381
1382 case R_X86_64_TLSDESC_CALL:
1383 /* Check transition from GDesc access model:
1384 call *x@tlsdesc(%rax)
1385 */
1386 if (offset + 2 <= sec->size)
1387 {
1388 /* Make sure that it's a call *x@tlsdesc(%rax). */
1389 static const unsigned char call[] = { 0xff, 0x10 };
1390 return memcmp (contents + offset, call, 2) == 0;
1391 }
1392
1393 return FALSE;
1394
1395 default:
1396 abort ();
1397 }
1398 }
1399
1400 /* Return TRUE if the TLS access transition is OK or no transition
1401 will be performed. Update R_TYPE if there is a transition. */
1402
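/* Rough summary (descriptive note): when building an executable, GD,
   GDesc and IE accesses can relax to LE (R_X86_64_TPOFF32) when the
   symbol is local or resolves locally, GD/GDesc accesses to other
   symbols relax to IE (R_X86_64_GOTTPOFF), and LD relaxes to LE.
   GD/GDesc may also be rewritten to IE when the GOT entry for the
   symbol was allocated as an IE entry.  */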
1403 static bfd_boolean
1404 elf_x86_64_tls_transition (struct bfd_link_info *info, bfd *abfd,
1405 asection *sec, bfd_byte *contents,
1406 Elf_Internal_Shdr *symtab_hdr,
1407 struct elf_link_hash_entry **sym_hashes,
1408 unsigned int *r_type, int tls_type,
1409 const Elf_Internal_Rela *rel,
1410 const Elf_Internal_Rela *relend,
1411 struct elf_link_hash_entry *h,
1412 unsigned long r_symndx)
1413 {
1414 unsigned int from_type = *r_type;
1415 unsigned int to_type = from_type;
1416 bfd_boolean check = TRUE;
1417
1418 /* Skip TLS transition for functions. */
1419 if (h != NULL
1420 && (h->type == STT_FUNC
1421 || h->type == STT_GNU_IFUNC))
1422 return TRUE;
1423
1424 switch (from_type)
1425 {
1426 case R_X86_64_TLSGD:
1427 case R_X86_64_GOTPC32_TLSDESC:
1428 case R_X86_64_TLSDESC_CALL:
1429 case R_X86_64_GOTTPOFF:
1430 if (info->executable)
1431 {
1432 if (h == NULL)
1433 to_type = R_X86_64_TPOFF32;
1434 else
1435 to_type = R_X86_64_GOTTPOFF;
1436 }
1437
1438 /* When we are called from elf_x86_64_relocate_section,
1439 CONTENTS isn't NULL and there may be additional transitions
1440 based on TLS_TYPE. */
1441 if (contents != NULL)
1442 {
1443 unsigned int new_to_type = to_type;
1444
1445 if (info->executable
1446 && h != NULL
1447 && h->dynindx == -1
1448 && tls_type == GOT_TLS_IE)
1449 new_to_type = R_X86_64_TPOFF32;
1450
1451 if (to_type == R_X86_64_TLSGD
1452 || to_type == R_X86_64_GOTPC32_TLSDESC
1453 || to_type == R_X86_64_TLSDESC_CALL)
1454 {
1455 if (tls_type == GOT_TLS_IE)
1456 new_to_type = R_X86_64_GOTTPOFF;
1457 }
1458
1459 /* We checked the transition before when we were called from
1460 elf_x86_64_check_relocs. We only want to check the new
1461 transition which hasn't been checked before. */
1462 check = new_to_type != to_type && from_type == to_type;
1463 to_type = new_to_type;
1464 }
1465
1466 break;
1467
1468 case R_X86_64_TLSLD:
1469 if (info->executable)
1470 to_type = R_X86_64_TPOFF32;
1471 break;
1472
1473 default:
1474 return TRUE;
1475 }
1476
1477 /* Return TRUE if there is no transition. */
1478 if (from_type == to_type)
1479 return TRUE;
1480
1481 /* Check if the transition can be performed. */
1482 if (check
1483 && ! elf_x86_64_check_tls_transition (abfd, info, sec, contents,
1484 symtab_hdr, sym_hashes,
1485 from_type, rel, relend))
1486 {
1487 reloc_howto_type *from, *to;
1488 const char *name;
1489
1490 from = elf_x86_64_rtype_to_howto (abfd, from_type);
1491 to = elf_x86_64_rtype_to_howto (abfd, to_type);
1492
1493 if (h)
1494 name = h->root.root.string;
1495 else
1496 {
1497 struct elf_x86_64_link_hash_table *htab;
1498
1499 htab = elf_x86_64_hash_table (info);
1500 if (htab == NULL)
1501 name = "*unknown*";
1502 else
1503 {
1504 Elf_Internal_Sym *isym;
1505
1506 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1507 abfd, r_symndx);
1508 name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL);
1509 }
1510 }
1511
1512 (*_bfd_error_handler)
1513 (_("%B: TLS transition from %s to %s against `%s' at 0x%lx "
1514 "in section `%A' failed"),
1515 abfd, sec, from->name, to->name, name,
1516 (unsigned long) rel->r_offset);
1517 bfd_set_error (bfd_error_bad_value);
1518 return FALSE;
1519 }
1520
1521 *r_type = to_type;
1522 return TRUE;
1523 }
1524
1525 /* Look through the relocs for a section during the first phase, and
1526 calculate needed space in the global offset table, procedure
1527 linkage table, and dynamic reloc sections. */
1528
1529 static bfd_boolean
1530 elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info,
1531 asection *sec,
1532 const Elf_Internal_Rela *relocs)
1533 {
1534 struct elf_x86_64_link_hash_table *htab;
1535 Elf_Internal_Shdr *symtab_hdr;
1536 struct elf_link_hash_entry **sym_hashes;
1537 const Elf_Internal_Rela *rel;
1538 const Elf_Internal_Rela *rel_end;
1539 asection *sreloc;
1540 bfd_boolean use_plt_got;
1541
1542 if (info->relocatable)
1543 return TRUE;
1544
1545 BFD_ASSERT (is_x86_64_elf (abfd));
1546
1547 htab = elf_x86_64_hash_table (info);
1548 if (htab == NULL)
1549 return FALSE;
1550
1551 use_plt_got = get_elf_x86_64_backend_data (abfd) == &elf_x86_64_arch_bed;
1552
1553 symtab_hdr = &elf_symtab_hdr (abfd);
1554 sym_hashes = elf_sym_hashes (abfd);
1555
1556 sreloc = NULL;
1557
1558 rel_end = relocs + sec->reloc_count;
1559 for (rel = relocs; rel < rel_end; rel++)
1560 {
1561 unsigned int r_type;
1562 unsigned long r_symndx;
1563 struct elf_link_hash_entry *h;
1564 Elf_Internal_Sym *isym;
1565 const char *name;
1566 bfd_boolean size_reloc;
1567
1568 r_symndx = htab->r_sym (rel->r_info);
1569 r_type = ELF32_R_TYPE (rel->r_info);
1570
1571 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
1572 {
1573 (*_bfd_error_handler) (_("%B: bad symbol index: %d"),
1574 abfd, r_symndx);
1575 return FALSE;
1576 }
1577
1578 if (r_symndx < symtab_hdr->sh_info)
1579 {
1580 /* A local symbol. */
1581 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1582 abfd, r_symndx);
1583 if (isym == NULL)
1584 return FALSE;
1585
1586 /* Check relocation against local STT_GNU_IFUNC symbol. */
1587 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
1588 {
1589 h = elf_x86_64_get_local_sym_hash (htab, abfd, rel,
1590 TRUE);
1591 if (h == NULL)
1592 return FALSE;
1593
1594 /* Fake a STT_GNU_IFUNC symbol. */
1595 h->type = STT_GNU_IFUNC;
1596 h->def_regular = 1;
1597 h->ref_regular = 1;
1598 h->forced_local = 1;
1599 h->root.type = bfd_link_hash_defined;
1600 }
1601 else
1602 h = NULL;
1603 }
1604 else
1605 {
1606 isym = NULL;
1607 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1608 while (h->root.type == bfd_link_hash_indirect
1609 || h->root.type == bfd_link_hash_warning)
1610 h = (struct elf_link_hash_entry *) h->root.u.i.link;
1611 }
1612
1613 /* Check invalid x32 relocations. */
1614 if (!ABI_64_P (abfd))
1615 switch (r_type)
1616 {
1617 default:
1618 break;
1619
1620 case R_X86_64_DTPOFF64:
1621 case R_X86_64_TPOFF64:
1622 case R_X86_64_PC64:
1623 case R_X86_64_GOTOFF64:
1624 case R_X86_64_GOT64:
1625 case R_X86_64_GOTPCREL64:
1626 case R_X86_64_GOTPC64:
1627 case R_X86_64_GOTPLT64:
1628 case R_X86_64_PLTOFF64:
1629 {
1630 if (h)
1631 name = h->root.root.string;
1632 else
1633 name = bfd_elf_sym_name (abfd, symtab_hdr, isym,
1634 NULL);
1635 (*_bfd_error_handler)
1636 (_("%B: relocation %s against symbol `%s' isn't "
1637 "supported in x32 mode"), abfd,
1638 x86_64_elf_howto_table[r_type].name, name);
1639 bfd_set_error (bfd_error_bad_value);
1640 return FALSE;
1641 }
1642 break;
1643 }
1644
1645 if (h != NULL)
1646 {
1647 /* Create the ifunc sections for static executables. If we
1648 never see an indirect function symbol nor are we building
1649 a static executable, those sections will be empty and
1650 won't appear in the output. */
1651 switch (r_type)
1652 {
1653 default:
1654 break;
1655
1656 case R_X86_64_PC32_BND:
1657 case R_X86_64_PLT32_BND:
1658 case R_X86_64_PC32:
1659 case R_X86_64_PLT32:
1660 case R_X86_64_32:
1661 case R_X86_64_64:
1662 /* MPX PLT is supported only if elf_x86_64_arch_bed
1663 is used in 64-bit mode. */
1664 if (ABI_64_P (abfd)
1665 && info->bndplt
1666 && (get_elf_x86_64_backend_data (abfd)
1667 == &elf_x86_64_arch_bed))
1668 {
1669 elf_x86_64_hash_entry (h)->has_bnd_reloc = TRUE;
1670
1671 /* Create the second PLT for Intel MPX support. */
1672 if (htab->plt_bnd == NULL)
1673 {
1674 unsigned int plt_bnd_align;
1675 const struct elf_backend_data *bed;
1676
1677 bed = get_elf_backend_data (info->output_bfd);
1678 BFD_ASSERT (sizeof (elf_x86_64_bnd_plt2_entry) == 8
1679 && (sizeof (elf_x86_64_bnd_plt2_entry)
1680 == sizeof (elf_x86_64_legacy_plt2_entry)));
1681 plt_bnd_align = 3;
1682
1683 if (htab->elf.dynobj == NULL)
1684 htab->elf.dynobj = abfd;
1685 htab->plt_bnd
1686 = bfd_make_section_anyway_with_flags (htab->elf.dynobj,
1687 ".plt.bnd",
1688 (bed->dynamic_sec_flags
1689 | SEC_ALLOC
1690 | SEC_CODE
1691 | SEC_LOAD
1692 | SEC_READONLY));
1693 if (htab->plt_bnd == NULL
1694 || !bfd_set_section_alignment (htab->elf.dynobj,
1695 htab->plt_bnd,
1696 plt_bnd_align))
1697 return FALSE;
1698 }
1699 }
1700
1701 case R_X86_64_32S:
1702 case R_X86_64_PC64:
1703 case R_X86_64_GOTPCREL:
1704 case R_X86_64_GOTPCREL64:
1705 if (htab->elf.dynobj == NULL)
1706 htab->elf.dynobj = abfd;
1707 if (!_bfd_elf_create_ifunc_sections (htab->elf.dynobj, info))
1708 return FALSE;
1709 break;
1710 }
1711
1712 /* It is referenced by a non-shared object. */
1713 h->ref_regular = 1;
1714 h->root.non_ir_ref = 1;
1715 }
1716
1717 if (! elf_x86_64_tls_transition (info, abfd, sec, NULL,
1718 symtab_hdr, sym_hashes,
1719 &r_type, GOT_UNKNOWN,
1720 rel, rel_end, h, r_symndx))
1721 return FALSE;
1722
1723 switch (r_type)
1724 {
1725 case R_X86_64_TLSLD:
1726 htab->tls_ld_got.refcount += 1;
1727 goto create_got;
1728
1729 case R_X86_64_TPOFF32:
1730 if (!info->executable && ABI_64_P (abfd))
1731 {
1732 if (h)
1733 name = h->root.root.string;
1734 else
1735 name = bfd_elf_sym_name (abfd, symtab_hdr, isym,
1736 NULL);
1737 (*_bfd_error_handler)
1738 (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
1739 abfd,
1740 x86_64_elf_howto_table[r_type].name, name);
1741 bfd_set_error (bfd_error_bad_value);
1742 return FALSE;
1743 }
1744 break;
1745
1746 case R_X86_64_GOTTPOFF:
1747 if (!info->executable)
1748 info->flags |= DF_STATIC_TLS;
1749 /* Fall through */
1750
1751 case R_X86_64_GOT32:
1752 case R_X86_64_GOTPCREL:
1753 case R_X86_64_TLSGD:
1754 case R_X86_64_GOT64:
1755 case R_X86_64_GOTPCREL64:
1756 case R_X86_64_GOTPLT64:
1757 case R_X86_64_GOTPC32_TLSDESC:
1758 case R_X86_64_TLSDESC_CALL:
1759 /* This symbol requires a global offset table entry. */
1760 {
1761 int tls_type, old_tls_type;
1762
1763 switch (r_type)
1764 {
1765 default: tls_type = GOT_NORMAL; break;
1766 case R_X86_64_TLSGD: tls_type = GOT_TLS_GD; break;
1767 case R_X86_64_GOTTPOFF: tls_type = GOT_TLS_IE; break;
1768 case R_X86_64_GOTPC32_TLSDESC:
1769 case R_X86_64_TLSDESC_CALL:
1770 tls_type = GOT_TLS_GDESC; break;
1771 }
1772
1773 if (h != NULL)
1774 {
1775 h->got.refcount += 1;
1776 old_tls_type = elf_x86_64_hash_entry (h)->tls_type;
1777 }
1778 else
1779 {
1780 bfd_signed_vma *local_got_refcounts;
1781
1782 /* This is a global offset table entry for a local symbol. */
1783 local_got_refcounts = elf_local_got_refcounts (abfd);
1784 if (local_got_refcounts == NULL)
1785 {
1786 bfd_size_type size;
1787
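		    /* All local GOT bookkeeping lives in one allocation:
		       sh_info signed refcounts, then sh_info TLSDESC GOT
		       offsets, then sh_info tls_type bytes, as set up
		       below.  */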
1788 size = symtab_hdr->sh_info;
1789 size *= sizeof (bfd_signed_vma)
1790 + sizeof (bfd_vma) + sizeof (char);
1791 local_got_refcounts = ((bfd_signed_vma *)
1792 bfd_zalloc (abfd, size));
1793 if (local_got_refcounts == NULL)
1794 return FALSE;
1795 elf_local_got_refcounts (abfd) = local_got_refcounts;
1796 elf_x86_64_local_tlsdesc_gotent (abfd)
1797 = (bfd_vma *) (local_got_refcounts + symtab_hdr->sh_info);
1798 elf_x86_64_local_got_tls_type (abfd)
1799 = (char *) (local_got_refcounts + 2 * symtab_hdr->sh_info);
1800 }
1801 local_got_refcounts[r_symndx] += 1;
1802 old_tls_type
1803 = elf_x86_64_local_got_tls_type (abfd) [r_symndx];
1804 }
1805
1806 /* If a TLS symbol is accessed using IE at least once,
1807 	       there is no point in using a dynamic model for it.  */
1808 if (old_tls_type != tls_type && old_tls_type != GOT_UNKNOWN
1809 && (! GOT_TLS_GD_ANY_P (old_tls_type)
1810 || tls_type != GOT_TLS_IE))
1811 {
1812 if (old_tls_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (tls_type))
1813 tls_type = old_tls_type;
1814 else if (GOT_TLS_GD_ANY_P (old_tls_type)
1815 && GOT_TLS_GD_ANY_P (tls_type))
1816 tls_type |= old_tls_type;
1817 else
1818 {
1819 if (h)
1820 name = h->root.root.string;
1821 else
1822 name = bfd_elf_sym_name (abfd, symtab_hdr,
1823 isym, NULL);
1824 (*_bfd_error_handler)
1825 (_("%B: '%s' accessed both as normal and thread local symbol"),
1826 abfd, name);
1827 bfd_set_error (bfd_error_bad_value);
1828 return FALSE;
1829 }
1830 }
1831
1832 if (old_tls_type != tls_type)
1833 {
1834 if (h != NULL)
1835 elf_x86_64_hash_entry (h)->tls_type = tls_type;
1836 else
1837 elf_x86_64_local_got_tls_type (abfd) [r_symndx] = tls_type;
1838 }
1839 }
1840 /* Fall through */
1841
1842 case R_X86_64_GOTOFF64:
1843 case R_X86_64_GOTPC32:
1844 case R_X86_64_GOTPC64:
1845 create_got:
1846 if (htab->elf.sgot == NULL)
1847 {
1848 if (htab->elf.dynobj == NULL)
1849 htab->elf.dynobj = abfd;
1850 if (!_bfd_elf_create_got_section (htab->elf.dynobj,
1851 info))
1852 return FALSE;
1853 }
1854 break;
1855
1856 case R_X86_64_PLT32:
1857 case R_X86_64_PLT32_BND:
1858 /* This symbol requires a procedure linkage table entry. We
1859 actually build the entry in adjust_dynamic_symbol,
1860 because this might be a case of linking PIC code which is
1861 never referenced by a dynamic object, in which case we
1862 don't need to generate a procedure linkage table entry
1863 after all. */
1864
1865 /* If this is a local symbol, we resolve it directly without
1866 creating a procedure linkage table entry. */
1867 if (h == NULL)
1868 continue;
1869
1870 h->needs_plt = 1;
1871 h->plt.refcount += 1;
1872 break;
1873
1874 case R_X86_64_PLTOFF64:
1875 /* This tries to form the 'address' of a function relative
1876 to GOT. For global symbols we need a PLT entry. */
1877 if (h != NULL)
1878 {
1879 h->needs_plt = 1;
1880 h->plt.refcount += 1;
1881 }
1882 goto create_got;
1883
1884 case R_X86_64_SIZE32:
1885 case R_X86_64_SIZE64:
1886 size_reloc = TRUE;
1887 goto do_size;
1888
1889 case R_X86_64_32:
1890 if (!ABI_64_P (abfd))
1891 goto pointer;
1892 case R_X86_64_8:
1893 case R_X86_64_16:
1894 case R_X86_64_32S:
1895 /* Let's help debug shared library creation. These relocs
1896 cannot be used in shared libs. Don't error out for
1897 sections we don't care about, such as debug sections or
1898 non-constant sections. */
1899 if (info->shared
1900 && (sec->flags & SEC_ALLOC) != 0
1901 && (sec->flags & SEC_READONLY) != 0)
1902 {
1903 if (h)
1904 name = h->root.root.string;
1905 else
1906 name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL);
1907 (*_bfd_error_handler)
1908 (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
1909 abfd, x86_64_elf_howto_table[r_type].name, name);
1910 bfd_set_error (bfd_error_bad_value);
1911 return FALSE;
1912 }
1913 /* Fall through. */
1914
1915 case R_X86_64_PC8:
1916 case R_X86_64_PC16:
1917 case R_X86_64_PC32:
1918 case R_X86_64_PC32_BND:
1919 case R_X86_64_PC64:
1920 case R_X86_64_64:
1921 pointer:
1922 if (h != NULL && info->executable)
1923 {
1924 /* If this reloc is in a read-only section, we might
1925 need a copy reloc. We can't check reliably at this
1926 stage whether the section is read-only, as input
1927 sections have not yet been mapped to output sections.
1928 Tentatively set the flag for now, and correct in
1929 adjust_dynamic_symbol. */
1930 h->non_got_ref = 1;
1931
1932 /* We may need a .plt entry if the function this reloc
1933 refers to is in a shared lib. */
1934 h->plt.refcount += 1;
1935 if (r_type != R_X86_64_PC32
1936 && r_type != R_X86_64_PC32_BND
1937 && r_type != R_X86_64_PC64)
1938 h->pointer_equality_needed = 1;
1939 }
1940
1941 size_reloc = FALSE;
1942 do_size:
1943 /* If we are creating a shared library, and this is a reloc
1944 against a global symbol, or a non PC relative reloc
1945 against a local symbol, then we need to copy the reloc
1946 into the shared library. However, if we are linking with
1947 -Bsymbolic, we do not need to copy a reloc against a
1948 global symbol which is defined in an object we are
1949 including in the link (i.e., DEF_REGULAR is set). At
1950 this point we have not seen all the input files, so it is
1951 possible that DEF_REGULAR is not set now but will be set
1952 later (it is never cleared). In case of a weak definition,
1953 DEF_REGULAR may be cleared later by a strong definition in
1954 a shared library. We account for that possibility below by
1955 	     storing information in the dyn_relocs field of the hash
1956 table entry. A similar situation occurs when creating
1957 shared libraries and symbol visibility changes render the
1958 symbol local. We allow copy relocs for non-GOT pc-relative
1959 	     relocations.
1960
1961 If on the other hand, we are creating an executable, we
1962 may need to keep relocations for symbols satisfied by a
1963 dynamic library if we manage to avoid copy relocs for the
1964 symbol. */
1965 if ((info->shared
1966 && (sec->flags & SEC_ALLOC) != 0
1967 && (! IS_X86_64_PCREL_TYPE (r_type)
1968 || (h != NULL
1969 && !h->non_got_ref
1970 && (! SYMBOLIC_BIND (info, h)
1971 || h->root.type == bfd_link_hash_defweak
1972 || !h->def_regular))))
1973 || (ELIMINATE_COPY_RELOCS
1974 && !info->shared
1975 && (sec->flags & SEC_ALLOC) != 0
1976 && h != NULL
1977 && (h->root.type == bfd_link_hash_defweak
1978 || !h->def_regular)))
1979 {
1980 struct elf_dyn_relocs *p;
1981 struct elf_dyn_relocs **head;
1982
1983 /* We must copy these reloc types into the output file.
1984 Create a reloc section in dynobj and make room for
1985 this reloc. */
1986 if (sreloc == NULL)
1987 {
1988 if (htab->elf.dynobj == NULL)
1989 htab->elf.dynobj = abfd;
1990
1991 sreloc = _bfd_elf_make_dynamic_reloc_section
1992 (sec, htab->elf.dynobj, ABI_64_P (abfd) ? 3 : 2,
1993 abfd, /*rela?*/ TRUE);
1994
1995 if (sreloc == NULL)
1996 return FALSE;
1997 }
1998
1999 /* If this is a global symbol, we count the number of
2000 relocations we need for this symbol. */
2001 if (h != NULL)
2002 {
2003 head = &((struct elf_x86_64_link_hash_entry *) h)->dyn_relocs;
2004 }
2005 else
2006 {
2007 /* Track dynamic relocs needed for local syms too.
2008 We really need local syms available to do this
2009 easily. Oh well. */
2010 asection *s;
2011 void **vpp;
2012
2013 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
2014 abfd, r_symndx);
2015 if (isym == NULL)
2016 return FALSE;
2017
2018 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
2019 if (s == NULL)
2020 s = sec;
2021
2022 /* Beware of type punned pointers vs strict aliasing
2023 rules. */
2024 vpp = &(elf_section_data (s)->local_dynrel);
2025 head = (struct elf_dyn_relocs **)vpp;
2026 }
2027
2028 p = *head;
2029 if (p == NULL || p->sec != sec)
2030 {
2031 bfd_size_type amt = sizeof *p;
2032
2033 p = ((struct elf_dyn_relocs *)
2034 bfd_alloc (htab->elf.dynobj, amt));
2035 if (p == NULL)
2036 return FALSE;
2037 p->next = *head;
2038 *head = p;
2039 p->sec = sec;
2040 p->count = 0;
2041 p->pc_count = 0;
2042 }
2043
2044 p->count += 1;
2045 /* Count size relocation as PC-relative relocation. */
2046 if (IS_X86_64_PCREL_TYPE (r_type) || size_reloc)
2047 p->pc_count += 1;
2048 }
2049 break;
2050
2051 /* This relocation describes the C++ object vtable hierarchy.
2052 Reconstruct it for later use during GC. */
2053 case R_X86_64_GNU_VTINHERIT:
2054 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
2055 return FALSE;
2056 break;
2057
2058 /* This relocation describes which C++ vtable entries are actually
2059 used. Record for later use during GC. */
2060 case R_X86_64_GNU_VTENTRY:
2061 BFD_ASSERT (h != NULL);
2062 if (h != NULL
2063 && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_addend))
2064 return FALSE;
2065 break;
2066
2067 default:
2068 break;
2069 }
2070
2071 if (use_plt_got
2072 && h != NULL
2073 && h->plt.refcount > 0
2074 && h->got.refcount > 0
2075 && htab->plt_got == NULL)
2076 {
2077 /* Create the GOT procedure linkage table. */
2078 unsigned int plt_got_align;
2079 const struct elf_backend_data *bed;
2080
2081 bed = get_elf_backend_data (info->output_bfd);
2082 BFD_ASSERT (sizeof (elf_x86_64_legacy_plt2_entry) == 8
2083 && (sizeof (elf_x86_64_bnd_plt2_entry)
2084 == sizeof (elf_x86_64_legacy_plt2_entry)));
2085 plt_got_align = 3;
2086
2087 if (htab->elf.dynobj == NULL)
2088 htab->elf.dynobj = abfd;
2089 htab->plt_got
2090 = bfd_make_section_anyway_with_flags (htab->elf.dynobj,
2091 ".plt.got",
2092 (bed->dynamic_sec_flags
2093 | SEC_ALLOC
2094 | SEC_CODE
2095 | SEC_LOAD
2096 | SEC_READONLY));
2097 if (htab->plt_got == NULL
2098 || !bfd_set_section_alignment (htab->elf.dynobj,
2099 htab->plt_got,
2100 plt_got_align))
2101 return FALSE;
2102 }
2103 }
2104
2105 return TRUE;
2106 }
2107
2108 /* Return the section that should be marked against GC for a given
2109 relocation. */
2110
2111 static asection *
2112 elf_x86_64_gc_mark_hook (asection *sec,
2113 struct bfd_link_info *info,
2114 Elf_Internal_Rela *rel,
2115 struct elf_link_hash_entry *h,
2116 Elf_Internal_Sym *sym)
2117 {
2118 if (h != NULL)
2119 switch (ELF32_R_TYPE (rel->r_info))
2120 {
2121 case R_X86_64_GNU_VTINHERIT:
2122 case R_X86_64_GNU_VTENTRY:
2123 return NULL;
2124 }
2125
2126 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
2127 }
2128
2129 /* Update the got entry reference counts for the section being removed. */
2130
2131 static bfd_boolean
2132 elf_x86_64_gc_sweep_hook (bfd *abfd, struct bfd_link_info *info,
2133 asection *sec,
2134 const Elf_Internal_Rela *relocs)
2135 {
2136 struct elf_x86_64_link_hash_table *htab;
2137 Elf_Internal_Shdr *symtab_hdr;
2138 struct elf_link_hash_entry **sym_hashes;
2139 bfd_signed_vma *local_got_refcounts;
2140 const Elf_Internal_Rela *rel, *relend;
2141
2142 if (info->relocatable)
2143 return TRUE;
2144
2145 htab = elf_x86_64_hash_table (info);
2146 if (htab == NULL)
2147 return FALSE;
2148
2149 elf_section_data (sec)->local_dynrel = NULL;
2150
2151 symtab_hdr = &elf_symtab_hdr (abfd);
2152 sym_hashes = elf_sym_hashes (abfd);
2153 local_got_refcounts = elf_local_got_refcounts (abfd);
2154
2155 htab = elf_x86_64_hash_table (info);
2156 relend = relocs + sec->reloc_count;
2157 for (rel = relocs; rel < relend; rel++)
2158 {
2159 unsigned long r_symndx;
2160 unsigned int r_type;
2161 struct elf_link_hash_entry *h = NULL;
2162
2163 r_symndx = htab->r_sym (rel->r_info);
2164 if (r_symndx >= symtab_hdr->sh_info)
2165 {
2166 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
2167 while (h->root.type == bfd_link_hash_indirect
2168 || h->root.type == bfd_link_hash_warning)
2169 h = (struct elf_link_hash_entry *) h->root.u.i.link;
2170 }
2171 else
2172 {
2173 /* A local symbol. */
2174 Elf_Internal_Sym *isym;
2175
2176 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
2177 abfd, r_symndx);
2178
2179 /* Check relocation against local STT_GNU_IFUNC symbol. */
2180 if (isym != NULL
2181 && ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
2182 {
2183 h = elf_x86_64_get_local_sym_hash (htab, abfd, rel, FALSE);
2184 if (h == NULL)
2185 abort ();
2186 }
2187 }
2188
2189 if (h)
2190 {
2191 struct elf_x86_64_link_hash_entry *eh;
2192 struct elf_dyn_relocs **pp;
2193 struct elf_dyn_relocs *p;
2194
2195 eh = (struct elf_x86_64_link_hash_entry *) h;
2196
2197 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; pp = &p->next)
2198 if (p->sec == sec)
2199 {
2200 /* Everything must go for SEC. */
2201 *pp = p->next;
2202 break;
2203 }
2204 }
2205
2206 r_type = ELF32_R_TYPE (rel->r_info);
2207 if (! elf_x86_64_tls_transition (info, abfd, sec, NULL,
2208 symtab_hdr, sym_hashes,
2209 &r_type, GOT_UNKNOWN,
2210 rel, relend, h, r_symndx))
2211 return FALSE;
2212
2213 switch (r_type)
2214 {
2215 case R_X86_64_TLSLD:
2216 if (htab->tls_ld_got.refcount > 0)
2217 htab->tls_ld_got.refcount -= 1;
2218 break;
2219
2220 case R_X86_64_TLSGD:
2221 case R_X86_64_GOTPC32_TLSDESC:
2222 case R_X86_64_TLSDESC_CALL:
2223 case R_X86_64_GOTTPOFF:
2224 case R_X86_64_GOT32:
2225 case R_X86_64_GOTPCREL:
2226 case R_X86_64_GOT64:
2227 case R_X86_64_GOTPCREL64:
2228 case R_X86_64_GOTPLT64:
2229 if (h != NULL)
2230 {
2231 if (h->got.refcount > 0)
2232 h->got.refcount -= 1;
2233 if (h->type == STT_GNU_IFUNC)
2234 {
2235 if (h->plt.refcount > 0)
2236 h->plt.refcount -= 1;
2237 }
2238 }
2239 else if (local_got_refcounts != NULL)
2240 {
2241 if (local_got_refcounts[r_symndx] > 0)
2242 local_got_refcounts[r_symndx] -= 1;
2243 }
2244 break;
2245
2246 case R_X86_64_8:
2247 case R_X86_64_16:
2248 case R_X86_64_32:
2249 case R_X86_64_64:
2250 case R_X86_64_32S:
2251 case R_X86_64_PC8:
2252 case R_X86_64_PC16:
2253 case R_X86_64_PC32:
2254 case R_X86_64_PC32_BND:
2255 case R_X86_64_PC64:
2256 case R_X86_64_SIZE32:
2257 case R_X86_64_SIZE64:
2258 if (info->shared
2259 && (h == NULL || h->type != STT_GNU_IFUNC))
2260 break;
2261 /* Fall thru */
2262
2263 case R_X86_64_PLT32:
2264 case R_X86_64_PLT32_BND:
2265 case R_X86_64_PLTOFF64:
2266 if (h != NULL)
2267 {
2268 if (h->plt.refcount > 0)
2269 h->plt.refcount -= 1;
2270 }
2271 break;
2272
2273 default:
2274 break;
2275 }
2276 }
2277
2278 return TRUE;
2279 }
2280
2281 /* Adjust a symbol defined by a dynamic object and referenced by a
2282 regular object. The current definition is in some section of the
2283 dynamic object, but we're not including those sections. We have to
2284 change the definition to something the rest of the link can
2285 understand. */
2286
2287 static bfd_boolean
2288 elf_x86_64_adjust_dynamic_symbol (struct bfd_link_info *info,
2289 struct elf_link_hash_entry *h)
2290 {
2291 struct elf_x86_64_link_hash_table *htab;
2292 asection *s;
2293 struct elf_x86_64_link_hash_entry *eh;
2294 struct elf_dyn_relocs *p;
2295
2296 /* STT_GNU_IFUNC symbol must go through PLT. */
2297 if (h->type == STT_GNU_IFUNC)
2298 {
2299       /* All local STT_GNU_IFUNC references must be treated as local
2300 calls via local PLT. */
2301 if (h->ref_regular
2302 && SYMBOL_CALLS_LOCAL (info, h))
2303 {
2304 bfd_size_type pc_count = 0, count = 0;
2305 struct elf_dyn_relocs **pp;
2306
2307 eh = (struct elf_x86_64_link_hash_entry *) h;
2308 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
2309 {
2310 pc_count += p->pc_count;
2311 p->count -= p->pc_count;
2312 p->pc_count = 0;
2313 count += p->count;
2314 if (p->count == 0)
2315 *pp = p->next;
2316 else
2317 pp = &p->next;
2318 }
2319
2320 if (pc_count || count)
2321 {
2322 h->needs_plt = 1;
2323 h->non_got_ref = 1;
2324 if (h->plt.refcount <= 0)
2325 h->plt.refcount = 1;
2326 else
2327 h->plt.refcount += 1;
2328 }
2329 }
2330
2331 if (h->plt.refcount <= 0)
2332 {
2333 h->plt.offset = (bfd_vma) -1;
2334 h->needs_plt = 0;
2335 }
2336 return TRUE;
2337 }
2338
2339 /* If this is a function, put it in the procedure linkage table. We
2340 will fill in the contents of the procedure linkage table later,
2341 when we know the address of the .got section. */
2342 if (h->type == STT_FUNC
2343 || h->needs_plt)
2344 {
2345 if (h->plt.refcount <= 0
2346 || SYMBOL_CALLS_LOCAL (info, h)
2347 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
2348 && h->root.type == bfd_link_hash_undefweak))
2349 {
2350 /* This case can occur if we saw a PLT32 reloc in an input
2351 file, but the symbol was never referred to by a dynamic
2352 object, or if all references were garbage collected. In
2353 such a case, we don't actually need to build a procedure
2354 linkage table, and we can just do a PC32 reloc instead. */
2355 h->plt.offset = (bfd_vma) -1;
2356 h->needs_plt = 0;
2357 }
2358
2359 return TRUE;
2360 }
2361 else
2362 /* It's possible that we incorrectly decided a .plt reloc was
2363 needed for an R_X86_64_PC32 reloc to a non-function sym in
2364 check_relocs. We can't decide accurately between function and
2365        non-function syms in check_relocs; objects loaded later in
2366 the link may change h->type. So fix it now. */
2367 h->plt.offset = (bfd_vma) -1;
2368
2369 /* If this is a weak symbol, and there is a real definition, the
2370 processor independent code will have arranged for us to see the
2371 real definition first, and we can just use the same value. */
2372 if (h->u.weakdef != NULL)
2373 {
2374 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
2375 || h->u.weakdef->root.type == bfd_link_hash_defweak);
2376 h->root.u.def.section = h->u.weakdef->root.u.def.section;
2377 h->root.u.def.value = h->u.weakdef->root.u.def.value;
2378 if (ELIMINATE_COPY_RELOCS || info->nocopyreloc)
2379 h->non_got_ref = h->u.weakdef->non_got_ref;
2380 return TRUE;
2381 }
2382
2383 /* This is a reference to a symbol defined by a dynamic object which
2384 is not a function. */
2385
2386 /* If we are creating a shared library, we must presume that the
2387 only references to the symbol are via the global offset table.
2388 For such cases we need not do anything here; the relocations will
2389 be handled correctly by relocate_section. */
2390 if (!info->executable)
2391 return TRUE;
2392
2393 /* If there are no references to this symbol that do not use the
2394 GOT, we don't need to generate a copy reloc. */
2395 if (!h->non_got_ref)
2396 return TRUE;
2397
2398 /* If -z nocopyreloc was given, we won't generate them either. */
2399 if (info->nocopyreloc)
2400 {
2401 h->non_got_ref = 0;
2402 return TRUE;
2403 }
2404
2405 if (ELIMINATE_COPY_RELOCS && !info->shared)
2406 {
2407 eh = (struct elf_x86_64_link_hash_entry *) h;
2408 for (p = eh->dyn_relocs; p != NULL; p = p->next)
2409 {
2410 s = p->sec->output_section;
2411 if (s != NULL && (s->flags & SEC_READONLY) != 0)
2412 break;
2413 }
2414
2415 /* If we didn't find any dynamic relocs in read-only sections, then
2416 we'll be keeping the dynamic relocs and avoiding the copy reloc. */
2417 if (p == NULL)
2418 {
2419 h->non_got_ref = 0;
2420 return TRUE;
2421 }
2422 }
2423
2424 /* We must allocate the symbol in our .dynbss section, which will
2425 become part of the .bss section of the executable. There will be
2426 an entry for this symbol in the .dynsym section. The dynamic
2427 object will contain position independent code, so all references
2428 from the dynamic object to this symbol will go through the global
2429 offset table. The dynamic linker will use the .dynsym entry to
2430 determine the address it must put in the global offset table, so
2431 both the dynamic object and the regular object will refer to the
2432 same memory location for the variable. */
2433
2434 htab = elf_x86_64_hash_table (info);
2435 if (htab == NULL)
2436 return FALSE;
2437
2438 /* We must generate a R_X86_64_COPY reloc to tell the dynamic linker
2439 to copy the initial value out of the dynamic object and into the
2440 runtime process image. */
2441 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
2442 {
2443 const struct elf_backend_data *bed;
2444 bed = get_elf_backend_data (info->output_bfd);
2445 htab->srelbss->size += bed->s->sizeof_rela;
2446 h->needs_copy = 1;
2447 }
2448
2449 s = htab->sdynbss;
2450
2451 return _bfd_elf_adjust_dynamic_copy (h, s);
2452 }
2453
2454 /* Allocate space in .plt, .got and associated reloc sections for
2455 dynamic relocs. */
2456
2457 static bfd_boolean
2458 elf_x86_64_allocate_dynrelocs (struct elf_link_hash_entry *h, void * inf)
2459 {
2460 struct bfd_link_info *info;
2461 struct elf_x86_64_link_hash_table *htab;
2462 struct elf_x86_64_link_hash_entry *eh;
2463 struct elf_dyn_relocs *p;
2464 const struct elf_backend_data *bed;
2465 unsigned int plt_entry_size;
2466
2467 if (h->root.type == bfd_link_hash_indirect)
2468 return TRUE;
2469
2470 eh = (struct elf_x86_64_link_hash_entry *) h;
2471
2472 info = (struct bfd_link_info *) inf;
2473 htab = elf_x86_64_hash_table (info);
2474 if (htab == NULL)
2475 return FALSE;
2476 bed = get_elf_backend_data (info->output_bfd);
2477 plt_entry_size = GET_PLT_ENTRY_SIZE (info->output_bfd);
2478
2479 /* We can't use the GOT PLT if pointer equality is needed since
2480      finish_dynamic_symbol won't clear the symbol value and the dynamic
2481 linker won't update the GOT slot. We will get into an infinite
2482 loop at run-time. */
2483 if (htab->plt_got != NULL
2484 && h->type != STT_GNU_IFUNC
2485 && !h->pointer_equality_needed
2486 && h->plt.refcount > 0
2487 && h->got.refcount > 0)
2488 {
2489 /* Don't use the regular PLT if there are both GOT and GOTPLT
2490 	 relocations.  */
2491 h->plt.offset = (bfd_vma) -1;
2492
2493 /* Use the GOT PLT. */
2494 eh->plt_got.refcount = 1;
2495 }
2496
2497 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle it
2498 here if it is defined and referenced in a non-shared object. */
2499 if (h->type == STT_GNU_IFUNC
2500 && h->def_regular)
2501 {
2502 if (_bfd_elf_allocate_ifunc_dyn_relocs (info, h,
2503 &eh->dyn_relocs,
2504 plt_entry_size,
2505 plt_entry_size,
2506 GOT_ENTRY_SIZE))
2507 {
2508 asection *s = htab->plt_bnd;
2509 if (h->plt.offset != (bfd_vma) -1 && s != NULL)
2510 {
2511 /* Use the .plt.bnd section if it is created. */
2512 eh->plt_bnd.offset = s->size;
2513
2514 /* Make room for this entry in the .plt.bnd section. */
2515 s->size += sizeof (elf_x86_64_legacy_plt2_entry);
2516 }
2517
2518 return TRUE;
2519 }
2520 else
2521 return FALSE;
2522 }
2523 else if (htab->elf.dynamic_sections_created
2524 && (h->plt.refcount > 0 || eh->plt_got.refcount > 0))
2525 {
2526 bfd_boolean use_plt_got = eh->plt_got.refcount > 0;
2527
2528 /* Make sure this symbol is output as a dynamic symbol.
2529 Undefined weak syms won't yet be marked as dynamic. */
2530 if (h->dynindx == -1
2531 && !h->forced_local)
2532 {
2533 if (! bfd_elf_link_record_dynamic_symbol (info, h))
2534 return FALSE;
2535 }
2536
2537 if (info->shared
2538 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
2539 {
2540 asection *s = htab->elf.splt;
2541 asection *bnd_s = htab->plt_bnd;
2542 asection *got_s = htab->plt_got;
2543
2544 /* If this is the first .plt entry, make room for the special
2545 first entry. */
2546 if (s->size == 0)
2547 s->size = plt_entry_size;
2548
2549 if (use_plt_got)
2550 eh->plt_got.offset = got_s->size;
2551 else
2552 {
2553 h->plt.offset = s->size;
2554 if (bnd_s)
2555 eh->plt_bnd.offset = bnd_s->size;
2556 }
2557
2558 /* If this symbol is not defined in a regular file, and we are
2559 not generating a shared library, then set the symbol to this
2560 location in the .plt. This is required to make function
2561 pointers compare as equal between the normal executable and
2562 the shared library. */
2563 if (! info->shared
2564 && !h->def_regular)
2565 {
2566 if (use_plt_got)
2567 {
2568 /* We need to make a call to the entry of the GOT PLT
2569 instead of regular PLT entry. */
2570 h->root.u.def.section = got_s;
2571 h->root.u.def.value = eh->plt_got.offset;
2572 }
2573 else
2574 {
2575 if (bnd_s)
2576 {
2577 /* We need to make a call to the entry of the second
2578 PLT instead of regular PLT entry. */
2579 h->root.u.def.section = bnd_s;
2580 h->root.u.def.value = eh->plt_bnd.offset;
2581 }
2582 else
2583 {
2584 h->root.u.def.section = s;
2585 h->root.u.def.value = h->plt.offset;
2586 }
2587 }
2588 }
2589
2590 /* Make room for this entry. */
2591 if (use_plt_got)
2592 got_s->size += sizeof (elf_x86_64_legacy_plt2_entry);
2593 else
2594 {
2595 s->size += plt_entry_size;
2596 if (bnd_s)
2597 bnd_s->size += sizeof (elf_x86_64_legacy_plt2_entry);
2598
2599 /* We also need to make an entry in the .got.plt section,
2600 which will be placed in the .got section by the linker
2601 script. */
2602 htab->elf.sgotplt->size += GOT_ENTRY_SIZE;
2603
2604 /* We also need to make an entry in the .rela.plt
2605 section. */
2606 htab->elf.srelplt->size += bed->s->sizeof_rela;
2607 htab->elf.srelplt->reloc_count++;
2608 }
2609 }
2610 else
2611 {
2612 h->plt.offset = (bfd_vma) -1;
2613 h->needs_plt = 0;
2614 }
2615 }
2616 else
2617 {
2618 h->plt.offset = (bfd_vma) -1;
2619 h->needs_plt = 0;
2620 }
2621
2622 eh->tlsdesc_got = (bfd_vma) -1;
2623
2624 /* If R_X86_64_GOTTPOFF symbol is now local to the binary,
2625 make it a R_X86_64_TPOFF32 requiring no GOT entry. */
2626 if (h->got.refcount > 0
2627 && info->executable
2628 && h->dynindx == -1
2629 && elf_x86_64_hash_entry (h)->tls_type == GOT_TLS_IE)
2630 {
2631 h->got.offset = (bfd_vma) -1;
2632 }
2633 else if (h->got.refcount > 0)
2634 {
2635 asection *s;
2636 bfd_boolean dyn;
2637 int tls_type = elf_x86_64_hash_entry (h)->tls_type;
2638
2639 /* Make sure this symbol is output as a dynamic symbol.
2640 Undefined weak syms won't yet be marked as dynamic. */
2641 if (h->dynindx == -1
2642 && !h->forced_local)
2643 {
2644 if (! bfd_elf_link_record_dynamic_symbol (info, h))
2645 return FALSE;
2646 }
2647
2648 if (GOT_TLS_GDESC_P (tls_type))
2649 {
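	  /* A TLS descriptor occupies two consecutive GOT entries: one
	     for the resolver function and one for its argument.  */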
2650 eh->tlsdesc_got = htab->elf.sgotplt->size
2651 - elf_x86_64_compute_jump_table_size (htab);
2652 htab->elf.sgotplt->size += 2 * GOT_ENTRY_SIZE;
2653 h->got.offset = (bfd_vma) -2;
2654 }
2655 if (! GOT_TLS_GDESC_P (tls_type)
2656 || GOT_TLS_GD_P (tls_type))
2657 {
2658 s = htab->elf.sgot;
2659 h->got.offset = s->size;
2660 s->size += GOT_ENTRY_SIZE;
2661 if (GOT_TLS_GD_P (tls_type))
2662 s->size += GOT_ENTRY_SIZE;
2663 }
2664 dyn = htab->elf.dynamic_sections_created;
2665 /* R_X86_64_TLSGD needs one dynamic relocation if local symbol
2666 and two if global.
2667 R_X86_64_GOTTPOFF needs one dynamic relocation. */
2668 if ((GOT_TLS_GD_P (tls_type) && h->dynindx == -1)
2669 || tls_type == GOT_TLS_IE)
2670 htab->elf.srelgot->size += bed->s->sizeof_rela;
2671 else if (GOT_TLS_GD_P (tls_type))
2672 htab->elf.srelgot->size += 2 * bed->s->sizeof_rela;
2673 else if (! GOT_TLS_GDESC_P (tls_type)
2674 && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
2675 || h->root.type != bfd_link_hash_undefweak)
2676 && (info->shared
2677 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
2678 htab->elf.srelgot->size += bed->s->sizeof_rela;
2679 if (GOT_TLS_GDESC_P (tls_type))
2680 {
2681 htab->elf.srelplt->size += bed->s->sizeof_rela;
2682 htab->tlsdesc_plt = (bfd_vma) -1;
2683 }
2684 }
2685 else
2686 h->got.offset = (bfd_vma) -1;
2687
2688 if (eh->dyn_relocs == NULL)
2689 return TRUE;
2690
2691 /* In the shared -Bsymbolic case, discard space allocated for
2692 dynamic pc-relative relocs against symbols which turn out to be
2693 defined in regular objects. For the normal shared case, discard
2694 space for pc-relative relocs that have become local due to symbol
2695 visibility changes. */
2696
2697 if (info->shared)
2698 {
2699 /* Relocs that use pc_count are those that appear on a call
2700 	 insn, or certain REL relocs that can be generated via assembly.
2701 We want calls to protected symbols to resolve directly to the
2702 function rather than going via the plt. If people want
2703 function pointer comparisons to work as expected then they
2704 should avoid writing weird assembly. */
2705 if (SYMBOL_CALLS_LOCAL (info, h))
2706 {
2707 struct elf_dyn_relocs **pp;
2708
2709 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
2710 {
2711 p->count -= p->pc_count;
2712 p->pc_count = 0;
2713 if (p->count == 0)
2714 *pp = p->next;
2715 else
2716 pp = &p->next;
2717 }
2718 }
2719
2720 /* Also discard relocs on undefined weak syms with non-default
2721 visibility. */
2722 if (eh->dyn_relocs != NULL
2723 && h->root.type == bfd_link_hash_undefweak)
2724 {
2725 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
2726 eh->dyn_relocs = NULL;
2727
2728 /* Make sure undefined weak symbols are output as a dynamic
2729 symbol in PIEs. */
2730 else if (h->dynindx == -1
2731 && ! h->forced_local
2732 && ! bfd_elf_link_record_dynamic_symbol (info, h))
2733 return FALSE;
2734 }
2735
2736 }
2737 else if (ELIMINATE_COPY_RELOCS)
2738 {
2739 /* For the non-shared case, discard space for relocs against
2740 symbols which turn out to need copy relocs or are not
2741 dynamic. */
2742
2743 if (!h->non_got_ref
2744 && ((h->def_dynamic
2745 && !h->def_regular)
2746 || (htab->elf.dynamic_sections_created
2747 && (h->root.type == bfd_link_hash_undefweak
2748 || h->root.type == bfd_link_hash_undefined))))
2749 {
2750 /* Make sure this symbol is output as a dynamic symbol.
2751 Undefined weak syms won't yet be marked as dynamic. */
2752 if (h->dynindx == -1
2753 && ! h->forced_local
2754 && ! bfd_elf_link_record_dynamic_symbol (info, h))
2755 return FALSE;
2756
2757 /* If that succeeded, we know we'll be keeping all the
2758 relocs. */
2759 if (h->dynindx != -1)
2760 goto keep;
2761 }
2762
2763 eh->dyn_relocs = NULL;
2764
2765 keep: ;
2766 }
2767
2768 /* Finally, allocate space. */
2769 for (p = eh->dyn_relocs; p != NULL; p = p->next)
2770 {
2771 asection * sreloc;
2772
2773 sreloc = elf_section_data (p->sec)->sreloc;
2774
2775 BFD_ASSERT (sreloc != NULL);
2776
2777 sreloc->size += p->count * bed->s->sizeof_rela;
2778 }
2779
2780 return TRUE;
2781 }
2782
2783 /* Allocate space in .plt, .got and associated reloc sections for
2784 local dynamic relocs. */
2785
2786 static bfd_boolean
2787 elf_x86_64_allocate_local_dynrelocs (void **slot, void *inf)
2788 {
2789 struct elf_link_hash_entry *h
2790 = (struct elf_link_hash_entry *) *slot;
2791
2792 if (h->type != STT_GNU_IFUNC
2793 || !h->def_regular
2794 || !h->ref_regular
2795 || !h->forced_local
2796 || h->root.type != bfd_link_hash_defined)
2797 abort ();
2798
2799 return elf_x86_64_allocate_dynrelocs (h, inf);
2800 }
2801
2802 /* Find any dynamic relocs that apply to read-only sections. */
2803
2804 static bfd_boolean
2805 elf_x86_64_readonly_dynrelocs (struct elf_link_hash_entry *h,
2806 void * inf)
2807 {
2808 struct elf_x86_64_link_hash_entry *eh;
2809 struct elf_dyn_relocs *p;
2810
2811 /* Skip local IFUNC symbols. */
2812 if (h->forced_local && h->type == STT_GNU_IFUNC)
2813 return TRUE;
2814
2815 eh = (struct elf_x86_64_link_hash_entry *) h;
2816 for (p = eh->dyn_relocs; p != NULL; p = p->next)
2817 {
2818 asection *s = p->sec->output_section;
2819
2820 if (s != NULL && (s->flags & SEC_READONLY) != 0)
2821 {
2822 struct bfd_link_info *info = (struct bfd_link_info *) inf;
2823
2824 info->flags |= DF_TEXTREL;
2825
2826 if (info->warn_shared_textrel && info->shared)
2827 info->callbacks->einfo (_("%P: %B: warning: relocation against `%s' in readonly section `%A'.\n"),
2828 p->sec->owner, h->root.root.string,
2829 p->sec);
2830
2831 /* Not an error, just cut short the traversal. */
2832 return FALSE;
2833 }
2834 }
2835 return TRUE;
2836 }
2837
2838 /* Convert
2839 mov foo@GOTPCREL(%rip), %reg
2840 to
2841 lea foo(%rip), %reg
2842 with the local symbol, foo. */
2843
2844 static bfd_boolean
2845 elf_x86_64_convert_mov_to_lea (bfd *abfd, asection *sec,
2846 struct bfd_link_info *link_info)
2847 {
2848 Elf_Internal_Shdr *symtab_hdr;
2849 Elf_Internal_Rela *internal_relocs;
2850 Elf_Internal_Rela *irel, *irelend;
2851 bfd_byte *contents;
2852 struct elf_x86_64_link_hash_table *htab;
2853 bfd_boolean changed_contents;
2854 bfd_boolean changed_relocs;
2855 bfd_signed_vma *local_got_refcounts;
2856
2857 /* Don't even try to convert non-ELF outputs. */
2858 if (!is_elf_hash_table (link_info->hash))
2859 return FALSE;
2860
2861   /* Nothing to do if there is no code, there are no relocations, or there is no output.  */
2862 if ((sec->flags & (SEC_CODE | SEC_RELOC)) != (SEC_CODE | SEC_RELOC)
2863 || sec->reloc_count == 0
2864 || bfd_is_abs_section (sec->output_section))
2865 return TRUE;
2866
2867 symtab_hdr = &elf_tdata (abfd)->symtab_hdr;
2868
2869 /* Load the relocations for this section. */
2870 internal_relocs = (_bfd_elf_link_read_relocs
2871 (abfd, sec, NULL, (Elf_Internal_Rela *) NULL,
2872 link_info->keep_memory));
2873 if (internal_relocs == NULL)
2874 return FALSE;
2875
2876 htab = elf_x86_64_hash_table (link_info);
2877 changed_contents = FALSE;
2878 changed_relocs = FALSE;
2879 local_got_refcounts = elf_local_got_refcounts (abfd);
2880
2881 /* Get the section contents. */
2882 if (elf_section_data (sec)->this_hdr.contents != NULL)
2883 contents = elf_section_data (sec)->this_hdr.contents;
2884 else
2885 {
2886 if (!bfd_malloc_and_get_section (abfd, sec, &contents))
2887 goto error_return;
2888 }
2889
2890 irelend = internal_relocs + sec->reloc_count;
2891 for (irel = internal_relocs; irel < irelend; irel++)
2892 {
2893 unsigned int r_type = ELF32_R_TYPE (irel->r_info);
2894 unsigned int r_symndx = htab->r_sym (irel->r_info);
2895 unsigned int indx;
2896 struct elf_link_hash_entry *h;
2897
2898 if (r_type != R_X86_64_GOTPCREL)
2899 continue;
2900
2901 /* Get the symbol referred to by the reloc. */
2902 if (r_symndx < symtab_hdr->sh_info)
2903 {
2904 Elf_Internal_Sym *isym;
2905
2906 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
2907 abfd, r_symndx);
2908
2909 /* STT_GNU_IFUNC must keep R_X86_64_GOTPCREL relocation. */
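	  /* Otherwise, if the instruction is a GOT load (opcode 0x8b,
	     MOV reg, r/m), rewrite it to LEA (opcode 0x8d) so the
	     address is computed directly, since the symbol resolves
	     locally.  */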
2910 if (ELF_ST_TYPE (isym->st_info) != STT_GNU_IFUNC
2911 && irel->r_offset >= 2
2912 	      && bfd_get_8 (abfd,
2913 contents + irel->r_offset - 2) == 0x8b)
2914 {
2915 	      bfd_put_8 (abfd, 0x8d,
2916 contents + irel->r_offset - 2);
2917 irel->r_info = htab->r_info (r_symndx, R_X86_64_PC32);
2918 if (local_got_refcounts != NULL
2919 && local_got_refcounts[r_symndx] > 0)
2920 local_got_refcounts[r_symndx] -= 1;
2921 changed_contents = TRUE;
2922 changed_relocs = TRUE;
2923 }
2924 continue;
2925 }
2926
2927 indx = r_symndx - symtab_hdr->sh_info;
2928 h = elf_sym_hashes (abfd)[indx];
2929 BFD_ASSERT (h != NULL);
2930
2931 while (h->root.type == bfd_link_hash_indirect
2932 || h->root.type == bfd_link_hash_warning)
2933 h = (struct elf_link_hash_entry *) h->root.u.i.link;
2934
2935 /* STT_GNU_IFUNC must keep R_X86_64_GOTPCREL relocation. We also
2936 avoid optimizing _DYNAMIC since ld.so may use its link-time
2937 address. */
2938 if (h->def_regular
2939 && h->type != STT_GNU_IFUNC
2940 && h != htab->elf.hdynamic
2941 && SYMBOL_REFERENCES_LOCAL (link_info, h)
2942 && irel->r_offset >= 2
2943 	  && bfd_get_8 (abfd,
2944 contents + irel->r_offset - 2) == 0x8b)
2945 {
2946 	  bfd_put_8 (abfd, 0x8d,
2947 contents + irel->r_offset - 2);
2948 irel->r_info = htab->r_info (r_symndx, R_X86_64_PC32);
2949 if (h->got.refcount > 0)
2950 h->got.refcount -= 1;
2951 changed_contents = TRUE;
2952 changed_relocs = TRUE;
2953 }
2954 }
2955
2956 if (contents != NULL
2957 && elf_section_data (sec)->this_hdr.contents != contents)
2958 {
2959 if (!changed_contents && !link_info->keep_memory)
2960 free (contents);
2961 else
2962 {
2963 /* Cache the section contents for elf_link_input_bfd. */
2964 elf_section_data (sec)->this_hdr.contents = contents;
2965 }
2966 }
2967
2968 if (elf_section_data (sec)->relocs != internal_relocs)
2969 {
2970 if (!changed_relocs)
2971 free (internal_relocs);
2972 else
2973 elf_section_data (sec)->relocs = internal_relocs;
2974 }
2975
2976 return TRUE;
2977
2978 error_return:
2979 if (contents != NULL
2980 && elf_section_data (sec)->this_hdr.contents != contents)
2981 free (contents);
2982 if (internal_relocs != NULL
2983 && elf_section_data (sec)->relocs != internal_relocs)
2984 free (internal_relocs);
2985 return FALSE;
2986 }
2987
2988 /* Set the sizes of the dynamic sections. */
2989
2990 static bfd_boolean
2991 elf_x86_64_size_dynamic_sections (bfd *output_bfd,
2992 struct bfd_link_info *info)
2993 {
2994 struct elf_x86_64_link_hash_table *htab;
2995 bfd *dynobj;
2996 asection *s;
2997 bfd_boolean relocs;
2998 bfd *ibfd;
2999 const struct elf_backend_data *bed;
3000
3001 htab = elf_x86_64_hash_table (info);
3002 if (htab == NULL)
3003 return FALSE;
3004 bed = get_elf_backend_data (output_bfd);
3005
3006 dynobj = htab->elf.dynobj;
3007 if (dynobj == NULL)
3008 abort ();
3009
3010 if (htab->elf.dynamic_sections_created)
3011 {
3012 /* Set the contents of the .interp section to the interpreter. */
3013 if (info->executable)
3014 {
3015 s = bfd_get_linker_section (dynobj, ".interp");
3016 if (s == NULL)
3017 abort ();
3018 s->size = htab->dynamic_interpreter_size;
3019 s->contents = (unsigned char *) htab->dynamic_interpreter;
3020 }
3021 }
3022
3023 /* Set up .got offsets for local syms, and space for local dynamic
3024 relocs. */
3025 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
3026 {
3027 bfd_signed_vma *local_got;
3028 bfd_signed_vma *end_local_got;
3029 char *local_tls_type;
3030 bfd_vma *local_tlsdesc_gotent;
3031 bfd_size_type locsymcount;
3032 Elf_Internal_Shdr *symtab_hdr;
3033 asection *srel;
3034
3035 if (! is_x86_64_elf (ibfd))
3036 continue;
3037
3038 for (s = ibfd->sections; s != NULL; s = s->next)
3039 {
3040 struct elf_dyn_relocs *p;
3041
3042 if (!elf_x86_64_convert_mov_to_lea (ibfd, s, info))
3043 return FALSE;
3044
3045 for (p = (struct elf_dyn_relocs *)
3046 (elf_section_data (s)->local_dynrel);
3047 p != NULL;
3048 p = p->next)
3049 {
3050 if (!bfd_is_abs_section (p->sec)
3051 && bfd_is_abs_section (p->sec->output_section))
3052 {
3053 /* Input section has been discarded, either because
3054 it is a copy of a linkonce section or due to
3055 linker script /DISCARD/, so we'll be discarding
3056 the relocs too. */
3057 }
3058 else if (p->count != 0)
3059 {
3060 srel = elf_section_data (p->sec)->sreloc;
3061 srel->size += p->count * bed->s->sizeof_rela;
3062 if ((p->sec->output_section->flags & SEC_READONLY) != 0
3063 && (info->flags & DF_TEXTREL) == 0)
3064 {
3065 info->flags |= DF_TEXTREL;
3066 if (info->warn_shared_textrel && info->shared)
3067 info->callbacks->einfo (_("%P: %B: warning: relocation in readonly section `%A'.\n"),
3068 p->sec->owner, p->sec);
3069 }
3070 }
3071 }
3072 }
3073
3074 local_got = elf_local_got_refcounts (ibfd);
3075 if (!local_got)
3076 continue;
3077
3078 symtab_hdr = &elf_symtab_hdr (ibfd);
3079 locsymcount = symtab_hdr->sh_info;
3080 end_local_got = local_got + locsymcount;
3081 local_tls_type = elf_x86_64_local_got_tls_type (ibfd);
3082 local_tlsdesc_gotent = elf_x86_64_local_tlsdesc_gotent (ibfd);
3083 s = htab->elf.sgot;
3084 srel = htab->elf.srelgot;
3085 for (; local_got < end_local_got;
3086 ++local_got, ++local_tls_type, ++local_tlsdesc_gotent)
3087 {
3088 *local_tlsdesc_gotent = (bfd_vma) -1;
3089 if (*local_got > 0)
3090 {
3091 if (GOT_TLS_GDESC_P (*local_tls_type))
3092 {
3093 *local_tlsdesc_gotent = htab->elf.sgotplt->size
3094 - elf_x86_64_compute_jump_table_size (htab);
3095 htab->elf.sgotplt->size += 2 * GOT_ENTRY_SIZE;
3096 *local_got = (bfd_vma) -2;
3097 }
3098 if (! GOT_TLS_GDESC_P (*local_tls_type)
3099 || GOT_TLS_GD_P (*local_tls_type))
3100 {
3101 *local_got = s->size;
3102 s->size += GOT_ENTRY_SIZE;
3103 if (GOT_TLS_GD_P (*local_tls_type))
3104 s->size += GOT_ENTRY_SIZE;
3105 }
3106 if (info->shared
3107 || GOT_TLS_GD_ANY_P (*local_tls_type)
3108 || *local_tls_type == GOT_TLS_IE)
3109 {
3110 if (GOT_TLS_GDESC_P (*local_tls_type))
3111 {
3112 htab->elf.srelplt->size
3113 += bed->s->sizeof_rela;
3114 htab->tlsdesc_plt = (bfd_vma) -1;
3115 }
3116 if (! GOT_TLS_GDESC_P (*local_tls_type)
3117 || GOT_TLS_GD_P (*local_tls_type))
3118 srel->size += bed->s->sizeof_rela;
3119 }
3120 }
3121 else
3122 *local_got = (bfd_vma) -1;
3123 }
3124 }
3125
3126 if (htab->tls_ld_got.refcount > 0)
3127 {
3128 /* Allocate 2 got entries and 1 dynamic reloc for R_X86_64_TLSLD
3129 relocs. */
3130 htab->tls_ld_got.offset = htab->elf.sgot->size;
3131 htab->elf.sgot->size += 2 * GOT_ENTRY_SIZE;
3132 htab->elf.srelgot->size += bed->s->sizeof_rela;
3133 }
3134 else
3135 htab->tls_ld_got.offset = -1;
3136
3137 /* Allocate global sym .plt and .got entries, and space for global
3138 sym dynamic relocs. */
3139 elf_link_hash_traverse (&htab->elf, elf_x86_64_allocate_dynrelocs,
3140 info);
3141
3142 /* Allocate .plt and .got entries, and space for local symbols. */
3143 htab_traverse (htab->loc_hash_table,
3144 elf_x86_64_allocate_local_dynrelocs,
3145 info);
3146
3147 /* For every jump slot reserved in the sgotplt, reloc_count is
3148 incremented. However, when we reserve space for TLS descriptors,
3149 it's not incremented, so in order to compute the space reserved
3150 for them, it suffices to multiply the reloc count by the jump
3151 slot size.
3152
3153 PR ld/13302: We start next_irelative_index at the end of .rela.plt
3154 so that R_X86_64_IRELATIVE entries come last. */
3155 if (htab->elf.srelplt)
3156 {
3157 htab->sgotplt_jump_table_size
3158 = elf_x86_64_compute_jump_table_size (htab);
3159 htab->next_irelative_index = htab->elf.srelplt->reloc_count - 1;
3160 }
3161 else if (htab->elf.irelplt)
3162 htab->next_irelative_index = htab->elf.irelplt->reloc_count - 1;
3163
3164 if (htab->tlsdesc_plt)
3165 {
3166 /* If we're not using lazy TLS relocations, don't generate the
3167 PLT and GOT entries they require. */
3168 if ((info->flags & DF_BIND_NOW))
3169 htab->tlsdesc_plt = 0;
3170 else
3171 {
3172 htab->tlsdesc_got = htab->elf.sgot->size;
3173 htab->elf.sgot->size += GOT_ENTRY_SIZE;
3174 /* Reserve room for the initial entry.
3175 FIXME: we could probably do away with it in this case. */
3176 if (htab->elf.splt->size == 0)
3177 htab->elf.splt->size += GET_PLT_ENTRY_SIZE (output_bfd);
3178 htab->tlsdesc_plt = htab->elf.splt->size;
3179 htab->elf.splt->size += GET_PLT_ENTRY_SIZE (output_bfd);
3180 }
3181 }
3182
3183 if (htab->elf.sgotplt)
3184 {
3185       /* Don't allocate the .got.plt section if there are no GOT or PLT
3186 	 entries and there is no reference to _GLOBAL_OFFSET_TABLE_.  */
3187 if ((htab->elf.hgot == NULL
3188 || !htab->elf.hgot->ref_regular_nonweak)
3189 && (htab->elf.sgotplt->size
3190 == get_elf_backend_data (output_bfd)->got_header_size)
3191 && (htab->elf.splt == NULL
3192 || htab->elf.splt->size == 0)
3193 && (htab->elf.sgot == NULL
3194 || htab->elf.sgot->size == 0)
3195 && (htab->elf.iplt == NULL
3196 || htab->elf.iplt->size == 0)
3197 && (htab->elf.igotplt == NULL
3198 || htab->elf.igotplt->size == 0))
3199 htab->elf.sgotplt->size = 0;
3200 }
3201
3202 if (htab->plt_eh_frame != NULL
3203 && htab->elf.splt != NULL
3204 && htab->elf.splt->size != 0
3205 && !bfd_is_abs_section (htab->elf.splt->output_section)
3206 && _bfd_elf_eh_frame_present (info))
3207 {
3208 const struct elf_x86_64_backend_data *arch_data
3209 = get_elf_x86_64_arch_data (bed);
3210 htab->plt_eh_frame->size = arch_data->eh_frame_plt_size;
3211 }
3212
3213   /* We have now determined the sizes of the various dynamic sections.
3214 Allocate memory for them. */
3215 relocs = FALSE;
3216 for (s = dynobj->sections; s != NULL; s = s->next)
3217 {
3218 if ((s->flags & SEC_LINKER_CREATED) == 0)
3219 continue;
3220
3221 if (s == htab->elf.splt
3222 || s == htab->elf.sgot
3223 || s == htab->elf.sgotplt
3224 || s == htab->elf.iplt
3225 || s == htab->elf.igotplt
3226 || s == htab->plt_bnd
3227 || s == htab->plt_got
3228 || s == htab->plt_eh_frame
3229 || s == htab->sdynbss)
3230 {
3231 /* Strip this section if we don't need it; see the
3232 comment below. */
3233 }
3234 else if (CONST_STRNEQ (bfd_get_section_name (dynobj, s), ".rela"))
3235 {
3236 if (s->size != 0 && s != htab->elf.srelplt)
3237 relocs = TRUE;
3238
3239 /* We use the reloc_count field as a counter if we need
3240 to copy relocs into the output file. */
3241 if (s != htab->elf.srelplt)
3242 s->reloc_count = 0;
3243 }
3244 else
3245 {
3246 /* It's not one of our sections, so don't allocate space. */
3247 continue;
3248 }
3249
3250 if (s->size == 0)
3251 {
3252 /* If we don't need this section, strip it from the
3253 output file. This is mostly to handle .rela.bss and
3254 .rela.plt. We must create both sections in
3255 create_dynamic_sections, because they must be created
3256 before the linker maps input sections to output
3257 sections. The linker does that before
3258 adjust_dynamic_symbol is called, and it is that
3259 function which decides whether anything needs to go
3260 into these sections. */
3261
3262 s->flags |= SEC_EXCLUDE;
3263 continue;
3264 }
3265
3266 if ((s->flags & SEC_HAS_CONTENTS) == 0)
3267 continue;
3268
3269 /* Allocate memory for the section contents. We use bfd_zalloc
3270 here in case unused entries are not reclaimed before the
3271 section's contents are written out. This should not happen,
3272 but this way if it does, we get a R_X86_64_NONE reloc instead
3273 of garbage. */
3274 s->contents = (bfd_byte *) bfd_zalloc (dynobj, s->size);
3275 if (s->contents == NULL)
3276 return FALSE;
3277 }
3278
3279 if (htab->plt_eh_frame != NULL
3280 && htab->plt_eh_frame->contents != NULL)
3281 {
3282 const struct elf_x86_64_backend_data *arch_data
3283 = get_elf_x86_64_arch_data (bed);
3284
3285 memcpy (htab->plt_eh_frame->contents,
3286 arch_data->eh_frame_plt, htab->plt_eh_frame->size);
3287 bfd_put_32 (dynobj, htab->elf.splt->size,
3288 htab->plt_eh_frame->contents + PLT_FDE_LEN_OFFSET);
3289 }
3290
3291 if (htab->elf.dynamic_sections_created)
3292 {
3293 /* Add some entries to the .dynamic section. We fill in the
3294 values later, in elf_x86_64_finish_dynamic_sections, but we
3295 must add the entries now so that we get the correct size for
3296 the .dynamic section. The DT_DEBUG entry is filled in by the
3297 dynamic linker and used by the debugger. */
3298 #define add_dynamic_entry(TAG, VAL) \
3299 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
3300
3301 if (info->executable)
3302 {
3303 if (!add_dynamic_entry (DT_DEBUG, 0))
3304 return FALSE;
3305 }
3306
3307 if (htab->elf.splt->size != 0)
3308 {
3309 if (!add_dynamic_entry (DT_PLTGOT, 0)
3310 || !add_dynamic_entry (DT_PLTRELSZ, 0)
3311 || !add_dynamic_entry (DT_PLTREL, DT_RELA)
3312 || !add_dynamic_entry (DT_JMPREL, 0))
3313 return FALSE;
3314
3315 if (htab->tlsdesc_plt
3316 && (!add_dynamic_entry (DT_TLSDESC_PLT, 0)
3317 || !add_dynamic_entry (DT_TLSDESC_GOT, 0)))
3318 return FALSE;
3319 }
3320
3321 if (relocs)
3322 {
3323 if (!add_dynamic_entry (DT_RELA, 0)
3324 || !add_dynamic_entry (DT_RELASZ, 0)
3325 || !add_dynamic_entry (DT_RELAENT, bed->s->sizeof_rela))
3326 return FALSE;
3327
3328 /* If any dynamic relocs apply to a read-only section,
3329 then we need a DT_TEXTREL entry. */
3330 if ((info->flags & DF_TEXTREL) == 0)
3331 elf_link_hash_traverse (&htab->elf,
3332 elf_x86_64_readonly_dynrelocs,
3333 info);
3334
3335 if ((info->flags & DF_TEXTREL) != 0)
3336 {
3337 if (!add_dynamic_entry (DT_TEXTREL, 0))
3338 return FALSE;
3339 }
3340 }
3341 }
3342 #undef add_dynamic_entry
3343
3344 return TRUE;
3345 }
3346
3347 static bfd_boolean
3348 elf_x86_64_always_size_sections (bfd *output_bfd,
3349 struct bfd_link_info *info)
3350 {
3351 asection *tls_sec = elf_hash_table (info)->tls_sec;
3352
3353 if (tls_sec)
3354 {
3355 struct elf_link_hash_entry *tlsbase;
3356
3357 tlsbase = elf_link_hash_lookup (elf_hash_table (info),
3358 "_TLS_MODULE_BASE_",
3359 FALSE, FALSE, FALSE);
3360
3361 if (tlsbase && tlsbase->type == STT_TLS)
3362 {
3363 struct elf_x86_64_link_hash_table *htab;
3364 struct bfd_link_hash_entry *bh = NULL;
3365 const struct elf_backend_data *bed
3366 = get_elf_backend_data (output_bfd);
3367
3368 htab = elf_x86_64_hash_table (info);
3369 if (htab == NULL)
3370 return FALSE;
3371
3372 if (!(_bfd_generic_link_add_one_symbol
3373 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
3374 tls_sec, 0, NULL, FALSE,
3375 bed->collect, &bh)))
3376 return FALSE;
3377
3378 htab->tls_module_base = bh;
3379
3380 tlsbase = (struct elf_link_hash_entry *)bh;
3381 tlsbase->def_regular = 1;
3382 tlsbase->other = STV_HIDDEN;
3383 (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
3384 }
3385 }
3386
3387 return TRUE;
3388 }
3389
3390 /* _TLS_MODULE_BASE_ needs to be treated specially when linking
3391    executables.  Rather than setting it to the beginning of the TLS
3392    section, we have to set it to the end.  This function may be called
3393    multiple times; it is idempotent.  */
3394
3395 static void
3396 elf_x86_64_set_tls_module_base (struct bfd_link_info *info)
3397 {
3398 struct elf_x86_64_link_hash_table *htab;
3399 struct bfd_link_hash_entry *base;
3400
3401 if (!info->executable)
3402 return;
3403
3404 htab = elf_x86_64_hash_table (info);
3405 if (htab == NULL)
3406 return;
3407
3408 base = htab->tls_module_base;
3409 if (base == NULL)
3410 return;
3411
3412 base->u.def.value = htab->elf.tls_size;
3413 }
3414
3415 /* Return the base VMA address which should be subtracted from real addresses
3416 when resolving @dtpoff relocation.
3417 This is PT_TLS segment p_vaddr. */
3418
3419 static bfd_vma
3420 elf_x86_64_dtpoff_base (struct bfd_link_info *info)
3421 {
3422 /* If tls_sec is NULL, we should have signalled an error already. */
3423 if (elf_hash_table (info)->tls_sec == NULL)
3424 return 0;
3425 return elf_hash_table (info)->tls_sec->vma;
3426 }
3427
3428 /* Return the relocation value for @tpoff relocation
3429 if STT_TLS virtual address is ADDRESS. */
3430
3431 static bfd_vma
3432 elf_x86_64_tpoff (struct bfd_link_info *info, bfd_vma address)
3433 {
3434 struct elf_link_hash_table *htab = elf_hash_table (info);
3435 const struct elf_backend_data *bed = get_elf_backend_data (info->output_bfd);
3436 bfd_vma static_tls_size;
3437
3438 /* If tls_segment is NULL, we should have signalled an error already. */
3439 if (htab->tls_sec == NULL)
3440 return 0;
3441
3442 /* Consider special static TLS alignment requirements. */
3443 static_tls_size = BFD_ALIGN (htab->tls_size, bed->static_tls_alignment);
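  /* On x86-64 the thread pointer points just past the static TLS
     block, so @tpoff values are negative offsets from it.  */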
3444 return address - static_tls_size - htab->tls_sec->vma;
3445 }
3446
3447 /* Is the instruction before OFFSET in CONTENTS a 32bit relative
3448 branch? */
3449
3450 static bfd_boolean
3451 is_32bit_relative_branch (bfd_byte *contents, bfd_vma offset)
3452 {
3453 /* Opcode Instruction
3454 0xe8 call
3455 0xe9 jump
3456 0x0f 0x8x conditional jump */
3457 return ((offset > 0
3458 && (contents [offset - 1] == 0xe8
3459 || contents [offset - 1] == 0xe9))
3460 || (offset > 1
3461 && contents [offset - 2] == 0x0f
3462 && (contents [offset - 1] & 0xf0) == 0x80));
3463 }
3464
3465 /* Relocate an x86_64 ELF section. */
3466
3467 static bfd_boolean
3468 elf_x86_64_relocate_section (bfd *output_bfd,
3469 struct bfd_link_info *info,
3470 bfd *input_bfd,
3471 asection *input_section,
3472 bfd_byte *contents,
3473 Elf_Internal_Rela *relocs,
3474 Elf_Internal_Sym *local_syms,
3475 asection **local_sections)
3476 {
3477 struct elf_x86_64_link_hash_table *htab;
3478 Elf_Internal_Shdr *symtab_hdr;
3479 struct elf_link_hash_entry **sym_hashes;
3480 bfd_vma *local_got_offsets;
3481 bfd_vma *local_tlsdesc_gotents;
3482 Elf_Internal_Rela *rel;
3483 Elf_Internal_Rela *relend;
3484 const unsigned int plt_entry_size = GET_PLT_ENTRY_SIZE (info->output_bfd);
3485
3486 BFD_ASSERT (is_x86_64_elf (input_bfd));
3487
3488 htab = elf_x86_64_hash_table (info);
3489 if (htab == NULL)
3490 return FALSE;
3491 symtab_hdr = &elf_symtab_hdr (input_bfd);
3492 sym_hashes = elf_sym_hashes (input_bfd);
3493 local_got_offsets = elf_local_got_offsets (input_bfd);
3494 local_tlsdesc_gotents = elf_x86_64_local_tlsdesc_gotent (input_bfd);
3495
3496 elf_x86_64_set_tls_module_base (info);
3497
3498 rel = relocs;
3499 relend = relocs + input_section->reloc_count;
3500 for (; rel < relend; rel++)
3501 {
3502 unsigned int r_type;
3503 reloc_howto_type *howto;
3504 unsigned long r_symndx;
3505 struct elf_link_hash_entry *h;
3506 struct elf_x86_64_link_hash_entry *eh;
3507 Elf_Internal_Sym *sym;
3508 asection *sec;
3509 bfd_vma off, offplt, plt_offset;
3510 bfd_vma relocation;
3511 bfd_boolean unresolved_reloc;
3512 bfd_reloc_status_type r;
3513 int tls_type;
3514 asection *base_got, *resolved_plt;
3515 bfd_vma st_size;
3516
3517 r_type = ELF32_R_TYPE (rel->r_info);
3518 if (r_type == (int) R_X86_64_GNU_VTINHERIT
3519 || r_type == (int) R_X86_64_GNU_VTENTRY)
3520 continue;
3521
3522 if (r_type >= (int) R_X86_64_standard)
3523 {
3524 (*_bfd_error_handler)
3525 (_("%B: unrecognized relocation (0x%x) in section `%A'"),
3526 input_bfd, input_section, r_type);
3527 bfd_set_error (bfd_error_bad_value);
3528 return FALSE;
3529 }
3530
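      /* R_X86_64_32 on x32 is handled by the special howto kept at the
	 end of the table rather than by the generic 64-bit entry.  */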
3531 if (r_type != (int) R_X86_64_32
3532 || ABI_64_P (output_bfd))
3533 howto = x86_64_elf_howto_table + r_type;
3534 else
3535 howto = (x86_64_elf_howto_table
3536 + ARRAY_SIZE (x86_64_elf_howto_table) - 1);
3537 r_symndx = htab->r_sym (rel->r_info);
3538 h = NULL;
3539 sym = NULL;
3540 sec = NULL;
3541 unresolved_reloc = FALSE;
3542 if (r_symndx < symtab_hdr->sh_info)
3543 {
3544 sym = local_syms + r_symndx;
3545 sec = local_sections[r_symndx];
3546
3547 relocation = _bfd_elf_rela_local_sym (output_bfd, sym,
3548 &sec, rel);
3549 st_size = sym->st_size;
3550
3551 /* Relocate against local STT_GNU_IFUNC symbol. */
3552 if (!info->relocatable
3553 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
3554 {
3555 h = elf_x86_64_get_local_sym_hash (htab, input_bfd,
3556 rel, FALSE);
3557 if (h == NULL)
3558 abort ();
3559
3560 /* Set STT_GNU_IFUNC symbol value. */
3561 h->root.u.def.value = sym->st_value;
3562 h->root.u.def.section = sec;
3563 }
3564 }
3565 else
3566 {
3567 bfd_boolean warned ATTRIBUTE_UNUSED;
3568 bfd_boolean ignored ATTRIBUTE_UNUSED;
3569
3570 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
3571 r_symndx, symtab_hdr, sym_hashes,
3572 h, sec, relocation,
3573 unresolved_reloc, warned, ignored);
3574 st_size = h->size;
3575 }
3576
3577 if (sec != NULL && discarded_section (sec))
3578 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
3579 rel, 1, relend, howto, 0, contents);
3580
3581 if (info->relocatable)
3582 continue;
3583
3584 if (rel->r_addend == 0 && !ABI_64_P (output_bfd))
3585 {
3586 if (r_type == R_X86_64_64)
3587 {
3588 /* For x32, treat R_X86_64_64 like R_X86_64_32 and
3589 zero-extend it to 64bit if addend is zero. */
3590 r_type = R_X86_64_32;
3591 memset (contents + rel->r_offset + 4, 0, 4);
3592 }
3593 else if (r_type == R_X86_64_SIZE64)
3594 {
3595 /* For x32, treat R_X86_64_SIZE64 like R_X86_64_SIZE32 and
3596 zero-extend it to 64bit if addend is zero. */
3597 r_type = R_X86_64_SIZE32;
3598 memset (contents + rel->r_offset + 4, 0, 4);
3599 }
3600 }
3601
3602 eh = (struct elf_x86_64_link_hash_entry *) h;
3603
3604 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle
3605 it here if it is defined in a non-shared object. */
3606 if (h != NULL
3607 && h->type == STT_GNU_IFUNC
3608 && h->def_regular)
3609 {
3610 bfd_vma plt_index;
3611 const char *name;
3612
3613 if ((input_section->flags & SEC_ALLOC) == 0
3614 || h->plt.offset == (bfd_vma) -1)
3615 abort ();
3616
3617 /* STT_GNU_IFUNC symbol must go through PLT. */
3618 if (htab->elf.splt != NULL)
3619 {
3620 if (htab->plt_bnd != NULL)
3621 {
3622 resolved_plt = htab->plt_bnd;
3623 plt_offset = eh->plt_bnd.offset;
3624 }
3625 else
3626 {
3627 resolved_plt = htab->elf.splt;
3628 plt_offset = h->plt.offset;
3629 }
3630 }
3631 else
3632 {
3633 resolved_plt = htab->elf.iplt;
3634 plt_offset = h->plt.offset;
3635 }
3636
3637 relocation = (resolved_plt->output_section->vma
3638 + resolved_plt->output_offset + plt_offset);
3639
3640 switch (r_type)
3641 {
3642 default:
3643 if (h->root.root.string)
3644 name = h->root.root.string;
3645 else
3646 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
3647 NULL);
3648 (*_bfd_error_handler)
3649 (_("%B: relocation %s against STT_GNU_IFUNC "
3650 "symbol `%s' isn't handled by %s"), input_bfd,
3651 x86_64_elf_howto_table[r_type].name,
3652 name, __FUNCTION__);
3653 bfd_set_error (bfd_error_bad_value);
3654 return FALSE;
3655
3656 case R_X86_64_32S:
3657 if (info->shared)
3658 abort ();
3659 goto do_relocation;
3660
3661 case R_X86_64_32:
3662 if (ABI_64_P (output_bfd))
3663 goto do_relocation;
3664 /* FALLTHROUGH */
3665 case R_X86_64_64:
3666 if (rel->r_addend != 0)
3667 {
3668 if (h->root.root.string)
3669 name = h->root.root.string;
3670 else
3671 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
3672 sym, NULL);
3673 (*_bfd_error_handler)
3674 (_("%B: relocation %s against STT_GNU_IFUNC "
3675 "symbol `%s' has non-zero addend: %d"),
3676 input_bfd, x86_64_elf_howto_table[r_type].name,
3677 name, rel->r_addend);
3678 bfd_set_error (bfd_error_bad_value);
3679 return FALSE;
3680 }
3681
3682 	      /* Generate a dynamic relocation only when there is a
3683 non-GOT reference in a shared object. */
3684 if (info->shared && h->non_got_ref)
3685 {
3686 Elf_Internal_Rela outrel;
3687 asection *sreloc;
3688
3689 /* Need a dynamic relocation to get the real function
3690 address. */
3691 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
3692 info,
3693 input_section,
3694 rel->r_offset);
3695 if (outrel.r_offset == (bfd_vma) -1
3696 || outrel.r_offset == (bfd_vma) -2)
3697 abort ();
3698
3699 outrel.r_offset += (input_section->output_section->vma
3700 + input_section->output_offset);
3701
3702 if (h->dynindx == -1
3703 || h->forced_local
3704 || info->executable)
3705 {
3706 /* This symbol is resolved locally. */
3707 outrel.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
3708 outrel.r_addend = (h->root.u.def.value
3709 + h->root.u.def.section->output_section->vma
3710 + h->root.u.def.section->output_offset);
3711 }
3712 else
3713 {
3714 outrel.r_info = htab->r_info (h->dynindx, r_type);
3715 outrel.r_addend = 0;
3716 }
3717
3718 sreloc = htab->elf.irelifunc;
3719 elf_append_rela (output_bfd, sreloc, &outrel);
3720
3721 /* If this reloc is against an external symbol, we
3722 do not want to fiddle with the addend. Otherwise,
3723 we need to include the symbol value so that it
3724 becomes an addend for the dynamic reloc. For an
3725 internal symbol, we have already updated the addend. */
3726 continue;
3727 }
3728 /* FALLTHROUGH */
3729 case R_X86_64_PC32:
3730 case R_X86_64_PC32_BND:
3731 case R_X86_64_PC64:
3732 case R_X86_64_PLT32:
3733 case R_X86_64_PLT32_BND:
3734 goto do_relocation;
3735
3736 case R_X86_64_GOTPCREL:
3737 case R_X86_64_GOTPCREL64:
3738 base_got = htab->elf.sgot;
3739 off = h->got.offset;
3740
3741 if (base_got == NULL)
3742 abort ();
3743
3744 if (off == (bfd_vma) -1)
3745 {
3746 /* We can't use h->got.offset here to save state, or
3747 even just remember the offset, as finish_dynamic_symbol
3748 would use that as offset into .got. */
3749
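/* Recover the GOT slot assigned to this IFUNC PLT entry.  With a
   regular .plt, entry 0 is reserved and the first three .got.plt
   slots belong to the dynamic linker, hence the "- 1" and "+ 3"
   below.  The .iplt/.igot.plt sections used for static executables
   reserve nothing.  */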
3750 if (htab->elf.splt != NULL)
3751 {
3752 plt_index = h->plt.offset / plt_entry_size - 1;
3753 off = (plt_index + 3) * GOT_ENTRY_SIZE;
3754 base_got = htab->elf.sgotplt;
3755 }
3756 else
3757 {
3758 plt_index = h->plt.offset / plt_entry_size;
3759 off = plt_index * GOT_ENTRY_SIZE;
3760 base_got = htab->elf.igotplt;
3761 }
3762
3763 if (h->dynindx == -1
3764 || h->forced_local
3765 || info->symbolic)
3766 {
3767 /* This references the local definition. We must
3768 initialize this entry in the global offset table.
3769 Since the offset must always be a multiple of 8,
3770 we use the least significant bit to record
3771 whether we have initialized it already.
3772
3773 When doing a dynamic link, we create a .rela.got
3774 relocation entry to initialize the value. This
3775 is done in the finish_dynamic_symbol routine. */
3776 if ((off & 1) != 0)
3777 off &= ~1;
3778 else
3779 {
3780 bfd_put_64 (output_bfd, relocation,
3781 base_got->contents + off);
3782 /* Note that this is harmless for the GOTPLT64
3783 case, as -1 | 1 still is -1. */
3784 h->got.offset |= 1;
3785 }
3786 }
3787 }
3788
3789 relocation = (base_got->output_section->vma
3790 + base_got->output_offset + off);
3791
3792 goto do_relocation;
3793 }
3794 }
3795
3796 /* When generating a shared object, the relocations handled here are
3797 copied into the output file to be resolved at run time. */
3798 switch (r_type)
3799 {
3800 case R_X86_64_GOT32:
3801 case R_X86_64_GOT64:
3802 /* Relocation is to the entry for this symbol in the global
3803 offset table. */
3804 case R_X86_64_GOTPCREL:
3805 case R_X86_64_GOTPCREL64:
3806 /* Use global offset table entry as symbol value. */
3807 case R_X86_64_GOTPLT64:
3808 /* This is obsolete and treated the same as GOT64. */
3809 base_got = htab->elf.sgot;
3810
3811 if (htab->elf.sgot == NULL)
3812 abort ();
3813
3814 if (h != NULL)
3815 {
3816 bfd_boolean dyn;
3817
3818 off = h->got.offset;
3819 if (h->needs_plt
3820 && h->plt.offset != (bfd_vma)-1
3821 && off == (bfd_vma)-1)
3822 {
3823 /* We can't use h->got.offset here to save
3824 state, or even just remember the offset, as
3825 finish_dynamic_symbol would use that as offset into
3826 .got. */
3827 bfd_vma plt_index = h->plt.offset / plt_entry_size - 1;
3828 off = (plt_index + 3) * GOT_ENTRY_SIZE;
3829 base_got = htab->elf.sgotplt;
3830 }
3831
3832 dyn = htab->elf.dynamic_sections_created;
3833
3834 if (! WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
3835 || (info->shared
3836 && SYMBOL_REFERENCES_LOCAL (info, h))
3837 || (ELF_ST_VISIBILITY (h->other)
3838 && h->root.type == bfd_link_hash_undefweak))
3839 {
3840 /* This is actually a static link, or it is a -Bsymbolic
3841 link and the symbol is defined locally, or the symbol
3842 was forced to be local because of a version file. We
3843 must initialize this entry in the global offset table.
3844 Since the offset must always be a multiple of 8, we
3845 use the least significant bit to record whether we
3846 have initialized it already.
3847
3848 When doing a dynamic link, we create a .rela.got
3849 relocation entry to initialize the value. This is
3850 done in the finish_dynamic_symbol routine. */
3851 if ((off & 1) != 0)
3852 off &= ~1;
3853 else
3854 {
3855 bfd_put_64 (output_bfd, relocation,
3856 base_got->contents + off);
3857 /* Note that this is harmless for the GOTPLT64 case,
3858 as -1 | 1 still is -1. */
3859 h->got.offset |= 1;
3860 }
3861 }
3862 else
3863 unresolved_reloc = FALSE;
3864 }
3865 else
3866 {
3867 if (local_got_offsets == NULL)
3868 abort ();
3869
3870 off = local_got_offsets[r_symndx];
3871
3872 /* The offset must always be a multiple of 8. We use
3873 the least significant bit to record whether we have
3874 already generated the necessary reloc. */
3875 if ((off & 1) != 0)
3876 off &= ~1;
3877 else
3878 {
3879 bfd_put_64 (output_bfd, relocation,
3880 base_got->contents + off);
3881
3882 if (info->shared)
3883 {
3884 asection *s;
3885 Elf_Internal_Rela outrel;
3886
3887 /* We need to generate a R_X86_64_RELATIVE reloc
3888 for the dynamic linker. */
3889 s = htab->elf.srelgot;
3890 if (s == NULL)
3891 abort ();
3892
3893 outrel.r_offset = (base_got->output_section->vma
3894 + base_got->output_offset
3895 + off);
3896 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
3897 outrel.r_addend = relocation;
3898 elf_append_rela (output_bfd, s, &outrel);
3899 }
3900
3901 local_got_offsets[r_symndx] |= 1;
3902 }
3903 }
3904
3905 if (off >= (bfd_vma) -2)
3906 abort ();
3907
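/* R_X86_64_GOT32/GOT64/GOTPLT64 are expressed as offsets from the
   start of .got.plt (_GLOBAL_OFFSET_TABLE_), so the .got.plt base is
   subtracted below; the PC-relative GOTPCREL forms keep the absolute
   address of the GOT entry and leave the PC bias to the howto.  */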
3908 relocation = base_got->output_section->vma
3909 + base_got->output_offset + off;
3910 if (r_type != R_X86_64_GOTPCREL && r_type != R_X86_64_GOTPCREL64)
3911 relocation -= htab->elf.sgotplt->output_section->vma
3912 - htab->elf.sgotplt->output_offset;
3913
3914 break;
3915
3916 case R_X86_64_GOTOFF64:
3917 /* Relocation is relative to the start of the global offset
3918 table. */
3919
3920 /* Check that this isn't a protected function symbol when
3921 building a shared library, since it may not be local when
3922 used as a function address. */
3923 if (!info->executable
3924 && h
3925 && !SYMBOLIC_BIND (info, h)
3926 && h->def_regular
3927 && h->type == STT_FUNC
3928 && ELF_ST_VISIBILITY (h->other) == STV_PROTECTED)
3929 {
3930 (*_bfd_error_handler)
3931 (_("%B: relocation R_X86_64_GOTOFF64 against protected function `%s' can not be used when making a shared object"),
3932 input_bfd, h->root.root.string);
3933 bfd_set_error (bfd_error_bad_value);
3934 return FALSE;
3935 }
3936
3937 /* Note that sgot is not involved in this
3938 calculation. We always want the start of .got.plt. If we
3939 defined _GLOBAL_OFFSET_TABLE_ in a different way, as is
3940 permitted by the ABI, we might have to change this
3941 calculation. */
3942 relocation -= htab->elf.sgotplt->output_section->vma
3943 + htab->elf.sgotplt->output_offset;
3944 break;
3945
3946 case R_X86_64_GOTPC32:
3947 case R_X86_64_GOTPC64:
3948 /* Use global offset table as symbol value. */
3949 relocation = htab->elf.sgotplt->output_section->vma
3950 + htab->elf.sgotplt->output_offset;
3951 unresolved_reloc = FALSE;
3952 break;
3953
3954 case R_X86_64_PLTOFF64:
3955 /* Relocation is PLT entry relative to GOT. For local
3956 symbols it's the symbol itself relative to GOT. */
3957 if (h != NULL
3958 /* See PLT32 handling. */
3959 && h->plt.offset != (bfd_vma) -1
3960 && htab->elf.splt != NULL)
3961 {
3962 if (htab->plt_bnd != NULL)
3963 {
3964 resolved_plt = htab->plt_bnd;
3965 plt_offset = eh->plt_bnd.offset;
3966 }
3967 else
3968 {
3969 resolved_plt = htab->elf.splt;
3970 plt_offset = h->plt.offset;
3971 }
3972
3973 relocation = (resolved_plt->output_section->vma
3974 + resolved_plt->output_offset
3975 + plt_offset);
3976 unresolved_reloc = FALSE;
3977 }
3978
3979 relocation -= htab->elf.sgotplt->output_section->vma
3980 + htab->elf.sgotplt->output_offset;
3981 break;
3982
3983 case R_X86_64_PLT32:
3984 case R_X86_64_PLT32_BND:
3985 /* Relocation is to the entry for this symbol in the
3986 procedure linkage table. */
3987
3988 /* Resolve a PLT32 reloc against a local symbol directly,
3989 without using the procedure linkage table. */
3990 if (h == NULL)
3991 break;
3992
3993 if ((h->plt.offset == (bfd_vma) -1
3994 && eh->plt_got.offset == (bfd_vma) -1)
3995 || htab->elf.splt == NULL)
3996 {
3997 /* We didn't make a PLT entry for this symbol. This
3998 happens when statically linking PIC code, or when
3999 using -Bsymbolic. */
4000 break;
4001 }
4002
4003 if (h->plt.offset != (bfd_vma) -1)
4004 {
4005 if (htab->plt_bnd != NULL)
4006 {
4007 resolved_plt = htab->plt_bnd;
4008 plt_offset = eh->plt_bnd.offset;
4009 }
4010 else
4011 {
4012 resolved_plt = htab->elf.splt;
4013 plt_offset = h->plt.offset;
4014 }
4015 }
4016 else
4017 {
4018 /* Use the GOT PLT. */
4019 resolved_plt = htab->plt_got;
4020 plt_offset = eh->plt_got.offset;
4021 }
4022
4023 relocation = (resolved_plt->output_section->vma
4024 + resolved_plt->output_offset
4025 + plt_offset);
4026 unresolved_reloc = FALSE;
4027 break;
4028
4029 case R_X86_64_SIZE32:
4030 case R_X86_64_SIZE64:
4031 /* Set to symbol size. */
4032 relocation = st_size;
4033 goto direct;
4034
4035 case R_X86_64_PC8:
4036 case R_X86_64_PC16:
4037 case R_X86_64_PC32:
4038 case R_X86_64_PC32_BND:
4039 if (info->shared
4040 && (input_section->flags & SEC_ALLOC) != 0
4041 && (input_section->flags & SEC_READONLY) != 0
4042 && h != NULL)
4043 {
4044 bfd_boolean fail = FALSE;
4045 bfd_boolean branch
4046 = ((r_type == R_X86_64_PC32
4047 || r_type == R_X86_64_PC32_BND)
4048 && is_32bit_relative_branch (contents, rel->r_offset));
4049
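/* In position-independent output, a PC-relative relocation from a
   read-only, allocated section can only be resolved at link time;
   anything else would need a dynamic text relocation.  Accept it when
   the target is known (a locally defined symbol or one covered by a
   copy reloc) or when it is a 32-bit relative branch; otherwise an
   error is reported below.  */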
4050 if (SYMBOL_REFERENCES_LOCAL (info, h))
4051 {
4052 /* The symbol is referenced locally.  It must be defined
4053 locally, unless the reference is a branch. */
4054 fail = !h->def_regular && !branch;
4055 }
4056 else if (!h->needs_copy)
4057 {
4058 /* Symbol doesn't need copy reloc and isn't referenced
4059 locally. We only allow branch to symbol with
4060 non-default visibility. */
4061 fail = (!branch
4062 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT);
4063 }
4064
4065 if (fail)
4066 {
4067 const char *fmt;
4068 const char *v;
4069 const char *pic = "";
4070
4071 switch (ELF_ST_VISIBILITY (h->other))
4072 {
4073 case STV_HIDDEN:
4074 v = _("hidden symbol");
4075 break;
4076 case STV_INTERNAL:
4077 v = _("internal symbol");
4078 break;
4079 case STV_PROTECTED:
4080 v = _("protected symbol");
4081 break;
4082 default:
4083 v = _("symbol");
4084 pic = _("; recompile with -fPIC");
4085 break;
4086 }
4087
4088 if (h->def_regular)
4089 fmt = _("%B: relocation %s against %s `%s' can not be used when making a shared object%s");
4090 else
4091 fmt = _("%B: relocation %s against undefined %s `%s' can not be used when making a shared object%s");
4092
4093 (*_bfd_error_handler) (fmt, input_bfd,
4094 x86_64_elf_howto_table[r_type].name,
4095 v, h->root.root.string, pic);
4096 bfd_set_error (bfd_error_bad_value);
4097 return FALSE;
4098 }
4099 }
4100 /* Fall through. */
4101
4102 case R_X86_64_8:
4103 case R_X86_64_16:
4104 case R_X86_64_32:
4105 case R_X86_64_PC64:
4106 case R_X86_64_64:
4107 /* FIXME: The ABI says the linker should make sure the value is
4108 the same when it's zero-extended to 64 bits. */
4109
4110 direct:
4111 if ((input_section->flags & SEC_ALLOC) == 0)
4112 break;
4113
4114 /* Don't copy a pc-relative relocation into the output file
4115 if the symbol needs copy reloc. */
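/* A dynamic relocation is emitted in two situations: when building a
   shared object and the reference cannot be resolved at link time, or,
   with ELIMINATE_COPY_RELOCS, when linking an executable against a
   dynamic symbol that is not defined in a regular object and has no
   non-GOT reference forcing a copy relocation.  */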
4116 if ((info->shared
4117 && !(h != NULL
4118 && h->needs_copy
4119 && IS_X86_64_PCREL_TYPE (r_type))
4120 && (h == NULL
4121 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
4122 || h->root.type != bfd_link_hash_undefweak)
4123 && ((! IS_X86_64_PCREL_TYPE (r_type)
4124 && r_type != R_X86_64_SIZE32
4125 && r_type != R_X86_64_SIZE64)
4126 || ! SYMBOL_CALLS_LOCAL (info, h)))
4127 || (ELIMINATE_COPY_RELOCS
4128 && !info->shared
4129 && h != NULL
4130 && h->dynindx != -1
4131 && !h->non_got_ref
4132 && ((h->def_dynamic
4133 && !h->def_regular)
4134 || h->root.type == bfd_link_hash_undefweak
4135 || h->root.type == bfd_link_hash_undefined)))
4136 {
4137 Elf_Internal_Rela outrel;
4138 bfd_boolean skip, relocate;
4139 asection *sreloc;
4140
4141 /* When generating a shared object, these relocations
4142 are copied into the output file to be resolved at run
4143 time. */
4144 skip = FALSE;
4145 relocate = FALSE;
4146
4147 outrel.r_offset =
4148 _bfd_elf_section_offset (output_bfd, info, input_section,
4149 rel->r_offset);
4150 if (outrel.r_offset == (bfd_vma) -1)
4151 skip = TRUE;
4152 else if (outrel.r_offset == (bfd_vma) -2)
4153 skip = TRUE, relocate = TRUE;
4154
4155 outrel.r_offset += (input_section->output_section->vma
4156 + input_section->output_offset);
4157
4158 if (skip)
4159 memset (&outrel, 0, sizeof outrel);
4160
4161 /* h->dynindx may be -1 if this symbol was marked to
4162 become local. */
4163 else if (h != NULL
4164 && h->dynindx != -1
4165 && (IS_X86_64_PCREL_TYPE (r_type)
4166 || ! info->shared
4167 || ! SYMBOLIC_BIND (info, h)
4168 || ! h->def_regular))
4169 {
4170 outrel.r_info = htab->r_info (h->dynindx, r_type);
4171 outrel.r_addend = rel->r_addend;
4172 }
4173 else
4174 {
4175 /* This symbol is local, or marked to become local. */
4176 if (r_type == htab->pointer_r_type)
4177 {
4178 relocate = TRUE;
4179 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
4180 outrel.r_addend = relocation + rel->r_addend;
4181 }
4182 else if (r_type == R_X86_64_64
4183 && !ABI_64_P (output_bfd))
4184 {
4185 relocate = TRUE;
4186 outrel.r_info = htab->r_info (0,
4187 R_X86_64_RELATIVE64);
4188 outrel.r_addend = relocation + rel->r_addend;
4189 /* Check addend overflow. */
4190 if ((outrel.r_addend & 0x80000000)
4191 != (rel->r_addend & 0x80000000))
4192 {
4193 const char *name;
4194 int addend = rel->r_addend;
4195 if (h && h->root.root.string)
4196 name = h->root.root.string;
4197 else
4198 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
4199 sym, NULL);
4200 if (addend < 0)
4201 (*_bfd_error_handler)
4202 (_("%B: addend -0x%x in relocation %s against "
4203 "symbol `%s' at 0x%lx in section `%A' is "
4204 "out of range"),
4205 input_bfd, input_section, addend,
4206 x86_64_elf_howto_table[r_type].name,
4207 name, (unsigned long) rel->r_offset);
4208 else
4209 (*_bfd_error_handler)
4210 (_("%B: addend 0x%x in relocation %s against "
4211 "symbol `%s' at 0x%lx in section `%A' is "
4212 "out of range"),
4213 input_bfd, input_section, addend,
4214 x86_64_elf_howto_table[r_type].name,
4215 name, (unsigned long) rel->r_offset);
4216 bfd_set_error (bfd_error_bad_value);
4217 return FALSE;
4218 }
4219 }
4220 else
4221 {
4222 long sindx;
4223
4224 if (bfd_is_abs_section (sec))
4225 sindx = 0;
4226 else if (sec == NULL || sec->owner == NULL)
4227 {
4228 bfd_set_error (bfd_error_bad_value);
4229 return FALSE;
4230 }
4231 else
4232 {
4233 asection *osec;
4234
4235 /* We are turning this relocation into one
4236 against a section symbol. It would be
4237 proper to subtract the symbol's value,
4238 osec->vma, from the emitted reloc addend,
4239 but ld.so expects buggy relocs. */
4240 osec = sec->output_section;
4241 sindx = elf_section_data (osec)->dynindx;
4242 if (sindx == 0)
4243 {
4244 asection *oi = htab->elf.text_index_section;
4245 sindx = elf_section_data (oi)->dynindx;
4246 }
4247 BFD_ASSERT (sindx != 0);
4248 }
4249
4250 outrel.r_info = htab->r_info (sindx, r_type);
4251 outrel.r_addend = relocation + rel->r_addend;
4252 }
4253 }
4254
4255 sreloc = elf_section_data (input_section)->sreloc;
4256
4257 if (sreloc == NULL || sreloc->contents == NULL)
4258 {
4259 r = bfd_reloc_notsupported;
4260 goto check_relocation_error;
4261 }
4262
4263 elf_append_rela (output_bfd, sreloc, &outrel);
4264
4265 /* If this reloc is against an external symbol, we do
4266 not want to fiddle with the addend. Otherwise, we
4267 need to include the symbol value so that it becomes
4268 an addend for the dynamic reloc. */
4269 if (! relocate)
4270 continue;
4271 }
4272
4273 break;
4274
4275 case R_X86_64_TLSGD:
4276 case R_X86_64_GOTPC32_TLSDESC:
4277 case R_X86_64_TLSDESC_CALL:
4278 case R_X86_64_GOTTPOFF:
4279 tls_type = GOT_UNKNOWN;
4280 if (h == NULL && local_got_offsets)
4281 tls_type = elf_x86_64_local_got_tls_type (input_bfd) [r_symndx];
4282 else if (h != NULL)
4283 tls_type = elf_x86_64_hash_entry (h)->tls_type;
4284
4285 if (! elf_x86_64_tls_transition (info, input_bfd,
4286 input_section, contents,
4287 symtab_hdr, sym_hashes,
4288 &r_type, tls_type, rel,
4289 relend, h, r_symndx))
4290 return FALSE;
4291
4292 if (r_type == R_X86_64_TPOFF32)
4293 {
4294 bfd_vma roff = rel->r_offset;
4295
4296 BFD_ASSERT (! unresolved_reloc);
4297
4298 if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSGD)
4299 {
4300 /* GD->LE transition. For 64bit, change
4301 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
4302 .word 0x6666; rex64; call __tls_get_addr
4303 into:
4304 movq %fs:0, %rax
4305 leaq foo@tpoff(%rax), %rax
4306 For 32bit, change
4307 leaq foo@tlsgd(%rip), %rdi
4308 .word 0x6666; rex64; call __tls_get_addr
4309 into:
4310 movl %fs:0, %eax
4311 leaq foo@tpoff(%rax), %rax
4312 For largepic, change:
4313 leaq foo@tlsgd(%rip), %rdi
4314 movabsq $__tls_get_addr@pltoff, %rax
4315 addq %rbx, %rax
4316 call *%rax
4317 into:
4318 movq %fs:0, %rax
4319 leaq foo@tpoff(%rax), %rax
4320 nopw 0x0(%rax,%rax,1) */
4321 int largepic = 0;
4322 if (ABI_64_P (output_bfd)
4323 && contents[roff + 5] == (bfd_byte) '\xb8')
4324 {
4325 memcpy (contents + roff - 3,
4326 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80"
4327 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
4328 largepic = 1;
4329 }
4330 else if (ABI_64_P (output_bfd))
4331 memcpy (contents + roff - 4,
4332 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
4333 16);
4334 else
4335 memcpy (contents + roff - 3,
4336 "\x64\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
4337 15);
4338 bfd_put_32 (output_bfd,
4339 elf_x86_64_tpoff (info, relocation),
4340 contents + roff + 8 + largepic);
4341 /* Skip R_X86_64_PC32/R_X86_64_PLT32/R_X86_64_PLTOFF64. */
4342 rel++;
4343 continue;
4344 }
4345 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_GOTPC32_TLSDESC)
4346 {
4347 /* GDesc -> LE transition.
4348 It's originally something like:
4349 leaq x@tlsdesc(%rip), %rax
4350
4351 Change it to:
4352 movl $x@tpoff, %rax. */
4353
4354 unsigned int val, type;
4355
4356 type = bfd_get_8 (input_bfd, contents + roff - 3);
4357 val = bfd_get_8 (input_bfd, contents + roff - 1);
4358 bfd_put_8 (output_bfd, 0x48 | ((type >> 2) & 1),
4359 contents + roff - 3);
4360 bfd_put_8 (output_bfd, 0xc7, contents + roff - 2);
4361 bfd_put_8 (output_bfd, 0xc0 | ((val >> 3) & 7),
4362 contents + roff - 1);
4363 bfd_put_32 (output_bfd,
4364 elf_x86_64_tpoff (info, relocation),
4365 contents + roff);
4366 continue;
4367 }
4368 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSDESC_CALL)
4369 {
4370 /* GDesc -> LE transition.
4371 It's originally:
4372 call *(%rax)
4373 Turn it into:
4374 xchg %ax,%ax. */
4375 bfd_put_8 (output_bfd, 0x66, contents + roff);
4376 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
4377 continue;
4378 }
4379 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_GOTTPOFF)
4380 {
4381 /* IE->LE transition:
4382 For 64bit, originally it can be one of:
4383 movq foo@gottpoff(%rip), %reg
4384 addq foo@gottpoff(%rip), %reg
4385 We change it into:
4386 movq $foo, %reg
4387 leaq foo(%reg), %reg
4388 addq $foo, %reg.
4389 For 32bit, originally it can be one of:
4390 movq foo@gottpoff(%rip), %reg
4391 addl foo@gottpoff(%rip), %reg
4392 We change it into:
4393 movq $foo, %reg
4394 leal foo(%reg), %reg
4395 addl $foo, %reg. */
4396
4397 unsigned int val, type, reg;
4398
4399 if (roff >= 3)
4400 val = bfd_get_8 (input_bfd, contents + roff - 3);
4401 else
4402 val = 0;
4403 type = bfd_get_8 (input_bfd, contents + roff - 2);
4404 reg = bfd_get_8 (input_bfd, contents + roff - 1);
4405 reg >>= 3;
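/* VAL is the REX prefix byte (if the instruction has one), TYPE is
   the opcode (0x8b for movq, otherwise an add) and REG is the reg
   field extracted from the ModRM byte of the original instruction
   that loaded foo@gottpoff(%rip).  */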
4406 if (type == 0x8b)
4407 {
4408 /* movq */
4409 if (val == 0x4c)
4410 bfd_put_8 (output_bfd, 0x49,
4411 contents + roff - 3);
4412 else if (!ABI_64_P (output_bfd) && val == 0x44)
4413 bfd_put_8 (output_bfd, 0x41,
4414 contents + roff - 3);
4415 bfd_put_8 (output_bfd, 0xc7,
4416 contents + roff - 2);
4417 bfd_put_8 (output_bfd, 0xc0 | reg,
4418 contents + roff - 1);
4419 }
4420 else if (reg == 4)
4421 {
4422 /* addq/addl -> addq/addl - addressing with %rsp/%r12
4423 is special */
4424 if (val == 0x4c)
4425 bfd_put_8 (output_bfd, 0x49,
4426 contents + roff - 3);
4427 else if (!ABI_64_P (output_bfd) && val == 0x44)
4428 bfd_put_8 (output_bfd, 0x41,
4429 contents + roff - 3);
4430 bfd_put_8 (output_bfd, 0x81,
4431 contents + roff - 2);
4432 bfd_put_8 (output_bfd, 0xc0 | reg,
4433 contents + roff - 1);
4434 }
4435 else
4436 {
4437 /* addq/addl -> leaq/leal */
4438 if (val == 0x4c)
4439 bfd_put_8 (output_bfd, 0x4d,
4440 contents + roff - 3);
4441 else if (!ABI_64_P (output_bfd) && val == 0x44)
4442 bfd_put_8 (output_bfd, 0x45,
4443 contents + roff - 3);
4444 bfd_put_8 (output_bfd, 0x8d,
4445 contents + roff - 2);
4446 bfd_put_8 (output_bfd, 0x80 | reg | (reg << 3),
4447 contents + roff - 1);
4448 }
4449 bfd_put_32 (output_bfd,
4450 elf_x86_64_tpoff (info, relocation),
4451 contents + roff);
4452 continue;
4453 }
4454 else
4455 BFD_ASSERT (FALSE);
4456 }
4457
4458 if (htab->elf.sgot == NULL)
4459 abort ();
4460
4461 if (h != NULL)
4462 {
4463 off = h->got.offset;
4464 offplt = elf_x86_64_hash_entry (h)->tlsdesc_got;
4465 }
4466 else
4467 {
4468 if (local_got_offsets == NULL)
4469 abort ();
4470
4471 off = local_got_offsets[r_symndx];
4472 offplt = local_tlsdesc_gotents[r_symndx];
4473 }
4474
4475 if ((off & 1) != 0)
4476 off &= ~1;
4477 else
4478 {
4479 Elf_Internal_Rela outrel;
4480 int dr_type, indx;
4481 asection *sreloc;
4482
4483 if (htab->elf.srelgot == NULL)
4484 abort ();
4485
4486 indx = h && h->dynindx != -1 ? h->dynindx : 0;
4487
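/* INDX is the dynamic symbol index used in the relocations emitted
   below; 0 means the symbol resolves locally, so the DTP/TP offsets
   are filled in directly rather than left for the dynamic linker.  */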
4488 if (GOT_TLS_GDESC_P (tls_type))
4489 {
4490 outrel.r_info = htab->r_info (indx, R_X86_64_TLSDESC);
4491 BFD_ASSERT (htab->sgotplt_jump_table_size + offplt
4492 + 2 * GOT_ENTRY_SIZE <= htab->elf.sgotplt->size);
4493 outrel.r_offset = (htab->elf.sgotplt->output_section->vma
4494 + htab->elf.sgotplt->output_offset
4495 + offplt
4496 + htab->sgotplt_jump_table_size);
4497 sreloc = htab->elf.srelplt;
4498 if (indx == 0)
4499 outrel.r_addend = relocation - elf_x86_64_dtpoff_base (info);
4500 else
4501 outrel.r_addend = 0;
4502 elf_append_rela (output_bfd, sreloc, &outrel);
4503 }
4504
4505 sreloc = htab->elf.srelgot;
4506
4507 outrel.r_offset = (htab->elf.sgot->output_section->vma
4508 + htab->elf.sgot->output_offset + off);
4509
4510 if (GOT_TLS_GD_P (tls_type))
4511 dr_type = R_X86_64_DTPMOD64;
4512 else if (GOT_TLS_GDESC_P (tls_type))
4513 goto dr_done;
4514 else
4515 dr_type = R_X86_64_TPOFF64;
4516
4517 bfd_put_64 (output_bfd, 0, htab->elf.sgot->contents + off);
4518 outrel.r_addend = 0;
4519 if ((dr_type == R_X86_64_TPOFF64
4520 || dr_type == R_X86_64_TLSDESC) && indx == 0)
4521 outrel.r_addend = relocation - elf_x86_64_dtpoff_base (info);
4522 outrel.r_info = htab->r_info (indx, dr_type);
4523
4524 elf_append_rela (output_bfd, sreloc, &outrel);
4525
4526 if (GOT_TLS_GD_P (tls_type))
4527 {
4528 if (indx == 0)
4529 {
4530 BFD_ASSERT (! unresolved_reloc);
4531 bfd_put_64 (output_bfd,
4532 relocation - elf_x86_64_dtpoff_base (info),
4533 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
4534 }
4535 else
4536 {
4537 bfd_put_64 (output_bfd, 0,
4538 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
4539 outrel.r_info = htab->r_info (indx,
4540 R_X86_64_DTPOFF64);
4541 outrel.r_offset += GOT_ENTRY_SIZE;
4542 elf_append_rela (output_bfd, sreloc,
4543 &outrel);
4544 }
4545 }
4546
4547 dr_done:
4548 if (h != NULL)
4549 h->got.offset |= 1;
4550 else
4551 local_got_offsets[r_symndx] |= 1;
4552 }
4553
4554 if (off >= (bfd_vma) -2
4555 && ! GOT_TLS_GDESC_P (tls_type))
4556 abort ();
4557 if (r_type == ELF32_R_TYPE (rel->r_info))
4558 {
4559 if (r_type == R_X86_64_GOTPC32_TLSDESC
4560 || r_type == R_X86_64_TLSDESC_CALL)
4561 relocation = htab->elf.sgotplt->output_section->vma
4562 + htab->elf.sgotplt->output_offset
4563 + offplt + htab->sgotplt_jump_table_size;
4564 else
4565 relocation = htab->elf.sgot->output_section->vma
4566 + htab->elf.sgot->output_offset + off;
4567 unresolved_reloc = FALSE;
4568 }
4569 else
4570 {
4571 bfd_vma roff = rel->r_offset;
4572
4573 if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSGD)
4574 {
4575 /* GD->IE transition. For 64bit, change
4576 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
4577 .word 0x6666; rex64; call __tls_get_addr@plt
4578 into:
4579 movq %fs:0, %rax
4580 addq foo@gottpoff(%rip), %rax
4581 For 32bit, change
4582 leaq foo@tlsgd(%rip), %rdi
4583 .word 0x6666; rex64; call __tls_get_addr@plt
4584 into:
4585 movl %fs:0, %eax
4586 addq foo@gottpoff(%rip), %rax
4587 For largepic, change:
4588 leaq foo@tlsgd(%rip), %rdi
4589 movabsq $__tls_get_addr@pltoff, %rax
4590 addq %rbx, %rax
4591 call *%rax
4592 into:
4593 movq %fs:0, %rax
4594 addq foo@gottpoff(%rax), %rax
4595 nopw 0x0(%rax,%rax,1) */
4596 int largepic = 0;
4597 if (ABI_64_P (output_bfd)
4598 && contents[roff + 5] == (bfd_byte) '\xb8')
4599 {
4600 memcpy (contents + roff - 3,
4601 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05"
4602 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
4603 largepic = 1;
4604 }
4605 else if (ABI_64_P (output_bfd))
4606 memcpy (contents + roff - 4,
4607 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
4608 16);
4609 else
4610 memcpy (contents + roff - 3,
4611 "\x64\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
4612 15);
4613
4614 relocation = (htab->elf.sgot->output_section->vma
4615 + htab->elf.sgot->output_offset + off
4616 - roff
4617 - largepic
4618 - input_section->output_section->vma
4619 - input_section->output_offset
4620 - 12);
4621 bfd_put_32 (output_bfd, relocation,
4622 contents + roff + 8 + largepic);
4623 /* Skip R_X86_64_PLT32/R_X86_64_PLTOFF64. */
4624 rel++;
4625 continue;
4626 }
4627 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_GOTPC32_TLSDESC)
4628 {
4629 /* GDesc -> IE transition.
4630 It's originally something like:
4631 leaq x@tlsdesc(%rip), %rax
4632
4633 Change it to:
4634 movq x@gottpoff(%rip), %rax # before xchg %ax,%ax. */
4635
4636 /* Now modify the instruction as appropriate. To
4637 turn a leaq into a movq in the form we use it, it
4638 suffices to change the second byte from 0x8d to
4639 0x8b. */
4640 bfd_put_8 (output_bfd, 0x8b, contents + roff - 2);
4641
4642 bfd_put_32 (output_bfd,
4643 htab->elf.sgot->output_section->vma
4644 + htab->elf.sgot->output_offset + off
4645 - rel->r_offset
4646 - input_section->output_section->vma
4647 - input_section->output_offset
4648 - 4,
4649 contents + roff);
4650 continue;
4651 }
4652 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSDESC_CALL)
4653 {
4654 /* GDesc -> IE transition.
4655 It's originally:
4656 call *(%rax)
4657
4658 Change it to:
4659 xchg %ax, %ax. */
4660
4661 bfd_put_8 (output_bfd, 0x66, contents + roff);
4662 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
4663 continue;
4664 }
4665 else
4666 BFD_ASSERT (FALSE);
4667 }
4668 break;
4669
4670 case R_X86_64_TLSLD:
4671 if (! elf_x86_64_tls_transition (info, input_bfd,
4672 input_section, contents,
4673 symtab_hdr, sym_hashes,
4674 &r_type, GOT_UNKNOWN,
4675 rel, relend, h, r_symndx))
4676 return FALSE;
4677
4678 if (r_type != R_X86_64_TLSLD)
4679 {
4680 /* LD->LE transition:
4681 leaq foo@tlsld(%rip), %rdi; call __tls_get_addr.
4682 For 64bit, we change it into:
4683 .word 0x6666; .byte 0x66; movq %fs:0, %rax.
4684 For 32bit, we change it into:
4685 nopl 0x0(%rax); movl %fs:0, %eax.
4686 For largepic, change:
4687 leaq foo@tlsgd(%rip), %rdi
4688 movabsq $__tls_get_addr@pltoff, %rax
4689 addq %rbx, %rax
4690 call *%rax
4691 into:
4692 data32 data32 data32 nopw %cs:0x0(%rax,%rax,1)
4693 movq %fs:0, %rax */
4694
4695 BFD_ASSERT (r_type == R_X86_64_TPOFF32);
4696 if (ABI_64_P (output_bfd)
4697 && contents[rel->r_offset + 5] == (bfd_byte) '\xb8')
4698 memcpy (contents + rel->r_offset - 3,
4699 "\x66\x66\x66\x66\x2e\x0f\x1f\x84\0\0\0\0\0"
4700 "\x64\x48\x8b\x04\x25\0\0\0", 22);
4701 else if (ABI_64_P (output_bfd))
4702 memcpy (contents + rel->r_offset - 3,
4703 "\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0", 12);
4704 else
4705 memcpy (contents + rel->r_offset - 3,
4706 "\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0", 12);
4707 /* Skip R_X86_64_PC32/R_X86_64_PLT32/R_X86_64_PLTOFF64. */
4708 rel++;
4709 continue;
4710 }
4711
4712 if (htab->elf.sgot == NULL)
4713 abort ();
4714
4715 off = htab->tls_ld_got.offset;
4716 if (off & 1)
4717 off &= ~1;
4718 else
4719 {
4720 Elf_Internal_Rela outrel;
4721
4722 if (htab->elf.srelgot == NULL)
4723 abort ();
4724
4725 outrel.r_offset = (htab->elf.sgot->output_section->vma
4726 + htab->elf.sgot->output_offset + off);
4727
4728 bfd_put_64 (output_bfd, 0,
4729 htab->elf.sgot->contents + off);
4730 bfd_put_64 (output_bfd, 0,
4731 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
4732 outrel.r_info = htab->r_info (0, R_X86_64_DTPMOD64);
4733 outrel.r_addend = 0;
4734 elf_append_rela (output_bfd, htab->elf.srelgot,
4735 &outrel);
4736 htab->tls_ld_got.offset |= 1;
4737 }
4738 relocation = htab->elf.sgot->output_section->vma
4739 + htab->elf.sgot->output_offset + off;
4740 unresolved_reloc = FALSE;
4741 break;
4742
4743 case R_X86_64_DTPOFF32:
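/* In an executable, a DTPOFF32 in a code section is what remains
   after an LD->LE transition, so resolve it to a TP offset; elsewhere
   keep the DTP-relative value.  */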
4744 if (!info->executable || (input_section->flags & SEC_CODE) == 0)
4745 relocation -= elf_x86_64_dtpoff_base (info);
4746 else
4747 relocation = elf_x86_64_tpoff (info, relocation);
4748 break;
4749
4750 case R_X86_64_TPOFF32:
4751 case R_X86_64_TPOFF64:
4752 BFD_ASSERT (info->executable);
4753 relocation = elf_x86_64_tpoff (info, relocation);
4754 break;
4755
4756 case R_X86_64_DTPOFF64:
4757 BFD_ASSERT ((input_section->flags & SEC_CODE) == 0);
4758 relocation -= elf_x86_64_dtpoff_base (info);
4759 break;
4760
4761 default:
4762 break;
4763 }
4764
4765 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
4766 because such sections are not SEC_ALLOC and thus ld.so will
4767 not process them. */
4768 if (unresolved_reloc
4769 && !((input_section->flags & SEC_DEBUGGING) != 0
4770 && h->def_dynamic)
4771 && _bfd_elf_section_offset (output_bfd, info, input_section,
4772 rel->r_offset) != (bfd_vma) -1)
4773 {
4774 (*_bfd_error_handler)
4775 (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
4776 input_bfd,
4777 input_section,
4778 (long) rel->r_offset,
4779 howto->name,
4780 h->root.root.string);
4781 return FALSE;
4782 }
4783
4784 do_relocation:
4785 r = _bfd_final_link_relocate (howto, input_bfd, input_section,
4786 contents, rel->r_offset,
4787 relocation, rel->r_addend);
4788
4789 check_relocation_error:
4790 if (r != bfd_reloc_ok)
4791 {
4792 const char *name;
4793
4794 if (h != NULL)
4795 name = h->root.root.string;
4796 else
4797 {
4798 name = bfd_elf_string_from_elf_section (input_bfd,
4799 symtab_hdr->sh_link,
4800 sym->st_name);
4801 if (name == NULL)
4802 return FALSE;
4803 if (*name == '\0')
4804 name = bfd_section_name (input_bfd, sec);
4805 }
4806
4807 if (r == bfd_reloc_overflow)
4808 {
4809 if (! ((*info->callbacks->reloc_overflow)
4810 (info, (h ? &h->root : NULL), name, howto->name,
4811 (bfd_vma) 0, input_bfd, input_section,
4812 rel->r_offset)))
4813 return FALSE;
4814 }
4815 else
4816 {
4817 (*_bfd_error_handler)
4818 (_("%B(%A+0x%lx): reloc against `%s': error %d"),
4819 input_bfd, input_section,
4820 (long) rel->r_offset, name, (int) r);
4821 return FALSE;
4822 }
4823 }
4824 }
4825
4826 return TRUE;
4827 }
4828
4829 /* Finish up dynamic symbol handling. We set the contents of various
4830 dynamic sections here. */
4831
4832 static bfd_boolean
4833 elf_x86_64_finish_dynamic_symbol (bfd *output_bfd,
4834 struct bfd_link_info *info,
4835 struct elf_link_hash_entry *h,
4836 Elf_Internal_Sym *sym ATTRIBUTE_UNUSED)
4837 {
4838 struct elf_x86_64_link_hash_table *htab;
4839 const struct elf_x86_64_backend_data *abed;
4840 bfd_boolean use_plt_bnd;
4841 struct elf_x86_64_link_hash_entry *eh;
4842
4843 htab = elf_x86_64_hash_table (info);
4844 if (htab == NULL)
4845 return FALSE;
4846
4847 /* Use MPX backend data in case of BND relocation. Use .plt_bnd
4848 section only if there is .plt section. */
4849 use_plt_bnd = htab->elf.splt != NULL && htab->plt_bnd != NULL;
4850 abed = (use_plt_bnd
4851 ? &elf_x86_64_bnd_arch_bed
4852 : get_elf_x86_64_backend_data (output_bfd));
4853
4854 eh = (struct elf_x86_64_link_hash_entry *) h;
4855
4856 if (h->plt.offset != (bfd_vma) -1)
4857 {
4858 bfd_vma plt_index;
4859 bfd_vma got_offset, plt_offset, plt_plt_offset, plt_got_offset;
4860 bfd_vma plt_plt_insn_end, plt_got_insn_size;
4861 Elf_Internal_Rela rela;
4862 bfd_byte *loc;
4863 asection *plt, *gotplt, *relplt, *resolved_plt;
4864 const struct elf_backend_data *bed;
4865 bfd_vma plt_got_pcrel_offset;
4866
4867 /* When building a static executable, use .iplt, .igot.plt and
4868 .rela.iplt sections for STT_GNU_IFUNC symbols. */
4869 if (htab->elf.splt != NULL)
4870 {
4871 plt = htab->elf.splt;
4872 gotplt = htab->elf.sgotplt;
4873 relplt = htab->elf.srelplt;
4874 }
4875 else
4876 {
4877 plt = htab->elf.iplt;
4878 gotplt = htab->elf.igotplt;
4879 relplt = htab->elf.irelplt;
4880 }
4881
4882 /* This symbol has an entry in the procedure linkage table. Set
4883 it up. */
4884 if ((h->dynindx == -1
4885 && !((h->forced_local || info->executable)
4886 && h->def_regular
4887 && h->type == STT_GNU_IFUNC))
4888 || plt == NULL
4889 || gotplt == NULL
4890 || relplt == NULL)
4891 abort ();
4892
4893 /* Get the index in the procedure linkage table which
4894 corresponds to this symbol. This is the index of this symbol
4895 in all the symbols for which we are making plt entries. The
4896 first entry in the procedure linkage table is reserved.
4897
4898 Get the offset into the .got table of the entry that
4899 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
4900 bytes. The first three are reserved for the dynamic linker.
4901
4902 For static executables, we don't reserve anything. */
4903
4904 if (plt == htab->elf.splt)
4905 {
4906 got_offset = h->plt.offset / abed->plt_entry_size - 1;
4907 got_offset = (got_offset + 3) * GOT_ENTRY_SIZE;
4908 }
4909 else
4910 {
4911 got_offset = h->plt.offset / abed->plt_entry_size;
4912 got_offset = got_offset * GOT_ENTRY_SIZE;
4913 }
4914
4915 plt_plt_insn_end = abed->plt_plt_insn_end;
4916 plt_plt_offset = abed->plt_plt_offset;
4917 plt_got_insn_size = abed->plt_got_insn_size;
4918 plt_got_offset = abed->plt_got_offset;
4919 if (use_plt_bnd)
4920 {
4921 /* Use the second PLT with BND relocations. */
4922 const bfd_byte *plt_entry, *plt2_entry;
4923
4924 if (eh->has_bnd_reloc)
4925 {
4926 plt_entry = elf_x86_64_bnd_plt_entry;
4927 plt2_entry = elf_x86_64_bnd_plt2_entry;
4928 }
4929 else
4930 {
4931 plt_entry = elf_x86_64_legacy_plt_entry;
4932 plt2_entry = elf_x86_64_legacy_plt2_entry;
4933
4934 /* Subtract 1 since there is no BND prefix. */
4935 plt_plt_insn_end -= 1;
4936 plt_plt_offset -= 1;
4937 plt_got_insn_size -= 1;
4938 plt_got_offset -= 1;
4939 }
4940
4941 BFD_ASSERT (sizeof (elf_x86_64_bnd_plt_entry)
4942 == sizeof (elf_x86_64_legacy_plt_entry));
4943
4944 /* Fill in the entry in the procedure linkage table. */
4945 memcpy (plt->contents + h->plt.offset,
4946 plt_entry, sizeof (elf_x86_64_legacy_plt_entry));
4947 /* Fill in the entry in the second PLT. */
4948 memcpy (htab->plt_bnd->contents + eh->plt_bnd.offset,
4949 plt2_entry, sizeof (elf_x86_64_legacy_plt2_entry));
4950
4951 resolved_plt = htab->plt_bnd;
4952 plt_offset = eh->plt_bnd.offset;
4953 }
4954 else
4955 {
4956 /* Fill in the entry in the procedure linkage table. */
4957 memcpy (plt->contents + h->plt.offset, abed->plt_entry,
4958 abed->plt_entry_size);
4959
4960 resolved_plt = plt;
4961 plt_offset = h->plt.offset;
4962 }
4963
4964 /* Fill in the relocated fields of the PLT entry. */
4965
4966 /* Put in the offset of the GOT entry relative to the end of the
4967 PC-relative instruction that refers to it. */
4968 plt_got_pcrel_offset = (gotplt->output_section->vma
4969 + gotplt->output_offset
4970 + got_offset
4971 - resolved_plt->output_section->vma
4972 - resolved_plt->output_offset
4973 - plt_offset
4974 - plt_got_insn_size);
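/* PLT_GOT_INSN_SIZE is the offset from the start of the entry in
   RESOLVED_PLT to the end of the instruction that jumps through the
   GOT slot, so the value computed above is the rip-relative
   displacement that instruction needs.  In the standard PLT entry,
   for example, "jmpq *name@GOTPCREL(%rip)" is the first, 6-byte
   instruction and its displacement field sits at offset 2.  */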
4975
4976 /* Check PC-relative offset overflow in PLT entry. */
4977 if ((plt_got_pcrel_offset + 0x80000000) > 0xffffffff)
4978 info->callbacks->einfo (_("%F%B: PC-relative offset overflow in PLT entry for `%s'\n"),
4979 output_bfd, h->root.root.string);
4980
4981 bfd_put_32 (output_bfd, plt_got_pcrel_offset,
4982 resolved_plt->contents + plt_offset + plt_got_offset);
4983
4984 /* Fill in the entry in the global offset table, initially this
4985 points to the second part of the PLT entry. */
4986 bfd_put_64 (output_bfd, (plt->output_section->vma
4987 + plt->output_offset
4988 + h->plt.offset + abed->plt_lazy_offset),
4989 gotplt->contents + got_offset);
4990
4991 /* Fill in the entry in the .rela.plt section. */
4992 rela.r_offset = (gotplt->output_section->vma
4993 + gotplt->output_offset
4994 + got_offset);
4995 if (h->dynindx == -1
4996 || ((info->executable
4997 || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
4998 && h->def_regular
4999 && h->type == STT_GNU_IFUNC))
5000 {
5001 /* If an STT_GNU_IFUNC symbol is locally defined, generate
5002 R_X86_64_IRELATIVE instead of R_X86_64_JUMP_SLOT. */
5003 rela.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
5004 rela.r_addend = (h->root.u.def.value
5005 + h->root.u.def.section->output_section->vma
5006 + h->root.u.def.section->output_offset);
5007 /* R_X86_64_IRELATIVE comes last. */
5008 plt_index = htab->next_irelative_index--;
5009 }
5010 else
5011 {
5012 rela.r_info = htab->r_info (h->dynindx, R_X86_64_JUMP_SLOT);
5013 rela.r_addend = 0;
5014 plt_index = htab->next_jump_slot_index++;
5015 }
5016
5017 /* Don't fill PLT entry for static executables. */
5018 if (plt == htab->elf.splt)
5019 {
5020 bfd_vma plt0_offset = h->plt.offset + plt_plt_insn_end;
5021
5022 /* Put relocation index. */
5023 bfd_put_32 (output_bfd, plt_index,
5024 plt->contents + h->plt.offset + abed->plt_reloc_offset);
5025
5026 /* Put offset for jmp .PLT0 and check for overflow. We don't
5027 check relocation index for overflow since branch displacement
5028 will overflow first. */
5029 if (plt0_offset > 0x80000000)
5030 info->callbacks->einfo (_("%F%B: branch displacement overflow in PLT entry for `%s'\n"),
5031 output_bfd, h->root.root.string);
5032 bfd_put_32 (output_bfd, - plt0_offset,
5033 plt->contents + h->plt.offset + plt_plt_offset);
5034 }
5035
5036 bed = get_elf_backend_data (output_bfd);
5037 loc = relplt->contents + plt_index * bed->s->sizeof_rela;
5038 bed->s->swap_reloca_out (output_bfd, &rela, loc);
5039 }
5040 else if (eh->plt_got.offset != (bfd_vma) -1)
5041 {
5042 bfd_vma got_offset, plt_offset, plt_got_offset, plt_got_insn_size;
5043 asection *plt, *got;
5044 bfd_boolean got_after_plt;
5045 int32_t got_pcrel_offset;
5046 const bfd_byte *got_plt_entry;
5047
5048 /* Set the entry in the GOT procedure linkage table. */
5049 plt = htab->plt_got;
5050 got = htab->elf.sgot;
5051 got_offset = h->got.offset;
5052
5053 if (got_offset == (bfd_vma) -1
5054 || h->type == STT_GNU_IFUNC
5055 || plt == NULL
5056 || got == NULL)
5057 abort ();
5058
5059 /* Use the second PLT entry template for the GOT PLT since they
5060 are identical. */
5061 plt_got_insn_size = elf_x86_64_bnd_arch_bed.plt_got_insn_size;
5062 plt_got_offset = elf_x86_64_bnd_arch_bed.plt_got_offset;
5063 if (eh->has_bnd_reloc)
5064 got_plt_entry = elf_x86_64_bnd_plt2_entry;
5065 else
5066 {
5067 got_plt_entry = elf_x86_64_legacy_plt2_entry;
5068
5069 /* Subtract 1 since there is no BND prefix. */
5070 plt_got_insn_size -= 1;
5071 plt_got_offset -= 1;
5072 }
5073
5074 /* Fill in the entry in the GOT procedure linkage table. */
5075 plt_offset = eh->plt_got.offset;
5076 memcpy (plt->contents + plt_offset,
5077 got_plt_entry, sizeof (elf_x86_64_legacy_plt2_entry));
5078
5079 /* Put in the offset of the GOT entry relative to the end of the
5080 PC-relative instruction that refers to it. */
5081 got_pcrel_offset = (got->output_section->vma
5082 + got->output_offset
5083 + got_offset
5084 - plt->output_section->vma
5085 - plt->output_offset
5086 - plt_offset
5087 - plt_got_insn_size);
5088
5089 /* Check PC-relative offset overflow in GOT PLT entry. */
5090 got_after_plt = got->output_section->vma > plt->output_section->vma;
5091 if ((got_after_plt && got_pcrel_offset < 0)
5092 || (!got_after_plt && got_pcrel_offset > 0))
5093 info->callbacks->einfo (_("%F%B: PC-relative offset overflow in GOT PLT entry for `%s'\n"),
5094 output_bfd, h->root.root.string);
5095
5096 bfd_put_32 (output_bfd, got_pcrel_offset,
5097 plt->contents + plt_offset + plt_got_offset);
5098 }
5099
5100 if (!h->def_regular
5101 && (h->plt.offset != (bfd_vma) -1
5102 || eh->plt_got.offset != (bfd_vma) -1))
5103 {
5104 /* Mark the symbol as undefined, rather than as defined in
5105 the .plt section. Leave the value if there were any
5106 relocations where pointer equality matters (this is a clue
5107 for the dynamic linker, to make function pointer
5108 comparisons work between an application and shared
5109 library), otherwise set it to zero. If a function is only
5110 called from a binary, there is no need to slow down
5111 shared libraries because of that. */
5112 sym->st_shndx = SHN_UNDEF;
5113 if (!h->pointer_equality_needed)
5114 sym->st_value = 0;
5115 }
5116
5117 if (h->got.offset != (bfd_vma) -1
5118 && ! GOT_TLS_GD_ANY_P (elf_x86_64_hash_entry (h)->tls_type)
5119 && elf_x86_64_hash_entry (h)->tls_type != GOT_TLS_IE)
5120 {
5121 Elf_Internal_Rela rela;
5122
5123 /* This symbol has an entry in the global offset table. Set it
5124 up. */
5125 if (htab->elf.sgot == NULL || htab->elf.srelgot == NULL)
5126 abort ();
5127
5128 rela.r_offset = (htab->elf.sgot->output_section->vma
5129 + htab->elf.sgot->output_offset
5130 + (h->got.offset &~ (bfd_vma) 1));
5131
5132 /* If this is a static link, or it is a -Bsymbolic link and the
5133 symbol is defined locally or was forced to be local because
5134 of a version file, we just want to emit a RELATIVE reloc.
5135 The entry in the global offset table will already have been
5136 initialized in the relocate_section function. */
5137 if (h->def_regular
5138 && h->type == STT_GNU_IFUNC)
5139 {
5140 if (info->shared)
5141 {
5142 /* Generate R_X86_64_GLOB_DAT. */
5143 goto do_glob_dat;
5144 }
5145 else
5146 {
5147 asection *plt;
5148
5149 if (!h->pointer_equality_needed)
5150 abort ();
5151
5152 /* For a non-shared object we can't use .got.plt, which contains
5153 the real function address, if we need pointer equality.  We
5154 load the GOT entry with the PLT entry address instead. */
5155 plt = htab->elf.splt ? htab->elf.splt : htab->elf.iplt;
5156 bfd_put_64 (output_bfd, (plt->output_section->vma
5157 + plt->output_offset
5158 + h->plt.offset),
5159 htab->elf.sgot->contents + h->got.offset);
5160 return TRUE;
5161 }
5162 }
5163 else if (info->shared
5164 && SYMBOL_REFERENCES_LOCAL (info, h))
5165 {
5166 if (!h->def_regular)
5167 return FALSE;
5168 BFD_ASSERT((h->got.offset & 1) != 0);
5169 rela.r_info = htab->r_info (0, R_X86_64_RELATIVE);
5170 rela.r_addend = (h->root.u.def.value
5171 + h->root.u.def.section->output_section->vma
5172 + h->root.u.def.section->output_offset);
5173 }
5174 else
5175 {
5176 BFD_ASSERT((h->got.offset & 1) == 0);
5177 do_glob_dat:
5178 bfd_put_64 (output_bfd, (bfd_vma) 0,
5179 htab->elf.sgot->contents + h->got.offset);
5180 rela.r_info = htab->r_info (h->dynindx, R_X86_64_GLOB_DAT);
5181 rela.r_addend = 0;
5182 }
5183
5184 elf_append_rela (output_bfd, htab->elf.srelgot, &rela);
5185 }
5186
5187 if (h->needs_copy)
5188 {
5189 Elf_Internal_Rela rela;
5190
5191 /* This symbol needs a copy reloc. Set it up. */
5192
5193 if (h->dynindx == -1
5194 || (h->root.type != bfd_link_hash_defined
5195 && h->root.type != bfd_link_hash_defweak)
5196 || htab->srelbss == NULL)
5197 abort ();
5198
5199 rela.r_offset = (h->root.u.def.value
5200 + h->root.u.def.section->output_section->vma
5201 + h->root.u.def.section->output_offset);
5202 rela.r_info = htab->r_info (h->dynindx, R_X86_64_COPY);
5203 rela.r_addend = 0;
5204 elf_append_rela (output_bfd, htab->srelbss, &rela);
5205 }
5206
5207 return TRUE;
5208 }
5209
5210 /* Finish up local dynamic symbol handling. We set the contents of
5211 various dynamic sections here. */
5212
5213 static bfd_boolean
5214 elf_x86_64_finish_local_dynamic_symbol (void **slot, void *inf)
5215 {
5216 struct elf_link_hash_entry *h
5217 = (struct elf_link_hash_entry *) *slot;
5218 struct bfd_link_info *info
5219 = (struct bfd_link_info *) inf;
5220
5221 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
5222 info, h, NULL);
5223 }
5224
5225 /* Used to decide how to sort relocs in an optimal manner for the
5226 dynamic linker, before writing them out. */
5227
5228 static enum elf_reloc_type_class
5229 elf_x86_64_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
5230 const asection *rel_sec ATTRIBUTE_UNUSED,
5231 const Elf_Internal_Rela *rela)
5232 {
5233 switch ((int) ELF32_R_TYPE (rela->r_info))
5234 {
5235 case R_X86_64_RELATIVE:
5236 case R_X86_64_RELATIVE64:
5237 return reloc_class_relative;
5238 case R_X86_64_JUMP_SLOT:
5239 return reloc_class_plt;
5240 case R_X86_64_COPY:
5241 return reloc_class_copy;
5242 default:
5243 return reloc_class_normal;
5244 }
5245 }
5246
5247 /* Finish up the dynamic sections. */
5248
5249 static bfd_boolean
5250 elf_x86_64_finish_dynamic_sections (bfd *output_bfd,
5251 struct bfd_link_info *info)
5252 {
5253 struct elf_x86_64_link_hash_table *htab;
5254 bfd *dynobj;
5255 asection *sdyn;
5256 const struct elf_x86_64_backend_data *abed;
5257
5258 htab = elf_x86_64_hash_table (info);
5259 if (htab == NULL)
5260 return FALSE;
5261
5262 /* Use MPX backend data in case of BND relocation. Use .plt_bnd
5263 section only if there is .plt section. */
5264 abed = (htab->elf.splt != NULL && htab->plt_bnd != NULL
5265 ? &elf_x86_64_bnd_arch_bed
5266 : get_elf_x86_64_backend_data (output_bfd));
5267
5268 dynobj = htab->elf.dynobj;
5269 sdyn = bfd_get_linker_section (dynobj, ".dynamic");
5270
5271 if (htab->elf.dynamic_sections_created)
5272 {
5273 bfd_byte *dyncon, *dynconend;
5274 const struct elf_backend_data *bed;
5275 bfd_size_type sizeof_dyn;
5276
5277 if (sdyn == NULL || htab->elf.sgot == NULL)
5278 abort ();
5279
5280 bed = get_elf_backend_data (dynobj);
5281 sizeof_dyn = bed->s->sizeof_dyn;
5282 dyncon = sdyn->contents;
5283 dynconend = sdyn->contents + sdyn->size;
5284 for (; dyncon < dynconend; dyncon += sizeof_dyn)
5285 {
5286 Elf_Internal_Dyn dyn;
5287 asection *s;
5288
5289 (*bed->s->swap_dyn_in) (dynobj, dyncon, &dyn);
5290
5291 switch (dyn.d_tag)
5292 {
5293 default:
5294 continue;
5295
5296 case DT_PLTGOT:
5297 s = htab->elf.sgotplt;
5298 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
5299 break;
5300
5301 case DT_JMPREL:
5302 dyn.d_un.d_ptr = htab->elf.srelplt->output_section->vma;
5303 break;
5304
5305 case DT_PLTRELSZ:
5306 s = htab->elf.srelplt->output_section;
5307 dyn.d_un.d_val = s->size;
5308 break;
5309
5310 case DT_RELASZ:
5311 /* The procedure linkage table relocs (DT_JMPREL) should
5312 not be included in the overall relocs (DT_RELA).
5313 Therefore, we override the DT_RELASZ entry here to
5314 make it not include the JMPREL relocs. Since the
5315 linker script arranges for .rela.plt to follow all
5316 other relocation sections, we don't have to worry
5317 about changing the DT_RELA entry. */
5318 if (htab->elf.srelplt != NULL)
5319 {
5320 s = htab->elf.srelplt->output_section;
5321 dyn.d_un.d_val -= s->size;
5322 }
5323 break;
5324
5325 case DT_TLSDESC_PLT:
5326 s = htab->elf.splt;
5327 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
5328 + htab->tlsdesc_plt;
5329 break;
5330
5331 case DT_TLSDESC_GOT:
5332 s = htab->elf.sgot;
5333 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
5334 + htab->tlsdesc_got;
5335 break;
5336 }
5337
5338 (*bed->s->swap_dyn_out) (output_bfd, &dyn, dyncon);
5339 }
5340
5341 /* Fill in the special first entry in the procedure linkage table. */
5342 if (htab->elf.splt && htab->elf.splt->size > 0)
5343 {
5344 /* Fill in the first entry in the procedure linkage table. */
5345 memcpy (htab->elf.splt->contents,
5346 abed->plt0_entry, abed->plt_entry_size);
5347 /* Add the displacement for pushq GOT+8(%rip); the instruction
5348 is 6 bytes long, hence the extra 6 subtracted below. */
5349 bfd_put_32 (output_bfd,
5350 (htab->elf.sgotplt->output_section->vma
5351 + htab->elf.sgotplt->output_offset
5352 + 8
5353 - htab->elf.splt->output_section->vma
5354 - htab->elf.splt->output_offset
5355 - 6),
5356 htab->elf.splt->contents + abed->plt0_got1_offset);
5357 /* Add offset for the PC-relative instruction accessing GOT+16,
5358 subtracting the offset to the end of that instruction. */
5359 bfd_put_32 (output_bfd,
5360 (htab->elf.sgotplt->output_section->vma
5361 + htab->elf.sgotplt->output_offset
5362 + 16
5363 - htab->elf.splt->output_section->vma
5364 - htab->elf.splt->output_offset
5365 - abed->plt0_got2_insn_end),
5366 htab->elf.splt->contents + abed->plt0_got2_offset);
5367
5368 elf_section_data (htab->elf.splt->output_section)
5369 ->this_hdr.sh_entsize = abed->plt_entry_size;
5370
5371 if (htab->tlsdesc_plt)
5372 {
5373 bfd_put_64 (output_bfd, (bfd_vma) 0,
5374 htab->elf.sgot->contents + htab->tlsdesc_got);
5375
5376 memcpy (htab->elf.splt->contents + htab->tlsdesc_plt,
5377 abed->plt0_entry, abed->plt_entry_size);
5378
5379 /* Add the displacement for pushq GOT+8(%rip); the
5380 instruction is 6 bytes long, hence the extra 6 subtracted below. */
5381 bfd_put_32 (output_bfd,
5382 (htab->elf.sgotplt->output_section->vma
5383 + htab->elf.sgotplt->output_offset
5384 + 8
5385 - htab->elf.splt->output_section->vma
5386 - htab->elf.splt->output_offset
5387 - htab->tlsdesc_plt
5388 - 6),
5389 htab->elf.splt->contents
5390 + htab->tlsdesc_plt + abed->plt0_got1_offset);
5391 /* Add offset for the PC-relative instruction accessing GOT+TDG,
5392 where TDG stands for htab->tlsdesc_got, subtracting the offset
5393 to the end of that instruction. */
5394 bfd_put_32 (output_bfd,
5395 (htab->elf.sgot->output_section->vma
5396 + htab->elf.sgot->output_offset
5397 + htab->tlsdesc_got
5398 - htab->elf.splt->output_section->vma
5399 - htab->elf.splt->output_offset
5400 - htab->tlsdesc_plt
5401 - abed->plt0_got2_insn_end),
5402 htab->elf.splt->contents
5403 + htab->tlsdesc_plt + abed->plt0_got2_offset);
5404 }
5405 }
5406 }
5407
5408 if (htab->plt_bnd != NULL)
5409 elf_section_data (htab->plt_bnd->output_section)
5410 ->this_hdr.sh_entsize = sizeof (elf_x86_64_bnd_plt2_entry);
5411
5412 if (htab->elf.sgotplt)
5413 {
5414 if (bfd_is_abs_section (htab->elf.sgotplt->output_section))
5415 {
5416 (*_bfd_error_handler)
5417 (_("discarded output section: `%A'"), htab->elf.sgotplt);
5418 return FALSE;
5419 }
5420
5421 /* Fill in the first three entries in the global offset table. */
5422 if (htab->elf.sgotplt->size > 0)
5423 {
5424 /* Set the first entry in the global offset table to the address of
5425 the dynamic section. */
5426 if (sdyn == NULL)
5427 bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents);
5428 else
5429 bfd_put_64 (output_bfd,
5430 sdyn->output_section->vma + sdyn->output_offset,
5431 htab->elf.sgotplt->contents);
5432 /* Write GOT[1] and GOT[2], needed for the dynamic linker. */
5433 bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents + GOT_ENTRY_SIZE);
5434 bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents + GOT_ENTRY_SIZE*2);
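/* GOT[0] above holds the address of _DYNAMIC; GOT[1] and GOT[2] are
   cleared here and are filled in at run time by the dynamic linker
   with its link map and lazy resolver entry point.  */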
5435 }
5436
5437 elf_section_data (htab->elf.sgotplt->output_section)->this_hdr.sh_entsize =
5438 GOT_ENTRY_SIZE;
5439 }
5440
5441 /* Adjust .eh_frame for .plt section. */
5442 if (htab->plt_eh_frame != NULL
5443 && htab->plt_eh_frame->contents != NULL)
5444 {
5445 if (htab->elf.splt != NULL
5446 && htab->elf.splt->size != 0
5447 && (htab->elf.splt->flags & SEC_EXCLUDE) == 0
5448 && htab->elf.splt->output_section != NULL
5449 && htab->plt_eh_frame->output_section != NULL)
5450 {
5451 bfd_vma plt_start = htab->elf.splt->output_section->vma;
5452 bfd_vma eh_frame_start = htab->plt_eh_frame->output_section->vma
5453 + htab->plt_eh_frame->output_offset
5454 + PLT_FDE_START_OFFSET;
5455 bfd_put_signed_32 (dynobj, plt_start - eh_frame_start,
5456 htab->plt_eh_frame->contents
5457 + PLT_FDE_START_OFFSET);
5458 }
5459 if (htab->plt_eh_frame->sec_info_type == SEC_INFO_TYPE_EH_FRAME)
5460 {
5461 if (! _bfd_elf_write_section_eh_frame (output_bfd, info,
5462 htab->plt_eh_frame,
5463 htab->plt_eh_frame->contents))
5464 return FALSE;
5465 }
5466 }
5467
5468 if (htab->elf.sgot && htab->elf.sgot->size > 0)
5469 elf_section_data (htab->elf.sgot->output_section)->this_hdr.sh_entsize
5470 = GOT_ENTRY_SIZE;
5471
5472 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
5473 htab_traverse (htab->loc_hash_table,
5474 elf_x86_64_finish_local_dynamic_symbol,
5475 info);
5476
5477 return TRUE;
5478 }
5479
5480 /* Return address in section PLT for the Ith GOTPLT relocation, for
5481 relocation REL or (bfd_vma) -1 if it should not be included. */
5482
5483 static bfd_vma
5484 elf_x86_64_plt_sym_val (bfd_vma i, const asection *plt,
5485 const arelent *rel)
5486 {
5487 bfd *abfd;
5488 const struct elf_x86_64_backend_data *bed;
5489 bfd_vma plt_offset;
5490
5491 /* Only match R_X86_64_JUMP_SLOT and R_X86_64_IRELATIVE. */
5492 if (rel->howto->type != R_X86_64_JUMP_SLOT
5493 && rel->howto->type != R_X86_64_IRELATIVE)
5494 return (bfd_vma) -1;
5495
5496 abfd = plt->owner;
5497 bed = get_elf_x86_64_backend_data (abfd);
5498 plt_offset = bed->plt_entry_size;
5499
5500 if (elf_elfheader (abfd)->e_ident[EI_OSABI] != ELFOSABI_GNU)
5501 return plt->vma + (i + 1) * plt_offset;
5502
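/* Otherwise (ELFOSABI_GNU, so IFUNCs may be present and
   R_X86_64_IRELATIVE relocations are sorted to the end), the PLT
   entries need not be in relocation order; scan the PLT and compare
   the relocation index pushed by each entry, stored at
   plt_reloc_offset, with I.  */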
5503 while (plt_offset < plt->size)
5504 {
5505 bfd_vma reloc_index;
5506 bfd_byte reloc_index_raw[4];
5507
5508 if (!bfd_get_section_contents (abfd, (asection *) plt,
5509 reloc_index_raw,
5510 plt_offset + bed->plt_reloc_offset,
5511 sizeof (reloc_index_raw)))
5512 return (bfd_vma) -1;
5513
5514 reloc_index = H_GET_32 (abfd, reloc_index_raw);
5515 if (reloc_index == i)
5516 return plt->vma + plt_offset;
5517 plt_offset += bed->plt_entry_size;
5518 }
5519
5520 abort ();
5521 }
5522
5523 /* Return the offset in the .plt.bnd section for the Ith GOTPLT relocation
5524 in PLT section PLT, or (bfd_vma) -1 if it should not be included. */
5525
5526 static bfd_vma
5527 elf_x86_64_plt_sym_val_offset_plt_bnd (bfd_vma i, const asection *plt)
5528 {
5529 const struct elf_x86_64_backend_data *bed = &elf_x86_64_bnd_arch_bed;
5530 bfd *abfd = plt->owner;
5531 bfd_vma plt_offset = bed->plt_entry_size;
5532
5533 if (elf_elfheader (abfd)->e_ident[EI_OSABI] != ELFOSABI_GNU)
5534 return i * sizeof (elf_x86_64_legacy_plt2_entry);
5535
5536 while (plt_offset < plt->size)
5537 {
5538 bfd_vma reloc_index;
5539 bfd_byte reloc_index_raw[4];
5540
5541 if (!bfd_get_section_contents (abfd, (asection *) plt,
5542 reloc_index_raw,
5543 plt_offset + bed->plt_reloc_offset,
5544 sizeof (reloc_index_raw)))
5545 return (bfd_vma) -1;
5546
5547 reloc_index = H_GET_32 (abfd, reloc_index_raw);
5548 if (reloc_index == i)
5549 {
5550 /* This is the index in .plt section. */
5551 long plt_index = plt_offset / bed->plt_entry_size;
5552 /* Return the offset in .plt.bnd section. */
5553 return (plt_index - 1) * sizeof (elf_x86_64_legacy_plt2_entry);
5554 }
5555 plt_offset += bed->plt_entry_size;
5556 }
5557
5558 abort ();
5559 }
5560
5561 /* Similar to _bfd_elf_get_synthetic_symtab, with .plt.bnd section
5562 support. */
5563
5564 static long
5565 elf_x86_64_get_synthetic_symtab (bfd *abfd,
5566 long symcount,
5567 asymbol **syms,
5568 long dynsymcount,
5569 asymbol **dynsyms,
5570 asymbol **ret)
5571 {
5572 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
5573 asection *relplt;
5574 asymbol *s;
5575 bfd_boolean (*slurp_relocs) (bfd *, asection *, asymbol **, bfd_boolean);
5576 arelent *p;
5577 long count, i, n;
5578 size_t size;
5579 Elf_Internal_Shdr *hdr;
5580 char *names;
5581 asection *plt, *plt_push;
5582
5583 plt_push = bfd_get_section_by_name (abfd, ".plt");
5584 if (plt_push == NULL)
5585 return 0;
5586
5587 plt = bfd_get_section_by_name (abfd, ".plt.bnd");
5588 /* Use the generic ELF version if there is no .plt.bnd section. */
5589 if (plt == NULL)
5590 return _bfd_elf_get_synthetic_symtab (abfd, symcount, syms,
5591 dynsymcount, dynsyms, ret);
5592
5593 *ret = NULL;
5594
5595 if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0)
5596 return 0;
5597
5598 if (dynsymcount <= 0)
5599 return 0;
5600
5601 relplt = bfd_get_section_by_name (abfd, ".rela.plt");
5602 if (relplt == NULL)
5603 return 0;
5604
5605 hdr = &elf_section_data (relplt)->this_hdr;
5606 if (hdr->sh_link != elf_dynsymtab (abfd)
5607 || (hdr->sh_type != SHT_REL && hdr->sh_type != SHT_RELA))
5608 return 0;
5609
5610 slurp_relocs = get_elf_backend_data (abfd)->s->slurp_reloc_table;
5611 if (! (*slurp_relocs) (abfd, relplt, dynsyms, TRUE))
5612 return -1;
5613
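/* Reserve room for one asymbol per relocation plus its "NAME@plt"
   string (and an optional "+0xADDEND" suffix).  */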
5614 count = relplt->size / hdr->sh_entsize;
5615 size = count * sizeof (asymbol);
5616 p = relplt->relocation;
5617 for (i = 0; i < count; i++, p += bed->s->int_rels_per_ext_rel)
5618 {
5619 size += strlen ((*p->sym_ptr_ptr)->name) + sizeof ("@plt");
5620 if (p->addend != 0)
5621 size += sizeof ("+0x") - 1 + 8 + 8;
5622 }
5623
5624 s = *ret = (asymbol *) bfd_malloc (size);
5625 if (s == NULL)
5626 return -1;
5627
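/* Create a synthetic "NAME@plt" symbol for each JUMP_SLOT or IRELATIVE
   relocation, placed at the matching offset in the .plt.bnd section.  */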
5628 names = (char *) (s + count);
5629 p = relplt->relocation;
5630 n = 0;
5631 for (i = 0; i < count; i++, p++)
5632 {
5633 bfd_vma offset;
5634 size_t len;
5635
5636 if (p->howto->type != R_X86_64_JUMP_SLOT
5637 && p->howto->type != R_X86_64_IRELATIVE)
5638 continue;
5639
5640 offset = elf_x86_64_plt_sym_val_offset_plt_bnd (i, plt_push);
5641
5642 *s = **p->sym_ptr_ptr;
5643 /* Undefined syms won't have BSF_LOCAL or BSF_GLOBAL set. Since
5644 we are defining a symbol, ensure one of them is set. */
5645 if ((s->flags & BSF_LOCAL) == 0)
5646 s->flags |= BSF_GLOBAL;
5647 s->flags |= BSF_SYNTHETIC;
5648 s->section = plt;
5649 s->value = offset;
5650 s->name = names;
5651 s->udata.p = NULL;
5652 len = strlen ((*p->sym_ptr_ptr)->name);
5653 memcpy (names, (*p->sym_ptr_ptr)->name, len);
5654 names += len;
5655 if (p->addend != 0)
5656 {
5657 char buf[30], *a;
5658
5659 memcpy (names, "+0x", sizeof ("+0x") - 1);
5660 names += sizeof ("+0x") - 1;
5661 bfd_sprintf_vma (abfd, buf, p->addend);
5662 for (a = buf; *a == '0'; ++a)
5663 ;
5664 len = strlen (a);
5665 memcpy (names, a, len);
5666 names += len;
5667 }
5668 memcpy (names, "@plt", sizeof ("@plt"));
5669 names += sizeof ("@plt");
5670 ++s, ++n;
5671 }
5672
5673 return n;
5674 }
5675
5676 /* Handle an x86-64 specific section when reading an object file. This
5677 is called when elfcode.h finds a section with an unknown type. */
5678
5679 static bfd_boolean
5680 elf_x86_64_section_from_shdr (bfd *abfd, Elf_Internal_Shdr *hdr,
5681 const char *name, int shindex)
5682 {
5683 if (hdr->sh_type != SHT_X86_64_UNWIND)
5684 return FALSE;
5685
5686 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
5687 return FALSE;
5688
5689 return TRUE;
5690 }
5691
5692 /* Hook called by the linker routine which adds symbols from an object
5693 file. We use it to put SHN_X86_64_LCOMMON items in .lbss, instead
5694 of .bss. */
5695
5696 static bfd_boolean
5697 elf_x86_64_add_symbol_hook (bfd *abfd,
5698 struct bfd_link_info *info,
5699 Elf_Internal_Sym *sym,
5700 const char **namep ATTRIBUTE_UNUSED,
5701 flagword *flagsp ATTRIBUTE_UNUSED,
5702 asection **secp,
5703 bfd_vma *valp)
5704 {
5705 asection *lcomm;
5706
5707 switch (sym->st_shndx)
5708 {
5709 case SHN_X86_64_LCOMMON:
5710 lcomm = bfd_get_section_by_name (abfd, "LARGE_COMMON");
5711 if (lcomm == NULL)
5712 {
5713 lcomm = bfd_make_section_with_flags (abfd,
5714 "LARGE_COMMON",
5715 (SEC_ALLOC
5716 | SEC_IS_COMMON
5717 | SEC_LINKER_CREATED));
5718 if (lcomm == NULL)
5719 return FALSE;
5720 elf_section_flags (lcomm) |= SHF_X86_64_LARGE;
5721 }
5722 *secp = lcomm;
5723 *valp = sym->st_size;
5724 return TRUE;
5725 }
5726
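/* Note STT_GNU_IFUNC and STB_GNU_UNIQUE symbols coming from regular
   input objects so that the output file can be marked ELFOSABI_GNU.  */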
5727 if ((ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC
5728 || ELF_ST_BIND (sym->st_info) == STB_GNU_UNIQUE)
5729 && (abfd->flags & DYNAMIC) == 0
5730 && bfd_get_flavour (info->output_bfd) == bfd_target_elf_flavour)
5731 elf_tdata (info->output_bfd)->has_gnu_symbols = TRUE;
5732
5733 return TRUE;
5734 }
5735
5736
5737 /* Given a BFD section, try to locate the corresponding ELF section
5738 index. */
5739
5740 static bfd_boolean
5741 elf_x86_64_elf_section_from_bfd_section (bfd *abfd ATTRIBUTE_UNUSED,
5742 asection *sec, int *index_return)
5743 {
5744 if (sec == &_bfd_elf_large_com_section)
5745 {
5746 *index_return = SHN_X86_64_LCOMMON;
5747 return TRUE;
5748 }
5749 return FALSE;
5750 }
5751
5752 /* Process a symbol. */
5753
5754 static void
5755 elf_x86_64_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED,
5756 asymbol *asym)
5757 {
5758 elf_symbol_type *elfsym = (elf_symbol_type *) asym;
5759
5760 switch (elfsym->internal_elf_sym.st_shndx)
5761 {
5762 case SHN_X86_64_LCOMMON:
5763 asym->section = &_bfd_elf_large_com_section;
5764 asym->value = elfsym->internal_elf_sym.st_size;
5765 /* A common symbol does not set BSF_GLOBAL. */
5766 asym->flags &= ~BSF_GLOBAL;
5767 break;
5768 }
5769 }
5770
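/* The hooks below teach the generic ELF linker about x86-64 large
   common symbols: they behave like common symbols but live in the
   large common section (SHF_X86_64_LARGE) rather than the normal one.  */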
5771 static bfd_boolean
5772 elf_x86_64_common_definition (Elf_Internal_Sym *sym)
5773 {
5774 return (sym->st_shndx == SHN_COMMON
5775 || sym->st_shndx == SHN_X86_64_LCOMMON);
5776 }
5777
5778 static unsigned int
5779 elf_x86_64_common_section_index (asection *sec)
5780 {
5781 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
5782 return SHN_COMMON;
5783 else
5784 return SHN_X86_64_LCOMMON;
5785 }
5786
5787 static asection *
5788 elf_x86_64_common_section (asection *sec)
5789 {
5790 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
5791 return bfd_com_section_ptr;
5792 else
5793 return &_bfd_elf_large_com_section;
5794 }
5795
5796 static bfd_boolean
5797 elf_x86_64_merge_symbol (struct elf_link_hash_entry *h,
5798 const Elf_Internal_Sym *sym,
5799 asection **psec,
5800 bfd_boolean newdef,
5801 bfd_boolean olddef,
5802 bfd *oldbfd,
5803 const asection *oldsec)
5804 {
5805 /* A normal common symbol and a large common symbol result in a
5806 normal common symbol. We turn the large common symbol into a
5807 normal one. */
5808 if (!olddef
5809 && h->root.type == bfd_link_hash_common
5810 && !newdef
5811 && bfd_is_com_section (*psec)
5812 && oldsec != *psec)
5813 {
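/* The new symbol is a normal common and the old one came from a
   large common section: move the old definition into a normal
   COMMON section.  */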
5814 if (sym->st_shndx == SHN_COMMON
5815 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) != 0)
5816 {
5817 h->root.u.c.p->section
5818 = bfd_make_section_old_way (oldbfd, "COMMON");
5819 h->root.u.c.p->section->flags = SEC_ALLOC;
5820 }
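/* The new symbol is a large common and the old one is a normal
   common: keep using the normal common section.  */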
5821 else if (sym->st_shndx == SHN_X86_64_LCOMMON
5822 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) == 0)
5823 *psec = bfd_com_section_ptr;
5824 }
5825
5826 return TRUE;
5827 }
5828
5829 static int
5830 elf_x86_64_additional_program_headers (bfd *abfd,
5831 struct bfd_link_info *info ATTRIBUTE_UNUSED)
5832 {
5833 asection *s;
5834 int count = 0;
5835
5836 /* Check to see if we need a large readonly segment. */
5837 s = bfd_get_section_by_name (abfd, ".lrodata");
5838 if (s && (s->flags & SEC_LOAD))
5839 count++;
5840
5841 /* Check to see if we need a large data segment. Since the .lbss
5842 section is placed right after the .bss section, there should be no
5843 need for a large data segment just because of .lbss. */
5844 s = bfd_get_section_by_name (abfd, ".ldata");
5845 if (s && (s->flags & SEC_LOAD))
5846 count++;
5847
5848 return count;
5849 }
5850
5851 /* Return TRUE if symbol should be hashed in the `.gnu.hash' section. */
5852
5853 static bfd_boolean
5854 elf_x86_64_hash_symbol (struct elf_link_hash_entry *h)
5855 {
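/* A symbol that is only reached through its PLT entry, is not defined
   in a regular object, and never has its address taken does not need
   an entry in the .gnu.hash section.  */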
5856 if (h->plt.offset != (bfd_vma) -1
5857 && !h->def_regular
5858 && !h->pointer_equality_needed)
5859 return FALSE;
5860
5861 return _bfd_elf_hash_symbol (h);
5862 }
5863
5864 /* Return TRUE iff relocations for INPUT are compatible with OUTPUT. */
5865
5866 static bfd_boolean
5867 elf_x86_64_relocs_compatible (const bfd_target *input,
5868 const bfd_target *output)
5869 {
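/* Require the same ELF class so that x32 (ELFCLASS32) and x86-64
   (ELFCLASS64) objects are not treated as having compatible
   relocations.  */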
5870 return ((xvec_get_elf_backend_data (input)->s->elfclass
5871 == xvec_get_elf_backend_data (output)->s->elfclass)
5872 && _bfd_elf_relocs_compatible (input, output));
5873 }
5874
5875 static const struct bfd_elf_special_section
5876 elf_x86_64_special_sections[]=
5877 {
5878 { STRING_COMMA_LEN (".gnu.linkonce.lb"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5879 { STRING_COMMA_LEN (".gnu.linkonce.lr"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
5880 { STRING_COMMA_LEN (".gnu.linkonce.lt"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_EXECINSTR + SHF_X86_64_LARGE},
5881 { STRING_COMMA_LEN (".lbss"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5882 { STRING_COMMA_LEN (".ldata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5883 { STRING_COMMA_LEN (".lrodata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
5884 { NULL, 0, 0, 0, 0 }
5885 };
5886
5887 #define TARGET_LITTLE_SYM x86_64_elf64_vec
5888 #define TARGET_LITTLE_NAME "elf64-x86-64"
5889 #define ELF_ARCH bfd_arch_i386
5890 #define ELF_TARGET_ID X86_64_ELF_DATA
5891 #define ELF_MACHINE_CODE EM_X86_64
5892 #define ELF_MAXPAGESIZE 0x200000
5893 #define ELF_MINPAGESIZE 0x1000
5894 #define ELF_COMMONPAGESIZE 0x1000
5895
5896 #define elf_backend_can_gc_sections 1
5897 #define elf_backend_can_refcount 1
5898 #define elf_backend_want_got_plt 1
5899 #define elf_backend_plt_readonly 1
5900 #define elf_backend_want_plt_sym 0
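/* The first three GOT entries are reserved: GOT[0] holds the link-time
   address of _DYNAMIC, while GOT[1] and GOT[2] are filled in by the
   dynamic linker for lazy PLT resolution.  */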
5901 #define elf_backend_got_header_size (GOT_ENTRY_SIZE*3)
5902 #define elf_backend_rela_normal 1
5903 #define elf_backend_plt_alignment 4
5904
5905 #define elf_info_to_howto elf_x86_64_info_to_howto
5906
5907 #define bfd_elf64_bfd_link_hash_table_create \
5908 elf_x86_64_link_hash_table_create
5909 #define bfd_elf64_bfd_reloc_type_lookup elf_x86_64_reloc_type_lookup
5910 #define bfd_elf64_bfd_reloc_name_lookup \
5911 elf_x86_64_reloc_name_lookup
5912
5913 #define elf_backend_adjust_dynamic_symbol elf_x86_64_adjust_dynamic_symbol
5914 #define elf_backend_relocs_compatible elf_x86_64_relocs_compatible
5915 #define elf_backend_check_relocs elf_x86_64_check_relocs
5916 #define elf_backend_copy_indirect_symbol elf_x86_64_copy_indirect_symbol
5917 #define elf_backend_create_dynamic_sections elf_x86_64_create_dynamic_sections
5918 #define elf_backend_finish_dynamic_sections elf_x86_64_finish_dynamic_sections
5919 #define elf_backend_finish_dynamic_symbol elf_x86_64_finish_dynamic_symbol
5920 #define elf_backend_gc_mark_hook elf_x86_64_gc_mark_hook
5921 #define elf_backend_gc_sweep_hook elf_x86_64_gc_sweep_hook
5922 #define elf_backend_grok_prstatus elf_x86_64_grok_prstatus
5923 #define elf_backend_grok_psinfo elf_x86_64_grok_psinfo
5924 #ifdef CORE_HEADER
5925 #define elf_backend_write_core_note elf_x86_64_write_core_note
5926 #endif
5927 #define elf_backend_reloc_type_class elf_x86_64_reloc_type_class
5928 #define elf_backend_relocate_section elf_x86_64_relocate_section
5929 #define elf_backend_size_dynamic_sections elf_x86_64_size_dynamic_sections
5930 #define elf_backend_always_size_sections elf_x86_64_always_size_sections
5931 #define elf_backend_init_index_section _bfd_elf_init_1_index_section
5932 #define elf_backend_plt_sym_val elf_x86_64_plt_sym_val
5933 #define elf_backend_object_p elf64_x86_64_elf_object_p
5934 #define bfd_elf64_mkobject elf_x86_64_mkobject
5935 #define bfd_elf64_get_synthetic_symtab elf_x86_64_get_synthetic_symtab
5936
5937 #define elf_backend_section_from_shdr \
5938 elf_x86_64_section_from_shdr
5939
5940 #define elf_backend_section_from_bfd_section \
5941 elf_x86_64_elf_section_from_bfd_section
5942 #define elf_backend_add_symbol_hook \
5943 elf_x86_64_add_symbol_hook
5944 #define elf_backend_symbol_processing \
5945 elf_x86_64_symbol_processing
5946 #define elf_backend_common_section_index \
5947 elf_x86_64_common_section_index
5948 #define elf_backend_common_section \
5949 elf_x86_64_common_section
5950 #define elf_backend_common_definition \
5951 elf_x86_64_common_definition
5952 #define elf_backend_merge_symbol \
5953 elf_x86_64_merge_symbol
5954 #define elf_backend_special_sections \
5955 elf_x86_64_special_sections
5956 #define elf_backend_additional_program_headers \
5957 elf_x86_64_additional_program_headers
5958 #define elf_backend_hash_symbol \
5959 elf_x86_64_hash_symbol
5960
5961 #include "elf64-target.h"
5962
5963 /* FreeBSD support. */
5964
5965 #undef TARGET_LITTLE_SYM
5966 #define TARGET_LITTLE_SYM x86_64_elf64_fbsd_vec
5967 #undef TARGET_LITTLE_NAME
5968 #define TARGET_LITTLE_NAME "elf64-x86-64-freebsd"
5969
5970 #undef ELF_OSABI
5971 #define ELF_OSABI ELFOSABI_FREEBSD
5972
5973 #undef elf64_bed
5974 #define elf64_bed elf64_x86_64_fbsd_bed
5975
5976 #include "elf64-target.h"
5977
5978 /* Solaris 2 support. */
5979
5980 #undef TARGET_LITTLE_SYM
5981 #define TARGET_LITTLE_SYM x86_64_elf64_sol2_vec
5982 #undef TARGET_LITTLE_NAME
5983 #define TARGET_LITTLE_NAME "elf64-x86-64-sol2"
5984
5985 /* Restore default: we cannot use ELFOSABI_SOLARIS, otherwise ELFOSABI_NONE
5986 objects won't be recognized. */
5987 #undef ELF_OSABI
5988
5989 #undef elf64_bed
5990 #define elf64_bed elf64_x86_64_sol2_bed
5991
5992 /* The 64-bit static TLS arena size is rounded to the nearest 16-byte
5993 boundary. */
5994 #undef elf_backend_static_tls_alignment
5995 #define elf_backend_static_tls_alignment 16
5996
5997 /* The Solaris 2 ABI requires a plt symbol on all platforms.
5998
5999 Cf. Linker and Libraries Guide, Ch. 2, Link-Editor, Generating the Output
6000 File, p.63. */
6001 #undef elf_backend_want_plt_sym
6002 #define elf_backend_want_plt_sym 1
6003
6004 #include "elf64-target.h"
6005
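/* The targets that follow do not provide the .plt.bnd-aware synthetic
   symbol table; they use the generic implementation instead.  */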
6006 #undef bfd_elf64_get_synthetic_symtab
6007
6008 /* Native Client support. */
6009
6010 static bfd_boolean
6011 elf64_x86_64_nacl_elf_object_p (bfd *abfd)
6012 {
6013 /* Set the right machine number for a NaCl x86-64 ELF64 file. */
6014 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64_nacl);
6015 return TRUE;
6016 }
6017
6018 #undef TARGET_LITTLE_SYM
6019 #define TARGET_LITTLE_SYM x86_64_elf64_nacl_vec
6020 #undef TARGET_LITTLE_NAME
6021 #define TARGET_LITTLE_NAME "elf64-x86-64-nacl"
6022 #undef elf64_bed
6023 #define elf64_bed elf64_x86_64_nacl_bed
6024
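/* NaCl uses a 64K page size.  */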
6025 #undef ELF_MAXPAGESIZE
6026 #undef ELF_MINPAGESIZE
6027 #undef ELF_COMMONPAGESIZE
6028 #define ELF_MAXPAGESIZE 0x10000
6029 #define ELF_MINPAGESIZE 0x10000
6030 #define ELF_COMMONPAGESIZE 0x10000
6031
6032 /* Restore defaults. */
6033 #undef ELF_OSABI
6034 #undef elf_backend_static_tls_alignment
6035 #undef elf_backend_want_plt_sym
6036 #define elf_backend_want_plt_sym 0
6037
6038 /* NaCl uses substantially different PLT entries for the same effects. */
6039
6040 #undef elf_backend_plt_alignment
6041 #define elf_backend_plt_alignment 5
6042 #define NACL_PLT_ENTRY_SIZE 64
6043 #define NACLMASK 0xe0 /* 32-byte alignment mask. */
6044
6045 static const bfd_byte elf_x86_64_nacl_plt0_entry[NACL_PLT_ENTRY_SIZE] =
6046 {
6047 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
6048 0x4c, 0x8b, 0x1d, 16, 0, 0, 0, /* mov GOT+16(%rip), %r11 */
6049 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
6050 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
6051 0x41, 0xff, 0xe3, /* jmpq *%r11 */
6052
6053 /* 9-byte nop sequence to pad out to the next 32-byte boundary. */
6054 0x66, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw 0x0(%rax,%rax,1) */
6055
6056 /* 32 bytes of nop to pad out to the standard size. */
6057 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */
6058 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
6059 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */
6060 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
6061 0x66, /* excess data32 prefix */
6062 0x90 /* nop */
6063 };
6064
6065 static const bfd_byte elf_x86_64_nacl_plt_entry[NACL_PLT_ENTRY_SIZE] =
6066 {
6067 0x4c, 0x8b, 0x1d, 0, 0, 0, 0, /* mov name@GOTPCREL(%rip),%r11 */
6068 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
6069 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
6070 0x41, 0xff, 0xe3, /* jmpq *%r11 */
6071
6072 /* 15-byte nop sequence to pad out to the next 32-byte boundary. */
6073 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */
6074 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
6075
6076 /* Lazy GOT entries point here (32-byte aligned). */
6077 0x68, /* pushq immediate */
6078 0, 0, 0, 0, /* replaced with index into relocation table. */
6079 0xe9, /* jmp relative */
6080 0, 0, 0, 0, /* replaced with offset to start of .plt0. */
6081
6082 /* 22 bytes of nop to pad out to the standard size. */
6083 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */
6084 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
6085 0x0f, 0x1f, 0x80, 0, 0, 0, 0, /* nopl 0x0(%rax) */
6086 };
6087
6088 /* .eh_frame covering the .plt section. */
6089
6090 static const bfd_byte elf_x86_64_nacl_eh_frame_plt[] =
6091 {
6092 #if (PLT_CIE_LENGTH != 20 \
6093 || PLT_FDE_LENGTH != 36 \
6094 || PLT_FDE_START_OFFSET != 4 + PLT_CIE_LENGTH + 8 \
6095 || PLT_FDE_LEN_OFFSET != 4 + PLT_CIE_LENGTH + 12)
6096 # error "Need elf_x86_64_backend_data parameters for eh_frame_plt offsets!"
6097 #endif
6098 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
6099 0, 0, 0, 0, /* CIE ID */
6100 1, /* CIE version */
6101 'z', 'R', 0, /* Augmentation string */
6102 1, /* Code alignment factor */
6103 0x78, /* Data alignment factor */
6104 16, /* Return address column */
6105 1, /* Augmentation size */
6106 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
6107 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
6108 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
6109 DW_CFA_nop, DW_CFA_nop,
6110
6111 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
6112 PLT_CIE_LENGTH + 8, 0, 0, 0,/* CIE pointer */
6113 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
6114 0, 0, 0, 0, /* .plt size goes here */
6115 0, /* Augmentation size */
6116 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
6117 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
6118 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
6119 DW_CFA_advance_loc + 58, /* DW_CFA_advance_loc: 58 to __PLT__+64 */
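/* From __PLT__+64 onwards the CFA is given by an expression:
   CFA = %rsp + 8 + (((%rip & 63) >= 37) << 3), i.e. one extra slot
   once the pushq of the relocation index in an entry's lazy-binding
   stub has executed.  */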
6120 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
6121 13, /* Block length */
6122 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
6123 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
6124 DW_OP_const1u, 63, DW_OP_and, DW_OP_const1u, 37, DW_OP_ge,
6125 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
6126 DW_CFA_nop, DW_CFA_nop
6127 };
6128
6129 static const struct elf_x86_64_backend_data elf_x86_64_nacl_arch_bed =
6130 {
6131 elf_x86_64_nacl_plt0_entry, /* plt0_entry */
6132 elf_x86_64_nacl_plt_entry, /* plt_entry */
6133 NACL_PLT_ENTRY_SIZE, /* plt_entry_size */
6134 2, /* plt0_got1_offset */
6135 9, /* plt0_got2_offset */
6136 13, /* plt0_got2_insn_end */
6137 3, /* plt_got_offset */
6138 33, /* plt_reloc_offset */
6139 38, /* plt_plt_offset */
6140 7, /* plt_got_insn_size */
6141 42, /* plt_plt_insn_end */
6142 32, /* plt_lazy_offset */
6143 elf_x86_64_nacl_eh_frame_plt, /* eh_frame_plt */
6144 sizeof (elf_x86_64_nacl_eh_frame_plt), /* eh_frame_plt_size */
6145 };
6146
6147 #undef elf_backend_arch_data
6148 #define elf_backend_arch_data &elf_x86_64_nacl_arch_bed
6149
6150 #undef elf_backend_object_p
6151 #define elf_backend_object_p elf64_x86_64_nacl_elf_object_p
6152 #undef elf_backend_modify_segment_map
6153 #define elf_backend_modify_segment_map nacl_modify_segment_map
6154 #undef elf_backend_modify_program_headers
6155 #define elf_backend_modify_program_headers nacl_modify_program_headers
6156 #undef elf_backend_final_write_processing
6157 #define elf_backend_final_write_processing nacl_final_write_processing
6158
6159 #include "elf64-target.h"
6160
6161 /* Native Client x32 support. */
6162
6163 static bfd_boolean
6164 elf32_x86_64_nacl_elf_object_p (bfd *abfd)
6165 {
6166 /* Set the right machine number for a NaCl x86-64 ELF32 file. */
6167 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32_nacl);
6168 return TRUE;
6169 }
6170
6171 #undef TARGET_LITTLE_SYM
6172 #define TARGET_LITTLE_SYM x86_64_elf32_nacl_vec
6173 #undef TARGET_LITTLE_NAME
6174 #define TARGET_LITTLE_NAME "elf32-x86-64-nacl"
6175 #undef elf32_bed
6176 #define elf32_bed elf32_x86_64_nacl_bed
6177
6178 #define bfd_elf32_bfd_link_hash_table_create \
6179 elf_x86_64_link_hash_table_create
6180 #define bfd_elf32_bfd_reloc_type_lookup \
6181 elf_x86_64_reloc_type_lookup
6182 #define bfd_elf32_bfd_reloc_name_lookup \
6183 elf_x86_64_reloc_name_lookup
6184 #define bfd_elf32_mkobject \
6185 elf_x86_64_mkobject
6186
6187 #undef elf_backend_object_p
6188 #define elf_backend_object_p \
6189 elf32_x86_64_nacl_elf_object_p
6190
6191 #undef elf_backend_bfd_from_remote_memory
6192 #define elf_backend_bfd_from_remote_memory \
6193 _bfd_elf32_bfd_from_remote_memory
6194
6195 #undef elf_backend_size_info
6196 #define elf_backend_size_info \
6197 _bfd_elf32_size_info
6198
6199 #include "elf32-target.h"
6200
6201 /* Restore defaults. */
6202 #undef elf_backend_object_p
6203 #define elf_backend_object_p elf64_x86_64_elf_object_p
6204 #undef elf_backend_bfd_from_remote_memory
6205 #undef elf_backend_size_info
6206 #undef elf_backend_modify_segment_map
6207 #undef elf_backend_modify_program_headers
6208 #undef elf_backend_final_write_processing
6209
6210 /* Intel L1OM support. */
6211
6212 static bfd_boolean
6213 elf64_l1om_elf_object_p (bfd *abfd)
6214 {
6215 /* Set the right machine number for an L1OM elf64 file. */
6216 bfd_default_set_arch_mach (abfd, bfd_arch_l1om, bfd_mach_l1om);
6217 return TRUE;
6218 }
6219
6220 #undef TARGET_LITTLE_SYM
6221 #define TARGET_LITTLE_SYM l1om_elf64_vec
6222 #undef TARGET_LITTLE_NAME
6223 #define TARGET_LITTLE_NAME "elf64-l1om"
6224 #undef ELF_ARCH
6225 #define ELF_ARCH bfd_arch_l1om
6226
6227 #undef ELF_MACHINE_CODE
6228 #define ELF_MACHINE_CODE EM_L1OM
6229
6230 #undef ELF_OSABI
6231
6232 #undef elf64_bed
6233 #define elf64_bed elf64_l1om_bed
6234
6235 #undef elf_backend_object_p
6236 #define elf_backend_object_p elf64_l1om_elf_object_p
6237
6238 /* Restore defaults. */
6239 #undef ELF_MAXPAGESIZE
6240 #undef ELF_MINPAGESIZE
6241 #undef ELF_COMMONPAGESIZE
6242 #define ELF_MAXPAGESIZE 0x200000
6243 #define ELF_MINPAGESIZE 0x1000
6244 #define ELF_COMMONPAGESIZE 0x1000
6245 #undef elf_backend_plt_alignment
6246 #define elf_backend_plt_alignment 4
6247 #undef elf_backend_arch_data
6248 #define elf_backend_arch_data &elf_x86_64_arch_bed
6249
6250 #include "elf64-target.h"
6251
6252 /* FreeBSD L1OM support. */
6253
6254 #undef TARGET_LITTLE_SYM
6255 #define TARGET_LITTLE_SYM l1om_elf64_fbsd_vec
6256 #undef TARGET_LITTLE_NAME
6257 #define TARGET_LITTLE_NAME "elf64-l1om-freebsd"
6258
6259 #undef ELF_OSABI
6260 #define ELF_OSABI ELFOSABI_FREEBSD
6261
6262 #undef elf64_bed
6263 #define elf64_bed elf64_l1om_fbsd_bed
6264
6265 #include "elf64-target.h"
6266
6267 /* Intel K1OM support. */
6268
6269 static bfd_boolean
6270 elf64_k1om_elf_object_p (bfd *abfd)
6271 {
6272 /* Set the right machine number for a K1OM elf64 file. */
6273 bfd_default_set_arch_mach (abfd, bfd_arch_k1om, bfd_mach_k1om);
6274 return TRUE;
6275 }
6276
6277 #undef TARGET_LITTLE_SYM
6278 #define TARGET_LITTLE_SYM k1om_elf64_vec
6279 #undef TARGET_LITTLE_NAME
6280 #define TARGET_LITTLE_NAME "elf64-k1om"
6281 #undef ELF_ARCH
6282 #define ELF_ARCH bfd_arch_k1om
6283
6284 #undef ELF_MACHINE_CODE
6285 #define ELF_MACHINE_CODE EM_K1OM
6286
6287 #undef ELF_OSABI
6288
6289 #undef elf64_bed
6290 #define elf64_bed elf64_k1om_bed
6291
6292 #undef elf_backend_object_p
6293 #define elf_backend_object_p elf64_k1om_elf_object_p
6294
6295 #undef elf_backend_static_tls_alignment
6296
6297 #undef elf_backend_want_plt_sym
6298 #define elf_backend_want_plt_sym 0
6299
6300 #include "elf64-target.h"
6301
6302 /* FreeBSD K1OM support. */
6303
6304 #undef TARGET_LITTLE_SYM
6305 #define TARGET_LITTLE_SYM k1om_elf64_fbsd_vec
6306 #undef TARGET_LITTLE_NAME
6307 #define TARGET_LITTLE_NAME "elf64-k1om-freebsd"
6308
6309 #undef ELF_OSABI
6310 #define ELF_OSABI ELFOSABI_FREEBSD
6311
6312 #undef elf64_bed
6313 #define elf64_bed elf64_k1om_fbsd_bed
6314
6315 #include "elf64-target.h"
6316
6317 /* 32-bit x86-64 support. */
6318
6319 #undef TARGET_LITTLE_SYM
6320 #define TARGET_LITTLE_SYM x86_64_elf32_vec
6321 #undef TARGET_LITTLE_NAME
6322 #define TARGET_LITTLE_NAME "elf32-x86-64"
6323 #undef elf32_bed
6324
6325 #undef ELF_ARCH
6326 #define ELF_ARCH bfd_arch_i386
6327
6328 #undef ELF_MACHINE_CODE
6329 #define ELF_MACHINE_CODE EM_X86_64
6330
6331 #undef ELF_OSABI
6332
6333 #undef elf_backend_object_p
6334 #define elf_backend_object_p \
6335 elf32_x86_64_elf_object_p
6336
6337 #undef elf_backend_bfd_from_remote_memory
6338 #define elf_backend_bfd_from_remote_memory \
6339 _bfd_elf32_bfd_from_remote_memory
6340
6341 #undef elf_backend_size_info
6342 #define elf_backend_size_info \
6343 _bfd_elf32_size_info
6344
6345 #include "elf32-target.h"