1 /* X86-64 specific support for ELF
2 Copyright (C) 2000-2015 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka <jh@suse.cz>.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
21
22 #include "sysdep.h"
23 #include "bfd.h"
24 #include "bfdlink.h"
25 #include "libbfd.h"
26 #include "elf-bfd.h"
27 #include "elf-nacl.h"
28 #include "bfd_stdint.h"
29 #include "objalloc.h"
30 #include "hashtab.h"
31 #include "dwarf2.h"
32 #include "libiberty.h"
33
34 #include "elf/x86-64.h"
35
36 #ifdef CORE_HEADER
37 #include <stdarg.h>
38 #include CORE_HEADER
39 #endif
40
41 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
42 #define MINUS_ONE (~ (bfd_vma) 0)
43
44 /* Since both 32-bit and 64-bit x86-64 encode the relocation type in an
45 identical manner, we use ELF32_R_TYPE instead of ELF64_R_TYPE to get the
46 relocation type. We also use ELF_ST_TYPE instead of ELF64_ST_TYPE
47 since they are the same. */
48
49 #define ABI_64_P(abfd) \
50 (get_elf_backend_data (abfd)->s->elfclass == ELFCLASS64)
51
52 /* The relocation "howto" table. Order of fields:
53 type, rightshift, size, bitsize, pc_relative, bitpos, complain_on_overflow,
54 special_function, name, partial_inplace, src_mask, dst_mask, pcrel_offset. */
55 static reloc_howto_type x86_64_elf_howto_table[] =
56 {
57 HOWTO(R_X86_64_NONE, 0, 3, 0, FALSE, 0, complain_overflow_dont,
58 bfd_elf_generic_reloc, "R_X86_64_NONE", FALSE, 0x00000000, 0x00000000,
59 FALSE),
60 HOWTO(R_X86_64_64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
61 bfd_elf_generic_reloc, "R_X86_64_64", FALSE, MINUS_ONE, MINUS_ONE,
62 FALSE),
63 HOWTO(R_X86_64_PC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
64 bfd_elf_generic_reloc, "R_X86_64_PC32", FALSE, 0xffffffff, 0xffffffff,
65 TRUE),
66 HOWTO(R_X86_64_GOT32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
67 bfd_elf_generic_reloc, "R_X86_64_GOT32", FALSE, 0xffffffff, 0xffffffff,
68 FALSE),
69 HOWTO(R_X86_64_PLT32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
70 bfd_elf_generic_reloc, "R_X86_64_PLT32", FALSE, 0xffffffff, 0xffffffff,
71 TRUE),
72 HOWTO(R_X86_64_COPY, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
73 bfd_elf_generic_reloc, "R_X86_64_COPY", FALSE, 0xffffffff, 0xffffffff,
74 FALSE),
75 HOWTO(R_X86_64_GLOB_DAT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
76 bfd_elf_generic_reloc, "R_X86_64_GLOB_DAT", FALSE, MINUS_ONE,
77 MINUS_ONE, FALSE),
78 HOWTO(R_X86_64_JUMP_SLOT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
79 bfd_elf_generic_reloc, "R_X86_64_JUMP_SLOT", FALSE, MINUS_ONE,
80 MINUS_ONE, FALSE),
81 HOWTO(R_X86_64_RELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
82 bfd_elf_generic_reloc, "R_X86_64_RELATIVE", FALSE, MINUS_ONE,
83 MINUS_ONE, FALSE),
84 HOWTO(R_X86_64_GOTPCREL, 0, 2, 32, TRUE, 0, complain_overflow_signed,
85 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL", FALSE, 0xffffffff,
86 0xffffffff, TRUE),
87 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
88 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
89 FALSE),
90 HOWTO(R_X86_64_32S, 0, 2, 32, FALSE, 0, complain_overflow_signed,
91 bfd_elf_generic_reloc, "R_X86_64_32S", FALSE, 0xffffffff, 0xffffffff,
92 FALSE),
93 HOWTO(R_X86_64_16, 0, 1, 16, FALSE, 0, complain_overflow_bitfield,
94 bfd_elf_generic_reloc, "R_X86_64_16", FALSE, 0xffff, 0xffff, FALSE),
95 HOWTO(R_X86_64_PC16,0, 1, 16, TRUE, 0, complain_overflow_bitfield,
96 bfd_elf_generic_reloc, "R_X86_64_PC16", FALSE, 0xffff, 0xffff, TRUE),
97 HOWTO(R_X86_64_8, 0, 0, 8, FALSE, 0, complain_overflow_bitfield,
98 bfd_elf_generic_reloc, "R_X86_64_8", FALSE, 0xff, 0xff, FALSE),
99 HOWTO(R_X86_64_PC8, 0, 0, 8, TRUE, 0, complain_overflow_signed,
100 bfd_elf_generic_reloc, "R_X86_64_PC8", FALSE, 0xff, 0xff, TRUE),
101 HOWTO(R_X86_64_DTPMOD64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
102 bfd_elf_generic_reloc, "R_X86_64_DTPMOD64", FALSE, MINUS_ONE,
103 MINUS_ONE, FALSE),
104 HOWTO(R_X86_64_DTPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
105 bfd_elf_generic_reloc, "R_X86_64_DTPOFF64", FALSE, MINUS_ONE,
106 MINUS_ONE, FALSE),
107 HOWTO(R_X86_64_TPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
108 bfd_elf_generic_reloc, "R_X86_64_TPOFF64", FALSE, MINUS_ONE,
109 MINUS_ONE, FALSE),
110 HOWTO(R_X86_64_TLSGD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
111 bfd_elf_generic_reloc, "R_X86_64_TLSGD", FALSE, 0xffffffff,
112 0xffffffff, TRUE),
113 HOWTO(R_X86_64_TLSLD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
114 bfd_elf_generic_reloc, "R_X86_64_TLSLD", FALSE, 0xffffffff,
115 0xffffffff, TRUE),
116 HOWTO(R_X86_64_DTPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
117 bfd_elf_generic_reloc, "R_X86_64_DTPOFF32", FALSE, 0xffffffff,
118 0xffffffff, FALSE),
119 HOWTO(R_X86_64_GOTTPOFF, 0, 2, 32, TRUE, 0, complain_overflow_signed,
120 bfd_elf_generic_reloc, "R_X86_64_GOTTPOFF", FALSE, 0xffffffff,
121 0xffffffff, TRUE),
122 HOWTO(R_X86_64_TPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
123 bfd_elf_generic_reloc, "R_X86_64_TPOFF32", FALSE, 0xffffffff,
124 0xffffffff, FALSE),
125 HOWTO(R_X86_64_PC64, 0, 4, 64, TRUE, 0, complain_overflow_bitfield,
126 bfd_elf_generic_reloc, "R_X86_64_PC64", FALSE, MINUS_ONE, MINUS_ONE,
127 TRUE),
128 HOWTO(R_X86_64_GOTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
129 bfd_elf_generic_reloc, "R_X86_64_GOTOFF64",
130 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
131 HOWTO(R_X86_64_GOTPC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
132 bfd_elf_generic_reloc, "R_X86_64_GOTPC32",
133 FALSE, 0xffffffff, 0xffffffff, TRUE),
134 HOWTO(R_X86_64_GOT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
135 bfd_elf_generic_reloc, "R_X86_64_GOT64", FALSE, MINUS_ONE, MINUS_ONE,
136 FALSE),
137 HOWTO(R_X86_64_GOTPCREL64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
138 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL64", FALSE, MINUS_ONE,
139 MINUS_ONE, TRUE),
140 HOWTO(R_X86_64_GOTPC64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
141 bfd_elf_generic_reloc, "R_X86_64_GOTPC64",
142 FALSE, MINUS_ONE, MINUS_ONE, TRUE),
143 HOWTO(R_X86_64_GOTPLT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
144 bfd_elf_generic_reloc, "R_X86_64_GOTPLT64", FALSE, MINUS_ONE,
145 MINUS_ONE, FALSE),
146 HOWTO(R_X86_64_PLTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
147 bfd_elf_generic_reloc, "R_X86_64_PLTOFF64", FALSE, MINUS_ONE,
148 MINUS_ONE, FALSE),
149 HOWTO(R_X86_64_SIZE32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
150 bfd_elf_generic_reloc, "R_X86_64_SIZE32", FALSE, 0xffffffff, 0xffffffff,
151 FALSE),
152 HOWTO(R_X86_64_SIZE64, 0, 4, 64, FALSE, 0, complain_overflow_unsigned,
153 bfd_elf_generic_reloc, "R_X86_64_SIZE64", FALSE, MINUS_ONE, MINUS_ONE,
154 FALSE),
155 HOWTO(R_X86_64_GOTPC32_TLSDESC, 0, 2, 32, TRUE, 0,
156 complain_overflow_bitfield, bfd_elf_generic_reloc,
157 "R_X86_64_GOTPC32_TLSDESC",
158 FALSE, 0xffffffff, 0xffffffff, TRUE),
159 HOWTO(R_X86_64_TLSDESC_CALL, 0, 0, 0, FALSE, 0,
160 complain_overflow_dont, bfd_elf_generic_reloc,
161 "R_X86_64_TLSDESC_CALL",
162 FALSE, 0, 0, FALSE),
163 HOWTO(R_X86_64_TLSDESC, 0, 4, 64, FALSE, 0,
164 complain_overflow_bitfield, bfd_elf_generic_reloc,
165 "R_X86_64_TLSDESC",
166 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
167 HOWTO(R_X86_64_IRELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
168 bfd_elf_generic_reloc, "R_X86_64_IRELATIVE", FALSE, MINUS_ONE,
169 MINUS_ONE, FALSE),
170 HOWTO(R_X86_64_RELATIVE64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
171 bfd_elf_generic_reloc, "R_X86_64_RELATIVE64", FALSE, MINUS_ONE,
172 MINUS_ONE, FALSE),
173 HOWTO(R_X86_64_PC32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
174 bfd_elf_generic_reloc, "R_X86_64_PC32_BND", FALSE, 0xffffffff, 0xffffffff,
175 TRUE),
176 HOWTO(R_X86_64_PLT32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
177 bfd_elf_generic_reloc, "R_X86_64_PLT32_BND", FALSE, 0xffffffff, 0xffffffff,
178 TRUE),
179
180 /* We have a gap in the reloc numbers here.
181 R_X86_64_standard counts the number up to this point, and
182 R_X86_64_vt_offset is the value to subtract from a reloc type of
183 R_X86_64_GNU_VT* to form an index into this table. */
184 #define R_X86_64_standard (R_X86_64_PLT32_BND + 1)
185 #define R_X86_64_vt_offset (R_X86_64_GNU_VTINHERIT - R_X86_64_standard)
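/* For example, elf_x86_64_rtype_to_howto below maps a reloc type in the
   R_X86_64_GNU_VT* range to table index r_type - R_X86_64_vt_offset, so
   R_X86_64_GNU_VTINHERIT lands at index R_X86_64_standard, the first
   entry after this gap. */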
186
187 /* GNU extension to record C++ vtable hierarchy. */
188 HOWTO (R_X86_64_GNU_VTINHERIT, 0, 4, 0, FALSE, 0, complain_overflow_dont,
189 NULL, "R_X86_64_GNU_VTINHERIT", FALSE, 0, 0, FALSE),
190
191 /* GNU extension to record C++ vtable member usage. */
192 HOWTO (R_X86_64_GNU_VTENTRY, 0, 4, 0, FALSE, 0, complain_overflow_dont,
193 _bfd_elf_rel_vtable_reloc_fn, "R_X86_64_GNU_VTENTRY", FALSE, 0, 0,
194 FALSE),
195
196 /* Use complain_overflow_bitfield on R_X86_64_32 for x32. */
197 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
198 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
199 FALSE)
200 };
201
202 #define IS_X86_64_PCREL_TYPE(TYPE) \
203 ( ((TYPE) == R_X86_64_PC8) \
204 || ((TYPE) == R_X86_64_PC16) \
205 || ((TYPE) == R_X86_64_PC32) \
206 || ((TYPE) == R_X86_64_PC32_BND) \
207 || ((TYPE) == R_X86_64_PC64))
208
209 /* Map BFD relocs to the x86_64 elf relocs. */
210 struct elf_reloc_map
211 {
212 bfd_reloc_code_real_type bfd_reloc_val;
213 unsigned char elf_reloc_val;
214 };
215
216 static const struct elf_reloc_map x86_64_reloc_map[] =
217 {
218 { BFD_RELOC_NONE, R_X86_64_NONE, },
219 { BFD_RELOC_64, R_X86_64_64, },
220 { BFD_RELOC_32_PCREL, R_X86_64_PC32, },
221 { BFD_RELOC_X86_64_GOT32, R_X86_64_GOT32,},
222 { BFD_RELOC_X86_64_PLT32, R_X86_64_PLT32,},
223 { BFD_RELOC_X86_64_COPY, R_X86_64_COPY, },
224 { BFD_RELOC_X86_64_GLOB_DAT, R_X86_64_GLOB_DAT, },
225 { BFD_RELOC_X86_64_JUMP_SLOT, R_X86_64_JUMP_SLOT, },
226 { BFD_RELOC_X86_64_RELATIVE, R_X86_64_RELATIVE, },
227 { BFD_RELOC_X86_64_GOTPCREL, R_X86_64_GOTPCREL, },
228 { BFD_RELOC_32, R_X86_64_32, },
229 { BFD_RELOC_X86_64_32S, R_X86_64_32S, },
230 { BFD_RELOC_16, R_X86_64_16, },
231 { BFD_RELOC_16_PCREL, R_X86_64_PC16, },
232 { BFD_RELOC_8, R_X86_64_8, },
233 { BFD_RELOC_8_PCREL, R_X86_64_PC8, },
234 { BFD_RELOC_X86_64_DTPMOD64, R_X86_64_DTPMOD64, },
235 { BFD_RELOC_X86_64_DTPOFF64, R_X86_64_DTPOFF64, },
236 { BFD_RELOC_X86_64_TPOFF64, R_X86_64_TPOFF64, },
237 { BFD_RELOC_X86_64_TLSGD, R_X86_64_TLSGD, },
238 { BFD_RELOC_X86_64_TLSLD, R_X86_64_TLSLD, },
239 { BFD_RELOC_X86_64_DTPOFF32, R_X86_64_DTPOFF32, },
240 { BFD_RELOC_X86_64_GOTTPOFF, R_X86_64_GOTTPOFF, },
241 { BFD_RELOC_X86_64_TPOFF32, R_X86_64_TPOFF32, },
242 { BFD_RELOC_64_PCREL, R_X86_64_PC64, },
243 { BFD_RELOC_X86_64_GOTOFF64, R_X86_64_GOTOFF64, },
244 { BFD_RELOC_X86_64_GOTPC32, R_X86_64_GOTPC32, },
245 { BFD_RELOC_X86_64_GOT64, R_X86_64_GOT64, },
246 { BFD_RELOC_X86_64_GOTPCREL64,R_X86_64_GOTPCREL64, },
247 { BFD_RELOC_X86_64_GOTPC64, R_X86_64_GOTPC64, },
248 { BFD_RELOC_X86_64_GOTPLT64, R_X86_64_GOTPLT64, },
249 { BFD_RELOC_X86_64_PLTOFF64, R_X86_64_PLTOFF64, },
250 { BFD_RELOC_SIZE32, R_X86_64_SIZE32, },
251 { BFD_RELOC_SIZE64, R_X86_64_SIZE64, },
252 { BFD_RELOC_X86_64_GOTPC32_TLSDESC, R_X86_64_GOTPC32_TLSDESC, },
253 { BFD_RELOC_X86_64_TLSDESC_CALL, R_X86_64_TLSDESC_CALL, },
254 { BFD_RELOC_X86_64_TLSDESC, R_X86_64_TLSDESC, },
255 { BFD_RELOC_X86_64_IRELATIVE, R_X86_64_IRELATIVE, },
256 { BFD_RELOC_X86_64_PC32_BND, R_X86_64_PC32_BND,},
257 { BFD_RELOC_X86_64_PLT32_BND, R_X86_64_PLT32_BND,},
258 { BFD_RELOC_VTABLE_INHERIT, R_X86_64_GNU_VTINHERIT, },
259 { BFD_RELOC_VTABLE_ENTRY, R_X86_64_GNU_VTENTRY, },
260 };
261
262 static reloc_howto_type *
263 elf_x86_64_rtype_to_howto (bfd *abfd, unsigned r_type)
264 {
265 unsigned i;
266
267 if (r_type == (unsigned int) R_X86_64_32)
268 {
269 if (ABI_64_P (abfd))
270 i = r_type;
271 else
272 i = ARRAY_SIZE (x86_64_elf_howto_table) - 1;
273 }
274 else if (r_type < (unsigned int) R_X86_64_GNU_VTINHERIT
275 || r_type >= (unsigned int) R_X86_64_max)
276 {
277 if (r_type >= (unsigned int) R_X86_64_standard)
278 {
279 (*_bfd_error_handler) (_("%B: invalid relocation type %d"),
280 abfd, (int) r_type);
281 r_type = R_X86_64_NONE;
282 }
283 i = r_type;
284 }
285 else
286 i = r_type - (unsigned int) R_X86_64_vt_offset;
287 BFD_ASSERT (x86_64_elf_howto_table[i].type == r_type);
288 return &x86_64_elf_howto_table[i];
289 }
290
291 /* Given a BFD reloc type, return a HOWTO structure. */
292 static reloc_howto_type *
293 elf_x86_64_reloc_type_lookup (bfd *abfd,
294 bfd_reloc_code_real_type code)
295 {
296 unsigned int i;
297
298 for (i = 0; i < sizeof (x86_64_reloc_map) / sizeof (struct elf_reloc_map);
299 i++)
300 {
301 if (x86_64_reloc_map[i].bfd_reloc_val == code)
302 return elf_x86_64_rtype_to_howto (abfd,
303 x86_64_reloc_map[i].elf_reloc_val);
304 }
305 return NULL;
306 }
307
308 static reloc_howto_type *
309 elf_x86_64_reloc_name_lookup (bfd *abfd,
310 const char *r_name)
311 {
312 unsigned int i;
313
314 if (!ABI_64_P (abfd) && strcasecmp (r_name, "R_X86_64_32") == 0)
315 {
316 /* Get x32 R_X86_64_32. */
317 reloc_howto_type *reloc
318 = &x86_64_elf_howto_table[ARRAY_SIZE (x86_64_elf_howto_table) - 1];
319 BFD_ASSERT (reloc->type == (unsigned int) R_X86_64_32);
320 return reloc;
321 }
322
323 for (i = 0; i < ARRAY_SIZE (x86_64_elf_howto_table); i++)
324 if (x86_64_elf_howto_table[i].name != NULL
325 && strcasecmp (x86_64_elf_howto_table[i].name, r_name) == 0)
326 return &x86_64_elf_howto_table[i];
327
328 return NULL;
329 }
330
331 /* Given an x86_64 ELF reloc type, fill in an arelent structure. */
332
333 static void
334 elf_x86_64_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *cache_ptr,
335 Elf_Internal_Rela *dst)
336 {
337 unsigned r_type;
338
339 r_type = ELF32_R_TYPE (dst->r_info);
340 cache_ptr->howto = elf_x86_64_rtype_to_howto (abfd, r_type);
341 BFD_ASSERT (r_type == cache_ptr->howto->type);
342 }
343 \f
344 /* Support for core dump NOTE sections. */
345 static bfd_boolean
346 elf_x86_64_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
347 {
348 int offset;
349 size_t size;
350
351 switch (note->descsz)
352 {
353 default:
354 return FALSE;
355
356 case 296: /* sizeof(struct elf_prstatus) on Linux/x32 */
357 /* pr_cursig */
358 elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);
359
360 /* pr_pid */
361 elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);
362
363 /* pr_reg */
364 offset = 72;
365 size = 216;
366
367 break;
368
369 case 336: /* sizeof(struct elf_prstatus) on Linux/x86_64 */
370 /* pr_cursig */
371 elf_tdata (abfd)->core->signal
372 = bfd_get_16 (abfd, note->descdata + 12);
373
374 /* pr_pid */
375 elf_tdata (abfd)->core->lwpid
376 = bfd_get_32 (abfd, note->descdata + 32);
377
378 /* pr_reg */
379 offset = 112;
380 size = 216;
381
382 break;
383 }
384
385 /* Make a ".reg/999" section. */
386 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
387 size, note->descpos + offset);
388 }
389
390 static bfd_boolean
391 elf_x86_64_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
392 {
393 switch (note->descsz)
394 {
395 default:
396 return FALSE;
397
398 case 124: /* sizeof(struct elf_prpsinfo) on Linux/x32 */
399 elf_tdata (abfd)->core->pid
400 = bfd_get_32 (abfd, note->descdata + 12);
401 elf_tdata (abfd)->core->program
402 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
403 elf_tdata (abfd)->core->command
404 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
405 break;
406
407 case 136: /* sizeof(struct elf_prpsinfo) on Linux/x86_64 */
408 elf_tdata (abfd)->core->pid
409 = bfd_get_32 (abfd, note->descdata + 24);
410 elf_tdata (abfd)->core->program
411 = _bfd_elfcore_strndup (abfd, note->descdata + 40, 16);
412 elf_tdata (abfd)->core->command
413 = _bfd_elfcore_strndup (abfd, note->descdata + 56, 80);
414 }
415
416 /* Note that for some reason, a spurious space is tacked
417 onto the end of the args in some implementations (at least
418 one, anyway), so strip it off if it exists. */
419
420 {
421 char *command = elf_tdata (abfd)->core->command;
422 int n = strlen (command);
423
424 if (0 < n && command[n - 1] == ' ')
425 command[n - 1] = '\0';
426 }
427
428 return TRUE;
429 }
430
431 #ifdef CORE_HEADER
432 static char *
433 elf_x86_64_write_core_note (bfd *abfd, char *buf, int *bufsiz,
434 int note_type, ...)
435 {
436 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
437 va_list ap;
438 const char *fname, *psargs;
439 long pid;
440 int cursig;
441 const void *gregs;
442
443 switch (note_type)
444 {
445 default:
446 return NULL;
447
448 case NT_PRPSINFO:
449 va_start (ap, note_type);
450 fname = va_arg (ap, const char *);
451 psargs = va_arg (ap, const char *);
452 va_end (ap);
453
454 if (bed->s->elfclass == ELFCLASS32)
455 {
456 prpsinfo32_t data;
457 memset (&data, 0, sizeof (data));
458 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
459 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
460 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
461 &data, sizeof (data));
462 }
463 else
464 {
465 prpsinfo64_t data;
466 memset (&data, 0, sizeof (data));
467 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
468 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
469 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
470 &data, sizeof (data));
471 }
472 /* NOTREACHED */
473
474 case NT_PRSTATUS:
475 va_start (ap, note_type);
476 pid = va_arg (ap, long);
477 cursig = va_arg (ap, int);
478 gregs = va_arg (ap, const void *);
479 va_end (ap);
480
481 if (bed->s->elfclass == ELFCLASS32)
482 {
483 if (bed->elf_machine_code == EM_X86_64)
484 {
485 prstatusx32_t prstat;
486 memset (&prstat, 0, sizeof (prstat));
487 prstat.pr_pid = pid;
488 prstat.pr_cursig = cursig;
489 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
490 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
491 &prstat, sizeof (prstat));
492 }
493 else
494 {
495 prstatus32_t prstat;
496 memset (&prstat, 0, sizeof (prstat));
497 prstat.pr_pid = pid;
498 prstat.pr_cursig = cursig;
499 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
500 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
501 &prstat, sizeof (prstat));
502 }
503 }
504 else
505 {
506 prstatus64_t prstat;
507 memset (&prstat, 0, sizeof (prstat));
508 prstat.pr_pid = pid;
509 prstat.pr_cursig = cursig;
510 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
511 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
512 &prstat, sizeof (prstat));
513 }
514 }
515 /* NOTREACHED */
516 }
517 #endif
518 \f
519 /* Functions for the x86-64 ELF linker. */
520
521 /* The name of the dynamic interpreter. This is put in the .interp
522 section. */
523
524 #define ELF64_DYNAMIC_INTERPRETER "/lib/ld64.so.1"
525 #define ELF32_DYNAMIC_INTERPRETER "/lib/ldx32.so.1"
526
527 /* If ELIMINATE_COPY_RELOCS is non-zero, the linker will try to avoid
528 copying dynamic variables from a shared lib into an app's dynbss
529 section, and instead use a dynamic relocation to point into the
530 shared lib. */
531 #define ELIMINATE_COPY_RELOCS 1
532
533 /* The size in bytes of an entry in the global offset table. */
534
535 #define GOT_ENTRY_SIZE 8
536
537 /* The size in bytes of an entry in the procedure linkage table. */
538
539 #define PLT_ENTRY_SIZE 16
540
541 /* The first entry in a procedure linkage table looks like this. See the
542 SVR4 ABI i386 supplement and the x86-64 ABI to see how this works. */
543
544 static const bfd_byte elf_x86_64_plt0_entry[PLT_ENTRY_SIZE] =
545 {
546 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
547 0xff, 0x25, 16, 0, 0, 0, /* jmpq *GOT+16(%rip) */
548 0x0f, 0x1f, 0x40, 0x00 /* nopl 0(%rax) */
549 };
550
551 /* Subsequent entries in a procedure linkage table look like this. */
552
553 static const bfd_byte elf_x86_64_plt_entry[PLT_ENTRY_SIZE] =
554 {
555 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
556 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
557 0x68, /* pushq immediate */
558 0, 0, 0, 0, /* replaced with index into relocation table. */
559 0xe9, /* jmp relative */
560 0, 0, 0, 0 /* replaced with offset to start of .plt0. */
561 };
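/* In elf_x86_64_arch_bed below, plt_got_offset (2), plt_reloc_offset (7)
   and plt_plt_offset (12) name the byte positions of the three 4-byte
   fields in this template, and plt_got_insn_size (6) is the length of the
   initial jmpq instruction. */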
562
563 /* The first entry in a procedure linkage table with BND relocations
564 looks like this. */
565
566 static const bfd_byte elf_x86_64_bnd_plt0_entry[PLT_ENTRY_SIZE] =
567 {
568 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
569 0xf2, 0xff, 0x25, 16, 0, 0, 0, /* bnd jmpq *GOT+16(%rip) */
570 0x0f, 0x1f, 0 /* nopl (%rax) */
571 };
572
573 /* Subsequent entries for legacy branches in a procedure linkage table
574 with BND relocations look like this. */
575
576 static const bfd_byte elf_x86_64_legacy_plt_entry[PLT_ENTRY_SIZE] =
577 {
578 0x68, 0, 0, 0, 0, /* pushq immediate */
579 0xe9, 0, 0, 0, 0, /* jmpq relative */
580 0x66, 0x0f, 0x1f, 0x44, 0, 0 /* nopw (%rax,%rax,1) */
581 };
582
583 /* Subsequent entries for branches with BND prefix in a procedure linkage
584 table with BND relocations look like this. */
585
586 static const bfd_byte elf_x86_64_bnd_plt_entry[PLT_ENTRY_SIZE] =
587 {
588 0x68, 0, 0, 0, 0, /* pushq immediate */
589 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
590 0x0f, 0x1f, 0x44, 0, 0 /* nopl 0(%rax,%rax,1) */
591 };
592
593 /* Entries for legacy branches in the second procedure linkage table
594 look like this. */
595
596 static const bfd_byte elf_x86_64_legacy_plt2_entry[8] =
597 {
598 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
599 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
600 0x66, 0x90 /* xchg %ax,%ax */
601 };
602
603 /* Entries for branches with BND prefix in the second procedure linkage
604 table look like this. */
605
606 static const bfd_byte elf_x86_64_bnd_plt2_entry[8] =
607 {
608 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
609 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
610 0x90 /* nop */
611 };
612
613 /* .eh_frame covering the .plt section. */
614
615 static const bfd_byte elf_x86_64_eh_frame_plt[] =
616 {
617 #define PLT_CIE_LENGTH 20
618 #define PLT_FDE_LENGTH 36
619 #define PLT_FDE_START_OFFSET 4 + PLT_CIE_LENGTH + 8
620 #define PLT_FDE_LEN_OFFSET 4 + PLT_CIE_LENGTH + 12
621 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
622 0, 0, 0, 0, /* CIE ID */
623 1, /* CIE version */
624 'z', 'R', 0, /* Augmentation string */
625 1, /* Code alignment factor */
626 0x78, /* Data alignment factor */
627 16, /* Return address column */
628 1, /* Augmentation size */
629 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
630 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
631 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
632 DW_CFA_nop, DW_CFA_nop,
633
634 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
635 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
636 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
637 0, 0, 0, 0, /* .plt size goes here */
638 0, /* Augmentation size */
639 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
640 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
641 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
642 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
643 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
644 11, /* Block length */
645 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
646 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
647 DW_OP_lit15, DW_OP_and, DW_OP_lit11, DW_OP_ge,
648 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
649 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
650 };
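/* The DW_CFA_def_cfa_expression above computes
     CFA = %rsp + 8 + (((%rip & 15) >= 11) ? 8 : 0),
   i.e. once execution within a 16-byte PLT entry is at or past the jmp at
   offset 11, the relocation index pushed at offset 6 has moved the stack,
   so the CFA is 16 rather than 8 bytes above %rsp. */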
651
652 /* Architecture-specific backend data for x86-64. */
653
654 struct elf_x86_64_backend_data
655 {
656 /* Templates for the initial PLT entry and for subsequent entries. */
657 const bfd_byte *plt0_entry;
658 const bfd_byte *plt_entry;
659 unsigned int plt_entry_size; /* Size of each PLT entry. */
660
661 /* Offsets into plt0_entry that are to be replaced with GOT[1] and GOT[2]. */
662 unsigned int plt0_got1_offset;
663 unsigned int plt0_got2_offset;
664
665 /* Offset of the end of the PC-relative instruction containing
666 plt0_got2_offset. */
667 unsigned int plt0_got2_insn_end;
668
669 /* Offsets into plt_entry that are to be replaced with... */
670 unsigned int plt_got_offset; /* ... address of this symbol in .got. */
671 unsigned int plt_reloc_offset; /* ... offset into relocation table. */
672 unsigned int plt_plt_offset; /* ... offset to start of .plt. */
673
674 /* Length of the PC-relative instruction containing plt_got_offset. */
675 unsigned int plt_got_insn_size;
676
677 /* Offset of the end of the PC-relative jump to plt0_entry. */
678 unsigned int plt_plt_insn_end;
679
680 /* Offset into plt_entry where the initial value of the GOT entry points. */
681 unsigned int plt_lazy_offset;
682
683 /* .eh_frame covering the .plt section. */
684 const bfd_byte *eh_frame_plt;
685 unsigned int eh_frame_plt_size;
686 };
687
688 #define get_elf_x86_64_arch_data(bed) \
689 ((const struct elf_x86_64_backend_data *) (bed)->arch_data)
690
691 #define get_elf_x86_64_backend_data(abfd) \
692 get_elf_x86_64_arch_data (get_elf_backend_data (abfd))
693
694 #define GET_PLT_ENTRY_SIZE(abfd) \
695 get_elf_x86_64_backend_data (abfd)->plt_entry_size
696
697 /* These are the standard parameters. */
698 static const struct elf_x86_64_backend_data elf_x86_64_arch_bed =
699 {
700 elf_x86_64_plt0_entry, /* plt0_entry */
701 elf_x86_64_plt_entry, /* plt_entry */
702 sizeof (elf_x86_64_plt_entry), /* plt_entry_size */
703 2, /* plt0_got1_offset */
704 8, /* plt0_got2_offset */
705 12, /* plt0_got2_insn_end */
706 2, /* plt_got_offset */
707 7, /* plt_reloc_offset */
708 12, /* plt_plt_offset */
709 6, /* plt_got_insn_size */
710 PLT_ENTRY_SIZE, /* plt_plt_insn_end */
711 6, /* plt_lazy_offset */
712 elf_x86_64_eh_frame_plt, /* eh_frame_plt */
713 sizeof (elf_x86_64_eh_frame_plt), /* eh_frame_plt_size */
714 };
715
716 static const struct elf_x86_64_backend_data elf_x86_64_bnd_arch_bed =
717 {
718 elf_x86_64_bnd_plt0_entry, /* plt0_entry */
719 elf_x86_64_bnd_plt_entry, /* plt_entry */
720 sizeof (elf_x86_64_bnd_plt_entry), /* plt_entry_size */
721 2, /* plt0_got1_offset */
722 1+8, /* plt0_got2_offset */
723 1+12, /* plt0_got2_insn_end */
724 1+2, /* plt_got_offset */
725 1, /* plt_reloc_offset */
726 7, /* plt_plt_offset */
727 1+6, /* plt_got_insn_size */
728 11, /* plt_plt_insn_end */
729 0, /* plt_lazy_offset */
730 elf_x86_64_eh_frame_plt, /* eh_frame_plt */
731 sizeof (elf_x86_64_eh_frame_plt), /* eh_frame_plt_size */
732 };
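/* In elf_x86_64_bnd_arch_bed above, the "1+" terms account for the
   one-byte 0xf2 BND prefix in front of the affected jmp instructions in
   the BND PLT templates, which shifts the patched fields and instruction
   ends by one byte relative to the standard layout. */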
733
734 #define elf_backend_arch_data &elf_x86_64_arch_bed
735
736 /* x86-64 ELF linker hash entry. */
737
738 struct elf_x86_64_link_hash_entry
739 {
740 struct elf_link_hash_entry elf;
741
742 /* Track dynamic relocs copied for this symbol. */
743 struct elf_dyn_relocs *dyn_relocs;
744
745 #define GOT_UNKNOWN 0
746 #define GOT_NORMAL 1
747 #define GOT_TLS_GD 2
748 #define GOT_TLS_IE 3
749 #define GOT_TLS_GDESC 4
750 #define GOT_TLS_GD_BOTH_P(type) \
751 ((type) == (GOT_TLS_GD | GOT_TLS_GDESC))
752 #define GOT_TLS_GD_P(type) \
753 ((type) == GOT_TLS_GD || GOT_TLS_GD_BOTH_P (type))
754 #define GOT_TLS_GDESC_P(type) \
755 ((type) == GOT_TLS_GDESC || GOT_TLS_GD_BOTH_P (type))
756 #define GOT_TLS_GD_ANY_P(type) \
757 (GOT_TLS_GD_P (type) || GOT_TLS_GDESC_P (type))
758 unsigned char tls_type;
759
760 /* TRUE if a weak symbol with a real definition needs a copy reloc.
761 When there is a weak symbol with a real definition, the processor
762 independent code will have arranged for us to see the real
763 definition first. We need to copy the needs_copy bit from the
764 real definition and check it when allowing copy reloc in PIE. */
765 unsigned int needs_copy : 1;
766
767 /* TRUE if symbol has at least one BND relocation. */
768 unsigned int has_bnd_reloc : 1;
769
770 /* Information about the GOT PLT entry. Filled when there are both
771 GOT and PLT relocations against the same function. */
772 union gotplt_union plt_got;
773
774 /* Information about the second PLT entry. Filled when has_bnd_reloc is
775 set. */
776 union gotplt_union plt_bnd;
777
778 /* Offset of the GOTPLT entry reserved for the TLS descriptor,
779 starting at the end of the jump table. */
780 bfd_vma tlsdesc_got;
781 };
782
783 #define elf_x86_64_hash_entry(ent) \
784 ((struct elf_x86_64_link_hash_entry *)(ent))
785
786 struct elf_x86_64_obj_tdata
787 {
788 struct elf_obj_tdata root;
789
790 /* tls_type for each local got entry. */
791 char *local_got_tls_type;
792
793 /* GOTPLT entries for TLS descriptors. */
794 bfd_vma *local_tlsdesc_gotent;
795 };
796
797 #define elf_x86_64_tdata(abfd) \
798 ((struct elf_x86_64_obj_tdata *) (abfd)->tdata.any)
799
800 #define elf_x86_64_local_got_tls_type(abfd) \
801 (elf_x86_64_tdata (abfd)->local_got_tls_type)
802
803 #define elf_x86_64_local_tlsdesc_gotent(abfd) \
804 (elf_x86_64_tdata (abfd)->local_tlsdesc_gotent)
805
806 #define is_x86_64_elf(bfd) \
807 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
808 && elf_tdata (bfd) != NULL \
809 && elf_object_id (bfd) == X86_64_ELF_DATA)
810
811 static bfd_boolean
812 elf_x86_64_mkobject (bfd *abfd)
813 {
814 return bfd_elf_allocate_object (abfd, sizeof (struct elf_x86_64_obj_tdata),
815 X86_64_ELF_DATA);
816 }
817
818 /* x86-64 ELF linker hash table. */
819
820 struct elf_x86_64_link_hash_table
821 {
822 struct elf_link_hash_table elf;
823
824 /* Short-cuts to get to dynamic linker sections. */
825 asection *sdynbss;
826 asection *srelbss;
827 asection *plt_eh_frame;
828 asection *plt_bnd;
829 asection *plt_got;
830
831 union
832 {
833 bfd_signed_vma refcount;
834 bfd_vma offset;
835 } tls_ld_got;
836
837 /* The amount of space used by the jump slots in the GOT. */
838 bfd_vma sgotplt_jump_table_size;
839
840 /* Small local sym cache. */
841 struct sym_cache sym_cache;
842
843 bfd_vma (*r_info) (bfd_vma, bfd_vma);
844 bfd_vma (*r_sym) (bfd_vma);
845 unsigned int pointer_r_type;
846 const char *dynamic_interpreter;
847 int dynamic_interpreter_size;
848
849 /* _TLS_MODULE_BASE_ symbol. */
850 struct bfd_link_hash_entry *tls_module_base;
851
852 /* Used by local STT_GNU_IFUNC symbols. */
853 htab_t loc_hash_table;
854 void * loc_hash_memory;
855
856 /* The offset into splt of the PLT entry for the TLS descriptor
857 resolver. Special values are 0, if not necessary (or not found
858 to be necessary yet), and -1 if needed but not determined
859 yet. */
860 bfd_vma tlsdesc_plt;
861 /* The offset into sgot of the GOT entry used by the PLT entry
862 above. */
863 bfd_vma tlsdesc_got;
864
865 /* The index of the next R_X86_64_JUMP_SLOT entry in .rela.plt. */
866 bfd_vma next_jump_slot_index;
867 /* The index of the next R_X86_64_IRELATIVE entry in .rela.plt. */
868 bfd_vma next_irelative_index;
869 };
870
871 /* Get the x86-64 ELF linker hash table from a link_info structure. */
872
873 #define elf_x86_64_hash_table(p) \
874 (elf_hash_table_id ((struct elf_link_hash_table *) ((p)->hash)) \
875 == X86_64_ELF_DATA ? ((struct elf_x86_64_link_hash_table *) ((p)->hash)) : NULL)
876
877 #define elf_x86_64_compute_jump_table_size(htab) \
878 ((htab)->elf.srelplt->reloc_count * GOT_ENTRY_SIZE)
879
880 /* Create an entry in an x86-64 ELF linker hash table. */
881
882 static struct bfd_hash_entry *
883 elf_x86_64_link_hash_newfunc (struct bfd_hash_entry *entry,
884 struct bfd_hash_table *table,
885 const char *string)
886 {
887 /* Allocate the structure if it has not already been allocated by a
888 subclass. */
889 if (entry == NULL)
890 {
891 entry = (struct bfd_hash_entry *)
892 bfd_hash_allocate (table,
893 sizeof (struct elf_x86_64_link_hash_entry));
894 if (entry == NULL)
895 return entry;
896 }
897
898 /* Call the allocation method of the superclass. */
899 entry = _bfd_elf_link_hash_newfunc (entry, table, string);
900 if (entry != NULL)
901 {
902 struct elf_x86_64_link_hash_entry *eh;
903
904 eh = (struct elf_x86_64_link_hash_entry *) entry;
905 eh->dyn_relocs = NULL;
906 eh->tls_type = GOT_UNKNOWN;
907 eh->needs_copy = 0;
908 eh->has_bnd_reloc = 0;
909 eh->plt_bnd.offset = (bfd_vma) -1;
910 eh->plt_got.offset = (bfd_vma) -1;
911 eh->tlsdesc_got = (bfd_vma) -1;
912 }
913
914 return entry;
915 }
916
917 /* Compute a hash of a local hash entry. We use elf_link_hash_entry
918 for local symbols so that we can handle local STT_GNU_IFUNC symbols
919 as global symbols. We reuse indx and dynstr_index for the local symbol
920 hash since they aren't used by global symbols in this backend. */
921
922 static hashval_t
923 elf_x86_64_local_htab_hash (const void *ptr)
924 {
925 struct elf_link_hash_entry *h
926 = (struct elf_link_hash_entry *) ptr;
927 return ELF_LOCAL_SYMBOL_HASH (h->indx, h->dynstr_index);
928 }
929
930 /* Compare local hash entries. */
931
932 static int
933 elf_x86_64_local_htab_eq (const void *ptr1, const void *ptr2)
934 {
935 struct elf_link_hash_entry *h1
936 = (struct elf_link_hash_entry *) ptr1;
937 struct elf_link_hash_entry *h2
938 = (struct elf_link_hash_entry *) ptr2;
939
940 return h1->indx == h2->indx && h1->dynstr_index == h2->dynstr_index;
941 }
942
943 /* Find and/or create a hash entry for a local symbol. */
944
945 static struct elf_link_hash_entry *
946 elf_x86_64_get_local_sym_hash (struct elf_x86_64_link_hash_table *htab,
947 bfd *abfd, const Elf_Internal_Rela *rel,
948 bfd_boolean create)
949 {
950 struct elf_x86_64_link_hash_entry e, *ret;
951 asection *sec = abfd->sections;
952 hashval_t h = ELF_LOCAL_SYMBOL_HASH (sec->id,
953 htab->r_sym (rel->r_info));
954 void **slot;
955
956 e.elf.indx = sec->id;
957 e.elf.dynstr_index = htab->r_sym (rel->r_info);
958 slot = htab_find_slot_with_hash (htab->loc_hash_table, &e, h,
959 create ? INSERT : NO_INSERT);
960
961 if (!slot)
962 return NULL;
963
964 if (*slot)
965 {
966 ret = (struct elf_x86_64_link_hash_entry *) *slot;
967 return &ret->elf;
968 }
969
970 ret = (struct elf_x86_64_link_hash_entry *)
971 objalloc_alloc ((struct objalloc *) htab->loc_hash_memory,
972 sizeof (struct elf_x86_64_link_hash_entry));
973 if (ret)
974 {
975 memset (ret, 0, sizeof (*ret));
976 ret->elf.indx = sec->id;
977 ret->elf.dynstr_index = htab->r_sym (rel->r_info);
978 ret->elf.dynindx = -1;
979 ret->plt_got.offset = (bfd_vma) -1;
980 *slot = ret;
981 }
982 return &ret->elf;
983 }
984
985 /* Destroy an X86-64 ELF linker hash table. */
986
987 static void
988 elf_x86_64_link_hash_table_free (bfd *obfd)
989 {
990 struct elf_x86_64_link_hash_table *htab
991 = (struct elf_x86_64_link_hash_table *) obfd->link.hash;
992
993 if (htab->loc_hash_table)
994 htab_delete (htab->loc_hash_table);
995 if (htab->loc_hash_memory)
996 objalloc_free ((struct objalloc *) htab->loc_hash_memory);
997 _bfd_elf_link_hash_table_free (obfd);
998 }
999
1000 /* Create an X86-64 ELF linker hash table. */
1001
1002 static struct bfd_link_hash_table *
1003 elf_x86_64_link_hash_table_create (bfd *abfd)
1004 {
1005 struct elf_x86_64_link_hash_table *ret;
1006 bfd_size_type amt = sizeof (struct elf_x86_64_link_hash_table);
1007
1008 ret = (struct elf_x86_64_link_hash_table *) bfd_zmalloc (amt);
1009 if (ret == NULL)
1010 return NULL;
1011
1012 if (!_bfd_elf_link_hash_table_init (&ret->elf, abfd,
1013 elf_x86_64_link_hash_newfunc,
1014 sizeof (struct elf_x86_64_link_hash_entry),
1015 X86_64_ELF_DATA))
1016 {
1017 free (ret);
1018 return NULL;
1019 }
1020
1021 if (ABI_64_P (abfd))
1022 {
1023 ret->r_info = elf64_r_info;
1024 ret->r_sym = elf64_r_sym;
1025 ret->pointer_r_type = R_X86_64_64;
1026 ret->dynamic_interpreter = ELF64_DYNAMIC_INTERPRETER;
1027 ret->dynamic_interpreter_size = sizeof ELF64_DYNAMIC_INTERPRETER;
1028 }
1029 else
1030 {
1031 ret->r_info = elf32_r_info;
1032 ret->r_sym = elf32_r_sym;
1033 ret->pointer_r_type = R_X86_64_32;
1034 ret->dynamic_interpreter = ELF32_DYNAMIC_INTERPRETER;
1035 ret->dynamic_interpreter_size = sizeof ELF32_DYNAMIC_INTERPRETER;
1036 }
1037
1038 ret->loc_hash_table = htab_try_create (1024,
1039 elf_x86_64_local_htab_hash,
1040 elf_x86_64_local_htab_eq,
1041 NULL);
1042 ret->loc_hash_memory = objalloc_create ();
1043 if (!ret->loc_hash_table || !ret->loc_hash_memory)
1044 {
1045 elf_x86_64_link_hash_table_free (abfd);
1046 return NULL;
1047 }
1048 ret->elf.root.hash_table_free = elf_x86_64_link_hash_table_free;
1049
1050 return &ret->elf.root;
1051 }
1052
1053 /* Create .plt, .rela.plt, .got, .got.plt, .rela.got, .dynbss, and
1054 .rela.bss sections in DYNOBJ, and set up shortcuts to them in our
1055 hash table. */
1056
1057 static bfd_boolean
1058 elf_x86_64_create_dynamic_sections (bfd *dynobj,
1059 struct bfd_link_info *info)
1060 {
1061 struct elf_x86_64_link_hash_table *htab;
1062
1063 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
1064 return FALSE;
1065
1066 htab = elf_x86_64_hash_table (info);
1067 if (htab == NULL)
1068 return FALSE;
1069
1070 htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
1071 if (!htab->sdynbss)
1072 abort ();
1073
1074 if (info->executable)
1075 {
1076 /* Always allow copy relocs for building executables. */
1077 asection *s = bfd_get_linker_section (dynobj, ".rela.bss");
1078 if (s == NULL)
1079 {
1080 const struct elf_backend_data *bed = get_elf_backend_data (dynobj);
1081 s = bfd_make_section_anyway_with_flags (dynobj,
1082 ".rela.bss",
1083 (bed->dynamic_sec_flags
1084 | SEC_READONLY));
1085 if (s == NULL
1086 || ! bfd_set_section_alignment (dynobj, s,
1087 bed->s->log_file_align))
1088 return FALSE;
1089 }
1090 htab->srelbss = s;
1091 }
1092
1093 if (!info->no_ld_generated_unwind_info
1094 && htab->plt_eh_frame == NULL
1095 && htab->elf.splt != NULL)
1096 {
1097 flagword flags = (SEC_ALLOC | SEC_LOAD | SEC_READONLY
1098 | SEC_HAS_CONTENTS | SEC_IN_MEMORY
1099 | SEC_LINKER_CREATED);
1100 htab->plt_eh_frame
1101 = bfd_make_section_anyway_with_flags (dynobj, ".eh_frame", flags);
1102 if (htab->plt_eh_frame == NULL
1103 || !bfd_set_section_alignment (dynobj, htab->plt_eh_frame, 3))
1104 return FALSE;
1105 }
1106 return TRUE;
1107 }
1108
1109 /* Copy the extra info we tack onto an elf_link_hash_entry. */
1110
1111 static void
1112 elf_x86_64_copy_indirect_symbol (struct bfd_link_info *info,
1113 struct elf_link_hash_entry *dir,
1114 struct elf_link_hash_entry *ind)
1115 {
1116 struct elf_x86_64_link_hash_entry *edir, *eind;
1117
1118 edir = (struct elf_x86_64_link_hash_entry *) dir;
1119 eind = (struct elf_x86_64_link_hash_entry *) ind;
1120
1121 if (!edir->has_bnd_reloc)
1122 edir->has_bnd_reloc = eind->has_bnd_reloc;
1123
1124 if (eind->dyn_relocs != NULL)
1125 {
1126 if (edir->dyn_relocs != NULL)
1127 {
1128 struct elf_dyn_relocs **pp;
1129 struct elf_dyn_relocs *p;
1130
1131 /* Add reloc counts against the indirect sym to the direct sym
1132 list. Merge any entries against the same section. */
1133 for (pp = &eind->dyn_relocs; (p = *pp) != NULL; )
1134 {
1135 struct elf_dyn_relocs *q;
1136
1137 for (q = edir->dyn_relocs; q != NULL; q = q->next)
1138 if (q->sec == p->sec)
1139 {
1140 q->pc_count += p->pc_count;
1141 q->count += p->count;
1142 *pp = p->next;
1143 break;
1144 }
1145 if (q == NULL)
1146 pp = &p->next;
1147 }
1148 *pp = edir->dyn_relocs;
1149 }
1150
1151 edir->dyn_relocs = eind->dyn_relocs;
1152 eind->dyn_relocs = NULL;
1153 }
1154
1155 if (ind->root.type == bfd_link_hash_indirect
1156 && dir->got.refcount <= 0)
1157 {
1158 edir->tls_type = eind->tls_type;
1159 eind->tls_type = GOT_UNKNOWN;
1160 }
1161
1162 if (ELIMINATE_COPY_RELOCS
1163 && ind->root.type != bfd_link_hash_indirect
1164 && dir->dynamic_adjusted)
1165 {
1166 /* If called to transfer flags for a weakdef during processing
1167 of elf_adjust_dynamic_symbol, don't copy non_got_ref.
1168 We clear it ourselves for ELIMINATE_COPY_RELOCS. */
1169 dir->ref_dynamic |= ind->ref_dynamic;
1170 dir->ref_regular |= ind->ref_regular;
1171 dir->ref_regular_nonweak |= ind->ref_regular_nonweak;
1172 dir->needs_plt |= ind->needs_plt;
1173 dir->pointer_equality_needed |= ind->pointer_equality_needed;
1174 }
1175 else
1176 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
1177 }
1178
1179 static bfd_boolean
1180 elf64_x86_64_elf_object_p (bfd *abfd)
1181 {
1182 /* Set the right machine number for an x86-64 elf64 file. */
1183 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64);
1184 return TRUE;
1185 }
1186
1187 static bfd_boolean
1188 elf32_x86_64_elf_object_p (bfd *abfd)
1189 {
1190 /* Set the right machine number for an x86-64 elf32 file. */
1191 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32);
1192 return TRUE;
1193 }
1194
1195 /* Return TRUE if the TLS access code sequence supports a transition
1196 from R_TYPE. */
1197
1198 static bfd_boolean
1199 elf_x86_64_check_tls_transition (bfd *abfd,
1200 struct bfd_link_info *info,
1201 asection *sec,
1202 bfd_byte *contents,
1203 Elf_Internal_Shdr *symtab_hdr,
1204 struct elf_link_hash_entry **sym_hashes,
1205 unsigned int r_type,
1206 const Elf_Internal_Rela *rel,
1207 const Elf_Internal_Rela *relend)
1208 {
1209 unsigned int val;
1210 unsigned long r_symndx;
1211 bfd_boolean largepic = FALSE;
1212 struct elf_link_hash_entry *h;
1213 bfd_vma offset;
1214 struct elf_x86_64_link_hash_table *htab;
1215
1216 /* Get the section contents. */
1217 if (contents == NULL)
1218 {
1219 if (elf_section_data (sec)->this_hdr.contents != NULL)
1220 contents = elf_section_data (sec)->this_hdr.contents;
1221 else
1222 {
1223 /* FIXME: How to better handle error condition? */
1224 if (!bfd_malloc_and_get_section (abfd, sec, &contents))
1225 return FALSE;
1226
1227 /* Cache the section contents for elf_link_input_bfd. */
1228 elf_section_data (sec)->this_hdr.contents = contents;
1229 }
1230 }
1231
1232 htab = elf_x86_64_hash_table (info);
1233 offset = rel->r_offset;
1234 switch (r_type)
1235 {
1236 case R_X86_64_TLSGD:
1237 case R_X86_64_TLSLD:
1238 if ((rel + 1) >= relend)
1239 return FALSE;
1240
1241 if (r_type == R_X86_64_TLSGD)
1242 {
1243 /* Check transition from GD access model. For 64bit, only
1244 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1245 .word 0x6666; rex64; call __tls_get_addr
1246 can transit to a different access model. For 32bit, only
1247 leaq foo@tlsgd(%rip), %rdi
1248 .word 0x6666; rex64; call __tls_get_addr
1249 can transit to a different access model. For largepic
1250 we also support:
1251 leaq foo@tlsgd(%rip), %rdi
1252 movabsq $__tls_get_addr@pltoff, %rax
1253 addq %rbx, %rax
1254 call *%rax. */
1255
1256 static const unsigned char call[] = { 0x66, 0x66, 0x48, 0xe8 };
1257 static const unsigned char leaq[] = { 0x66, 0x48, 0x8d, 0x3d };
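/* call[] is the ".word 0x6666; rex64; call" opcode pattern (66 66 48 e8);
   leaq[] is the 0x66-prefixed "leaq foo@tlsgd(%rip), %rdi" pattern
   (66 48 8d 3d, where 3d is the ModRM byte selecting %rdi with a
   %rip-relative disp32). */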
1258
1259 if ((offset + 12) > sec->size)
1260 return FALSE;
1261
1262 if (memcmp (contents + offset + 4, call, 4) != 0)
1263 {
1264 if (!ABI_64_P (abfd)
1265 || (offset + 19) > sec->size
1266 || offset < 3
1267 || memcmp (contents + offset - 3, leaq + 1, 3) != 0
1268 || memcmp (contents + offset + 4, "\x48\xb8", 2) != 0
1269 || memcmp (contents + offset + 14, "\x48\x01\xd8\xff\xd0", 5)
1270 != 0)
1271 return FALSE;
1272 largepic = TRUE;
1273 }
1274 else if (ABI_64_P (abfd))
1275 {
1276 if (offset < 4
1277 || memcmp (contents + offset - 4, leaq, 4) != 0)
1278 return FALSE;
1279 }
1280 else
1281 {
1282 if (offset < 3
1283 || memcmp (contents + offset - 3, leaq + 1, 3) != 0)
1284 return FALSE;
1285 }
1286 }
1287 else
1288 {
1289 /* Check transition from LD access model. Only
1290 leaq foo@tlsld(%rip), %rdi;
1291 call __tls_get_addr
1292 can transit to a different access model. For largepic
1293 we also support:
1294 leaq foo@tlsld(%rip), %rdi
1295 movabsq $__tls_get_addr@pltoff, %rax
1296 addq %rbx, %rax
1297 call *%rax. */
1298
1299 static const unsigned char lea[] = { 0x48, 0x8d, 0x3d };
1300
1301 if (offset < 3 || (offset + 9) > sec->size)
1302 return FALSE;
1303
1304 if (memcmp (contents + offset - 3, lea, 3) != 0)
1305 return FALSE;
1306
1307 if (0xe8 != *(contents + offset + 4))
1308 {
1309 if (!ABI_64_P (abfd)
1310 || (offset + 19) > sec->size
1311 || memcmp (contents + offset + 4, "\x48\xb8", 2) != 0
1312 || memcmp (contents + offset + 14, "\x48\x01\xd8\xff\xd0", 5)
1313 != 0)
1314 return FALSE;
1315 largepic = TRUE;
1316 }
1317 }
1318
1319 r_symndx = htab->r_sym (rel[1].r_info);
1320 if (r_symndx < symtab_hdr->sh_info)
1321 return FALSE;
1322
1323 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1324 /* Use strncmp to check __tls_get_addr since __tls_get_addr
1325 may be versioned. */
1326 return (h != NULL
1327 && h->root.root.string != NULL
1328 && (largepic
1329 ? ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PLTOFF64
1330 : (ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PC32
1331 || ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PLT32))
1332 && (strncmp (h->root.root.string,
1333 "__tls_get_addr", 14) == 0));
1334
1335 case R_X86_64_GOTTPOFF:
1336 /* Check transition from IE access model:
1337 mov foo@gottpoff(%rip), %reg
1338 add foo@gottpoff(%rip), %reg
1339 */
1340
1341 /* Check REX prefix first. */
1342 if (offset >= 3 && (offset + 4) <= sec->size)
1343 {
1344 val = bfd_get_8 (abfd, contents + offset - 3);
1345 if (val != 0x48 && val != 0x4c)
1346 {
1347 /* X32 may have 0x44 REX prefix or no REX prefix. */
1348 if (ABI_64_P (abfd))
1349 return FALSE;
1350 }
1351 }
1352 else
1353 {
1354 /* X32 may not have any REX prefix. */
1355 if (ABI_64_P (abfd))
1356 return FALSE;
1357 if (offset < 2 || (offset + 3) > sec->size)
1358 return FALSE;
1359 }
1360
1361 val = bfd_get_8 (abfd, contents + offset - 2);
1362 if (val != 0x8b && val != 0x03)
1363 return FALSE;
1364
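/* The ModRM byte must encode a %rip-relative memory operand: mod == 00
   and r/m == 101 (disp32(%rip)), with any register in the reg field,
   hence the (val & 0xc7) == 5 test below. */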
1365 val = bfd_get_8 (abfd, contents + offset - 1);
1366 return (val & 0xc7) == 5;
1367
1368 case R_X86_64_GOTPC32_TLSDESC:
1369 /* Check transition from GDesc access model:
1370 leaq x@tlsdesc(%rip), %rax
1371
1372 Make sure it's a leaq adding rip to a 32-bit offset
1373 into any register, although it's probably almost always
1374 going to be rax. */
1375
1376 if (offset < 3 || (offset + 4) > sec->size)
1377 return FALSE;
1378
1379 val = bfd_get_8 (abfd, contents + offset - 3);
1380 if ((val & 0xfb) != 0x48)
1381 return FALSE;
1382
1383 if (bfd_get_8 (abfd, contents + offset - 2) != 0x8d)
1384 return FALSE;
1385
1386 val = bfd_get_8 (abfd, contents + offset - 1);
1387 return (val & 0xc7) == 0x05;
1388
1389 case R_X86_64_TLSDESC_CALL:
1390 /* Check transition from GDesc access model:
1391 call *x@tlsdesc(%rax)
1392 */
1393 if (offset + 2 <= sec->size)
1394 {
1395 /* Make sure that it's a call *x@tlsdesc(%rax). */
1396 static const unsigned char call[] = { 0xff, 0x10 };
1397 return memcmp (contents + offset, call, 2) == 0;
1398 }
1399
1400 return FALSE;
1401
1402 default:
1403 abort ();
1404 }
1405 }
1406
1407 /* Return TRUE if the TLS access transition is OK or no transition
1408 will be performed. Update R_TYPE if there is a transition. */
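/* In outline: when building an executable, GD, GDesc and IE accesses to
   a symbol that resolves locally are relaxed to LE (R_X86_64_TPOFF32),
   GD and GDesc accesses to other symbols are relaxed to IE
   (R_X86_64_GOTTPOFF), and LD is relaxed to LE; shared objects keep the
   original access model. */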
1409
1410 static bfd_boolean
1411 elf_x86_64_tls_transition (struct bfd_link_info *info, bfd *abfd,
1412 asection *sec, bfd_byte *contents,
1413 Elf_Internal_Shdr *symtab_hdr,
1414 struct elf_link_hash_entry **sym_hashes,
1415 unsigned int *r_type, int tls_type,
1416 const Elf_Internal_Rela *rel,
1417 const Elf_Internal_Rela *relend,
1418 struct elf_link_hash_entry *h,
1419 unsigned long r_symndx)
1420 {
1421 unsigned int from_type = *r_type;
1422 unsigned int to_type = from_type;
1423 bfd_boolean check = TRUE;
1424
1425 /* Skip TLS transition for functions. */
1426 if (h != NULL
1427 && (h->type == STT_FUNC
1428 || h->type == STT_GNU_IFUNC))
1429 return TRUE;
1430
1431 switch (from_type)
1432 {
1433 case R_X86_64_TLSGD:
1434 case R_X86_64_GOTPC32_TLSDESC:
1435 case R_X86_64_TLSDESC_CALL:
1436 case R_X86_64_GOTTPOFF:
1437 if (info->executable)
1438 {
1439 if (h == NULL)
1440 to_type = R_X86_64_TPOFF32;
1441 else
1442 to_type = R_X86_64_GOTTPOFF;
1443 }
1444
1445 /* When we are called from elf_x86_64_relocate_section,
1446 CONTENTS isn't NULL and there may be additional transitions
1447 based on TLS_TYPE. */
1448 if (contents != NULL)
1449 {
1450 unsigned int new_to_type = to_type;
1451
1452 if (info->executable
1453 && h != NULL
1454 && h->dynindx == -1
1455 && tls_type == GOT_TLS_IE)
1456 new_to_type = R_X86_64_TPOFF32;
1457
1458 if (to_type == R_X86_64_TLSGD
1459 || to_type == R_X86_64_GOTPC32_TLSDESC
1460 || to_type == R_X86_64_TLSDESC_CALL)
1461 {
1462 if (tls_type == GOT_TLS_IE)
1463 new_to_type = R_X86_64_GOTTPOFF;
1464 }
1465
1466 /* We checked the transition before when we were called from
1467 elf_x86_64_check_relocs. We only want to check the new
1468 transition which hasn't been checked before. */
1469 check = new_to_type != to_type && from_type == to_type;
1470 to_type = new_to_type;
1471 }
1472
1473 break;
1474
1475 case R_X86_64_TLSLD:
1476 if (info->executable)
1477 to_type = R_X86_64_TPOFF32;
1478 break;
1479
1480 default:
1481 return TRUE;
1482 }
1483
1484 /* Return TRUE if there is no transition. */
1485 if (from_type == to_type)
1486 return TRUE;
1487
1488 /* Check if the transition can be performed. */
1489 if (check
1490 && ! elf_x86_64_check_tls_transition (abfd, info, sec, contents,
1491 symtab_hdr, sym_hashes,
1492 from_type, rel, relend))
1493 {
1494 reloc_howto_type *from, *to;
1495 const char *name;
1496
1497 from = elf_x86_64_rtype_to_howto (abfd, from_type);
1498 to = elf_x86_64_rtype_to_howto (abfd, to_type);
1499
1500 if (h)
1501 name = h->root.root.string;
1502 else
1503 {
1504 struct elf_x86_64_link_hash_table *htab;
1505
1506 htab = elf_x86_64_hash_table (info);
1507 if (htab == NULL)
1508 name = "*unknown*";
1509 else
1510 {
1511 Elf_Internal_Sym *isym;
1512
1513 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1514 abfd, r_symndx);
1515 name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL);
1516 }
1517 }
1518
1519 (*_bfd_error_handler)
1520 (_("%B: TLS transition from %s to %s against `%s' at 0x%lx "
1521 "in section `%A' failed"),
1522 abfd, sec, from->name, to->name, name,
1523 (unsigned long) rel->r_offset);
1524 bfd_set_error (bfd_error_bad_value);
1525 return FALSE;
1526 }
1527
1528 *r_type = to_type;
1529 return TRUE;
1530 }
1531
1532 /* Look through the relocs for a section during the first phase, and
1533 calculate needed space in the global offset table, procedure
1534 linkage table, and dynamic reloc sections. */
1535
1536 static bfd_boolean
1537 elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info,
1538 asection *sec,
1539 const Elf_Internal_Rela *relocs)
1540 {
1541 struct elf_x86_64_link_hash_table *htab;
1542 Elf_Internal_Shdr *symtab_hdr;
1543 struct elf_link_hash_entry **sym_hashes;
1544 const Elf_Internal_Rela *rel;
1545 const Elf_Internal_Rela *rel_end;
1546 asection *sreloc;
1547 bfd_boolean use_plt_got;
1548
1549 if (info->relocatable)
1550 return TRUE;
1551
1552 BFD_ASSERT (is_x86_64_elf (abfd));
1553
1554 htab = elf_x86_64_hash_table (info);
1555 if (htab == NULL)
1556 return FALSE;
1557
1558 use_plt_got = get_elf_x86_64_backend_data (abfd) == &elf_x86_64_arch_bed;
1559
1560 symtab_hdr = &elf_symtab_hdr (abfd);
1561 sym_hashes = elf_sym_hashes (abfd);
1562
1563 sreloc = NULL;
1564
1565 rel_end = relocs + sec->reloc_count;
1566 for (rel = relocs; rel < rel_end; rel++)
1567 {
1568 unsigned int r_type;
1569 unsigned long r_symndx;
1570 struct elf_link_hash_entry *h;
1571 Elf_Internal_Sym *isym;
1572 const char *name;
1573 bfd_boolean size_reloc;
1574
1575 r_symndx = htab->r_sym (rel->r_info);
1576 r_type = ELF32_R_TYPE (rel->r_info);
1577
1578 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
1579 {
1580 (*_bfd_error_handler) (_("%B: bad symbol index: %d"),
1581 abfd, r_symndx);
1582 return FALSE;
1583 }
1584
1585 if (r_symndx < symtab_hdr->sh_info)
1586 {
1587 /* A local symbol. */
1588 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1589 abfd, r_symndx);
1590 if (isym == NULL)
1591 return FALSE;
1592
1593 /* Check relocation against local STT_GNU_IFUNC symbol. */
1594 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
1595 {
1596 h = elf_x86_64_get_local_sym_hash (htab, abfd, rel,
1597 TRUE);
1598 if (h == NULL)
1599 return FALSE;
1600
1601 /* Fake a STT_GNU_IFUNC symbol. */
1602 h->type = STT_GNU_IFUNC;
1603 h->def_regular = 1;
1604 h->ref_regular = 1;
1605 h->forced_local = 1;
1606 h->root.type = bfd_link_hash_defined;
1607 }
1608 else
1609 h = NULL;
1610 }
1611 else
1612 {
1613 isym = NULL;
1614 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1615 while (h->root.type == bfd_link_hash_indirect
1616 || h->root.type == bfd_link_hash_warning)
1617 h = (struct elf_link_hash_entry *) h->root.u.i.link;
1618 }
1619
1620 /* Check invalid x32 relocations. */
1621 if (!ABI_64_P (abfd))
1622 switch (r_type)
1623 {
1624 default:
1625 break;
1626
1627 case R_X86_64_DTPOFF64:
1628 case R_X86_64_TPOFF64:
1629 case R_X86_64_PC64:
1630 case R_X86_64_GOTOFF64:
1631 case R_X86_64_GOT64:
1632 case R_X86_64_GOTPCREL64:
1633 case R_X86_64_GOTPC64:
1634 case R_X86_64_GOTPLT64:
1635 case R_X86_64_PLTOFF64:
1636 {
1637 if (h)
1638 name = h->root.root.string;
1639 else
1640 name = bfd_elf_sym_name (abfd, symtab_hdr, isym,
1641 NULL);
1642 (*_bfd_error_handler)
1643 (_("%B: relocation %s against symbol `%s' isn't "
1644 "supported in x32 mode"), abfd,
1645 x86_64_elf_howto_table[r_type].name, name);
1646 bfd_set_error (bfd_error_bad_value);
1647 return FALSE;
1648 }
1649 break;
1650 }
1651
1652 if (h != NULL)
1653 {
1654 /* Create the ifunc sections for static executables. If we
1655 never see an indirect function symbol and are not building
1656 a static executable, those sections will be empty and
1657 won't appear in the output. */
1658 switch (r_type)
1659 {
1660 default:
1661 break;
1662
1663 case R_X86_64_PC32_BND:
1664 case R_X86_64_PLT32_BND:
1665 case R_X86_64_PC32:
1666 case R_X86_64_PLT32:
1667 case R_X86_64_32:
1668 case R_X86_64_64:
1669 /* MPX PLT is supported only if elf_x86_64_arch_bed
1670 is used in 64-bit mode. */
1671 if (ABI_64_P (abfd)
1672 && info->bndplt
1673 && (get_elf_x86_64_backend_data (abfd)
1674 == &elf_x86_64_arch_bed))
1675 {
1676 elf_x86_64_hash_entry (h)->has_bnd_reloc = 1;
1677
1678 /* Create the second PLT for Intel MPX support. */
1679 if (htab->plt_bnd == NULL)
1680 {
1681 unsigned int plt_bnd_align;
1682 const struct elf_backend_data *bed;
1683
1684 bed = get_elf_backend_data (info->output_bfd);
1685 BFD_ASSERT (sizeof (elf_x86_64_bnd_plt2_entry) == 8
1686 && (sizeof (elf_x86_64_bnd_plt2_entry)
1687 == sizeof (elf_x86_64_legacy_plt2_entry)));
1688 plt_bnd_align = 3;
1689
1690 if (htab->elf.dynobj == NULL)
1691 htab->elf.dynobj = abfd;
1692 htab->plt_bnd
1693 = bfd_make_section_anyway_with_flags (htab->elf.dynobj,
1694 ".plt.bnd",
1695 (bed->dynamic_sec_flags
1696 | SEC_ALLOC
1697 | SEC_CODE
1698 | SEC_LOAD
1699 | SEC_READONLY));
1700 if (htab->plt_bnd == NULL
1701 || !bfd_set_section_alignment (htab->elf.dynobj,
1702 htab->plt_bnd,
1703 plt_bnd_align))
1704 return FALSE;
1705 }
1706 }
1707
1708 case R_X86_64_32S:
1709 case R_X86_64_PC64:
1710 case R_X86_64_GOTPCREL:
1711 case R_X86_64_GOTPCREL64:
1712 if (htab->elf.dynobj == NULL)
1713 htab->elf.dynobj = abfd;
1714 if (!_bfd_elf_create_ifunc_sections (htab->elf.dynobj, info))
1715 return FALSE;
1716 break;
1717 }
1718
1719 /* It is referenced by a non-shared object. */
1720 h->ref_regular = 1;
1721 h->root.non_ir_ref = 1;
1722 }
1723
1724 if (! elf_x86_64_tls_transition (info, abfd, sec, NULL,
1725 symtab_hdr, sym_hashes,
1726 &r_type, GOT_UNKNOWN,
1727 rel, rel_end, h, r_symndx))
1728 return FALSE;
1729
1730 switch (r_type)
1731 {
1732 case R_X86_64_TLSLD:
1733 htab->tls_ld_got.refcount += 1;
1734 goto create_got;
1735
1736 case R_X86_64_TPOFF32:
1737 if (!info->executable && ABI_64_P (abfd))
1738 {
1739 if (h)
1740 name = h->root.root.string;
1741 else
1742 name = bfd_elf_sym_name (abfd, symtab_hdr, isym,
1743 NULL);
1744 (*_bfd_error_handler)
1745 (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
1746 abfd,
1747 x86_64_elf_howto_table[r_type].name, name);
1748 bfd_set_error (bfd_error_bad_value);
1749 return FALSE;
1750 }
1751 break;
1752
1753 case R_X86_64_GOTTPOFF:
1754 if (!info->executable)
1755 info->flags |= DF_STATIC_TLS;
1756 /* Fall through */
1757
1758 case R_X86_64_GOT32:
1759 case R_X86_64_GOTPCREL:
1760 case R_X86_64_TLSGD:
1761 case R_X86_64_GOT64:
1762 case R_X86_64_GOTPCREL64:
1763 case R_X86_64_GOTPLT64:
1764 case R_X86_64_GOTPC32_TLSDESC:
1765 case R_X86_64_TLSDESC_CALL:
1766 /* This symbol requires a global offset table entry. */
1767 {
1768 int tls_type, old_tls_type;
1769
1770 switch (r_type)
1771 {
1772 default: tls_type = GOT_NORMAL; break;
1773 case R_X86_64_TLSGD: tls_type = GOT_TLS_GD; break;
1774 case R_X86_64_GOTTPOFF: tls_type = GOT_TLS_IE; break;
1775 case R_X86_64_GOTPC32_TLSDESC:
1776 case R_X86_64_TLSDESC_CALL:
1777 tls_type = GOT_TLS_GDESC; break;
1778 }
1779
1780 if (h != NULL)
1781 {
1782 h->got.refcount += 1;
1783 old_tls_type = elf_x86_64_hash_entry (h)->tls_type;
1784 }
1785 else
1786 {
1787 bfd_signed_vma *local_got_refcounts;
1788
1789 /* This is a global offset table entry for a local symbol. */
1790 local_got_refcounts = elf_local_got_refcounts (abfd);
1791 if (local_got_refcounts == NULL)
1792 {
1793 bfd_size_type size;
1794
1795 size = symtab_hdr->sh_info;
1796 size *= sizeof (bfd_signed_vma)
1797 + sizeof (bfd_vma) + sizeof (char);
1798 local_got_refcounts = ((bfd_signed_vma *)
1799 bfd_zalloc (abfd, size));
1800 if (local_got_refcounts == NULL)
1801 return FALSE;
1802 elf_local_got_refcounts (abfd) = local_got_refcounts;
1803 elf_x86_64_local_tlsdesc_gotent (abfd)
1804 = (bfd_vma *) (local_got_refcounts + symtab_hdr->sh_info);
1805 elf_x86_64_local_got_tls_type (abfd)
1806 = (char *) (local_got_refcounts + 2 * symtab_hdr->sh_info);
1807 }
1808 local_got_refcounts[r_symndx] += 1;
1809 old_tls_type
1810 = elf_x86_64_local_got_tls_type (abfd) [r_symndx];
1811 }
1812
1813 /* If a TLS symbol is accessed using IE at least once,
1814 there is no point in using the dynamic model for it. */
1815 if (old_tls_type != tls_type && old_tls_type != GOT_UNKNOWN
1816 && (! GOT_TLS_GD_ANY_P (old_tls_type)
1817 || tls_type != GOT_TLS_IE))
1818 {
1819 if (old_tls_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (tls_type))
1820 tls_type = old_tls_type;
1821 else if (GOT_TLS_GD_ANY_P (old_tls_type)
1822 && GOT_TLS_GD_ANY_P (tls_type))
1823 tls_type |= old_tls_type;
1824 else
1825 {
1826 if (h)
1827 name = h->root.root.string;
1828 else
1829 name = bfd_elf_sym_name (abfd, symtab_hdr,
1830 isym, NULL);
1831 (*_bfd_error_handler)
1832 (_("%B: '%s' accessed both as normal and thread local symbol"),
1833 abfd, name);
1834 bfd_set_error (bfd_error_bad_value);
1835 return FALSE;
1836 }
1837 }
1838
1839 if (old_tls_type != tls_type)
1840 {
1841 if (h != NULL)
1842 elf_x86_64_hash_entry (h)->tls_type = tls_type;
1843 else
1844 elf_x86_64_local_got_tls_type (abfd) [r_symndx] = tls_type;
1845 }
1846 }
1847 /* Fall through */
1848
1849 case R_X86_64_GOTOFF64:
1850 case R_X86_64_GOTPC32:
1851 case R_X86_64_GOTPC64:
1852 create_got:
1853 if (htab->elf.sgot == NULL)
1854 {
1855 if (htab->elf.dynobj == NULL)
1856 htab->elf.dynobj = abfd;
1857 if (!_bfd_elf_create_got_section (htab->elf.dynobj,
1858 info))
1859 return FALSE;
1860 }
1861 break;
1862
1863 case R_X86_64_PLT32:
1864 case R_X86_64_PLT32_BND:
1865 /* This symbol requires a procedure linkage table entry. We
1866 actually build the entry in adjust_dynamic_symbol,
1867 because this might be a case of linking PIC code which is
1868 never referenced by a dynamic object, in which case we
1869 don't need to generate a procedure linkage table entry
1870 after all. */
1871
1872 /* If this is a local symbol, we resolve it directly without
1873 creating a procedure linkage table entry. */
1874 if (h == NULL)
1875 continue;
1876
1877 h->needs_plt = 1;
1878 h->plt.refcount += 1;
1879 break;
1880
1881 case R_X86_64_PLTOFF64:
1882 /* This tries to form the 'address' of a function relative
1883 to GOT. For global symbols we need a PLT entry. */
1884 if (h != NULL)
1885 {
1886 h->needs_plt = 1;
1887 h->plt.refcount += 1;
1888 }
1889 goto create_got;
1890
1891 case R_X86_64_SIZE32:
1892 case R_X86_64_SIZE64:
1893 size_reloc = TRUE;
1894 goto do_size;
1895
1896 case R_X86_64_32:
1897 if (!ABI_64_P (abfd))
1898 goto pointer;
1899 case R_X86_64_8:
1900 case R_X86_64_16:
1901 case R_X86_64_32S:
1902 /* Let's help debug shared library creation. These relocs
1903 cannot be used in shared libs. Don't error out for
1904 sections we don't care about, such as debug sections or
1905 non-constant sections. */
1906 if (info->shared
1907 && (sec->flags & SEC_ALLOC) != 0
1908 && (sec->flags & SEC_READONLY) != 0)
1909 {
1910 if (h)
1911 name = h->root.root.string;
1912 else
1913 name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL);
1914 (*_bfd_error_handler)
1915 (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
1916 abfd, x86_64_elf_howto_table[r_type].name, name);
1917 bfd_set_error (bfd_error_bad_value);
1918 return FALSE;
1919 }
1920 /* Fall through. */
1921
1922 case R_X86_64_PC8:
1923 case R_X86_64_PC16:
1924 case R_X86_64_PC32:
1925 case R_X86_64_PC32_BND:
1926 case R_X86_64_PC64:
1927 case R_X86_64_64:
1928 pointer:
1929 if (h != NULL && info->executable)
1930 {
1931 /* If this reloc is in a read-only section, we might
1932 need a copy reloc. We can't check reliably at this
1933 stage whether the section is read-only, as input
1934 sections have not yet been mapped to output sections.
1935 Tentatively set the flag for now, and correct in
1936 adjust_dynamic_symbol. */
1937 h->non_got_ref = 1;
1938
1939 /* We may need a .plt entry if the function this reloc
1940 refers to is in a shared lib. */
1941 h->plt.refcount += 1;
1942 if (r_type != R_X86_64_PC32
1943 && r_type != R_X86_64_PC32_BND
1944 && r_type != R_X86_64_PC64)
1945 h->pointer_equality_needed = 1;
1946 }
1947
1948 size_reloc = FALSE;
1949 do_size:
1950 /* If we are creating a shared library, and this is a reloc
1951 against a global symbol, or a non PC relative reloc
1952 against a local symbol, then we need to copy the reloc
1953 into the shared library. However, if we are linking with
1954 -Bsymbolic, we do not need to copy a reloc against a
1955 global symbol which is defined in an object we are
1956 including in the link (i.e., DEF_REGULAR is set). At
1957 this point we have not seen all the input files, so it is
1958 possible that DEF_REGULAR is not set now but will be set
1959 later (it is never cleared). In case of a weak definition,
1960 DEF_REGULAR may be cleared later by a strong definition in
1961 a shared library. We account for that possibility below by
1962 storing information in the relocs_copied field of the hash
1963 table entry. A similar situation occurs when creating
1964 shared libraries and symbol visibility changes render the
1965 symbol local.
1966
1967 If, on the other hand, we are creating an executable, we
1968 may need to keep relocations for symbols satisfied by a
1969 dynamic library if we manage to avoid copy relocs for the
1970 symbol. */
1971 if ((info->shared
1972 && (sec->flags & SEC_ALLOC) != 0
1973 && (! IS_X86_64_PCREL_TYPE (r_type)
1974 || (h != NULL
1975 && (! SYMBOLIC_BIND (info, h)
1976 || h->root.type == bfd_link_hash_defweak
1977 || !h->def_regular))))
1978 || (ELIMINATE_COPY_RELOCS
1979 && !info->shared
1980 && (sec->flags & SEC_ALLOC) != 0
1981 && h != NULL
1982 && (h->root.type == bfd_link_hash_defweak
1983 || !h->def_regular)))
1984 {
1985 struct elf_dyn_relocs *p;
1986 struct elf_dyn_relocs **head;
1987
1988 /* We must copy these reloc types into the output file.
1989 Create a reloc section in dynobj and make room for
1990 this reloc. */
1991 if (sreloc == NULL)
1992 {
1993 if (htab->elf.dynobj == NULL)
1994 htab->elf.dynobj = abfd;
1995
1996 sreloc = _bfd_elf_make_dynamic_reloc_section
1997 (sec, htab->elf.dynobj, ABI_64_P (abfd) ? 3 : 2,
1998 abfd, /*rela?*/ TRUE);
1999
2000 if (sreloc == NULL)
2001 return FALSE;
2002 }
2003
2004 /* If this is a global symbol, we count the number of
2005 relocations we need for this symbol. */
2006 if (h != NULL)
2007 {
2008 head = &((struct elf_x86_64_link_hash_entry *) h)->dyn_relocs;
2009 }
2010 else
2011 {
2012 /* Track dynamic relocs needed for local syms too.
2013 We really need local syms available to do this
2014 easily. Oh well. */
2015 asection *s;
2016 void **vpp;
2017
2018 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
2019 abfd, r_symndx);
2020 if (isym == NULL)
2021 return FALSE;
2022
2023 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
2024 if (s == NULL)
2025 s = sec;
2026
2027 /* Beware of type punned pointers vs strict aliasing
2028 rules. */
2029 vpp = &(elf_section_data (s)->local_dynrel);
2030 head = (struct elf_dyn_relocs **)vpp;
2031 }
2032
2033 p = *head;
2034 if (p == NULL || p->sec != sec)
2035 {
2036 bfd_size_type amt = sizeof *p;
2037
2038 p = ((struct elf_dyn_relocs *)
2039 bfd_alloc (htab->elf.dynobj, amt));
2040 if (p == NULL)
2041 return FALSE;
2042 p->next = *head;
2043 *head = p;
2044 p->sec = sec;
2045 p->count = 0;
2046 p->pc_count = 0;
2047 }
2048
2049 p->count += 1;
2050 /* Count size relocation as PC-relative relocation. */
2051 if (IS_X86_64_PCREL_TYPE (r_type) || size_reloc)
2052 p->pc_count += 1;
2053 }
2054 break;
2055
2056 /* This relocation describes the C++ object vtable hierarchy.
2057 Reconstruct it for later use during GC. */
2058 case R_X86_64_GNU_VTINHERIT:
2059 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
2060 return FALSE;
2061 break;
2062
2063 /* This relocation describes which C++ vtable entries are actually
2064 used. Record for later use during GC. */
2065 case R_X86_64_GNU_VTENTRY:
2066 BFD_ASSERT (h != NULL);
2067 if (h != NULL
2068 && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_addend))
2069 return FALSE;
2070 break;
2071
2072 default:
2073 break;
2074 }
2075
2076 if (use_plt_got
2077 && h != NULL
2078 && h->plt.refcount > 0
2079 && h->got.refcount > 0
2080 && htab->plt_got == NULL)
2081 {
2082 /* Create the GOT procedure linkage table. */
2083 unsigned int plt_got_align;
2084 const struct elf_backend_data *bed;
2085
2086 bed = get_elf_backend_data (info->output_bfd);
2087 BFD_ASSERT (sizeof (elf_x86_64_legacy_plt2_entry) == 8
2088 && (sizeof (elf_x86_64_bnd_plt2_entry)
2089 == sizeof (elf_x86_64_legacy_plt2_entry)));
2090 plt_got_align = 3;
2091
2092 if (htab->elf.dynobj == NULL)
2093 htab->elf.dynobj = abfd;
2094 htab->plt_got
2095 = bfd_make_section_anyway_with_flags (htab->elf.dynobj,
2096 ".plt.got",
2097 (bed->dynamic_sec_flags
2098 | SEC_ALLOC
2099 | SEC_CODE
2100 | SEC_LOAD
2101 | SEC_READONLY));
2102 if (htab->plt_got == NULL
2103 || !bfd_set_section_alignment (htab->elf.dynobj,
2104 htab->plt_got,
2105 plt_got_align))
2106 return FALSE;
2107 }
2108 }
2109
2110 return TRUE;
2111 }
2112
2113 /* Return the section that should be marked against GC for a given
2114 relocation. */
2115
2116 static asection *
2117 elf_x86_64_gc_mark_hook (asection *sec,
2118 struct bfd_link_info *info,
2119 Elf_Internal_Rela *rel,
2120 struct elf_link_hash_entry *h,
2121 Elf_Internal_Sym *sym)
2122 {
2123 if (h != NULL)
2124 switch (ELF32_R_TYPE (rel->r_info))
2125 {
2126 case R_X86_64_GNU_VTINHERIT:
2127 case R_X86_64_GNU_VTENTRY:
2128 return NULL;
2129 }
2130
2131 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
2132 }
2133
2134 /* Update the got entry reference counts for the section being removed. */
2135
2136 static bfd_boolean
2137 elf_x86_64_gc_sweep_hook (bfd *abfd, struct bfd_link_info *info,
2138 asection *sec,
2139 const Elf_Internal_Rela *relocs)
2140 {
2141 struct elf_x86_64_link_hash_table *htab;
2142 Elf_Internal_Shdr *symtab_hdr;
2143 struct elf_link_hash_entry **sym_hashes;
2144 bfd_signed_vma *local_got_refcounts;
2145 const Elf_Internal_Rela *rel, *relend;
2146
2147 if (info->relocatable)
2148 return TRUE;
2149
2150 htab = elf_x86_64_hash_table (info);
2151 if (htab == NULL)
2152 return FALSE;
2153
2154 elf_section_data (sec)->local_dynrel = NULL;
2155
2156 symtab_hdr = &elf_symtab_hdr (abfd);
2157 sym_hashes = elf_sym_hashes (abfd);
2158 local_got_refcounts = elf_local_got_refcounts (abfd);
2159
2160 htab = elf_x86_64_hash_table (info);
2161 relend = relocs + sec->reloc_count;
2162 for (rel = relocs; rel < relend; rel++)
2163 {
2164 unsigned long r_symndx;
2165 unsigned int r_type;
2166 struct elf_link_hash_entry *h = NULL;
2167
2168 r_symndx = htab->r_sym (rel->r_info);
2169 if (r_symndx >= symtab_hdr->sh_info)
2170 {
2171 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
2172 while (h->root.type == bfd_link_hash_indirect
2173 || h->root.type == bfd_link_hash_warning)
2174 h = (struct elf_link_hash_entry *) h->root.u.i.link;
2175 }
2176 else
2177 {
2178 /* A local symbol. */
2179 Elf_Internal_Sym *isym;
2180
2181 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
2182 abfd, r_symndx);
2183
2184 /* Check relocation against local STT_GNU_IFUNC symbol. */
2185 if (isym != NULL
2186 && ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
2187 {
2188 h = elf_x86_64_get_local_sym_hash (htab, abfd, rel, FALSE);
2189 if (h == NULL)
2190 abort ();
2191 }
2192 }
2193
2194 if (h)
2195 {
2196 struct elf_x86_64_link_hash_entry *eh;
2197 struct elf_dyn_relocs **pp;
2198 struct elf_dyn_relocs *p;
2199
2200 eh = (struct elf_x86_64_link_hash_entry *) h;
2201
2202 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; pp = &p->next)
2203 if (p->sec == sec)
2204 {
2205 /* Everything must go for SEC. */
2206 *pp = p->next;
2207 break;
2208 }
2209 }
2210
2211 r_type = ELF32_R_TYPE (rel->r_info);
2212 if (! elf_x86_64_tls_transition (info, abfd, sec, NULL,
2213 symtab_hdr, sym_hashes,
2214 &r_type, GOT_UNKNOWN,
2215 rel, relend, h, r_symndx))
2216 return FALSE;
2217
2218 switch (r_type)
2219 {
2220 case R_X86_64_TLSLD:
2221 if (htab->tls_ld_got.refcount > 0)
2222 htab->tls_ld_got.refcount -= 1;
2223 break;
2224
2225 case R_X86_64_TLSGD:
2226 case R_X86_64_GOTPC32_TLSDESC:
2227 case R_X86_64_TLSDESC_CALL:
2228 case R_X86_64_GOTTPOFF:
2229 case R_X86_64_GOT32:
2230 case R_X86_64_GOTPCREL:
2231 case R_X86_64_GOT64:
2232 case R_X86_64_GOTPCREL64:
2233 case R_X86_64_GOTPLT64:
2234 if (h != NULL)
2235 {
2236 if (h->got.refcount > 0)
2237 h->got.refcount -= 1;
2238 if (h->type == STT_GNU_IFUNC)
2239 {
2240 if (h->plt.refcount > 0)
2241 h->plt.refcount -= 1;
2242 }
2243 }
2244 else if (local_got_refcounts != NULL)
2245 {
2246 if (local_got_refcounts[r_symndx] > 0)
2247 local_got_refcounts[r_symndx] -= 1;
2248 }
2249 break;
2250
2251 case R_X86_64_8:
2252 case R_X86_64_16:
2253 case R_X86_64_32:
2254 case R_X86_64_64:
2255 case R_X86_64_32S:
2256 case R_X86_64_PC8:
2257 case R_X86_64_PC16:
2258 case R_X86_64_PC32:
2259 case R_X86_64_PC32_BND:
2260 case R_X86_64_PC64:
2261 case R_X86_64_SIZE32:
2262 case R_X86_64_SIZE64:
2263 if (info->shared
2264 && (h == NULL || h->type != STT_GNU_IFUNC))
2265 break;
2266 /* Fall thru */
2267
2268 case R_X86_64_PLT32:
2269 case R_X86_64_PLT32_BND:
2270 case R_X86_64_PLTOFF64:
2271 if (h != NULL)
2272 {
2273 if (h->plt.refcount > 0)
2274 h->plt.refcount -= 1;
2275 }
2276 break;
2277
2278 default:
2279 break;
2280 }
2281 }
2282
2283 return TRUE;
2284 }
2285
2286 /* Adjust a symbol defined by a dynamic object and referenced by a
2287 regular object. The current definition is in some section of the
2288 dynamic object, but we're not including those sections. We have to
2289 change the definition to something the rest of the link can
2290 understand. */
2291
2292 static bfd_boolean
2293 elf_x86_64_adjust_dynamic_symbol (struct bfd_link_info *info,
2294 struct elf_link_hash_entry *h)
2295 {
2296 struct elf_x86_64_link_hash_table *htab;
2297 asection *s;
2298 struct elf_x86_64_link_hash_entry *eh;
2299 struct elf_dyn_relocs *p;
2300
2301 /* STT_GNU_IFUNC symbol must go through PLT. */
2302 if (h->type == STT_GNU_IFUNC)
2303 {
2304 /* All local STT_GNU_IFUNC references must be treated as local
2305 calls via local PLT. */
2306 if (h->ref_regular
2307 && SYMBOL_CALLS_LOCAL (info, h))
2308 {
2309 bfd_size_type pc_count = 0, count = 0;
2310 struct elf_dyn_relocs **pp;
2311
2312 eh = (struct elf_x86_64_link_hash_entry *) h;
2313 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
2314 {
2315 pc_count += p->pc_count;
2316 p->count -= p->pc_count;
2317 p->pc_count = 0;
2318 count += p->count;
2319 if (p->count == 0)
2320 *pp = p->next;
2321 else
2322 pp = &p->next;
2323 }
2324
2325 if (pc_count || count)
2326 {
2327 h->needs_plt = 1;
2328 h->non_got_ref = 1;
2329 if (h->plt.refcount <= 0)
2330 h->plt.refcount = 1;
2331 else
2332 h->plt.refcount += 1;
2333 }
2334 }
2335
2336 if (h->plt.refcount <= 0)
2337 {
2338 h->plt.offset = (bfd_vma) -1;
2339 h->needs_plt = 0;
2340 }
2341 return TRUE;
2342 }
2343
2344 /* If this is a function, put it in the procedure linkage table. We
2345 will fill in the contents of the procedure linkage table later,
2346 when we know the address of the .got section. */
2347 if (h->type == STT_FUNC
2348 || h->needs_plt)
2349 {
2350 if (h->plt.refcount <= 0
2351 || SYMBOL_CALLS_LOCAL (info, h)
2352 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
2353 && h->root.type == bfd_link_hash_undefweak))
2354 {
2355 /* This case can occur if we saw a PLT32 reloc in an input
2356 file, but the symbol was never referred to by a dynamic
2357 object, or if all references were garbage collected. In
2358 such a case, we don't actually need to build a procedure
2359 linkage table, and we can just do a PC32 reloc instead. */
2360 h->plt.offset = (bfd_vma) -1;
2361 h->needs_plt = 0;
2362 }
2363
2364 return TRUE;
2365 }
2366 else
2367 /* It's possible that we incorrectly decided a .plt reloc was
2368 needed for an R_X86_64_PC32 reloc to a non-function sym in
2369 check_relocs. We can't decide accurately between function and
2370 non-function syms in check_relocs; objects loaded later in
2371 the link may change h->type. So fix it now. */
2372 h->plt.offset = (bfd_vma) -1;
2373
2374 /* If this is a weak symbol, and there is a real definition, the
2375 processor independent code will have arranged for us to see the
2376 real definition first, and we can just use the same value. */
2377 if (h->u.weakdef != NULL)
2378 {
2379 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
2380 || h->u.weakdef->root.type == bfd_link_hash_defweak);
2381 h->root.u.def.section = h->u.weakdef->root.u.def.section;
2382 h->root.u.def.value = h->u.weakdef->root.u.def.value;
2383 if (ELIMINATE_COPY_RELOCS || info->nocopyreloc)
2384 {
2385 eh = (struct elf_x86_64_link_hash_entry *) h;
2386 h->non_got_ref = h->u.weakdef->non_got_ref;
2387 eh->needs_copy = h->u.weakdef->needs_copy;
2388 }
2389 return TRUE;
2390 }
2391
2392 /* This is a reference to a symbol defined by a dynamic object which
2393 is not a function. */
2394
2395 /* If we are creating a shared library, we must presume that the
2396 only references to the symbol are via the global offset table.
2397 For such cases we need not do anything here; the relocations will
2398 be handled correctly by relocate_section. */
2399 if (!info->executable)
2400 return TRUE;
2401
2402 /* If there are no references to this symbol that do not use the
2403 GOT, we don't need to generate a copy reloc. */
2404 if (!h->non_got_ref)
2405 return TRUE;
2406
2407 /* If -z nocopyreloc was given, we won't generate them either. */
2408 if (info->nocopyreloc)
2409 {
2410 h->non_got_ref = 0;
2411 return TRUE;
2412 }
2413
2414 if (ELIMINATE_COPY_RELOCS)
2415 {
2416 eh = (struct elf_x86_64_link_hash_entry *) h;
2417 for (p = eh->dyn_relocs; p != NULL; p = p->next)
2418 {
2419 s = p->sec->output_section;
2420 if (s != NULL && (s->flags & SEC_READONLY) != 0)
2421 break;
2422 }
2423
2424 /* If we didn't find any dynamic relocs in read-only sections, then
2425 we'll be keeping the dynamic relocs and avoiding the copy reloc. */
2426 if (p == NULL)
2427 {
2428 h->non_got_ref = 0;
2429 return TRUE;
2430 }
2431 }
2432
2433 /* We must allocate the symbol in our .dynbss section, which will
2434 become part of the .bss section of the executable. There will be
2435 an entry for this symbol in the .dynsym section. The dynamic
2436 object will contain position independent code, so all references
2437 from the dynamic object to this symbol will go through the global
2438 offset table. The dynamic linker will use the .dynsym entry to
2439 determine the address it must put in the global offset table, so
2440 both the dynamic object and the regular object will refer to the
2441 same memory location for the variable. */
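   /* A typical example: an executable that refers directly to a libc
      data symbol such as `environ' gets space for it reserved in
      .dynbss plus an R_X86_64_COPY reloc; at startup ld.so copies the
      initial value from the shared library into that space, and both
      objects then use the executable's copy.  */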
2442
2443 htab = elf_x86_64_hash_table (info);
2444 if (htab == NULL)
2445 return FALSE;
2446
2447 /* We must generate a R_X86_64_COPY reloc to tell the dynamic linker
2448 to copy the initial value out of the dynamic object and into the
2449 runtime process image. */
2450 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
2451 {
2452 const struct elf_backend_data *bed;
2453 bed = get_elf_backend_data (info->output_bfd);
2454 htab->srelbss->size += bed->s->sizeof_rela;
2455 h->needs_copy = 1;
2456 }
2457
2458 s = htab->sdynbss;
2459
2460 return _bfd_elf_adjust_dynamic_copy (info, h, s);
2461 }
2462
2463 /* Allocate space in .plt, .got and associated reloc sections for
2464 dynamic relocs. */
2465
2466 static bfd_boolean
2467 elf_x86_64_allocate_dynrelocs (struct elf_link_hash_entry *h, void * inf)
2468 {
2469 struct bfd_link_info *info;
2470 struct elf_x86_64_link_hash_table *htab;
2471 struct elf_x86_64_link_hash_entry *eh;
2472 struct elf_dyn_relocs *p;
2473 const struct elf_backend_data *bed;
2474 unsigned int plt_entry_size;
2475
2476 if (h->root.type == bfd_link_hash_indirect)
2477 return TRUE;
2478
2479 eh = (struct elf_x86_64_link_hash_entry *) h;
2480
2481 info = (struct bfd_link_info *) inf;
2482 htab = elf_x86_64_hash_table (info);
2483 if (htab == NULL)
2484 return FALSE;
2485 bed = get_elf_backend_data (info->output_bfd);
2486 plt_entry_size = GET_PLT_ENTRY_SIZE (info->output_bfd);
2487
2488 /* We can't use the GOT PLT if pointer equality is needed since
2489 finish_dynamic_symbol won't clear symbol value and the dynamic
2490 linker won't update the GOT slot. We will get into an infinite
2491 loop at run-time. */
2492 if (htab->plt_got != NULL
2493 && h->type != STT_GNU_IFUNC
2494 && !h->pointer_equality_needed
2495 && h->plt.refcount > 0
2496 && h->got.refcount > 0)
2497 {
2498 /* Don't use the regular PLT if there are both GOT and GOTPLT
2499 relocations. */
2500 h->plt.offset = (bfd_vma) -1;
2501
2502 /* Use the GOT PLT. */
2503 eh->plt_got.refcount = 1;
2504 }
2505
2506 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle it
2507 here if it is defined and referenced in a non-shared object. */
2508 if (h->type == STT_GNU_IFUNC
2509 && h->def_regular)
2510 {
2511 if (_bfd_elf_allocate_ifunc_dyn_relocs (info, h,
2512 &eh->dyn_relocs,
2513 plt_entry_size,
2514 plt_entry_size,
2515 GOT_ENTRY_SIZE))
2516 {
2517 asection *s = htab->plt_bnd;
2518 if (h->plt.offset != (bfd_vma) -1 && s != NULL)
2519 {
2520 /* Use the .plt.bnd section if it is created. */
2521 eh->plt_bnd.offset = s->size;
2522
2523 /* Make room for this entry in the .plt.bnd section. */
2524 s->size += sizeof (elf_x86_64_legacy_plt2_entry);
2525 }
2526
2527 return TRUE;
2528 }
2529 else
2530 return FALSE;
2531 }
2532 else if (htab->elf.dynamic_sections_created
2533 && (h->plt.refcount > 0 || eh->plt_got.refcount > 0))
2534 {
2535 bfd_boolean use_plt_got = eh->plt_got.refcount > 0;
2536
2537 /* Make sure this symbol is output as a dynamic symbol.
2538 Undefined weak syms won't yet be marked as dynamic. */
2539 if (h->dynindx == -1
2540 && !h->forced_local)
2541 {
2542 if (! bfd_elf_link_record_dynamic_symbol (info, h))
2543 return FALSE;
2544 }
2545
2546 if (info->shared
2547 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
2548 {
2549 asection *s = htab->elf.splt;
2550 asection *bnd_s = htab->plt_bnd;
2551 asection *got_s = htab->plt_got;
2552
2553 /* If this is the first .plt entry, make room for the special
2554 first entry. */
2555 if (s->size == 0)
2556 s->size = plt_entry_size;
2557
2558 if (use_plt_got)
2559 eh->plt_got.offset = got_s->size;
2560 else
2561 {
2562 h->plt.offset = s->size;
2563 if (bnd_s)
2564 eh->plt_bnd.offset = bnd_s->size;
2565 }
2566
2567 /* If this symbol is not defined in a regular file, and we are
2568 not generating a shared library, then set the symbol to this
2569 location in the .plt. This is required to make function
2570 pointers compare as equal between the normal executable and
2571 the shared library. */
2572 if (! info->shared
2573 && !h->def_regular)
2574 {
2575 if (use_plt_got)
2576 {
2577 /* We need to make a call to the entry of the GOT PLT
2578 instead of the regular PLT entry. */
2579 h->root.u.def.section = got_s;
2580 h->root.u.def.value = eh->plt_got.offset;
2581 }
2582 else
2583 {
2584 if (bnd_s)
2585 {
2586 /* We need to make a call to the entry of the second
2587 PLT instead of the regular PLT entry. */
2588 h->root.u.def.section = bnd_s;
2589 h->root.u.def.value = eh->plt_bnd.offset;
2590 }
2591 else
2592 {
2593 h->root.u.def.section = s;
2594 h->root.u.def.value = h->plt.offset;
2595 }
2596 }
2597 }
2598
2599 /* Make room for this entry. */
2600 if (use_plt_got)
2601 got_s->size += sizeof (elf_x86_64_legacy_plt2_entry);
2602 else
2603 {
2604 s->size += plt_entry_size;
2605 if (bnd_s)
2606 bnd_s->size += sizeof (elf_x86_64_legacy_plt2_entry);
2607
2608 /* We also need to make an entry in the .got.plt section,
2609 which will be placed in the .got section by the linker
2610 script. */
2611 htab->elf.sgotplt->size += GOT_ENTRY_SIZE;
2612
2613 /* We also need to make an entry in the .rela.plt
2614 section. */
2615 htab->elf.srelplt->size += bed->s->sizeof_rela;
2616 htab->elf.srelplt->reloc_count++;
2617 }
2618 }
2619 else
2620 {
2621 h->plt.offset = (bfd_vma) -1;
2622 h->needs_plt = 0;
2623 }
2624 }
2625 else
2626 {
2627 h->plt.offset = (bfd_vma) -1;
2628 h->needs_plt = 0;
2629 }
2630
2631 eh->tlsdesc_got = (bfd_vma) -1;
2632
2633 /* If R_X86_64_GOTTPOFF symbol is now local to the binary,
2634 make it a R_X86_64_TPOFF32 requiring no GOT entry. */
2635 if (h->got.refcount > 0
2636 && info->executable
2637 && h->dynindx == -1
2638 && elf_x86_64_hash_entry (h)->tls_type == GOT_TLS_IE)
2639 {
2640 h->got.offset = (bfd_vma) -1;
2641 }
2642 else if (h->got.refcount > 0)
2643 {
2644 asection *s;
2645 bfd_boolean dyn;
2646 int tls_type = elf_x86_64_hash_entry (h)->tls_type;
2647
2648 /* Make sure this symbol is output as a dynamic symbol.
2649 Undefined weak syms won't yet be marked as dynamic. */
2650 if (h->dynindx == -1
2651 && !h->forced_local)
2652 {
2653 if (! bfd_elf_link_record_dynamic_symbol (info, h))
2654 return FALSE;
2655 }
2656
2657 if (GOT_TLS_GDESC_P (tls_type))
2658 {
2659 eh->tlsdesc_got = htab->elf.sgotplt->size
2660 - elf_x86_64_compute_jump_table_size (htab);
2661 htab->elf.sgotplt->size += 2 * GOT_ENTRY_SIZE;
2662 h->got.offset = (bfd_vma) -2;
2663 }
2664 if (! GOT_TLS_GDESC_P (tls_type)
2665 || GOT_TLS_GD_P (tls_type))
2666 {
2667 s = htab->elf.sgot;
2668 h->got.offset = s->size;
2669 s->size += GOT_ENTRY_SIZE;
2670 if (GOT_TLS_GD_P (tls_type))
2671 s->size += GOT_ENTRY_SIZE;
2672 }
2673 dyn = htab->elf.dynamic_sections_created;
2674 /* R_X86_64_TLSGD needs one dynamic relocation if local symbol
2675 and two if global.
2676 R_X86_64_GOTTPOFF needs one dynamic relocation. */
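/* Concretely: a GD GOT pair needs R_X86_64_DTPMOD64, plus
   R_X86_64_DTPOFF64 when the symbol is global and its offset is not
   known at link time, while an IE slot needs a single
   R_X86_64_TPOFF64.  */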
2677 if ((GOT_TLS_GD_P (tls_type) && h->dynindx == -1)
2678 || tls_type == GOT_TLS_IE)
2679 htab->elf.srelgot->size += bed->s->sizeof_rela;
2680 else if (GOT_TLS_GD_P (tls_type))
2681 htab->elf.srelgot->size += 2 * bed->s->sizeof_rela;
2682 else if (! GOT_TLS_GDESC_P (tls_type)
2683 && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
2684 || h->root.type != bfd_link_hash_undefweak)
2685 && (info->shared
2686 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
2687 htab->elf.srelgot->size += bed->s->sizeof_rela;
2688 if (GOT_TLS_GDESC_P (tls_type))
2689 {
2690 htab->elf.srelplt->size += bed->s->sizeof_rela;
2691 htab->tlsdesc_plt = (bfd_vma) -1;
2692 }
2693 }
2694 else
2695 h->got.offset = (bfd_vma) -1;
2696
2697 if (eh->dyn_relocs == NULL)
2698 return TRUE;
2699
2700 /* In the shared -Bsymbolic case, discard space allocated for
2701 dynamic pc-relative relocs against symbols which turn out to be
2702 defined in regular objects. For the normal shared case, discard
2703 space for pc-relative relocs that have become local due to symbol
2704 visibility changes. */
2705
2706 if (info->shared)
2707 {
2708 /* Relocs that use pc_count are those that appear on a call
2709 insn, or certain REL relocs that can be generated via assembly.
2710 We want calls to protected symbols to resolve directly to the
2711 function rather than going via the plt. If people want
2712 function pointer comparisons to work as expected then they
2713 should avoid writing weird assembly. */
2714 if (SYMBOL_CALLS_LOCAL (info, h))
2715 {
2716 struct elf_dyn_relocs **pp;
2717
2718 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
2719 {
2720 p->count -= p->pc_count;
2721 p->pc_count = 0;
2722 if (p->count == 0)
2723 *pp = p->next;
2724 else
2725 pp = &p->next;
2726 }
2727 }
2728
2729 /* Also discard relocs on undefined weak syms with non-default
2730 visibility. */
2731 if (eh->dyn_relocs != NULL)
2732 {
2733 if (h->root.type == bfd_link_hash_undefweak)
2734 {
2735 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
2736 eh->dyn_relocs = NULL;
2737
2738 /* Make sure undefined weak symbols are output as a dynamic
2739 symbol in PIEs. */
2740 else if (h->dynindx == -1
2741 && ! h->forced_local
2742 && ! bfd_elf_link_record_dynamic_symbol (info, h))
2743 return FALSE;
2744 }
2745 /* For PIE, discard space for pc-relative relocs against
2746 symbols which turn out to need copy relocs. */
2747 else if (info->executable
2748 && (h->needs_copy || eh->needs_copy)
2749 && h->def_dynamic
2750 && !h->def_regular)
2751 {
2752 struct elf_dyn_relocs **pp;
2753
2754 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
2755 {
2756 if (p->pc_count != 0)
2757 *pp = p->next;
2758 else
2759 pp = &p->next;
2760 }
2761 }
2762 }
2763 }
2764 else if (ELIMINATE_COPY_RELOCS)
2765 {
2766 /* For the non-shared case, discard space for relocs against
2767 symbols which turn out to need copy relocs or are not
2768 dynamic. */
2769
2770 if (!h->non_got_ref
2771 && ((h->def_dynamic
2772 && !h->def_regular)
2773 || (htab->elf.dynamic_sections_created
2774 && (h->root.type == bfd_link_hash_undefweak
2775 || h->root.type == bfd_link_hash_undefined))))
2776 {
2777 /* Make sure this symbol is output as a dynamic symbol.
2778 Undefined weak syms won't yet be marked as dynamic. */
2779 if (h->dynindx == -1
2780 && ! h->forced_local
2781 && ! bfd_elf_link_record_dynamic_symbol (info, h))
2782 return FALSE;
2783
2784 /* If that succeeded, we know we'll be keeping all the
2785 relocs. */
2786 if (h->dynindx != -1)
2787 goto keep;
2788 }
2789
2790 eh->dyn_relocs = NULL;
2791
2792 keep: ;
2793 }
2794
2795 /* Finally, allocate space. */
2796 for (p = eh->dyn_relocs; p != NULL; p = p->next)
2797 {
2798 asection * sreloc;
2799
2800 sreloc = elf_section_data (p->sec)->sreloc;
2801
2802 BFD_ASSERT (sreloc != NULL);
2803
2804 sreloc->size += p->count * bed->s->sizeof_rela;
2805 }
2806
2807 return TRUE;
2808 }
2809
2810 /* Allocate space in .plt, .got and associated reloc sections for
2811 local dynamic relocs. */
2812
2813 static bfd_boolean
2814 elf_x86_64_allocate_local_dynrelocs (void **slot, void *inf)
2815 {
2816 struct elf_link_hash_entry *h
2817 = (struct elf_link_hash_entry *) *slot;
2818
2819 if (h->type != STT_GNU_IFUNC
2820 || !h->def_regular
2821 || !h->ref_regular
2822 || !h->forced_local
2823 || h->root.type != bfd_link_hash_defined)
2824 abort ();
2825
2826 return elf_x86_64_allocate_dynrelocs (h, inf);
2827 }
2828
2829 /* Find any dynamic relocs that apply to read-only sections. */
2830
2831 static bfd_boolean
2832 elf_x86_64_readonly_dynrelocs (struct elf_link_hash_entry *h,
2833 void * inf)
2834 {
2835 struct elf_x86_64_link_hash_entry *eh;
2836 struct elf_dyn_relocs *p;
2837
2838 /* Skip local IFUNC symbols. */
2839 if (h->forced_local && h->type == STT_GNU_IFUNC)
2840 return TRUE;
2841
2842 eh = (struct elf_x86_64_link_hash_entry *) h;
2843 for (p = eh->dyn_relocs; p != NULL; p = p->next)
2844 {
2845 asection *s = p->sec->output_section;
2846
2847 if (s != NULL && (s->flags & SEC_READONLY) != 0)
2848 {
2849 struct bfd_link_info *info = (struct bfd_link_info *) inf;
2850
2851 info->flags |= DF_TEXTREL;
2852
2853 if (info->warn_shared_textrel && info->shared)
2854 info->callbacks->einfo (_("%P: %B: warning: relocation against `%s' in readonly section `%A'.\n"),
2855 p->sec->owner, h->root.root.string,
2856 p->sec);
2857
2858 /* Not an error, just cut short the traversal. */
2859 return FALSE;
2860 }
2861 }
2862 return TRUE;
2863 }
2864
2865 /* Convert
2866 mov foo@GOTPCREL(%rip), %reg
2867 to
2868 lea foo(%rip), %reg
2869 with the local symbol, foo. */
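/* The rewrite is purely byte-level: `mov' from memory is opcode 0x8b
   followed by a ModRM byte, and `lea' is 0x8d with the same ModRM
   form, so flipping the opcode byte two bytes before the relocated
   displacement is enough, e.g. (REX prefix unchanged):

     48 8b 05 xx xx xx xx   mov foo@GOTPCREL(%rip), %rax
  -> 48 8d 05 xx xx xx xx   lea foo(%rip), %rax

   The relocation is then switched from R_X86_64_GOTPCREL to
   R_X86_64_PC32 and the symbol's GOT reference count is dropped.  */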
2870
2871 static bfd_boolean
2872 elf_x86_64_convert_mov_to_lea (bfd *abfd, asection *sec,
2873 struct bfd_link_info *link_info)
2874 {
2875 Elf_Internal_Shdr *symtab_hdr;
2876 Elf_Internal_Rela *internal_relocs;
2877 Elf_Internal_Rela *irel, *irelend;
2878 bfd_byte *contents;
2879 struct elf_x86_64_link_hash_table *htab;
2880 bfd_boolean changed_contents;
2881 bfd_boolean changed_relocs;
2882 bfd_signed_vma *local_got_refcounts;
2883
2884 /* Don't even try to convert non-ELF outputs. */
2885 if (!is_elf_hash_table (link_info->hash))
2886 return FALSE;
2887
2888 /* Nothing to do if there is no code, no relocations or no output. */
2889 if ((sec->flags & (SEC_CODE | SEC_RELOC)) != (SEC_CODE | SEC_RELOC)
2890 || sec->reloc_count == 0
2891 || bfd_is_abs_section (sec->output_section))
2892 return TRUE;
2893
2894 symtab_hdr = &elf_tdata (abfd)->symtab_hdr;
2895
2896 /* Load the relocations for this section. */
2897 internal_relocs = (_bfd_elf_link_read_relocs
2898 (abfd, sec, NULL, (Elf_Internal_Rela *) NULL,
2899 link_info->keep_memory));
2900 if (internal_relocs == NULL)
2901 return FALSE;
2902
2903 htab = elf_x86_64_hash_table (link_info);
2904 changed_contents = FALSE;
2905 changed_relocs = FALSE;
2906 local_got_refcounts = elf_local_got_refcounts (abfd);
2907
2908 /* Get the section contents. */
2909 if (elf_section_data (sec)->this_hdr.contents != NULL)
2910 contents = elf_section_data (sec)->this_hdr.contents;
2911 else
2912 {
2913 if (!bfd_malloc_and_get_section (abfd, sec, &contents))
2914 goto error_return;
2915 }
2916
2917 irelend = internal_relocs + sec->reloc_count;
2918 for (irel = internal_relocs; irel < irelend; irel++)
2919 {
2920 unsigned int r_type = ELF32_R_TYPE (irel->r_info);
2921 unsigned int r_symndx = htab->r_sym (irel->r_info);
2922 unsigned int indx;
2923 struct elf_link_hash_entry *h;
2924
2925 if (r_type != R_X86_64_GOTPCREL)
2926 continue;
2927
2928 /* Get the symbol referred to by the reloc. */
2929 if (r_symndx < symtab_hdr->sh_info)
2930 {
2931 Elf_Internal_Sym *isym;
2932
2933 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
2934 abfd, r_symndx);
2935
2936 /* STT_GNU_IFUNC must keep R_X86_64_GOTPCREL relocation. */
2937 if (ELF_ST_TYPE (isym->st_info) != STT_GNU_IFUNC
2938 && irel->r_offset >= 2
2939 && bfd_get_8 (input_bfd,
2940 contents + irel->r_offset - 2) == 0x8b)
2941 {
2942 bfd_put_8 (output_bfd, 0x8d,
2943 contents + irel->r_offset - 2);
2944 irel->r_info = htab->r_info (r_symndx, R_X86_64_PC32);
2945 if (local_got_refcounts != NULL
2946 && local_got_refcounts[r_symndx] > 0)
2947 local_got_refcounts[r_symndx] -= 1;
2948 changed_contents = TRUE;
2949 changed_relocs = TRUE;
2950 }
2951 continue;
2952 }
2953
2954 indx = r_symndx - symtab_hdr->sh_info;
2955 h = elf_sym_hashes (abfd)[indx];
2956 BFD_ASSERT (h != NULL);
2957
2958 while (h->root.type == bfd_link_hash_indirect
2959 || h->root.type == bfd_link_hash_warning)
2960 h = (struct elf_link_hash_entry *) h->root.u.i.link;
2961
2962 /* STT_GNU_IFUNC must keep R_X86_64_GOTPCREL relocation. We also
2963 avoid optimizing _DYNAMIC since ld.so may use its link-time
2964 address. */
2965 if (h->def_regular
2966 && h->type != STT_GNU_IFUNC
2967 && h != htab->elf.hdynamic
2968 && SYMBOL_REFERENCES_LOCAL (link_info, h)
2969 && irel->r_offset >= 2
2970 && bfd_get_8 (input_bfd,
2971 contents + irel->r_offset - 2) == 0x8b)
2972 {
2973 bfd_put_8 (output_bfd, 0x8d,
2974 contents + irel->r_offset - 2);
2975 irel->r_info = htab->r_info (r_symndx, R_X86_64_PC32);
2976 if (h->got.refcount > 0)
2977 h->got.refcount -= 1;
2978 changed_contents = TRUE;
2979 changed_relocs = TRUE;
2980 }
2981 }
2982
2983 if (contents != NULL
2984 && elf_section_data (sec)->this_hdr.contents != contents)
2985 {
2986 if (!changed_contents && !link_info->keep_memory)
2987 free (contents);
2988 else
2989 {
2990 /* Cache the section contents for elf_link_input_bfd. */
2991 elf_section_data (sec)->this_hdr.contents = contents;
2992 }
2993 }
2994
2995 if (elf_section_data (sec)->relocs != internal_relocs)
2996 {
2997 if (!changed_relocs)
2998 free (internal_relocs);
2999 else
3000 elf_section_data (sec)->relocs = internal_relocs;
3001 }
3002
3003 return TRUE;
3004
3005 error_return:
3006 if (contents != NULL
3007 && elf_section_data (sec)->this_hdr.contents != contents)
3008 free (contents);
3009 if (internal_relocs != NULL
3010 && elf_section_data (sec)->relocs != internal_relocs)
3011 free (internal_relocs);
3012 return FALSE;
3013 }
3014
3015 /* Set the sizes of the dynamic sections. */
3016
3017 static bfd_boolean
3018 elf_x86_64_size_dynamic_sections (bfd *output_bfd,
3019 struct bfd_link_info *info)
3020 {
3021 struct elf_x86_64_link_hash_table *htab;
3022 bfd *dynobj;
3023 asection *s;
3024 bfd_boolean relocs;
3025 bfd *ibfd;
3026 const struct elf_backend_data *bed;
3027
3028 htab = elf_x86_64_hash_table (info);
3029 if (htab == NULL)
3030 return FALSE;
3031 bed = get_elf_backend_data (output_bfd);
3032
3033 dynobj = htab->elf.dynobj;
3034 if (dynobj == NULL)
3035 abort ();
3036
3037 if (htab->elf.dynamic_sections_created)
3038 {
3039 /* Set the contents of the .interp section to the interpreter. */
3040 if (info->executable)
3041 {
3042 s = bfd_get_linker_section (dynobj, ".interp");
3043 if (s == NULL)
3044 abort ();
3045 s->size = htab->dynamic_interpreter_size;
3046 s->contents = (unsigned char *) htab->dynamic_interpreter;
3047 }
3048 }
3049
3050 /* Set up .got offsets for local syms, and space for local dynamic
3051 relocs. */
3052 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
3053 {
3054 bfd_signed_vma *local_got;
3055 bfd_signed_vma *end_local_got;
3056 char *local_tls_type;
3057 bfd_vma *local_tlsdesc_gotent;
3058 bfd_size_type locsymcount;
3059 Elf_Internal_Shdr *symtab_hdr;
3060 asection *srel;
3061
3062 if (! is_x86_64_elf (ibfd))
3063 continue;
3064
3065 for (s = ibfd->sections; s != NULL; s = s->next)
3066 {
3067 struct elf_dyn_relocs *p;
3068
3069 if (!elf_x86_64_convert_mov_to_lea (ibfd, s, info))
3070 return FALSE;
3071
3072 for (p = (struct elf_dyn_relocs *)
3073 (elf_section_data (s)->local_dynrel);
3074 p != NULL;
3075 p = p->next)
3076 {
3077 if (!bfd_is_abs_section (p->sec)
3078 && bfd_is_abs_section (p->sec->output_section))
3079 {
3080 /* Input section has been discarded, either because
3081 it is a copy of a linkonce section or due to
3082 linker script /DISCARD/, so we'll be discarding
3083 the relocs too. */
3084 }
3085 else if (p->count != 0)
3086 {
3087 srel = elf_section_data (p->sec)->sreloc;
3088 srel->size += p->count * bed->s->sizeof_rela;
3089 if ((p->sec->output_section->flags & SEC_READONLY) != 0
3090 && (info->flags & DF_TEXTREL) == 0)
3091 {
3092 info->flags |= DF_TEXTREL;
3093 if (info->warn_shared_textrel && info->shared)
3094 info->callbacks->einfo (_("%P: %B: warning: relocation in readonly section `%A'.\n"),
3095 p->sec->owner, p->sec);
3096 }
3097 }
3098 }
3099 }
3100
3101 local_got = elf_local_got_refcounts (ibfd);
3102 if (!local_got)
3103 continue;
3104
3105 symtab_hdr = &elf_symtab_hdr (ibfd);
3106 locsymcount = symtab_hdr->sh_info;
3107 end_local_got = local_got + locsymcount;
3108 local_tls_type = elf_x86_64_local_got_tls_type (ibfd);
3109 local_tlsdesc_gotent = elf_x86_64_local_tlsdesc_gotent (ibfd);
3110 s = htab->elf.sgot;
3111 srel = htab->elf.srelgot;
3112 for (; local_got < end_local_got;
3113 ++local_got, ++local_tls_type, ++local_tlsdesc_gotent)
3114 {
3115 *local_tlsdesc_gotent = (bfd_vma) -1;
3116 if (*local_got > 0)
3117 {
3118 if (GOT_TLS_GDESC_P (*local_tls_type))
3119 {
3120 *local_tlsdesc_gotent = htab->elf.sgotplt->size
3121 - elf_x86_64_compute_jump_table_size (htab);
3122 htab->elf.sgotplt->size += 2 * GOT_ENTRY_SIZE;
3123 *local_got = (bfd_vma) -2;
3124 }
3125 if (! GOT_TLS_GDESC_P (*local_tls_type)
3126 || GOT_TLS_GD_P (*local_tls_type))
3127 {
3128 *local_got = s->size;
3129 s->size += GOT_ENTRY_SIZE;
3130 if (GOT_TLS_GD_P (*local_tls_type))
3131 s->size += GOT_ENTRY_SIZE;
3132 }
3133 if (info->shared
3134 || GOT_TLS_GD_ANY_P (*local_tls_type)
3135 || *local_tls_type == GOT_TLS_IE)
3136 {
3137 if (GOT_TLS_GDESC_P (*local_tls_type))
3138 {
3139 htab->elf.srelplt->size
3140 += bed->s->sizeof_rela;
3141 htab->tlsdesc_plt = (bfd_vma) -1;
3142 }
3143 if (! GOT_TLS_GDESC_P (*local_tls_type)
3144 || GOT_TLS_GD_P (*local_tls_type))
3145 srel->size += bed->s->sizeof_rela;
3146 }
3147 }
3148 else
3149 *local_got = (bfd_vma) -1;
3150 }
3151 }
3152
3153 if (htab->tls_ld_got.refcount > 0)
3154 {
3155 /* Allocate 2 got entries and 1 dynamic reloc for R_X86_64_TLSLD
3156 relocs. */
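/* The two slots hold the module ID and a zero offset for the
   local-dynamic model; only the module ID needs a run-time
   R_X86_64_DTPMOD64 relocation, hence the single sizeof_rela.  */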
3157 htab->tls_ld_got.offset = htab->elf.sgot->size;
3158 htab->elf.sgot->size += 2 * GOT_ENTRY_SIZE;
3159 htab->elf.srelgot->size += bed->s->sizeof_rela;
3160 }
3161 else
3162 htab->tls_ld_got.offset = -1;
3163
3164 /* Allocate global sym .plt and .got entries, and space for global
3165 sym dynamic relocs. */
3166 elf_link_hash_traverse (&htab->elf, elf_x86_64_allocate_dynrelocs,
3167 info);
3168
3169 /* Allocate .plt and .got entries, and space for local symbols. */
3170 htab_traverse (htab->loc_hash_table,
3171 elf_x86_64_allocate_local_dynrelocs,
3172 info);
3173
3174 /* For every jump slot reserved in the sgotplt, reloc_count is
3175 incremented. However, when we reserve space for TLS descriptors,
3176 it's not incremented, so in order to compute the space reserved
3177 for them, it suffices to multiply the reloc count by the jump
3178 slot size.
3179
3180 PR ld/13302: We start next_irelative_index at the end of .rela.plt
3181 so that R_X86_64_IRELATIVE entries come last. */
3182 if (htab->elf.srelplt)
3183 {
3184 htab->sgotplt_jump_table_size
3185 = elf_x86_64_compute_jump_table_size (htab);
3186 htab->next_irelative_index = htab->elf.srelplt->reloc_count - 1;
3187 }
3188 else if (htab->elf.irelplt)
3189 htab->next_irelative_index = htab->elf.irelplt->reloc_count - 1;
3190
3191 if (htab->tlsdesc_plt)
3192 {
3193 /* If we're not using lazy TLS relocations, don't generate the
3194 PLT and GOT entries they require. */
3195 if ((info->flags & DF_BIND_NOW))
3196 htab->tlsdesc_plt = 0;
3197 else
3198 {
3199 htab->tlsdesc_got = htab->elf.sgot->size;
3200 htab->elf.sgot->size += GOT_ENTRY_SIZE;
3201 /* Reserve room for the initial entry.
3202 FIXME: we could probably do away with it in this case. */
3203 if (htab->elf.splt->size == 0)
3204 htab->elf.splt->size += GET_PLT_ENTRY_SIZE (output_bfd);
3205 htab->tlsdesc_plt = htab->elf.splt->size;
3206 htab->elf.splt->size += GET_PLT_ENTRY_SIZE (output_bfd);
3207 }
3208 }
3209
3210 if (htab->elf.sgotplt)
3211 {
3212 /* Don't allocate the .got.plt section if there are neither GOT nor PLT
3213 entries and there is no reference to _GLOBAL_OFFSET_TABLE_. */
3214 if ((htab->elf.hgot == NULL
3215 || !htab->elf.hgot->ref_regular_nonweak)
3216 && (htab->elf.sgotplt->size
3217 == get_elf_backend_data (output_bfd)->got_header_size)
3218 && (htab->elf.splt == NULL
3219 || htab->elf.splt->size == 0)
3220 && (htab->elf.sgot == NULL
3221 || htab->elf.sgot->size == 0)
3222 && (htab->elf.iplt == NULL
3223 || htab->elf.iplt->size == 0)
3224 && (htab->elf.igotplt == NULL
3225 || htab->elf.igotplt->size == 0))
3226 htab->elf.sgotplt->size = 0;
3227 }
3228
3229 if (htab->plt_eh_frame != NULL
3230 && htab->elf.splt != NULL
3231 && htab->elf.splt->size != 0
3232 && !bfd_is_abs_section (htab->elf.splt->output_section)
3233 && _bfd_elf_eh_frame_present (info))
3234 {
3235 const struct elf_x86_64_backend_data *arch_data
3236 = get_elf_x86_64_arch_data (bed);
3237 htab->plt_eh_frame->size = arch_data->eh_frame_plt_size;
3238 }
3239
3240 /* We now have determined the sizes of the various dynamic sections.
3241 Allocate memory for them. */
3242 relocs = FALSE;
3243 for (s = dynobj->sections; s != NULL; s = s->next)
3244 {
3245 if ((s->flags & SEC_LINKER_CREATED) == 0)
3246 continue;
3247
3248 if (s == htab->elf.splt
3249 || s == htab->elf.sgot
3250 || s == htab->elf.sgotplt
3251 || s == htab->elf.iplt
3252 || s == htab->elf.igotplt
3253 || s == htab->plt_bnd
3254 || s == htab->plt_got
3255 || s == htab->plt_eh_frame
3256 || s == htab->sdynbss)
3257 {
3258 /* Strip this section if we don't need it; see the
3259 comment below. */
3260 }
3261 else if (CONST_STRNEQ (bfd_get_section_name (dynobj, s), ".rela"))
3262 {
3263 if (s->size != 0 && s != htab->elf.srelplt)
3264 relocs = TRUE;
3265
3266 /* We use the reloc_count field as a counter if we need
3267 to copy relocs into the output file. */
3268 if (s != htab->elf.srelplt)
3269 s->reloc_count = 0;
3270 }
3271 else
3272 {
3273 /* It's not one of our sections, so don't allocate space. */
3274 continue;
3275 }
3276
3277 if (s->size == 0)
3278 {
3279 /* If we don't need this section, strip it from the
3280 output file. This is mostly to handle .rela.bss and
3281 .rela.plt. We must create both sections in
3282 create_dynamic_sections, because they must be created
3283 before the linker maps input sections to output
3284 sections. The linker does that before
3285 adjust_dynamic_symbol is called, and it is that
3286 function which decides whether anything needs to go
3287 into these sections. */
3288
3289 s->flags |= SEC_EXCLUDE;
3290 continue;
3291 }
3292
3293 if ((s->flags & SEC_HAS_CONTENTS) == 0)
3294 continue;
3295
3296 /* Allocate memory for the section contents. We use bfd_zalloc
3297 here in case unused entries are not reclaimed before the
3298 section's contents are written out. This should not happen,
3299 but this way if it does, we get a R_X86_64_NONE reloc instead
3300 of garbage. */
3301 s->contents = (bfd_byte *) bfd_zalloc (dynobj, s->size);
3302 if (s->contents == NULL)
3303 return FALSE;
3304 }
3305
3306 if (htab->plt_eh_frame != NULL
3307 && htab->plt_eh_frame->contents != NULL)
3308 {
3309 const struct elf_x86_64_backend_data *arch_data
3310 = get_elf_x86_64_arch_data (bed);
3311
3312 memcpy (htab->plt_eh_frame->contents,
3313 arch_data->eh_frame_plt, htab->plt_eh_frame->size);
3314 bfd_put_32 (dynobj, htab->elf.splt->size,
3315 htab->plt_eh_frame->contents + PLT_FDE_LEN_OFFSET);
3316 }
3317
3318 if (htab->elf.dynamic_sections_created)
3319 {
3320 /* Add some entries to the .dynamic section. We fill in the
3321 values later, in elf_x86_64_finish_dynamic_sections, but we
3322 must add the entries now so that we get the correct size for
3323 the .dynamic section. The DT_DEBUG entry is filled in by the
3324 dynamic linker and used by the debugger. */
3325 #define add_dynamic_entry(TAG, VAL) \
3326 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
3327
3328 if (info->executable)
3329 {
3330 if (!add_dynamic_entry (DT_DEBUG, 0))
3331 return FALSE;
3332 }
3333
3334 if (htab->elf.splt->size != 0)
3335 {
3336 if (!add_dynamic_entry (DT_PLTGOT, 0)
3337 || !add_dynamic_entry (DT_PLTRELSZ, 0)
3338 || !add_dynamic_entry (DT_PLTREL, DT_RELA)
3339 || !add_dynamic_entry (DT_JMPREL, 0))
3340 return FALSE;
3341
3342 if (htab->tlsdesc_plt
3343 && (!add_dynamic_entry (DT_TLSDESC_PLT, 0)
3344 || !add_dynamic_entry (DT_TLSDESC_GOT, 0)))
3345 return FALSE;
3346 }
3347
3348 if (relocs)
3349 {
3350 if (!add_dynamic_entry (DT_RELA, 0)
3351 || !add_dynamic_entry (DT_RELASZ, 0)
3352 || !add_dynamic_entry (DT_RELAENT, bed->s->sizeof_rela))
3353 return FALSE;
3354
3355 /* If any dynamic relocs apply to a read-only section,
3356 then we need a DT_TEXTREL entry. */
3357 if ((info->flags & DF_TEXTREL) == 0)
3358 elf_link_hash_traverse (&htab->elf,
3359 elf_x86_64_readonly_dynrelocs,
3360 info);
3361
3362 if ((info->flags & DF_TEXTREL) != 0)
3363 {
3364 if (!add_dynamic_entry (DT_TEXTREL, 0))
3365 return FALSE;
3366 }
3367 }
3368 }
3369 #undef add_dynamic_entry
3370
3371 return TRUE;
3372 }
3373
3374 static bfd_boolean
3375 elf_x86_64_always_size_sections (bfd *output_bfd,
3376 struct bfd_link_info *info)
3377 {
3378 asection *tls_sec = elf_hash_table (info)->tls_sec;
3379
3380 if (tls_sec)
3381 {
3382 struct elf_link_hash_entry *tlsbase;
3383
3384 tlsbase = elf_link_hash_lookup (elf_hash_table (info),
3385 "_TLS_MODULE_BASE_",
3386 FALSE, FALSE, FALSE);
3387
3388 if (tlsbase && tlsbase->type == STT_TLS)
3389 {
3390 struct elf_x86_64_link_hash_table *htab;
3391 struct bfd_link_hash_entry *bh = NULL;
3392 const struct elf_backend_data *bed
3393 = get_elf_backend_data (output_bfd);
3394
3395 htab = elf_x86_64_hash_table (info);
3396 if (htab == NULL)
3397 return FALSE;
3398
3399 if (!(_bfd_generic_link_add_one_symbol
3400 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
3401 tls_sec, 0, NULL, FALSE,
3402 bed->collect, &bh)))
3403 return FALSE;
3404
3405 htab->tls_module_base = bh;
3406
3407 tlsbase = (struct elf_link_hash_entry *)bh;
3408 tlsbase->def_regular = 1;
3409 tlsbase->other = STV_HIDDEN;
3410 (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
3411 }
3412 }
3413
3414 return TRUE;
3415 }
3416
3417 /* _TLS_MODULE_BASE_ needs to be treated especially when linking
3418 executables. Rather than setting it to the beginning of the TLS
3419 section, we have to set it to the end. This function may be called
3420 multiple times; it is idempotent. */
3421
3422 static void
3423 elf_x86_64_set_tls_module_base (struct bfd_link_info *info)
3424 {
3425 struct elf_x86_64_link_hash_table *htab;
3426 struct bfd_link_hash_entry *base;
3427
3428 if (!info->executable)
3429 return;
3430
3431 htab = elf_x86_64_hash_table (info);
3432 if (htab == NULL)
3433 return;
3434
3435 base = htab->tls_module_base;
3436 if (base == NULL)
3437 return;
3438
3439 base->u.def.value = htab->elf.tls_size;
3440 }
3441
3442 /* Return the base VMA address which should be subtracted from real addresses
3443 when resolving @dtpoff relocation.
3444 This is PT_TLS segment p_vaddr. */
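/* In other words, a @dtpoff value is the symbol's VMA minus this base,
   i.e. its offset within the module's TLS initialization image.  */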
3445
3446 static bfd_vma
3447 elf_x86_64_dtpoff_base (struct bfd_link_info *info)
3448 {
3449 /* If tls_sec is NULL, we should have signalled an error already. */
3450 if (elf_hash_table (info)->tls_sec == NULL)
3451 return 0;
3452 return elf_hash_table (info)->tls_sec->vma;
3453 }
3454
3455 /* Return the relocation value for @tpoff relocation
3456 if STT_TLS virtual address is ADDRESS. */
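/* x86-64 uses TLS variant II: the thread pointer points just past the
   static TLS block, so @tpoff values are negative offsets from %fs:0.
   The value returned below, ADDRESS minus the aligned static TLS size
   minus the TLS segment's VMA, is therefore negative for addresses
   inside the segment.  */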
3457
3458 static bfd_vma
3459 elf_x86_64_tpoff (struct bfd_link_info *info, bfd_vma address)
3460 {
3461 struct elf_link_hash_table *htab = elf_hash_table (info);
3462 const struct elf_backend_data *bed = get_elf_backend_data (info->output_bfd);
3463 bfd_vma static_tls_size;
3464
3465 /* If tls_sec is NULL, we should have signalled an error already. */
3466 if (htab->tls_sec == NULL)
3467 return 0;
3468
3469 /* Consider special static TLS alignment requirements. */
3470 static_tls_size = BFD_ALIGN (htab->tls_size, bed->static_tls_alignment);
3471 return address - static_tls_size - htab->tls_sec->vma;
3472 }
3473
3474 /* Is the instruction before OFFSET in CONTENTS a 32bit relative
3475 branch? */
3476
3477 static bfd_boolean
3478 is_32bit_relative_branch (bfd_byte *contents, bfd_vma offset)
3479 {
3480 /* Opcode Instruction
3481 0xe8 call
3482 0xe9 jump
3483 0x0f 0x8x conditional jump */
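   /* The PC32 relocation is applied to the 4-byte displacement that
      immediately follows the opcode, so CONTENTS[OFFSET - 1] is the
      opcode byte (or the second byte of the two-byte 0x0f 0x8x
      form).  */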
3484 return ((offset > 0
3485 && (contents [offset - 1] == 0xe8
3486 || contents [offset - 1] == 0xe9))
3487 || (offset > 1
3488 && contents [offset - 2] == 0x0f
3489 && (contents [offset - 1] & 0xf0) == 0x80));
3490 }
3491
3492 /* Relocate an x86_64 ELF section. */
3493
3494 static bfd_boolean
3495 elf_x86_64_relocate_section (bfd *output_bfd,
3496 struct bfd_link_info *info,
3497 bfd *input_bfd,
3498 asection *input_section,
3499 bfd_byte *contents,
3500 Elf_Internal_Rela *relocs,
3501 Elf_Internal_Sym *local_syms,
3502 asection **local_sections)
3503 {
3504 struct elf_x86_64_link_hash_table *htab;
3505 Elf_Internal_Shdr *symtab_hdr;
3506 struct elf_link_hash_entry **sym_hashes;
3507 bfd_vma *local_got_offsets;
3508 bfd_vma *local_tlsdesc_gotents;
3509 Elf_Internal_Rela *rel;
3510 Elf_Internal_Rela *relend;
3511 const unsigned int plt_entry_size = GET_PLT_ENTRY_SIZE (info->output_bfd);
3512
3513 BFD_ASSERT (is_x86_64_elf (input_bfd));
3514
3515 htab = elf_x86_64_hash_table (info);
3516 if (htab == NULL)
3517 return FALSE;
3518 symtab_hdr = &elf_symtab_hdr (input_bfd);
3519 sym_hashes = elf_sym_hashes (input_bfd);
3520 local_got_offsets = elf_local_got_offsets (input_bfd);
3521 local_tlsdesc_gotents = elf_x86_64_local_tlsdesc_gotent (input_bfd);
3522
3523 elf_x86_64_set_tls_module_base (info);
3524
3525 rel = relocs;
3526 relend = relocs + input_section->reloc_count;
3527 for (; rel < relend; rel++)
3528 {
3529 unsigned int r_type;
3530 reloc_howto_type *howto;
3531 unsigned long r_symndx;
3532 struct elf_link_hash_entry *h;
3533 struct elf_x86_64_link_hash_entry *eh;
3534 Elf_Internal_Sym *sym;
3535 asection *sec;
3536 bfd_vma off, offplt, plt_offset;
3537 bfd_vma relocation;
3538 bfd_boolean unresolved_reloc;
3539 bfd_reloc_status_type r;
3540 int tls_type;
3541 asection *base_got, *resolved_plt;
3542 bfd_vma st_size;
3543
3544 r_type = ELF32_R_TYPE (rel->r_info);
3545 if (r_type == (int) R_X86_64_GNU_VTINHERIT
3546 || r_type == (int) R_X86_64_GNU_VTENTRY)
3547 continue;
3548
3549 if (r_type >= (int) R_X86_64_standard)
3550 {
3551 (*_bfd_error_handler)
3552 (_("%B: unrecognized relocation (0x%x) in section `%A'"),
3553 input_bfd, input_section, r_type);
3554 bfd_set_error (bfd_error_bad_value);
3555 return FALSE;
3556 }
3557
3558 if (r_type != (int) R_X86_64_32
3559 || ABI_64_P (output_bfd))
3560 howto = x86_64_elf_howto_table + r_type;
3561 else
3562 howto = (x86_64_elf_howto_table
3563 + ARRAY_SIZE (x86_64_elf_howto_table) - 1);
3564 r_symndx = htab->r_sym (rel->r_info);
3565 h = NULL;
3566 sym = NULL;
3567 sec = NULL;
3568 unresolved_reloc = FALSE;
3569 if (r_symndx < symtab_hdr->sh_info)
3570 {
3571 sym = local_syms + r_symndx;
3572 sec = local_sections[r_symndx];
3573
3574 relocation = _bfd_elf_rela_local_sym (output_bfd, sym,
3575 &sec, rel);
3576 st_size = sym->st_size;
3577
3578 /* Relocate against local STT_GNU_IFUNC symbol. */
3579 if (!info->relocatable
3580 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
3581 {
3582 h = elf_x86_64_get_local_sym_hash (htab, input_bfd,
3583 rel, FALSE);
3584 if (h == NULL)
3585 abort ();
3586
3587 /* Set STT_GNU_IFUNC symbol value. */
3588 h->root.u.def.value = sym->st_value;
3589 h->root.u.def.section = sec;
3590 }
3591 }
3592 else
3593 {
3594 bfd_boolean warned ATTRIBUTE_UNUSED;
3595 bfd_boolean ignored ATTRIBUTE_UNUSED;
3596
3597 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
3598 r_symndx, symtab_hdr, sym_hashes,
3599 h, sec, relocation,
3600 unresolved_reloc, warned, ignored);
3601 st_size = h->size;
3602 }
3603
3604 if (sec != NULL && discarded_section (sec))
3605 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
3606 rel, 1, relend, howto, 0, contents);
3607
3608 if (info->relocatable)
3609 continue;
3610
3611 if (rel->r_addend == 0 && !ABI_64_P (output_bfd))
3612 {
3613 if (r_type == R_X86_64_64)
3614 {
3615 /* For x32, treat R_X86_64_64 like R_X86_64_32 and
3616 zero-extend it to 64bit if addend is zero. */
3617 r_type = R_X86_64_32;
3618 memset (contents + rel->r_offset + 4, 0, 4);
3619 }
3620 else if (r_type == R_X86_64_SIZE64)
3621 {
3622 /* For x32, treat R_X86_64_SIZE64 like R_X86_64_SIZE32 and
3623 zero-extend it to 64bit if addend is zero. */
3624 r_type = R_X86_64_SIZE32;
3625 memset (contents + rel->r_offset + 4, 0, 4);
3626 }
3627 }
3628
3629 eh = (struct elf_x86_64_link_hash_entry *) h;
3630
3631 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle
3632 it here if it is defined in a non-shared object. */
3633 if (h != NULL
3634 && h->type == STT_GNU_IFUNC
3635 && h->def_regular)
3636 {
3637 bfd_vma plt_index;
3638 const char *name;
3639
3640 if ((input_section->flags & SEC_ALLOC) == 0
3641 || h->plt.offset == (bfd_vma) -1)
3642 abort ();
3643
3644 /* STT_GNU_IFUNC symbol must go through PLT. */
3645 if (htab->elf.splt != NULL)
3646 {
3647 if (htab->plt_bnd != NULL)
3648 {
3649 resolved_plt = htab->plt_bnd;
3650 plt_offset = eh->plt_bnd.offset;
3651 }
3652 else
3653 {
3654 resolved_plt = htab->elf.splt;
3655 plt_offset = h->plt.offset;
3656 }
3657 }
3658 else
3659 {
3660 resolved_plt = htab->elf.iplt;
3661 plt_offset = h->plt.offset;
3662 }
3663
3664 relocation = (resolved_plt->output_section->vma
3665 + resolved_plt->output_offset + plt_offset);
3666
3667 switch (r_type)
3668 {
3669 default:
3670 if (h->root.root.string)
3671 name = h->root.root.string;
3672 else
3673 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
3674 NULL);
3675 (*_bfd_error_handler)
3676 (_("%B: relocation %s against STT_GNU_IFUNC "
3677 "symbol `%s' isn't handled by %s"), input_bfd,
3678 x86_64_elf_howto_table[r_type].name,
3679 name, __FUNCTION__);
3680 bfd_set_error (bfd_error_bad_value);
3681 return FALSE;
3682
3683 case R_X86_64_32S:
3684 if (info->shared)
3685 abort ();
3686 goto do_relocation;
3687
3688 case R_X86_64_32:
3689 if (ABI_64_P (output_bfd))
3690 goto do_relocation;
3691 /* FALLTHROUGH */
3692 case R_X86_64_64:
3693 if (rel->r_addend != 0)
3694 {
3695 if (h->root.root.string)
3696 name = h->root.root.string;
3697 else
3698 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
3699 sym, NULL);
3700 (*_bfd_error_handler)
3701 (_("%B: relocation %s against STT_GNU_IFUNC "
3702 "symbol `%s' has non-zero addend: %d"),
3703 input_bfd, x86_64_elf_howto_table[r_type].name,
3704 name, rel->r_addend);
3705 bfd_set_error (bfd_error_bad_value);
3706 return FALSE;
3707 }
3708
3709 /* Generate dynamic relocation only when there is a
3710 non-GOT reference in a shared object. */
3711 if (info->shared && h->non_got_ref)
3712 {
3713 Elf_Internal_Rela outrel;
3714 asection *sreloc;
3715
3716 /* Need a dynamic relocation to get the real function
3717 address. */
3718 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
3719 info,
3720 input_section,
3721 rel->r_offset);
3722 if (outrel.r_offset == (bfd_vma) -1
3723 || outrel.r_offset == (bfd_vma) -2)
3724 abort ();
3725
3726 outrel.r_offset += (input_section->output_section->vma
3727 + input_section->output_offset);
3728
3729 if (h->dynindx == -1
3730 || h->forced_local
3731 || info->executable)
3732 {
3733 /* This symbol is resolved locally. */
3734 outrel.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
3735 outrel.r_addend = (h->root.u.def.value
3736 + h->root.u.def.section->output_section->vma
3737 + h->root.u.def.section->output_offset);
3738 }
3739 else
3740 {
3741 outrel.r_info = htab->r_info (h->dynindx, r_type);
3742 outrel.r_addend = 0;
3743 }
3744
3745 sreloc = htab->elf.irelifunc;
3746 elf_append_rela (output_bfd, sreloc, &outrel);
3747
3748 /* If this reloc is against an external symbol, we
3749 do not want to fiddle with the addend. Otherwise,
3750 we need to include the symbol value so that it
3751 becomes an addend for the dynamic reloc. For an
3752 internal symbol, we have already updated the addend. */
3753 continue;
3754 }
3755 /* FALLTHROUGH */
3756 case R_X86_64_PC32:
3757 case R_X86_64_PC32_BND:
3758 case R_X86_64_PC64:
3759 case R_X86_64_PLT32:
3760 case R_X86_64_PLT32_BND:
3761 goto do_relocation;
3762
3763 case R_X86_64_GOTPCREL:
3764 case R_X86_64_GOTPCREL64:
3765 base_got = htab->elf.sgot;
3766 off = h->got.offset;
3767
3768 if (base_got == NULL)
3769 abort ();
3770
3771 if (off == (bfd_vma) -1)
3772 {
3773 /* We can't use h->got.offset here to save state, or
3774 even just remember the offset, as finish_dynamic_symbol
3775 would use that as offset into .got. */
3776
3777 if (htab->elf.splt != NULL)
3778 {
3779 plt_index = h->plt.offset / plt_entry_size - 1;
3780 off = (plt_index + 3) * GOT_ENTRY_SIZE;
3781 base_got = htab->elf.sgotplt;
3782 }
3783 else
3784 {
3785 plt_index = h->plt.offset / plt_entry_size;
3786 off = plt_index * GOT_ENTRY_SIZE;
3787 base_got = htab->elf.igotplt;
3788 }
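/* The "- 1" above skips the reserved PLT0 entry of .plt, and the
   "+ 3" skips the three .got.plt slots reserved for the dynamic
   linker; .iplt and .igot.plt have no reserved entries. */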
3789
3790 if (h->dynindx == -1
3791 || h->forced_local
3792 || info->symbolic)
3793 {
3794 /* This references the local definition. We must
3795 initialize this entry in the global offset table.
3796 Since the offset must always be a multiple of 8,
3797 we use the least significant bit to record
3798 whether we have initialized it already.
3799
3800 When doing a dynamic link, we create a .rela.got
3801 relocation entry to initialize the value. This
3802 is done in the finish_dynamic_symbol routine. */
3803 if ((off & 1) != 0)
3804 off &= ~1;
3805 else
3806 {
3807 bfd_put_64 (output_bfd, relocation,
3808 base_got->contents + off);
3809 /* Note that this is harmless for the GOTPLT64
3810 case, as -1 | 1 still is -1. */
3811 h->got.offset |= 1;
3812 }
3813 }
3814 }
3815
3816 relocation = (base_got->output_section->vma
3817 + base_got->output_offset + off);
3818
3819 goto do_relocation;
3820 }
3821 }
3822
3823 /* When generating a shared object, the relocations handled here are
3824 copied into the output file to be resolved at run time. */
3825 switch (r_type)
3826 {
3827 case R_X86_64_GOT32:
3828 case R_X86_64_GOT64:
3829 /* Relocation is to the entry for this symbol in the global
3830 offset table. */
3831 case R_X86_64_GOTPCREL:
3832 case R_X86_64_GOTPCREL64:
3833 /* Use global offset table entry as symbol value. */
3834 case R_X86_64_GOTPLT64:
3835 /* This is obsolete and treated the same as GOT64. */
3836 base_got = htab->elf.sgot;
3837
3838 if (htab->elf.sgot == NULL)
3839 abort ();
3840
3841 if (h != NULL)
3842 {
3843 bfd_boolean dyn;
3844
3845 off = h->got.offset;
3846 if (h->needs_plt
3847 && h->plt.offset != (bfd_vma)-1
3848 && off == (bfd_vma)-1)
3849 {
3850 /* We can't use h->got.offset here to save
3851 state, or even just remember the offset, as
3852 finish_dynamic_symbol would use that as offset into
3853 .got. */
3854 bfd_vma plt_index = h->plt.offset / plt_entry_size - 1;
3855 off = (plt_index + 3) * GOT_ENTRY_SIZE;
3856 base_got = htab->elf.sgotplt;
3857 }
3858
3859 dyn = htab->elf.dynamic_sections_created;
3860
3861 if (! WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
3862 || (info->shared
3863 && SYMBOL_REFERENCES_LOCAL (info, h))
3864 || (ELF_ST_VISIBILITY (h->other)
3865 && h->root.type == bfd_link_hash_undefweak))
3866 {
3867 /* This is actually a static link, or it is a -Bsymbolic
3868 link and the symbol is defined locally, or the symbol
3869 was forced to be local because of a version file. We
3870 must initialize this entry in the global offset table.
3871 Since the offset must always be a multiple of 8, we
3872 use the least significant bit to record whether we
3873 have initialized it already.
3874
3875 When doing a dynamic link, we create a .rela.got
3876 relocation entry to initialize the value. This is
3877 done in the finish_dynamic_symbol routine. */
3878 if ((off & 1) != 0)
3879 off &= ~1;
3880 else
3881 {
3882 bfd_put_64 (output_bfd, relocation,
3883 base_got->contents + off);
3884 /* Note that this is harmless for the GOTPLT64 case,
3885 as -1 | 1 still is -1. */
3886 h->got.offset |= 1;
3887 }
3888 }
3889 else
3890 unresolved_reloc = FALSE;
3891 }
3892 else
3893 {
3894 if (local_got_offsets == NULL)
3895 abort ();
3896
3897 off = local_got_offsets[r_symndx];
3898
3899 /* The offset must always be a multiple of 8. We use
3900 the least significant bit to record whether we have
3901 already generated the necessary reloc. */
3902 if ((off & 1) != 0)
3903 off &= ~1;
3904 else
3905 {
3906 bfd_put_64 (output_bfd, relocation,
3907 base_got->contents + off);
3908
3909 if (info->shared)
3910 {
3911 asection *s;
3912 Elf_Internal_Rela outrel;
3913
3914 /* We need to generate a R_X86_64_RELATIVE reloc
3915 for the dynamic linker. */
3916 s = htab->elf.srelgot;
3917 if (s == NULL)
3918 abort ();
3919
3920 outrel.r_offset = (base_got->output_section->vma
3921 + base_got->output_offset
3922 + off);
3923 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
3924 outrel.r_addend = relocation;
3925 elf_append_rela (output_bfd, s, &outrel);
3926 }
3927
3928 local_got_offsets[r_symndx] |= 1;
3929 }
3930 }
3931
3932 if (off >= (bfd_vma) -2)
3933 abort ();
3934
3935 relocation = base_got->output_section->vma
3936 + base_got->output_offset + off;
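/* GOT32/GOT64/GOTPLT64 are expressed relative to the start of
   .got.plt (_GLOBAL_OFFSET_TABLE_), so the .got.plt base is
   subtracted below; GOTPCREL/GOTPCREL64 stay PC-relative to the GOT
   entry itself and keep its absolute address. */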
3937 if (r_type != R_X86_64_GOTPCREL && r_type != R_X86_64_GOTPCREL64)
3938 relocation -= htab->elf.sgotplt->output_section->vma
3939 - htab->elf.sgotplt->output_offset;
3940
3941 break;
3942
3943 case R_X86_64_GOTOFF64:
3944 /* Relocation is relative to the start of the global offset
3945 table. */
3946
3947 /* Check to make sure it isn't a protected function symbol
3948 for shared library since it may not be local when used
3949 as function address. */
3950 if (!info->executable
3951 && h
3952 && !SYMBOLIC_BIND (info, h)
3953 && h->def_regular
3954 && h->type == STT_FUNC
3955 && ELF_ST_VISIBILITY (h->other) == STV_PROTECTED)
3956 {
3957 (*_bfd_error_handler)
3958 (_("%B: relocation R_X86_64_GOTOFF64 against protected function `%s' can not be used when making a shared object"),
3959 input_bfd, h->root.root.string);
3960 bfd_set_error (bfd_error_bad_value);
3961 return FALSE;
3962 }
3963
3964 /* Note that sgot is not involved in this
3965 calculation. We always want the start of .got.plt. If we
3966 defined _GLOBAL_OFFSET_TABLE_ in a different way, as is
3967 permitted by the ABI, we might have to change this
3968 calculation. */
3969 relocation -= htab->elf.sgotplt->output_section->vma
3970 + htab->elf.sgotplt->output_offset;
3971 break;
3972
3973 case R_X86_64_GOTPC32:
3974 case R_X86_64_GOTPC64:
3975 /* Use global offset table as symbol value. */
3976 relocation = htab->elf.sgotplt->output_section->vma
3977 + htab->elf.sgotplt->output_offset;
3978 unresolved_reloc = FALSE;
3979 break;
3980
3981 case R_X86_64_PLTOFF64:
3982 /* Relocation is PLT entry relative to GOT. For local
3983 symbols it's the symbol itself relative to GOT. */
3984 if (h != NULL
3985 /* See PLT32 handling. */
3986 && h->plt.offset != (bfd_vma) -1
3987 && htab->elf.splt != NULL)
3988 {
3989 if (htab->plt_bnd != NULL)
3990 {
3991 resolved_plt = htab->plt_bnd;
3992 plt_offset = eh->plt_bnd.offset;
3993 }
3994 else
3995 {
3996 resolved_plt = htab->elf.splt;
3997 plt_offset = h->plt.offset;
3998 }
3999
4000 relocation = (resolved_plt->output_section->vma
4001 + resolved_plt->output_offset
4002 + plt_offset);
4003 unresolved_reloc = FALSE;
4004 }
4005
4006 relocation -= htab->elf.sgotplt->output_section->vma
4007 + htab->elf.sgotplt->output_offset;
4008 break;
4009
4010 case R_X86_64_PLT32:
4011 case R_X86_64_PLT32_BND:
4012 /* Relocation is to the entry for this symbol in the
4013 procedure linkage table. */
4014
4015 /* Resolve a PLT32 reloc against a local symbol directly,
4016 without using the procedure linkage table. */
4017 if (h == NULL)
4018 break;
4019
4020 if ((h->plt.offset == (bfd_vma) -1
4021 && eh->plt_got.offset == (bfd_vma) -1)
4022 || htab->elf.splt == NULL)
4023 {
4024 /* We didn't make a PLT entry for this symbol. This
4025 happens when statically linking PIC code, or when
4026 using -Bsymbolic. */
4027 break;
4028 }
4029
4030 if (h->plt.offset != (bfd_vma) -1)
4031 {
4032 if (htab->plt_bnd != NULL)
4033 {
4034 resolved_plt = htab->plt_bnd;
4035 plt_offset = eh->plt_bnd.offset;
4036 }
4037 else
4038 {
4039 resolved_plt = htab->elf.splt;
4040 plt_offset = h->plt.offset;
4041 }
4042 }
4043 else
4044 {
4045 /* Use the GOT PLT. */
4046 resolved_plt = htab->plt_got;
4047 plt_offset = eh->plt_got.offset;
4048 }
4049
4050 relocation = (resolved_plt->output_section->vma
4051 + resolved_plt->output_offset
4052 + plt_offset);
4053 unresolved_reloc = FALSE;
4054 break;
4055
4056 case R_X86_64_SIZE32:
4057 case R_X86_64_SIZE64:
4058 /* Set to symbol size. */
4059 relocation = st_size;
4060 goto direct;
4061
4062 case R_X86_64_PC8:
4063 case R_X86_64_PC16:
4064 case R_X86_64_PC32:
4065 case R_X86_64_PC32_BND:
4066 /* Don't complain about -fPIC if the symbol is undefined when
4067 building executable. */
4068 if (info->shared
4069 && (input_section->flags & SEC_ALLOC) != 0
4070 && (input_section->flags & SEC_READONLY) != 0
4071 && h != NULL
4072 && !(info->executable
4073 && h->root.type == bfd_link_hash_undefined))
4074 {
4075 bfd_boolean fail = FALSE;
4076 bfd_boolean branch
4077 = ((r_type == R_X86_64_PC32
4078 || r_type == R_X86_64_PC32_BND)
4079 && is_32bit_relative_branch (contents, rel->r_offset));
4080
4081 if (SYMBOL_REFERENCES_LOCAL (info, h))
4082 {
4083 /* Symbol is referenced locally. Make sure it is
4084 defined locally or for a branch. */
4085 fail = !h->def_regular && !branch;
4086 }
4087 else if (!(info->executable
4088 && (h->needs_copy || eh->needs_copy)))
4089 {
4090 /* Symbol doesn't need copy reloc and isn't referenced
4091 locally. We only allow branch to symbol with
4092 non-default visibility. */
4093 fail = (!branch
4094 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT);
4095 }
4096
4097 if (fail)
4098 {
4099 const char *fmt;
4100 const char *v;
4101 const char *pic = "";
4102
4103 switch (ELF_ST_VISIBILITY (h->other))
4104 {
4105 case STV_HIDDEN:
4106 v = _("hidden symbol");
4107 break;
4108 case STV_INTERNAL:
4109 v = _("internal symbol");
4110 break;
4111 case STV_PROTECTED:
4112 v = _("protected symbol");
4113 break;
4114 default:
4115 v = _("symbol");
4116 pic = _("; recompile with -fPIC");
4117 break;
4118 }
4119
4120 if (h->def_regular)
4121 fmt = _("%B: relocation %s against %s `%s' can not be used when making a shared object%s");
4122 else
4123 fmt = _("%B: relocation %s against undefined %s `%s' can not be used when making a shared object%s");
4124
4125 (*_bfd_error_handler) (fmt, input_bfd,
4126 x86_64_elf_howto_table[r_type].name,
4127 v, h->root.root.string, pic);
4128 bfd_set_error (bfd_error_bad_value);
4129 return FALSE;
4130 }
4131 }
4132 /* Fall through. */
4133
4134 case R_X86_64_8:
4135 case R_X86_64_16:
4136 case R_X86_64_32:
4137 case R_X86_64_PC64:
4138 case R_X86_64_64:
4139 /* FIXME: The ABI says the linker should make sure the value is
4140 the same when it's zero-extended to 64 bits. */
4141
4142 direct:
4143 if ((input_section->flags & SEC_ALLOC) == 0)
4144 break;
4145
4146 /* Don't copy a pc-relative relocation into the output file
4147 if the symbol needs copy reloc or the symbol is undefined
4148 when building executable. */
4149 if ((info->shared
4150 && !(info->executable
4151 && h != NULL
4152 && (h->needs_copy
4153 || eh->needs_copy
4154 || h->root.type == bfd_link_hash_undefined)
4155 && IS_X86_64_PCREL_TYPE (r_type))
4156 && (h == NULL
4157 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
4158 || h->root.type != bfd_link_hash_undefweak)
4159 && ((! IS_X86_64_PCREL_TYPE (r_type)
4160 && r_type != R_X86_64_SIZE32
4161 && r_type != R_X86_64_SIZE64)
4162 || ! SYMBOL_CALLS_LOCAL (info, h)))
4163 || (ELIMINATE_COPY_RELOCS
4164 && !info->shared
4165 && h != NULL
4166 && h->dynindx != -1
4167 && !h->non_got_ref
4168 && ((h->def_dynamic
4169 && !h->def_regular)
4170 || h->root.type == bfd_link_hash_undefweak
4171 || h->root.type == bfd_link_hash_undefined)))
4172 {
4173 Elf_Internal_Rela outrel;
4174 bfd_boolean skip, relocate;
4175 asection *sreloc;
4176
4177 /* When generating a shared object, these relocations
4178 are copied into the output file to be resolved at run
4179 time. */
4180 skip = FALSE;
4181 relocate = FALSE;
4182
4183 outrel.r_offset =
4184 _bfd_elf_section_offset (output_bfd, info, input_section,
4185 rel->r_offset);
4186 if (outrel.r_offset == (bfd_vma) -1)
4187 skip = TRUE;
4188 else if (outrel.r_offset == (bfd_vma) -2)
4189 skip = TRUE, relocate = TRUE;
4190
4191 outrel.r_offset += (input_section->output_section->vma
4192 + input_section->output_offset);
4193
4194 if (skip)
4195 memset (&outrel, 0, sizeof outrel);
4196
4197 /* h->dynindx may be -1 if this symbol was marked to
4198 become local. */
4199 else if (h != NULL
4200 && h->dynindx != -1
4201 && (IS_X86_64_PCREL_TYPE (r_type)
4202 || ! info->shared
4203 || ! SYMBOLIC_BIND (info, h)
4204 || ! h->def_regular))
4205 {
4206 outrel.r_info = htab->r_info (h->dynindx, r_type);
4207 outrel.r_addend = rel->r_addend;
4208 }
4209 else
4210 {
4211 /* This symbol is local, or marked to become local. */
4212 if (r_type == htab->pointer_r_type)
4213 {
4214 relocate = TRUE;
4215 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
4216 outrel.r_addend = relocation + rel->r_addend;
4217 }
4218 else if (r_type == R_X86_64_64
4219 && !ABI_64_P (output_bfd))
4220 {
4221 relocate = TRUE;
4222 outrel.r_info = htab->r_info (0,
4223 R_X86_64_RELATIVE64);
4224 outrel.r_addend = relocation + rel->r_addend;
4225 /* Check addend overflow. */
4226 if ((outrel.r_addend & 0x80000000)
4227 != (rel->r_addend & 0x80000000))
4228 {
4229 const char *name;
4230 int addend = rel->r_addend;
4231 if (h && h->root.root.string)
4232 name = h->root.root.string;
4233 else
4234 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
4235 sym, NULL);
4236 if (addend < 0)
4237 (*_bfd_error_handler)
4238 (_("%B: addend -0x%x in relocation %s against "
4239 "symbol `%s' at 0x%lx in section `%A' is "
4240 "out of range"),
4241 input_bfd, input_section, addend,
4242 x86_64_elf_howto_table[r_type].name,
4243 name, (unsigned long) rel->r_offset);
4244 else
4245 (*_bfd_error_handler)
4246 (_("%B: addend 0x%x in relocation %s against "
4247 "symbol `%s' at 0x%lx in section `%A' is "
4248 "out of range"),
4249 input_bfd, input_section, addend,
4250 x86_64_elf_howto_table[r_type].name,
4251 name, (unsigned long) rel->r_offset);
4252 bfd_set_error (bfd_error_bad_value);
4253 return FALSE;
4254 }
4255 }
4256 else
4257 {
4258 long sindx;
4259
4260 if (bfd_is_abs_section (sec))
4261 sindx = 0;
4262 else if (sec == NULL || sec->owner == NULL)
4263 {
4264 bfd_set_error (bfd_error_bad_value);
4265 return FALSE;
4266 }
4267 else
4268 {
4269 asection *osec;
4270
4271 /* We are turning this relocation into one
4272 against a section symbol. It would be
4273 proper to subtract the symbol's value,
4274 osec->vma, from the emitted reloc addend,
4275 but ld.so expects buggy relocs. */
4276 osec = sec->output_section;
4277 sindx = elf_section_data (osec)->dynindx;
4278 if (sindx == 0)
4279 {
4280 asection *oi = htab->elf.text_index_section;
4281 sindx = elf_section_data (oi)->dynindx;
4282 }
4283 BFD_ASSERT (sindx != 0);
4284 }
4285
4286 outrel.r_info = htab->r_info (sindx, r_type);
4287 outrel.r_addend = relocation + rel->r_addend;
4288 }
4289 }
4290
4291 sreloc = elf_section_data (input_section)->sreloc;
4292
4293 if (sreloc == NULL || sreloc->contents == NULL)
4294 {
4295 r = bfd_reloc_notsupported;
4296 goto check_relocation_error;
4297 }
4298
4299 elf_append_rela (output_bfd, sreloc, &outrel);
4300
4301 /* If this reloc is against an external symbol, we do
4302 not want to fiddle with the addend. Otherwise, we
4303 need to include the symbol value so that it becomes
4304 an addend for the dynamic reloc. */
4305 if (! relocate)
4306 continue;
4307 }
4308
4309 break;
4310
4311 case R_X86_64_TLSGD:
4312 case R_X86_64_GOTPC32_TLSDESC:
4313 case R_X86_64_TLSDESC_CALL:
4314 case R_X86_64_GOTTPOFF:
4315 tls_type = GOT_UNKNOWN;
4316 if (h == NULL && local_got_offsets)
4317 tls_type = elf_x86_64_local_got_tls_type (input_bfd) [r_symndx];
4318 else if (h != NULL)
4319 tls_type = elf_x86_64_hash_entry (h)->tls_type;
4320
4321 if (! elf_x86_64_tls_transition (info, input_bfd,
4322 input_section, contents,
4323 symtab_hdr, sym_hashes,
4324 &r_type, tls_type, rel,
4325 relend, h, r_symndx))
4326 return FALSE;
4327
4328 if (r_type == R_X86_64_TPOFF32)
4329 {
4330 bfd_vma roff = rel->r_offset;
4331
4332 BFD_ASSERT (! unresolved_reloc);
4333
4334 if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSGD)
4335 {
4336 /* GD->LE transition. For 64bit, change
4337 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
4338 .word 0x6666; rex64; call __tls_get_addr
4339 into:
4340 movq %fs:0, %rax
4341 leaq foo@tpoff(%rax), %rax
4342 For 32bit, change
4343 leaq foo@tlsgd(%rip), %rdi
4344 .word 0x6666; rex64; call __tls_get_addr
4345 into:
4346 movl %fs:0, %eax
4347 leaq foo@tpoff(%rax), %rax
4348 For largepic, change:
4349 leaq foo@tlsgd(%rip), %rdi
4350 movabsq $__tls_get_addr@pltoff, %rax
4351 addq %rbx, %rax
4352 call *%rax
4353 into:
4354 movq %fs:0, %rax
4355 leaq foo@tpoff(%rax), %rax
4356 nopw 0x0(%rax,%rax,1) */
4357 int largepic = 0;
4358 if (ABI_64_P (output_bfd)
4359 && contents[roff + 5] == (bfd_byte) '\xb8')
4360 {
4361 memcpy (contents + roff - 3,
4362 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80"
4363 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
4364 largepic = 1;
4365 }
4366 else if (ABI_64_P (output_bfd))
4367 memcpy (contents + roff - 4,
4368 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
4369 16);
4370 else
4371 memcpy (contents + roff - 3,
4372 "\x64\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
4373 15);
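/* A reading of the replacement bytes above: 0x64 is the %fs segment
   prefix, 48 8b 04 25 imm32 encodes movq %fs:imm32, %rax (8b 04 25
   without the REX prefix gives the movl form), 48 8d 80 disp32
   encodes leaq disp32(%rax), %rax, and 66 0f 1f 44 00 00 is the
   padding nopw.  The @tpoff value written just below at roff + 8
   (+ largepic) lands in the leaq displacement. */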
4374 bfd_put_32 (output_bfd,
4375 elf_x86_64_tpoff (info, relocation),
4376 contents + roff + 8 + largepic);
4377 /* Skip R_X86_64_PC32/R_X86_64_PLT32/R_X86_64_PLTOFF64. */
4378 rel++;
4379 continue;
4380 }
4381 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_GOTPC32_TLSDESC)
4382 {
4383 /* GDesc -> LE transition.
4384 It's originally something like:
4385 leaq x@tlsdesc(%rip), %rax
4386
4387 Change it to:
4388 movl $x@tpoff, %rax. */
4389
4390 unsigned int val, type;
4391
4392 type = bfd_get_8 (input_bfd, contents + roff - 3);
4393 val = bfd_get_8 (input_bfd, contents + roff - 1);
4394 bfd_put_8 (output_bfd, 0x48 | ((type >> 2) & 1),
4395 contents + roff - 3);
4396 bfd_put_8 (output_bfd, 0xc7, contents + roff - 2);
4397 bfd_put_8 (output_bfd, 0xc0 | ((val >> 3) & 7),
4398 contents + roff - 1);
4399 bfd_put_32 (output_bfd,
4400 elf_x86_64_tpoff (info, relocation),
4401 contents + roff);
4402 continue;
4403 }
4404 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSDESC_CALL)
4405 {
4406 /* GDesc -> LE transition.
4407 It's originally:
4408 call *(%rax)
4409 Turn it into:
4410 xchg %ax,%ax. */
4411 bfd_put_8 (output_bfd, 0x66, contents + roff);
4412 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
4413 continue;
4414 }
4415 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_GOTTPOFF)
4416 {
4417 /* IE->LE transition:
4418 For 64bit, originally it can be one of:
4419 movq foo@gottpoff(%rip), %reg
4420 addq foo@gottpoff(%rip), %reg
4421 We change it into:
4422 movq $foo, %reg
4423 leaq foo(%reg), %reg
4424 addq $foo, %reg.
4425 For 32bit, originally it can be one of:
4426 movq foo@gottpoff(%rip), %reg
4427 addl foo@gottpoff(%rip), %reg
4428 We change it into:
4429 movq $foo, %reg
4430 leal foo(%reg), %reg
4431 addl $foo, %reg. */
4432
4433 unsigned int val, type, reg;
4434
4435 if (roff >= 3)
4436 val = bfd_get_8 (input_bfd, contents + roff - 3);
4437 else
4438 val = 0;
4439 type = bfd_get_8 (input_bfd, contents + roff - 2);
4440 reg = bfd_get_8 (input_bfd, contents + roff - 1);
4441 reg >>= 3;
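/* val holds the (optional) REX prefix, type the opcode (0x8b for
   mov, otherwise add), and reg the ModRM reg field (bits 5:3) naming
   the destination register; with RIP-relative addressing the top
   ModRM bits are zero, so the plain shift is sufficient. */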
4442 if (type == 0x8b)
4443 {
4444 /* movq */
4445 if (val == 0x4c)
4446 bfd_put_8 (output_bfd, 0x49,
4447 contents + roff - 3);
4448 else if (!ABI_64_P (output_bfd) && val == 0x44)
4449 bfd_put_8 (output_bfd, 0x41,
4450 contents + roff - 3);
4451 bfd_put_8 (output_bfd, 0xc7,
4452 contents + roff - 2);
4453 bfd_put_8 (output_bfd, 0xc0 | reg,
4454 contents + roff - 1);
4455 }
4456 else if (reg == 4)
4457 {
4458 /* addq/addl -> addq/addl - addressing with %rsp/%r12
4459 is special */
4460 if (val == 0x4c)
4461 bfd_put_8 (output_bfd, 0x49,
4462 contents + roff - 3);
4463 else if (!ABI_64_P (output_bfd) && val == 0x44)
4464 bfd_put_8 (output_bfd, 0x41,
4465 contents + roff - 3);
4466 bfd_put_8 (output_bfd, 0x81,
4467 contents + roff - 2);
4468 bfd_put_8 (output_bfd, 0xc0 | reg,
4469 contents + roff - 1);
4470 }
4471 else
4472 {
4473 /* addq/addl -> leaq/leal */
4474 if (val == 0x4c)
4475 bfd_put_8 (output_bfd, 0x4d,
4476 contents + roff - 3);
4477 else if (!ABI_64_P (output_bfd) && val == 0x44)
4478 bfd_put_8 (output_bfd, 0x45,
4479 contents + roff - 3);
4480 bfd_put_8 (output_bfd, 0x8d,
4481 contents + roff - 2);
4482 bfd_put_8 (output_bfd, 0x80 | reg | (reg << 3),
4483 contents + roff - 1);
4484 }
4485 bfd_put_32 (output_bfd,
4486 elf_x86_64_tpoff (info, relocation),
4487 contents + roff);
4488 continue;
4489 }
4490 else
4491 BFD_ASSERT (FALSE);
4492 }
4493
4494 if (htab->elf.sgot == NULL)
4495 abort ();
4496
4497 if (h != NULL)
4498 {
4499 off = h->got.offset;
4500 offplt = elf_x86_64_hash_entry (h)->tlsdesc_got;
4501 }
4502 else
4503 {
4504 if (local_got_offsets == NULL)
4505 abort ();
4506
4507 off = local_got_offsets[r_symndx];
4508 offplt = local_tlsdesc_gotents[r_symndx];
4509 }
4510
4511 if ((off & 1) != 0)
4512 off &= ~1;
4513 else
4514 {
4515 Elf_Internal_Rela outrel;
4516 int dr_type, indx;
4517 asection *sreloc;
4518
4519 if (htab->elf.srelgot == NULL)
4520 abort ();
4521
4522 indx = h && h->dynindx != -1 ? h->dynindx : 0;
4523
4524 if (GOT_TLS_GDESC_P (tls_type))
4525 {
4526 outrel.r_info = htab->r_info (indx, R_X86_64_TLSDESC);
4527 BFD_ASSERT (htab->sgotplt_jump_table_size + offplt
4528 + 2 * GOT_ENTRY_SIZE <= htab->elf.sgotplt->size);
4529 outrel.r_offset = (htab->elf.sgotplt->output_section->vma
4530 + htab->elf.sgotplt->output_offset
4531 + offplt
4532 + htab->sgotplt_jump_table_size);
4533 sreloc = htab->elf.srelplt;
4534 if (indx == 0)
4535 outrel.r_addend = relocation - elf_x86_64_dtpoff_base (info);
4536 else
4537 outrel.r_addend = 0;
4538 elf_append_rela (output_bfd, sreloc, &outrel);
4539 }
4540
4541 sreloc = htab->elf.srelgot;
4542
4543 outrel.r_offset = (htab->elf.sgot->output_section->vma
4544 + htab->elf.sgot->output_offset + off);
4545
4546 if (GOT_TLS_GD_P (tls_type))
4547 dr_type = R_X86_64_DTPMOD64;
4548 else if (GOT_TLS_GDESC_P (tls_type))
4549 goto dr_done;
4550 else
4551 dr_type = R_X86_64_TPOFF64;
4552
4553 bfd_put_64 (output_bfd, 0, htab->elf.sgot->contents + off);
4554 outrel.r_addend = 0;
4555 if ((dr_type == R_X86_64_TPOFF64
4556 || dr_type == R_X86_64_TLSDESC) && indx == 0)
4557 outrel.r_addend = relocation - elf_x86_64_dtpoff_base (info);
4558 outrel.r_info = htab->r_info (indx, dr_type);
4559
4560 elf_append_rela (output_bfd, sreloc, &outrel);
4561
4562 if (GOT_TLS_GD_P (tls_type))
4563 {
4564 if (indx == 0)
4565 {
4566 BFD_ASSERT (! unresolved_reloc);
4567 bfd_put_64 (output_bfd,
4568 relocation - elf_x86_64_dtpoff_base (info),
4569 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
4570 }
4571 else
4572 {
4573 bfd_put_64 (output_bfd, 0,
4574 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
4575 outrel.r_info = htab->r_info (indx,
4576 R_X86_64_DTPOFF64);
4577 outrel.r_offset += GOT_ENTRY_SIZE;
4578 elf_append_rela (output_bfd, sreloc,
4579 &outrel);
4580 }
4581 }
4582
4583 dr_done:
4584 if (h != NULL)
4585 h->got.offset |= 1;
4586 else
4587 local_got_offsets[r_symndx] |= 1;
4588 }
4589
4590 if (off >= (bfd_vma) -2
4591 && ! GOT_TLS_GDESC_P (tls_type))
4592 abort ();
4593 if (r_type == ELF32_R_TYPE (rel->r_info))
4594 {
4595 if (r_type == R_X86_64_GOTPC32_TLSDESC
4596 || r_type == R_X86_64_TLSDESC_CALL)
4597 relocation = htab->elf.sgotplt->output_section->vma
4598 + htab->elf.sgotplt->output_offset
4599 + offplt + htab->sgotplt_jump_table_size;
4600 else
4601 relocation = htab->elf.sgot->output_section->vma
4602 + htab->elf.sgot->output_offset + off;
4603 unresolved_reloc = FALSE;
4604 }
4605 else
4606 {
4607 bfd_vma roff = rel->r_offset;
4608
4609 if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSGD)
4610 {
4611 /* GD->IE transition. For 64bit, change
4612 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
4613 .word 0x6666; rex64; call __tls_get_addr@plt
4614 into:
4615 movq %fs:0, %rax
4616 addq foo@gottpoff(%rip), %rax
4617 For 32bit, change
4618 leaq foo@tlsgd(%rip), %rdi
4619 .word 0x6666; rex64; call __tls_get_addr@plt
4620 into:
4621 movl %fs:0, %eax
4622 addq foo@gottpoff(%rip), %rax
4623 For largepic, change:
4624 leaq foo@tlsgd(%rip), %rdi
4625 movabsq $__tls_get_addr@pltoff, %rax
4626 addq %rbx, %rax
4627 call *%rax
4628 into:
4629 movq %fs:0, %rax
4630 addq foo@gottpoff(%rax), %rax
4631 nopw 0x0(%rax,%rax,1) */
4632 int largepic = 0;
4633 if (ABI_64_P (output_bfd)
4634 && contents[roff + 5] == (bfd_byte) '\xb8')
4635 {
4636 memcpy (contents + roff - 3,
4637 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05"
4638 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
4639 largepic = 1;
4640 }
4641 else if (ABI_64_P (output_bfd))
4642 memcpy (contents + roff - 4,
4643 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
4644 16);
4645 else
4646 memcpy (contents + roff - 3,
4647 "\x64\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
4648 15);
4649
4650 relocation = (htab->elf.sgot->output_section->vma
4651 + htab->elf.sgot->output_offset + off
4652 - roff
4653 - largepic
4654 - input_section->output_section->vma
4655 - input_section->output_offset
4656 - 12);
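/* The rewritten addq ends 12 bytes (plus any largepic padding)
   after roff, and its RIP-relative displacement is measured from the
   end of that instruction, hence the "- 12" and "- largepic" above. */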
4657 bfd_put_32 (output_bfd, relocation,
4658 contents + roff + 8 + largepic);
4659 /* Skip R_X86_64_PLT32/R_X86_64_PLTOFF64. */
4660 rel++;
4661 continue;
4662 }
4663 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_GOTPC32_TLSDESC)
4664 {
4665 /* GDesc -> IE transition.
4666 It's originally something like:
4667 leaq x@tlsdesc(%rip), %rax
4668
4669 Change it to:
4670 movq x@gottpoff(%rip), %rax # before xchg %ax,%ax. */
4671
4672 /* Now modify the instruction as appropriate. To
4673 turn a leaq into a movq in the form we use it, it
4674 suffices to change the second byte from 0x8d to
4675 0x8b. */
4676 bfd_put_8 (output_bfd, 0x8b, contents + roff - 2);
4677
4678 bfd_put_32 (output_bfd,
4679 htab->elf.sgot->output_section->vma
4680 + htab->elf.sgot->output_offset + off
4681 - rel->r_offset
4682 - input_section->output_section->vma
4683 - input_section->output_offset
4684 - 4,
4685 contents + roff);
4686 continue;
4687 }
4688 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSDESC_CALL)
4689 {
4690 /* GDesc -> IE transition.
4691 It's originally:
4692 call *(%rax)
4693
4694 Change it to:
4695 xchg %ax, %ax. */
4696
4697 bfd_put_8 (output_bfd, 0x66, contents + roff);
4698 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
4699 continue;
4700 }
4701 else
4702 BFD_ASSERT (FALSE);
4703 }
4704 break;
4705
4706 case R_X86_64_TLSLD:
4707 if (! elf_x86_64_tls_transition (info, input_bfd,
4708 input_section, contents,
4709 symtab_hdr, sym_hashes,
4710 &r_type, GOT_UNKNOWN,
4711 rel, relend, h, r_symndx))
4712 return FALSE;
4713
4714 if (r_type != R_X86_64_TLSLD)
4715 {
4716 /* LD->LE transition:
4717 leaq foo@tlsld(%rip), %rdi; call __tls_get_addr.
4718 For 64bit, we change it into:
4719 .word 0x6666; .byte 0x66; movq %fs:0, %rax.
4720 For 32bit, we change it into:
4721 nopl 0x0(%rax); movl %fs:0, %eax.
4722 For largepic, change:
4723 leaq foo@tlsld(%rip), %rdi
4724 movabsq $__tls_get_addr@pltoff, %rax
4725 addq %rbx, %rax
4726 call *%rax
4727 into:
4728 data32 data32 data32 nopw %cs:0x0(%rax,%rax,1)
4729 movq %fs:0, %rax */
4730
4731 BFD_ASSERT (r_type == R_X86_64_TPOFF32);
4732 if (ABI_64_P (output_bfd)
4733 && contents[rel->r_offset + 5] == (bfd_byte) '\xb8')
4734 memcpy (contents + rel->r_offset - 3,
4735 "\x66\x66\x66\x66\x2e\x0f\x1f\x84\0\0\0\0\0"
4736 "\x64\x48\x8b\x04\x25\0\0\0", 22);
4737 else if (ABI_64_P (output_bfd))
4738 memcpy (contents + rel->r_offset - 3,
4739 "\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0", 12);
4740 else
4741 memcpy (contents + rel->r_offset - 3,
4742 "\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0", 12);
4743 /* Skip R_X86_64_PC32/R_X86_64_PLT32/R_X86_64_PLTOFF64. */
4744 rel++;
4745 continue;
4746 }
4747
4748 if (htab->elf.sgot == NULL)
4749 abort ();
4750
4751 off = htab->tls_ld_got.offset;
4752 if (off & 1)
4753 off &= ~1;
4754 else
4755 {
4756 Elf_Internal_Rela outrel;
4757
4758 if (htab->elf.srelgot == NULL)
4759 abort ();
4760
4761 outrel.r_offset = (htab->elf.sgot->output_section->vma
4762 + htab->elf.sgot->output_offset + off);
4763
4764 bfd_put_64 (output_bfd, 0,
4765 htab->elf.sgot->contents + off);
4766 bfd_put_64 (output_bfd, 0,
4767 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
4768 outrel.r_info = htab->r_info (0, R_X86_64_DTPMOD64);
4769 outrel.r_addend = 0;
4770 elf_append_rela (output_bfd, htab->elf.srelgot,
4771 &outrel);
4772 htab->tls_ld_got.offset |= 1;
4773 }
4774 relocation = htab->elf.sgot->output_section->vma
4775 + htab->elf.sgot->output_offset + off;
4776 unresolved_reloc = FALSE;
4777 break;
4778
4779 case R_X86_64_DTPOFF32:
4780 if (!info->executable || (input_section->flags & SEC_CODE) == 0)
4781 relocation -= elf_x86_64_dtpoff_base (info);
4782 else
4783 relocation = elf_x86_64_tpoff (info, relocation);
4784 break;
4785
4786 case R_X86_64_TPOFF32:
4787 case R_X86_64_TPOFF64:
4788 BFD_ASSERT (info->executable);
4789 relocation = elf_x86_64_tpoff (info, relocation);
4790 break;
4791
4792 case R_X86_64_DTPOFF64:
4793 BFD_ASSERT ((input_section->flags & SEC_CODE) == 0);
4794 relocation -= elf_x86_64_dtpoff_base (info);
4795 break;
4796
4797 default:
4798 break;
4799 }
4800
4801 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
4802 because such sections are not SEC_ALLOC and thus ld.so will
4803 not process them. */
4804 if (unresolved_reloc
4805 && !((input_section->flags & SEC_DEBUGGING) != 0
4806 && h->def_dynamic)
4807 && _bfd_elf_section_offset (output_bfd, info, input_section,
4808 rel->r_offset) != (bfd_vma) -1)
4809 {
4810 (*_bfd_error_handler)
4811 (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
4812 input_bfd,
4813 input_section,
4814 (long) rel->r_offset,
4815 howto->name,
4816 h->root.root.string);
4817 return FALSE;
4818 }
4819
4820 do_relocation:
4821 r = _bfd_final_link_relocate (howto, input_bfd, input_section,
4822 contents, rel->r_offset,
4823 relocation, rel->r_addend);
4824
4825 check_relocation_error:
4826 if (r != bfd_reloc_ok)
4827 {
4828 const char *name;
4829
4830 if (h != NULL)
4831 name = h->root.root.string;
4832 else
4833 {
4834 name = bfd_elf_string_from_elf_section (input_bfd,
4835 symtab_hdr->sh_link,
4836 sym->st_name);
4837 if (name == NULL)
4838 return FALSE;
4839 if (*name == '\0')
4840 name = bfd_section_name (input_bfd, sec);
4841 }
4842
4843 if (r == bfd_reloc_overflow)
4844 {
4845 if (! ((*info->callbacks->reloc_overflow)
4846 (info, (h ? &h->root : NULL), name, howto->name,
4847 (bfd_vma) 0, input_bfd, input_section,
4848 rel->r_offset)))
4849 return FALSE;
4850 }
4851 else
4852 {
4853 (*_bfd_error_handler)
4854 (_("%B(%A+0x%lx): reloc against `%s': error %d"),
4855 input_bfd, input_section,
4856 (long) rel->r_offset, name, (int) r);
4857 return FALSE;
4858 }
4859 }
4860 }
4861
4862 return TRUE;
4863 }
4864
4865 /* Finish up dynamic symbol handling. We set the contents of various
4866 dynamic sections here. */
4867
4868 static bfd_boolean
4869 elf_x86_64_finish_dynamic_symbol (bfd *output_bfd,
4870 struct bfd_link_info *info,
4871 struct elf_link_hash_entry *h,
4872 Elf_Internal_Sym *sym ATTRIBUTE_UNUSED)
4873 {
4874 struct elf_x86_64_link_hash_table *htab;
4875 const struct elf_x86_64_backend_data *abed;
4876 bfd_boolean use_plt_bnd;
4877 struct elf_x86_64_link_hash_entry *eh;
4878
4879 htab = elf_x86_64_hash_table (info);
4880 if (htab == NULL)
4881 return FALSE;
4882
4883 /* Use MPX backend data in case of BND relocation. Use the .plt.bnd
4884 section only if there is a .plt section. */
4885 use_plt_bnd = htab->elf.splt != NULL && htab->plt_bnd != NULL;
4886 abed = (use_plt_bnd
4887 ? &elf_x86_64_bnd_arch_bed
4888 : get_elf_x86_64_backend_data (output_bfd));
4889
4890 eh = (struct elf_x86_64_link_hash_entry *) h;
4891
4892 if (h->plt.offset != (bfd_vma) -1)
4893 {
4894 bfd_vma plt_index;
4895 bfd_vma got_offset, plt_offset, plt_plt_offset, plt_got_offset;
4896 bfd_vma plt_plt_insn_end, plt_got_insn_size;
4897 Elf_Internal_Rela rela;
4898 bfd_byte *loc;
4899 asection *plt, *gotplt, *relplt, *resolved_plt;
4900 const struct elf_backend_data *bed;
4901 bfd_vma plt_got_pcrel_offset;
4902
4903 /* When building a static executable, use .iplt, .igot.plt and
4904 .rela.iplt sections for STT_GNU_IFUNC symbols. */
4905 if (htab->elf.splt != NULL)
4906 {
4907 plt = htab->elf.splt;
4908 gotplt = htab->elf.sgotplt;
4909 relplt = htab->elf.srelplt;
4910 }
4911 else
4912 {
4913 plt = htab->elf.iplt;
4914 gotplt = htab->elf.igotplt;
4915 relplt = htab->elf.irelplt;
4916 }
4917
4918 /* This symbol has an entry in the procedure linkage table. Set
4919 it up. */
4920 if ((h->dynindx == -1
4921 && !((h->forced_local || info->executable)
4922 && h->def_regular
4923 && h->type == STT_GNU_IFUNC))
4924 || plt == NULL
4925 || gotplt == NULL
4926 || relplt == NULL)
4927 abort ();
4928
4929 /* Get the index in the procedure linkage table which
4930 corresponds to this symbol. This is the index of this symbol
4931 in all the symbols for which we are making plt entries. The
4932 first entry in the procedure linkage table is reserved.
4933
4934 Get the offset into the .got table of the entry that
4935 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
4936 bytes. The first three are reserved for the dynamic linker.
4937
4938 For static executables, we don't reserve anything. */
4939
4940 if (plt == htab->elf.splt)
4941 {
4942 got_offset = h->plt.offset / abed->plt_entry_size - 1;
4943 got_offset = (got_offset + 3) * GOT_ENTRY_SIZE;
4944 }
4945 else
4946 {
4947 got_offset = h->plt.offset / abed->plt_entry_size;
4948 got_offset = got_offset * GOT_ENTRY_SIZE;
4949 }
4950
4951 plt_plt_insn_end = abed->plt_plt_insn_end;
4952 plt_plt_offset = abed->plt_plt_offset;
4953 plt_got_insn_size = abed->plt_got_insn_size;
4954 plt_got_offset = abed->plt_got_offset;
4955 if (use_plt_bnd)
4956 {
4957 /* Use the second PLT with BND relocations. */
4958 const bfd_byte *plt_entry, *plt2_entry;
4959
4960 if (eh->has_bnd_reloc)
4961 {
4962 plt_entry = elf_x86_64_bnd_plt_entry;
4963 plt2_entry = elf_x86_64_bnd_plt2_entry;
4964 }
4965 else
4966 {
4967 plt_entry = elf_x86_64_legacy_plt_entry;
4968 plt2_entry = elf_x86_64_legacy_plt2_entry;
4969
4970 /* Subtract 1 since there is no BND prefix. */
4971 plt_plt_insn_end -= 1;
4972 plt_plt_offset -= 1;
4973 plt_got_insn_size -= 1;
4974 plt_got_offset -= 1;
4975 }
4976
4977 BFD_ASSERT (sizeof (elf_x86_64_bnd_plt_entry)
4978 == sizeof (elf_x86_64_legacy_plt_entry));
4979
4980 /* Fill in the entry in the procedure linkage table. */
4981 memcpy (plt->contents + h->plt.offset,
4982 plt_entry, sizeof (elf_x86_64_legacy_plt_entry));
4983 /* Fill in the entry in the second PLT. */
4984 memcpy (htab->plt_bnd->contents + eh->plt_bnd.offset,
4985 plt2_entry, sizeof (elf_x86_64_legacy_plt2_entry));
4986
4987 resolved_plt = htab->plt_bnd;
4988 plt_offset = eh->plt_bnd.offset;
4989 }
4990 else
4991 {
4992 /* Fill in the entry in the procedure linkage table. */
4993 memcpy (plt->contents + h->plt.offset, abed->plt_entry,
4994 abed->plt_entry_size);
4995
4996 resolved_plt = plt;
4997 plt_offset = h->plt.offset;
4998 }
4999
5000 /* Insert the relocation positions of the plt section. */
5001
5002 /* Store the PC-relative offset to the GOT entry, measured from
5003 the end of the instruction that refers to it. */
5004 plt_got_pcrel_offset = (gotplt->output_section->vma
5005 + gotplt->output_offset
5006 + got_offset
5007 - resolved_plt->output_section->vma
5008 - resolved_plt->output_offset
5009 - plt_offset
5010 - plt_got_insn_size);
5011
5012 /* Check PC-relative offset overflow in PLT entry. */
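/* (x + 0x80000000) > 0xffffffff rejects any value that does not fit
   in a signed 32-bit displacement. */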
5013 if ((plt_got_pcrel_offset + 0x80000000) > 0xffffffff)
5014 info->callbacks->einfo (_("%F%B: PC-relative offset overflow in PLT entry for `%s'\n"),
5015 output_bfd, h->root.root.string);
5016
5017 bfd_put_32 (output_bfd, plt_got_pcrel_offset,
5018 resolved_plt->contents + plt_offset + plt_got_offset);
5019
5020 /* Fill in the entry in the global offset table; initially this
5021 points to the second part of the PLT entry. */
5022 bfd_put_64 (output_bfd, (plt->output_section->vma
5023 + plt->output_offset
5024 + h->plt.offset + abed->plt_lazy_offset),
5025 gotplt->contents + got_offset);
5026
5027 /* Fill in the entry in the .rela.plt section. */
5028 rela.r_offset = (gotplt->output_section->vma
5029 + gotplt->output_offset
5030 + got_offset);
5031 if (h->dynindx == -1
5032 || ((info->executable
5033 || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
5034 && h->def_regular
5035 && h->type == STT_GNU_IFUNC))
5036 {
5037 /* If an STT_GNU_IFUNC symbol is locally defined, generate
5038 R_X86_64_IRELATIVE instead of R_X86_64_JUMP_SLOT. */
5039 rela.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
5040 rela.r_addend = (h->root.u.def.value
5041 + h->root.u.def.section->output_section->vma
5042 + h->root.u.def.section->output_offset);
5043 /* R_X86_64_IRELATIVE comes last. */
5044 plt_index = htab->next_irelative_index--;
5045 }
5046 else
5047 {
5048 rela.r_info = htab->r_info (h->dynindx, R_X86_64_JUMP_SLOT);
5049 rela.r_addend = 0;
5050 plt_index = htab->next_jump_slot_index++;
5051 }
5052
5053 /* Don't fill PLT entry for static executables. */
5054 if (plt == htab->elf.splt)
5055 {
5056 bfd_vma plt0_offset = h->plt.offset + plt_plt_insn_end;
5057
5058 /* Put relocation index. */
5059 bfd_put_32 (output_bfd, plt_index,
5060 plt->contents + h->plt.offset + abed->plt_reloc_offset);
5061
5062 /* Put offset for jmp .PLT0 and check for overflow. We don't
5063 check relocation index for overflow since branch displacement
5064 will overflow first. */
5065 if (plt0_offset > 0x80000000)
5066 info->callbacks->einfo (_("%F%B: branch displacement overflow in PLT entry for `%s'\n"),
5067 output_bfd, h->root.root.string);
5068 bfd_put_32 (output_bfd, - plt0_offset,
5069 plt->contents + h->plt.offset + plt_plt_offset);
5070 }
5071
5072 bed = get_elf_backend_data (output_bfd);
5073 loc = relplt->contents + plt_index * bed->s->sizeof_rela;
5074 bed->s->swap_reloca_out (output_bfd, &rela, loc);
5075 }
5076 else if (eh->plt_got.offset != (bfd_vma) -1)
5077 {
5078 bfd_vma got_offset, plt_offset, plt_got_offset, plt_got_insn_size;
5079 asection *plt, *got;
5080 bfd_boolean got_after_plt;
5081 int32_t got_pcrel_offset;
5082 const bfd_byte *got_plt_entry;
5083
5084 /* Set the entry in the GOT procedure linkage table. */
5085 plt = htab->plt_got;
5086 got = htab->elf.sgot;
5087 got_offset = h->got.offset;
5088
5089 if (got_offset == (bfd_vma) -1
5090 || h->type == STT_GNU_IFUNC
5091 || plt == NULL
5092 || got == NULL)
5093 abort ();
5094
5095 /* Use the second PLT entry template for the GOT PLT since they
5096 are identical. */
5097 plt_got_insn_size = elf_x86_64_bnd_arch_bed.plt_got_insn_size;
5098 plt_got_offset = elf_x86_64_bnd_arch_bed.plt_got_offset;
5099 if (eh->has_bnd_reloc)
5100 got_plt_entry = elf_x86_64_bnd_plt2_entry;
5101 else
5102 {
5103 got_plt_entry = elf_x86_64_legacy_plt2_entry;
5104
5105 /* Subtract 1 since there is no BND prefix. */
5106 plt_got_insn_size -= 1;
5107 plt_got_offset -= 1;
5108 }
5109
5110 /* Fill in the entry in the GOT procedure linkage table. */
5111 plt_offset = eh->plt_got.offset;
5112 memcpy (plt->contents + plt_offset,
5113 got_plt_entry, sizeof (elf_x86_64_legacy_plt2_entry));
5114
5115 /* Store the PC-relative offset to the GOT entry, measured from
5116 the end of the instruction that refers to it. */
5117 got_pcrel_offset = (got->output_section->vma
5118 + got->output_offset
5119 + got_offset
5120 - plt->output_section->vma
5121 - plt->output_offset
5122 - plt_offset
5123 - plt_got_insn_size);
5124
5125 /* Check PC-relative offset overflow in GOT PLT entry. */
5126 got_after_plt = got->output_section->vma > plt->output_section->vma;
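/* got_pcrel_offset is only 32 bits wide, so overflow shows up as a
   sign flip: when .got is above .plt the true displacement is
   positive, and vice versa. */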
5127 if ((got_after_plt && got_pcrel_offset < 0)
5128 || (!got_after_plt && got_pcrel_offset > 0))
5129 info->callbacks->einfo (_("%F%B: PC-relative offset overflow in GOT PLT entry for `%s'\n"),
5130 output_bfd, h->root.root.string);
5131
5132 bfd_put_32 (output_bfd, got_pcrel_offset,
5133 plt->contents + plt_offset + plt_got_offset);
5134 }
5135
5136 if (!h->def_regular
5137 && (h->plt.offset != (bfd_vma) -1
5138 || eh->plt_got.offset != (bfd_vma) -1))
5139 {
5140 /* Mark the symbol as undefined, rather than as defined in
5141 the .plt section. Leave the value if there were any
5142 relocations where pointer equality matters (this is a clue
5143 for the dynamic linker, to make function pointer
5144 comparisons work between an application and shared
5145 library), otherwise set it to zero. If a function is only
5146 called from a binary, there is no need to slow down
5147 shared libraries because of that. */
5148 sym->st_shndx = SHN_UNDEF;
5149 if (!h->pointer_equality_needed)
5150 sym->st_value = 0;
5151 }
5152
5153 if (h->got.offset != (bfd_vma) -1
5154 && ! GOT_TLS_GD_ANY_P (elf_x86_64_hash_entry (h)->tls_type)
5155 && elf_x86_64_hash_entry (h)->tls_type != GOT_TLS_IE)
5156 {
5157 Elf_Internal_Rela rela;
5158
5159 /* This symbol has an entry in the global offset table. Set it
5160 up. */
5161 if (htab->elf.sgot == NULL || htab->elf.srelgot == NULL)
5162 abort ();
5163
5164 rela.r_offset = (htab->elf.sgot->output_section->vma
5165 + htab->elf.sgot->output_offset
5166 + (h->got.offset &~ (bfd_vma) 1));
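/* The low bit of h->got.offset is the "already initialized" flag
   set in relocate_section; GOT offsets are multiples of 8, so masking
   it off recovers the real offset. */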
5167
5168 /* If this is a static link, or it is a -Bsymbolic link and the
5169 symbol is defined locally or was forced to be local because
5170 of a version file, we just want to emit a RELATIVE reloc.
5171 The entry in the global offset table will already have been
5172 initialized in the relocate_section function. */
5173 if (h->def_regular
5174 && h->type == STT_GNU_IFUNC)
5175 {
5176 if (info->shared)
5177 {
5178 /* Generate R_X86_64_GLOB_DAT. */
5179 goto do_glob_dat;
5180 }
5181 else
5182 {
5183 asection *plt;
5184
5185 if (!h->pointer_equality_needed)
5186 abort ();
5187
5188 /* For a non-shared object, we can't use .got.plt, which
5189 contains the real function address, if we need pointer
5190 equality. We load the GOT entry with the PLT entry. */
5191 plt = htab->elf.splt ? htab->elf.splt : htab->elf.iplt;
5192 bfd_put_64 (output_bfd, (plt->output_section->vma
5193 + plt->output_offset
5194 + h->plt.offset),
5195 htab->elf.sgot->contents + h->got.offset);
5196 return TRUE;
5197 }
5198 }
5199 else if (info->shared
5200 && SYMBOL_REFERENCES_LOCAL (info, h))
5201 {
5202 if (!h->def_regular)
5203 return FALSE;
5204 BFD_ASSERT((h->got.offset & 1) != 0);
5205 rela.r_info = htab->r_info (0, R_X86_64_RELATIVE);
5206 rela.r_addend = (h->root.u.def.value
5207 + h->root.u.def.section->output_section->vma
5208 + h->root.u.def.section->output_offset);
5209 }
5210 else
5211 {
5212 BFD_ASSERT((h->got.offset & 1) == 0);
5213 do_glob_dat:
5214 bfd_put_64 (output_bfd, (bfd_vma) 0,
5215 htab->elf.sgot->contents + h->got.offset);
5216 rela.r_info = htab->r_info (h->dynindx, R_X86_64_GLOB_DAT);
5217 rela.r_addend = 0;
5218 }
5219
5220 elf_append_rela (output_bfd, htab->elf.srelgot, &rela);
5221 }
5222
5223 if (h->needs_copy)
5224 {
5225 Elf_Internal_Rela rela;
5226
5227 /* This symbol needs a copy reloc. Set it up. */
5228
5229 if (h->dynindx == -1
5230 || (h->root.type != bfd_link_hash_defined
5231 && h->root.type != bfd_link_hash_defweak)
5232 || htab->srelbss == NULL)
5233 abort ();
5234
5235 rela.r_offset = (h->root.u.def.value
5236 + h->root.u.def.section->output_section->vma
5237 + h->root.u.def.section->output_offset);
5238 rela.r_info = htab->r_info (h->dynindx, R_X86_64_COPY);
5239 rela.r_addend = 0;
5240 elf_append_rela (output_bfd, htab->srelbss, &rela);
5241 }
5242
5243 return TRUE;
5244 }
5245
5246 /* Finish up local dynamic symbol handling. We set the contents of
5247 various dynamic sections here. */
5248
5249 static bfd_boolean
5250 elf_x86_64_finish_local_dynamic_symbol (void **slot, void *inf)
5251 {
5252 struct elf_link_hash_entry *h
5253 = (struct elf_link_hash_entry *) *slot;
5254 struct bfd_link_info *info
5255 = (struct bfd_link_info *) inf;
5256
5257 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
5258 info, h, NULL);
5259 }
5260
5261 /* Used to decide how to sort relocs in an optimal manner for the
5262 dynamic linker, before writing them out. */
5263
5264 static enum elf_reloc_type_class
5265 elf_x86_64_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
5266 const asection *rel_sec ATTRIBUTE_UNUSED,
5267 const Elf_Internal_Rela *rela)
5268 {
5269 switch ((int) ELF32_R_TYPE (rela->r_info))
5270 {
5271 case R_X86_64_RELATIVE:
5272 case R_X86_64_RELATIVE64:
5273 return reloc_class_relative;
5274 case R_X86_64_JUMP_SLOT:
5275 return reloc_class_plt;
5276 case R_X86_64_COPY:
5277 return reloc_class_copy;
5278 default:
5279 return reloc_class_normal;
5280 }
5281 }
5282
5283 /* Finish up the dynamic sections. */
5284
5285 static bfd_boolean
5286 elf_x86_64_finish_dynamic_sections (bfd *output_bfd,
5287 struct bfd_link_info *info)
5288 {
5289 struct elf_x86_64_link_hash_table *htab;
5290 bfd *dynobj;
5291 asection *sdyn;
5292 const struct elf_x86_64_backend_data *abed;
5293
5294 htab = elf_x86_64_hash_table (info);
5295 if (htab == NULL)
5296 return FALSE;
5297
5298 /* Use MPX backend data in case of BND relocation. Use the .plt.bnd
5299 section only if there is a .plt section. */
5300 abed = (htab->elf.splt != NULL && htab->plt_bnd != NULL
5301 ? &elf_x86_64_bnd_arch_bed
5302 : get_elf_x86_64_backend_data (output_bfd));
5303
5304 dynobj = htab->elf.dynobj;
5305 sdyn = bfd_get_linker_section (dynobj, ".dynamic");
5306
5307 if (htab->elf.dynamic_sections_created)
5308 {
5309 bfd_byte *dyncon, *dynconend;
5310 const struct elf_backend_data *bed;
5311 bfd_size_type sizeof_dyn;
5312
5313 if (sdyn == NULL || htab->elf.sgot == NULL)
5314 abort ();
5315
5316 bed = get_elf_backend_data (dynobj);
5317 sizeof_dyn = bed->s->sizeof_dyn;
5318 dyncon = sdyn->contents;
5319 dynconend = sdyn->contents + sdyn->size;
5320 for (; dyncon < dynconend; dyncon += sizeof_dyn)
5321 {
5322 Elf_Internal_Dyn dyn;
5323 asection *s;
5324
5325 (*bed->s->swap_dyn_in) (dynobj, dyncon, &dyn);
5326
5327 switch (dyn.d_tag)
5328 {
5329 default:
5330 continue;
5331
5332 case DT_PLTGOT:
5333 s = htab->elf.sgotplt;
5334 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
5335 break;
5336
5337 case DT_JMPREL:
5338 dyn.d_un.d_ptr = htab->elf.srelplt->output_section->vma;
5339 break;
5340
5341 case DT_PLTRELSZ:
5342 s = htab->elf.srelplt->output_section;
5343 dyn.d_un.d_val = s->size;
5344 break;
5345
5346 case DT_RELASZ:
5347 /* The procedure linkage table relocs (DT_JMPREL) should
5348 not be included in the overall relocs (DT_RELA).
5349 Therefore, we override the DT_RELASZ entry here to
5350 make it not include the JMPREL relocs. Since the
5351 linker script arranges for .rela.plt to follow all
5352 other relocation sections, we don't have to worry
5353 about changing the DT_RELA entry. */
5354 if (htab->elf.srelplt != NULL)
5355 {
5356 s = htab->elf.srelplt->output_section;
5357 dyn.d_un.d_val -= s->size;
5358 }
5359 break;
5360
5361 case DT_TLSDESC_PLT:
5362 s = htab->elf.splt;
5363 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
5364 + htab->tlsdesc_plt;
5365 break;
5366
5367 case DT_TLSDESC_GOT:
5368 s = htab->elf.sgot;
5369 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
5370 + htab->tlsdesc_got;
5371 break;
5372 }
5373
5374 (*bed->s->swap_dyn_out) (output_bfd, &dyn, dyncon);
5375 }
5376
5377 /* Fill in the special first entry in the procedure linkage table. */
5378 if (htab->elf.splt && htab->elf.splt->size > 0)
5379 {
5380 /* Fill in the first entry in the procedure linkage table. */
5381 memcpy (htab->elf.splt->contents,
5382 abed->plt0_entry, abed->plt_entry_size);
5383 /* Add the offset for pushq GOT+8(%rip); since the instruction
5384 uses 6 bytes, subtract this value. */
5385 bfd_put_32 (output_bfd,
5386 (htab->elf.sgotplt->output_section->vma
5387 + htab->elf.sgotplt->output_offset
5388 + 8
5389 - htab->elf.splt->output_section->vma
5390 - htab->elf.splt->output_offset
5391 - 6),
5392 htab->elf.splt->contents + abed->plt0_got1_offset);
5393 /* Add offset for the PC-relative instruction accessing GOT+16,
5394 subtracting the offset to the end of that instruction. */
5395 bfd_put_32 (output_bfd,
5396 (htab->elf.sgotplt->output_section->vma
5397 + htab->elf.sgotplt->output_offset
5398 + 16
5399 - htab->elf.splt->output_section->vma
5400 - htab->elf.splt->output_offset
5401 - abed->plt0_got2_insn_end),
5402 htab->elf.splt->contents + abed->plt0_got2_offset);
5403
5404 elf_section_data (htab->elf.splt->output_section)
5405 ->this_hdr.sh_entsize = abed->plt_entry_size;
5406
5407 if (htab->tlsdesc_plt)
5408 {
5409 bfd_put_64 (output_bfd, (bfd_vma) 0,
5410 htab->elf.sgot->contents + htab->tlsdesc_got);
5411
5412 memcpy (htab->elf.splt->contents + htab->tlsdesc_plt,
5413 abed->plt0_entry, abed->plt_entry_size);
5414
5415 /* Add the offset for pushq GOT+8(%rip); since the
5416 instruction uses 6 bytes, subtract this value. */
5417 bfd_put_32 (output_bfd,
5418 (htab->elf.sgotplt->output_section->vma
5419 + htab->elf.sgotplt->output_offset
5420 + 8
5421 - htab->elf.splt->output_section->vma
5422 - htab->elf.splt->output_offset
5423 - htab->tlsdesc_plt
5424 - 6),
5425 htab->elf.splt->contents
5426 + htab->tlsdesc_plt + abed->plt0_got1_offset);
5427 /* Add the offset for the PC-relative instruction accessing GOT+TDG,
5428 where TDG stands for htab->tlsdesc_got, subtracting the offset
5429 to the end of that instruction. */
5430 bfd_put_32 (output_bfd,
5431 (htab->elf.sgot->output_section->vma
5432 + htab->elf.sgot->output_offset
5433 + htab->tlsdesc_got
5434 - htab->elf.splt->output_section->vma
5435 - htab->elf.splt->output_offset
5436 - htab->tlsdesc_plt
5437 - abed->plt0_got2_insn_end),
5438 htab->elf.splt->contents
5439 + htab->tlsdesc_plt + abed->plt0_got2_offset);
5440 }
5441 }
5442 }
5443
5444 if (htab->plt_bnd != NULL)
5445 elf_section_data (htab->plt_bnd->output_section)
5446 ->this_hdr.sh_entsize = sizeof (elf_x86_64_bnd_plt2_entry);
5447
5448 if (htab->elf.sgotplt)
5449 {
5450 if (bfd_is_abs_section (htab->elf.sgotplt->output_section))
5451 {
5452 (*_bfd_error_handler)
5453 (_("discarded output section: `%A'"), htab->elf.sgotplt);
5454 return FALSE;
5455 }
5456
5457 /* Fill in the first three entries in the global offset table. */
5458 if (htab->elf.sgotplt->size > 0)
5459 {
5460 /* Set the first entry in the global offset table to the address of
5461 the dynamic section. */
5462 if (sdyn == NULL)
5463 bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents);
5464 else
5465 bfd_put_64 (output_bfd,
5466 sdyn->output_section->vma + sdyn->output_offset,
5467 htab->elf.sgotplt->contents);
5468 /* Write GOT[1] and GOT[2], needed for the dynamic linker. */
5469 bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents + GOT_ENTRY_SIZE);
5470 bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents + GOT_ENTRY_SIZE*2);
5471 }
5472
5473 elf_section_data (htab->elf.sgotplt->output_section)->this_hdr.sh_entsize =
5474 GOT_ENTRY_SIZE;
5475 }
5476
5477 /* Adjust .eh_frame for .plt section. */
5478 if (htab->plt_eh_frame != NULL
5479 && htab->plt_eh_frame->contents != NULL)
5480 {
5481 if (htab->elf.splt != NULL
5482 && htab->elf.splt->size != 0
5483 && (htab->elf.splt->flags & SEC_EXCLUDE) == 0
5484 && htab->elf.splt->output_section != NULL
5485 && htab->plt_eh_frame->output_section != NULL)
5486 {
5487 bfd_vma plt_start = htab->elf.splt->output_section->vma;
5488 bfd_vma eh_frame_start = htab->plt_eh_frame->output_section->vma
5489 + htab->plt_eh_frame->output_offset
5490 + PLT_FDE_START_OFFSET;
5491 bfd_put_signed_32 (dynobj, plt_start - eh_frame_start,
5492 htab->plt_eh_frame->contents
5493 + PLT_FDE_START_OFFSET);
5494 }
5495 if (htab->plt_eh_frame->sec_info_type == SEC_INFO_TYPE_EH_FRAME)
5496 {
5497 if (! _bfd_elf_write_section_eh_frame (output_bfd, info,
5498 htab->plt_eh_frame,
5499 htab->plt_eh_frame->contents))
5500 return FALSE;
5501 }
5502 }
5503
5504 if (htab->elf.sgot && htab->elf.sgot->size > 0)
5505 elf_section_data (htab->elf.sgot->output_section)->this_hdr.sh_entsize
5506 = GOT_ENTRY_SIZE;
5507
5508 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
5509 htab_traverse (htab->loc_hash_table,
5510 elf_x86_64_finish_local_dynamic_symbol,
5511 info);
5512
5513 return TRUE;
5514 }
5515
5516 /* Return an array of PLT entry symbol values. */
5517
5518 static bfd_vma *
5519 elf_x86_64_get_plt_sym_val (bfd *abfd, asymbol **dynsyms, asection *plt,
5520 asection *relplt)
5521 {
5522 bfd_boolean (*slurp_relocs) (bfd *, asection *, asymbol **, bfd_boolean);
5523 arelent *p;
5524 long count, i;
5525 bfd_vma *plt_sym_val;
5526 bfd_vma plt_offset;
5527 bfd_byte *plt_contents;
5528 const struct elf_x86_64_backend_data *bed;
5529 Elf_Internal_Shdr *hdr;
5530 asection *plt_bnd;
5531
5532 /* Get the .plt section contents. PLT passed down may point to the
5533 .plt.bnd section. Make sure that PLT always points to the .plt
5534 section. */
5535 plt_bnd = bfd_get_section_by_name (abfd, ".plt.bnd");
5536 if (plt_bnd)
5537 {
5538 if (plt != plt_bnd)
5539 abort ();
5540 plt = bfd_get_section_by_name (abfd, ".plt");
5541 if (plt == NULL)
5542 abort ();
5543 bed = &elf_x86_64_bnd_arch_bed;
5544 }
5545 else
5546 bed = get_elf_x86_64_backend_data (abfd);
5547
5548 plt_contents = (bfd_byte *) bfd_malloc (plt->size);
5549 if (plt_contents == NULL)
5550 return NULL;
5551 if (!bfd_get_section_contents (abfd, (asection *) plt,
5552 plt_contents, 0, plt->size))
5553 {
5554 bad_return:
5555 free (plt_contents);
5556 return NULL;
5557 }
5558
5559 slurp_relocs = get_elf_backend_data (abfd)->s->slurp_reloc_table;
5560 if (! (*slurp_relocs) (abfd, relplt, dynsyms, TRUE))
5561 goto bad_return;
5562
5563 hdr = &elf_section_data (relplt)->this_hdr;
5564 count = relplt->size / hdr->sh_entsize;
5565
5566 plt_sym_val = (bfd_vma *) bfd_malloc (sizeof (bfd_vma) * count);
5567 if (plt_sym_val == NULL)
5568 goto bad_return;
5569
5570 for (i = 0; i < count; i++)
5571 plt_sym_val[i] = -1;
5572
5573 plt_offset = bed->plt_entry_size;
5574 p = relplt->relocation;
5575 for (i = 0; i < count; i++, p++)
5576 {
5577 long reloc_index;
5578
5579 /* Skip unknown relocation. */
5580 if (p->howto == NULL)
5581 continue;
5582
5583 if (p->howto->type != R_X86_64_JUMP_SLOT
5584 && p->howto->type != R_X86_64_IRELATIVE)
5585 continue;
5586
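/* Read the dynamic relocation index back out of the PLT entry:
   plt_reloc_offset is the offset, within each entry layout, of the
   32-bit immediate of the pushq that pushes the relocation index for
   the lazy-binding stub.  */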
5587 reloc_index = H_GET_32 (abfd, (plt_contents + plt_offset
5588 + bed->plt_reloc_offset));
5589 if (reloc_index >= count)
5590 abort ();
5591 if (plt_bnd)
5592 {
5593 /* This is the index in .plt section. */
5594 long plt_index = plt_offset / bed->plt_entry_size;
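/* Entry 0 of .plt is the lazy-resolution stub and has no slot in
   .plt.bnd, hence the "- 1" below.  */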
5595 /* Store VMA + the offset in .plt.bnd section. */
5596 plt_sym_val[reloc_index] =
5597 (plt_bnd->vma
5598 + (plt_index - 1) * sizeof (elf_x86_64_legacy_plt2_entry));
5599 }
5600 else
5601 plt_sym_val[reloc_index] = plt->vma + plt_offset;
5602 plt_offset += bed->plt_entry_size;
5603 }
5604
5605 free (plt_contents);
5606
5607 return plt_sym_val;
5608 }
5609
5610 /* Similar to _bfd_elf_get_synthetic_symtab, with .plt.bnd section
5611 support. */
5612
5613 static long
5614 elf_x86_64_get_synthetic_symtab (bfd *abfd,
5615 long symcount,
5616 asymbol **syms,
5617 long dynsymcount,
5618 asymbol **dynsyms,
5619 asymbol **ret)
5620 {
5621 /* Pass the .plt.bnd section to _bfd_elf_ifunc_get_synthetic_symtab
5622 as PLT if it exists. */
5623 asection *plt = bfd_get_section_by_name (abfd, ".plt.bnd");
5624 if (plt == NULL)
5625 plt = bfd_get_section_by_name (abfd, ".plt");
5626 return _bfd_elf_ifunc_get_synthetic_symtab (abfd, symcount, syms,
5627 dynsymcount, dynsyms, ret,
5628 plt,
5629 elf_x86_64_get_plt_sym_val);
5630 }
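/* A rough sketch of how this is reached (names and counts below are
   illustrative only): disassemblers such as objdump ask for synthetic
   symbols through the target vector, e.g.

     asymbol *synths;
     long n = bfd_get_synthetic_symtab (abfd, symcount, syms,
                                        dynsymcount, dynsyms, &synths);

   which for this target resolves to elf_x86_64_get_synthetic_symtab
   above.  */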
5631
5632 /* Handle an x86-64 specific section when reading an object file. This
5633 is called when elfcode.h finds a section with an unknown type. */
5634
5635 static bfd_boolean
5636 elf_x86_64_section_from_shdr (bfd *abfd, Elf_Internal_Shdr *hdr,
5637 const char *name, int shindex)
5638 {
5639 if (hdr->sh_type != SHT_X86_64_UNWIND)
5640 return FALSE;
5641
5642 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
5643 return FALSE;
5644
5645 return TRUE;
5646 }
5647
5648 /* Hook called by the linker routine which adds symbols from an object
5649 file. We use it to put SHN_X86_64_LCOMMON items in .lbss, instead
5650 of .bss. */
5651
5652 static bfd_boolean
5653 elf_x86_64_add_symbol_hook (bfd *abfd,
5654 struct bfd_link_info *info,
5655 Elf_Internal_Sym *sym,
5656 const char **namep ATTRIBUTE_UNUSED,
5657 flagword *flagsp ATTRIBUTE_UNUSED,
5658 asection **secp,
5659 bfd_vma *valp)
5660 {
5661 asection *lcomm;
5662
5663 switch (sym->st_shndx)
5664 {
5665 case SHN_X86_64_LCOMMON:
5666 lcomm = bfd_get_section_by_name (abfd, "LARGE_COMMON");
5667 if (lcomm == NULL)
5668 {
5669 lcomm = bfd_make_section_with_flags (abfd,
5670 "LARGE_COMMON",
5671 (SEC_ALLOC
5672 | SEC_IS_COMMON
5673 | SEC_LINKER_CREATED));
5674 if (lcomm == NULL)
5675 return FALSE;
5676 elf_section_flags (lcomm) |= SHF_X86_64_LARGE;
5677 }
5678 *secp = lcomm;
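/* As with ordinary SHN_COMMON symbols, BFD records a common symbol's
   size in its value field.  */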
5679 *valp = sym->st_size;
5680 return TRUE;
5681 }
5682
5683 if ((ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC
5684 || ELF_ST_BIND (sym->st_info) == STB_GNU_UNIQUE)
5685 && (abfd->flags & DYNAMIC) == 0
5686 && bfd_get_flavour (info->output_bfd) == bfd_target_elf_flavour)
5687 elf_tdata (info->output_bfd)->has_gnu_symbols = TRUE;
5688
5689 return TRUE;
5690 }
5691
5692
5693 /* Given a BFD section, try to locate the corresponding ELF section
5694 index. */
5695
5696 static bfd_boolean
5697 elf_x86_64_elf_section_from_bfd_section (bfd *abfd ATTRIBUTE_UNUSED,
5698 asection *sec, int *index_return)
5699 {
5700 if (sec == &_bfd_elf_large_com_section)
5701 {
5702 *index_return = SHN_X86_64_LCOMMON;
5703 return TRUE;
5704 }
5705 return FALSE;
5706 }
5707
5708 /* Process a symbol. */
5709
5710 static void
5711 elf_x86_64_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED,
5712 asymbol *asym)
5713 {
5714 elf_symbol_type *elfsym = (elf_symbol_type *) asym;
5715
5716 switch (elfsym->internal_elf_sym.st_shndx)
5717 {
5718 case SHN_X86_64_LCOMMON:
5719 asym->section = &_bfd_elf_large_com_section;
5720 asym->value = elfsym->internal_elf_sym.st_size;
5721 /* A common symbol doesn't set BSF_GLOBAL.  */
5722 asym->flags &= ~BSF_GLOBAL;
5723 break;
5724 }
5725 }
5726
5727 static bfd_boolean
5728 elf_x86_64_common_definition (Elf_Internal_Sym *sym)
5729 {
5730 return (sym->st_shndx == SHN_COMMON
5731 || sym->st_shndx == SHN_X86_64_LCOMMON);
5732 }
5733
5734 static unsigned int
5735 elf_x86_64_common_section_index (asection *sec)
5736 {
5737 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
5738 return SHN_COMMON;
5739 else
5740 return SHN_X86_64_LCOMMON;
5741 }
5742
5743 static asection *
5744 elf_x86_64_common_section (asection *sec)
5745 {
5746 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
5747 return bfd_com_section_ptr;
5748 else
5749 return &_bfd_elf_large_com_section;
5750 }
5751
5752 static bfd_boolean
5753 elf_x86_64_merge_symbol (struct elf_link_hash_entry *h,
5754 const Elf_Internal_Sym *sym,
5755 asection **psec,
5756 bfd_boolean newdef,
5757 bfd_boolean olddef,
5758 bfd *oldbfd,
5759 const asection *oldsec)
5760 {
5761 /* A normal common symbol and a large common symbol result in a
5762 normal common symbol. We turn the large common symbol into a
5763 normal one. */
5764 if (!olddef
5765 && h->root.type == bfd_link_hash_common
5766 && !newdef
5767 && bfd_is_com_section (*psec)
5768 && oldsec != *psec)
5769 {
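/* Two directions to normalize: if the new symbol is a normal common
   and the old one lives in a large common section, move the existing
   entry into an ordinary COMMON section; if the new symbol is a large
   common and the old one is a normal common, place the new symbol in
   the normal common section instead.  */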
5770 if (sym->st_shndx == SHN_COMMON
5771 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) != 0)
5772 {
5773 h->root.u.c.p->section
5774 = bfd_make_section_old_way (oldbfd, "COMMON");
5775 h->root.u.c.p->section->flags = SEC_ALLOC;
5776 }
5777 else if (sym->st_shndx == SHN_X86_64_LCOMMON
5778 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) == 0)
5779 *psec = bfd_com_section_ptr;
5780 }
5781
5782 return TRUE;
5783 }
5784
5785 static int
5786 elf_x86_64_additional_program_headers (bfd *abfd,
5787 struct bfd_link_info *info ATTRIBUTE_UNUSED)
5788 {
5789 asection *s;
5790 int count = 0;
5791
5792 /* Check to see if we need a large readonly segment. */
5793 s = bfd_get_section_by_name (abfd, ".lrodata");
5794 if (s && (s->flags & SEC_LOAD))
5795 count++;
5796
5797 /* Check to see if we need a large data segment.  Since the .lbss
5798 section is placed right after the .bss section, there should be no
5799 need for a large data segment just because of .lbss.  */
5800 s = bfd_get_section_by_name (abfd, ".ldata");
5801 if (s && (s->flags & SEC_LOAD))
5802 count++;
5803
5804 return count;
5805 }
5806
5807 /* Return TRUE if symbol should be hashed in the `.gnu.hash' section. */
5808
5809 static bfd_boolean
5810 elf_x86_64_hash_symbol (struct elf_link_hash_entry *h)
5811 {
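/* A symbol that only has a PLT entry, is not defined in a regular
   object and does not need pointer equality is never looked up by
   name at run time, so it can be left out of .gnu.hash.  */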
5812 if (h->plt.offset != (bfd_vma) -1
5813 && !h->def_regular
5814 && !h->pointer_equality_needed)
5815 return FALSE;
5816
5817 return _bfd_elf_hash_symbol (h);
5818 }
5819
5820 /* Return TRUE iff relocations for INPUT are compatible with OUTPUT. */
5821
5822 static bfd_boolean
5823 elf_x86_64_relocs_compatible (const bfd_target *input,
5824 const bfd_target *output)
5825 {
5826 return ((xvec_get_elf_backend_data (input)->s->elfclass
5827 == xvec_get_elf_backend_data (output)->s->elfclass)
5828 && _bfd_elf_relocs_compatible (input, output));
5829 }
5830
5831 static const struct bfd_elf_special_section
5832 elf_x86_64_special_sections[]=
5833 {
5834 { STRING_COMMA_LEN (".gnu.linkonce.lb"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5835 { STRING_COMMA_LEN (".gnu.linkonce.lr"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
5836 { STRING_COMMA_LEN (".gnu.linkonce.lt"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_EXECINSTR + SHF_X86_64_LARGE},
5837 { STRING_COMMA_LEN (".lbss"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5838 { STRING_COMMA_LEN (".ldata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5839 { STRING_COMMA_LEN (".lrodata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
5840 { NULL, 0, 0, 0, 0 }
5841 };
5842
5843 #define TARGET_LITTLE_SYM x86_64_elf64_vec
5844 #define TARGET_LITTLE_NAME "elf64-x86-64"
5845 #define ELF_ARCH bfd_arch_i386
5846 #define ELF_TARGET_ID X86_64_ELF_DATA
5847 #define ELF_MACHINE_CODE EM_X86_64
5848 #define ELF_MAXPAGESIZE 0x200000
5849 #define ELF_MINPAGESIZE 0x1000
5850 #define ELF_COMMONPAGESIZE 0x1000
5851
5852 #define elf_backend_can_gc_sections 1
5853 #define elf_backend_can_refcount 1
5854 #define elf_backend_want_got_plt 1
5855 #define elf_backend_plt_readonly 1
5856 #define elf_backend_want_plt_sym 0
5857 #define elf_backend_got_header_size (GOT_ENTRY_SIZE*3)
5858 #define elf_backend_rela_normal 1
5859 #define elf_backend_plt_alignment 4
5860
5861 #define elf_info_to_howto elf_x86_64_info_to_howto
5862
5863 #define bfd_elf64_bfd_link_hash_table_create \
5864 elf_x86_64_link_hash_table_create
5865 #define bfd_elf64_bfd_reloc_type_lookup elf_x86_64_reloc_type_lookup
5866 #define bfd_elf64_bfd_reloc_name_lookup \
5867 elf_x86_64_reloc_name_lookup
5868
5869 #define elf_backend_adjust_dynamic_symbol elf_x86_64_adjust_dynamic_symbol
5870 #define elf_backend_relocs_compatible elf_x86_64_relocs_compatible
5871 #define elf_backend_check_relocs elf_x86_64_check_relocs
5872 #define elf_backend_copy_indirect_symbol elf_x86_64_copy_indirect_symbol
5873 #define elf_backend_create_dynamic_sections elf_x86_64_create_dynamic_sections
5874 #define elf_backend_finish_dynamic_sections elf_x86_64_finish_dynamic_sections
5875 #define elf_backend_finish_dynamic_symbol elf_x86_64_finish_dynamic_symbol
5876 #define elf_backend_gc_mark_hook elf_x86_64_gc_mark_hook
5877 #define elf_backend_gc_sweep_hook elf_x86_64_gc_sweep_hook
5878 #define elf_backend_grok_prstatus elf_x86_64_grok_prstatus
5879 #define elf_backend_grok_psinfo elf_x86_64_grok_psinfo
5880 #ifdef CORE_HEADER
5881 #define elf_backend_write_core_note elf_x86_64_write_core_note
5882 #endif
5883 #define elf_backend_reloc_type_class elf_x86_64_reloc_type_class
5884 #define elf_backend_relocate_section elf_x86_64_relocate_section
5885 #define elf_backend_size_dynamic_sections elf_x86_64_size_dynamic_sections
5886 #define elf_backend_always_size_sections elf_x86_64_always_size_sections
5887 #define elf_backend_init_index_section _bfd_elf_init_1_index_section
5888 #define elf_backend_object_p elf64_x86_64_elf_object_p
5889 #define bfd_elf64_mkobject elf_x86_64_mkobject
5890 #define bfd_elf64_get_synthetic_symtab elf_x86_64_get_synthetic_symtab
5891
5892 #define elf_backend_section_from_shdr \
5893 elf_x86_64_section_from_shdr
5894
5895 #define elf_backend_section_from_bfd_section \
5896 elf_x86_64_elf_section_from_bfd_section
5897 #define elf_backend_add_symbol_hook \
5898 elf_x86_64_add_symbol_hook
5899 #define elf_backend_symbol_processing \
5900 elf_x86_64_symbol_processing
5901 #define elf_backend_common_section_index \
5902 elf_x86_64_common_section_index
5903 #define elf_backend_common_section \
5904 elf_x86_64_common_section
5905 #define elf_backend_common_definition \
5906 elf_x86_64_common_definition
5907 #define elf_backend_merge_symbol \
5908 elf_x86_64_merge_symbol
5909 #define elf_backend_special_sections \
5910 elf_x86_64_special_sections
5911 #define elf_backend_additional_program_headers \
5912 elf_x86_64_additional_program_headers
5913 #define elf_backend_hash_symbol \
5914 elf_x86_64_hash_symbol
5915
5916 #include "elf64-target.h"
5917
5918 /* FreeBSD support. */
5919
5920 #undef TARGET_LITTLE_SYM
5921 #define TARGET_LITTLE_SYM x86_64_elf64_fbsd_vec
5922 #undef TARGET_LITTLE_NAME
5923 #define TARGET_LITTLE_NAME "elf64-x86-64-freebsd"
5924
5925 #undef ELF_OSABI
5926 #define ELF_OSABI ELFOSABI_FREEBSD
5927
5928 #undef elf64_bed
5929 #define elf64_bed elf64_x86_64_fbsd_bed
5930
5931 #include "elf64-target.h"
5932
5933 /* Solaris 2 support. */
5934
5935 #undef TARGET_LITTLE_SYM
5936 #define TARGET_LITTLE_SYM x86_64_elf64_sol2_vec
5937 #undef TARGET_LITTLE_NAME
5938 #define TARGET_LITTLE_NAME "elf64-x86-64-sol2"
5939
5940 /* Restore default: we cannot use ELFOSABI_SOLARIS, otherwise ELFOSABI_NONE
5941 objects won't be recognized. */
5942 #undef ELF_OSABI
5943
5944 #undef elf64_bed
5945 #define elf64_bed elf64_x86_64_sol2_bed
5946
5947 /* The 64-bit static TLS arena size is rounded to the nearest 16-byte
5948 boundary. */
5949 #undef elf_backend_static_tls_alignment
5950 #define elf_backend_static_tls_alignment 16
5951
5952 /* The Solaris 2 ABI requires a plt symbol on all platforms.
5953
5954 Cf. Linker and Libraries Guide, Ch. 2, Link-Editor, Generating the Output
5955 File, p.63. */
5956 #undef elf_backend_want_plt_sym
5957 #define elf_backend_want_plt_sym 1
5958
5959 #include "elf64-target.h"
5960
5961 /* Native Client support. */
5962
5963 static bfd_boolean
5964 elf64_x86_64_nacl_elf_object_p (bfd *abfd)
5965 {
5966 /* Set the right machine number for a NaCl x86-64 ELF64 file. */
5967 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64_nacl);
5968 return TRUE;
5969 }
5970
5971 #undef TARGET_LITTLE_SYM
5972 #define TARGET_LITTLE_SYM x86_64_elf64_nacl_vec
5973 #undef TARGET_LITTLE_NAME
5974 #define TARGET_LITTLE_NAME "elf64-x86-64-nacl"
5975 #undef elf64_bed
5976 #define elf64_bed elf64_x86_64_nacl_bed
5977
5978 #undef ELF_MAXPAGESIZE
5979 #undef ELF_MINPAGESIZE
5980 #undef ELF_COMMONPAGESIZE
5981 #define ELF_MAXPAGESIZE 0x10000
5982 #define ELF_MINPAGESIZE 0x10000
5983 #define ELF_COMMONPAGESIZE 0x10000
5984
5985 /* Restore defaults. */
5986 #undef ELF_OSABI
5987 #undef elf_backend_static_tls_alignment
5988 #undef elf_backend_want_plt_sym
5989 #define elf_backend_want_plt_sym 0
5990
5991 /* NaCl uses substantially different PLT entries for the same effects. */
5992
5993 #undef elf_backend_plt_alignment
5994 #define elf_backend_plt_alignment 5
5995 #define NACL_PLT_ENTRY_SIZE 64
5996 #define NACLMASK 0xe0 /* 32-byte alignment mask. */
5997
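/* NaCl requires indirect branch targets to be 32-byte aligned, so the
   PLT code below masks off the low five bits of the destination
   (NACLMASK, i.e. and $-32) and adds the sandbox base in %r15 before
   jumping.  */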
5998 static const bfd_byte elf_x86_64_nacl_plt0_entry[NACL_PLT_ENTRY_SIZE] =
5999 {
6000 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
6001 0x4c, 0x8b, 0x1d, 16, 0, 0, 0, /* mov GOT+16(%rip), %r11 */
6002 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
6003 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
6004 0x41, 0xff, 0xe3, /* jmpq *%r11 */
6005
6006 /* 9-byte nop sequence to pad out to the next 32-byte boundary. */
6007 0x66, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw 0x0(%rax,%rax,1) */
6008
6009 /* 32 bytes of nop to pad out to the standard size. */
6010 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */
6011 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
6012 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */
6013 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
6014 0x66, /* excess data32 prefix */
6015 0x90 /* nop */
6016 };
6017
6018 static const bfd_byte elf_x86_64_nacl_plt_entry[NACL_PLT_ENTRY_SIZE] =
6019 {
6020 0x4c, 0x8b, 0x1d, 0, 0, 0, 0, /* mov name@GOTPCREL(%rip),%r11 */
6021 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
6022 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
6023 0x41, 0xff, 0xe3, /* jmpq *%r11 */
6024
6025 /* 15-byte nop sequence to pad out to the next 32-byte boundary. */
6026 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */
6027 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
6028
6029 /* Lazy GOT entries point here (32-byte aligned). */
6030 0x68, /* pushq immediate */
6031 0, 0, 0, 0, /* replaced with index into relocation table. */
6032 0xe9, /* jmp relative */
6033 0, 0, 0, 0, /* replaced with offset to start of .plt0. */
6034
6035 /* 22 bytes of nop to pad out to the standard size. */
6036 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */
6037 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
6038 0x0f, 0x1f, 0x80, 0, 0, 0, 0, /* nopl 0x0(%rax) */
6039 };
6040
6041 /* .eh_frame covering the .plt section. */
6042
6043 static const bfd_byte elf_x86_64_nacl_eh_frame_plt[] =
6044 {
6045 #if (PLT_CIE_LENGTH != 20 \
6046 || PLT_FDE_LENGTH != 36 \
6047 || PLT_FDE_START_OFFSET != 4 + PLT_CIE_LENGTH + 8 \
6048 || PLT_FDE_LEN_OFFSET != 4 + PLT_CIE_LENGTH + 12)
6049 # error "Need elf_x86_64_backend_data parameters for eh_frame_plt offsets!"
6050 #endif
6051 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
6052 0, 0, 0, 0, /* CIE ID */
6053 1, /* CIE version */
6054 'z', 'R', 0, /* Augmentation string */
6055 1, /* Code alignment factor */
6056 0x78, /* Data alignment factor */
6057 16, /* Return address column */
6058 1, /* Augmentation size */
6059 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
6060 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
6061 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
6062 DW_CFA_nop, DW_CFA_nop,
6063
6064 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
6065 PLT_CIE_LENGTH + 8, 0, 0, 0,/* CIE pointer */
6066 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
6067 0, 0, 0, 0, /* .plt size goes here */
6068 0, /* Augmentation size */
6069 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
6070 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
6071 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
6072 DW_CFA_advance_loc + 58, /* DW_CFA_advance_loc: 58 to __PLT__+64 */
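/* The expression below evaluates to rsp + 8 + (((rip & 63) >= 37) << 3),
   i.e. rsp + 8, plus a further 8 once the address within the 64-byte
   PLT entry is past the pushq of the relocation index in the lazy
   stub, which grows the stack by 8.  */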
6073 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
6074 13, /* Block length */
6075 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
6076 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
6077 DW_OP_const1u, 63, DW_OP_and, DW_OP_const1u, 37, DW_OP_ge,
6078 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
6079 DW_CFA_nop, DW_CFA_nop
6080 };
6081
6082 static const struct elf_x86_64_backend_data elf_x86_64_nacl_arch_bed =
6083 {
6084 elf_x86_64_nacl_plt0_entry, /* plt0_entry */
6085 elf_x86_64_nacl_plt_entry, /* plt_entry */
6086 NACL_PLT_ENTRY_SIZE, /* plt_entry_size */
6087 2, /* plt0_got1_offset */
6088 9, /* plt0_got2_offset */
6089 13, /* plt0_got2_insn_end */
6090 3, /* plt_got_offset */
6091 33, /* plt_reloc_offset */
6092 38, /* plt_plt_offset */
6093 7, /* plt_got_insn_size */
6094 42, /* plt_plt_insn_end */
6095 32, /* plt_lazy_offset */
6096 elf_x86_64_nacl_eh_frame_plt, /* eh_frame_plt */
6097 sizeof (elf_x86_64_nacl_eh_frame_plt), /* eh_frame_plt_size */
6098 };
6099
6100 #undef elf_backend_arch_data
6101 #define elf_backend_arch_data &elf_x86_64_nacl_arch_bed
6102
6103 #undef elf_backend_object_p
6104 #define elf_backend_object_p elf64_x86_64_nacl_elf_object_p
6105 #undef elf_backend_modify_segment_map
6106 #define elf_backend_modify_segment_map nacl_modify_segment_map
6107 #undef elf_backend_modify_program_headers
6108 #define elf_backend_modify_program_headers nacl_modify_program_headers
6109 #undef elf_backend_final_write_processing
6110 #define elf_backend_final_write_processing nacl_final_write_processing
6111
6112 #include "elf64-target.h"
6113
6114 /* Native Client x32 support. */
6115
6116 static bfd_boolean
6117 elf32_x86_64_nacl_elf_object_p (bfd *abfd)
6118 {
6119 /* Set the right machine number for a NaCl x86-64 ELF32 file. */
6120 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32_nacl);
6121 return TRUE;
6122 }
6123
6124 #undef TARGET_LITTLE_SYM
6125 #define TARGET_LITTLE_SYM x86_64_elf32_nacl_vec
6126 #undef TARGET_LITTLE_NAME
6127 #define TARGET_LITTLE_NAME "elf32-x86-64-nacl"
6128 #undef elf32_bed
6129 #define elf32_bed elf32_x86_64_nacl_bed
6130
6131 #define bfd_elf32_bfd_link_hash_table_create \
6132 elf_x86_64_link_hash_table_create
6133 #define bfd_elf32_bfd_reloc_type_lookup \
6134 elf_x86_64_reloc_type_lookup
6135 #define bfd_elf32_bfd_reloc_name_lookup \
6136 elf_x86_64_reloc_name_lookup
6137 #define bfd_elf32_mkobject \
6138 elf_x86_64_mkobject
6139 #define bfd_elf32_get_synthetic_symtab \
6140 elf_x86_64_get_synthetic_symtab
6141
6142 #undef elf_backend_object_p
6143 #define elf_backend_object_p \
6144 elf32_x86_64_nacl_elf_object_p
6145
6146 #undef elf_backend_bfd_from_remote_memory
6147 #define elf_backend_bfd_from_remote_memory \
6148 _bfd_elf32_bfd_from_remote_memory
6149
6150 #undef elf_backend_size_info
6151 #define elf_backend_size_info \
6152 _bfd_elf32_size_info
6153
6154 #include "elf32-target.h"
6155
6156 /* Restore defaults. */
6157 #undef elf_backend_object_p
6158 #define elf_backend_object_p elf64_x86_64_elf_object_p
6159 #undef elf_backend_bfd_from_remote_memory
6160 #undef elf_backend_size_info
6161 #undef elf_backend_modify_segment_map
6162 #undef elf_backend_modify_program_headers
6163 #undef elf_backend_final_write_processing
6164
6165 /* Intel L1OM support. */
6166
6167 static bfd_boolean
6168 elf64_l1om_elf_object_p (bfd *abfd)
6169 {
6170 /* Set the right machine number for an L1OM elf64 file. */
6171 bfd_default_set_arch_mach (abfd, bfd_arch_l1om, bfd_mach_l1om);
6172 return TRUE;
6173 }
6174
6175 #undef TARGET_LITTLE_SYM
6176 #define TARGET_LITTLE_SYM l1om_elf64_vec
6177 #undef TARGET_LITTLE_NAME
6178 #define TARGET_LITTLE_NAME "elf64-l1om"
6179 #undef ELF_ARCH
6180 #define ELF_ARCH bfd_arch_l1om
6181
6182 #undef ELF_MACHINE_CODE
6183 #define ELF_MACHINE_CODE EM_L1OM
6184
6185 #undef ELF_OSABI
6186
6187 #undef elf64_bed
6188 #define elf64_bed elf64_l1om_bed
6189
6190 #undef elf_backend_object_p
6191 #define elf_backend_object_p elf64_l1om_elf_object_p
6192
6193 /* Restore defaults. */
6194 #undef ELF_MAXPAGESIZE
6195 #undef ELF_MINPAGESIZE
6196 #undef ELF_COMMONPAGESIZE
6197 #define ELF_MAXPAGESIZE 0x200000
6198 #define ELF_MINPAGESIZE 0x1000
6199 #define ELF_COMMONPAGESIZE 0x1000
6200 #undef elf_backend_plt_alignment
6201 #define elf_backend_plt_alignment 4
6202 #undef elf_backend_arch_data
6203 #define elf_backend_arch_data &elf_x86_64_arch_bed
6204
6205 #include "elf64-target.h"
6206
6207 /* FreeBSD L1OM support. */
6208
6209 #undef TARGET_LITTLE_SYM
6210 #define TARGET_LITTLE_SYM l1om_elf64_fbsd_vec
6211 #undef TARGET_LITTLE_NAME
6212 #define TARGET_LITTLE_NAME "elf64-l1om-freebsd"
6213
6214 #undef ELF_OSABI
6215 #define ELF_OSABI ELFOSABI_FREEBSD
6216
6217 #undef elf64_bed
6218 #define elf64_bed elf64_l1om_fbsd_bed
6219
6220 #include "elf64-target.h"
6221
6222 /* Intel K1OM support. */
6223
6224 static bfd_boolean
6225 elf64_k1om_elf_object_p (bfd *abfd)
6226 {
6227 /* Set the right machine number for a K1OM elf64 file. */
6228 bfd_default_set_arch_mach (abfd, bfd_arch_k1om, bfd_mach_k1om);
6229 return TRUE;
6230 }
6231
6232 #undef TARGET_LITTLE_SYM
6233 #define TARGET_LITTLE_SYM k1om_elf64_vec
6234 #undef TARGET_LITTLE_NAME
6235 #define TARGET_LITTLE_NAME "elf64-k1om"
6236 #undef ELF_ARCH
6237 #define ELF_ARCH bfd_arch_k1om
6238
6239 #undef ELF_MACHINE_CODE
6240 #define ELF_MACHINE_CODE EM_K1OM
6241
6242 #undef ELF_OSABI
6243
6244 #undef elf64_bed
6245 #define elf64_bed elf64_k1om_bed
6246
6247 #undef elf_backend_object_p
6248 #define elf_backend_object_p elf64_k1om_elf_object_p
6249
6250 #undef elf_backend_static_tls_alignment
6251
6252 #undef elf_backend_want_plt_sym
6253 #define elf_backend_want_plt_sym 0
6254
6255 #include "elf64-target.h"
6256
6257 /* FreeBSD K1OM support. */
6258
6259 #undef TARGET_LITTLE_SYM
6260 #define TARGET_LITTLE_SYM k1om_elf64_fbsd_vec
6261 #undef TARGET_LITTLE_NAME
6262 #define TARGET_LITTLE_NAME "elf64-k1om-freebsd"
6263
6264 #undef ELF_OSABI
6265 #define ELF_OSABI ELFOSABI_FREEBSD
6266
6267 #undef elf64_bed
6268 #define elf64_bed elf64_k1om_fbsd_bed
6269
6270 #include "elf64-target.h"
6271
6272 /* 32bit x86-64 support. */
6273
6274 #undef TARGET_LITTLE_SYM
6275 #define TARGET_LITTLE_SYM x86_64_elf32_vec
6276 #undef TARGET_LITTLE_NAME
6277 #define TARGET_LITTLE_NAME "elf32-x86-64"
6278 #undef elf32_bed
6279
6280 #undef ELF_ARCH
6281 #define ELF_ARCH bfd_arch_i386
6282
6283 #undef ELF_MACHINE_CODE
6284 #define ELF_MACHINE_CODE EM_X86_64
6285
6286 #undef ELF_OSABI
6287
6288 #undef elf_backend_object_p
6289 #define elf_backend_object_p \
6290 elf32_x86_64_elf_object_p
6291
6292 #undef elf_backend_bfd_from_remote_memory
6293 #define elf_backend_bfd_from_remote_memory \
6294 _bfd_elf32_bfd_from_remote_memory
6295
6296 #undef elf_backend_size_info
6297 #define elf_backend_size_info \
6298 _bfd_elf32_size_info
6299
6300 #include "elf32-target.h"