1 /* X86-64 specific support for ELF
2 Copyright (C) 2000-2015 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka <jh@suse.cz>.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
21
22 #include "sysdep.h"
23 #include "bfd.h"
24 #include "bfdlink.h"
25 #include "libbfd.h"
26 #include "elf-bfd.h"
27 #include "elf-nacl.h"
28 #include "bfd_stdint.h"
29 #include "objalloc.h"
30 #include "hashtab.h"
31 #include "dwarf2.h"
32 #include "libiberty.h"
33
34 #include "opcode/i386.h"
35 #include "elf/x86-64.h"
36
37 #ifdef CORE_HEADER
38 #include <stdarg.h>
39 #include CORE_HEADER
40 #endif
41
42 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
43 #define MINUS_ONE (~ (bfd_vma) 0)
44
45 /* Since both 32-bit and 64-bit x86-64 encode the relocation type in an
46 identical manner, we use ELF32_R_TYPE instead of ELF64_R_TYPE to get
47 the relocation type. We also use ELF_ST_TYPE instead of ELF64_ST_TYPE
48 since they are the same. */
49
50 #define ABI_64_P(abfd) \
51 (get_elf_backend_data (abfd)->s->elfclass == ELFCLASS64)
52
53 /* The relocation "howto" table. Order of fields:
54 type, rightshift, size, bitsize, pc_relative, bitpos, complain_on_overflow,
55 special_function, name, partial_inplace, src_mask, dst_mask, pcrel_offset. */
56 static reloc_howto_type x86_64_elf_howto_table[] =
57 {
58 HOWTO(R_X86_64_NONE, 0, 3, 0, FALSE, 0, complain_overflow_dont,
59 bfd_elf_generic_reloc, "R_X86_64_NONE", FALSE, 0x00000000, 0x00000000,
60 FALSE),
61 HOWTO(R_X86_64_64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
62 bfd_elf_generic_reloc, "R_X86_64_64", FALSE, MINUS_ONE, MINUS_ONE,
63 FALSE),
64 HOWTO(R_X86_64_PC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
65 bfd_elf_generic_reloc, "R_X86_64_PC32", FALSE, 0xffffffff, 0xffffffff,
66 TRUE),
67 HOWTO(R_X86_64_GOT32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
68 bfd_elf_generic_reloc, "R_X86_64_GOT32", FALSE, 0xffffffff, 0xffffffff,
69 FALSE),
70 HOWTO(R_X86_64_PLT32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
71 bfd_elf_generic_reloc, "R_X86_64_PLT32", FALSE, 0xffffffff, 0xffffffff,
72 TRUE),
73 HOWTO(R_X86_64_COPY, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
74 bfd_elf_generic_reloc, "R_X86_64_COPY", FALSE, 0xffffffff, 0xffffffff,
75 FALSE),
76 HOWTO(R_X86_64_GLOB_DAT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
77 bfd_elf_generic_reloc, "R_X86_64_GLOB_DAT", FALSE, MINUS_ONE,
78 MINUS_ONE, FALSE),
79 HOWTO(R_X86_64_JUMP_SLOT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
80 bfd_elf_generic_reloc, "R_X86_64_JUMP_SLOT", FALSE, MINUS_ONE,
81 MINUS_ONE, FALSE),
82 HOWTO(R_X86_64_RELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
83 bfd_elf_generic_reloc, "R_X86_64_RELATIVE", FALSE, MINUS_ONE,
84 MINUS_ONE, FALSE),
85 HOWTO(R_X86_64_GOTPCREL, 0, 2, 32, TRUE, 0, complain_overflow_signed,
86 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL", FALSE, 0xffffffff,
87 0xffffffff, TRUE),
88 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
89 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
90 FALSE),
91 HOWTO(R_X86_64_32S, 0, 2, 32, FALSE, 0, complain_overflow_signed,
92 bfd_elf_generic_reloc, "R_X86_64_32S", FALSE, 0xffffffff, 0xffffffff,
93 FALSE),
94 HOWTO(R_X86_64_16, 0, 1, 16, FALSE, 0, complain_overflow_bitfield,
95 bfd_elf_generic_reloc, "R_X86_64_16", FALSE, 0xffff, 0xffff, FALSE),
96 HOWTO(R_X86_64_PC16,0, 1, 16, TRUE, 0, complain_overflow_bitfield,
97 bfd_elf_generic_reloc, "R_X86_64_PC16", FALSE, 0xffff, 0xffff, TRUE),
98 HOWTO(R_X86_64_8, 0, 0, 8, FALSE, 0, complain_overflow_bitfield,
99 bfd_elf_generic_reloc, "R_X86_64_8", FALSE, 0xff, 0xff, FALSE),
100 HOWTO(R_X86_64_PC8, 0, 0, 8, TRUE, 0, complain_overflow_signed,
101 bfd_elf_generic_reloc, "R_X86_64_PC8", FALSE, 0xff, 0xff, TRUE),
102 HOWTO(R_X86_64_DTPMOD64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
103 bfd_elf_generic_reloc, "R_X86_64_DTPMOD64", FALSE, MINUS_ONE,
104 MINUS_ONE, FALSE),
105 HOWTO(R_X86_64_DTPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
106 bfd_elf_generic_reloc, "R_X86_64_DTPOFF64", FALSE, MINUS_ONE,
107 MINUS_ONE, FALSE),
108 HOWTO(R_X86_64_TPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
109 bfd_elf_generic_reloc, "R_X86_64_TPOFF64", FALSE, MINUS_ONE,
110 MINUS_ONE, FALSE),
111 HOWTO(R_X86_64_TLSGD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
112 bfd_elf_generic_reloc, "R_X86_64_TLSGD", FALSE, 0xffffffff,
113 0xffffffff, TRUE),
114 HOWTO(R_X86_64_TLSLD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
115 bfd_elf_generic_reloc, "R_X86_64_TLSLD", FALSE, 0xffffffff,
116 0xffffffff, TRUE),
117 HOWTO(R_X86_64_DTPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
118 bfd_elf_generic_reloc, "R_X86_64_DTPOFF32", FALSE, 0xffffffff,
119 0xffffffff, FALSE),
120 HOWTO(R_X86_64_GOTTPOFF, 0, 2, 32, TRUE, 0, complain_overflow_signed,
121 bfd_elf_generic_reloc, "R_X86_64_GOTTPOFF", FALSE, 0xffffffff,
122 0xffffffff, TRUE),
123 HOWTO(R_X86_64_TPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
124 bfd_elf_generic_reloc, "R_X86_64_TPOFF32", FALSE, 0xffffffff,
125 0xffffffff, FALSE),
126 HOWTO(R_X86_64_PC64, 0, 4, 64, TRUE, 0, complain_overflow_bitfield,
127 bfd_elf_generic_reloc, "R_X86_64_PC64", FALSE, MINUS_ONE, MINUS_ONE,
128 TRUE),
129 HOWTO(R_X86_64_GOTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
130 bfd_elf_generic_reloc, "R_X86_64_GOTOFF64",
131 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
132 HOWTO(R_X86_64_GOTPC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
133 bfd_elf_generic_reloc, "R_X86_64_GOTPC32",
134 FALSE, 0xffffffff, 0xffffffff, TRUE),
135 HOWTO(R_X86_64_GOT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
136 bfd_elf_generic_reloc, "R_X86_64_GOT64", FALSE, MINUS_ONE, MINUS_ONE,
137 FALSE),
138 HOWTO(R_X86_64_GOTPCREL64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
139 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL64", FALSE, MINUS_ONE,
140 MINUS_ONE, TRUE),
141 HOWTO(R_X86_64_GOTPC64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
142 bfd_elf_generic_reloc, "R_X86_64_GOTPC64",
143 FALSE, MINUS_ONE, MINUS_ONE, TRUE),
144 HOWTO(R_X86_64_GOTPLT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
145 bfd_elf_generic_reloc, "R_X86_64_GOTPLT64", FALSE, MINUS_ONE,
146 MINUS_ONE, FALSE),
147 HOWTO(R_X86_64_PLTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
148 bfd_elf_generic_reloc, "R_X86_64_PLTOFF64", FALSE, MINUS_ONE,
149 MINUS_ONE, FALSE),
150 HOWTO(R_X86_64_SIZE32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
151 bfd_elf_generic_reloc, "R_X86_64_SIZE32", FALSE, 0xffffffff, 0xffffffff,
152 FALSE),
153 HOWTO(R_X86_64_SIZE64, 0, 4, 64, FALSE, 0, complain_overflow_unsigned,
154 bfd_elf_generic_reloc, "R_X86_64_SIZE64", FALSE, MINUS_ONE, MINUS_ONE,
155 FALSE),
156 HOWTO(R_X86_64_GOTPC32_TLSDESC, 0, 2, 32, TRUE, 0,
157 complain_overflow_bitfield, bfd_elf_generic_reloc,
158 "R_X86_64_GOTPC32_TLSDESC",
159 FALSE, 0xffffffff, 0xffffffff, TRUE),
160 HOWTO(R_X86_64_TLSDESC_CALL, 0, 0, 0, FALSE, 0,
161 complain_overflow_dont, bfd_elf_generic_reloc,
162 "R_X86_64_TLSDESC_CALL",
163 FALSE, 0, 0, FALSE),
164 HOWTO(R_X86_64_TLSDESC, 0, 4, 64, FALSE, 0,
165 complain_overflow_bitfield, bfd_elf_generic_reloc,
166 "R_X86_64_TLSDESC",
167 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
168 HOWTO(R_X86_64_IRELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
169 bfd_elf_generic_reloc, "R_X86_64_IRELATIVE", FALSE, MINUS_ONE,
170 MINUS_ONE, FALSE),
171 HOWTO(R_X86_64_RELATIVE64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
172 bfd_elf_generic_reloc, "R_X86_64_RELATIVE64", FALSE, MINUS_ONE,
173 MINUS_ONE, FALSE),
174 HOWTO(R_X86_64_PC32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
175 bfd_elf_generic_reloc, "R_X86_64_PC32_BND", FALSE, 0xffffffff, 0xffffffff,
176 TRUE),
177 HOWTO(R_X86_64_PLT32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
178 bfd_elf_generic_reloc, "R_X86_64_PLT32_BND", FALSE, 0xffffffff, 0xffffffff,
179 TRUE),
180 HOWTO(R_X86_64_GOTPCRELX, 0, 2, 32, TRUE, 0, complain_overflow_signed,
181 bfd_elf_generic_reloc, "R_X86_64_GOTPCRELX", FALSE, 0xffffffff,
182 0xffffffff, TRUE),
183 HOWTO(R_X86_64_REX_GOTPCRELX, 0, 2, 32, TRUE, 0, complain_overflow_signed,
184 bfd_elf_generic_reloc, "R_X86_64_REX_GOTPCRELX", FALSE, 0xffffffff,
185 0xffffffff, TRUE),
186
187 /* We have a gap in the reloc numbers here.
188 R_X86_64_standard counts the number up to this point, and
189 R_X86_64_vt_offset is the value to subtract from a reloc type of
190 R_X86_64_GNU_VT* to form an index into this table. */
191 #define R_X86_64_standard (R_X86_64_REX_GOTPCRELX + 1)
192 #define R_X86_64_vt_offset (R_X86_64_GNU_VTINHERIT - R_X86_64_standard)
193
194 /* GNU extension to record C++ vtable hierarchy. */
195 HOWTO (R_X86_64_GNU_VTINHERIT, 0, 4, 0, FALSE, 0, complain_overflow_dont,
196 NULL, "R_X86_64_GNU_VTINHERIT", FALSE, 0, 0, FALSE),
197
198 /* GNU extension to record C++ vtable member usage. */
199 HOWTO (R_X86_64_GNU_VTENTRY, 0, 4, 0, FALSE, 0, complain_overflow_dont,
200 _bfd_elf_rel_vtable_reloc_fn, "R_X86_64_GNU_VTENTRY", FALSE, 0, 0,
201 FALSE),
202
203 /* Use complain_overflow_bitfield on R_X86_64_32 for x32. */
204 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
205 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
206 FALSE)
207 };
208
209 #define IS_X86_64_PCREL_TYPE(TYPE) \
210 ( ((TYPE) == R_X86_64_PC8) \
211 || ((TYPE) == R_X86_64_PC16) \
212 || ((TYPE) == R_X86_64_PC32) \
213 || ((TYPE) == R_X86_64_PC32_BND) \
214 || ((TYPE) == R_X86_64_PC64))
215
216 /* Map BFD relocs to the x86_64 elf relocs. */
217 struct elf_reloc_map
218 {
219 bfd_reloc_code_real_type bfd_reloc_val;
220 unsigned char elf_reloc_val;
221 };
222
223 static const struct elf_reloc_map x86_64_reloc_map[] =
224 {
225 { BFD_RELOC_NONE, R_X86_64_NONE, },
226 { BFD_RELOC_64, R_X86_64_64, },
227 { BFD_RELOC_32_PCREL, R_X86_64_PC32, },
228 { BFD_RELOC_X86_64_GOT32, R_X86_64_GOT32,},
229 { BFD_RELOC_X86_64_PLT32, R_X86_64_PLT32,},
230 { BFD_RELOC_X86_64_COPY, R_X86_64_COPY, },
231 { BFD_RELOC_X86_64_GLOB_DAT, R_X86_64_GLOB_DAT, },
232 { BFD_RELOC_X86_64_JUMP_SLOT, R_X86_64_JUMP_SLOT, },
233 { BFD_RELOC_X86_64_RELATIVE, R_X86_64_RELATIVE, },
234 { BFD_RELOC_X86_64_GOTPCREL, R_X86_64_GOTPCREL, },
235 { BFD_RELOC_32, R_X86_64_32, },
236 { BFD_RELOC_X86_64_32S, R_X86_64_32S, },
237 { BFD_RELOC_16, R_X86_64_16, },
238 { BFD_RELOC_16_PCREL, R_X86_64_PC16, },
239 { BFD_RELOC_8, R_X86_64_8, },
240 { BFD_RELOC_8_PCREL, R_X86_64_PC8, },
241 { BFD_RELOC_X86_64_DTPMOD64, R_X86_64_DTPMOD64, },
242 { BFD_RELOC_X86_64_DTPOFF64, R_X86_64_DTPOFF64, },
243 { BFD_RELOC_X86_64_TPOFF64, R_X86_64_TPOFF64, },
244 { BFD_RELOC_X86_64_TLSGD, R_X86_64_TLSGD, },
245 { BFD_RELOC_X86_64_TLSLD, R_X86_64_TLSLD, },
246 { BFD_RELOC_X86_64_DTPOFF32, R_X86_64_DTPOFF32, },
247 { BFD_RELOC_X86_64_GOTTPOFF, R_X86_64_GOTTPOFF, },
248 { BFD_RELOC_X86_64_TPOFF32, R_X86_64_TPOFF32, },
249 { BFD_RELOC_64_PCREL, R_X86_64_PC64, },
250 { BFD_RELOC_X86_64_GOTOFF64, R_X86_64_GOTOFF64, },
251 { BFD_RELOC_X86_64_GOTPC32, R_X86_64_GOTPC32, },
252 { BFD_RELOC_X86_64_GOT64, R_X86_64_GOT64, },
253 { BFD_RELOC_X86_64_GOTPCREL64,R_X86_64_GOTPCREL64, },
254 { BFD_RELOC_X86_64_GOTPC64, R_X86_64_GOTPC64, },
255 { BFD_RELOC_X86_64_GOTPLT64, R_X86_64_GOTPLT64, },
256 { BFD_RELOC_X86_64_PLTOFF64, R_X86_64_PLTOFF64, },
257 { BFD_RELOC_SIZE32, R_X86_64_SIZE32, },
258 { BFD_RELOC_SIZE64, R_X86_64_SIZE64, },
259 { BFD_RELOC_X86_64_GOTPC32_TLSDESC, R_X86_64_GOTPC32_TLSDESC, },
260 { BFD_RELOC_X86_64_TLSDESC_CALL, R_X86_64_TLSDESC_CALL, },
261 { BFD_RELOC_X86_64_TLSDESC, R_X86_64_TLSDESC, },
262 { BFD_RELOC_X86_64_IRELATIVE, R_X86_64_IRELATIVE, },
263 { BFD_RELOC_X86_64_PC32_BND, R_X86_64_PC32_BND, },
264 { BFD_RELOC_X86_64_PLT32_BND, R_X86_64_PLT32_BND, },
265 { BFD_RELOC_X86_64_GOTPCRELX, R_X86_64_GOTPCRELX, },
266 { BFD_RELOC_X86_64_REX_GOTPCRELX, R_X86_64_REX_GOTPCRELX, },
267 { BFD_RELOC_VTABLE_INHERIT, R_X86_64_GNU_VTINHERIT, },
268 { BFD_RELOC_VTABLE_ENTRY, R_X86_64_GNU_VTENTRY, },
269 };
270
271 static reloc_howto_type *
272 elf_x86_64_rtype_to_howto (bfd *abfd, unsigned r_type)
273 {
274 unsigned i;
275
276 if (r_type == (unsigned int) R_X86_64_32)
277 {
278 if (ABI_64_P (abfd))
279 i = r_type;
280 else
281 i = ARRAY_SIZE (x86_64_elf_howto_table) - 1;
282 }
283 else if (r_type < (unsigned int) R_X86_64_GNU_VTINHERIT
284 || r_type >= (unsigned int) R_X86_64_max)
285 {
286 if (r_type >= (unsigned int) R_X86_64_standard)
287 {
288 (*_bfd_error_handler) (_("%B: invalid relocation type %d"),
289 abfd, (int) r_type);
290 r_type = R_X86_64_NONE;
291 }
292 i = r_type;
293 }
294 else
295 i = r_type - (unsigned int) R_X86_64_vt_offset;
296 BFD_ASSERT (x86_64_elf_howto_table[i].type == r_type);
297 return &x86_64_elf_howto_table[i];
298 }
299
300 /* Given a BFD reloc type, return a HOWTO structure. */
301 static reloc_howto_type *
302 elf_x86_64_reloc_type_lookup (bfd *abfd,
303 bfd_reloc_code_real_type code)
304 {
305 unsigned int i;
306
307 for (i = 0; i < sizeof (x86_64_reloc_map) / sizeof (struct elf_reloc_map);
308 i++)
309 {
310 if (x86_64_reloc_map[i].bfd_reloc_val == code)
311 return elf_x86_64_rtype_to_howto (abfd,
312 x86_64_reloc_map[i].elf_reloc_val);
313 }
314 return NULL;
315 }
316
317 static reloc_howto_type *
318 elf_x86_64_reloc_name_lookup (bfd *abfd,
319 const char *r_name)
320 {
321 unsigned int i;
322
323 if (!ABI_64_P (abfd) && strcasecmp (r_name, "R_X86_64_32") == 0)
324 {
325 /* Get x32 R_X86_64_32. */
326 reloc_howto_type *reloc
327 = &x86_64_elf_howto_table[ARRAY_SIZE (x86_64_elf_howto_table) - 1];
328 BFD_ASSERT (reloc->type == (unsigned int) R_X86_64_32);
329 return reloc;
330 }
331
332 for (i = 0; i < ARRAY_SIZE (x86_64_elf_howto_table); i++)
333 if (x86_64_elf_howto_table[i].name != NULL
334 && strcasecmp (x86_64_elf_howto_table[i].name, r_name) == 0)
335 return &x86_64_elf_howto_table[i];
336
337 return NULL;
338 }
339
340 /* Given an x86_64 ELF reloc type, fill in an arelent structure. */
341
342 static void
343 elf_x86_64_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *cache_ptr,
344 Elf_Internal_Rela *dst)
345 {
346 unsigned r_type;
347
348 r_type = ELF32_R_TYPE (dst->r_info);
349 cache_ptr->howto = elf_x86_64_rtype_to_howto (abfd, r_type);
350 BFD_ASSERT (r_type == cache_ptr->howto->type);
351 }
352 \f
353 /* Support for core dump NOTE sections. */
354 static bfd_boolean
355 elf_x86_64_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
356 {
357 int offset;
358 size_t size;
359
360 switch (note->descsz)
361 {
362 default:
363 return FALSE;
364
365 case 296: /* sizeof(struct elf_prstatus) on Linux/x32 */
366 /* pr_cursig */
367 elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);
368
369 /* pr_pid */
370 elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);
371
372 /* pr_reg */
373 offset = 72;
374 size = 216;
375
376 break;
377
378 case 336: /* sizeof(struct elf_prstatus) on Linux/x86_64 */
379 /* pr_cursig */
380 elf_tdata (abfd)->core->signal
381 = bfd_get_16 (abfd, note->descdata + 12);
382
383 /* pr_pid */
384 elf_tdata (abfd)->core->lwpid
385 = bfd_get_32 (abfd, note->descdata + 32);
386
387 /* pr_reg */
388 offset = 112;
389 size = 216;
390
391 break;
392 }
393
394 /* Make a ".reg/999" section. */
395 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
396 size, note->descpos + offset);
397 }
398
399 static bfd_boolean
400 elf_x86_64_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
401 {
402 switch (note->descsz)
403 {
404 default:
405 return FALSE;
406
407 case 124: /* sizeof(struct elf_prpsinfo) on Linux/x32 */
408 elf_tdata (abfd)->core->pid
409 = bfd_get_32 (abfd, note->descdata + 12);
410 elf_tdata (abfd)->core->program
411 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
412 elf_tdata (abfd)->core->command
413 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
414 break;
415
416 case 136: /* sizeof(struct elf_prpsinfo) on Linux/x86_64 */
417 elf_tdata (abfd)->core->pid
418 = bfd_get_32 (abfd, note->descdata + 24);
419 elf_tdata (abfd)->core->program
420 = _bfd_elfcore_strndup (abfd, note->descdata + 40, 16);
421 elf_tdata (abfd)->core->command
422 = _bfd_elfcore_strndup (abfd, note->descdata + 56, 80);
423 }
424
425 /* Note that for some reason, a spurious space is tacked
426 onto the end of the args in some implementations (at least one,
427 anyway), so strip it off if it exists. */
428
429 {
430 char *command = elf_tdata (abfd)->core->command;
431 int n = strlen (command);
432
433 if (0 < n && command[n - 1] == ' ')
434 command[n - 1] = '\0';
435 }
436
437 return TRUE;
438 }
439
440 #ifdef CORE_HEADER
441 static char *
442 elf_x86_64_write_core_note (bfd *abfd, char *buf, int *bufsiz,
443 int note_type, ...)
444 {
445 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
446 va_list ap;
447 const char *fname, *psargs;
448 long pid;
449 int cursig;
450 const void *gregs;
451
452 switch (note_type)
453 {
454 default:
455 return NULL;
456
457 case NT_PRPSINFO:
458 va_start (ap, note_type);
459 fname = va_arg (ap, const char *);
460 psargs = va_arg (ap, const char *);
461 va_end (ap);
462
463 if (bed->s->elfclass == ELFCLASS32)
464 {
465 prpsinfo32_t data;
466 memset (&data, 0, sizeof (data));
467 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
468 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
469 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
470 &data, sizeof (data));
471 }
472 else
473 {
474 prpsinfo64_t data;
475 memset (&data, 0, sizeof (data));
476 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
477 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
478 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
479 &data, sizeof (data));
480 }
481 /* NOTREACHED */
482
483 case NT_PRSTATUS:
484 va_start (ap, note_type);
485 pid = va_arg (ap, long);
486 cursig = va_arg (ap, int);
487 gregs = va_arg (ap, const void *);
488 va_end (ap);
489
490 if (bed->s->elfclass == ELFCLASS32)
491 {
492 if (bed->elf_machine_code == EM_X86_64)
493 {
494 prstatusx32_t prstat;
495 memset (&prstat, 0, sizeof (prstat));
496 prstat.pr_pid = pid;
497 prstat.pr_cursig = cursig;
498 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
499 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
500 &prstat, sizeof (prstat));
501 }
502 else
503 {
504 prstatus32_t prstat;
505 memset (&prstat, 0, sizeof (prstat));
506 prstat.pr_pid = pid;
507 prstat.pr_cursig = cursig;
508 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
509 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
510 &prstat, sizeof (prstat));
511 }
512 }
513 else
514 {
515 prstatus64_t prstat;
516 memset (&prstat, 0, sizeof (prstat));
517 prstat.pr_pid = pid;
518 prstat.pr_cursig = cursig;
519 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
520 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
521 &prstat, sizeof (prstat));
522 }
523 }
524 /* NOTREACHED */
525 }
526 #endif
527 \f
528 /* Functions for the x86-64 ELF linker. */
529
530 /* The name of the dynamic interpreter. This is put in the .interp
531 section. */
532
533 #define ELF64_DYNAMIC_INTERPRETER "/lib/ld64.so.1"
534 #define ELF32_DYNAMIC_INTERPRETER "/lib/ldx32.so.1"
535
536 /* If ELIMINATE_COPY_RELOCS is non-zero, the linker will try to avoid
537 copying dynamic variables from a shared lib into an app's dynbss
538 section, and instead use a dynamic relocation to point into the
539 shared lib. */
540 #define ELIMINATE_COPY_RELOCS 1
541
542 /* The size in bytes of an entry in the global offset table. */
543
544 #define GOT_ENTRY_SIZE 8
545
546 /* The size in bytes of an entry in the procedure linkage table. */
547
548 #define PLT_ENTRY_SIZE 16
549
550 /* The first entry in a procedure linkage table looks like this. See the
551 SVR4 ABI i386 supplement and the x86-64 ABI to see how this works. */
552
553 static const bfd_byte elf_x86_64_plt0_entry[PLT_ENTRY_SIZE] =
554 {
555 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
556 0xff, 0x25, 16, 0, 0, 0, /* jmpq *GOT+16(%rip) */
557 0x0f, 0x1f, 0x40, 0x00 /* nopl 0(%rax) */
558 };
559
560 /* Subsequent entries in a procedure linkage table look like this. */
561
562 static const bfd_byte elf_x86_64_plt_entry[PLT_ENTRY_SIZE] =
563 {
564 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
565 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
566 0x68, /* pushq immediate */
567 0, 0, 0, 0, /* replaced with index into relocation table. */
568 0xe9, /* jmp relative */
569 0, 0, 0, 0 /* replaced with offset to start of .plt0. */
570 };
571
572 /* The first entry in a procedure linkage table with BND relocations
573 looks like this. */
574
575 static const bfd_byte elf_x86_64_bnd_plt0_entry[PLT_ENTRY_SIZE] =
576 {
577 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
578 0xf2, 0xff, 0x25, 16, 0, 0, 0, /* bnd jmpq *GOT+16(%rip) */
579 0x0f, 0x1f, 0 /* nopl (%rax) */
580 };
581
582 /* Subsequent entries for legacy branches in a procedure linkage table
583 with BND relocations look like this. */
584
585 static const bfd_byte elf_x86_64_legacy_plt_entry[PLT_ENTRY_SIZE] =
586 {
587 0x68, 0, 0, 0, 0, /* pushq immediate */
588 0xe9, 0, 0, 0, 0, /* jmpq relative */
589 0x66, 0x0f, 0x1f, 0x44, 0, 0 /* nopw (%rax,%rax,1) */
590 };
591
592 /* Subsequent entries for branches with BND prefix in a procedure linkage
593 table with BND relocations look like this. */
594
595 static const bfd_byte elf_x86_64_bnd_plt_entry[PLT_ENTRY_SIZE] =
596 {
597 0x68, 0, 0, 0, 0, /* pushq immediate */
598 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
599 0x0f, 0x1f, 0x44, 0, 0 /* nopl 0(%rax,%rax,1) */
600 };
601
602 /* Entries for legacy branches in the second procedure linkage table
603 look like this. */
604
605 static const bfd_byte elf_x86_64_legacy_plt2_entry[8] =
606 {
607 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
608 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
609 0x66, 0x90 /* xchg %ax,%ax */
610 };
611
612 /* Entries for branches with BND prefix in the second procedure linkage
613 table look like this. */
614
615 static const bfd_byte elf_x86_64_bnd_plt2_entry[8] =
616 {
617 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
618 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
619 0x90 /* nop */
620 };
621
622 /* .eh_frame covering the .plt section. */
623
624 static const bfd_byte elf_x86_64_eh_frame_plt[] =
625 {
626 #define PLT_CIE_LENGTH 20
627 #define PLT_FDE_LENGTH 36
628 #define PLT_FDE_START_OFFSET 4 + PLT_CIE_LENGTH + 8
629 #define PLT_FDE_LEN_OFFSET 4 + PLT_CIE_LENGTH + 12
630 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
631 0, 0, 0, 0, /* CIE ID */
632 1, /* CIE version */
633 'z', 'R', 0, /* Augmentation string */
634 1, /* Code alignment factor */
635 0x78, /* Data alignment factor */
636 16, /* Return address column */
637 1, /* Augmentation size */
638 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
639 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
640 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
641 DW_CFA_nop, DW_CFA_nop,
642
643 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
644 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
645 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
646 0, 0, 0, 0, /* .plt size goes here */
647 0, /* Augmentation size */
648 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
649 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
650 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
651 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
652 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
653 11, /* Block length */
654 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
655 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
656 DW_OP_lit15, DW_OP_and, DW_OP_lit11, DW_OP_ge,
657 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
658 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
659 };
660
661 /* Architecture-specific backend data for x86-64. */
662
663 struct elf_x86_64_backend_data
664 {
665 /* Templates for the initial PLT entry and for subsequent entries. */
666 const bfd_byte *plt0_entry;
667 const bfd_byte *plt_entry;
668 unsigned int plt_entry_size; /* Size of each PLT entry. */
669
670 /* Offsets into plt0_entry that are to be replaced with GOT[1] and GOT[2]. */
671 unsigned int plt0_got1_offset;
672 unsigned int plt0_got2_offset;
673
674 /* Offset of the end of the PC-relative instruction containing
675 plt0_got2_offset. */
676 unsigned int plt0_got2_insn_end;
677
678 /* Offsets into plt_entry that are to be replaced with... */
679 unsigned int plt_got_offset; /* ... address of this symbol in .got. */
680 unsigned int plt_reloc_offset; /* ... offset into relocation table. */
681 unsigned int plt_plt_offset; /* ... offset to start of .plt. */
682
683 /* Length of the PC-relative instruction containing plt_got_offset. */
684 unsigned int plt_got_insn_size;
685
686 /* Offset of the end of the PC-relative jump to plt0_entry. */
687 unsigned int plt_plt_insn_end;
688
689 /* Offset into plt_entry where the initial value of the GOT entry points. */
690 unsigned int plt_lazy_offset;
691
692 /* .eh_frame covering the .plt section. */
693 const bfd_byte *eh_frame_plt;
694 unsigned int eh_frame_plt_size;
695 };
696
697 #define get_elf_x86_64_arch_data(bed) \
698 ((const struct elf_x86_64_backend_data *) (bed)->arch_data)
699
700 #define get_elf_x86_64_backend_data(abfd) \
701 get_elf_x86_64_arch_data (get_elf_backend_data (abfd))
702
703 #define GET_PLT_ENTRY_SIZE(abfd) \
704 get_elf_x86_64_backend_data (abfd)->plt_entry_size
705
706 /* These are the standard parameters. */
707 static const struct elf_x86_64_backend_data elf_x86_64_arch_bed =
708 {
709 elf_x86_64_plt0_entry, /* plt0_entry */
710 elf_x86_64_plt_entry, /* plt_entry */
711 sizeof (elf_x86_64_plt_entry), /* plt_entry_size */
712 2, /* plt0_got1_offset */
713 8, /* plt0_got2_offset */
714 12, /* plt0_got2_insn_end */
715 2, /* plt_got_offset */
716 7, /* plt_reloc_offset */
717 12, /* plt_plt_offset */
718 6, /* plt_got_insn_size */
719 PLT_ENTRY_SIZE, /* plt_plt_insn_end */
720 6, /* plt_lazy_offset */
721 elf_x86_64_eh_frame_plt, /* eh_frame_plt */
722 sizeof (elf_x86_64_eh_frame_plt), /* eh_frame_plt_size */
723 };
724
725 static const struct elf_x86_64_backend_data elf_x86_64_bnd_arch_bed =
726 {
727 elf_x86_64_bnd_plt0_entry, /* plt0_entry */
728 elf_x86_64_bnd_plt_entry, /* plt_entry */
729 sizeof (elf_x86_64_bnd_plt_entry), /* plt_entry_size */
730 2, /* plt0_got1_offset */
731 1+8, /* plt0_got2_offset */
732 1+12, /* plt0_got2_insn_end */
733 1+2, /* plt_got_offset */
734 1, /* plt_reloc_offset */
735 7, /* plt_plt_offset */
736 1+6, /* plt_got_insn_size */
737 11, /* plt_plt_insn_end */
738 0, /* plt_lazy_offset */
739 elf_x86_64_eh_frame_plt, /* eh_frame_plt */
740 sizeof (elf_x86_64_eh_frame_plt), /* eh_frame_plt_size */
741 };
742
743 #define elf_backend_arch_data &elf_x86_64_arch_bed
744
745 /* x86-64 ELF linker hash entry. */
746
747 struct elf_x86_64_link_hash_entry
748 {
749 struct elf_link_hash_entry elf;
750
751 /* Track dynamic relocs copied for this symbol. */
752 struct elf_dyn_relocs *dyn_relocs;
753
754 #define GOT_UNKNOWN 0
755 #define GOT_NORMAL 1
756 #define GOT_TLS_GD 2
757 #define GOT_TLS_IE 3
758 #define GOT_TLS_GDESC 4
759 #define GOT_TLS_GD_BOTH_P(type) \
760 ((type) == (GOT_TLS_GD | GOT_TLS_GDESC))
761 #define GOT_TLS_GD_P(type) \
762 ((type) == GOT_TLS_GD || GOT_TLS_GD_BOTH_P (type))
763 #define GOT_TLS_GDESC_P(type) \
764 ((type) == GOT_TLS_GDESC || GOT_TLS_GD_BOTH_P (type))
765 #define GOT_TLS_GD_ANY_P(type) \
766 (GOT_TLS_GD_P (type) || GOT_TLS_GDESC_P (type))
767 unsigned char tls_type;
768
769 /* TRUE if a weak symbol with a real definition needs a copy reloc.
770 When there is a weak symbol with a real definition, the processor
771 independent code will have arranged for us to see the real
772 definition first. We need to copy the needs_copy bit from the
773 real definition and check it when allowing copy reloc in PIE. */
774 unsigned int needs_copy : 1;
775
776 /* TRUE if symbol has at least one BND relocation. */
777 unsigned int has_bnd_reloc : 1;
778
779 /* Reference count of C/C++ function pointer relocations in read-write
780 section which can be resolved at run-time. */
781 bfd_signed_vma func_pointer_refcount;
782
783 /* Information about the GOT PLT entry. Filled when there are both
784 GOT and PLT relocations against the same function. */
785 union gotplt_union plt_got;
786
787 /* Information about the second PLT entry. Filled when has_bnd_reloc is
788 set. */
789 union gotplt_union plt_bnd;
790
791 /* Offset of the GOTPLT entry reserved for the TLS descriptor,
792 starting at the end of the jump table. */
793 bfd_vma tlsdesc_got;
794 };
795
796 #define elf_x86_64_hash_entry(ent) \
797 ((struct elf_x86_64_link_hash_entry *)(ent))
798
799 struct elf_x86_64_obj_tdata
800 {
801 struct elf_obj_tdata root;
802
803 /* tls_type for each local got entry. */
804 char *local_got_tls_type;
805
806 /* GOTPLT entries for TLS descriptors. */
807 bfd_vma *local_tlsdesc_gotent;
808 };
809
810 #define elf_x86_64_tdata(abfd) \
811 ((struct elf_x86_64_obj_tdata *) (abfd)->tdata.any)
812
813 #define elf_x86_64_local_got_tls_type(abfd) \
814 (elf_x86_64_tdata (abfd)->local_got_tls_type)
815
816 #define elf_x86_64_local_tlsdesc_gotent(abfd) \
817 (elf_x86_64_tdata (abfd)->local_tlsdesc_gotent)
818
819 #define is_x86_64_elf(bfd) \
820 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
821 && elf_tdata (bfd) != NULL \
822 && elf_object_id (bfd) == X86_64_ELF_DATA)
823
824 static bfd_boolean
825 elf_x86_64_mkobject (bfd *abfd)
826 {
827 return bfd_elf_allocate_object (abfd, sizeof (struct elf_x86_64_obj_tdata),
828 X86_64_ELF_DATA);
829 }
830
831 /* x86-64 ELF linker hash table. */
832
833 struct elf_x86_64_link_hash_table
834 {
835 struct elf_link_hash_table elf;
836
837 /* Short-cuts to get to dynamic linker sections. */
838 asection *sdynbss;
839 asection *srelbss;
840 asection *plt_eh_frame;
841 asection *plt_bnd;
842 asection *plt_got;
843
844 union
845 {
846 bfd_signed_vma refcount;
847 bfd_vma offset;
848 } tls_ld_got;
849
850 /* The amount of space used by the jump slots in the GOT. */
851 bfd_vma sgotplt_jump_table_size;
852
853 /* Small local sym cache. */
854 struct sym_cache sym_cache;
855
856 bfd_vma (*r_info) (bfd_vma, bfd_vma);
857 bfd_vma (*r_sym) (bfd_vma);
858 unsigned int pointer_r_type;
859 const char *dynamic_interpreter;
860 int dynamic_interpreter_size;
861
862 /* _TLS_MODULE_BASE_ symbol. */
863 struct bfd_link_hash_entry *tls_module_base;
864
865 /* Used by local STT_GNU_IFUNC symbols. */
866 htab_t loc_hash_table;
867 void * loc_hash_memory;
868
869 /* The offset into splt of the PLT entry for the TLS descriptor
870 resolver. Special values are 0, if not necessary (or not found
871 to be necessary yet), and -1 if needed but not determined
872 yet. */
873 bfd_vma tlsdesc_plt;
874 /* The offset into sgot of the GOT entry used by the PLT entry
875 above. */
876 bfd_vma tlsdesc_got;
877
878 /* The index of the next R_X86_64_JUMP_SLOT entry in .rela.plt. */
879 bfd_vma next_jump_slot_index;
880 /* The index of the next R_X86_64_IRELATIVE entry in .rela.plt. */
881 bfd_vma next_irelative_index;
882 };
883
884 /* Get the x86-64 ELF linker hash table from a link_info structure. */
885
886 #define elf_x86_64_hash_table(p) \
887 (elf_hash_table_id ((struct elf_link_hash_table *) ((p)->hash)) \
888 == X86_64_ELF_DATA ? ((struct elf_x86_64_link_hash_table *) ((p)->hash)) : NULL)
889
890 #define elf_x86_64_compute_jump_table_size(htab) \
891 ((htab)->elf.srelplt->reloc_count * GOT_ENTRY_SIZE)
892
893 /* Create an entry in an x86-64 ELF linker hash table. */
894
895 static struct bfd_hash_entry *
896 elf_x86_64_link_hash_newfunc (struct bfd_hash_entry *entry,
897 struct bfd_hash_table *table,
898 const char *string)
899 {
900 /* Allocate the structure if it has not already been allocated by a
901 subclass. */
902 if (entry == NULL)
903 {
904 entry = (struct bfd_hash_entry *)
905 bfd_hash_allocate (table,
906 sizeof (struct elf_x86_64_link_hash_entry));
907 if (entry == NULL)
908 return entry;
909 }
910
911 /* Call the allocation method of the superclass. */
912 entry = _bfd_elf_link_hash_newfunc (entry, table, string);
913 if (entry != NULL)
914 {
915 struct elf_x86_64_link_hash_entry *eh;
916
917 eh = (struct elf_x86_64_link_hash_entry *) entry;
918 eh->dyn_relocs = NULL;
919 eh->tls_type = GOT_UNKNOWN;
920 eh->needs_copy = 0;
921 eh->has_bnd_reloc = 0;
922 eh->func_pointer_refcount = 0;
923 eh->plt_bnd.offset = (bfd_vma) -1;
924 eh->plt_got.offset = (bfd_vma) -1;
925 eh->tlsdesc_got = (bfd_vma) -1;
926 }
927
928 return entry;
929 }
930
931 /* Compute a hash of a local hash entry. We use elf_link_hash_entry
932 for local symbols so that we can handle local STT_GNU_IFUNC symbols
933 as global symbols. We reuse indx and dynstr_index for the local
934 symbol hash since they aren't used by global symbols in this backend. */
935
936 static hashval_t
937 elf_x86_64_local_htab_hash (const void *ptr)
938 {
939 struct elf_link_hash_entry *h
940 = (struct elf_link_hash_entry *) ptr;
941 return ELF_LOCAL_SYMBOL_HASH (h->indx, h->dynstr_index);
942 }
943
944 /* Compare local hash entries. */
945
946 static int
947 elf_x86_64_local_htab_eq (const void *ptr1, const void *ptr2)
948 {
949 struct elf_link_hash_entry *h1
950 = (struct elf_link_hash_entry *) ptr1;
951 struct elf_link_hash_entry *h2
952 = (struct elf_link_hash_entry *) ptr2;
953
954 return h1->indx == h2->indx && h1->dynstr_index == h2->dynstr_index;
955 }
956
957 /* Find and/or create a hash entry for a local symbol. */
958
959 static struct elf_link_hash_entry *
960 elf_x86_64_get_local_sym_hash (struct elf_x86_64_link_hash_table *htab,
961 bfd *abfd, const Elf_Internal_Rela *rel,
962 bfd_boolean create)
963 {
964 struct elf_x86_64_link_hash_entry e, *ret;
965 asection *sec = abfd->sections;
966 hashval_t h = ELF_LOCAL_SYMBOL_HASH (sec->id,
967 htab->r_sym (rel->r_info));
968 void **slot;
969
970 e.elf.indx = sec->id;
971 e.elf.dynstr_index = htab->r_sym (rel->r_info);
972 slot = htab_find_slot_with_hash (htab->loc_hash_table, &e, h,
973 create ? INSERT : NO_INSERT);
974
975 if (!slot)
976 return NULL;
977
978 if (*slot)
979 {
980 ret = (struct elf_x86_64_link_hash_entry *) *slot;
981 return &ret->elf;
982 }
983
984 ret = (struct elf_x86_64_link_hash_entry *)
985 objalloc_alloc ((struct objalloc *) htab->loc_hash_memory,
986 sizeof (struct elf_x86_64_link_hash_entry));
987 if (ret)
988 {
989 memset (ret, 0, sizeof (*ret));
990 ret->elf.indx = sec->id;
991 ret->elf.dynstr_index = htab->r_sym (rel->r_info);
992 ret->elf.dynindx = -1;
993 ret->func_pointer_refcount = 0;
994 ret->plt_got.offset = (bfd_vma) -1;
995 *slot = ret;
996 }
997 return &ret->elf;
998 }
999
1000 /* Destroy an X86-64 ELF linker hash table. */
1001
1002 static void
1003 elf_x86_64_link_hash_table_free (bfd *obfd)
1004 {
1005 struct elf_x86_64_link_hash_table *htab
1006 = (struct elf_x86_64_link_hash_table *) obfd->link.hash;
1007
1008 if (htab->loc_hash_table)
1009 htab_delete (htab->loc_hash_table);
1010 if (htab->loc_hash_memory)
1011 objalloc_free ((struct objalloc *) htab->loc_hash_memory);
1012 _bfd_elf_link_hash_table_free (obfd);
1013 }
1014
1015 /* Create an X86-64 ELF linker hash table. */
1016
1017 static struct bfd_link_hash_table *
1018 elf_x86_64_link_hash_table_create (bfd *abfd)
1019 {
1020 struct elf_x86_64_link_hash_table *ret;
1021 bfd_size_type amt = sizeof (struct elf_x86_64_link_hash_table);
1022
1023 ret = (struct elf_x86_64_link_hash_table *) bfd_zmalloc (amt);
1024 if (ret == NULL)
1025 return NULL;
1026
1027 if (!_bfd_elf_link_hash_table_init (&ret->elf, abfd,
1028 elf_x86_64_link_hash_newfunc,
1029 sizeof (struct elf_x86_64_link_hash_entry),
1030 X86_64_ELF_DATA))
1031 {
1032 free (ret);
1033 return NULL;
1034 }
1035
1036 if (ABI_64_P (abfd))
1037 {
1038 ret->r_info = elf64_r_info;
1039 ret->r_sym = elf64_r_sym;
1040 ret->pointer_r_type = R_X86_64_64;
1041 ret->dynamic_interpreter = ELF64_DYNAMIC_INTERPRETER;
1042 ret->dynamic_interpreter_size = sizeof ELF64_DYNAMIC_INTERPRETER;
1043 }
1044 else
1045 {
1046 ret->r_info = elf32_r_info;
1047 ret->r_sym = elf32_r_sym;
1048 ret->pointer_r_type = R_X86_64_32;
1049 ret->dynamic_interpreter = ELF32_DYNAMIC_INTERPRETER;
1050 ret->dynamic_interpreter_size = sizeof ELF32_DYNAMIC_INTERPRETER;
1051 }
1052
1053 ret->loc_hash_table = htab_try_create (1024,
1054 elf_x86_64_local_htab_hash,
1055 elf_x86_64_local_htab_eq,
1056 NULL);
1057 ret->loc_hash_memory = objalloc_create ();
1058 if (!ret->loc_hash_table || !ret->loc_hash_memory)
1059 {
1060 elf_x86_64_link_hash_table_free (abfd);
1061 return NULL;
1062 }
1063 ret->elf.root.hash_table_free = elf_x86_64_link_hash_table_free;
1064
1065 return &ret->elf.root;
1066 }
1067
1068 /* Create .plt, .rela.plt, .got, .got.plt, .rela.got, .dynbss, and
1069 .rela.bss sections in DYNOBJ, and set up shortcuts to them in our
1070 hash table. */
1071
1072 static bfd_boolean
1073 elf_x86_64_create_dynamic_sections (bfd *dynobj,
1074 struct bfd_link_info *info)
1075 {
1076 struct elf_x86_64_link_hash_table *htab;
1077
1078 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
1079 return FALSE;
1080
1081 htab = elf_x86_64_hash_table (info);
1082 if (htab == NULL)
1083 return FALSE;
1084
1085 htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
1086 if (!htab->sdynbss)
1087 abort ();
1088
1089 if (bfd_link_executable (info))
1090 {
1091 /* Always allow copy relocs for building executables. */
1092 asection *s = bfd_get_linker_section (dynobj, ".rela.bss");
1093 if (s == NULL)
1094 {
1095 const struct elf_backend_data *bed = get_elf_backend_data (dynobj);
1096 s = bfd_make_section_anyway_with_flags (dynobj,
1097 ".rela.bss",
1098 (bed->dynamic_sec_flags
1099 | SEC_READONLY));
1100 if (s == NULL
1101 || ! bfd_set_section_alignment (dynobj, s,
1102 bed->s->log_file_align))
1103 return FALSE;
1104 }
1105 htab->srelbss = s;
1106 }
1107
1108 if (!info->no_ld_generated_unwind_info
1109 && htab->plt_eh_frame == NULL
1110 && htab->elf.splt != NULL)
1111 {
1112 flagword flags = (SEC_ALLOC | SEC_LOAD | SEC_READONLY
1113 | SEC_HAS_CONTENTS | SEC_IN_MEMORY
1114 | SEC_LINKER_CREATED);
1115 htab->plt_eh_frame
1116 = bfd_make_section_anyway_with_flags (dynobj, ".eh_frame", flags);
1117 if (htab->plt_eh_frame == NULL
1118 || !bfd_set_section_alignment (dynobj, htab->plt_eh_frame, 3))
1119 return FALSE;
1120 }
1121 return TRUE;
1122 }
1123
1124 /* Copy the extra info we tack onto an elf_link_hash_entry. */
1125
1126 static void
1127 elf_x86_64_copy_indirect_symbol (struct bfd_link_info *info,
1128 struct elf_link_hash_entry *dir,
1129 struct elf_link_hash_entry *ind)
1130 {
1131 struct elf_x86_64_link_hash_entry *edir, *eind;
1132
1133 edir = (struct elf_x86_64_link_hash_entry *) dir;
1134 eind = (struct elf_x86_64_link_hash_entry *) ind;
1135
1136 if (!edir->has_bnd_reloc)
1137 edir->has_bnd_reloc = eind->has_bnd_reloc;
1138
1139 if (eind->dyn_relocs != NULL)
1140 {
1141 if (edir->dyn_relocs != NULL)
1142 {
1143 struct elf_dyn_relocs **pp;
1144 struct elf_dyn_relocs *p;
1145
1146 /* Add reloc counts against the indirect sym to the direct sym
1147 list. Merge any entries against the same section. */
1148 for (pp = &eind->dyn_relocs; (p = *pp) != NULL; )
1149 {
1150 struct elf_dyn_relocs *q;
1151
1152 for (q = edir->dyn_relocs; q != NULL; q = q->next)
1153 if (q->sec == p->sec)
1154 {
1155 q->pc_count += p->pc_count;
1156 q->count += p->count;
1157 *pp = p->next;
1158 break;
1159 }
1160 if (q == NULL)
1161 pp = &p->next;
1162 }
1163 *pp = edir->dyn_relocs;
1164 }
1165
1166 edir->dyn_relocs = eind->dyn_relocs;
1167 eind->dyn_relocs = NULL;
1168 }
1169
1170 if (ind->root.type == bfd_link_hash_indirect
1171 && dir->got.refcount <= 0)
1172 {
1173 edir->tls_type = eind->tls_type;
1174 eind->tls_type = GOT_UNKNOWN;
1175 }
1176
1177 if (ELIMINATE_COPY_RELOCS
1178 && ind->root.type != bfd_link_hash_indirect
1179 && dir->dynamic_adjusted)
1180 {
1181 /* If called to transfer flags for a weakdef during processing
1182 of elf_adjust_dynamic_symbol, don't copy non_got_ref.
1183 We clear it ourselves for ELIMINATE_COPY_RELOCS. */
1184 dir->ref_dynamic |= ind->ref_dynamic;
1185 dir->ref_regular |= ind->ref_regular;
1186 dir->ref_regular_nonweak |= ind->ref_regular_nonweak;
1187 dir->needs_plt |= ind->needs_plt;
1188 dir->pointer_equality_needed |= ind->pointer_equality_needed;
1189 }
1190 else
1191 {
1192 if (eind->func_pointer_refcount > 0)
1193 {
1194 edir->func_pointer_refcount += eind->func_pointer_refcount;
1195 eind->func_pointer_refcount = 0;
1196 }
1197
1198 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
1199 }
1200 }
1201
1202 static bfd_boolean
1203 elf64_x86_64_elf_object_p (bfd *abfd)
1204 {
1205 /* Set the right machine number for an x86-64 elf64 file. */
1206 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64);
1207 return TRUE;
1208 }
1209
1210 static bfd_boolean
1211 elf32_x86_64_elf_object_p (bfd *abfd)
1212 {
1213 /* Set the right machine number for an x86-64 elf32 file. */
1214 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32);
1215 return TRUE;
1216 }
1217
1218 /* Return TRUE if the TLS access code sequence supports transition
1219 from R_TYPE. */
1220
1221 static bfd_boolean
1222 elf_x86_64_check_tls_transition (bfd *abfd,
1223 struct bfd_link_info *info,
1224 asection *sec,
1225 bfd_byte *contents,
1226 Elf_Internal_Shdr *symtab_hdr,
1227 struct elf_link_hash_entry **sym_hashes,
1228 unsigned int r_type,
1229 const Elf_Internal_Rela *rel,
1230 const Elf_Internal_Rela *relend)
1231 {
1232 unsigned int val;
1233 unsigned long r_symndx;
1234 bfd_boolean largepic = FALSE;
1235 struct elf_link_hash_entry *h;
1236 bfd_vma offset;
1237 struct elf_x86_64_link_hash_table *htab;
1238
1239 /* Get the section contents. */
1240 if (contents == NULL)
1241 {
1242 if (elf_section_data (sec)->this_hdr.contents != NULL)
1243 contents = elf_section_data (sec)->this_hdr.contents;
1244 else
1245 {
1246 /* FIXME: How to better handle error condition? */
1247 if (!bfd_malloc_and_get_section (abfd, sec, &contents))
1248 return FALSE;
1249
1250 /* Cache the section contents for elf_link_input_bfd. */
1251 elf_section_data (sec)->this_hdr.contents = contents;
1252 }
1253 }
1254
1255 htab = elf_x86_64_hash_table (info);
1256 offset = rel->r_offset;
1257 switch (r_type)
1258 {
1259 case R_X86_64_TLSGD:
1260 case R_X86_64_TLSLD:
1261 if ((rel + 1) >= relend)
1262 return FALSE;
1263
1264 if (r_type == R_X86_64_TLSGD)
1265 {
1266 /* Check transition from GD access model. For 64bit, only
1267 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1268 .word 0x6666; rex64; call __tls_get_addr
1269 can transition to a different access model. For 32bit, only
1270 leaq foo@tlsgd(%rip), %rdi
1271 .word 0x6666; rex64; call __tls_get_addr
1272 can transition to a different access model. For largepic
1273 we also support:
1274 leaq foo@tlsgd(%rip), %rdi
1275 movabsq $__tls_get_addr@pltoff, %rax
1276 addq %rbx, %rax
1277 call *%rax. */
1278
1279 static const unsigned char call[] = { 0x66, 0x66, 0x48, 0xe8 };
1280 static const unsigned char leaq[] = { 0x66, 0x48, 0x8d, 0x3d };
1281
1282 if ((offset + 12) > sec->size)
1283 return FALSE;
1284
1285 if (memcmp (contents + offset + 4, call, 4) != 0)
1286 {
1287 if (!ABI_64_P (abfd)
1288 || (offset + 19) > sec->size
1289 || offset < 3
1290 || memcmp (contents + offset - 3, leaq + 1, 3) != 0
1291 || memcmp (contents + offset + 4, "\x48\xb8", 2) != 0
1292 || memcmp (contents + offset + 14, "\x48\x01\xd8\xff\xd0", 5)
1293 != 0)
1294 return FALSE;
1295 largepic = TRUE;
1296 }
1297 else if (ABI_64_P (abfd))
1298 {
1299 if (offset < 4
1300 || memcmp (contents + offset - 4, leaq, 4) != 0)
1301 return FALSE;
1302 }
1303 else
1304 {
1305 if (offset < 3
1306 || memcmp (contents + offset - 3, leaq + 1, 3) != 0)
1307 return FALSE;
1308 }
1309 }
1310 else
1311 {
1312 /* Check transition from LD access model. Only
1313 leaq foo@tlsld(%rip), %rdi;
1314 call __tls_get_addr
1315 can transition to a different access model. For largepic
1316 we also support:
1317 leaq foo@tlsld(%rip), %rdi
1318 movabsq $__tls_get_addr@pltoff, %rax
1319 addq %rbx, %rax
1320 call *%rax. */
1321
1322 static const unsigned char lea[] = { 0x48, 0x8d, 0x3d };
1323
1324 if (offset < 3 || (offset + 9) > sec->size)
1325 return FALSE;
1326
1327 if (memcmp (contents + offset - 3, lea, 3) != 0)
1328 return FALSE;
1329
1330 if (0xe8 != *(contents + offset + 4))
1331 {
1332 if (!ABI_64_P (abfd)
1333 || (offset + 19) > sec->size
1334 || memcmp (contents + offset + 4, "\x48\xb8", 2) != 0
1335 || memcmp (contents + offset + 14, "\x48\x01\xd8\xff\xd0", 5)
1336 != 0)
1337 return FALSE;
1338 largepic = TRUE;
1339 }
1340 }
1341
1342 r_symndx = htab->r_sym (rel[1].r_info);
1343 if (r_symndx < symtab_hdr->sh_info)
1344 return FALSE;
1345
1346 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1347 /* Use strncmp to check __tls_get_addr since __tls_get_addr
1348 may be versioned. */
1349 return (h != NULL
1350 && h->root.root.string != NULL
1351 && (largepic
1352 ? ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PLTOFF64
1353 : (ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PC32
1354 || ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PLT32))
1355 && (strncmp (h->root.root.string,
1356 "__tls_get_addr", 14) == 0));
1357
1358 case R_X86_64_GOTTPOFF:
1359 /* Check transition from IE access model:
1360 mov foo@gottpoff(%rip), %reg
1361 add foo@gottpoff(%rip), %reg
1362 */
1363
1364 /* Check REX prefix first. */
1365 if (offset >= 3 && (offset + 4) <= sec->size)
1366 {
1367 val = bfd_get_8 (abfd, contents + offset - 3);
1368 if (val != 0x48 && val != 0x4c)
1369 {
1370 /* X32 may have 0x44 REX prefix or no REX prefix. */
1371 if (ABI_64_P (abfd))
1372 return FALSE;
1373 }
1374 }
1375 else
1376 {
1377 /* X32 may not have any REX prefix. */
1378 if (ABI_64_P (abfd))
1379 return FALSE;
1380 if (offset < 2 || (offset + 3) > sec->size)
1381 return FALSE;
1382 }
1383
1384 val = bfd_get_8 (abfd, contents + offset - 2);
1385 if (val != 0x8b && val != 0x03)
1386 return FALSE;
1387
1388 val = bfd_get_8 (abfd, contents + offset - 1);
1389 return (val & 0xc7) == 5;
1390
1391 case R_X86_64_GOTPC32_TLSDESC:
1392 /* Check transition from GDesc access model:
1393 leaq x@tlsdesc(%rip), %rax
1394
1395 Make sure it's a leaq adding rip to a 32-bit offset
1396 into any register, although it's probably almost always
1397 going to be rax. */
1398
1399 if (offset < 3 || (offset + 4) > sec->size)
1400 return FALSE;
1401
1402 val = bfd_get_8 (abfd, contents + offset - 3);
1403 if ((val & 0xfb) != 0x48)
1404 return FALSE;
1405
1406 if (bfd_get_8 (abfd, contents + offset - 2) != 0x8d)
1407 return FALSE;
1408
1409 val = bfd_get_8 (abfd, contents + offset - 1);
1410 return (val & 0xc7) == 0x05;
1411
1412 case R_X86_64_TLSDESC_CALL:
1413 /* Check transition from GDesc access model:
1414 call *x@tlsdesc(%rax)
1415 */
1416 if (offset + 2 <= sec->size)
1417 {
1418 /* Make sure that it's a call *x@tlsdesc(%rax). */
1419 static const unsigned char call[] = { 0xff, 0x10 };
1420 return memcmp (contents + offset, call, 2) == 0;
1421 }
1422
1423 return FALSE;
1424
1425 default:
1426 abort ();
1427 }
1428 }
1429
1430 /* Return TRUE if the TLS access transition is OK or no transition
1431 will be performed. Update R_TYPE if there is a transition. */
1432
1433 static bfd_boolean
1434 elf_x86_64_tls_transition (struct bfd_link_info *info, bfd *abfd,
1435 asection *sec, bfd_byte *contents,
1436 Elf_Internal_Shdr *symtab_hdr,
1437 struct elf_link_hash_entry **sym_hashes,
1438 unsigned int *r_type, int tls_type,
1439 const Elf_Internal_Rela *rel,
1440 const Elf_Internal_Rela *relend,
1441 struct elf_link_hash_entry *h,
1442 unsigned long r_symndx)
1443 {
1444 unsigned int from_type = *r_type;
1445 unsigned int to_type = from_type;
1446 bfd_boolean check = TRUE;
1447
1448 /* Skip TLS transition for functions. */
1449 if (h != NULL
1450 && (h->type == STT_FUNC
1451 || h->type == STT_GNU_IFUNC))
1452 return TRUE;
1453
1454 switch (from_type)
1455 {
1456 case R_X86_64_TLSGD:
1457 case R_X86_64_GOTPC32_TLSDESC:
1458 case R_X86_64_TLSDESC_CALL:
1459 case R_X86_64_GOTTPOFF:
1460 if (bfd_link_executable (info))
1461 {
1462 if (h == NULL)
1463 to_type = R_X86_64_TPOFF32;
1464 else
1465 to_type = R_X86_64_GOTTPOFF;
1466 }
1467
1468 /* When we are called from elf_x86_64_relocate_section,
1469 CONTENTS isn't NULL and there may be additional transitions
1470 based on TLS_TYPE. */
1471 if (contents != NULL)
1472 {
1473 unsigned int new_to_type = to_type;
1474
1475 if (bfd_link_executable (info)
1476 && h != NULL
1477 && h->dynindx == -1
1478 && tls_type == GOT_TLS_IE)
1479 new_to_type = R_X86_64_TPOFF32;
1480
1481 if (to_type == R_X86_64_TLSGD
1482 || to_type == R_X86_64_GOTPC32_TLSDESC
1483 || to_type == R_X86_64_TLSDESC_CALL)
1484 {
1485 if (tls_type == GOT_TLS_IE)
1486 new_to_type = R_X86_64_GOTTPOFF;
1487 }
1488
1489 /* We checked the transition before when we were called from
1490 elf_x86_64_check_relocs. We only want to check the new
1491 transition which hasn't been checked before. */
1492 check = new_to_type != to_type && from_type == to_type;
1493 to_type = new_to_type;
1494 }
1495
1496 break;
1497
1498 case R_X86_64_TLSLD:
1499 if (bfd_link_executable (info))
1500 to_type = R_X86_64_TPOFF32;
1501 break;
1502
1503 default:
1504 return TRUE;
1505 }
1506
1507 /* Return TRUE if there is no transition. */
1508 if (from_type == to_type)
1509 return TRUE;
1510
1511 /* Check if the transition can be performed. */
1512 if (check
1513 && ! elf_x86_64_check_tls_transition (abfd, info, sec, contents,
1514 symtab_hdr, sym_hashes,
1515 from_type, rel, relend))
1516 {
1517 reloc_howto_type *from, *to;
1518 const char *name;
1519
1520 from = elf_x86_64_rtype_to_howto (abfd, from_type);
1521 to = elf_x86_64_rtype_to_howto (abfd, to_type);
1522
1523 if (h)
1524 name = h->root.root.string;
1525 else
1526 {
1527 struct elf_x86_64_link_hash_table *htab;
1528
1529 htab = elf_x86_64_hash_table (info);
1530 if (htab == NULL)
1531 name = "*unknown*";
1532 else
1533 {
1534 Elf_Internal_Sym *isym;
1535
1536 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1537 abfd, r_symndx);
1538 name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL);
1539 }
1540 }
1541
1542 (*_bfd_error_handler)
1543 (_("%B: TLS transition from %s to %s against `%s' at 0x%lx "
1544 "in section `%A' failed"),
1545 abfd, sec, from->name, to->name, name,
1546 (unsigned long) rel->r_offset);
1547 bfd_set_error (bfd_error_bad_value);
1548 return FALSE;
1549 }
1550
1551 *r_type = to_type;
1552 return TRUE;
1553 }
1554
1555 /* Rename some of the generic section flags to better document how they
1556 are used here. */
1557 #define need_convert_load sec_flg0
1558
1559 /* Look through the relocs for a section during the first phase, and
1560 calculate needed space in the global offset table, procedure
1561 linkage table, and dynamic reloc sections. */
1562
1563 static bfd_boolean
1564 elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info,
1565 asection *sec,
1566 const Elf_Internal_Rela *relocs)
1567 {
1568 struct elf_x86_64_link_hash_table *htab;
1569 Elf_Internal_Shdr *symtab_hdr;
1570 struct elf_link_hash_entry **sym_hashes;
1571 const Elf_Internal_Rela *rel;
1572 const Elf_Internal_Rela *rel_end;
1573 asection *sreloc;
1574 bfd_boolean use_plt_got;
1575
1576 if (bfd_link_relocatable (info))
1577 return TRUE;
1578
1579 BFD_ASSERT (is_x86_64_elf (abfd));
1580
1581 htab = elf_x86_64_hash_table (info);
1582 if (htab == NULL)
1583 return FALSE;
1584
1585 use_plt_got = get_elf_x86_64_backend_data (abfd) == &elf_x86_64_arch_bed;
1586
1587 symtab_hdr = &elf_symtab_hdr (abfd);
1588 sym_hashes = elf_sym_hashes (abfd);
1589
1590 sreloc = NULL;
1591
1592 rel_end = relocs + sec->reloc_count;
1593 for (rel = relocs; rel < rel_end; rel++)
1594 {
1595 unsigned int r_type;
1596 unsigned long r_symndx;
1597 struct elf_link_hash_entry *h;
1598 Elf_Internal_Sym *isym;
1599 const char *name;
1600 bfd_boolean size_reloc;
1601
1602 r_symndx = htab->r_sym (rel->r_info);
1603 r_type = ELF32_R_TYPE (rel->r_info);
1604
1605 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
1606 {
1607 (*_bfd_error_handler) (_("%B: bad symbol index: %d"),
1608 abfd, r_symndx);
1609 return FALSE;
1610 }
1611
1612 if (r_symndx < symtab_hdr->sh_info)
1613 {
1614 /* A local symbol. */
1615 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1616 abfd, r_symndx);
1617 if (isym == NULL)
1618 return FALSE;
1619
1620 /* Check relocation against local STT_GNU_IFUNC symbol. */
1621 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
1622 {
1623 h = elf_x86_64_get_local_sym_hash (htab, abfd, rel,
1624 TRUE);
1625 if (h == NULL)
1626 return FALSE;
1627
1628 /* Fake a STT_GNU_IFUNC symbol. */
1629 h->type = STT_GNU_IFUNC;
1630 h->def_regular = 1;
1631 h->ref_regular = 1;
1632 h->forced_local = 1;
1633 h->root.type = bfd_link_hash_defined;
1634 }
1635 else
1636 h = NULL;
1637 }
1638 else
1639 {
1640 isym = NULL;
1641 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1642 while (h->root.type == bfd_link_hash_indirect
1643 || h->root.type == bfd_link_hash_warning)
1644 h = (struct elf_link_hash_entry *) h->root.u.i.link;
1645 }
1646
1647 /* Check invalid x32 relocations. */
1648 if (!ABI_64_P (abfd))
1649 switch (r_type)
1650 {
1651 default:
1652 break;
1653
1654 case R_X86_64_DTPOFF64:
1655 case R_X86_64_TPOFF64:
1656 case R_X86_64_PC64:
1657 case R_X86_64_GOTOFF64:
1658 case R_X86_64_GOT64:
1659 case R_X86_64_GOTPCREL64:
1660 case R_X86_64_GOTPC64:
1661 case R_X86_64_GOTPLT64:
1662 case R_X86_64_PLTOFF64:
1663 {
1664 if (h)
1665 name = h->root.root.string;
1666 else
1667 name = bfd_elf_sym_name (abfd, symtab_hdr, isym,
1668 NULL);
1669 (*_bfd_error_handler)
1670 (_("%B: relocation %s against symbol `%s' isn't "
1671 "supported in x32 mode"), abfd,
1672 x86_64_elf_howto_table[r_type].name, name);
1673 bfd_set_error (bfd_error_bad_value);
1674 return FALSE;
1675 }
1676 break;
1677 }
1678
1679 if (h != NULL)
1680 {
1681 /* Create the ifunc sections for static executables. If we
1682 never see an indirect function symbol nor are we building
1683 a static executable, those sections will be empty and
1684 won't appear in output. */
1685 switch (r_type)
1686 {
1687 default:
1688 break;
1689
1690 case R_X86_64_PC32_BND:
1691 case R_X86_64_PLT32_BND:
1692 case R_X86_64_PC32:
1693 case R_X86_64_PLT32:
1694 case R_X86_64_32:
1695 case R_X86_64_64:
1696 /* MPX PLT is supported only if elf_x86_64_arch_bed
1697 is used in 64-bit mode. */
1698 if (ABI_64_P (abfd)
1699 && info->bndplt
1700 && (get_elf_x86_64_backend_data (abfd)
1701 == &elf_x86_64_arch_bed))
1702 {
1703 elf_x86_64_hash_entry (h)->has_bnd_reloc = 1;
1704
1705 /* Create the second PLT for Intel MPX support. */
1706 if (htab->plt_bnd == NULL)
1707 {
1708 unsigned int plt_bnd_align;
1709 const struct elf_backend_data *bed;
1710
1711 bed = get_elf_backend_data (info->output_bfd);
1712 BFD_ASSERT (sizeof (elf_x86_64_bnd_plt2_entry) == 8
1713 && (sizeof (elf_x86_64_bnd_plt2_entry)
1714 == sizeof (elf_x86_64_legacy_plt2_entry)));
1715 plt_bnd_align = 3;
1716
1717 if (htab->elf.dynobj == NULL)
1718 htab->elf.dynobj = abfd;
1719 htab->plt_bnd
1720 = bfd_make_section_anyway_with_flags (htab->elf.dynobj,
1721 ".plt.bnd",
1722 (bed->dynamic_sec_flags
1723 | SEC_ALLOC
1724 | SEC_CODE
1725 | SEC_LOAD
1726 | SEC_READONLY));
1727 if (htab->plt_bnd == NULL
1728 || !bfd_set_section_alignment (htab->elf.dynobj,
1729 htab->plt_bnd,
1730 plt_bnd_align))
1731 return FALSE;
1732 }
1733 }
1734
1735 case R_X86_64_32S:
1736 case R_X86_64_PC64:
1737 case R_X86_64_GOTPCREL:
1738 case R_X86_64_GOTPCRELX:
1739 case R_X86_64_REX_GOTPCRELX:
1740 case R_X86_64_GOTPCREL64:
1741 if (htab->elf.dynobj == NULL)
1742 htab->elf.dynobj = abfd;
1743 if (!_bfd_elf_create_ifunc_sections (htab->elf.dynobj, info))
1744 return FALSE;
1745 break;
1746 }
1747
1748 /* It is referenced by a non-shared object. */
1749 h->ref_regular = 1;
1750 h->root.non_ir_ref = 1;
1751
1752 if (h->type == STT_GNU_IFUNC)
1753 elf_tdata (info->output_bfd)->has_gnu_symbols
1754 |= elf_gnu_symbol_ifunc;
1755 }
1756
1757 if (! elf_x86_64_tls_transition (info, abfd, sec, NULL,
1758 symtab_hdr, sym_hashes,
1759 &r_type, GOT_UNKNOWN,
1760 rel, rel_end, h, r_symndx))
1761 return FALSE;
1762
1763 switch (r_type)
1764 {
1765 case R_X86_64_TLSLD:
1766 htab->tls_ld_got.refcount += 1;
1767 goto create_got;
1768
1769 case R_X86_64_TPOFF32:
1770 if (!bfd_link_executable (info) && ABI_64_P (abfd))
1771 {
1772 if (h)
1773 name = h->root.root.string;
1774 else
1775 name = bfd_elf_sym_name (abfd, symtab_hdr, isym,
1776 NULL);
1777 (*_bfd_error_handler)
1778 (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
1779 abfd,
1780 x86_64_elf_howto_table[r_type].name, name);
1781 bfd_set_error (bfd_error_bad_value);
1782 return FALSE;
1783 }
1784 break;
1785
1786 case R_X86_64_GOTTPOFF:
1787 if (!bfd_link_executable (info))
1788 info->flags |= DF_STATIC_TLS;
1789 /* Fall through */
1790
1791 case R_X86_64_GOT32:
1792 case R_X86_64_GOTPCREL:
1793 case R_X86_64_GOTPCRELX:
1794 case R_X86_64_REX_GOTPCRELX:
1795 case R_X86_64_TLSGD:
1796 case R_X86_64_GOT64:
1797 case R_X86_64_GOTPCREL64:
1798 case R_X86_64_GOTPLT64:
1799 case R_X86_64_GOTPC32_TLSDESC:
1800 case R_X86_64_TLSDESC_CALL:
1801 /* This symbol requires a global offset table entry. */
1802 {
1803 int tls_type, old_tls_type;
1804
1805 switch (r_type)
1806 {
1807 default: tls_type = GOT_NORMAL; break;
1808 case R_X86_64_TLSGD: tls_type = GOT_TLS_GD; break;
1809 case R_X86_64_GOTTPOFF: tls_type = GOT_TLS_IE; break;
1810 case R_X86_64_GOTPC32_TLSDESC:
1811 case R_X86_64_TLSDESC_CALL:
1812 tls_type = GOT_TLS_GDESC; break;
1813 }
1814
1815 if (h != NULL)
1816 {
1817 h->got.refcount += 1;
1818 old_tls_type = elf_x86_64_hash_entry (h)->tls_type;
1819 }
1820 else
1821 {
1822 bfd_signed_vma *local_got_refcounts;
1823
1824 /* This is a global offset table entry for a local symbol. */
1825 local_got_refcounts = elf_local_got_refcounts (abfd);
1826 if (local_got_refcounts == NULL)
1827 {
1828 bfd_size_type size;
1829
1830 size = symtab_hdr->sh_info;
1831 size *= sizeof (bfd_signed_vma)
1832 + sizeof (bfd_vma) + sizeof (char);
1833 local_got_refcounts = ((bfd_signed_vma *)
1834 bfd_zalloc (abfd, size));
1835 if (local_got_refcounts == NULL)
1836 return FALSE;
1837 elf_local_got_refcounts (abfd) = local_got_refcounts;
1838 elf_x86_64_local_tlsdesc_gotent (abfd)
1839 = (bfd_vma *) (local_got_refcounts + symtab_hdr->sh_info);
1840 elf_x86_64_local_got_tls_type (abfd)
1841 = (char *) (local_got_refcounts + 2 * symtab_hdr->sh_info);
1842 }
1843 local_got_refcounts[r_symndx] += 1;
1844 old_tls_type
1845 = elf_x86_64_local_got_tls_type (abfd) [r_symndx];
1846 }
1847
1848 /* If a TLS symbol is accessed using IE at least once,
1849 	     there is no point in using the dynamic model for it.  */
1850 if (old_tls_type != tls_type && old_tls_type != GOT_UNKNOWN
1851 && (! GOT_TLS_GD_ANY_P (old_tls_type)
1852 || tls_type != GOT_TLS_IE))
1853 {
1854 if (old_tls_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (tls_type))
1855 tls_type = old_tls_type;
1856 else if (GOT_TLS_GD_ANY_P (old_tls_type)
1857 && GOT_TLS_GD_ANY_P (tls_type))
1858 tls_type |= old_tls_type;
1859 else
1860 {
1861 if (h)
1862 name = h->root.root.string;
1863 else
1864 name = bfd_elf_sym_name (abfd, symtab_hdr,
1865 isym, NULL);
1866 (*_bfd_error_handler)
1867 (_("%B: '%s' accessed both as normal and thread local symbol"),
1868 abfd, name);
1869 bfd_set_error (bfd_error_bad_value);
1870 return FALSE;
1871 }
1872 }
1873
1874 if (old_tls_type != tls_type)
1875 {
1876 if (h != NULL)
1877 elf_x86_64_hash_entry (h)->tls_type = tls_type;
1878 else
1879 elf_x86_64_local_got_tls_type (abfd) [r_symndx] = tls_type;
1880 }
1881 }
1882 /* Fall through */
1883
1884 case R_X86_64_GOTOFF64:
1885 case R_X86_64_GOTPC32:
1886 case R_X86_64_GOTPC64:
1887 create_got:
1888 if (htab->elf.sgot == NULL)
1889 {
1890 if (htab->elf.dynobj == NULL)
1891 htab->elf.dynobj = abfd;
1892 if (!_bfd_elf_create_got_section (htab->elf.dynobj,
1893 info))
1894 return FALSE;
1895 }
1896 break;
1897
1898 case R_X86_64_PLT32:
1899 case R_X86_64_PLT32_BND:
1900 /* This symbol requires a procedure linkage table entry. We
1901 actually build the entry in adjust_dynamic_symbol,
1902 because this might be a case of linking PIC code which is
1903 never referenced by a dynamic object, in which case we
1904 don't need to generate a procedure linkage table entry
1905 after all. */
1906
1907 /* If this is a local symbol, we resolve it directly without
1908 creating a procedure linkage table entry. */
1909 if (h == NULL)
1910 continue;
1911
1912 h->needs_plt = 1;
1913 h->plt.refcount += 1;
1914 break;
1915
1916 case R_X86_64_PLTOFF64:
1917 /* This tries to form the 'address' of a function relative
1918 to GOT. For global symbols we need a PLT entry. */
1919 if (h != NULL)
1920 {
1921 h->needs_plt = 1;
1922 h->plt.refcount += 1;
1923 }
1924 goto create_got;
1925
1926 case R_X86_64_SIZE32:
1927 case R_X86_64_SIZE64:
1928 size_reloc = TRUE;
1929 goto do_size;
1930
1931 case R_X86_64_32:
1932 if (!ABI_64_P (abfd))
1933 goto pointer;
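	  /* Fall through.  */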
1934 case R_X86_64_8:
1935 case R_X86_64_16:
1936 case R_X86_64_32S:
1937 /* Let's help debug shared library creation. These relocs
1938 cannot be used in shared libs. Don't error out for
1939 sections we don't care about, such as debug sections or
1940 non-constant sections. */
1941 if (bfd_link_pic (info)
1942 && (sec->flags & SEC_ALLOC) != 0
1943 && (sec->flags & SEC_READONLY) != 0)
1944 {
1945 if (h)
1946 name = h->root.root.string;
1947 else
1948 name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL);
1949 (*_bfd_error_handler)
1950 (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
1951 abfd, x86_64_elf_howto_table[r_type].name, name);
1952 bfd_set_error (bfd_error_bad_value);
1953 return FALSE;
1954 }
1955 /* Fall through. */
1956
1957 case R_X86_64_PC8:
1958 case R_X86_64_PC16:
1959 case R_X86_64_PC32:
1960 case R_X86_64_PC32_BND:
1961 case R_X86_64_PC64:
1962 case R_X86_64_64:
1963 pointer:
1964 if (h != NULL && bfd_link_executable (info))
1965 {
1966 /* If this reloc is in a read-only section, we might
1967 need a copy reloc. We can't check reliably at this
1968 stage whether the section is read-only, as input
1969 sections have not yet been mapped to output sections.
1970 Tentatively set the flag for now, and correct in
1971 adjust_dynamic_symbol. */
1972 h->non_got_ref = 1;
1973
1974 /* We may need a .plt entry if the function this reloc
1975 refers to is in a shared lib. */
1976 h->plt.refcount += 1;
1977 if (r_type == R_X86_64_PC32)
1978 {
1979 /* Since something like ".long foo - ." may be used
1980 as pointer, make sure that PLT is used if foo is
1981 a function defined in a shared library. */
1982 if ((sec->flags & SEC_CODE) == 0)
1983 h->pointer_equality_needed = 1;
1984 }
1985 else if (r_type != R_X86_64_PC32_BND
1986 && r_type != R_X86_64_PC64)
1987 {
1988 h->pointer_equality_needed = 1;
1989 /* At run-time, R_X86_64_64 can be resolved for both
1990 x86-64 and x32. But R_X86_64_32 and R_X86_64_32S
1991 can only be resolved for x32. */
1992 if ((sec->flags & SEC_READONLY) == 0
1993 && (r_type == R_X86_64_64
1994 || (!ABI_64_P (abfd)
1995 && (r_type == R_X86_64_32
1996 || r_type == R_X86_64_32S))))
1997 {
1998 struct elf_x86_64_link_hash_entry *eh
1999 = (struct elf_x86_64_link_hash_entry *) h;
2000 eh->func_pointer_refcount += 1;
2001 }
2002 }
2003 }
2004
2005 size_reloc = FALSE;
2006 do_size:
2007 /* If we are creating a shared library, and this is a reloc
2008 against a global symbol, or a non PC relative reloc
2009 against a local symbol, then we need to copy the reloc
2010 into the shared library. However, if we are linking with
2011 -Bsymbolic, we do not need to copy a reloc against a
2012 global symbol which is defined in an object we are
2013 including in the link (i.e., DEF_REGULAR is set). At
2014 this point we have not seen all the input files, so it is
2015 possible that DEF_REGULAR is not set now but will be set
2016 later (it is never cleared). In case of a weak definition,
2017 DEF_REGULAR may be cleared later by a strong definition in
2018 a shared library. We account for that possibility below by
2019 	     storing information in the dyn_relocs field of the hash
2020 table entry. A similar situation occurs when creating
2021 shared libraries and symbol visibility changes render the
2022 symbol local.
2023
2024 If on the other hand, we are creating an executable, we
2025 may need to keep relocations for symbols satisfied by a
2026 dynamic library if we manage to avoid copy relocs for the
2027 symbol. */
2028 if ((bfd_link_pic (info)
2029 && (sec->flags & SEC_ALLOC) != 0
2030 && (! IS_X86_64_PCREL_TYPE (r_type)
2031 || (h != NULL
2032 && (! SYMBOLIC_BIND (info, h)
2033 || h->root.type == bfd_link_hash_defweak
2034 || !h->def_regular))))
2035 || (ELIMINATE_COPY_RELOCS
2036 && !bfd_link_pic (info)
2037 && (sec->flags & SEC_ALLOC) != 0
2038 && h != NULL
2039 && (h->root.type == bfd_link_hash_defweak
2040 || !h->def_regular)))
2041 {
2042 struct elf_dyn_relocs *p;
2043 struct elf_dyn_relocs **head;
2044
2045 /* We must copy these reloc types into the output file.
2046 Create a reloc section in dynobj and make room for
2047 this reloc. */
2048 if (sreloc == NULL)
2049 {
2050 if (htab->elf.dynobj == NULL)
2051 htab->elf.dynobj = abfd;
2052
2053 sreloc = _bfd_elf_make_dynamic_reloc_section
2054 (sec, htab->elf.dynobj, ABI_64_P (abfd) ? 3 : 2,
2055 abfd, /*rela?*/ TRUE);
2056
2057 if (sreloc == NULL)
2058 return FALSE;
2059 }
2060
2061 /* If this is a global symbol, we count the number of
2062 relocations we need for this symbol. */
2063 if (h != NULL)
2064 {
2065 head = &((struct elf_x86_64_link_hash_entry *) h)->dyn_relocs;
2066 }
2067 else
2068 {
2069 /* Track dynamic relocs needed for local syms too.
2070 We really need local syms available to do this
2071 easily. Oh well. */
2072 asection *s;
2073 void **vpp;
2074
2075 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
2076 abfd, r_symndx);
2077 if (isym == NULL)
2078 return FALSE;
2079
2080 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
2081 if (s == NULL)
2082 s = sec;
2083
2084 /* Beware of type punned pointers vs strict aliasing
2085 rules. */
2086 vpp = &(elf_section_data (s)->local_dynrel);
2087 head = (struct elf_dyn_relocs **)vpp;
2088 }
2089
2090 p = *head;
2091 if (p == NULL || p->sec != sec)
2092 {
2093 bfd_size_type amt = sizeof *p;
2094
2095 p = ((struct elf_dyn_relocs *)
2096 bfd_alloc (htab->elf.dynobj, amt));
2097 if (p == NULL)
2098 return FALSE;
2099 p->next = *head;
2100 *head = p;
2101 p->sec = sec;
2102 p->count = 0;
2103 p->pc_count = 0;
2104 }
2105
2106 p->count += 1;
2107 /* Count size relocation as PC-relative relocation. */
2108 if (IS_X86_64_PCREL_TYPE (r_type) || size_reloc)
2109 p->pc_count += 1;
2110 }
2111 break;
2112
2113 /* This relocation describes the C++ object vtable hierarchy.
2114 Reconstruct it for later use during GC. */
2115 case R_X86_64_GNU_VTINHERIT:
2116 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
2117 return FALSE;
2118 break;
2119
2120 /* This relocation describes which C++ vtable entries are actually
2121 used. Record for later use during GC. */
2122 case R_X86_64_GNU_VTENTRY:
2123 BFD_ASSERT (h != NULL);
2124 if (h != NULL
2125 && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_addend))
2126 return FALSE;
2127 break;
2128
2129 default:
2130 break;
2131 }
2132
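      /* A symbol that needs a PLT entry and either already has a GOT
	 entry or is linked with -z now (without requiring pointer
	 equality) can use an entry in .plt.got, which branches through
	 the symbol's GOT slot, instead of a regular PLT entry.  Create
	 the .plt.got section the first time such a symbol is seen.  */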
2133 if (use_plt_got
2134 && h != NULL
2135 && h->plt.refcount > 0
2136 && (((info->flags & DF_BIND_NOW) && !h->pointer_equality_needed)
2137 || h->got.refcount > 0)
2138 && htab->plt_got == NULL)
2139 {
2140 /* Create the GOT procedure linkage table. */
2141 unsigned int plt_got_align;
2142 const struct elf_backend_data *bed;
2143
2144 bed = get_elf_backend_data (info->output_bfd);
2145 BFD_ASSERT (sizeof (elf_x86_64_legacy_plt2_entry) == 8
2146 && (sizeof (elf_x86_64_bnd_plt2_entry)
2147 == sizeof (elf_x86_64_legacy_plt2_entry)));
2148 plt_got_align = 3;
2149
2150 if (htab->elf.dynobj == NULL)
2151 htab->elf.dynobj = abfd;
2152 htab->plt_got
2153 = bfd_make_section_anyway_with_flags (htab->elf.dynobj,
2154 ".plt.got",
2155 (bed->dynamic_sec_flags
2156 | SEC_ALLOC
2157 | SEC_CODE
2158 | SEC_LOAD
2159 | SEC_READONLY));
2160 if (htab->plt_got == NULL
2161 || !bfd_set_section_alignment (htab->elf.dynobj,
2162 htab->plt_got,
2163 plt_got_align))
2164 return FALSE;
2165 }
2166
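      /* Mark sections with GOTPCREL relocations against non-IFUNC
	 symbols so that elf_x86_64_convert_load will consider them
	 for conversion of the GOT load into a direct reference.  */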
2167 if ((r_type == R_X86_64_GOTPCREL
2168 || r_type == R_X86_64_GOTPCRELX
2169 || r_type == R_X86_64_REX_GOTPCRELX)
2170 && (h == NULL || h->type != STT_GNU_IFUNC))
2171 sec->need_convert_load = 1;
2172 }
2173
2174 return TRUE;
2175 }
2176
2177 /* Return the section that should be marked against GC for a given
2178 relocation. */
2179
2180 static asection *
2181 elf_x86_64_gc_mark_hook (asection *sec,
2182 struct bfd_link_info *info,
2183 Elf_Internal_Rela *rel,
2184 struct elf_link_hash_entry *h,
2185 Elf_Internal_Sym *sym)
2186 {
2187 if (h != NULL)
2188 switch (ELF32_R_TYPE (rel->r_info))
2189 {
2190 case R_X86_64_GNU_VTINHERIT:
2191 case R_X86_64_GNU_VTENTRY:
2192 return NULL;
2193 }
2194
2195 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
2196 }
2197
2198 /* Update the got entry reference counts for the section being removed. */
2199
2200 static bfd_boolean
2201 elf_x86_64_gc_sweep_hook (bfd *abfd, struct bfd_link_info *info,
2202 asection *sec,
2203 const Elf_Internal_Rela *relocs)
2204 {
2205 struct elf_x86_64_link_hash_table *htab;
2206 Elf_Internal_Shdr *symtab_hdr;
2207 struct elf_link_hash_entry **sym_hashes;
2208 bfd_signed_vma *local_got_refcounts;
2209 const Elf_Internal_Rela *rel, *relend;
2210
2211 if (bfd_link_relocatable (info))
2212 return TRUE;
2213
2214 htab = elf_x86_64_hash_table (info);
2215 if (htab == NULL)
2216 return FALSE;
2217
2218 elf_section_data (sec)->local_dynrel = NULL;
2219
2220 symtab_hdr = &elf_symtab_hdr (abfd);
2221 sym_hashes = elf_sym_hashes (abfd);
2222 local_got_refcounts = elf_local_got_refcounts (abfd);
2223
2224 htab = elf_x86_64_hash_table (info);
2225 relend = relocs + sec->reloc_count;
2226 for (rel = relocs; rel < relend; rel++)
2227 {
2228 unsigned long r_symndx;
2229 unsigned int r_type;
2230 struct elf_link_hash_entry *h = NULL;
2231 bfd_boolean pointer_reloc;
2232
2233 r_symndx = htab->r_sym (rel->r_info);
2234 if (r_symndx >= symtab_hdr->sh_info)
2235 {
2236 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
2237 while (h->root.type == bfd_link_hash_indirect
2238 || h->root.type == bfd_link_hash_warning)
2239 h = (struct elf_link_hash_entry *) h->root.u.i.link;
2240 }
2241 else
2242 {
2243 /* A local symbol. */
2244 Elf_Internal_Sym *isym;
2245
2246 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
2247 abfd, r_symndx);
2248
2249 /* Check relocation against local STT_GNU_IFUNC symbol. */
2250 if (isym != NULL
2251 && ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
2252 {
2253 h = elf_x86_64_get_local_sym_hash (htab, abfd, rel, FALSE);
2254 if (h == NULL)
2255 abort ();
2256 }
2257 }
2258
2259 if (h)
2260 {
2261 struct elf_x86_64_link_hash_entry *eh;
2262 struct elf_dyn_relocs **pp;
2263 struct elf_dyn_relocs *p;
2264
2265 eh = (struct elf_x86_64_link_hash_entry *) h;
2266
2267 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; pp = &p->next)
2268 if (p->sec == sec)
2269 {
2270 /* Everything must go for SEC. */
2271 *pp = p->next;
2272 break;
2273 }
2274 }
2275
2276 r_type = ELF32_R_TYPE (rel->r_info);
2277 if (! elf_x86_64_tls_transition (info, abfd, sec, NULL,
2278 symtab_hdr, sym_hashes,
2279 &r_type, GOT_UNKNOWN,
2280 rel, relend, h, r_symndx))
2281 return FALSE;
2282
2283 pointer_reloc = FALSE;
2284 switch (r_type)
2285 {
2286 case R_X86_64_TLSLD:
2287 if (htab->tls_ld_got.refcount > 0)
2288 htab->tls_ld_got.refcount -= 1;
2289 break;
2290
2291 case R_X86_64_TLSGD:
2292 case R_X86_64_GOTPC32_TLSDESC:
2293 case R_X86_64_TLSDESC_CALL:
2294 case R_X86_64_GOTTPOFF:
2295 case R_X86_64_GOT32:
2296 case R_X86_64_GOTPCREL:
2297 case R_X86_64_GOTPCRELX:
2298 case R_X86_64_REX_GOTPCRELX:
2299 case R_X86_64_GOT64:
2300 case R_X86_64_GOTPCREL64:
2301 case R_X86_64_GOTPLT64:
2302 if (h != NULL)
2303 {
2304 if (h->got.refcount > 0)
2305 h->got.refcount -= 1;
2306 if (h->type == STT_GNU_IFUNC)
2307 {
2308 if (h->plt.refcount > 0)
2309 h->plt.refcount -= 1;
2310 }
2311 }
2312 else if (local_got_refcounts != NULL)
2313 {
2314 if (local_got_refcounts[r_symndx] > 0)
2315 local_got_refcounts[r_symndx] -= 1;
2316 }
2317 break;
2318
2319 case R_X86_64_32:
2320 case R_X86_64_32S:
2321 pointer_reloc = !ABI_64_P (abfd);
2322 goto pointer;
2323
2324 case R_X86_64_64:
2325 pointer_reloc = TRUE;
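	  /* Fall through.  */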
2326 case R_X86_64_8:
2327 case R_X86_64_16:
2328 case R_X86_64_PC8:
2329 case R_X86_64_PC16:
2330 case R_X86_64_PC32:
2331 case R_X86_64_PC32_BND:
2332 case R_X86_64_PC64:
2333 case R_X86_64_SIZE32:
2334 case R_X86_64_SIZE64:
2335 pointer:
2336 if (bfd_link_pic (info)
2337 && (h == NULL || h->type != STT_GNU_IFUNC))
2338 break;
2339 /* Fall thru */
2340
2341 case R_X86_64_PLT32:
2342 case R_X86_64_PLT32_BND:
2343 case R_X86_64_PLTOFF64:
2344 if (h != NULL)
2345 {
2346 if (h->plt.refcount > 0)
2347 h->plt.refcount -= 1;
2348 if (pointer_reloc && (sec->flags & SEC_READONLY) == 0)
2349 {
2350 struct elf_x86_64_link_hash_entry *eh
2351 = (struct elf_x86_64_link_hash_entry *) h;
2352 if (eh->func_pointer_refcount > 0)
2353 eh->func_pointer_refcount -= 1;
2354 }
2355 }
2356 break;
2357
2358 default:
2359 break;
2360 }
2361 }
2362
2363 return TRUE;
2364 }
2365
2366 /* Adjust a symbol defined by a dynamic object and referenced by a
2367 regular object. The current definition is in some section of the
2368 dynamic object, but we're not including those sections. We have to
2369 change the definition to something the rest of the link can
2370 understand. */
2371
2372 static bfd_boolean
2373 elf_x86_64_adjust_dynamic_symbol (struct bfd_link_info *info,
2374 struct elf_link_hash_entry *h)
2375 {
2376 struct elf_x86_64_link_hash_table *htab;
2377 asection *s;
2378 struct elf_x86_64_link_hash_entry *eh;
2379 struct elf_dyn_relocs *p;
2380
2381 /* STT_GNU_IFUNC symbol must go through PLT. */
2382 if (h->type == STT_GNU_IFUNC)
2383 {
2384       /* All local STT_GNU_IFUNC references must be treated as local
2385 calls via local PLT. */
2386 if (h->ref_regular
2387 && SYMBOL_CALLS_LOCAL (info, h))
2388 {
2389 bfd_size_type pc_count = 0, count = 0;
2390 struct elf_dyn_relocs **pp;
2391
2392 eh = (struct elf_x86_64_link_hash_entry *) h;
2393 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
2394 {
2395 pc_count += p->pc_count;
2396 p->count -= p->pc_count;
2397 p->pc_count = 0;
2398 count += p->count;
2399 if (p->count == 0)
2400 *pp = p->next;
2401 else
2402 pp = &p->next;
2403 }
2404
2405 if (pc_count || count)
2406 {
2407 h->needs_plt = 1;
2408 h->non_got_ref = 1;
2409 if (h->plt.refcount <= 0)
2410 h->plt.refcount = 1;
2411 else
2412 h->plt.refcount += 1;
2413 }
2414 }
2415
2416 if (h->plt.refcount <= 0)
2417 {
2418 h->plt.offset = (bfd_vma) -1;
2419 h->needs_plt = 0;
2420 }
2421 return TRUE;
2422 }
2423
2424 /* If this is a function, put it in the procedure linkage table. We
2425 will fill in the contents of the procedure linkage table later,
2426 when we know the address of the .got section. */
2427 if (h->type == STT_FUNC
2428 || h->needs_plt)
2429 {
2430 if (h->plt.refcount <= 0
2431 || SYMBOL_CALLS_LOCAL (info, h)
2432 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
2433 && h->root.type == bfd_link_hash_undefweak))
2434 {
2435 /* This case can occur if we saw a PLT32 reloc in an input
2436 file, but the symbol was never referred to by a dynamic
2437 object, or if all references were garbage collected. In
2438 such a case, we don't actually need to build a procedure
2439 linkage table, and we can just do a PC32 reloc instead. */
2440 h->plt.offset = (bfd_vma) -1;
2441 h->needs_plt = 0;
2442 }
2443
2444 return TRUE;
2445 }
2446 else
2447 /* It's possible that we incorrectly decided a .plt reloc was
2448 needed for an R_X86_64_PC32 reloc to a non-function sym in
2449 check_relocs. We can't decide accurately between function and
2450 non-function syms in check-relocs; Objects loaded later in
2451 the link may change h->type. So fix it now. */
2452 h->plt.offset = (bfd_vma) -1;
2453
2454 /* If this is a weak symbol, and there is a real definition, the
2455 processor independent code will have arranged for us to see the
2456 real definition first, and we can just use the same value. */
2457 if (h->u.weakdef != NULL)
2458 {
2459 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
2460 || h->u.weakdef->root.type == bfd_link_hash_defweak);
2461 h->root.u.def.section = h->u.weakdef->root.u.def.section;
2462 h->root.u.def.value = h->u.weakdef->root.u.def.value;
2463 if (ELIMINATE_COPY_RELOCS || info->nocopyreloc)
2464 {
2465 eh = (struct elf_x86_64_link_hash_entry *) h;
2466 h->non_got_ref = h->u.weakdef->non_got_ref;
2467 eh->needs_copy = h->u.weakdef->needs_copy;
2468 }
2469 return TRUE;
2470 }
2471
2472 /* This is a reference to a symbol defined by a dynamic object which
2473 is not a function. */
2474
2475 /* If we are creating a shared library, we must presume that the
2476 only references to the symbol are via the global offset table.
2477 For such cases we need not do anything here; the relocations will
2478 be handled correctly by relocate_section. */
2479 if (!bfd_link_executable (info))
2480 return TRUE;
2481
2482 /* If there are no references to this symbol that do not use the
2483 GOT, we don't need to generate a copy reloc. */
2484 if (!h->non_got_ref)
2485 return TRUE;
2486
2487 /* If -z nocopyreloc was given, we won't generate them either. */
2488 if (info->nocopyreloc)
2489 {
2490 h->non_got_ref = 0;
2491 return TRUE;
2492 }
2493
2494 if (ELIMINATE_COPY_RELOCS)
2495 {
2496 eh = (struct elf_x86_64_link_hash_entry *) h;
2497 for (p = eh->dyn_relocs; p != NULL; p = p->next)
2498 {
2499 s = p->sec->output_section;
2500 if (s != NULL && (s->flags & SEC_READONLY) != 0)
2501 break;
2502 }
2503
2504 /* If we didn't find any dynamic relocs in read-only sections, then
2505 we'll be keeping the dynamic relocs and avoiding the copy reloc. */
2506 if (p == NULL)
2507 {
2508 h->non_got_ref = 0;
2509 return TRUE;
2510 }
2511 }
2512
2513 /* We must allocate the symbol in our .dynbss section, which will
2514 become part of the .bss section of the executable. There will be
2515 an entry for this symbol in the .dynsym section. The dynamic
2516 object will contain position independent code, so all references
2517 from the dynamic object to this symbol will go through the global
2518 offset table. The dynamic linker will use the .dynsym entry to
2519 determine the address it must put in the global offset table, so
2520 both the dynamic object and the regular object will refer to the
2521 same memory location for the variable. */
2522
2523 htab = elf_x86_64_hash_table (info);
2524 if (htab == NULL)
2525 return FALSE;
2526
2527 /* We must generate a R_X86_64_COPY reloc to tell the dynamic linker
2528 to copy the initial value out of the dynamic object and into the
2529 runtime process image. */
2530 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
2531 {
2532 const struct elf_backend_data *bed;
2533 bed = get_elf_backend_data (info->output_bfd);
2534 htab->srelbss->size += bed->s->sizeof_rela;
2535 h->needs_copy = 1;
2536 }
2537
2538 s = htab->sdynbss;
2539
2540 return _bfd_elf_adjust_dynamic_copy (info, h, s);
2541 }
2542
2543 /* Allocate space in .plt, .got and associated reloc sections for
2544 dynamic relocs. */
2545
2546 static bfd_boolean
2547 elf_x86_64_allocate_dynrelocs (struct elf_link_hash_entry *h, void * inf)
2548 {
2549 struct bfd_link_info *info;
2550 struct elf_x86_64_link_hash_table *htab;
2551 struct elf_x86_64_link_hash_entry *eh;
2552 struct elf_dyn_relocs *p;
2553 const struct elf_backend_data *bed;
2554 unsigned int plt_entry_size;
2555
2556 if (h->root.type == bfd_link_hash_indirect)
2557 return TRUE;
2558
2559 eh = (struct elf_x86_64_link_hash_entry *) h;
2560
2561 info = (struct bfd_link_info *) inf;
2562 htab = elf_x86_64_hash_table (info);
2563 if (htab == NULL)
2564 return FALSE;
2565 bed = get_elf_backend_data (info->output_bfd);
2566 plt_entry_size = GET_PLT_ENTRY_SIZE (info->output_bfd);
2567
2568 /* We can't use the GOT PLT if pointer equality is needed since
2569      finish_dynamic_symbol won't clear the symbol value and the dynamic
2570 linker won't update the GOT slot. We will get into an infinite
2571 loop at run-time. */
2572 if (htab->plt_got != NULL
2573 && h->type != STT_GNU_IFUNC
2574 && !h->pointer_equality_needed
2575 && h->plt.refcount > 0
2576 && h->got.refcount > 0)
2577 {
2578 /* Don't use the regular PLT if there are both GOT and GOTPLT
2579 	 relocations.  */
2580 h->plt.offset = (bfd_vma) -1;
2581
2582 /* Use the GOT PLT. */
2583 eh->plt_got.refcount = 1;
2584 }
2585
2586 /* Clear the reference count of function pointer relocations if
2587 symbol isn't a normal function. */
2588 if (h->type != STT_FUNC)
2589 eh->func_pointer_refcount = 0;
2590
2591 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle it
2592 here if it is defined and referenced in a non-shared object. */
2593 if (h->type == STT_GNU_IFUNC
2594 && h->def_regular)
2595 {
2596 if (_bfd_elf_allocate_ifunc_dyn_relocs (info, h,
2597 &eh->dyn_relocs,
2598 plt_entry_size,
2599 plt_entry_size,
2600 GOT_ENTRY_SIZE))
2601 {
2602 asection *s = htab->plt_bnd;
2603 if (h->plt.offset != (bfd_vma) -1 && s != NULL)
2604 {
2605 /* Use the .plt.bnd section if it is created. */
2606 eh->plt_bnd.offset = s->size;
2607
2608 /* Make room for this entry in the .plt.bnd section. */
2609 s->size += sizeof (elf_x86_64_legacy_plt2_entry);
2610 }
2611
2612 return TRUE;
2613 }
2614 else
2615 return FALSE;
2616 }
2617 /* Don't create the PLT entry if there are only function pointer
2618 relocations which can be resolved at run-time. */
2619 else if (htab->elf.dynamic_sections_created
2620 && (h->plt.refcount > eh->func_pointer_refcount
2621 || eh->plt_got.refcount > 0))
2622 {
2623 bfd_boolean use_plt_got;
2624
2625 /* Clear the reference count of function pointer relocations
2626 if PLT is used. */
2627 eh->func_pointer_refcount = 0;
2628
2629 if ((info->flags & DF_BIND_NOW) && !h->pointer_equality_needed)
2630 {
2631 /* Don't use the regular PLT for DF_BIND_NOW. */
2632 h->plt.offset = (bfd_vma) -1;
2633
2634 /* Use the GOT PLT. */
2635 h->got.refcount = 1;
2636 eh->plt_got.refcount = 1;
2637 }
2638
2639 use_plt_got = eh->plt_got.refcount > 0;
2640
2641 /* Make sure this symbol is output as a dynamic symbol.
2642 Undefined weak syms won't yet be marked as dynamic. */
2643 if (h->dynindx == -1
2644 && !h->forced_local)
2645 {
2646 if (! bfd_elf_link_record_dynamic_symbol (info, h))
2647 return FALSE;
2648 }
2649
2650 if (bfd_link_pic (info)
2651 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
2652 {
2653 asection *s = htab->elf.splt;
2654 asection *bnd_s = htab->plt_bnd;
2655 asection *got_s = htab->plt_got;
2656
2657 /* If this is the first .plt entry, make room for the special
2658 first entry. The .plt section is used by prelink to undo
2659 prelinking for dynamic relocations. */
2660 if (s->size == 0)
2661 s->size = plt_entry_size;
2662
2663 if (use_plt_got)
2664 eh->plt_got.offset = got_s->size;
2665 else
2666 {
2667 h->plt.offset = s->size;
2668 if (bnd_s)
2669 eh->plt_bnd.offset = bnd_s->size;
2670 }
2671
2672 /* If this symbol is not defined in a regular file, and we are
2673 not generating a shared library, then set the symbol to this
2674 location in the .plt. This is required to make function
2675 pointers compare as equal between the normal executable and
2676 the shared library. */
2677 if (! bfd_link_pic (info)
2678 && !h->def_regular)
2679 {
2680 if (use_plt_got)
2681 {
2682 /* We need to make a call to the entry of the GOT PLT
2683 instead of regular PLT entry. */
2684 h->root.u.def.section = got_s;
2685 h->root.u.def.value = eh->plt_got.offset;
2686 }
2687 else
2688 {
2689 if (bnd_s)
2690 {
2691 /* We need to make a call to the entry of the second
2692 PLT instead of regular PLT entry. */
2693 h->root.u.def.section = bnd_s;
2694 h->root.u.def.value = eh->plt_bnd.offset;
2695 }
2696 else
2697 {
2698 h->root.u.def.section = s;
2699 h->root.u.def.value = h->plt.offset;
2700 }
2701 }
2702 }
2703
2704 /* Make room for this entry. */
2705 if (use_plt_got)
2706 got_s->size += sizeof (elf_x86_64_legacy_plt2_entry);
2707 else
2708 {
2709 s->size += plt_entry_size;
2710 if (bnd_s)
2711 bnd_s->size += sizeof (elf_x86_64_legacy_plt2_entry);
2712
2713 /* We also need to make an entry in the .got.plt section,
2714 which will be placed in the .got section by the linker
2715 script. */
2716 htab->elf.sgotplt->size += GOT_ENTRY_SIZE;
2717
2718 /* We also need to make an entry in the .rela.plt
2719 section. */
2720 htab->elf.srelplt->size += bed->s->sizeof_rela;
2721 htab->elf.srelplt->reloc_count++;
2722 }
2723 }
2724 else
2725 {
2726 h->plt.offset = (bfd_vma) -1;
2727 h->needs_plt = 0;
2728 }
2729 }
2730 else
2731 {
2732 h->plt.offset = (bfd_vma) -1;
2733 h->needs_plt = 0;
2734 }
2735
2736 eh->tlsdesc_got = (bfd_vma) -1;
2737
2738 /* If R_X86_64_GOTTPOFF symbol is now local to the binary,
2739 make it a R_X86_64_TPOFF32 requiring no GOT entry. */
2740 if (h->got.refcount > 0
2741 && bfd_link_executable (info)
2742 && h->dynindx == -1
2743 && elf_x86_64_hash_entry (h)->tls_type == GOT_TLS_IE)
2744 {
2745 h->got.offset = (bfd_vma) -1;
2746 }
2747 else if (h->got.refcount > 0)
2748 {
2749 asection *s;
2750 bfd_boolean dyn;
2751 int tls_type = elf_x86_64_hash_entry (h)->tls_type;
2752
2753 /* Make sure this symbol is output as a dynamic symbol.
2754 Undefined weak syms won't yet be marked as dynamic. */
2755 if (h->dynindx == -1
2756 && !h->forced_local)
2757 {
2758 if (! bfd_elf_link_record_dynamic_symbol (info, h))
2759 return FALSE;
2760 }
2761
2762 if (GOT_TLS_GDESC_P (tls_type))
2763 {
2764 eh->tlsdesc_got = htab->elf.sgotplt->size
2765 - elf_x86_64_compute_jump_table_size (htab);
2766 htab->elf.sgotplt->size += 2 * GOT_ENTRY_SIZE;
2767 h->got.offset = (bfd_vma) -2;
2768 }
2769 if (! GOT_TLS_GDESC_P (tls_type)
2770 || GOT_TLS_GD_P (tls_type))
2771 {
2772 s = htab->elf.sgot;
2773 h->got.offset = s->size;
2774 s->size += GOT_ENTRY_SIZE;
2775 if (GOT_TLS_GD_P (tls_type))
2776 s->size += GOT_ENTRY_SIZE;
2777 }
2778 dyn = htab->elf.dynamic_sections_created;
2779 /* R_X86_64_TLSGD needs one dynamic relocation if local symbol
2780 and two if global.
2781 R_X86_64_GOTTPOFF needs one dynamic relocation. */
2782 if ((GOT_TLS_GD_P (tls_type) && h->dynindx == -1)
2783 || tls_type == GOT_TLS_IE)
2784 htab->elf.srelgot->size += bed->s->sizeof_rela;
2785 else if (GOT_TLS_GD_P (tls_type))
2786 htab->elf.srelgot->size += 2 * bed->s->sizeof_rela;
2787 else if (! GOT_TLS_GDESC_P (tls_type)
2788 && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
2789 || h->root.type != bfd_link_hash_undefweak)
2790 && (bfd_link_pic (info)
2791 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
2792 htab->elf.srelgot->size += bed->s->sizeof_rela;
2793 if (GOT_TLS_GDESC_P (tls_type))
2794 {
2795 htab->elf.srelplt->size += bed->s->sizeof_rela;
2796 htab->tlsdesc_plt = (bfd_vma) -1;
2797 }
2798 }
2799 else
2800 h->got.offset = (bfd_vma) -1;
2801
2802 if (eh->dyn_relocs == NULL)
2803 return TRUE;
2804
2805 /* In the shared -Bsymbolic case, discard space allocated for
2806 dynamic pc-relative relocs against symbols which turn out to be
2807 defined in regular objects. For the normal shared case, discard
2808 space for pc-relative relocs that have become local due to symbol
2809 visibility changes. */
2810
2811 if (bfd_link_pic (info))
2812 {
2813 /* Relocs that use pc_count are those that appear on a call
2814 	 insn, or certain REL relocs that can be generated via assembly.
2815 We want calls to protected symbols to resolve directly to the
2816 function rather than going via the plt. If people want
2817 function pointer comparisons to work as expected then they
2818 should avoid writing weird assembly. */
2819 if (SYMBOL_CALLS_LOCAL (info, h))
2820 {
2821 struct elf_dyn_relocs **pp;
2822
2823 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
2824 {
2825 p->count -= p->pc_count;
2826 p->pc_count = 0;
2827 if (p->count == 0)
2828 *pp = p->next;
2829 else
2830 pp = &p->next;
2831 }
2832 }
2833
2834 /* Also discard relocs on undefined weak syms with non-default
2835 visibility. */
2836 if (eh->dyn_relocs != NULL)
2837 {
2838 if (h->root.type == bfd_link_hash_undefweak)
2839 {
2840 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
2841 eh->dyn_relocs = NULL;
2842
2843 /* Make sure undefined weak symbols are output as a dynamic
2844 symbol in PIEs. */
2845 else if (h->dynindx == -1
2846 && ! h->forced_local
2847 && ! bfd_elf_link_record_dynamic_symbol (info, h))
2848 return FALSE;
2849 }
2850 /* For PIE, discard space for pc-relative relocs against
2851 symbols which turn out to need copy relocs. */
2852 else if (bfd_link_executable (info)
2853 && (h->needs_copy || eh->needs_copy)
2854 && h->def_dynamic
2855 && !h->def_regular)
2856 {
2857 struct elf_dyn_relocs **pp;
2858
2859 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
2860 {
2861 if (p->pc_count != 0)
2862 *pp = p->next;
2863 else
2864 pp = &p->next;
2865 }
2866 }
2867 }
2868 }
2869 else if (ELIMINATE_COPY_RELOCS)
2870 {
2871 /* For the non-shared case, discard space for relocs against
2872 symbols which turn out to need copy relocs or are not
2873 dynamic. Keep dynamic relocations for run-time function
2874 pointer initialization. */
2875
2876 if ((!h->non_got_ref || eh->func_pointer_refcount > 0)
2877 && ((h->def_dynamic
2878 && !h->def_regular)
2879 || (htab->elf.dynamic_sections_created
2880 && (h->root.type == bfd_link_hash_undefweak
2881 || h->root.type == bfd_link_hash_undefined))))
2882 {
2883 /* Make sure this symbol is output as a dynamic symbol.
2884 Undefined weak syms won't yet be marked as dynamic. */
2885 if (h->dynindx == -1
2886 && ! h->forced_local
2887 && ! bfd_elf_link_record_dynamic_symbol (info, h))
2888 return FALSE;
2889
2890 /* If that succeeded, we know we'll be keeping all the
2891 relocs. */
2892 if (h->dynindx != -1)
2893 goto keep;
2894 }
2895
2896 eh->dyn_relocs = NULL;
2897 eh->func_pointer_refcount = 0;
2898
2899 keep: ;
2900 }
2901
2902 /* Finally, allocate space. */
2903 for (p = eh->dyn_relocs; p != NULL; p = p->next)
2904 {
2905 asection * sreloc;
2906
2907 sreloc = elf_section_data (p->sec)->sreloc;
2908
2909 BFD_ASSERT (sreloc != NULL);
2910
2911 sreloc->size += p->count * bed->s->sizeof_rela;
2912 }
2913
2914 return TRUE;
2915 }
2916
2917 /* Allocate space in .plt, .got and associated reloc sections for
2918 local dynamic relocs. */
2919
2920 static bfd_boolean
2921 elf_x86_64_allocate_local_dynrelocs (void **slot, void *inf)
2922 {
2923 struct elf_link_hash_entry *h
2924 = (struct elf_link_hash_entry *) *slot;
2925
2926 if (h->type != STT_GNU_IFUNC
2927 || !h->def_regular
2928 || !h->ref_regular
2929 || !h->forced_local
2930 || h->root.type != bfd_link_hash_defined)
2931 abort ();
2932
2933 return elf_x86_64_allocate_dynrelocs (h, inf);
2934 }
2935
2936 /* Find any dynamic relocs that apply to read-only sections. */
2937
2938 static bfd_boolean
2939 elf_x86_64_readonly_dynrelocs (struct elf_link_hash_entry *h,
2940 void * inf)
2941 {
2942 struct elf_x86_64_link_hash_entry *eh;
2943 struct elf_dyn_relocs *p;
2944
2945 /* Skip local IFUNC symbols. */
2946 if (h->forced_local && h->type == STT_GNU_IFUNC)
2947 return TRUE;
2948
2949 eh = (struct elf_x86_64_link_hash_entry *) h;
2950 for (p = eh->dyn_relocs; p != NULL; p = p->next)
2951 {
2952 asection *s = p->sec->output_section;
2953
2954 if (s != NULL && (s->flags & SEC_READONLY) != 0)
2955 {
2956 struct bfd_link_info *info = (struct bfd_link_info *) inf;
2957
2958 info->flags |= DF_TEXTREL;
2959
2960 if ((info->warn_shared_textrel && bfd_link_pic (info))
2961 || info->error_textrel)
2962 info->callbacks->einfo (_("%P: %B: warning: relocation against `%s' in readonly section `%A'\n"),
2963 p->sec->owner, h->root.root.string,
2964 p->sec);
2965
2966 /* Not an error, just cut short the traversal. */
2967 return FALSE;
2968 }
2969 }
2970 return TRUE;
2971 }
2972
2973 /* With the local symbol, foo, we convert
2974 mov foo@GOTPCREL(%rip), %reg
2975 to
2976 lea foo(%rip), %reg
2977 and convert
2978 call/jmp *foo@GOTPCREL(%rip)
2979 to
2980 nop call foo/jmp foo nop
2981 When PIC is false, convert
2982 test %reg, foo@GOTPCREL(%rip)
2983 to
2984 test $foo, %reg
2985 and convert
2986 binop foo@GOTPCREL(%rip), %reg
2987 to
2988 binop $foo, %reg
2989 where binop is one of adc, add, and, cmp, or, sbb, sub, xor
2990 instructions. */
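
   For example, with %rax as the register, "mov foo@GOTPCREL(%rip), %rax"
   is encoded as 48 8b 05 <disp32> and the converted
   "lea foo(%rip), %rax" as 48 8d 05 <disp32>: only the opcode byte and
   the relocation (R_X86_64_GOTPCREL to R_X86_64_PC32) change, so the
   instruction length stays the same.  */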
2991
2992 static bfd_boolean
2993 elf_x86_64_convert_load (bfd *abfd, asection *sec,
2994 struct bfd_link_info *link_info)
2995 {
2996 Elf_Internal_Shdr *symtab_hdr;
2997 Elf_Internal_Rela *internal_relocs;
2998 Elf_Internal_Rela *irel, *irelend;
2999 bfd_byte *contents;
3000 struct elf_x86_64_link_hash_table *htab;
3001 bfd_boolean changed_contents;
3002 bfd_boolean changed_relocs;
3003 bfd_signed_vma *local_got_refcounts;
3004 bfd_vma maxpagesize;
3005
3006 /* Don't even try to convert non-ELF outputs. */
3007 if (!is_elf_hash_table (link_info->hash))
3008 return FALSE;
3009
3010 /* Nothing to do if there is no need or no output. */
3011 if ((sec->flags & (SEC_CODE | SEC_RELOC)) != (SEC_CODE | SEC_RELOC)
3012 || sec->need_convert_load == 0
3013 || bfd_is_abs_section (sec->output_section))
3014 return TRUE;
3015
3016 symtab_hdr = &elf_tdata (abfd)->symtab_hdr;
3017
3018 /* Load the relocations for this section. */
3019 internal_relocs = (_bfd_elf_link_read_relocs
3020 (abfd, sec, NULL, (Elf_Internal_Rela *) NULL,
3021 link_info->keep_memory));
3022 if (internal_relocs == NULL)
3023 return FALSE;
3024
3025 htab = elf_x86_64_hash_table (link_info);
3026 changed_contents = FALSE;
3027 changed_relocs = FALSE;
3028 local_got_refcounts = elf_local_got_refcounts (abfd);
3029 maxpagesize = get_elf_backend_data (abfd)->maxpagesize;
3030
3031 /* Get the section contents. */
3032 if (elf_section_data (sec)->this_hdr.contents != NULL)
3033 contents = elf_section_data (sec)->this_hdr.contents;
3034 else
3035 {
3036 if (!bfd_malloc_and_get_section (abfd, sec, &contents))
3037 goto error_return;
3038 }
3039
3040 irelend = internal_relocs + sec->reloc_count;
3041 for (irel = internal_relocs; irel < irelend; irel++)
3042 {
3043 unsigned int r_type = ELF32_R_TYPE (irel->r_info);
3044 unsigned int r_symndx = htab->r_sym (irel->r_info);
3045 unsigned int indx;
3046 struct elf_link_hash_entry *h;
3047 asection *tsec;
3048 char symtype;
3049 bfd_vma toff, roff;
3050 bfd_signed_vma raddend;
3051 unsigned int opcode;
3052 unsigned int modrm;
3053
3054 if (r_type != R_X86_64_GOTPCREL
3055 && r_type != R_X86_64_GOTPCRELX
3056 && r_type != R_X86_64_REX_GOTPCRELX)
3057 continue;
3058
3059 roff = irel->r_offset;
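      /* The opcode byte (and the REX prefix for R_X86_64_REX_GOTPCRELX)
	 sits just before the relocated displacement; skip relocations
	 that are too close to the start of the section for those bytes
	 to exist.  */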
3060 if (roff < (r_type == R_X86_64_REX_GOTPCRELX ? 3 : 2))
3061 continue;
3062
3063 raddend = irel->r_addend;
3064 /* Addend for 32-bit PC-relative relocation must be -4. */
3065 if (raddend != -4)
3066 continue;
3067
3068 opcode = bfd_get_8 (abfd, contents + roff - 2);
3069
3070 /* It is OK to convert mov to lea. */
3071 if (opcode != 0x8b)
3072 {
3073 /* Only convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX
3074 	     for mov, call, jmp or one of adc, add, and, cmp, or, sbb,
3075 sub, test, xor instructions. */
3076 if (r_type != R_X86_64_GOTPCRELX
3077 && r_type != R_X86_64_REX_GOTPCRELX)
3078 continue;
3079
3080 /* It is OK to convert indirect branch to direct branch. */
3081 if (opcode != 0xff)
3082 {
3083 /* It is OK to convert adc, add, and, cmp, or, sbb, sub,
3084 test, xor only when PIC is false. */
3085 if (bfd_link_pic (link_info))
3086 continue;
3087 }
3088 }
3089
3090 /* Get the symbol referred to by the reloc. */
3091 if (r_symndx < symtab_hdr->sh_info)
3092 {
3093 Elf_Internal_Sym *isym;
3094
3095 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
3096 abfd, r_symndx);
3097
3098 symtype = ELF_ST_TYPE (isym->st_info);
3099
3100 /* STT_GNU_IFUNC must keep GOTPCREL relocations and skip
3101 	     relocations against undefined symbols.  */
3102 if (symtype == STT_GNU_IFUNC || isym->st_shndx == SHN_UNDEF)
3103 continue;
3104
3105 if (isym->st_shndx == SHN_ABS)
3106 tsec = bfd_abs_section_ptr;
3107 else if (isym->st_shndx == SHN_COMMON)
3108 tsec = bfd_com_section_ptr;
3109 else if (isym->st_shndx == SHN_X86_64_LCOMMON)
3110 tsec = &_bfd_elf_large_com_section;
3111 else
3112 tsec = bfd_section_from_elf_index (abfd, isym->st_shndx);
3113
3114 h = NULL;
3115 toff = isym->st_value;
3116 }
3117 else
3118 {
3119 indx = r_symndx - symtab_hdr->sh_info;
3120 h = elf_sym_hashes (abfd)[indx];
3121 BFD_ASSERT (h != NULL);
3122
3123 while (h->root.type == bfd_link_hash_indirect
3124 || h->root.type == bfd_link_hash_warning)
3125 h = (struct elf_link_hash_entry *) h->root.u.i.link;
3126
3127 /* STT_GNU_IFUNC must keep GOTPCREL relocations. We also
3128 	     avoid optimizing GOTPCREL relocations against _DYNAMIC
3129 since ld.so may use its link-time address. */
3130 if ((h->root.type == bfd_link_hash_defined
3131 || h->root.type == bfd_link_hash_defweak
3132 || h->root.type == bfd_link_hash_new)
3133 && h->type != STT_GNU_IFUNC
3134 && h != htab->elf.hdynamic
3135 && SYMBOL_REFERENCES_LOCAL (link_info, h))
3136 {
3137 /* bfd_link_hash_new is set by an assignment in a linker
3138 script in bfd_elf_record_link_assignment. FIXME: If
3139 		 we ever get a linker error due to relocation overflow, we
3140 will skip this optimization. */
3141 if (h->root.type == bfd_link_hash_new)
3142 goto convert;
3143 tsec = h->root.u.def.section;
3144 toff = h->root.u.def.value;
3145 symtype = h->type;
3146 }
3147 else
3148 continue;
3149 }
3150
3151 if (tsec->sec_info_type == SEC_INFO_TYPE_MERGE)
3152 {
3153 /* At this stage in linking, no SEC_MERGE symbol has been
3154 adjusted, so all references to such symbols need to be
3155 passed through _bfd_merged_section_offset. (Later, in
3156 relocate_section, all SEC_MERGE symbols *except* for
3157 section symbols have been adjusted.)
3158
3159 gas may reduce relocations against symbols in SEC_MERGE
3160 sections to a relocation against the section symbol when
3161 the original addend was zero. When the reloc is against
3162 a section symbol we should include the addend in the
3163 offset passed to _bfd_merged_section_offset, since the
3164 location of interest is the original symbol. On the
3165 other hand, an access to "sym+addend" where "sym" is not
3166 	     a section symbol should not include the addend; such an
3167 	     access is presumed to be an offset from "sym"; the
3168 location of interest is just "sym". */
3169 if (symtype == STT_SECTION)
3170 toff += raddend;
3171
3172 toff = _bfd_merged_section_offset (abfd, &tsec,
3173 elf_section_data (tsec)->sec_info,
3174 toff);
3175
3176 if (symtype != STT_SECTION)
3177 toff += raddend;
3178 }
3179 else
3180 toff += raddend;
3181
3182 /* Don't convert if R_X86_64_PC32 relocation overflows. */
3183 if (tsec->output_section == sec->output_section)
3184 {
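	  /* toff - roff approximates the PC-relative displacement; the
	     0x80000000 bias maps the valid signed 32-bit range onto
	     [0, 0xffffffff] so a single unsigned comparison detects
	     overflow.  */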
3185 if ((toff - roff + 0x80000000) > 0xffffffff)
3186 continue;
3187 }
3188 else
3189 {
3190 asection *asect;
3191 bfd_size_type size;
3192
3193 	  /* At this point, we don't know the load addresses of either the
3194 	     TSEC or SEC section.  We estimate the distance between
3195 SEC and TSEC. */
3196 size = 0;
3197 for (asect = sec->output_section;
3198 asect != NULL && asect != tsec->output_section;
3199 asect = asect->next)
3200 {
3201 asection *i;
3202 for (i = asect->output_section->map_head.s;
3203 i != NULL;
3204 i = i->map_head.s)
3205 {
3206 size = align_power (size, i->alignment_power);
3207 size += i->size;
3208 }
3209 }
3210
3211 /* Don't convert GOTPCREL relocations if TSEC isn't placed
3212 after SEC. */
3213 if (asect == NULL)
3214 continue;
3215
3216 /* Take PT_GNU_RELRO segment into account by adding
3217 maxpagesize. */
3218 if ((toff + size + maxpagesize - roff + 0x80000000)
3219 > 0xffffffff)
3220 continue;
3221 }
3222
3223 convert:
3224 if (opcode == 0xff)
3225 {
3226 /* We have "call/jmp *foo@GOTPCREL(%rip)". */
3227 unsigned int nop;
3228 unsigned int disp;
3229 bfd_vma nop_offset;
3230
3231 /* Convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX to
3232 R_X86_64_PC32. */
3233 modrm = bfd_get_8 (abfd, contents + roff - 1);
3234 if (modrm == 0x25)
3235 {
3236 /* Convert to "jmp foo nop". */
3237 modrm = 0xe9;
3238 nop = NOP_OPCODE;
3239 nop_offset = irel->r_offset + 3;
3240 disp = bfd_get_32 (abfd, contents + irel->r_offset);
3241 irel->r_offset -= 1;
3242 bfd_put_32 (abfd, disp, contents + irel->r_offset);
3243 }
3244 else
3245 {
3246 /* Convert to "nop call foo". ADDR_PREFIX_OPCODE
3247 is a nop prefix. */
3248 modrm = 0xe8;
3249 nop = link_info->call_nop_byte;
3250 if (link_info->call_nop_as_suffix)
3251 {
3252 nop_offset = irel->r_offset + 3;
3253 disp = bfd_get_32 (abfd, contents + irel->r_offset);
3254 irel->r_offset -= 1;
3255 bfd_put_32 (abfd, disp, contents + irel->r_offset);
3256 }
3257 else
3258 nop_offset = irel->r_offset - 2;
3259 }
3260 bfd_put_8 (abfd, nop, contents + nop_offset);
3261 bfd_put_8 (abfd, modrm, contents + irel->r_offset - 1);
3262 r_type = R_X86_64_PC32;
3263 }
3264 else
3265 {
3266 if (opcode == 0x8b)
3267 {
3268 /* Convert "mov foo@GOTPCREL(%rip), %reg" to
3269 "lea foo(%rip), %reg". */
3270 opcode = 0x8d;
3271 r_type = R_X86_64_PC32;
3272 }
3273 else
3274 {
3275 modrm = bfd_get_8 (abfd, contents + roff - 1);
3276 if (opcode == 0x85)
3277 {
3278 /* Convert "test %reg, foo@GOTPCREL(%rip)" to
3279 "test $foo, %reg". */
3280 modrm = 0xc0 | (modrm & 0x38) >> 3;
3281 opcode = 0xf7;
3282 }
3283 else
3284 {
3285 /* Convert "binop foo@GOTPCREL(%rip), %reg" to
3286 "binop $foo, %reg". */
3287 modrm = 0xc0 | (modrm & 0x38) >> 3 | (opcode & 0x3c);
3288 opcode = 0x81;
3289 }
3290 bfd_put_8 (abfd, modrm, contents + roff - 1);
3291
3292 if (r_type == R_X86_64_REX_GOTPCRELX)
3293 {
3294 /* Move the R bit to the B bit in REX byte. */
3295 unsigned int rex = bfd_get_8 (abfd, contents + roff - 3);
3296 rex = (rex & ~REX_R) | (rex & REX_R) >> 2;
3297 bfd_put_8 (abfd, rex, contents + roff - 3);
3298 }
3299 /* No addend for R_X86_64_32S relocation. */
3300 irel->r_addend = 0;
3301 r_type = R_X86_64_32S;
3302 }
3303
3304 bfd_put_8 (abfd, opcode, contents + roff - 2);
3305 }
3306
3307 irel->r_info = htab->r_info (r_symndx, r_type);
3308 changed_contents = TRUE;
3309 changed_relocs = TRUE;
3310
3311 if (h)
3312 {
3313 if (h->got.refcount > 0)
3314 h->got.refcount -= 1;
3315 }
3316 else
3317 {
3318 if (local_got_refcounts != NULL
3319 && local_got_refcounts[r_symndx] > 0)
3320 local_got_refcounts[r_symndx] -= 1;
3321 }
3322 }
3323
3324 if (contents != NULL
3325 && elf_section_data (sec)->this_hdr.contents != contents)
3326 {
3327 if (!changed_contents && !link_info->keep_memory)
3328 free (contents);
3329 else
3330 {
3331 /* Cache the section contents for elf_link_input_bfd. */
3332 elf_section_data (sec)->this_hdr.contents = contents;
3333 }
3334 }
3335
3336 if (elf_section_data (sec)->relocs != internal_relocs)
3337 {
3338 if (!changed_relocs)
3339 free (internal_relocs);
3340 else
3341 elf_section_data (sec)->relocs = internal_relocs;
3342 }
3343
3344 return TRUE;
3345
3346 error_return:
3347 if (contents != NULL
3348 && elf_section_data (sec)->this_hdr.contents != contents)
3349 free (contents);
3350 if (internal_relocs != NULL
3351 && elf_section_data (sec)->relocs != internal_relocs)
3352 free (internal_relocs);
3353 return FALSE;
3354 }
3355
3356 /* Set the sizes of the dynamic sections. */
3357
3358 static bfd_boolean
3359 elf_x86_64_size_dynamic_sections (bfd *output_bfd,
3360 struct bfd_link_info *info)
3361 {
3362 struct elf_x86_64_link_hash_table *htab;
3363 bfd *dynobj;
3364 asection *s;
3365 bfd_boolean relocs;
3366 bfd *ibfd;
3367 const struct elf_backend_data *bed;
3368
3369 htab = elf_x86_64_hash_table (info);
3370 if (htab == NULL)
3371 return FALSE;
3372 bed = get_elf_backend_data (output_bfd);
3373
3374 dynobj = htab->elf.dynobj;
3375 if (dynobj == NULL)
3376 abort ();
3377
3378 if (htab->elf.dynamic_sections_created)
3379 {
3380 /* Set the contents of the .interp section to the interpreter. */
3381 if (bfd_link_executable (info) && !info->nointerp)
3382 {
3383 s = bfd_get_linker_section (dynobj, ".interp");
3384 if (s == NULL)
3385 abort ();
3386 s->size = htab->dynamic_interpreter_size;
3387 s->contents = (unsigned char *) htab->dynamic_interpreter;
3388 }
3389 }
3390
3391 /* Set up .got offsets for local syms, and space for local dynamic
3392 relocs. */
3393 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
3394 {
3395 bfd_signed_vma *local_got;
3396 bfd_signed_vma *end_local_got;
3397 char *local_tls_type;
3398 bfd_vma *local_tlsdesc_gotent;
3399 bfd_size_type locsymcount;
3400 Elf_Internal_Shdr *symtab_hdr;
3401 asection *srel;
3402
3403 if (! is_x86_64_elf (ibfd))
3404 continue;
3405
3406 for (s = ibfd->sections; s != NULL; s = s->next)
3407 {
3408 struct elf_dyn_relocs *p;
3409
3410 if (!elf_x86_64_convert_load (ibfd, s, info))
3411 return FALSE;
3412
3413 for (p = (struct elf_dyn_relocs *)
3414 (elf_section_data (s)->local_dynrel);
3415 p != NULL;
3416 p = p->next)
3417 {
3418 if (!bfd_is_abs_section (p->sec)
3419 && bfd_is_abs_section (p->sec->output_section))
3420 {
3421 /* Input section has been discarded, either because
3422 it is a copy of a linkonce section or due to
3423 linker script /DISCARD/, so we'll be discarding
3424 the relocs too. */
3425 }
3426 else if (p->count != 0)
3427 {
3428 srel = elf_section_data (p->sec)->sreloc;
3429 srel->size += p->count * bed->s->sizeof_rela;
3430 if ((p->sec->output_section->flags & SEC_READONLY) != 0
3431 && (info->flags & DF_TEXTREL) == 0)
3432 {
3433 info->flags |= DF_TEXTREL;
3434 if ((info->warn_shared_textrel && bfd_link_pic (info))
3435 || info->error_textrel)
3436 info->callbacks->einfo (_("%P: %B: warning: relocation in readonly section `%A'\n"),
3437 p->sec->owner, p->sec);
3438 }
3439 }
3440 }
3441 }
3442
3443 local_got = elf_local_got_refcounts (ibfd);
3444 if (!local_got)
3445 continue;
3446
3447 symtab_hdr = &elf_symtab_hdr (ibfd);
3448 locsymcount = symtab_hdr->sh_info;
3449 end_local_got = local_got + locsymcount;
3450 local_tls_type = elf_x86_64_local_got_tls_type (ibfd);
3451 local_tlsdesc_gotent = elf_x86_64_local_tlsdesc_gotent (ibfd);
3452 s = htab->elf.sgot;
3453 srel = htab->elf.srelgot;
3454 for (; local_got < end_local_got;
3455 ++local_got, ++local_tls_type, ++local_tlsdesc_gotent)
3456 {
3457 *local_tlsdesc_gotent = (bfd_vma) -1;
3458 if (*local_got > 0)
3459 {
3460 if (GOT_TLS_GDESC_P (*local_tls_type))
3461 {
3462 *local_tlsdesc_gotent = htab->elf.sgotplt->size
3463 - elf_x86_64_compute_jump_table_size (htab);
3464 htab->elf.sgotplt->size += 2 * GOT_ENTRY_SIZE;
3465 *local_got = (bfd_vma) -2;
3466 }
3467 if (! GOT_TLS_GDESC_P (*local_tls_type)
3468 || GOT_TLS_GD_P (*local_tls_type))
3469 {
3470 *local_got = s->size;
3471 s->size += GOT_ENTRY_SIZE;
3472 if (GOT_TLS_GD_P (*local_tls_type))
3473 s->size += GOT_ENTRY_SIZE;
3474 }
3475 if (bfd_link_pic (info)
3476 || GOT_TLS_GD_ANY_P (*local_tls_type)
3477 || *local_tls_type == GOT_TLS_IE)
3478 {
3479 if (GOT_TLS_GDESC_P (*local_tls_type))
3480 {
3481 htab->elf.srelplt->size
3482 += bed->s->sizeof_rela;
3483 htab->tlsdesc_plt = (bfd_vma) -1;
3484 }
3485 if (! GOT_TLS_GDESC_P (*local_tls_type)
3486 || GOT_TLS_GD_P (*local_tls_type))
3487 srel->size += bed->s->sizeof_rela;
3488 }
3489 }
3490 else
3491 *local_got = (bfd_vma) -1;
3492 }
3493 }
3494
3495 if (htab->tls_ld_got.refcount > 0)
3496 {
3497 /* Allocate 2 got entries and 1 dynamic reloc for R_X86_64_TLSLD
3498 relocs. */
3499 htab->tls_ld_got.offset = htab->elf.sgot->size;
3500 htab->elf.sgot->size += 2 * GOT_ENTRY_SIZE;
3501 htab->elf.srelgot->size += bed->s->sizeof_rela;
3502 }
3503 else
3504 htab->tls_ld_got.offset = -1;
3505
3506 /* Allocate global sym .plt and .got entries, and space for global
3507 sym dynamic relocs. */
3508 elf_link_hash_traverse (&htab->elf, elf_x86_64_allocate_dynrelocs,
3509 info);
3510
3511 /* Allocate .plt and .got entries, and space for local symbols. */
3512 htab_traverse (htab->loc_hash_table,
3513 elf_x86_64_allocate_local_dynrelocs,
3514 info);
3515
3516 /* For every jump slot reserved in the sgotplt, reloc_count is
3517 incremented. However, when we reserve space for TLS descriptors,
3518 it's not incremented, so in order to compute the space reserved
3519 for them, it suffices to multiply the reloc count by the jump
3520 slot size.
3521
3522 PR ld/13302: We start next_irelative_index at the end of .rela.plt
3523 so that R_X86_64_IRELATIVE entries come last. */
3524 if (htab->elf.srelplt)
3525 {
3526 htab->sgotplt_jump_table_size
3527 = elf_x86_64_compute_jump_table_size (htab);
3528 htab->next_irelative_index = htab->elf.srelplt->reloc_count - 1;
3529 }
3530 else if (htab->elf.irelplt)
3531 htab->next_irelative_index = htab->elf.irelplt->reloc_count - 1;
3532
3533 if (htab->tlsdesc_plt)
3534 {
3535 /* If we're not using lazy TLS relocations, don't generate the
3536 PLT and GOT entries they require. */
3537 if ((info->flags & DF_BIND_NOW))
3538 htab->tlsdesc_plt = 0;
3539 else
3540 {
3541 htab->tlsdesc_got = htab->elf.sgot->size;
3542 htab->elf.sgot->size += GOT_ENTRY_SIZE;
3543 /* Reserve room for the initial entry.
3544 FIXME: we could probably do away with it in this case. */
3545 if (htab->elf.splt->size == 0)
3546 htab->elf.splt->size += GET_PLT_ENTRY_SIZE (output_bfd);
3547 htab->tlsdesc_plt = htab->elf.splt->size;
3548 htab->elf.splt->size += GET_PLT_ENTRY_SIZE (output_bfd);
3549 }
3550 }
3551
3552 if (htab->elf.sgotplt)
3553 {
3554       /* Don't allocate the .got.plt section if there are neither GOT nor
3555 	 PLT entries and there is no reference to _GLOBAL_OFFSET_TABLE_.  */
3556 if ((htab->elf.hgot == NULL
3557 || !htab->elf.hgot->ref_regular_nonweak)
3558 && (htab->elf.sgotplt->size
3559 == get_elf_backend_data (output_bfd)->got_header_size)
3560 && (htab->elf.splt == NULL
3561 || htab->elf.splt->size == 0)
3562 && (htab->elf.sgot == NULL
3563 || htab->elf.sgot->size == 0)
3564 && (htab->elf.iplt == NULL
3565 || htab->elf.iplt->size == 0)
3566 && (htab->elf.igotplt == NULL
3567 || htab->elf.igotplt->size == 0))
3568 htab->elf.sgotplt->size = 0;
3569 }
3570
3571 if (htab->plt_eh_frame != NULL
3572 && htab->elf.splt != NULL
3573 && htab->elf.splt->size != 0
3574 && !bfd_is_abs_section (htab->elf.splt->output_section)
3575 && _bfd_elf_eh_frame_present (info))
3576 {
3577 const struct elf_x86_64_backend_data *arch_data
3578 = get_elf_x86_64_arch_data (bed);
3579 htab->plt_eh_frame->size = arch_data->eh_frame_plt_size;
3580 }
3581
3582 /* We now have determined the sizes of the various dynamic sections.
3583 Allocate memory for them. */
3584 relocs = FALSE;
3585 for (s = dynobj->sections; s != NULL; s = s->next)
3586 {
3587 if ((s->flags & SEC_LINKER_CREATED) == 0)
3588 continue;
3589
3590 if (s == htab->elf.splt
3591 || s == htab->elf.sgot
3592 || s == htab->elf.sgotplt
3593 || s == htab->elf.iplt
3594 || s == htab->elf.igotplt
3595 || s == htab->plt_bnd
3596 || s == htab->plt_got
3597 || s == htab->plt_eh_frame
3598 || s == htab->sdynbss)
3599 {
3600 /* Strip this section if we don't need it; see the
3601 comment below. */
3602 }
3603 else if (CONST_STRNEQ (bfd_get_section_name (dynobj, s), ".rela"))
3604 {
3605 if (s->size != 0 && s != htab->elf.srelplt)
3606 relocs = TRUE;
3607
3608 /* We use the reloc_count field as a counter if we need
3609 to copy relocs into the output file. */
3610 if (s != htab->elf.srelplt)
3611 s->reloc_count = 0;
3612 }
3613 else
3614 {
3615 /* It's not one of our sections, so don't allocate space. */
3616 continue;
3617 }
3618
3619 if (s->size == 0)
3620 {
3621 /* If we don't need this section, strip it from the
3622 output file. This is mostly to handle .rela.bss and
3623 .rela.plt. We must create both sections in
3624 create_dynamic_sections, because they must be created
3625 before the linker maps input sections to output
3626 sections. The linker does that before
3627 adjust_dynamic_symbol is called, and it is that
3628 function which decides whether anything needs to go
3629 into these sections. */
3630
3631 s->flags |= SEC_EXCLUDE;
3632 continue;
3633 }
3634
3635 if ((s->flags & SEC_HAS_CONTENTS) == 0)
3636 continue;
3637
3638 /* Allocate memory for the section contents. We use bfd_zalloc
3639 here in case unused entries are not reclaimed before the
3640 section's contents are written out. This should not happen,
3641 but this way if it does, we get a R_X86_64_NONE reloc instead
3642 of garbage. */
3643 s->contents = (bfd_byte *) bfd_zalloc (dynobj, s->size);
3644 if (s->contents == NULL)
3645 return FALSE;
3646 }
3647
3648 if (htab->plt_eh_frame != NULL
3649 && htab->plt_eh_frame->contents != NULL)
3650 {
3651 const struct elf_x86_64_backend_data *arch_data
3652 = get_elf_x86_64_arch_data (bed);
3653
3654 memcpy (htab->plt_eh_frame->contents,
3655 arch_data->eh_frame_plt, htab->plt_eh_frame->size);
3656 bfd_put_32 (dynobj, htab->elf.splt->size,
3657 htab->plt_eh_frame->contents + PLT_FDE_LEN_OFFSET);
3658 }
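/* A note on the store above: PLT_FDE_LEN_OFFSET is presumably the offset of
   the address-range field of the PLT FDE within the eh_frame template, so
   writing htab->elf.splt->size here records how many bytes of .plt the
   unwind entry covers; the FDE's start address is patched later, when the
   dynamic sections are finished.  */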
3659
3660 if (htab->elf.dynamic_sections_created)
3661 {
3662 /* Add some entries to the .dynamic section. We fill in the
3663 values later, in elf_x86_64_finish_dynamic_sections, but we
3664 must add the entries now so that we get the correct size for
3665 the .dynamic section. The DT_DEBUG entry is filled in by the
3666 dynamic linker and used by the debugger. */
3667 #define add_dynamic_entry(TAG, VAL) \
3668 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
3669
3670 if (bfd_link_executable (info))
3671 {
3672 if (!add_dynamic_entry (DT_DEBUG, 0))
3673 return FALSE;
3674 }
3675
3676 if (htab->elf.splt->size != 0)
3677 {
3678 /* DT_PLTGOT is used by prelink even if there is no PLT
3679 relocation. */
3680 if (!add_dynamic_entry (DT_PLTGOT, 0))
3681 return FALSE;
3682
3683 if (htab->elf.srelplt->size != 0)
3684 {
3685 if (!add_dynamic_entry (DT_PLTRELSZ, 0)
3686 || !add_dynamic_entry (DT_PLTREL, DT_RELA)
3687 || !add_dynamic_entry (DT_JMPREL, 0))
3688 return FALSE;
3689 }
3690
3691 if (htab->tlsdesc_plt
3692 && (!add_dynamic_entry (DT_TLSDESC_PLT, 0)
3693 || !add_dynamic_entry (DT_TLSDESC_GOT, 0)))
3694 return FALSE;
3695 }
3696
3697 if (relocs)
3698 {
3699 if (!add_dynamic_entry (DT_RELA, 0)
3700 || !add_dynamic_entry (DT_RELASZ, 0)
3701 || !add_dynamic_entry (DT_RELAENT, bed->s->sizeof_rela))
3702 return FALSE;
3703
3704 /* If any dynamic relocs apply to a read-only section,
3705 then we need a DT_TEXTREL entry. */
3706 if ((info->flags & DF_TEXTREL) == 0)
3707 elf_link_hash_traverse (&htab->elf,
3708 elf_x86_64_readonly_dynrelocs,
3709 info);
3710
3711 if ((info->flags & DF_TEXTREL) != 0)
3712 {
3713 if ((elf_tdata (output_bfd)->has_gnu_symbols
3714 & elf_gnu_symbol_ifunc) == elf_gnu_symbol_ifunc)
3715 {
3716 info->callbacks->einfo
3717 (_("%P%X: read-only segment has dynamic IFUNC relocations; recompile with -fPIC\n"));
3718 bfd_set_error (bfd_error_bad_value);
3719 return FALSE;
3720 }
3721
3722 if (!add_dynamic_entry (DT_TEXTREL, 0))
3723 return FALSE;
3724 }
3725 }
3726 }
3727 #undef add_dynamic_entry
3728
3729 return TRUE;
3730 }
3731
3732 static bfd_boolean
3733 elf_x86_64_always_size_sections (bfd *output_bfd,
3734 struct bfd_link_info *info)
3735 {
3736 asection *tls_sec = elf_hash_table (info)->tls_sec;
3737
3738 if (tls_sec)
3739 {
3740 struct elf_link_hash_entry *tlsbase;
3741
3742 tlsbase = elf_link_hash_lookup (elf_hash_table (info),
3743 "_TLS_MODULE_BASE_",
3744 FALSE, FALSE, FALSE);
3745
3746 if (tlsbase && tlsbase->type == STT_TLS)
3747 {
3748 struct elf_x86_64_link_hash_table *htab;
3749 struct bfd_link_hash_entry *bh = NULL;
3750 const struct elf_backend_data *bed
3751 = get_elf_backend_data (output_bfd);
3752
3753 htab = elf_x86_64_hash_table (info);
3754 if (htab == NULL)
3755 return FALSE;
3756
3757 if (!(_bfd_generic_link_add_one_symbol
3758 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
3759 tls_sec, 0, NULL, FALSE,
3760 bed->collect, &bh)))
3761 return FALSE;
3762
3763 htab->tls_module_base = bh;
3764
3765 tlsbase = (struct elf_link_hash_entry *)bh;
3766 tlsbase->def_regular = 1;
3767 tlsbase->other = STV_HIDDEN;
3768 tlsbase->root.linker_def = 1;
3769 (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
3770 }
3771 }
3772
3773 return TRUE;
3774 }
3775
3776 /* _TLS_MODULE_BASE_ needs to be treated especially when linking
3777 executables. Rather than setting it to the beginning of the TLS
3778 section, we have to set it to the end. This function may be called
3779 multiple times; it is idempotent. */
3780
3781 static void
3782 elf_x86_64_set_tls_module_base (struct bfd_link_info *info)
3783 {
3784 struct elf_x86_64_link_hash_table *htab;
3785 struct bfd_link_hash_entry *base;
3786
3787 if (!bfd_link_executable (info))
3788 return;
3789
3790 htab = elf_x86_64_hash_table (info);
3791 if (htab == NULL)
3792 return;
3793
3794 base = htab->tls_module_base;
3795 if (base == NULL)
3796 return;
3797
3798 base->u.def.value = htab->elf.tls_size;
3799 }
3800
3801 /* Return the base VMA address which should be subtracted from real addresses
3802 when resolving @dtpoff relocation.
3803 This is PT_TLS segment p_vaddr. */
3804
3805 static bfd_vma
3806 elf_x86_64_dtpoff_base (struct bfd_link_info *info)
3807 {
3808 /* If tls_sec is NULL, we should have signalled an error already. */
3809 if (elf_hash_table (info)->tls_sec == NULL)
3810 return 0;
3811 return elf_hash_table (info)->tls_sec->vma;
3812 }
3813
3814 /* Return the relocation value for @tpoff relocation
3815 if STT_TLS virtual address is ADDRESS. */
3816
3817 static bfd_vma
3818 elf_x86_64_tpoff (struct bfd_link_info *info, bfd_vma address)
3819 {
3820 struct elf_link_hash_table *htab = elf_hash_table (info);
3821 const struct elf_backend_data *bed = get_elf_backend_data (info->output_bfd);
3822 bfd_vma static_tls_size;
3823
3824 /* If tls_segment is NULL, we should have signalled an error already. */
3825 if (htab->tls_sec == NULL)
3826 return 0;
3827
3828 /* Consider special static TLS alignment requirements. */
3829 static_tls_size = BFD_ALIGN (htab->tls_size, bed->static_tls_alignment);
3830 return address - static_tls_size - htab->tls_sec->vma;
3831 }
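/* For illustration, assuming the usual x86-64 variant-II TLS layout, where
   the thread pointer (%fs base) sits just above the static TLS block: a
   symbol at the very start of the TLS segment (ADDRESS == tls_sec->vma)
   yields -static_tls_size, i.e. @tpoff values are negative offsets from
   the thread pointer.  */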
3832
3833 /* Is the instruction before OFFSET in CONTENTS a 32bit relative
3834 branch? */
3835
3836 static bfd_boolean
3837 is_32bit_relative_branch (bfd_byte *contents, bfd_vma offset)
3838 {
3839 /* Opcode Instruction
3840 0xe8 call
3841 0xe9 jump
3842 0x0f 0x8x conditional jump */
3843 return ((offset > 0
3844 && (contents [offset - 1] == 0xe8
3845 || contents [offset - 1] == 0xe9))
3846 || (offset > 1
3847 && contents [offset - 2] == 0x0f
3848 && (contents [offset - 1] & 0xf0) == 0x80));
3849 }
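/* Worked example: "call foo" assembles to 0xe8 followed by a 32-bit
   displacement, and the R_X86_64_PC32 reloc covers that displacement, so
   contents[offset - 1] is the 0xe8 opcode; similarly "jne foo" is
   0x0f 0x85 rel32, which matches the 0x0f / 0x8x test above.  */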
3850
3851 /* Relocate an x86_64 ELF section. */
3852
3853 static bfd_boolean
3854 elf_x86_64_relocate_section (bfd *output_bfd,
3855 struct bfd_link_info *info,
3856 bfd *input_bfd,
3857 asection *input_section,
3858 bfd_byte *contents,
3859 Elf_Internal_Rela *relocs,
3860 Elf_Internal_Sym *local_syms,
3861 asection **local_sections)
3862 {
3863 struct elf_x86_64_link_hash_table *htab;
3864 Elf_Internal_Shdr *symtab_hdr;
3865 struct elf_link_hash_entry **sym_hashes;
3866 bfd_vma *local_got_offsets;
3867 bfd_vma *local_tlsdesc_gotents;
3868 Elf_Internal_Rela *rel;
3869 Elf_Internal_Rela *wrel;
3870 Elf_Internal_Rela *relend;
3871 const unsigned int plt_entry_size = GET_PLT_ENTRY_SIZE (info->output_bfd);
3872
3873 BFD_ASSERT (is_x86_64_elf (input_bfd));
3874
3875 htab = elf_x86_64_hash_table (info);
3876 if (htab == NULL)
3877 return FALSE;
3878 symtab_hdr = &elf_symtab_hdr (input_bfd);
3879 sym_hashes = elf_sym_hashes (input_bfd);
3880 local_got_offsets = elf_local_got_offsets (input_bfd);
3881 local_tlsdesc_gotents = elf_x86_64_local_tlsdesc_gotent (input_bfd);
3882
3883 elf_x86_64_set_tls_module_base (info);
3884
3885 rel = wrel = relocs;
3886 relend = relocs + input_section->reloc_count;
3887 for (; rel < relend; wrel++, rel++)
3888 {
3889 unsigned int r_type;
3890 reloc_howto_type *howto;
3891 unsigned long r_symndx;
3892 struct elf_link_hash_entry *h;
3893 struct elf_x86_64_link_hash_entry *eh;
3894 Elf_Internal_Sym *sym;
3895 asection *sec;
3896 bfd_vma off, offplt, plt_offset;
3897 bfd_vma relocation;
3898 bfd_boolean unresolved_reloc;
3899 bfd_reloc_status_type r;
3900 int tls_type;
3901 asection *base_got, *resolved_plt;
3902 bfd_vma st_size;
3903
3904 r_type = ELF32_R_TYPE (rel->r_info);
3905 if (r_type == (int) R_X86_64_GNU_VTINHERIT
3906 || r_type == (int) R_X86_64_GNU_VTENTRY)
3907 {
3908 if (wrel != rel)
3909 *wrel = *rel;
3910 continue;
3911 }
3912
3913 if (r_type >= (int) R_X86_64_standard)
3914 {
3915 (*_bfd_error_handler)
3916 (_("%B: unrecognized relocation (0x%x) in section `%A'"),
3917 input_bfd, input_section, r_type);
3918 bfd_set_error (bfd_error_bad_value);
3919 return FALSE;
3920 }
3921
3922 if (r_type != (int) R_X86_64_32
3923 || ABI_64_P (output_bfd))
3924 howto = x86_64_elf_howto_table + r_type;
3925 else
3926 howto = (x86_64_elf_howto_table
3927 + ARRAY_SIZE (x86_64_elf_howto_table) - 1);
3928 r_symndx = htab->r_sym (rel->r_info);
3929 h = NULL;
3930 sym = NULL;
3931 sec = NULL;
3932 unresolved_reloc = FALSE;
3933 if (r_symndx < symtab_hdr->sh_info)
3934 {
3935 sym = local_syms + r_symndx;
3936 sec = local_sections[r_symndx];
3937
3938 relocation = _bfd_elf_rela_local_sym (output_bfd, sym,
3939 &sec, rel);
3940 st_size = sym->st_size;
3941
3942 /* Relocate against local STT_GNU_IFUNC symbol. */
3943 if (!bfd_link_relocatable (info)
3944 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
3945 {
3946 h = elf_x86_64_get_local_sym_hash (htab, input_bfd,
3947 rel, FALSE);
3948 if (h == NULL)
3949 abort ();
3950
3951 /* Set STT_GNU_IFUNC symbol value. */
3952 h->root.u.def.value = sym->st_value;
3953 h->root.u.def.section = sec;
3954 }
3955 }
3956 else
3957 {
3958 bfd_boolean warned ATTRIBUTE_UNUSED;
3959 bfd_boolean ignored ATTRIBUTE_UNUSED;
3960
3961 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
3962 r_symndx, symtab_hdr, sym_hashes,
3963 h, sec, relocation,
3964 unresolved_reloc, warned, ignored);
3965 st_size = h->size;
3966 }
3967
3968 if (sec != NULL && discarded_section (sec))
3969 {
3970 _bfd_clear_contents (howto, input_bfd, input_section,
3971 contents + rel->r_offset);
3972 wrel->r_offset = rel->r_offset;
3973 wrel->r_info = 0;
3974 wrel->r_addend = 0;
3975
3976 /* For ld -r, remove relocations in debug sections against
3977 sections defined in discarded sections. This is not done for
3978 .eh_frame, whose editing code expects the relocations to be present. */
3979 if (bfd_link_relocatable (info)
3980 && (input_section->flags & SEC_DEBUGGING))
3981 wrel--;
3982
3983 continue;
3984 }
3985
3986 if (bfd_link_relocatable (info))
3987 {
3988 if (wrel != rel)
3989 *wrel = *rel;
3990 continue;
3991 }
3992
3993 if (rel->r_addend == 0 && !ABI_64_P (output_bfd))
3994 {
3995 if (r_type == R_X86_64_64)
3996 {
3997 /* For x32, treat R_X86_64_64 like R_X86_64_32 and
3998 zero-extend it to 64bit if addend is zero. */
3999 r_type = R_X86_64_32;
4000 memset (contents + rel->r_offset + 4, 0, 4);
4001 }
4002 else if (r_type == R_X86_64_SIZE64)
4003 {
4004 /* For x32, treat R_X86_64_SIZE64 like R_X86_64_SIZE32 and
4005 zero-extend it to 64bit if addend is zero. */
4006 r_type = R_X86_64_SIZE32;
4007 memset (contents + rel->r_offset + 4, 0, 4);
4008 }
4009 }
4010
4011 eh = (struct elf_x86_64_link_hash_entry *) h;
4012
4013 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle
4014 it here if it is defined in a non-shared object. */
4015 if (h != NULL
4016 && h->type == STT_GNU_IFUNC
4017 && h->def_regular)
4018 {
4019 bfd_vma plt_index;
4020 const char *name;
4021
4022 if ((input_section->flags & SEC_ALLOC) == 0)
4023 {
4024 /* Dynamic relocs are not propagated for SEC_DEBUGGING
4025 sections because such sections are not SEC_ALLOC and
4026 thus ld.so will not process them. */
4027 if ((input_section->flags & SEC_DEBUGGING) != 0)
4028 continue;
4029 abort ();
4030 }
4031 else if (h->plt.offset == (bfd_vma) -1)
4032 abort ();
4033
4034 /* STT_GNU_IFUNC symbol must go through PLT. */
4035 if (htab->elf.splt != NULL)
4036 {
4037 if (htab->plt_bnd != NULL)
4038 {
4039 resolved_plt = htab->plt_bnd;
4040 plt_offset = eh->plt_bnd.offset;
4041 }
4042 else
4043 {
4044 resolved_plt = htab->elf.splt;
4045 plt_offset = h->plt.offset;
4046 }
4047 }
4048 else
4049 {
4050 resolved_plt = htab->elf.iplt;
4051 plt_offset = h->plt.offset;
4052 }
4053
4054 relocation = (resolved_plt->output_section->vma
4055 + resolved_plt->output_offset + plt_offset);
4056
4057 switch (r_type)
4058 {
4059 default:
4060 if (h->root.root.string)
4061 name = h->root.root.string;
4062 else
4063 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
4064 NULL);
4065 (*_bfd_error_handler)
4066 (_("%B: relocation %s against STT_GNU_IFUNC "
4067 "symbol `%s' isn't handled by %s"), input_bfd,
4068 x86_64_elf_howto_table[r_type].name,
4069 name, __FUNCTION__);
4070 bfd_set_error (bfd_error_bad_value);
4071 return FALSE;
4072
4073 case R_X86_64_32S:
4074 if (bfd_link_pic (info))
4075 abort ();
4076 goto do_relocation;
4077
4078 case R_X86_64_32:
4079 if (ABI_64_P (output_bfd))
4080 goto do_relocation;
4081 /* FALLTHROUGH */
4082 case R_X86_64_64:
4083 if (rel->r_addend != 0)
4084 {
4085 if (h->root.root.string)
4086 name = h->root.root.string;
4087 else
4088 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
4089 sym, NULL);
4090 (*_bfd_error_handler)
4091 (_("%B: relocation %s against STT_GNU_IFUNC "
4092 "symbol `%s' has non-zero addend: %d"),
4093 input_bfd, x86_64_elf_howto_table[r_type].name,
4094 name, rel->r_addend);
4095 bfd_set_error (bfd_error_bad_value);
4096 return FALSE;
4097 }
4098
4099 /* Generate a dynamic relocation only when there is a
4100 non-GOT reference in a shared object. */
4101 if (bfd_link_pic (info) && h->non_got_ref)
4102 {
4103 Elf_Internal_Rela outrel;
4104 asection *sreloc;
4105
4106 /* Need a dynamic relocation to get the real function
4107 address. */
4108 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
4109 info,
4110 input_section,
4111 rel->r_offset);
4112 if (outrel.r_offset == (bfd_vma) -1
4113 || outrel.r_offset == (bfd_vma) -2)
4114 abort ();
4115
4116 outrel.r_offset += (input_section->output_section->vma
4117 + input_section->output_offset);
4118
4119 if (h->dynindx == -1
4120 || h->forced_local
4121 || bfd_link_executable (info))
4122 {
4123 /* This symbol is resolved locally. */
4124 outrel.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
4125 outrel.r_addend = (h->root.u.def.value
4126 + h->root.u.def.section->output_section->vma
4127 + h->root.u.def.section->output_offset);
4128 }
4129 else
4130 {
4131 outrel.r_info = htab->r_info (h->dynindx, r_type);
4132 outrel.r_addend = 0;
4133 }
4134
4135 sreloc = htab->elf.irelifunc;
4136 elf_append_rela (output_bfd, sreloc, &outrel);
4137
4138 /* If this reloc is against an external symbol, we
4139 do not want to fiddle with the addend. Otherwise,
4140 we need to include the symbol value so that it
4141 becomes an addend for the dynamic reloc. For an
4142 internal symbol, we have already updated the addend. */
4143 continue;
4144 }
4145 /* FALLTHROUGH */
4146 case R_X86_64_PC32:
4147 case R_X86_64_PC32_BND:
4148 case R_X86_64_PC64:
4149 case R_X86_64_PLT32:
4150 case R_X86_64_PLT32_BND:
4151 goto do_relocation;
4152
4153 case R_X86_64_GOTPCREL:
4154 case R_X86_64_GOTPCRELX:
4155 case R_X86_64_REX_GOTPCRELX:
4156 case R_X86_64_GOTPCREL64:
4157 base_got = htab->elf.sgot;
4158 off = h->got.offset;
4159
4160 if (base_got == NULL)
4161 abort ();
4162
4163 if (off == (bfd_vma) -1)
4164 {
4165 /* We can't use h->got.offset here to save state, or
4166 even just remember the offset, as finish_dynamic_symbol
4167 would use that as offset into .got. */
4168
4169 if (htab->elf.splt != NULL)
4170 {
4171 plt_index = h->plt.offset / plt_entry_size - 1;
4172 off = (plt_index + 3) * GOT_ENTRY_SIZE;
4173 base_got = htab->elf.sgotplt;
4174 }
4175 else
4176 {
4177 plt_index = h->plt.offset / plt_entry_size;
4178 off = plt_index * GOT_ENTRY_SIZE;
4179 base_got = htab->elf.igotplt;
4180 }
4181
4182 if (h->dynindx == -1
4183 || h->forced_local
4184 || info->symbolic)
4185 {
4186 /* This references the local definition. We must
4187 initialize this entry in the global offset table.
4188 Since the offset must always be a multiple of 8,
4189 we use the least significant bit to record
4190 whether we have initialized it already.
4191
4192 When doing a dynamic link, we create a .rela.got
4193 relocation entry to initialize the value. This
4194 is done in the finish_dynamic_symbol routine. */
4195 if ((off & 1) != 0)
4196 off &= ~1;
4197 else
4198 {
4199 bfd_put_64 (output_bfd, relocation,
4200 base_got->contents + off);
4201 /* Note that this is harmless for the GOTPLT64
4202 case, as -1 | 1 still is -1. */
4203 h->got.offset |= 1;
4204 }
4205 }
4206 }
4207
4208 relocation = (base_got->output_section->vma
4209 + base_got->output_offset + off);
4210
4211 goto do_relocation;
4212 }
4213 }
4214
4215 /* When generating a shared object, the relocations handled here are
4216 copied into the output file to be resolved at run time. */
4217 switch (r_type)
4218 {
4219 case R_X86_64_GOT32:
4220 case R_X86_64_GOT64:
4221 /* Relocation is to the entry for this symbol in the global
4222 offset table. */
4223 case R_X86_64_GOTPCREL:
4224 case R_X86_64_GOTPCRELX:
4225 case R_X86_64_REX_GOTPCRELX:
4226 case R_X86_64_GOTPCREL64:
4227 /* Use global offset table entry as symbol value. */
4228 case R_X86_64_GOTPLT64:
4229 /* This is obsolete and treated the same as GOT64. */
4230 base_got = htab->elf.sgot;
4231
4232 if (htab->elf.sgot == NULL)
4233 abort ();
4234
4235 if (h != NULL)
4236 {
4237 bfd_boolean dyn;
4238
4239 off = h->got.offset;
4240 if (h->needs_plt
4241 && h->plt.offset != (bfd_vma)-1
4242 && off == (bfd_vma)-1)
4243 {
4244 /* We can't use h->got.offset here to save
4245 state, or even just remember the offset, as
4246 finish_dynamic_symbol would use that as offset into
4247 .got. */
4248 bfd_vma plt_index = h->plt.offset / plt_entry_size - 1;
4249 off = (plt_index + 3) * GOT_ENTRY_SIZE;
4250 base_got = htab->elf.sgotplt;
4251 }
4252
4253 dyn = htab->elf.dynamic_sections_created;
4254
4255 if (! WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, bfd_link_pic (info), h)
4256 || (bfd_link_pic (info)
4257 && SYMBOL_REFERENCES_LOCAL (info, h))
4258 || (ELF_ST_VISIBILITY (h->other)
4259 && h->root.type == bfd_link_hash_undefweak))
4260 {
4261 /* This is actually a static link, or it is a -Bsymbolic
4262 link and the symbol is defined locally, or the symbol
4263 was forced to be local because of a version file. We
4264 must initialize this entry in the global offset table.
4265 Since the offset must always be a multiple of 8, we
4266 use the least significant bit to record whether we
4267 have initialized it already.
4268
4269 When doing a dynamic link, we create a .rela.got
4270 relocation entry to initialize the value. This is
4271 done in the finish_dynamic_symbol routine. */
4272 if ((off & 1) != 0)
4273 off &= ~1;
4274 else
4275 {
4276 bfd_put_64 (output_bfd, relocation,
4277 base_got->contents + off);
4278 /* Note that this is harmless for the GOTPLT64 case,
4279 as -1 | 1 still is -1. */
4280 h->got.offset |= 1;
4281 }
4282 }
4283 else
4284 unresolved_reloc = FALSE;
4285 }
4286 else
4287 {
4288 if (local_got_offsets == NULL)
4289 abort ();
4290
4291 off = local_got_offsets[r_symndx];
4292
4293 /* The offset must always be a multiple of 8. We use
4294 the least significant bit to record whether we have
4295 already generated the necessary reloc. */
4296 if ((off & 1) != 0)
4297 off &= ~1;
4298 else
4299 {
4300 bfd_put_64 (output_bfd, relocation,
4301 base_got->contents + off);
4302
4303 if (bfd_link_pic (info))
4304 {
4305 asection *s;
4306 Elf_Internal_Rela outrel;
4307
4308 /* We need to generate a R_X86_64_RELATIVE reloc
4309 for the dynamic linker. */
4310 s = htab->elf.srelgot;
4311 if (s == NULL)
4312 abort ();
4313
4314 outrel.r_offset = (base_got->output_section->vma
4315 + base_got->output_offset
4316 + off);
4317 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
4318 outrel.r_addend = relocation;
4319 elf_append_rela (output_bfd, s, &outrel);
4320 }
4321
4322 local_got_offsets[r_symndx] |= 1;
4323 }
4324 }
4325
4326 if (off >= (bfd_vma) -2)
4327 abort ();
4328
4329 relocation = base_got->output_section->vma
4330 + base_got->output_offset + off;
4331 if (r_type != R_X86_64_GOTPCREL
4332 && r_type != R_X86_64_GOTPCRELX
4333 && r_type != R_X86_64_REX_GOTPCRELX
4334 && r_type != R_X86_64_GOTPCREL64)
4335 relocation -= htab->elf.sgotplt->output_section->vma
4336 - htab->elf.sgotplt->output_offset;
4337
4338 break;
4339
4340 case R_X86_64_GOTOFF64:
4341 /* Relocation is relative to the start of the global offset
4342 table. */
4343
4344 /* Check that this isn't a protected function or data symbol
4345 for a shared library, since such a symbol may not be local
4346 when used as a function address or with a copy relocation. We
4347 also need to make sure that the symbol is referenced locally. */
4348 if (bfd_link_pic (info) && h)
4349 {
4350 if (!h->def_regular)
4351 {
4352 const char *v;
4353
4354 switch (ELF_ST_VISIBILITY (h->other))
4355 {
4356 case STV_HIDDEN:
4357 v = _("hidden symbol");
4358 break;
4359 case STV_INTERNAL:
4360 v = _("internal symbol");
4361 break;
4362 case STV_PROTECTED:
4363 v = _("protected symbol");
4364 break;
4365 default:
4366 v = _("symbol");
4367 break;
4368 }
4369
4370 (*_bfd_error_handler)
4371 (_("%B: relocation R_X86_64_GOTOFF64 against undefined %s `%s' can not be used when making a shared object"),
4372 input_bfd, v, h->root.root.string);
4373 bfd_set_error (bfd_error_bad_value);
4374 return FALSE;
4375 }
4376 else if (!bfd_link_executable (info)
4377 && !SYMBOL_REFERENCES_LOCAL (info, h)
4378 && (h->type == STT_FUNC
4379 || h->type == STT_OBJECT)
4380 && ELF_ST_VISIBILITY (h->other) == STV_PROTECTED)
4381 {
4382 (*_bfd_error_handler)
4383 (_("%B: relocation R_X86_64_GOTOFF64 against protected %s `%s' can not be used when making a shared object"),
4384 input_bfd,
4385 h->type == STT_FUNC ? "function" : "data",
4386 h->root.root.string);
4387 bfd_set_error (bfd_error_bad_value);
4388 return FALSE;
4389 }
4390 }
4391
4392 /* Note that sgot is not involved in this
4393 calculation. We always want the start of .got.plt. If we
4394 defined _GLOBAL_OFFSET_TABLE_ in a different way, as is
4395 permitted by the ABI, we might have to change this
4396 calculation. */
4397 relocation -= htab->elf.sgotplt->output_section->vma
4398 + htab->elf.sgotplt->output_offset;
4399 break;
4400
4401 case R_X86_64_GOTPC32:
4402 case R_X86_64_GOTPC64:
4403 /* Use global offset table as symbol value. */
4404 relocation = htab->elf.sgotplt->output_section->vma
4405 + htab->elf.sgotplt->output_offset;
4406 unresolved_reloc = FALSE;
4407 break;
4408
4409 case R_X86_64_PLTOFF64:
4410 /* Relocation is PLT entry relative to GOT. For local
4411 symbols it's the symbol itself relative to GOT. */
4412 if (h != NULL
4413 /* See PLT32 handling. */
4414 && h->plt.offset != (bfd_vma) -1
4415 && htab->elf.splt != NULL)
4416 {
4417 if (htab->plt_bnd != NULL)
4418 {
4419 resolved_plt = htab->plt_bnd;
4420 plt_offset = eh->plt_bnd.offset;
4421 }
4422 else
4423 {
4424 resolved_plt = htab->elf.splt;
4425 plt_offset = h->plt.offset;
4426 }
4427
4428 relocation = (resolved_plt->output_section->vma
4429 + resolved_plt->output_offset
4430 + plt_offset);
4431 unresolved_reloc = FALSE;
4432 }
4433
4434 relocation -= htab->elf.sgotplt->output_section->vma
4435 + htab->elf.sgotplt->output_offset;
4436 break;
4437
4438 case R_X86_64_PLT32:
4439 case R_X86_64_PLT32_BND:
4440 /* Relocation is to the entry for this symbol in the
4441 procedure linkage table. */
4442
4443 /* Resolve a PLT32 reloc against a local symbol directly,
4444 without using the procedure linkage table. */
4445 if (h == NULL)
4446 break;
4447
4448 if ((h->plt.offset == (bfd_vma) -1
4449 && eh->plt_got.offset == (bfd_vma) -1)
4450 || htab->elf.splt == NULL)
4451 {
4452 /* We didn't make a PLT entry for this symbol. This
4453 happens when statically linking PIC code, or when
4454 using -Bsymbolic. */
4455 break;
4456 }
4457
4458 if (h->plt.offset != (bfd_vma) -1)
4459 {
4460 if (htab->plt_bnd != NULL)
4461 {
4462 resolved_plt = htab->plt_bnd;
4463 plt_offset = eh->plt_bnd.offset;
4464 }
4465 else
4466 {
4467 resolved_plt = htab->elf.splt;
4468 plt_offset = h->plt.offset;
4469 }
4470 }
4471 else
4472 {
4473 /* Use the GOT PLT. */
4474 resolved_plt = htab->plt_got;
4475 plt_offset = eh->plt_got.offset;
4476 }
4477
4478 relocation = (resolved_plt->output_section->vma
4479 + resolved_plt->output_offset
4480 + plt_offset);
4481 unresolved_reloc = FALSE;
4482 break;
4483
4484 case R_X86_64_SIZE32:
4485 case R_X86_64_SIZE64:
4486 /* Set to symbol size. */
4487 relocation = st_size;
4488 goto direct;
4489
4490 case R_X86_64_PC8:
4491 case R_X86_64_PC16:
4492 case R_X86_64_PC32:
4493 case R_X86_64_PC32_BND:
4494 /* Don't complain about -fPIC if the symbol is undefined when
4495 building an executable. */
4496 if (bfd_link_pic (info)
4497 && (input_section->flags & SEC_ALLOC) != 0
4498 && (input_section->flags & SEC_READONLY) != 0
4499 && h != NULL
4500 && !(bfd_link_executable (info)
4501 && h->root.type == bfd_link_hash_undefined))
4502 {
4503 bfd_boolean fail = FALSE;
4504 bfd_boolean branch
4505 = ((r_type == R_X86_64_PC32
4506 || r_type == R_X86_64_PC32_BND)
4507 && is_32bit_relative_branch (contents, rel->r_offset));
4508
4509 if (SYMBOL_REFERENCES_LOCAL (info, h))
4510 {
4511 /* Symbol is referenced locally. Make sure it is
4512 defined locally or for a branch. */
4513 fail = !h->def_regular && !branch;
4514 }
4515 else if (!(bfd_link_executable (info)
4516 && (h->needs_copy || eh->needs_copy)))
4517 {
4518 /* Symbol doesn't need copy reloc and isn't referenced
4519 locally. We only allow branch to symbol with
4520 non-default visibility. */
4521 fail = (!branch
4522 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT);
4523 }
4524
4525 if (fail)
4526 {
4527 const char *fmt;
4528 const char *v;
4529 const char *pic = "";
4530
4531 switch (ELF_ST_VISIBILITY (h->other))
4532 {
4533 case STV_HIDDEN:
4534 v = _("hidden symbol");
4535 break;
4536 case STV_INTERNAL:
4537 v = _("internal symbol");
4538 break;
4539 case STV_PROTECTED:
4540 v = _("protected symbol");
4541 break;
4542 default:
4543 v = _("symbol");
4544 pic = _("; recompile with -fPIC");
4545 break;
4546 }
4547
4548 if (h->def_regular)
4549 fmt = _("%B: relocation %s against %s `%s' can not be used when making a shared object%s");
4550 else
4551 fmt = _("%B: relocation %s against undefined %s `%s' can not be used when making a shared object%s");
4552
4553 (*_bfd_error_handler) (fmt, input_bfd,
4554 x86_64_elf_howto_table[r_type].name,
4555 v, h->root.root.string, pic);
4556 bfd_set_error (bfd_error_bad_value);
4557 return FALSE;
4558 }
4559 }
4560 /* Fall through. */
4561
4562 case R_X86_64_8:
4563 case R_X86_64_16:
4564 case R_X86_64_32:
4565 case R_X86_64_PC64:
4566 case R_X86_64_64:
4567 /* FIXME: The ABI says the linker should make sure the value is
4568 the same when it's zero-extended to 64 bits. */
4569
4570 direct:
4571 if ((input_section->flags & SEC_ALLOC) == 0)
4572 break;
4573
4574 /* Don't copy a pc-relative relocation into the output file
4575 if the symbol needs a copy reloc or the symbol is undefined
4576 when building an executable. Copy dynamic function pointer
4577 relocations. */
4578 if ((bfd_link_pic (info)
4579 && !(bfd_link_executable (info)
4580 && h != NULL
4581 && (h->needs_copy
4582 || eh->needs_copy
4583 || h->root.type == bfd_link_hash_undefined)
4584 && IS_X86_64_PCREL_TYPE (r_type))
4585 && (h == NULL
4586 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
4587 || h->root.type != bfd_link_hash_undefweak)
4588 && ((! IS_X86_64_PCREL_TYPE (r_type)
4589 && r_type != R_X86_64_SIZE32
4590 && r_type != R_X86_64_SIZE64)
4591 || ! SYMBOL_CALLS_LOCAL (info, h)))
4592 || (ELIMINATE_COPY_RELOCS
4593 && !bfd_link_pic (info)
4594 && h != NULL
4595 && h->dynindx != -1
4596 && (!h->non_got_ref || eh->func_pointer_refcount > 0)
4597 && ((h->def_dynamic
4598 && !h->def_regular)
4599 || h->root.type == bfd_link_hash_undefweak
4600 || h->root.type == bfd_link_hash_undefined)))
4601 {
4602 Elf_Internal_Rela outrel;
4603 bfd_boolean skip, relocate;
4604 asection *sreloc;
4605
4606 /* When generating a shared object, these relocations
4607 are copied into the output file to be resolved at run
4608 time. */
4609 skip = FALSE;
4610 relocate = FALSE;
4611
4612 outrel.r_offset =
4613 _bfd_elf_section_offset (output_bfd, info, input_section,
4614 rel->r_offset);
4615 if (outrel.r_offset == (bfd_vma) -1)
4616 skip = TRUE;
4617 else if (outrel.r_offset == (bfd_vma) -2)
4618 skip = TRUE, relocate = TRUE;
4619
4620 outrel.r_offset += (input_section->output_section->vma
4621 + input_section->output_offset);
4622
4623 if (skip)
4624 memset (&outrel, 0, sizeof outrel);
4625
4626 /* h->dynindx may be -1 if this symbol was marked to
4627 become local. */
4628 else if (h != NULL
4629 && h->dynindx != -1
4630 && (IS_X86_64_PCREL_TYPE (r_type)
4631 || ! bfd_link_pic (info)
4632 || ! SYMBOLIC_BIND (info, h)
4633 || ! h->def_regular))
4634 {
4635 outrel.r_info = htab->r_info (h->dynindx, r_type);
4636 outrel.r_addend = rel->r_addend;
4637 }
4638 else
4639 {
4640 /* This symbol is local, or marked to become local. */
4641 if (r_type == htab->pointer_r_type)
4642 {
4643 relocate = TRUE;
4644 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
4645 outrel.r_addend = relocation + rel->r_addend;
4646 }
4647 else if (r_type == R_X86_64_64
4648 && !ABI_64_P (output_bfd))
4649 {
4650 relocate = TRUE;
4651 outrel.r_info = htab->r_info (0,
4652 R_X86_64_RELATIVE64);
4653 outrel.r_addend = relocation + rel->r_addend;
4654 /* Check addend overflow. */
4655 if ((outrel.r_addend & 0x80000000)
4656 != (rel->r_addend & 0x80000000))
4657 {
4658 const char *name;
4659 int addend = rel->r_addend;
4660 if (h && h->root.root.string)
4661 name = h->root.root.string;
4662 else
4663 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
4664 sym, NULL);
4665 if (addend < 0)
4666 (*_bfd_error_handler)
4667 (_("%B: addend -0x%x in relocation %s against "
4668 "symbol `%s' at 0x%lx in section `%A' is "
4669 "out of range"),
4670 input_bfd, input_section, addend,
4671 x86_64_elf_howto_table[r_type].name,
4672 name, (unsigned long) rel->r_offset);
4673 else
4674 (*_bfd_error_handler)
4675 (_("%B: addend 0x%x in relocation %s against "
4676 "symbol `%s' at 0x%lx in section `%A' is "
4677 "out of range"),
4678 input_bfd, input_section, addend,
4679 x86_64_elf_howto_table[r_type].name,
4680 name, (unsigned long) rel->r_offset);
4681 bfd_set_error (bfd_error_bad_value);
4682 return FALSE;
4683 }
4684 }
4685 else
4686 {
4687 long sindx;
4688
4689 if (bfd_is_abs_section (sec))
4690 sindx = 0;
4691 else if (sec == NULL || sec->owner == NULL)
4692 {
4693 bfd_set_error (bfd_error_bad_value);
4694 return FALSE;
4695 }
4696 else
4697 {
4698 asection *osec;
4699
4700 /* We are turning this relocation into one
4701 against a section symbol. It would be
4702 proper to subtract the symbol's value,
4703 osec->vma, from the emitted reloc addend,
4704 but ld.so expects buggy relocs. */
4705 osec = sec->output_section;
4706 sindx = elf_section_data (osec)->dynindx;
4707 if (sindx == 0)
4708 {
4709 asection *oi = htab->elf.text_index_section;
4710 sindx = elf_section_data (oi)->dynindx;
4711 }
4712 BFD_ASSERT (sindx != 0);
4713 }
4714
4715 outrel.r_info = htab->r_info (sindx, r_type);
4716 outrel.r_addend = relocation + rel->r_addend;
4717 }
4718 }
4719
4720 sreloc = elf_section_data (input_section)->sreloc;
4721
4722 if (sreloc == NULL || sreloc->contents == NULL)
4723 {
4724 r = bfd_reloc_notsupported;
4725 goto check_relocation_error;
4726 }
4727
4728 elf_append_rela (output_bfd, sreloc, &outrel);
4729
4730 /* If this reloc is against an external symbol, we do
4731 not want to fiddle with the addend. Otherwise, we
4732 need to include the symbol value so that it becomes
4733 an addend for the dynamic reloc. */
4734 if (! relocate)
4735 continue;
4736 }
4737
4738 break;
4739
4740 case R_X86_64_TLSGD:
4741 case R_X86_64_GOTPC32_TLSDESC:
4742 case R_X86_64_TLSDESC_CALL:
4743 case R_X86_64_GOTTPOFF:
4744 tls_type = GOT_UNKNOWN;
4745 if (h == NULL && local_got_offsets)
4746 tls_type = elf_x86_64_local_got_tls_type (input_bfd) [r_symndx];
4747 else if (h != NULL)
4748 tls_type = elf_x86_64_hash_entry (h)->tls_type;
4749
4750 if (! elf_x86_64_tls_transition (info, input_bfd,
4751 input_section, contents,
4752 symtab_hdr, sym_hashes,
4753 &r_type, tls_type, rel,
4754 relend, h, r_symndx))
4755 return FALSE;
4756
4757 if (r_type == R_X86_64_TPOFF32)
4758 {
4759 bfd_vma roff = rel->r_offset;
4760
4761 BFD_ASSERT (! unresolved_reloc);
4762
4763 if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSGD)
4764 {
4765 /* GD->LE transition. For 64bit, change
4766 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
4767 .word 0x6666; rex64; call __tls_get_addr
4768 into:
4769 movq %fs:0, %rax
4770 leaq foo@tpoff(%rax), %rax
4771 For 32bit, change
4772 leaq foo@tlsgd(%rip), %rdi
4773 .word 0x6666; rex64; call __tls_get_addr
4774 into:
4775 movl %fs:0, %eax
4776 leaq foo@tpoff(%rax), %rax
4777 For largepic, change:
4778 leaq foo@tlsgd(%rip), %rdi
4779 movabsq $__tls_get_addr@pltoff, %rax
4780 addq %rbx, %rax
4781 call *%rax
4782 into:
4783 movq %fs:0, %rax
4784 leaq foo@tpoff(%rax), %rax
4785 nopw 0x0(%rax,%rax,1) */
4786 int largepic = 0;
4787 if (ABI_64_P (output_bfd)
4788 && contents[roff + 5] == (bfd_byte) '\xb8')
4789 {
4790 memcpy (contents + roff - 3,
4791 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80"
4792 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
4793 largepic = 1;
4794 }
4795 else if (ABI_64_P (output_bfd))
4796 memcpy (contents + roff - 4,
4797 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
4798 16);
4799 else
4800 memcpy (contents + roff - 3,
4801 "\x64\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
4802 15);
4803 bfd_put_32 (output_bfd,
4804 elf_x86_64_tpoff (info, relocation),
4805 contents + roff + 8 + largepic);
4806 /* Skip R_X86_64_PC32/R_X86_64_PLT32/R_X86_64_PLTOFF64. */
4807 rel++;
4808 wrel++;
4809 continue;
4810 }
4811 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_GOTPC32_TLSDESC)
4812 {
4813 /* GDesc -> LE transition.
4814 It's originally something like:
4815 leaq x@tlsdesc(%rip), %rax
4816
4817 Change it to:
4818 movl $x@tpoff, %rax. */
4819
4820 unsigned int val, type;
4821
4822 type = bfd_get_8 (input_bfd, contents + roff - 3);
4823 val = bfd_get_8 (input_bfd, contents + roff - 1);
4824 bfd_put_8 (output_bfd, 0x48 | ((type >> 2) & 1),
4825 contents + roff - 3);
4826 bfd_put_8 (output_bfd, 0xc7, contents + roff - 2);
4827 bfd_put_8 (output_bfd, 0xc0 | ((val >> 3) & 7),
4828 contents + roff - 1);
4829 bfd_put_32 (output_bfd,
4830 elf_x86_64_tpoff (info, relocation),
4831 contents + roff);
4832 continue;
4833 }
4834 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSDESC_CALL)
4835 {
4836 /* GDesc -> LE transition.
4837 It's originally:
4838 call *(%rax)
4839 Turn it into:
4840 xchg %ax,%ax. */
4841 bfd_put_8 (output_bfd, 0x66, contents + roff);
4842 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
4843 continue;
4844 }
4845 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_GOTTPOFF)
4846 {
4847 /* IE->LE transition:
4848 For 64bit, originally it can be one of:
4849 movq foo@gottpoff(%rip), %reg
4850 addq foo@gottpoff(%rip), %reg
4851 We change it into:
4852 movq $foo, %reg
4853 leaq foo(%reg), %reg
4854 addq $foo, %reg.
4855 For 32bit, originally it can be one of:
4856 movq foo@gottpoff(%rip), %reg
4857 addl foo@gottpoff(%rip), %reg
4858 We change it into:
4859 movq $foo, %reg
4860 leal foo(%reg), %reg
4861 addl $foo, %reg. */
4862
4863 unsigned int val, type, reg;
4864
4865 if (roff >= 3)
4866 val = bfd_get_8 (input_bfd, contents + roff - 3);
4867 else
4868 val = 0;
4869 type = bfd_get_8 (input_bfd, contents + roff - 2);
4870 reg = bfd_get_8 (input_bfd, contents + roff - 1);
4871 reg >>= 3;
4872 if (type == 0x8b)
4873 {
4874 /* movq */
4875 if (val == 0x4c)
4876 bfd_put_8 (output_bfd, 0x49,
4877 contents + roff - 3);
4878 else if (!ABI_64_P (output_bfd) && val == 0x44)
4879 bfd_put_8 (output_bfd, 0x41,
4880 contents + roff - 3);
4881 bfd_put_8 (output_bfd, 0xc7,
4882 contents + roff - 2);
4883 bfd_put_8 (output_bfd, 0xc0 | reg,
4884 contents + roff - 1);
4885 }
4886 else if (reg == 4)
4887 {
4888 /* addq/addl -> addq/addl - addressing with %rsp/%r12
4889 is special */
4890 if (val == 0x4c)
4891 bfd_put_8 (output_bfd, 0x49,
4892 contents + roff - 3);
4893 else if (!ABI_64_P (output_bfd) && val == 0x44)
4894 bfd_put_8 (output_bfd, 0x41,
4895 contents + roff - 3);
4896 bfd_put_8 (output_bfd, 0x81,
4897 contents + roff - 2);
4898 bfd_put_8 (output_bfd, 0xc0 | reg,
4899 contents + roff - 1);
4900 }
4901 else
4902 {
4903 /* addq/addl -> leaq/leal */
4904 if (val == 0x4c)
4905 bfd_put_8 (output_bfd, 0x4d,
4906 contents + roff - 3);
4907 else if (!ABI_64_P (output_bfd) && val == 0x44)
4908 bfd_put_8 (output_bfd, 0x45,
4909 contents + roff - 3);
4910 bfd_put_8 (output_bfd, 0x8d,
4911 contents + roff - 2);
4912 bfd_put_8 (output_bfd, 0x80 | reg | (reg << 3),
4913 contents + roff - 1);
4914 }
4915 bfd_put_32 (output_bfd,
4916 elf_x86_64_tpoff (info, relocation),
4917 contents + roff);
4918 continue;
4919 }
4920 else
4921 BFD_ASSERT (FALSE);
4922 }
4923
4924 if (htab->elf.sgot == NULL)
4925 abort ();
4926
4927 if (h != NULL)
4928 {
4929 off = h->got.offset;
4930 offplt = elf_x86_64_hash_entry (h)->tlsdesc_got;
4931 }
4932 else
4933 {
4934 if (local_got_offsets == NULL)
4935 abort ();
4936
4937 off = local_got_offsets[r_symndx];
4938 offplt = local_tlsdesc_gotents[r_symndx];
4939 }
4940
4941 if ((off & 1) != 0)
4942 off &= ~1;
4943 else
4944 {
4945 Elf_Internal_Rela outrel;
4946 int dr_type, indx;
4947 asection *sreloc;
4948
4949 if (htab->elf.srelgot == NULL)
4950 abort ();
4951
4952 indx = h && h->dynindx != -1 ? h->dynindx : 0;
4953
4954 if (GOT_TLS_GDESC_P (tls_type))
4955 {
4956 outrel.r_info = htab->r_info (indx, R_X86_64_TLSDESC);
4957 BFD_ASSERT (htab->sgotplt_jump_table_size + offplt
4958 + 2 * GOT_ENTRY_SIZE <= htab->elf.sgotplt->size);
4959 outrel.r_offset = (htab->elf.sgotplt->output_section->vma
4960 + htab->elf.sgotplt->output_offset
4961 + offplt
4962 + htab->sgotplt_jump_table_size);
4963 sreloc = htab->elf.srelplt;
4964 if (indx == 0)
4965 outrel.r_addend = relocation - elf_x86_64_dtpoff_base (info);
4966 else
4967 outrel.r_addend = 0;
4968 elf_append_rela (output_bfd, sreloc, &outrel);
4969 }
4970
4971 sreloc = htab->elf.srelgot;
4972
4973 outrel.r_offset = (htab->elf.sgot->output_section->vma
4974 + htab->elf.sgot->output_offset + off);
4975
4976 if (GOT_TLS_GD_P (tls_type))
4977 dr_type = R_X86_64_DTPMOD64;
4978 else if (GOT_TLS_GDESC_P (tls_type))
4979 goto dr_done;
4980 else
4981 dr_type = R_X86_64_TPOFF64;
4982
4983 bfd_put_64 (output_bfd, 0, htab->elf.sgot->contents + off);
4984 outrel.r_addend = 0;
4985 if ((dr_type == R_X86_64_TPOFF64
4986 || dr_type == R_X86_64_TLSDESC) && indx == 0)
4987 outrel.r_addend = relocation - elf_x86_64_dtpoff_base (info);
4988 outrel.r_info = htab->r_info (indx, dr_type);
4989
4990 elf_append_rela (output_bfd, sreloc, &outrel);
4991
4992 if (GOT_TLS_GD_P (tls_type))
4993 {
4994 if (indx == 0)
4995 {
4996 BFD_ASSERT (! unresolved_reloc);
4997 bfd_put_64 (output_bfd,
4998 relocation - elf_x86_64_dtpoff_base (info),
4999 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
5000 }
5001 else
5002 {
5003 bfd_put_64 (output_bfd, 0,
5004 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
5005 outrel.r_info = htab->r_info (indx,
5006 R_X86_64_DTPOFF64);
5007 outrel.r_offset += GOT_ENTRY_SIZE;
5008 elf_append_rela (output_bfd, sreloc,
5009 &outrel);
5010 }
5011 }
5012
5013 dr_done:
5014 if (h != NULL)
5015 h->got.offset |= 1;
5016 else
5017 local_got_offsets[r_symndx] |= 1;
5018 }
5019
5020 if (off >= (bfd_vma) -2
5021 && ! GOT_TLS_GDESC_P (tls_type))
5022 abort ();
5023 if (r_type == ELF32_R_TYPE (rel->r_info))
5024 {
5025 if (r_type == R_X86_64_GOTPC32_TLSDESC
5026 || r_type == R_X86_64_TLSDESC_CALL)
5027 relocation = htab->elf.sgotplt->output_section->vma
5028 + htab->elf.sgotplt->output_offset
5029 + offplt + htab->sgotplt_jump_table_size;
5030 else
5031 relocation = htab->elf.sgot->output_section->vma
5032 + htab->elf.sgot->output_offset + off;
5033 unresolved_reloc = FALSE;
5034 }
5035 else
5036 {
5037 bfd_vma roff = rel->r_offset;
5038
5039 if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSGD)
5040 {
5041 /* GD->IE transition. For 64bit, change
5042 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
5043 .word 0x6666; rex64; call __tls_get_addr@plt
5044 into:
5045 movq %fs:0, %rax
5046 addq foo@gottpoff(%rip), %rax
5047 For 32bit, change
5048 leaq foo@tlsgd(%rip), %rdi
5049 .word 0x6666; rex64; call __tls_get_addr@plt
5050 into:
5051 movl %fs:0, %eax
5052 addq foo@gottpoff(%rip), %rax
5053 For largepic, change:
5054 leaq foo@tlsgd(%rip), %rdi
5055 movabsq $__tls_get_addr@pltoff, %rax
5056 addq %rbx, %rax
5057 call *%rax
5058 into:
5059 movq %fs:0, %rax
5060 addq foo@gottpoff(%rip), %rax
5061 nopw 0x0(%rax,%rax,1) */
5062 int largepic = 0;
5063 if (ABI_64_P (output_bfd)
5064 && contents[roff + 5] == (bfd_byte) '\xb8')
5065 {
5066 memcpy (contents + roff - 3,
5067 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05"
5068 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
5069 largepic = 1;
5070 }
5071 else if (ABI_64_P (output_bfd))
5072 memcpy (contents + roff - 4,
5073 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
5074 16);
5075 else
5076 memcpy (contents + roff - 3,
5077 "\x64\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
5078 15);
5079
5080 relocation = (htab->elf.sgot->output_section->vma
5081 + htab->elf.sgot->output_offset + off
5082 - roff
5083 - largepic
5084 - input_section->output_section->vma
5085 - input_section->output_offset
5086 - 12);
5087 bfd_put_32 (output_bfd, relocation,
5088 contents + roff + 8 + largepic);
5089 /* Skip R_X86_64_PLT32/R_X86_64_PLTOFF64. */
5090 rel++;
5091 wrel++;
5092 continue;
5093 }
5094 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_GOTPC32_TLSDESC)
5095 {
5096 /* GDesc -> IE transition.
5097 It's originally something like:
5098 leaq x@tlsdesc(%rip), %rax
5099
5100 Change it to:
5101 movq x@gottpoff(%rip), %rax # before xchg %ax,%ax. */
5102
5103 /* Now modify the instruction as appropriate. To
5104 turn a leaq into a movq in the form we use it, it
5105 suffices to change the second byte from 0x8d to
5106 0x8b. */
5107 bfd_put_8 (output_bfd, 0x8b, contents + roff - 2);
5108
5109 bfd_put_32 (output_bfd,
5110 htab->elf.sgot->output_section->vma
5111 + htab->elf.sgot->output_offset + off
5112 - rel->r_offset
5113 - input_section->output_section->vma
5114 - input_section->output_offset
5115 - 4,
5116 contents + roff);
5117 continue;
5118 }
5119 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSDESC_CALL)
5120 {
5121 /* GDesc -> IE transition.
5122 It's originally:
5123 call *(%rax)
5124
5125 Change it to:
5126 xchg %ax, %ax. */
5127
5128 bfd_put_8 (output_bfd, 0x66, contents + roff);
5129 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
5130 continue;
5131 }
5132 else
5133 BFD_ASSERT (FALSE);
5134 }
5135 break;
5136
5137 case R_X86_64_TLSLD:
5138 if (! elf_x86_64_tls_transition (info, input_bfd,
5139 input_section, contents,
5140 symtab_hdr, sym_hashes,
5141 &r_type, GOT_UNKNOWN,
5142 rel, relend, h, r_symndx))
5143 return FALSE;
5144
5145 if (r_type != R_X86_64_TLSLD)
5146 {
5147 /* LD->LE transition:
5148 leaq foo@tlsld(%rip), %rdi; call __tls_get_addr.
5149 For 64bit, we change it into:
5150 .word 0x6666; .byte 0x66; movq %fs:0, %rax.
5151 For 32bit, we change it into:
5152 nopl 0x0(%rax); movl %fs:0, %eax.
5153 For largepic, change:
5154 leaq foo@tlsgd(%rip), %rdi
5155 movabsq $__tls_get_addr@pltoff, %rax
5156 addq %rbx, %rax
5157 call *%rax
5158 into:
5159 data32 data32 data32 nopw %cs:0x0(%rax,%rax,1)
5160 movq %fs:0, %rax */
5161
5162 BFD_ASSERT (r_type == R_X86_64_TPOFF32);
5163 if (ABI_64_P (output_bfd)
5164 && contents[rel->r_offset + 5] == (bfd_byte) '\xb8')
5165 memcpy (contents + rel->r_offset - 3,
5166 "\x66\x66\x66\x66\x2e\x0f\x1f\x84\0\0\0\0\0"
5167 "\x64\x48\x8b\x04\x25\0\0\0", 22);
5168 else if (ABI_64_P (output_bfd))
5169 memcpy (contents + rel->r_offset - 3,
5170 "\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0", 12);
5171 else
5172 memcpy (contents + rel->r_offset - 3,
5173 "\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0", 12);
5174 /* Skip R_X86_64_PC32/R_X86_64_PLT32/R_X86_64_PLTOFF64. */
5175 rel++;
5176 wrel++;
5177 continue;
5178 }
5179
5180 if (htab->elf.sgot == NULL)
5181 abort ();
5182
5183 off = htab->tls_ld_got.offset;
5184 if (off & 1)
5185 off &= ~1;
5186 else
5187 {
5188 Elf_Internal_Rela outrel;
5189
5190 if (htab->elf.srelgot == NULL)
5191 abort ();
5192
5193 outrel.r_offset = (htab->elf.sgot->output_section->vma
5194 + htab->elf.sgot->output_offset + off);
5195
5196 bfd_put_64 (output_bfd, 0,
5197 htab->elf.sgot->contents + off);
5198 bfd_put_64 (output_bfd, 0,
5199 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
5200 outrel.r_info = htab->r_info (0, R_X86_64_DTPMOD64);
5201 outrel.r_addend = 0;
5202 elf_append_rela (output_bfd, htab->elf.srelgot,
5203 &outrel);
5204 htab->tls_ld_got.offset |= 1;
5205 }
5206 relocation = htab->elf.sgot->output_section->vma
5207 + htab->elf.sgot->output_offset + off;
5208 unresolved_reloc = FALSE;
5209 break;
5210
5211 case R_X86_64_DTPOFF32:
5212 if (!bfd_link_executable (info)
5213 || (input_section->flags & SEC_CODE) == 0)
5214 relocation -= elf_x86_64_dtpoff_base (info);
5215 else
5216 relocation = elf_x86_64_tpoff (info, relocation);
5217 break;
5218
5219 case R_X86_64_TPOFF32:
5220 case R_X86_64_TPOFF64:
5221 BFD_ASSERT (bfd_link_executable (info));
5222 relocation = elf_x86_64_tpoff (info, relocation);
5223 break;
5224
5225 case R_X86_64_DTPOFF64:
5226 BFD_ASSERT ((input_section->flags & SEC_CODE) == 0);
5227 relocation -= elf_x86_64_dtpoff_base (info);
5228 break;
5229
5230 default:
5231 break;
5232 }
5233
5234 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
5235 because such sections are not SEC_ALLOC and thus ld.so will
5236 not process them. */
5237 if (unresolved_reloc
5238 && !((input_section->flags & SEC_DEBUGGING) != 0
5239 && h->def_dynamic)
5240 && _bfd_elf_section_offset (output_bfd, info, input_section,
5241 rel->r_offset) != (bfd_vma) -1)
5242 {
5243 (*_bfd_error_handler)
5244 (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
5245 input_bfd,
5246 input_section,
5247 (long) rel->r_offset,
5248 howto->name,
5249 h->root.root.string);
5250 return FALSE;
5251 }
5252
5253 do_relocation:
5254 r = _bfd_final_link_relocate (howto, input_bfd, input_section,
5255 contents, rel->r_offset,
5256 relocation, rel->r_addend);
5257
5258 check_relocation_error:
5259 if (r != bfd_reloc_ok)
5260 {
5261 const char *name;
5262
5263 if (h != NULL)
5264 name = h->root.root.string;
5265 else
5266 {
5267 name = bfd_elf_string_from_elf_section (input_bfd,
5268 symtab_hdr->sh_link,
5269 sym->st_name);
5270 if (name == NULL)
5271 return FALSE;
5272 if (*name == '\0')
5273 name = bfd_section_name (input_bfd, sec);
5274 }
5275
5276 if (r == bfd_reloc_overflow)
5277 {
5278 if (! ((*info->callbacks->reloc_overflow)
5279 (info, (h ? &h->root : NULL), name, howto->name,
5280 (bfd_vma) 0, input_bfd, input_section,
5281 rel->r_offset)))
5282 return FALSE;
5283 }
5284 else
5285 {
5286 (*_bfd_error_handler)
5287 (_("%B(%A+0x%lx): reloc against `%s': error %d"),
5288 input_bfd, input_section,
5289 (long) rel->r_offset, name, (int) r);
5290 return FALSE;
5291 }
5292 }
5293
5294 if (wrel != rel)
5295 *wrel = *rel;
5296 }
5297
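/* WREL trails REL by one entry for every relocation dropped in the loop
   above, so REL - WREL counts the removed entries; shrink both the output
   and the input relocation headers to match.  */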
5298 if (wrel != rel)
5299 {
5300 Elf_Internal_Shdr *rel_hdr;
5301 size_t deleted = rel - wrel;
5302
5303 rel_hdr = _bfd_elf_single_rel_hdr (input_section->output_section);
5304 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted;
5305 if (rel_hdr->sh_size == 0)
5306 {
5307 /* It is too late to remove an empty reloc section. Leave
5308 one NONE reloc.
5309 ??? What is wrong with an empty section??? */
5310 rel_hdr->sh_size = rel_hdr->sh_entsize;
5311 deleted -= 1;
5312 }
5313 rel_hdr = _bfd_elf_single_rel_hdr (input_section);
5314 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted;
5315 input_section->reloc_count -= deleted;
5316 }
5317
5318 return TRUE;
5319 }
5320
5321 /* Finish up dynamic symbol handling. We set the contents of various
5322 dynamic sections here. */
5323
5324 static bfd_boolean
5325 elf_x86_64_finish_dynamic_symbol (bfd *output_bfd,
5326 struct bfd_link_info *info,
5327 struct elf_link_hash_entry *h,
5328 Elf_Internal_Sym *sym ATTRIBUTE_UNUSED)
5329 {
5330 struct elf_x86_64_link_hash_table *htab;
5331 const struct elf_x86_64_backend_data *abed;
5332 bfd_boolean use_plt_bnd;
5333 struct elf_x86_64_link_hash_entry *eh;
5334
5335 htab = elf_x86_64_hash_table (info);
5336 if (htab == NULL)
5337 return FALSE;
5338
5339 /* Use the MPX backend data in case of a BND relocation. Use the
5340 .plt_bnd section only if there is a .plt section. */
5341 use_plt_bnd = htab->elf.splt != NULL && htab->plt_bnd != NULL;
5342 abed = (use_plt_bnd
5343 ? &elf_x86_64_bnd_arch_bed
5344 : get_elf_x86_64_backend_data (output_bfd));
5345
5346 eh = (struct elf_x86_64_link_hash_entry *) h;
5347
5348 if (h->plt.offset != (bfd_vma) -1)
5349 {
5350 bfd_vma plt_index;
5351 bfd_vma got_offset, plt_offset, plt_plt_offset, plt_got_offset;
5352 bfd_vma plt_plt_insn_end, plt_got_insn_size;
5353 Elf_Internal_Rela rela;
5354 bfd_byte *loc;
5355 asection *plt, *gotplt, *relplt, *resolved_plt;
5356 const struct elf_backend_data *bed;
5357 bfd_vma plt_got_pcrel_offset;
5358
5359 /* When building a static executable, use .iplt, .igot.plt and
5360 .rela.iplt sections for STT_GNU_IFUNC symbols. */
5361 if (htab->elf.splt != NULL)
5362 {
5363 plt = htab->elf.splt;
5364 gotplt = htab->elf.sgotplt;
5365 relplt = htab->elf.srelplt;
5366 }
5367 else
5368 {
5369 plt = htab->elf.iplt;
5370 gotplt = htab->elf.igotplt;
5371 relplt = htab->elf.irelplt;
5372 }
5373
5374 /* This symbol has an entry in the procedure linkage table. Set
5375 it up. */
5376 if ((h->dynindx == -1
5377 && !((h->forced_local || bfd_link_executable (info))
5378 && h->def_regular
5379 && h->type == STT_GNU_IFUNC))
5380 || plt == NULL
5381 || gotplt == NULL
5382 || relplt == NULL)
5383 abort ();
5384
5385 /* Get the index in the procedure linkage table which
5386 corresponds to this symbol. This is the index of this symbol
5387 in all the symbols for which we are making plt entries. The
5388 first entry in the procedure linkage table is reserved.
5389
5390 Get the offset into the .got table of the entry that
5391 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
5392 bytes. The first three are reserved for the dynamic linker.
5393
5394 For static executables, we don't reserve anything. */
5395
5396 if (plt == htab->elf.splt)
5397 {
5398 got_offset = h->plt.offset / abed->plt_entry_size - 1;
5399 got_offset = (got_offset + 3) * GOT_ENTRY_SIZE;
5400 }
5401 else
5402 {
5403 got_offset = h->plt.offset / abed->plt_entry_size;
5404 got_offset = got_offset * GOT_ENTRY_SIZE;
5405 }
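/* For illustration, assuming the standard 16-byte PLT entries and 8-byte
   GOT entries in the lazy .plt case: the first real PLT entry has
   h->plt.offset == 16, giving got_offset == (16/16 - 1 + 3) * 8 == 24,
   i.e. the fourth .got.plt slot, just past the three slots reserved for
   the dynamic linker.  */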
5406
5407 plt_plt_insn_end = abed->plt_plt_insn_end;
5408 plt_plt_offset = abed->plt_plt_offset;
5409 plt_got_insn_size = abed->plt_got_insn_size;
5410 plt_got_offset = abed->plt_got_offset;
5411 if (use_plt_bnd)
5412 {
5413 /* Use the second PLT with BND relocations. */
5414 const bfd_byte *plt_entry, *plt2_entry;
5415
5416 if (eh->has_bnd_reloc)
5417 {
5418 plt_entry = elf_x86_64_bnd_plt_entry;
5419 plt2_entry = elf_x86_64_bnd_plt2_entry;
5420 }
5421 else
5422 {
5423 plt_entry = elf_x86_64_legacy_plt_entry;
5424 plt2_entry = elf_x86_64_legacy_plt2_entry;
5425
5426 /* Subtract 1 since there is no BND prefix. */
5427 plt_plt_insn_end -= 1;
5428 plt_plt_offset -= 1;
5429 plt_got_insn_size -= 1;
5430 plt_got_offset -= 1;
5431 }
5432
5433 BFD_ASSERT (sizeof (elf_x86_64_bnd_plt_entry)
5434 == sizeof (elf_x86_64_legacy_plt_entry));
5435
5436 /* Fill in the entry in the procedure linkage table. */
5437 memcpy (plt->contents + h->plt.offset,
5438 plt_entry, sizeof (elf_x86_64_legacy_plt_entry));
5439 /* Fill in the entry in the second PLT. */
5440 memcpy (htab->plt_bnd->contents + eh->plt_bnd.offset,
5441 plt2_entry, sizeof (elf_x86_64_legacy_plt2_entry));
5442
5443 resolved_plt = htab->plt_bnd;
5444 plt_offset = eh->plt_bnd.offset;
5445 }
5446 else
5447 {
5448 /* Fill in the entry in the procedure linkage table. */
5449 memcpy (plt->contents + h->plt.offset, abed->plt_entry,
5450 abed->plt_entry_size);
5451
5452 resolved_plt = plt;
5453 plt_offset = h->plt.offset;
5454 }
5455
5456 /* Insert the relocation positions of the plt section. */
5457
5458 /* Store the PC-relative offset to the GOT entry, measured from the end
5459 of the instruction that references it (hence subtracting the instruction size). */
5460 plt_got_pcrel_offset = (gotplt->output_section->vma
5461 + gotplt->output_offset
5462 + got_offset
5463 - resolved_plt->output_section->vma
5464 - resolved_plt->output_offset
5465 - plt_offset
5466 - plt_got_insn_size);
5467
5468 /* Check PC-relative offset overflow in PLT entry. */
5469 if ((plt_got_pcrel_offset + 0x80000000) > 0xffffffff)
5470 info->callbacks->einfo (_("%F%B: PC-relative offset overflow in PLT entry for `%s'\n"),
5471 output_bfd, h->root.root.string);
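/* The bias by 0x80000000 above maps the representable signed 32-bit range
   [-2^31, 2^31) onto [0, 0xffffffff], so any offset outside that window
   fails the comparison and is reported as an overflow.  */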
5472
5473 bfd_put_32 (output_bfd, plt_got_pcrel_offset,
5474 resolved_plt->contents + plt_offset + plt_got_offset);
5475
5476 /* Fill in the entry in the global offset table, initially this
5477 points to the second part of the PLT entry. */
5478 bfd_put_64 (output_bfd, (plt->output_section->vma
5479 + plt->output_offset
5480 + h->plt.offset + abed->plt_lazy_offset),
5481 gotplt->contents + got_offset);
5482
5483 /* Fill in the entry in the .rela.plt section. */
5484 rela.r_offset = (gotplt->output_section->vma
5485 + gotplt->output_offset
5486 + got_offset);
5487 if (h->dynindx == -1
5488 || ((bfd_link_executable (info)
5489 || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
5490 && h->def_regular
5491 && h->type == STT_GNU_IFUNC))
5492 {
5493 /* If an STT_GNU_IFUNC symbol is locally defined, generate
5494 R_X86_64_IRELATIVE instead of R_X86_64_JUMP_SLOT. */
5495 rela.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
5496 rela.r_addend = (h->root.u.def.value
5497 + h->root.u.def.section->output_section->vma
5498 + h->root.u.def.section->output_offset);
5499 /* R_X86_64_IRELATIVE comes last. */
5500 plt_index = htab->next_irelative_index--;
5501 }
5502 else
5503 {
5504 rela.r_info = htab->r_info (h->dynindx, R_X86_64_JUMP_SLOT);
5505 rela.r_addend = 0;
5506 plt_index = htab->next_jump_slot_index++;
5507 }
5508
5509 /* Don't fill PLT entry for static executables. */
5510 if (plt == htab->elf.splt)
5511 {
5512 bfd_vma plt0_offset = h->plt.offset + plt_plt_insn_end;
5513
5514 /* Put relocation index. */
5515 bfd_put_32 (output_bfd, plt_index,
5516 plt->contents + h->plt.offset + abed->plt_reloc_offset);
5517
5518 /* Put offset for jmp .PLT0 and check for overflow. We don't
5519 check relocation index for overflow since branch displacement
5520 will overflow first. */
5521 if (plt0_offset > 0x80000000)
5522 info->callbacks->einfo (_("%F%B: branch displacement overflow in PLT entry for `%s'\n"),
5523 output_bfd, h->root.root.string);
5524 bfd_put_32 (output_bfd, - plt0_offset,
5525 plt->contents + h->plt.offset + plt_plt_offset);
5526 }
5527
5528 bed = get_elf_backend_data (output_bfd);
5529 loc = relplt->contents + plt_index * bed->s->sizeof_rela;
5530 bed->s->swap_reloca_out (output_bfd, &rela, loc);
5531 }
5532 else if (eh->plt_got.offset != (bfd_vma) -1)
5533 {
5534 bfd_vma got_offset, plt_offset, plt_got_offset, plt_got_insn_size;
5535 asection *plt, *got;
5536 bfd_boolean got_after_plt;
5537 int32_t got_pcrel_offset;
5538 const bfd_byte *got_plt_entry;
5539
5540 /* Set the entry in the GOT procedure linkage table. */
5541 plt = htab->plt_got;
5542 got = htab->elf.sgot;
5543 got_offset = h->got.offset;
5544
5545 if (got_offset == (bfd_vma) -1
5546 || h->type == STT_GNU_IFUNC
5547 || plt == NULL
5548 || got == NULL)
5549 abort ();
5550
5551 /* Use the second PLT entry template for the GOT PLT since they
5552 are identical. */
5553 plt_got_insn_size = elf_x86_64_bnd_arch_bed.plt_got_insn_size;
5554 plt_got_offset = elf_x86_64_bnd_arch_bed.plt_got_offset;
5555 if (eh->has_bnd_reloc)
5556 got_plt_entry = elf_x86_64_bnd_plt2_entry;
5557 else
5558 {
5559 got_plt_entry = elf_x86_64_legacy_plt2_entry;
5560
5561 /* Subtract 1 since there is no BND prefix. */
5562 plt_got_insn_size -= 1;
5563 plt_got_offset -= 1;
5564 }
5565
5566 /* Fill in the entry in the GOT procedure linkage table. */
5567 plt_offset = eh->plt_got.offset;
5568 memcpy (plt->contents + plt_offset,
5569 got_plt_entry, sizeof (elf_x86_64_legacy_plt2_entry));
5570
5571 /* Store the PC-relative offset to the GOT entry, measured from the end
5572 of the instruction that references it (hence the subtraction). */
5573 got_pcrel_offset = (got->output_section->vma
5574 + got->output_offset
5575 + got_offset
5576 - plt->output_section->vma
5577 - plt->output_offset
5578 - plt_offset
5579 - plt_got_insn_size);
5580
5581 /* Check PC-relative offset overflow in GOT PLT entry. */
5582 got_after_plt = got->output_section->vma > plt->output_section->vma;
5583 if ((got_after_plt && got_pcrel_offset < 0)
5584 || (!got_after_plt && got_pcrel_offset > 0))
5585 info->callbacks->einfo (_("%F%B: PC-relative offset overflow in GOT PLT entry for `%s'\n"),
5586 output_bfd, h->root.root.string);
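/* got_pcrel_offset is an int32_t, so the sign test above catches 32-bit
   truncation: when .got lies above .plt the true displacement is positive,
   and a negative value means it did not fit in 32 bits (and vice versa
   when .got lies below .plt).  */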
5587
5588 bfd_put_32 (output_bfd, got_pcrel_offset,
5589 plt->contents + plt_offset + plt_got_offset);
5590 }
5591
5592 if (!h->def_regular
5593 && (h->plt.offset != (bfd_vma) -1
5594 || eh->plt_got.offset != (bfd_vma) -1))
5595 {
5596 /* Mark the symbol as undefined, rather than as defined in
5597 the .plt section. Leave the value if there were any
5598 relocations where pointer equality matters (this is a clue
5599 for the dynamic linker, to make function pointer
5600 comparisons work between an application and shared
5601 library), otherwise set it to zero. If a function is only
5602 called from a binary, there is no need to slow down
5603 shared libraries because of that. */
5604 sym->st_shndx = SHN_UNDEF;
5605 if (!h->pointer_equality_needed)
5606 sym->st_value = 0;
5607 }
5608
5609 if (h->got.offset != (bfd_vma) -1
5610 && ! GOT_TLS_GD_ANY_P (elf_x86_64_hash_entry (h)->tls_type)
5611 && elf_x86_64_hash_entry (h)->tls_type != GOT_TLS_IE)
5612 {
5613 Elf_Internal_Rela rela;
5614
5615 /* This symbol has an entry in the global offset table. Set it
5616 up. */
5617 if (htab->elf.sgot == NULL || htab->elf.srelgot == NULL)
5618 abort ();
5619
5620 rela.r_offset = (htab->elf.sgot->output_section->vma
5621 + htab->elf.sgot->output_offset
5622 + (h->got.offset &~ (bfd_vma) 1));
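 /* The least significant bit of h->got.offset is used as a flag noting
    that relocate_section has already initialized the GOT entry itself
    (see the BFD_ASSERTs below); mask it off to get the real offset.  */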
5623
5624 /* If this is a static link, or it is a -Bsymbolic link and the
5625 symbol is defined locally or was forced to be local because
5626 of a version file, we just want to emit a RELATIVE reloc.
5627 The entry in the global offset table will already have been
5628 initialized in the relocate_section function. */
5629 if (h->def_regular
5630 && h->type == STT_GNU_IFUNC)
5631 {
5632 if (bfd_link_pic (info))
5633 {
5634 /* Generate R_X86_64_GLOB_DAT. */
5635 goto do_glob_dat;
5636 }
5637 else
5638 {
5639 asection *plt;
5640
5641 if (!h->pointer_equality_needed)
5642 abort ();
5643
5644 	      /* For a non-shared object we can't use .got.plt, which
5645 		 contains the real function address, if we need pointer
5646 		 equality.  Load the GOT entry with the PLT entry instead.  */
5647 plt = htab->elf.splt ? htab->elf.splt : htab->elf.iplt;
5648 bfd_put_64 (output_bfd, (plt->output_section->vma
5649 + plt->output_offset
5650 + h->plt.offset),
5651 htab->elf.sgot->contents + h->got.offset);
5652 return TRUE;
5653 }
5654 }
5655 else if (bfd_link_pic (info)
5656 && SYMBOL_REFERENCES_LOCAL (info, h))
5657 {
5658 if (!h->def_regular)
5659 return FALSE;
5660 BFD_ASSERT((h->got.offset & 1) != 0);
5661 rela.r_info = htab->r_info (0, R_X86_64_RELATIVE);
5662 rela.r_addend = (h->root.u.def.value
5663 + h->root.u.def.section->output_section->vma
5664 + h->root.u.def.section->output_offset);
5665 }
5666 else
5667 {
5668 BFD_ASSERT((h->got.offset & 1) == 0);
5669 do_glob_dat:
5670 bfd_put_64 (output_bfd, (bfd_vma) 0,
5671 htab->elf.sgot->contents + h->got.offset);
5672 rela.r_info = htab->r_info (h->dynindx, R_X86_64_GLOB_DAT);
5673 rela.r_addend = 0;
5674 }
5675
5676 elf_append_rela (output_bfd, htab->elf.srelgot, &rela);
5677 }
5678
5679 if (h->needs_copy)
5680 {
5681 Elf_Internal_Rela rela;
5682
5683 /* This symbol needs a copy reloc. Set it up. */
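 /* At run time the dynamic linker will copy the symbol's initial
    contents from the shared object into the space reserved for it in
    the executable, at the address computed into r_offset below.  */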
5684
5685 if (h->dynindx == -1
5686 || (h->root.type != bfd_link_hash_defined
5687 && h->root.type != bfd_link_hash_defweak)
5688 || htab->srelbss == NULL)
5689 abort ();
5690
5691 rela.r_offset = (h->root.u.def.value
5692 + h->root.u.def.section->output_section->vma
5693 + h->root.u.def.section->output_offset);
5694 rela.r_info = htab->r_info (h->dynindx, R_X86_64_COPY);
5695 rela.r_addend = 0;
5696 elf_append_rela (output_bfd, htab->srelbss, &rela);
5697 }
5698
5699 return TRUE;
5700 }
5701
5702 /* Finish up local dynamic symbol handling. We set the contents of
5703 various dynamic sections here. */
5704
5705 static bfd_boolean
5706 elf_x86_64_finish_local_dynamic_symbol (void **slot, void *inf)
5707 {
5708 struct elf_link_hash_entry *h
5709 = (struct elf_link_hash_entry *) *slot;
5710 struct bfd_link_info *info
5711 = (struct bfd_link_info *) inf;
5712
5713 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
5714 info, h, NULL);
5715 }
5716
5717 /* Used to decide how to sort relocs in an optimal manner for the
5718 dynamic linker, before writing them out. */
5719
5720 static enum elf_reloc_type_class
5721 elf_x86_64_reloc_type_class (const struct bfd_link_info *info,
5722 const asection *rel_sec ATTRIBUTE_UNUSED,
5723 const Elf_Internal_Rela *rela)
5724 {
5725 bfd *abfd = info->output_bfd;
5726 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
5727 struct elf_x86_64_link_hash_table *htab = elf_x86_64_hash_table (info);
5728 unsigned long r_symndx = htab->r_sym (rela->r_info);
5729 Elf_Internal_Sym sym;
5730
5731 if (htab->elf.dynsym == NULL
5732 || !bed->s->swap_symbol_in (abfd,
5733 (htab->elf.dynsym->contents
5734 + r_symndx * bed->s->sizeof_sym),
5735 0, &sym))
5736 abort ();
5737
5738 /* Check relocation against STT_GNU_IFUNC symbol. */
5739 if (ELF_ST_TYPE (sym.st_info) == STT_GNU_IFUNC)
5740 return reloc_class_ifunc;
5741
5742 switch ((int) ELF32_R_TYPE (rela->r_info))
5743 {
5744 case R_X86_64_RELATIVE:
5745 case R_X86_64_RELATIVE64:
5746 return reloc_class_relative;
5747 case R_X86_64_JUMP_SLOT:
5748 return reloc_class_plt;
5749 case R_X86_64_COPY:
5750 return reloc_class_copy;
5751 default:
5752 return reloc_class_normal;
5753 }
5754 }
5755
5756 /* Finish up the dynamic sections. */
5757
5758 static bfd_boolean
5759 elf_x86_64_finish_dynamic_sections (bfd *output_bfd,
5760 struct bfd_link_info *info)
5761 {
5762 struct elf_x86_64_link_hash_table *htab;
5763 bfd *dynobj;
5764 asection *sdyn;
5765 const struct elf_x86_64_backend_data *abed;
5766
5767 htab = elf_x86_64_hash_table (info);
5768 if (htab == NULL)
5769 return FALSE;
5770
5771   /* Use the MPX backend data in case of BND relocations.  Use the
5772      .plt.bnd section only if there is a .plt section.  */
5773 abed = (htab->elf.splt != NULL && htab->plt_bnd != NULL
5774 ? &elf_x86_64_bnd_arch_bed
5775 : get_elf_x86_64_backend_data (output_bfd));
5776
5777 dynobj = htab->elf.dynobj;
5778 sdyn = bfd_get_linker_section (dynobj, ".dynamic");
5779
5780 if (htab->elf.dynamic_sections_created)
5781 {
5782 bfd_byte *dyncon, *dynconend;
5783 const struct elf_backend_data *bed;
5784 bfd_size_type sizeof_dyn;
5785
5786 if (sdyn == NULL || htab->elf.sgot == NULL)
5787 abort ();
5788
5789 bed = get_elf_backend_data (dynobj);
5790 sizeof_dyn = bed->s->sizeof_dyn;
5791 dyncon = sdyn->contents;
5792 dynconend = sdyn->contents + sdyn->size;
5793 for (; dyncon < dynconend; dyncon += sizeof_dyn)
5794 {
5795 Elf_Internal_Dyn dyn;
5796 asection *s;
5797
5798 (*bed->s->swap_dyn_in) (dynobj, dyncon, &dyn);
5799
5800 switch (dyn.d_tag)
5801 {
5802 default:
5803 continue;
5804
5805 case DT_PLTGOT:
5806 s = htab->elf.sgotplt;
5807 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
5808 break;
5809
5810 case DT_JMPREL:
5811 dyn.d_un.d_ptr = htab->elf.srelplt->output_section->vma;
5812 break;
5813
5814 case DT_PLTRELSZ:
5815 s = htab->elf.srelplt->output_section;
5816 dyn.d_un.d_val = s->size;
5817 break;
5818
5819 case DT_RELASZ:
5820 /* The procedure linkage table relocs (DT_JMPREL) should
5821 not be included in the overall relocs (DT_RELA).
5822 Therefore, we override the DT_RELASZ entry here to
5823 make it not include the JMPREL relocs. Since the
5824 linker script arranges for .rela.plt to follow all
5825 other relocation sections, we don't have to worry
5826 about changing the DT_RELA entry. */
5827 if (htab->elf.srelplt != NULL)
5828 {
5829 s = htab->elf.srelplt->output_section;
5830 dyn.d_un.d_val -= s->size;
5831 }
5832 break;
5833
5834 case DT_TLSDESC_PLT:
5835 s = htab->elf.splt;
5836 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
5837 + htab->tlsdesc_plt;
5838 break;
5839
5840 case DT_TLSDESC_GOT:
5841 s = htab->elf.sgot;
5842 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
5843 + htab->tlsdesc_got;
5844 break;
5845 }
5846
5847 (*bed->s->swap_dyn_out) (output_bfd, &dyn, dyncon);
5848 }
5849
5850 /* Fill in the special first entry in the procedure linkage table. */
5851 if (htab->elf.splt && htab->elf.splt->size > 0)
5852 {
5853 /* Fill in the first entry in the procedure linkage table. */
5854 memcpy (htab->elf.splt->contents,
5855 abed->plt0_entry, abed->plt_entry_size);
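 /* GOT+8 and GOT+16 are the second and third reserved GOT slots
    (GOT[1] and GOT[2], cleared further below); at run time the dynamic
    linker fills them with its link map pointer and the address of its
    lazy-resolve entry point.  */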
5856       /* Add the offset for pushq GOT+8(%rip); the displacement is
5857 	 relative to the end of the 6-byte instruction, hence the - 6.  */
5858 bfd_put_32 (output_bfd,
5859 (htab->elf.sgotplt->output_section->vma
5860 + htab->elf.sgotplt->output_offset
5861 + 8
5862 - htab->elf.splt->output_section->vma
5863 - htab->elf.splt->output_offset
5864 - 6),
5865 htab->elf.splt->contents + abed->plt0_got1_offset);
5866 /* Add offset for the PC-relative instruction accessing GOT+16,
5867 subtracting the offset to the end of that instruction. */
5868 bfd_put_32 (output_bfd,
5869 (htab->elf.sgotplt->output_section->vma
5870 + htab->elf.sgotplt->output_offset
5871 + 16
5872 - htab->elf.splt->output_section->vma
5873 - htab->elf.splt->output_offset
5874 - abed->plt0_got2_insn_end),
5875 htab->elf.splt->contents + abed->plt0_got2_offset);
5876
5877 elf_section_data (htab->elf.splt->output_section)
5878 ->this_hdr.sh_entsize = abed->plt_entry_size;
5879
5880 if (htab->tlsdesc_plt)
5881 {
5882 bfd_put_64 (output_bfd, (bfd_vma) 0,
5883 htab->elf.sgot->contents + htab->tlsdesc_got);
5884
5885 memcpy (htab->elf.splt->contents + htab->tlsdesc_plt,
5886 abed->plt0_entry, abed->plt_entry_size);
5887
5888 	  /* Add the offset for pushq GOT+8(%rip); the displacement is
5889 	     relative to the end of the 6-byte instruction, hence the - 6.  */
5890 bfd_put_32 (output_bfd,
5891 (htab->elf.sgotplt->output_section->vma
5892 + htab->elf.sgotplt->output_offset
5893 + 8
5894 - htab->elf.splt->output_section->vma
5895 - htab->elf.splt->output_offset
5896 - htab->tlsdesc_plt
5897 - 6),
5898 htab->elf.splt->contents
5899 + htab->tlsdesc_plt + abed->plt0_got1_offset);
5900 /* Add offset for the PC-relative instruction accessing GOT+TDG,
5901 	     where TDG stands for htab->tlsdesc_got, subtracting the offset
5902 to the end of that instruction. */
5903 bfd_put_32 (output_bfd,
5904 (htab->elf.sgot->output_section->vma
5905 + htab->elf.sgot->output_offset
5906 + htab->tlsdesc_got
5907 - htab->elf.splt->output_section->vma
5908 - htab->elf.splt->output_offset
5909 - htab->tlsdesc_plt
5910 - abed->plt0_got2_insn_end),
5911 htab->elf.splt->contents
5912 + htab->tlsdesc_plt + abed->plt0_got2_offset);
5913 }
5914 }
5915 }
5916
5917 if (htab->plt_bnd != NULL)
5918 elf_section_data (htab->plt_bnd->output_section)
5919 ->this_hdr.sh_entsize = sizeof (elf_x86_64_bnd_plt2_entry);
5920
5921 if (htab->elf.sgotplt)
5922 {
5923 if (bfd_is_abs_section (htab->elf.sgotplt->output_section))
5924 {
5925 (*_bfd_error_handler)
5926 (_("discarded output section: `%A'"), htab->elf.sgotplt);
5927 return FALSE;
5928 }
5929
5930 /* Fill in the first three entries in the global offset table. */
5931 if (htab->elf.sgotplt->size > 0)
5932 {
5933 /* Set the first entry in the global offset table to the address of
5934 the dynamic section. */
5935 if (sdyn == NULL)
5936 bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents);
5937 else
5938 bfd_put_64 (output_bfd,
5939 sdyn->output_section->vma + sdyn->output_offset,
5940 htab->elf.sgotplt->contents);
5941 /* Write GOT[1] and GOT[2], needed for the dynamic linker. */
5942 bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents + GOT_ENTRY_SIZE);
5943 bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents + GOT_ENTRY_SIZE*2);
5944 }
5945
5946 elf_section_data (htab->elf.sgotplt->output_section)->this_hdr.sh_entsize =
5947 GOT_ENTRY_SIZE;
5948 }
5949
5950 /* Adjust .eh_frame for .plt section. */
5951 if (htab->plt_eh_frame != NULL
5952 && htab->plt_eh_frame->contents != NULL)
5953 {
5954 if (htab->elf.splt != NULL
5955 && htab->elf.splt->size != 0
5956 && (htab->elf.splt->flags & SEC_EXCLUDE) == 0
5957 && htab->elf.splt->output_section != NULL
5958 && htab->plt_eh_frame->output_section != NULL)
5959 {
5960 bfd_vma plt_start = htab->elf.splt->output_section->vma;
5961 bfd_vma eh_frame_start = htab->plt_eh_frame->output_section->vma
5962 + htab->plt_eh_frame->output_offset
5963 + PLT_FDE_START_OFFSET;
5964 bfd_put_signed_32 (dynobj, plt_start - eh_frame_start,
5965 htab->plt_eh_frame->contents
5966 + PLT_FDE_START_OFFSET);
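 /* PLT_FDE_START_OFFSET addresses the FDE's initial-location field;
    with the DW_EH_PE_pcrel | DW_EH_PE_sdata4 encoding used by the
    .eh_frame templates (see the NaCl template below), the value stored
    is the distance from that field to the start of .plt.  */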
5967 }
5968 if (htab->plt_eh_frame->sec_info_type == SEC_INFO_TYPE_EH_FRAME)
5969 {
5970 if (! _bfd_elf_write_section_eh_frame (output_bfd, info,
5971 htab->plt_eh_frame,
5972 htab->plt_eh_frame->contents))
5973 return FALSE;
5974 }
5975 }
5976
5977 if (htab->elf.sgot && htab->elf.sgot->size > 0)
5978 elf_section_data (htab->elf.sgot->output_section)->this_hdr.sh_entsize
5979 = GOT_ENTRY_SIZE;
5980
5981 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
5982 htab_traverse (htab->loc_hash_table,
5983 elf_x86_64_finish_local_dynamic_symbol,
5984 info);
5985
5986 return TRUE;
5987 }
5988
5989 /* Return an array of PLT entry symbol values. */
5990
5991 static bfd_vma *
5992 elf_x86_64_get_plt_sym_val (bfd *abfd, asymbol **dynsyms, asection *plt,
5993 asection *relplt)
5994 {
5995 bfd_boolean (*slurp_relocs) (bfd *, asection *, asymbol **, bfd_boolean);
5996 arelent *p;
5997 long count, i;
5998 bfd_vma *plt_sym_val;
5999 bfd_vma plt_offset;
6000 bfd_byte *plt_contents;
6001 const struct elf_x86_64_backend_data *bed;
6002 Elf_Internal_Shdr *hdr;
6003 asection *plt_bnd;
6004
6005 /* Get the .plt section contents. PLT passed down may point to the
6006 .plt.bnd section. Make sure that PLT always points to the .plt
6007 section. */
6008 plt_bnd = bfd_get_section_by_name (abfd, ".plt.bnd");
6009 if (plt_bnd)
6010 {
6011 if (plt != plt_bnd)
6012 abort ();
6013 plt = bfd_get_section_by_name (abfd, ".plt");
6014 if (plt == NULL)
6015 abort ();
6016 bed = &elf_x86_64_bnd_arch_bed;
6017 }
6018 else
6019 bed = get_elf_x86_64_backend_data (abfd);
6020
6021 plt_contents = (bfd_byte *) bfd_malloc (plt->size);
6022 if (plt_contents == NULL)
6023 return NULL;
6024 if (!bfd_get_section_contents (abfd, (asection *) plt,
6025 plt_contents, 0, plt->size))
6026 {
6027 bad_return:
6028 free (plt_contents);
6029 return NULL;
6030 }
6031
6032 slurp_relocs = get_elf_backend_data (abfd)->s->slurp_reloc_table;
6033 if (! (*slurp_relocs) (abfd, relplt, dynsyms, TRUE))
6034 goto bad_return;
6035
6036 hdr = &elf_section_data (relplt)->this_hdr;
6037 count = relplt->size / hdr->sh_entsize;
6038
6039 plt_sym_val = (bfd_vma *) bfd_malloc (sizeof (bfd_vma) * count);
6040 if (plt_sym_val == NULL)
6041 goto bad_return;
6042
6043 for (i = 0; i < count; i++)
6044 plt_sym_val[i] = -1;
6045
6046 plt_offset = bed->plt_entry_size;
6047 p = relplt->relocation;
6048 for (i = 0; i < count; i++, p++)
6049 {
6050 long reloc_index;
6051
6052       /* Skip unknown relocations.  */
6053 if (p->howto == NULL)
6054 continue;
6055
6056 if (p->howto->type != R_X86_64_JUMP_SLOT
6057 && p->howto->type != R_X86_64_IRELATIVE)
6058 continue;
6059
6060 reloc_index = H_GET_32 (abfd, (plt_contents + plt_offset
6061 + bed->plt_reloc_offset));
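 /* The 32-bit word just read is the immediate of the "pushq $index" in
    the lazy part of this PLT entry, i.e. the index of the entry's
    relocation in the PLT relocation section.  */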
6062 if (reloc_index >= count)
6063 abort ();
6064 if (plt_bnd)
6065 {
6066 	  /* This is the index into the .plt section.  */
6067 long plt_index = plt_offset / bed->plt_entry_size;
6068 /* Store VMA + the offset in .plt.bnd section. */
6069 plt_sym_val[reloc_index] =
6070 (plt_bnd->vma
6071 + (plt_index - 1) * sizeof (elf_x86_64_legacy_plt2_entry));
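 /* Entry 0 of .plt is the PLT0 header, which has no companion entry in
    .plt.bnd, hence the plt_index - 1 above.  */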
6072 }
6073 else
6074 plt_sym_val[reloc_index] = plt->vma + plt_offset;
6075 plt_offset += bed->plt_entry_size;
6076
6077 /* PR binutils/18437: Skip extra relocations in the .rela.plt
6078 section. */
6079 if (plt_offset >= plt->size)
6080 break;
6081 }
6082
6083 free (plt_contents);
6084
6085 return plt_sym_val;
6086 }
6087
6088 /* Similar to _bfd_elf_get_synthetic_symtab, with .plt.bnd section
6089 support. */
6090
6091 static long
6092 elf_x86_64_get_synthetic_symtab (bfd *abfd,
6093 long symcount,
6094 asymbol **syms,
6095 long dynsymcount,
6096 asymbol **dynsyms,
6097 asymbol **ret)
6098 {
6099 /* Pass the .plt.bnd section to _bfd_elf_ifunc_get_synthetic_symtab
6100 as PLT if it exists. */
6101 asection *plt = bfd_get_section_by_name (abfd, ".plt.bnd");
6102 if (plt == NULL)
6103 plt = bfd_get_section_by_name (abfd, ".plt");
6104 return _bfd_elf_ifunc_get_synthetic_symtab (abfd, symcount, syms,
6105 dynsymcount, dynsyms, ret,
6106 plt,
6107 elf_x86_64_get_plt_sym_val);
6108 }
6109
6110 /* Handle an x86-64 specific section when reading an object file. This
6111 is called when elfcode.h finds a section with an unknown type. */
6112
6113 static bfd_boolean
6114 elf_x86_64_section_from_shdr (bfd *abfd, Elf_Internal_Shdr *hdr,
6115 const char *name, int shindex)
6116 {
6117 if (hdr->sh_type != SHT_X86_64_UNWIND)
6118 return FALSE;
6119
6120 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
6121 return FALSE;
6122
6123 return TRUE;
6124 }
6125
6126 /* Hook called by the linker routine which adds symbols from an object
6127 file. We use it to put SHN_X86_64_LCOMMON items in .lbss, instead
6128 of .bss. */
6129
6130 static bfd_boolean
6131 elf_x86_64_add_symbol_hook (bfd *abfd,
6132 struct bfd_link_info *info,
6133 Elf_Internal_Sym *sym,
6134 const char **namep ATTRIBUTE_UNUSED,
6135 flagword *flagsp ATTRIBUTE_UNUSED,
6136 asection **secp,
6137 bfd_vma *valp)
6138 {
6139 asection *lcomm;
6140
6141 switch (sym->st_shndx)
6142 {
6143 case SHN_X86_64_LCOMMON:
6144 lcomm = bfd_get_section_by_name (abfd, "LARGE_COMMON");
6145 if (lcomm == NULL)
6146 {
6147 lcomm = bfd_make_section_with_flags (abfd,
6148 "LARGE_COMMON",
6149 (SEC_ALLOC
6150 | SEC_IS_COMMON
6151 | SEC_LINKER_CREATED));
6152 if (lcomm == NULL)
6153 return FALSE;
6154 elf_section_flags (lcomm) |= SHF_X86_64_LARGE;
6155 }
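 /* As with ordinary SHN_COMMON symbols, the BFD value of a common
    symbol is its size; st_value only carries the required alignment.  */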
6156 *secp = lcomm;
6157 *valp = sym->st_size;
6158 return TRUE;
6159 }
6160
6161 if (ELF_ST_BIND (sym->st_info) == STB_GNU_UNIQUE
6162 && (abfd->flags & DYNAMIC) == 0
6163 && bfd_get_flavour (info->output_bfd) == bfd_target_elf_flavour)
6164 elf_tdata (info->output_bfd)->has_gnu_symbols
6165 |= elf_gnu_symbol_unique;
6166
6167 return TRUE;
6168 }
6169
6170
6171 /* Given a BFD section, try to locate the corresponding ELF section
6172 index. */
6173
6174 static bfd_boolean
6175 elf_x86_64_elf_section_from_bfd_section (bfd *abfd ATTRIBUTE_UNUSED,
6176 asection *sec, int *index_return)
6177 {
6178 if (sec == &_bfd_elf_large_com_section)
6179 {
6180 *index_return = SHN_X86_64_LCOMMON;
6181 return TRUE;
6182 }
6183 return FALSE;
6184 }
6185
6186 /* Process a symbol. */
6187
6188 static void
6189 elf_x86_64_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED,
6190 asymbol *asym)
6191 {
6192 elf_symbol_type *elfsym = (elf_symbol_type *) asym;
6193
6194 switch (elfsym->internal_elf_sym.st_shndx)
6195 {
6196 case SHN_X86_64_LCOMMON:
6197 asym->section = &_bfd_elf_large_com_section;
6198 asym->value = elfsym->internal_elf_sym.st_size;
6199       /* Common symbols don't set BSF_GLOBAL.  */
6200 asym->flags &= ~BSF_GLOBAL;
6201 break;
6202 }
6203 }
6204
6205 static bfd_boolean
6206 elf_x86_64_common_definition (Elf_Internal_Sym *sym)
6207 {
6208 return (sym->st_shndx == SHN_COMMON
6209 || sym->st_shndx == SHN_X86_64_LCOMMON);
6210 }
6211
6212 static unsigned int
6213 elf_x86_64_common_section_index (asection *sec)
6214 {
6215 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
6216 return SHN_COMMON;
6217 else
6218 return SHN_X86_64_LCOMMON;
6219 }
6220
6221 static asection *
6222 elf_x86_64_common_section (asection *sec)
6223 {
6224 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
6225 return bfd_com_section_ptr;
6226 else
6227 return &_bfd_elf_large_com_section;
6228 }
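/* Sections flagged SHF_X86_64_LARGE may be placed outside the low 2GB of
   the address space, which is why large common symbols are kept separate
   from ordinary commons in the functions above.  */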
6229
6230 static bfd_boolean
6231 elf_x86_64_merge_symbol (struct elf_link_hash_entry *h,
6232 const Elf_Internal_Sym *sym,
6233 asection **psec,
6234 bfd_boolean newdef,
6235 bfd_boolean olddef,
6236 bfd *oldbfd,
6237 const asection *oldsec)
6238 {
6239 /* A normal common symbol and a large common symbol result in a
6240 normal common symbol. We turn the large common symbol into a
6241 normal one. */
6242 if (!olddef
6243 && h->root.type == bfd_link_hash_common
6244 && !newdef
6245 && bfd_is_com_section (*psec)
6246 && oldsec != *psec)
6247 {
6248 if (sym->st_shndx == SHN_COMMON
6249 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) != 0)
6250 {
6251 h->root.u.c.p->section
6252 = bfd_make_section_old_way (oldbfd, "COMMON");
6253 h->root.u.c.p->section->flags = SEC_ALLOC;
6254 }
6255 else if (sym->st_shndx == SHN_X86_64_LCOMMON
6256 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) == 0)
6257 *psec = bfd_com_section_ptr;
6258 }
6259
6260 return TRUE;
6261 }
6262
6263 static int
6264 elf_x86_64_additional_program_headers (bfd *abfd,
6265 struct bfd_link_info *info ATTRIBUTE_UNUSED)
6266 {
6267 asection *s;
6268 int count = 0;
6269
6270 /* Check to see if we need a large readonly segment. */
6271 s = bfd_get_section_by_name (abfd, ".lrodata");
6272 if (s && (s->flags & SEC_LOAD))
6273 count++;
6274
6275   /* Check to see if we need a large data segment.  Since the .lbss
6276      section is placed right after the .bss section, there should be no need for
6277 a large data segment just because of .lbss. */
6278 s = bfd_get_section_by_name (abfd, ".ldata");
6279 if (s && (s->flags & SEC_LOAD))
6280 count++;
6281
6282 return count;
6283 }
6284
6285 /* Return TRUE if symbol should be hashed in the `.gnu.hash' section. */
6286
6287 static bfd_boolean
6288 elf_x86_64_hash_symbol (struct elf_link_hash_entry *h)
6289 {
6290 if (h->plt.offset != (bfd_vma) -1
6291 && !h->def_regular
6292 && !h->pointer_equality_needed)
6293 return FALSE;
6294
6295 return _bfd_elf_hash_symbol (h);
6296 }
6297
6298 /* Return TRUE iff relocations for INPUT are compatible with OUTPUT. */
6299
6300 static bfd_boolean
6301 elf_x86_64_relocs_compatible (const bfd_target *input,
6302 const bfd_target *output)
6303 {
6304 return ((xvec_get_elf_backend_data (input)->s->elfclass
6305 == xvec_get_elf_backend_data (output)->s->elfclass)
6306 && _bfd_elf_relocs_compatible (input, output));
6307 }
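/* x32 and x86-64 objects use the same relocation numbers but differently
   sized relocation entries, so only inputs whose ELF class matches the
   output are treated as compatible.  */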
6308
6309 static const struct bfd_elf_special_section
6310 elf_x86_64_special_sections[]=
6311 {
6312 { STRING_COMMA_LEN (".gnu.linkonce.lb"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
6313 { STRING_COMMA_LEN (".gnu.linkonce.lr"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
6314 { STRING_COMMA_LEN (".gnu.linkonce.lt"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_EXECINSTR + SHF_X86_64_LARGE},
6315 { STRING_COMMA_LEN (".lbss"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
6316 { STRING_COMMA_LEN (".ldata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
6317 { STRING_COMMA_LEN (".lrodata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
6318 { NULL, 0, 0, 0, 0 }
6319 };
6320
6321 #define TARGET_LITTLE_SYM x86_64_elf64_vec
6322 #define TARGET_LITTLE_NAME "elf64-x86-64"
6323 #define ELF_ARCH bfd_arch_i386
6324 #define ELF_TARGET_ID X86_64_ELF_DATA
6325 #define ELF_MACHINE_CODE EM_X86_64
6326 #define ELF_MAXPAGESIZE 0x200000
6327 #define ELF_MINPAGESIZE 0x1000
6328 #define ELF_COMMONPAGESIZE 0x1000
6329
6330 #define elf_backend_can_gc_sections 1
6331 #define elf_backend_can_refcount 1
6332 #define elf_backend_want_got_plt 1
6333 #define elf_backend_plt_readonly 1
6334 #define elf_backend_want_plt_sym 0
6335 #define elf_backend_got_header_size (GOT_ENTRY_SIZE*3)
6336 #define elf_backend_rela_normal 1
6337 #define elf_backend_plt_alignment 4
6338 #define elf_backend_extern_protected_data 1
6339
6340 #define elf_info_to_howto elf_x86_64_info_to_howto
6341
6342 #define bfd_elf64_bfd_link_hash_table_create \
6343 elf_x86_64_link_hash_table_create
6344 #define bfd_elf64_bfd_reloc_type_lookup elf_x86_64_reloc_type_lookup
6345 #define bfd_elf64_bfd_reloc_name_lookup \
6346 elf_x86_64_reloc_name_lookup
6347
6348 #define elf_backend_adjust_dynamic_symbol elf_x86_64_adjust_dynamic_symbol
6349 #define elf_backend_relocs_compatible elf_x86_64_relocs_compatible
6350 #define elf_backend_check_relocs elf_x86_64_check_relocs
6351 #define elf_backend_copy_indirect_symbol elf_x86_64_copy_indirect_symbol
6352 #define elf_backend_create_dynamic_sections elf_x86_64_create_dynamic_sections
6353 #define elf_backend_finish_dynamic_sections elf_x86_64_finish_dynamic_sections
6354 #define elf_backend_finish_dynamic_symbol elf_x86_64_finish_dynamic_symbol
6355 #define elf_backend_gc_mark_hook elf_x86_64_gc_mark_hook
6356 #define elf_backend_gc_sweep_hook elf_x86_64_gc_sweep_hook
6357 #define elf_backend_grok_prstatus elf_x86_64_grok_prstatus
6358 #define elf_backend_grok_psinfo elf_x86_64_grok_psinfo
6359 #ifdef CORE_HEADER
6360 #define elf_backend_write_core_note elf_x86_64_write_core_note
6361 #endif
6362 #define elf_backend_reloc_type_class elf_x86_64_reloc_type_class
6363 #define elf_backend_relocate_section elf_x86_64_relocate_section
6364 #define elf_backend_size_dynamic_sections elf_x86_64_size_dynamic_sections
6365 #define elf_backend_always_size_sections elf_x86_64_always_size_sections
6366 #define elf_backend_init_index_section _bfd_elf_init_1_index_section
6367 #define elf_backend_object_p elf64_x86_64_elf_object_p
6368 #define bfd_elf64_mkobject elf_x86_64_mkobject
6369 #define bfd_elf64_get_synthetic_symtab elf_x86_64_get_synthetic_symtab
6370
6371 #define elf_backend_section_from_shdr \
6372 elf_x86_64_section_from_shdr
6373
6374 #define elf_backend_section_from_bfd_section \
6375 elf_x86_64_elf_section_from_bfd_section
6376 #define elf_backend_add_symbol_hook \
6377 elf_x86_64_add_symbol_hook
6378 #define elf_backend_symbol_processing \
6379 elf_x86_64_symbol_processing
6380 #define elf_backend_common_section_index \
6381 elf_x86_64_common_section_index
6382 #define elf_backend_common_section \
6383 elf_x86_64_common_section
6384 #define elf_backend_common_definition \
6385 elf_x86_64_common_definition
6386 #define elf_backend_merge_symbol \
6387 elf_x86_64_merge_symbol
6388 #define elf_backend_special_sections \
6389 elf_x86_64_special_sections
6390 #define elf_backend_additional_program_headers \
6391 elf_x86_64_additional_program_headers
6392 #define elf_backend_hash_symbol \
6393 elf_x86_64_hash_symbol
6394
6395 #include "elf64-target.h"
6396
6397 /* CloudABI support. */
6398
6399 #undef TARGET_LITTLE_SYM
6400 #define TARGET_LITTLE_SYM x86_64_elf64_cloudabi_vec
6401 #undef TARGET_LITTLE_NAME
6402 #define TARGET_LITTLE_NAME "elf64-x86-64-cloudabi"
6403
6404 #undef ELF_OSABI
6405 #define ELF_OSABI ELFOSABI_CLOUDABI
6406
6407 #undef elf64_bed
6408 #define elf64_bed elf64_x86_64_cloudabi_bed
6409
6410 #include "elf64-target.h"
6411
6412 /* FreeBSD support. */
6413
6414 #undef TARGET_LITTLE_SYM
6415 #define TARGET_LITTLE_SYM x86_64_elf64_fbsd_vec
6416 #undef TARGET_LITTLE_NAME
6417 #define TARGET_LITTLE_NAME "elf64-x86-64-freebsd"
6418
6419 #undef ELF_OSABI
6420 #define ELF_OSABI ELFOSABI_FREEBSD
6421
6422 #undef elf64_bed
6423 #define elf64_bed elf64_x86_64_fbsd_bed
6424
6425 #include "elf64-target.h"
6426
6427 /* Solaris 2 support. */
6428
6429 #undef TARGET_LITTLE_SYM
6430 #define TARGET_LITTLE_SYM x86_64_elf64_sol2_vec
6431 #undef TARGET_LITTLE_NAME
6432 #define TARGET_LITTLE_NAME "elf64-x86-64-sol2"
6433
6434 /* Restore default: we cannot use ELFOSABI_SOLARIS, otherwise ELFOSABI_NONE
6435 objects won't be recognized. */
6436 #undef ELF_OSABI
6437
6438 #undef elf64_bed
6439 #define elf64_bed elf64_x86_64_sol2_bed
6440
6441 /* The 64-bit static TLS arena size is rounded to the nearest 16-byte
6442 boundary. */
6443 #undef elf_backend_static_tls_alignment
6444 #define elf_backend_static_tls_alignment 16
6445
6446 /* The Solaris 2 ABI requires a plt symbol on all platforms.
6447
6448 Cf. Linker and Libraries Guide, Ch. 2, Link-Editor, Generating the Output
6449 File, p.63. */
6450 #undef elf_backend_want_plt_sym
6451 #define elf_backend_want_plt_sym 1
6452
6453 #include "elf64-target.h"
6454
6455 /* Native Client support. */
6456
6457 static bfd_boolean
6458 elf64_x86_64_nacl_elf_object_p (bfd *abfd)
6459 {
6460 /* Set the right machine number for a NaCl x86-64 ELF64 file. */
6461 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64_nacl);
6462 return TRUE;
6463 }
6464
6465 #undef TARGET_LITTLE_SYM
6466 #define TARGET_LITTLE_SYM x86_64_elf64_nacl_vec
6467 #undef TARGET_LITTLE_NAME
6468 #define TARGET_LITTLE_NAME "elf64-x86-64-nacl"
6469 #undef elf64_bed
6470 #define elf64_bed elf64_x86_64_nacl_bed
6471
6472 #undef ELF_MAXPAGESIZE
6473 #undef ELF_MINPAGESIZE
6474 #undef ELF_COMMONPAGESIZE
6475 #define ELF_MAXPAGESIZE 0x10000
6476 #define ELF_MINPAGESIZE 0x10000
6477 #define ELF_COMMONPAGESIZE 0x10000
6478
6479 /* Restore defaults. */
6480 #undef ELF_OSABI
6481 #undef elf_backend_static_tls_alignment
6482 #undef elf_backend_want_plt_sym
6483 #define elf_backend_want_plt_sym 0
6484
6485 /* NaCl uses substantially different PLT entries for the same effects. */
6486
6487 #undef elf_backend_plt_alignment
6488 #define elf_backend_plt_alignment 5
6489 #define NACL_PLT_ENTRY_SIZE 64
6490 #define NACLMASK 0xe0 /* 32-byte alignment mask. */
6491
6492 static const bfd_byte elf_x86_64_nacl_plt0_entry[NACL_PLT_ENTRY_SIZE] =
6493 {
6494 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
6495 0x4c, 0x8b, 0x1d, 16, 0, 0, 0, /* mov GOT+16(%rip), %r11 */
6496 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
6497 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
6498 0x41, 0xff, 0xe3, /* jmpq *%r11 */
6499
6500 /* 9-byte nop sequence to pad out to the next 32-byte boundary. */
6501 0x66, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw 0x0(%rax,%rax,1) */
6502
6503 /* 32 bytes of nop to pad out to the standard size. */
6504 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */
6505 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
6506 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */
6507 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
6508 0x66, /* excess data32 prefix */
6509 0x90 /* nop */
6510 };
6511
6512 static const bfd_byte elf_x86_64_nacl_plt_entry[NACL_PLT_ENTRY_SIZE] =
6513 {
6514 0x4c, 0x8b, 0x1d, 0, 0, 0, 0, /* mov name@GOTPCREL(%rip),%r11 */
6515 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
6516 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
6517 0x41, 0xff, 0xe3, /* jmpq *%r11 */
6518
6519 /* 15-byte nop sequence to pad out to the next 32-byte boundary. */
6520 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */
6521 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
6522
6523 /* Lazy GOT entries point here (32-byte aligned). */
6524 0x68, /* pushq immediate */
6525 0, 0, 0, 0, /* replaced with index into relocation table. */
6526 0xe9, /* jmp relative */
6527 0, 0, 0, 0, /* replaced with offset to start of .plt0. */
6528
6529 /* 22 bytes of nop to pad out to the standard size. */
6530 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */
6531 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
6532 0x0f, 0x1f, 0x80, 0, 0, 0, 0, /* nopl 0x0(%rax) */
6533 };
6534
6535 /* .eh_frame covering the .plt section. */
6536
6537 static const bfd_byte elf_x86_64_nacl_eh_frame_plt[] =
6538 {
6539 #if (PLT_CIE_LENGTH != 20 \
6540 || PLT_FDE_LENGTH != 36 \
6541 || PLT_FDE_START_OFFSET != 4 + PLT_CIE_LENGTH + 8 \
6542 || PLT_FDE_LEN_OFFSET != 4 + PLT_CIE_LENGTH + 12)
6543 # error "Need elf_x86_64_backend_data parameters for eh_frame_plt offsets!"
6544 #endif
6545 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
6546 0, 0, 0, 0, /* CIE ID */
6547 1, /* CIE version */
6548 'z', 'R', 0, /* Augmentation string */
6549 1, /* Code alignment factor */
6550 0x78, /* Data alignment factor */
6551 16, /* Return address column */
6552 1, /* Augmentation size */
6553 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
6554 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
6555 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
6556 DW_CFA_nop, DW_CFA_nop,
6557
6558 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
6559 PLT_CIE_LENGTH + 8, 0, 0, 0,/* CIE pointer */
6560 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
6561 0, 0, 0, 0, /* .plt size goes here */
6562 0, /* Augmentation size */
6563 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
6564 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
6565 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
6566 DW_CFA_advance_loc + 58, /* DW_CFA_advance_loc: 58 to __PLT__+64 */
6567 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
6568 13, /* Block length */
6569 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
6570 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
6571 DW_OP_const1u, 63, DW_OP_and, DW_OP_const1u, 37, DW_OP_ge,
6572 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
6573 DW_CFA_nop, DW_CFA_nop
6574 };
6575
6576 static const struct elf_x86_64_backend_data elf_x86_64_nacl_arch_bed =
6577 {
6578 elf_x86_64_nacl_plt0_entry, /* plt0_entry */
6579 elf_x86_64_nacl_plt_entry, /* plt_entry */
6580 NACL_PLT_ENTRY_SIZE, /* plt_entry_size */
6581 2, /* plt0_got1_offset */
6582 9, /* plt0_got2_offset */
6583 13, /* plt0_got2_insn_end */
6584 3, /* plt_got_offset */
6585 33, /* plt_reloc_offset */
6586 38, /* plt_plt_offset */
6587 7, /* plt_got_insn_size */
6588 42, /* plt_plt_insn_end */
6589 32, /* plt_lazy_offset */
6590 elf_x86_64_nacl_eh_frame_plt, /* eh_frame_plt */
6591 sizeof (elf_x86_64_nacl_eh_frame_plt), /* eh_frame_plt_size */
6592 };
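/* These offsets match the byte layout of the NaCl templates above: e.g.
   plt_got_offset 3 is the displacement of the initial
   "mov name@GOTPCREL(%rip), %r11", while plt_reloc_offset 33 and
   plt_plt_offset 38 index the "pushq" and "jmp" immediates in the lazy
   part that begins at plt_lazy_offset 32.  */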
6593
6594 #undef elf_backend_arch_data
6595 #define elf_backend_arch_data &elf_x86_64_nacl_arch_bed
6596
6597 #undef elf_backend_object_p
6598 #define elf_backend_object_p elf64_x86_64_nacl_elf_object_p
6599 #undef elf_backend_modify_segment_map
6600 #define elf_backend_modify_segment_map nacl_modify_segment_map
6601 #undef elf_backend_modify_program_headers
6602 #define elf_backend_modify_program_headers nacl_modify_program_headers
6603 #undef elf_backend_final_write_processing
6604 #define elf_backend_final_write_processing nacl_final_write_processing
6605
6606 #include "elf64-target.h"
6607
6608 /* Native Client x32 support. */
6609
6610 static bfd_boolean
6611 elf32_x86_64_nacl_elf_object_p (bfd *abfd)
6612 {
6613 /* Set the right machine number for a NaCl x86-64 ELF32 file. */
6614 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32_nacl);
6615 return TRUE;
6616 }
6617
6618 #undef TARGET_LITTLE_SYM
6619 #define TARGET_LITTLE_SYM x86_64_elf32_nacl_vec
6620 #undef TARGET_LITTLE_NAME
6621 #define TARGET_LITTLE_NAME "elf32-x86-64-nacl"
6622 #undef elf32_bed
6623 #define elf32_bed elf32_x86_64_nacl_bed
6624
6625 #define bfd_elf32_bfd_link_hash_table_create \
6626 elf_x86_64_link_hash_table_create
6627 #define bfd_elf32_bfd_reloc_type_lookup \
6628 elf_x86_64_reloc_type_lookup
6629 #define bfd_elf32_bfd_reloc_name_lookup \
6630 elf_x86_64_reloc_name_lookup
6631 #define bfd_elf32_mkobject \
6632 elf_x86_64_mkobject
6633 #define bfd_elf32_get_synthetic_symtab \
6634 elf_x86_64_get_synthetic_symtab
6635
6636 #undef elf_backend_object_p
6637 #define elf_backend_object_p \
6638 elf32_x86_64_nacl_elf_object_p
6639
6640 #undef elf_backend_bfd_from_remote_memory
6641 #define elf_backend_bfd_from_remote_memory \
6642 _bfd_elf32_bfd_from_remote_memory
6643
6644 #undef elf_backend_size_info
6645 #define elf_backend_size_info \
6646 _bfd_elf32_size_info
6647
6648 #include "elf32-target.h"
6649
6650 /* Restore defaults. */
6651 #undef elf_backend_object_p
6652 #define elf_backend_object_p elf64_x86_64_elf_object_p
6653 #undef elf_backend_bfd_from_remote_memory
6654 #undef elf_backend_size_info
6655 #undef elf_backend_modify_segment_map
6656 #undef elf_backend_modify_program_headers
6657 #undef elf_backend_final_write_processing
6658
6659 /* Intel L1OM support. */
6660
6661 static bfd_boolean
6662 elf64_l1om_elf_object_p (bfd *abfd)
6663 {
6664 /* Set the right machine number for an L1OM elf64 file. */
6665 bfd_default_set_arch_mach (abfd, bfd_arch_l1om, bfd_mach_l1om);
6666 return TRUE;
6667 }
6668
6669 #undef TARGET_LITTLE_SYM
6670 #define TARGET_LITTLE_SYM l1om_elf64_vec
6671 #undef TARGET_LITTLE_NAME
6672 #define TARGET_LITTLE_NAME "elf64-l1om"
6673 #undef ELF_ARCH
6674 #define ELF_ARCH bfd_arch_l1om
6675
6676 #undef ELF_MACHINE_CODE
6677 #define ELF_MACHINE_CODE EM_L1OM
6678
6679 #undef ELF_OSABI
6680
6681 #undef elf64_bed
6682 #define elf64_bed elf64_l1om_bed
6683
6684 #undef elf_backend_object_p
6685 #define elf_backend_object_p elf64_l1om_elf_object_p
6686
6687 /* Restore defaults. */
6688 #undef ELF_MAXPAGESIZE
6689 #undef ELF_MINPAGESIZE
6690 #undef ELF_COMMONPAGESIZE
6691 #define ELF_MAXPAGESIZE 0x200000
6692 #define ELF_MINPAGESIZE 0x1000
6693 #define ELF_COMMONPAGESIZE 0x1000
6694 #undef elf_backend_plt_alignment
6695 #define elf_backend_plt_alignment 4
6696 #undef elf_backend_arch_data
6697 #define elf_backend_arch_data &elf_x86_64_arch_bed
6698
6699 #include "elf64-target.h"
6700
6701 /* FreeBSD L1OM support. */
6702
6703 #undef TARGET_LITTLE_SYM
6704 #define TARGET_LITTLE_SYM l1om_elf64_fbsd_vec
6705 #undef TARGET_LITTLE_NAME
6706 #define TARGET_LITTLE_NAME "elf64-l1om-freebsd"
6707
6708 #undef ELF_OSABI
6709 #define ELF_OSABI ELFOSABI_FREEBSD
6710
6711 #undef elf64_bed
6712 #define elf64_bed elf64_l1om_fbsd_bed
6713
6714 #include "elf64-target.h"
6715
6716 /* Intel K1OM support. */
6717
6718 static bfd_boolean
6719 elf64_k1om_elf_object_p (bfd *abfd)
6720 {
6721   /* Set the right machine number for a K1OM elf64 file.  */
6722 bfd_default_set_arch_mach (abfd, bfd_arch_k1om, bfd_mach_k1om);
6723 return TRUE;
6724 }
6725
6726 #undef TARGET_LITTLE_SYM
6727 #define TARGET_LITTLE_SYM k1om_elf64_vec
6728 #undef TARGET_LITTLE_NAME
6729 #define TARGET_LITTLE_NAME "elf64-k1om"
6730 #undef ELF_ARCH
6731 #define ELF_ARCH bfd_arch_k1om
6732
6733 #undef ELF_MACHINE_CODE
6734 #define ELF_MACHINE_CODE EM_K1OM
6735
6736 #undef ELF_OSABI
6737
6738 #undef elf64_bed
6739 #define elf64_bed elf64_k1om_bed
6740
6741 #undef elf_backend_object_p
6742 #define elf_backend_object_p elf64_k1om_elf_object_p
6743
6744 #undef elf_backend_static_tls_alignment
6745
6746 #undef elf_backend_want_plt_sym
6747 #define elf_backend_want_plt_sym 0
6748
6749 #include "elf64-target.h"
6750
6751 /* FreeBSD K1OM support. */
6752
6753 #undef TARGET_LITTLE_SYM
6754 #define TARGET_LITTLE_SYM k1om_elf64_fbsd_vec
6755 #undef TARGET_LITTLE_NAME
6756 #define TARGET_LITTLE_NAME "elf64-k1om-freebsd"
6757
6758 #undef ELF_OSABI
6759 #define ELF_OSABI ELFOSABI_FREEBSD
6760
6761 #undef elf64_bed
6762 #define elf64_bed elf64_k1om_fbsd_bed
6763
6764 #include "elf64-target.h"
6765
6766 /* 32bit x86-64 support. */
6767
6768 #undef TARGET_LITTLE_SYM
6769 #define TARGET_LITTLE_SYM x86_64_elf32_vec
6770 #undef TARGET_LITTLE_NAME
6771 #define TARGET_LITTLE_NAME "elf32-x86-64"
6772 #undef elf32_bed
6773
6774 #undef ELF_ARCH
6775 #define ELF_ARCH bfd_arch_i386
6776
6777 #undef ELF_MACHINE_CODE
6778 #define ELF_MACHINE_CODE EM_X86_64
6779
6780 #undef ELF_OSABI
6781
6782 #undef elf_backend_object_p
6783 #define elf_backend_object_p \
6784 elf32_x86_64_elf_object_p
6785
6786 #undef elf_backend_bfd_from_remote_memory
6787 #define elf_backend_bfd_from_remote_memory \
6788 _bfd_elf32_bfd_from_remote_memory
6789
6790 #undef elf_backend_size_info
6791 #define elf_backend_size_info \
6792 _bfd_elf32_size_info
6793
6794 #include "elf32-target.h"