2 * Copyright 2014-2019 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 #include "ac_binary.h"
34 #include "ac_gpu_info.h"
35 #include "util/u_dynarray.h"
36 #include "util/u_math.h"
38 // Old distributions may not have this enum constant
39 #define MY_EM_AMDGPU 224
41 #ifndef STT_AMDGPU_LDS
42 #define STT_AMDGPU_LDS 13
46 #define R_AMDGPU_NONE 0
47 #define R_AMDGPU_ABS32_LO 1
48 #define R_AMDGPU_ABS32_HI 2
49 #define R_AMDGPU_ABS64 3
50 #define R_AMDGPU_REL32 4
51 #define R_AMDGPU_REL64 5
52 #define R_AMDGPU_ABS32 6
53 #define R_AMDGPU_GOTPCREL 7
54 #define R_AMDGPU_GOTPCREL32_LO 8
55 #define R_AMDGPU_GOTPCREL32_HI 9
56 #define R_AMDGPU_REL32_LO 10
57 #define R_AMDGPU_REL32_HI 11
58 #define R_AMDGPU_RELATIVE64 13
61 /* For the UMR disassembler. */
62 #define DEBUGGER_END_OF_CODE_MARKER 0xbf9f0000 /* invalid instruction */
63 #define DEBUGGER_NUM_MARKERS 5
65 struct ac_rtld_section
{
67 bool is_pasted_text
: 1;
74 struct ac_rtld_section
*sections
;
75 unsigned num_sections
;
/* Print a formatted ac_rtld error message to stderr.
 *
 * Takes an already-started va_list, so it must use vasprintf (passing a
 * va_list through the variadic asprintf is undefined behavior). */
static void report_erroraf(const char *fmt, va_list va)
{
   char *msg;
   int ret = vasprintf(&msg, fmt, va);
   if (ret < 0)
      msg = "(asprintf failed)";

   fprintf(stderr, "ac_rtld error: %s\n", msg);

   /* Only free when vasprintf actually allocated; on failure msg points at
    * a string literal which must not be freed. */
   if (ret >= 0)
      free(msg);
}
91 static void report_errorf(const char *fmt
, ...) PRINTFLIKE(1, 2);
93 static void report_errorf(const char *fmt
, ...)
97 report_erroraf(fmt
, va
);
101 static void report_elf_errorf(const char *fmt
, ...) PRINTFLIKE(1, 2);
103 static void report_elf_errorf(const char *fmt
, ...)
107 report_erroraf(fmt
, va
);
110 fprintf(stderr
, "ELF error: %s\n", elf_errmsg(elf_errno()));
114 * Find a symbol in a dynarray of struct ac_rtld_symbol by \p name and shader
117 static const struct ac_rtld_symbol
*find_symbol(const struct util_dynarray
*symbols
,
118 const char *name
, unsigned part_idx
)
120 util_dynarray_foreach(symbols
, struct ac_rtld_symbol
, symbol
) {
121 if ((symbol
->part_idx
== ~0u || symbol
->part_idx
== part_idx
) &&
122 !strcmp(name
, symbol
->name
))
128 static int compare_symbol_by_align(const void *lhsp
, const void *rhsp
)
130 const struct ac_rtld_symbol
*lhs
= lhsp
;
131 const struct ac_rtld_symbol
*rhs
= rhsp
;
132 if (rhs
->align
> lhs
->align
)
134 if (rhs
->align
< lhs
->align
)
140 * Sort the given symbol list by decreasing alignment and assign offsets.
142 static bool layout_symbols(struct ac_rtld_symbol
*symbols
, unsigned num_symbols
,
143 uint64_t *ptotal_size
)
145 qsort(symbols
, num_symbols
, sizeof(*symbols
), compare_symbol_by_align
);
147 uint64_t total_size
= *ptotal_size
;
149 for (unsigned i
= 0; i
< num_symbols
; ++i
) {
150 struct ac_rtld_symbol
*s
= &symbols
[i
];
151 assert(util_is_power_of_two_nonzero(s
->align
));
153 total_size
= align64(total_size
, s
->align
);
154 s
->offset
= total_size
;
156 if (total_size
+ s
->size
< total_size
) {
157 report_errorf("%s: size overflow", __FUNCTION__
);
161 total_size
+= s
->size
;
164 *ptotal_size
= total_size
;
169 * Read LDS symbols from the given \p section of the ELF of \p part and append
170 * them to the LDS symbols list.
172 * Shared LDS symbols are filtered out.
174 static bool read_private_lds_symbols(struct ac_rtld_binary
*binary
,
177 uint32_t *lds_end_align
)
179 #define report_elf_if(cond) \
182 report_errorf(#cond); \
187 struct ac_rtld_part
*part
= &binary
->parts
[part_idx
];
188 Elf64_Shdr
*shdr
= elf64_getshdr(section
);
189 uint32_t strtabidx
= shdr
->sh_link
;
190 Elf_Data
*symbols_data
= elf_getdata(section
, NULL
);
191 report_elf_if(!symbols_data
);
193 const Elf64_Sym
*symbol
= symbols_data
->d_buf
;
194 size_t num_symbols
= symbols_data
->d_size
/ sizeof(Elf64_Sym
);
196 for (size_t j
= 0; j
< num_symbols
; ++j
, ++symbol
) {
197 if (ELF64_ST_TYPE(symbol
->st_info
) != STT_AMDGPU_LDS
)
200 report_elf_if(symbol
->st_size
> 1u << 29);
202 struct ac_rtld_symbol s
= {};
203 s
.name
= elf_strptr(part
->elf
, strtabidx
, symbol
->st_name
);
204 s
.size
= symbol
->st_size
;
205 s
.align
= MIN2(1u << (symbol
->st_other
>> 3), 1u << 16);
206 s
.part_idx
= part_idx
;
208 if (!strcmp(s
.name
, "__lds_end")) {
209 report_elf_if(s
.size
!= 0);
210 *lds_end_align
= MAX2(*lds_end_align
, s
.align
);
214 const struct ac_rtld_symbol
*shared
=
215 find_symbol(&binary
->lds_symbols
, s
.name
, part_idx
);
217 report_elf_if(s
.align
> shared
->align
);
218 report_elf_if(s
.size
> shared
->size
);
222 util_dynarray_append(&binary
->lds_symbols
, struct ac_rtld_symbol
, s
);
231 * Open a binary consisting of one or more shader parts.
233 * \param binary the uninitialized struct
234 * \param i binary opening parameters
236 bool ac_rtld_open(struct ac_rtld_binary
*binary
,
237 struct ac_rtld_open_info i
)
239 /* One of the libelf implementations
240 * (http://www.mr511.de/software/english.htm) requires calling
241 * elf_version() before elf_memory().
243 elf_version(EV_CURRENT
);
245 memset(binary
, 0, sizeof(*binary
));
246 binary
->num_parts
= i
.num_parts
;
247 binary
->parts
= calloc(sizeof(*binary
->parts
), i
.num_parts
);
251 uint64_t pasted_text_size
= 0;
252 uint64_t rx_align
= 1;
253 uint64_t rx_size
= 0;
255 #define report_if(cond) \
258 report_errorf(#cond); \
262 #define report_elf_if(cond) \
265 report_elf_errorf(#cond); \
270 /* Copy and layout shared LDS symbols. */
271 if (i
.num_shared_lds_symbols
) {
272 if (!util_dynarray_resize(&binary
->lds_symbols
, struct ac_rtld_symbol
,
273 i
.num_shared_lds_symbols
))
276 memcpy(binary
->lds_symbols
.data
, i
.shared_lds_symbols
, binary
->lds_symbols
.size
);
279 util_dynarray_foreach(&binary
->lds_symbols
, struct ac_rtld_symbol
, symbol
)
280 symbol
->part_idx
= ~0u;
282 unsigned max_lds_size
= i
.info
->chip_class
>= GFX7
? 64 * 1024 : 32 * 1024;
283 uint64_t shared_lds_size
= 0;
284 if (!layout_symbols(binary
->lds_symbols
.data
, i
.num_shared_lds_symbols
, &shared_lds_size
))
286 report_if(shared_lds_size
> max_lds_size
);
287 binary
->lds_size
= shared_lds_size
;
289 /* First pass over all parts: open ELFs, pre-determine the placement of
290 * sections in the memory image, and collect and layout private LDS symbols. */
291 uint32_t lds_end_align
= 0;
293 for (unsigned part_idx
= 0; part_idx
< i
.num_parts
; ++part_idx
) {
294 struct ac_rtld_part
*part
= &binary
->parts
[part_idx
];
295 unsigned part_lds_symbols_begin
=
296 util_dynarray_num_elements(&binary
->lds_symbols
, struct ac_rtld_symbol
);
298 part
->elf
= elf_memory((char *)i
.elf_ptrs
[part_idx
], i
.elf_sizes
[part_idx
]);
299 report_elf_if(!part
->elf
);
301 const Elf64_Ehdr
*ehdr
= elf64_getehdr(part
->elf
);
302 report_elf_if(!ehdr
);
303 report_if(ehdr
->e_machine
!= MY_EM_AMDGPU
);
305 size_t section_str_index
;
307 report_elf_if(elf_getshdrstrndx(part
->elf
, §ion_str_index
) < 0);
308 report_elf_if(elf_getshdrnum(part
->elf
, &num_shdrs
) < 0);
310 part
->num_sections
= num_shdrs
;
311 part
->sections
= calloc(sizeof(*part
->sections
), num_shdrs
);
312 report_if(!part
->sections
);
314 Elf_Scn
*section
= NULL
;
315 while ((section
= elf_nextscn(part
->elf
, section
))) {
316 Elf64_Shdr
*shdr
= elf64_getshdr(section
);
317 struct ac_rtld_section
*s
= &part
->sections
[elf_ndxscn(section
)];
318 s
->name
= elf_strptr(part
->elf
, section_str_index
, shdr
->sh_name
);
319 report_elf_if(!s
->name
);
321 /* Cannot actually handle linked objects yet */
322 report_elf_if(shdr
->sh_addr
!= 0);
324 /* Alignment must be 0 or a power of two */
325 report_elf_if(shdr
->sh_addralign
& (shdr
->sh_addralign
- 1));
326 uint64_t sh_align
= MAX2(shdr
->sh_addralign
, 1);
328 if (shdr
->sh_flags
& SHF_ALLOC
&&
329 shdr
->sh_type
!= SHT_NOTE
) {
330 report_if(shdr
->sh_flags
& SHF_WRITE
);
334 if (shdr
->sh_flags
& SHF_EXECINSTR
) {
335 report_elf_if(shdr
->sh_size
& 3);
337 if (!strcmp(s
->name
, ".text"))
338 s
->is_pasted_text
= true;
341 if (s
->is_pasted_text
) {
342 s
->offset
= pasted_text_size
;
343 pasted_text_size
+= shdr
->sh_size
;
345 rx_align
= align(rx_align
, sh_align
);
346 rx_size
= align(rx_size
, sh_align
);
348 rx_size
+= shdr
->sh_size
;
350 } else if (shdr
->sh_type
== SHT_SYMTAB
) {
351 if (!read_private_lds_symbols(binary
, part_idx
, section
, &lds_end_align
))
356 uint64_t part_lds_size
= shared_lds_size
;
358 util_dynarray_element(&binary
->lds_symbols
, struct ac_rtld_symbol
, part_lds_symbols_begin
),
359 util_dynarray_num_elements(&binary
->lds_symbols
, struct ac_rtld_symbol
) - part_lds_symbols_begin
,
362 binary
->lds_size
= MAX2(binary
->lds_size
, part_lds_size
);
365 binary
->rx_end_markers
= pasted_text_size
;
366 pasted_text_size
+= 4 * DEBUGGER_NUM_MARKERS
;
368 /* __lds_end is a special symbol that points at the end of the memory
369 * occupied by other LDS symbols. Its alignment is taken as the
370 * maximum of its alignment over all shader parts where it occurs.
373 binary
->lds_size
= align(binary
->lds_size
, lds_end_align
);
375 struct ac_rtld_symbol
*lds_end
=
376 util_dynarray_grow(&binary
->lds_symbols
, struct ac_rtld_symbol
, 1);
377 lds_end
->name
= "__lds_end";
379 lds_end
->align
= lds_end_align
;
380 lds_end
->offset
= binary
->lds_size
;
381 lds_end
->part_idx
= ~0u;
384 report_elf_if(binary
->lds_size
> max_lds_size
);
386 /* Second pass: Adjust offsets of non-pasted text sections. */
387 binary
->rx_size
= pasted_text_size
;
388 binary
->rx_size
= align(binary
->rx_size
, rx_align
);
390 for (unsigned part_idx
= 0; part_idx
< i
.num_parts
; ++part_idx
) {
391 struct ac_rtld_part
*part
= &binary
->parts
[part_idx
];
393 elf_getshdrnum(part
->elf
, &num_shdrs
);
395 for (unsigned j
= 0; j
< num_shdrs
; ++j
) {
396 struct ac_rtld_section
*s
= &part
->sections
[j
];
397 if (s
->is_rx
&& !s
->is_pasted_text
)
398 s
->offset
+= binary
->rx_size
;
402 binary
->rx_size
+= rx_size
;
410 ac_rtld_close(binary
);
414 void ac_rtld_close(struct ac_rtld_binary
*binary
)
416 for (unsigned i
= 0; i
< binary
->num_parts
; ++i
) {
417 struct ac_rtld_part
*part
= &binary
->parts
[i
];
418 free(part
->sections
);
422 util_dynarray_fini(&binary
->lds_symbols
);
424 binary
->parts
= NULL
;
425 binary
->num_parts
= 0;
428 static bool get_section_by_name(struct ac_rtld_part
*part
, const char *name
,
429 const char **data
, size_t *nbytes
)
431 for (unsigned i
= 0; i
< part
->num_sections
; ++i
) {
432 struct ac_rtld_section
*s
= &part
->sections
[i
];
433 if (s
->name
&& !strcmp(name
, s
->name
)) {
434 Elf_Scn
*target_scn
= elf_getscn(part
->elf
, i
);
435 Elf_Data
*target_data
= elf_getdata(target_scn
, NULL
);
437 report_elf_errorf("ac_rtld: get_section_by_name: elf_getdata");
441 *data
= target_data
->d_buf
;
442 *nbytes
= target_data
->d_size
;
449 bool ac_rtld_get_section_by_name(struct ac_rtld_binary
*binary
, const char *name
,
450 const char **data
, size_t *nbytes
)
452 assert(binary
->num_parts
== 1);
453 return get_section_by_name(&binary
->parts
[0], name
, data
, nbytes
);
456 bool ac_rtld_read_config(struct ac_rtld_binary
*binary
,
457 struct ac_shader_config
*config
)
459 for (unsigned i
= 0; i
< binary
->num_parts
; ++i
) {
460 struct ac_rtld_part
*part
= &binary
->parts
[i
];
461 const char *config_data
;
462 size_t config_nbytes
;
464 if (!get_section_by_name(part
, ".AMDGPU.config",
465 &config_data
, &config_nbytes
))
468 /* TODO: be precise about scratch use? */
469 struct ac_shader_config c
= {};
470 ac_parse_shader_binary_config(config_data
, config_nbytes
, true, &c
);
472 config
->num_sgprs
= MAX2(config
->num_sgprs
, c
.num_sgprs
);
473 config
->num_vgprs
= MAX2(config
->num_vgprs
, c
.num_vgprs
);
474 config
->spilled_sgprs
= MAX2(config
->spilled_sgprs
, c
.spilled_sgprs
);
475 config
->spilled_vgprs
= MAX2(config
->spilled_vgprs
, c
.spilled_vgprs
);
476 config
->scratch_bytes_per_wave
= MAX2(config
->scratch_bytes_per_wave
,
477 c
.scratch_bytes_per_wave
);
479 assert(i
== 0 || config
->float_mode
== c
.float_mode
);
480 config
->float_mode
= c
.float_mode
;
482 /* SPI_PS_INPUT_ENA/ADDR can't be combined. Only the value from
483 * the main shader part is used. */
484 assert(config
->spi_ps_input_ena
== 0 &&
485 config
->spi_ps_input_addr
== 0);
486 config
->spi_ps_input_ena
= c
.spi_ps_input_ena
;
487 config
->spi_ps_input_addr
= c
.spi_ps_input_addr
;
489 /* TODO: consistently use LDS symbols for this */
490 config
->lds_size
= MAX2(config
->lds_size
, c
.lds_size
);
492 /* TODO: Should we combine these somehow? It's currently only
493 * used for radeonsi's compute, where multiple parts aren't used. */
494 assert(config
->rsrc1
== 0 && config
->rsrc2
== 0);
495 config
->rsrc1
= c
.rsrc1
;
496 config
->rsrc2
= c
.rsrc2
;
502 static bool resolve_symbol(const struct ac_rtld_upload_info
*u
,
503 unsigned part_idx
, const Elf64_Sym
*sym
,
504 const char *name
, uint64_t *value
)
506 if (sym
->st_shndx
== SHN_UNDEF
) {
507 const struct ac_rtld_symbol
*lds_sym
=
508 find_symbol(&u
->binary
->lds_symbols
, name
, part_idx
);
511 *value
= lds_sym
->offset
;
515 /* TODO: resolve from other parts */
517 if (u
->get_external_symbol(u
->cb_data
, name
, value
))
520 report_errorf("symbol %s: unknown", name
);
524 struct ac_rtld_part
*part
= &u
->binary
->parts
[part_idx
];
525 if (sym
->st_shndx
>= part
->num_sections
) {
526 report_errorf("symbol %s: section out of bounds", name
);
530 struct ac_rtld_section
*s
= &part
->sections
[sym
->st_shndx
];
532 report_errorf("symbol %s: bad section", name
);
536 uint64_t section_base
= u
->rx_va
+ s
->offset
;
538 *value
= section_base
+ sym
->st_value
;
542 static bool apply_relocs(const struct ac_rtld_upload_info
*u
,
543 unsigned part_idx
, const Elf64_Shdr
*reloc_shdr
,
544 const Elf_Data
*reloc_data
)
546 #define report_if(cond) \
549 report_errorf(#cond); \
553 #define report_elf_if(cond) \
556 report_elf_errorf(#cond); \
561 struct ac_rtld_part
*part
= &u
->binary
->parts
[part_idx
];
562 Elf_Scn
*target_scn
= elf_getscn(part
->elf
, reloc_shdr
->sh_info
);
563 report_elf_if(!target_scn
);
565 Elf_Data
*target_data
= elf_getdata(target_scn
, NULL
);
566 report_elf_if(!target_data
);
568 Elf_Scn
*symbols_scn
= elf_getscn(part
->elf
, reloc_shdr
->sh_link
);
569 report_elf_if(!symbols_scn
);
571 Elf64_Shdr
*symbols_shdr
= elf64_getshdr(symbols_scn
);
572 report_elf_if(!symbols_shdr
);
573 uint32_t strtabidx
= symbols_shdr
->sh_link
;
575 Elf_Data
*symbols_data
= elf_getdata(symbols_scn
, NULL
);
576 report_elf_if(!symbols_data
);
578 const Elf64_Sym
*symbols
= symbols_data
->d_buf
;
579 size_t num_symbols
= symbols_data
->d_size
/ sizeof(Elf64_Sym
);
581 struct ac_rtld_section
*s
= &part
->sections
[reloc_shdr
->sh_info
];
582 report_if(!s
->is_rx
);
584 const char *orig_base
= target_data
->d_buf
;
585 char *dst_base
= u
->rx_ptr
+ s
->offset
;
586 uint64_t va_base
= u
->rx_va
+ s
->offset
;
588 Elf64_Rel
*rel
= reloc_data
->d_buf
;
589 size_t num_relocs
= reloc_data
->d_size
/ sizeof(*rel
);
590 for (size_t i
= 0; i
< num_relocs
; ++i
, ++rel
) {
591 size_t r_sym
= ELF64_R_SYM(rel
->r_info
);
592 unsigned r_type
= ELF64_R_TYPE(rel
->r_info
);
594 const char *orig_ptr
= orig_base
+ rel
->r_offset
;
595 char *dst_ptr
= dst_base
+ rel
->r_offset
;
596 uint64_t va
= va_base
+ rel
->r_offset
;
601 if (r_sym
== STN_UNDEF
) {
604 report_elf_if(r_sym
>= num_symbols
);
606 const Elf64_Sym
*sym
= &symbols
[r_sym
];
607 const char *symbol_name
=
608 elf_strptr(part
->elf
, strtabidx
, sym
->st_name
);
609 report_elf_if(!symbol_name
);
611 if (!resolve_symbol(u
, part_idx
, sym
, symbol_name
, &symbol
))
615 /* TODO: Should we also support .rela sections, where the
616 * addend is part of the relocation record? */
618 /* Load the addend from the ELF instead of the destination,
619 * because the destination may be in VRAM. */
622 case R_AMDGPU_ABS32_LO
:
623 case R_AMDGPU_ABS32_HI
:
625 case R_AMDGPU_REL32_LO
:
626 case R_AMDGPU_REL32_HI
:
627 addend
= *(const uint32_t *)orig_ptr
;
631 addend
= *(const uint64_t *)orig_ptr
;
634 report_errorf("unsupported r_type == %u", r_type
);
638 uint64_t abs
= symbol
+ addend
;
642 assert((uint32_t)abs
== abs
);
643 case R_AMDGPU_ABS32_LO
:
644 *(uint32_t *)dst_ptr
= util_cpu_to_le32(abs
);
646 case R_AMDGPU_ABS32_HI
:
647 *(uint32_t *)dst_ptr
= util_cpu_to_le32(abs
>> 32);
650 *(uint64_t *)dst_ptr
= util_cpu_to_le64(abs
);
653 assert((int64_t)(int32_t)(abs
- va
) == (int64_t)(abs
- va
));
654 case R_AMDGPU_REL32_LO
:
655 *(uint32_t *)dst_ptr
= util_cpu_to_le32(abs
- va
);
657 case R_AMDGPU_REL32_HI
:
658 *(uint32_t *)dst_ptr
= util_cpu_to_le32((abs
- va
) >> 32);
661 *(uint64_t *)dst_ptr
= util_cpu_to_le64(abs
- va
);
664 unreachable("bad r_type");
675 * Upload the binary or binaries to the provided GPU buffers, including
678 bool ac_rtld_upload(struct ac_rtld_upload_info
*u
)
680 #define report_if(cond) \
683 report_errorf(#cond); \
687 #define report_elf_if(cond) \
690 report_errorf(#cond); \
695 /* First pass: upload raw section data and lay out private LDS symbols. */
696 for (unsigned i
= 0; i
< u
->binary
->num_parts
; ++i
) {
697 struct ac_rtld_part
*part
= &u
->binary
->parts
[i
];
699 Elf_Scn
*section
= NULL
;
700 while ((section
= elf_nextscn(part
->elf
, section
))) {
701 Elf64_Shdr
*shdr
= elf64_getshdr(section
);
702 struct ac_rtld_section
*s
= &part
->sections
[elf_ndxscn(section
)];
707 report_if(shdr
->sh_type
!= SHT_PROGBITS
);
709 Elf_Data
*data
= elf_getdata(section
, NULL
);
710 report_elf_if(!data
|| data
->d_size
!= shdr
->sh_size
);
711 memcpy(u
->rx_ptr
+ s
->offset
, data
->d_buf
, shdr
->sh_size
);
715 if (u
->binary
->rx_end_markers
) {
716 uint32_t *dst
= (uint32_t *)(u
->rx_ptr
+ u
->binary
->rx_end_markers
);
717 for (unsigned i
= 0; i
< DEBUGGER_NUM_MARKERS
; ++i
)
718 *dst
++ = util_cpu_to_le32(DEBUGGER_END_OF_CODE_MARKER
);
721 /* Second pass: handle relocations, overwriting uploaded data where
723 for (unsigned i
= 0; i
< u
->binary
->num_parts
; ++i
) {
724 struct ac_rtld_part
*part
= &u
->binary
->parts
[i
];
725 Elf_Scn
*section
= NULL
;
726 while ((section
= elf_nextscn(part
->elf
, section
))) {
727 Elf64_Shdr
*shdr
= elf64_getshdr(section
);
728 if (shdr
->sh_type
== SHT_REL
) {
729 Elf_Data
*relocs
= elf_getdata(section
, NULL
);
730 report_elf_if(!relocs
|| relocs
->d_size
!= shdr
->sh_size
);
731 if (!apply_relocs(u
, i
, shdr
, relocs
))
733 } else if (shdr
->sh_type
== SHT_RELA
) {
734 report_errorf("SHT_RELA not supported");