radeonsi: add support for nir atomic_inc_wrap/atomic_dec_wrap
[mesa.git] / src / amd / common / ac_rtld.c
1 /*
2 * Copyright 2014-2019 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 */
23
24 #include "ac_rtld.h"
25
26 #include <gelf.h>
27 #include <libelf.h>
28 #include <stdarg.h>
29 #include <stdio.h>
30 #include <stdlib.h>
31 #include <string.h>
32
33 #include "ac_binary.h"
34 #include "ac_gpu_info.h"
35 #include "util/u_dynarray.h"
36 #include "util/u_math.h"
37
38 // Old distributions may not have this enum constant
39 #define MY_EM_AMDGPU 224
40
41 #ifndef STT_AMDGPU_LDS
42 #define STT_AMDGPU_LDS 13 // this is deprecated -- remove
43 #endif
44
45 #ifndef SHN_AMDGPU_LDS
46 #define SHN_AMDGPU_LDS 0xff00
47 #endif
48
49 #ifndef R_AMDGPU_NONE
50 #define R_AMDGPU_NONE 0
51 #define R_AMDGPU_ABS32_LO 1
52 #define R_AMDGPU_ABS32_HI 2
53 #define R_AMDGPU_ABS64 3
54 #define R_AMDGPU_REL32 4
55 #define R_AMDGPU_REL64 5
56 #define R_AMDGPU_ABS32 6
57 #define R_AMDGPU_GOTPCREL 7
58 #define R_AMDGPU_GOTPCREL32_LO 8
59 #define R_AMDGPU_GOTPCREL32_HI 9
60 #define R_AMDGPU_REL32_LO 10
61 #define R_AMDGPU_REL32_HI 11
62 #define R_AMDGPU_RELATIVE64 13
63 #endif
64
65 /* For the UMR disassembler. */
66 #define DEBUGGER_END_OF_CODE_MARKER 0xbf9f0000 /* invalid instruction */
67 #define DEBUGGER_NUM_MARKERS 5
68
69 struct ac_rtld_section {
70 bool is_rx : 1;
71 bool is_pasted_text : 1;
72 uint64_t offset;
73 const char *name;
74 };
75
76 struct ac_rtld_part {
77 Elf *elf;
78 struct ac_rtld_section *sections;
79 unsigned num_sections;
80 };
81
/**
 * Print a formatted ac_rtld error message to stderr.
 *
 * \param fmt printf-style format string
 * \param va  arguments for \p fmt (already started by the caller)
 */
static void report_erroraf(const char *fmt, va_list va)
{
	char *msg;
	/* Must use vasprintf: asprintf expects variadic arguments, and passing
	 * a va_list to it is undefined behavior (the va_list would be printed
	 * as if it were the first conversion's argument). */
	int ret = vasprintf(&msg, fmt, va);
	if (ret < 0)
		msg = "(vasprintf failed)";

	fprintf(stderr, "ac_rtld error: %s\n", msg);

	/* Only free when vasprintf actually allocated. */
	if (ret >= 0)
		free(msg);
}
94
/* PRINTFLIKE lets the compiler type-check format arguments like printf. */
static void report_errorf(const char *fmt, ...) PRINTFLIKE(1, 2);

/** Print a printf-style ac_rtld error message to stderr. */
static void report_errorf(const char *fmt, ...)
{
	va_list va;
	va_start(va, fmt);
	report_erroraf(fmt, va);
	va_end(va);
}
104
/* PRINTFLIKE lets the compiler type-check format arguments like printf. */
static void report_elf_errorf(const char *fmt, ...) PRINTFLIKE(1, 2);

/**
 * Print a printf-style error message to stderr, followed by the current
 * libelf error string for additional context.
 */
static void report_elf_errorf(const char *fmt, ...)
{
	va_list va;
	va_start(va, fmt);
	report_erroraf(fmt, va);
	va_end(va);

	fprintf(stderr, "ELF error: %s\n", elf_errmsg(elf_errno()));
}
116
117 /**
118 * Find a symbol in a dynarray of struct ac_rtld_symbol by \p name and shader
119 * \p part_idx.
120 */
121 static const struct ac_rtld_symbol *find_symbol(const struct util_dynarray *symbols,
122 const char *name, unsigned part_idx)
123 {
124 util_dynarray_foreach(symbols, struct ac_rtld_symbol, symbol) {
125 if ((symbol->part_idx == ~0u || symbol->part_idx == part_idx) &&
126 !strcmp(name, symbol->name))
127 return symbol;
128 }
129 return 0;
130 }
131
132 static int compare_symbol_by_align(const void *lhsp, const void *rhsp)
133 {
134 const struct ac_rtld_symbol *lhs = lhsp;
135 const struct ac_rtld_symbol *rhs = rhsp;
136 if (rhs->align > lhs->align)
137 return 1;
138 if (rhs->align < lhs->align)
139 return -1;
140 return 0;
141 }
142
143 /**
144 * Sort the given symbol list by decreasing alignment and assign offsets.
145 */
146 static bool layout_symbols(struct ac_rtld_symbol *symbols, unsigned num_symbols,
147 uint64_t *ptotal_size)
148 {
149 qsort(symbols, num_symbols, sizeof(*symbols), compare_symbol_by_align);
150
151 uint64_t total_size = *ptotal_size;
152
153 for (unsigned i = 0; i < num_symbols; ++i) {
154 struct ac_rtld_symbol *s = &symbols[i];
155 assert(util_is_power_of_two_nonzero(s->align));
156
157 total_size = align64(total_size, s->align);
158 s->offset = total_size;
159
160 if (total_size + s->size < total_size) {
161 report_errorf("%s: size overflow", __FUNCTION__);
162 return false;
163 }
164
165 total_size += s->size;
166 }
167
168 *ptotal_size = total_size;
169 return true;
170 }
171
172 /**
173 * Read LDS symbols from the given \p section of the ELF of \p part and append
174 * them to the LDS symbols list.
175 *
176 * Shared LDS symbols are filtered out.
177 */
178 static bool read_private_lds_symbols(struct ac_rtld_binary *binary,
179 unsigned part_idx,
180 Elf_Scn *section,
181 uint32_t *lds_end_align)
182 {
183 #define report_if(cond) \
184 do { \
185 if ((cond)) { \
186 report_errorf(#cond); \
187 return false; \
188 } \
189 } while (false)
190 #define report_elf_if(cond) \
191 do { \
192 if ((cond)) { \
193 report_elf_errorf(#cond); \
194 return false; \
195 } \
196 } while (false)
197
198 struct ac_rtld_part *part = &binary->parts[part_idx];
199 Elf64_Shdr *shdr = elf64_getshdr(section);
200 uint32_t strtabidx = shdr->sh_link;
201 Elf_Data *symbols_data = elf_getdata(section, NULL);
202 report_elf_if(!symbols_data);
203
204 const Elf64_Sym *symbol = symbols_data->d_buf;
205 size_t num_symbols = symbols_data->d_size / sizeof(Elf64_Sym);
206
207 for (size_t j = 0; j < num_symbols; ++j, ++symbol) {
208 struct ac_rtld_symbol s = {};
209
210 if (ELF64_ST_TYPE(symbol->st_info) == STT_AMDGPU_LDS) {
211 /* old-style LDS symbols from initial prototype -- remove eventually */
212 s.align = MIN2(1u << (symbol->st_other >> 3), 1u << 16);
213 } else if (symbol->st_shndx == SHN_AMDGPU_LDS) {
214 s.align = MIN2(symbol->st_value, 1u << 16);
215 report_if(!util_is_power_of_two_nonzero(s.align));
216 } else
217 continue;
218
219 report_if(symbol->st_size > 1u << 29);
220
221 s.name = elf_strptr(part->elf, strtabidx, symbol->st_name);
222 s.size = symbol->st_size;
223 s.part_idx = part_idx;
224
225 if (!strcmp(s.name, "__lds_end")) {
226 report_elf_if(s.size != 0);
227 *lds_end_align = MAX2(*lds_end_align, s.align);
228 continue;
229 }
230
231 const struct ac_rtld_symbol *shared =
232 find_symbol(&binary->lds_symbols, s.name, part_idx);
233 if (shared) {
234 report_elf_if(s.align > shared->align);
235 report_elf_if(s.size > shared->size);
236 continue;
237 }
238
239 util_dynarray_append(&binary->lds_symbols, struct ac_rtld_symbol, s);
240 }
241
242 return true;
243
244 #undef report_if
245 #undef report_elf_if
246 }
247
/**
 * Open a binary consisting of one or more shader parts.
 *
 * Opens each part's ELF, determines the placement of all read/execute
 * sections in a single memory image (".text" sections of all parts are
 * pasted back-to-back at the start; other sections follow), and lays out
 * shared and private LDS symbols, checking the result against the
 * hardware's LDS size limit.
 *
 * \param binary the uninitialized struct
 * \param i binary opening parameters
 */
bool ac_rtld_open(struct ac_rtld_binary *binary,
		  struct ac_rtld_open_info i)
{
	/* One of the libelf implementations
	 * (http://www.mr511.de/software/english.htm) requires calling
	 * elf_version() before elf_memory().
	 */
	elf_version(EV_CURRENT);

	memset(binary, 0, sizeof(*binary));
	memcpy(&binary->options, &i.options, sizeof(binary->options));
	binary->wave_size = i.wave_size;
	binary->num_parts = i.num_parts;
	binary->parts = calloc(sizeof(*binary->parts), i.num_parts);
	if (!binary->parts)
		return false;

	uint64_t pasted_text_size = 0;
	uint64_t rx_align = 1;
	uint64_t rx_size = 0;

#define report_if(cond) \
	do { \
		if ((cond)) { \
			report_errorf(#cond); \
			goto fail; \
		} \
	} while (false)
#define report_elf_if(cond) \
	do { \
		if ((cond)) { \
			report_elf_errorf(#cond); \
			goto fail; \
		} \
	} while (false)

	/* Copy and layout shared LDS symbols. */
	if (i.num_shared_lds_symbols) {
		if (!util_dynarray_resize(&binary->lds_symbols, struct ac_rtld_symbol,
					  i.num_shared_lds_symbols))
			goto fail;

		memcpy(binary->lds_symbols.data, i.shared_lds_symbols, binary->lds_symbols.size);
	}

	/* part_idx == ~0u marks a symbol as shared between all parts. */
	util_dynarray_foreach(&binary->lds_symbols, struct ac_rtld_symbol, symbol)
		symbol->part_idx = ~0u;

	unsigned max_lds_size = 64 * 1024;

	/* GFX6 and non-compute/non-fragment shader stages are limited to 32K
	 * of LDS. */
	if (i.info->chip_class == GFX6 ||
	    (i.shader_type != MESA_SHADER_COMPUTE &&
	     i.shader_type != MESA_SHADER_FRAGMENT))
		max_lds_size = 32 * 1024;

	uint64_t shared_lds_size = 0;
	if (!layout_symbols(binary->lds_symbols.data, i.num_shared_lds_symbols, &shared_lds_size))
		goto fail;

	if (shared_lds_size > max_lds_size) {
		fprintf(stderr, "ac_rtld error(1): too much LDS (used = %u, max = %u)\n",
			(unsigned)shared_lds_size, max_lds_size);
		goto fail;
	}
	binary->lds_size = shared_lds_size;

	/* First pass over all parts: open ELFs, pre-determine the placement of
	 * sections in the memory image, and collect and layout private LDS symbols. */
	uint32_t lds_end_align = 0;

	/* Reserve room for the s_sethalt instruction that ac_rtld_upload
	 * writes at the entry point. */
	if (binary->options.halt_at_entry)
		pasted_text_size += 4;

	for (unsigned part_idx = 0; part_idx < i.num_parts; ++part_idx) {
		struct ac_rtld_part *part = &binary->parts[part_idx];
		/* Symbols appended past this index are private to this part. */
		unsigned part_lds_symbols_begin =
			util_dynarray_num_elements(&binary->lds_symbols, struct ac_rtld_symbol);

		part->elf = elf_memory((char *)i.elf_ptrs[part_idx], i.elf_sizes[part_idx]);
		report_elf_if(!part->elf);

		const Elf64_Ehdr *ehdr = elf64_getehdr(part->elf);
		report_elf_if(!ehdr);
		report_if(ehdr->e_machine != MY_EM_AMDGPU);

		size_t section_str_index;
		size_t num_shdrs;
		report_elf_if(elf_getshdrstrndx(part->elf, &section_str_index) < 0);
		report_elf_if(elf_getshdrnum(part->elf, &num_shdrs) < 0);

		part->num_sections = num_shdrs;
		part->sections = calloc(sizeof(*part->sections), num_shdrs);
		report_if(!part->sections);

		Elf_Scn *section = NULL;
		while ((section = elf_nextscn(part->elf, section))) {
			Elf64_Shdr *shdr = elf64_getshdr(section);
			struct ac_rtld_section *s = &part->sections[elf_ndxscn(section)];
			s->name = elf_strptr(part->elf, section_str_index, shdr->sh_name);
			report_elf_if(!s->name);

			/* Cannot actually handle linked objects yet */
			report_elf_if(shdr->sh_addr != 0);

			/* Alignment must be 0 or a power of two */
			report_elf_if(shdr->sh_addralign & (shdr->sh_addralign - 1));
			uint64_t sh_align = MAX2(shdr->sh_addralign, 1);

			if (shdr->sh_flags & SHF_ALLOC &&
			    shdr->sh_type != SHT_NOTE) {
				report_if(shdr->sh_flags & SHF_WRITE);

				s->is_rx = true;

				if (shdr->sh_flags & SHF_EXECINSTR) {
					/* Instruction streams must be a multiple of 4 bytes. */
					report_elf_if(shdr->sh_size & 3);

					if (!strcmp(s->name, ".text"))
						s->is_pasted_text = true;
				}

				if (s->is_pasted_text) {
					/* .text sections of all parts are concatenated at the
					 * start of the image. */
					s->offset = pasted_text_size;
					pasted_text_size += shdr->sh_size;
				} else {
					/* Other rx sections are placed after the pasted text;
					 * their offsets are made absolute in the second pass. */
					rx_align = align(rx_align, sh_align);
					rx_size = align(rx_size, sh_align);
					s->offset = rx_size;
					rx_size += shdr->sh_size;
				}
			} else if (shdr->sh_type == SHT_SYMTAB) {
				if (!read_private_lds_symbols(binary, part_idx, section, &lds_end_align))
					goto fail;
			}
		}

		/* Lay out this part's private LDS symbols after the shared ones;
		 * parts overlay each other, so only the maximum total counts. */
		uint64_t part_lds_size = shared_lds_size;
		if (!layout_symbols(
			util_dynarray_element(&binary->lds_symbols, struct ac_rtld_symbol, part_lds_symbols_begin),
			util_dynarray_num_elements(&binary->lds_symbols, struct ac_rtld_symbol) - part_lds_symbols_begin,
			&part_lds_size))
			goto fail;
		binary->lds_size = MAX2(binary->lds_size, part_lds_size);
	}

	/* Reserve space after the pasted text for the end-of-code markers
	 * consumed by the UMR disassembler. */
	binary->rx_end_markers = pasted_text_size;
	pasted_text_size += 4 * DEBUGGER_NUM_MARKERS;

	/* __lds_end is a special symbol that points at the end of the memory
	 * occupied by other LDS symbols. Its alignment is taken as the
	 * maximum of its alignment over all shader parts where it occurs.
	 */
	if (lds_end_align) {
		binary->lds_size = align(binary->lds_size, lds_end_align);

		struct ac_rtld_symbol *lds_end =
			util_dynarray_grow(&binary->lds_symbols, struct ac_rtld_symbol, 1);
		lds_end->name = "__lds_end";
		lds_end->size = 0;
		lds_end->align = lds_end_align;
		lds_end->offset = binary->lds_size;
		lds_end->part_idx = ~0u;
	}

	if (binary->lds_size > max_lds_size) {
		fprintf(stderr, "ac_rtld error(2): too much LDS (used = %u, max = %u)\n",
			(unsigned)binary->lds_size, max_lds_size);
		goto fail;
	}

	/* Second pass: Adjust offsets of non-pasted text sections. */
	binary->rx_size = pasted_text_size;
	binary->rx_size = align(binary->rx_size, rx_align);

	for (unsigned part_idx = 0; part_idx < i.num_parts; ++part_idx) {
		struct ac_rtld_part *part = &binary->parts[part_idx];
		size_t num_shdrs;
		elf_getshdrnum(part->elf, &num_shdrs);

		for (unsigned j = 0; j < num_shdrs; ++j) {
			struct ac_rtld_section *s = &part->sections[j];
			if (s->is_rx && !s->is_pasted_text)
				s->offset += binary->rx_size;
		}
	}

	binary->rx_size += rx_size;

	if (i.info->chip_class >= GFX10) {
		/* In gfx10, the SQ fetches up to 3 cache lines of 16 dwords
		 * ahead of the PC, configurable by SH_MEM_CONFIG and
		 * S_INST_PREFETCH. This can cause two issues:
		 *
		 * (1) Crossing a page boundary to an unmapped page. The logic
		 * does not distinguish between a required fetch and a "mere"
		 * prefetch and will fault.
		 *
		 * (2) Prefetching instructions that will be changed for a
		 * different shader.
		 *
		 * (2) is not currently an issue because we flush the I$ at IB
		 * boundaries, but (1) needs to be addressed. Due to buffer
		 * suballocation, we just play it safe.
		 */
		binary->rx_size = align(binary->rx_size + 3 * 64, 64);
	}

	return true;

#undef report_if
#undef report_elf_if

fail:
	ac_rtld_close(binary);
	return false;
}
470
471 void ac_rtld_close(struct ac_rtld_binary *binary)
472 {
473 for (unsigned i = 0; i < binary->num_parts; ++i) {
474 struct ac_rtld_part *part = &binary->parts[i];
475 free(part->sections);
476 elf_end(part->elf);
477 }
478
479 util_dynarray_fini(&binary->lds_symbols);
480 free(binary->parts);
481 binary->parts = NULL;
482 binary->num_parts = 0;
483 }
484
485 static bool get_section_by_name(struct ac_rtld_part *part, const char *name,
486 const char **data, size_t *nbytes)
487 {
488 for (unsigned i = 0; i < part->num_sections; ++i) {
489 struct ac_rtld_section *s = &part->sections[i];
490 if (s->name && !strcmp(name, s->name)) {
491 Elf_Scn *target_scn = elf_getscn(part->elf, i);
492 Elf_Data *target_data = elf_getdata(target_scn, NULL);
493 if (!target_data) {
494 report_elf_errorf("ac_rtld: get_section_by_name: elf_getdata");
495 return false;
496 }
497
498 *data = target_data->d_buf;
499 *nbytes = target_data->d_size;
500 return true;
501 }
502 }
503 return false;
504 }
505
506 bool ac_rtld_get_section_by_name(struct ac_rtld_binary *binary, const char *name,
507 const char **data, size_t *nbytes)
508 {
509 assert(binary->num_parts == 1);
510 return get_section_by_name(&binary->parts[0], name, data, nbytes);
511 }
512
/**
 * Combine the .AMDGPU.config sections of all shader parts into \p config.
 *
 * Register counts, spill counts, scratch and LDS sizes are combined by
 * maximum; fields that cannot meaningfully be combined (float mode,
 * SPI_PS_INPUT_ENA/ADDR, rsrc words) are asserted consistent and copied.
 *
 * \param config caller-zeroed output; see the asserts below
 * \return false if any part lacks a .AMDGPU.config section
 */
bool ac_rtld_read_config(struct ac_rtld_binary *binary,
			 struct ac_shader_config *config)
{
	for (unsigned i = 0; i < binary->num_parts; ++i) {
		struct ac_rtld_part *part = &binary->parts[i];
		const char *config_data;
		size_t config_nbytes;

		if (!get_section_by_name(part, ".AMDGPU.config",
					 &config_data, &config_nbytes))
			return false;

		/* TODO: be precise about scratch use? */
		struct ac_shader_config c = {};
		ac_parse_shader_binary_config(config_data, config_nbytes,
					      binary->wave_size, true, &c);

		config->num_sgprs = MAX2(config->num_sgprs, c.num_sgprs);
		config->num_vgprs = MAX2(config->num_vgprs, c.num_vgprs);
		config->spilled_sgprs = MAX2(config->spilled_sgprs, c.spilled_sgprs);
		config->spilled_vgprs = MAX2(config->spilled_vgprs, c.spilled_vgprs);
		config->scratch_bytes_per_wave = MAX2(config->scratch_bytes_per_wave,
						      c.scratch_bytes_per_wave);

		/* All parts must agree on the float mode. */
		assert(i == 0 || config->float_mode == c.float_mode);
		config->float_mode = c.float_mode;

		/* SPI_PS_INPUT_ENA/ADDR can't be combined. Only the value from
		 * the main shader part is used. */
		assert(config->spi_ps_input_ena == 0 &&
		       config->spi_ps_input_addr == 0);
		config->spi_ps_input_ena = c.spi_ps_input_ena;
		config->spi_ps_input_addr = c.spi_ps_input_addr;

		/* TODO: consistently use LDS symbols for this */
		config->lds_size = MAX2(config->lds_size, c.lds_size);

		/* TODO: Should we combine these somehow? It's currently only
		 * used for radeonsi's compute, where multiple parts aren't used. */
		assert(config->rsrc1 == 0 && config->rsrc2 == 0);
		config->rsrc1 = c.rsrc1;
		config->rsrc2 = c.rsrc2;
	}

	return true;
}
559
/**
 * Resolve ELF symbol \p sym of shader part \p part_idx to a value in \p value.
 *
 * Undefined and LDS symbols are first looked up in the binary's LDS symbol
 * list (resolving to an LDS offset), then via the caller-provided
 * get_external_symbol callback. Defined symbols resolve to their virtual
 * address within the uploaded read/execute image.
 */
static bool resolve_symbol(const struct ac_rtld_upload_info *u,
			   unsigned part_idx, const Elf64_Sym *sym,
			   const char *name, uint64_t *value)
{
	/* TODO: properly disentangle the undef and the LDS cases once
	 * STT_AMDGPU_LDS is retired. */
	if (sym->st_shndx == SHN_UNDEF || sym->st_shndx == SHN_AMDGPU_LDS) {
		const struct ac_rtld_symbol *lds_sym =
			find_symbol(&u->binary->lds_symbols, name, part_idx);

		if (lds_sym) {
			/* LDS symbols resolve to their offset within LDS. */
			*value = lds_sym->offset;
			return true;
		}

		/* TODO: resolve from other parts */

		if (u->get_external_symbol(u->cb_data, name, value))
			return true;

		report_errorf("symbol %s: unknown", name);
		return false;
	}

	struct ac_rtld_part *part = &u->binary->parts[part_idx];
	if (sym->st_shndx >= part->num_sections) {
		report_errorf("symbol %s: section out of bounds", name);
		return false;
	}

	struct ac_rtld_section *s = &part->sections[sym->st_shndx];
	if (!s->is_rx) {
		/* Only read/execute sections have an address in the uploaded image. */
		report_errorf("symbol %s: bad section", name);
		return false;
	}

	/* Section's base virtual address plus the symbol's offset within it. */
	uint64_t section_base = u->rx_va + s->offset;

	*value = section_base + sym->st_value;
	return true;
}
601
/**
 * Apply the relocations of SHT_REL section \p reloc_shdr of shader part
 * \p part_idx to the uploaded copy of its target section.
 *
 * Addends are read from the original ELF data rather than the destination,
 * since the destination may live in VRAM.
 */
static bool apply_relocs(const struct ac_rtld_upload_info *u,
			 unsigned part_idx, const Elf64_Shdr *reloc_shdr,
			 const Elf_Data *reloc_data)
{
#define report_if(cond) \
	do { \
		if ((cond)) { \
			report_errorf(#cond); \
			return false; \
		} \
	} while (false)
#define report_elf_if(cond) \
	do { \
		if ((cond)) { \
			report_elf_errorf(#cond); \
			return false; \
		} \
	} while (false)

	struct ac_rtld_part *part = &u->binary->parts[part_idx];
	/* sh_info of a relocation section is the target section's index. */
	Elf_Scn *target_scn = elf_getscn(part->elf, reloc_shdr->sh_info);
	report_elf_if(!target_scn);

	Elf_Data *target_data = elf_getdata(target_scn, NULL);
	report_elf_if(!target_data);

	/* sh_link of a relocation section is the associated symbol table. */
	Elf_Scn *symbols_scn = elf_getscn(part->elf, reloc_shdr->sh_link);
	report_elf_if(!symbols_scn);

	Elf64_Shdr *symbols_shdr = elf64_getshdr(symbols_scn);
	report_elf_if(!symbols_shdr);
	uint32_t strtabidx = symbols_shdr->sh_link;

	Elf_Data *symbols_data = elf_getdata(symbols_scn, NULL);
	report_elf_if(!symbols_data);

	const Elf64_Sym *symbols = symbols_data->d_buf;
	size_t num_symbols = symbols_data->d_size / sizeof(Elf64_Sym);

	struct ac_rtld_section *s = &part->sections[reloc_shdr->sh_info];
	report_if(!s->is_rx);

	const char *orig_base = target_data->d_buf; /* CPU-side copy from the ELF */
	char *dst_base = u->rx_ptr + s->offset;     /* uploaded destination */
	uint64_t va_base = u->rx_va + s->offset;    /* destination GPU virtual address */

	Elf64_Rel *rel = reloc_data->d_buf;
	size_t num_relocs = reloc_data->d_size / sizeof(*rel);
	for (size_t i = 0; i < num_relocs; ++i, ++rel) {
		size_t r_sym = ELF64_R_SYM(rel->r_info);
		unsigned r_type = ELF64_R_TYPE(rel->r_info);

		const char *orig_ptr = orig_base + rel->r_offset;
		char *dst_ptr = dst_base + rel->r_offset;
		uint64_t va = va_base + rel->r_offset;

		uint64_t symbol;
		uint64_t addend;

		if (r_sym == STN_UNDEF) {
			symbol = 0;
		} else {
			report_elf_if(r_sym >= num_symbols);

			const Elf64_Sym *sym = &symbols[r_sym];
			const char *symbol_name =
				elf_strptr(part->elf, strtabidx, sym->st_name);
			report_elf_if(!symbol_name);

			if (!resolve_symbol(u, part_idx, sym, symbol_name, &symbol))
				return false;
		}

		/* TODO: Should we also support .rela sections, where the
		 * addend is part of the relocation record? */

		/* Load the addend from the ELF instead of the destination,
		 * because the destination may be in VRAM. */
		switch (r_type) {
		case R_AMDGPU_ABS32:
		case R_AMDGPU_ABS32_LO:
		case R_AMDGPU_ABS32_HI:
		case R_AMDGPU_REL32:
		case R_AMDGPU_REL32_LO:
		case R_AMDGPU_REL32_HI:
			addend = *(const uint32_t *)orig_ptr;
			break;
		case R_AMDGPU_ABS64:
		case R_AMDGPU_REL64:
			addend = *(const uint64_t *)orig_ptr;
			break;
		default:
			report_errorf("unsupported r_type == %u", r_type);
			return false;
		}

		uint64_t abs = symbol + addend;

		switch (r_type) {
		case R_AMDGPU_ABS32:
			assert((uint32_t)abs == abs);
			/* fallthrough: after the range check, ABS32 writes the low 32 bits */
		case R_AMDGPU_ABS32_LO:
			*(uint32_t *)dst_ptr = util_cpu_to_le32(abs);
			break;
		case R_AMDGPU_ABS32_HI:
			*(uint32_t *)dst_ptr = util_cpu_to_le32(abs >> 32);
			break;
		case R_AMDGPU_ABS64:
			*(uint64_t *)dst_ptr = util_cpu_to_le64(abs);
			break;
		case R_AMDGPU_REL32:
			assert((int64_t)(int32_t)(abs - va) == (int64_t)(abs - va));
			/* fallthrough: after the range check, REL32 writes the low 32 bits */
		case R_AMDGPU_REL32_LO:
			*(uint32_t *)dst_ptr = util_cpu_to_le32(abs - va);
			break;
		case R_AMDGPU_REL32_HI:
			*(uint32_t *)dst_ptr = util_cpu_to_le32((abs - va) >> 32);
			break;
		case R_AMDGPU_REL64:
			*(uint64_t *)dst_ptr = util_cpu_to_le64(abs - va);
			break;
		default:
			unreachable("bad r_type");
		}
	}

	return true;

#undef report_if
#undef report_elf_if
}
733
734 /**
735 * Upload the binary or binaries to the provided GPU buffers, including
736 * relocations.
737 */
738 bool ac_rtld_upload(struct ac_rtld_upload_info *u)
739 {
740 #define report_if(cond) \
741 do { \
742 if ((cond)) { \
743 report_errorf(#cond); \
744 return false; \
745 } \
746 } while (false)
747 #define report_elf_if(cond) \
748 do { \
749 if ((cond)) { \
750 report_errorf(#cond); \
751 return false; \
752 } \
753 } while (false)
754
755 if (u->binary->options.halt_at_entry) {
756 /* s_sethalt 1 */
757 *(uint32_t *)u->rx_ptr = util_cpu_to_le32(0xbf8d0001);
758 }
759
760 /* First pass: upload raw section data and lay out private LDS symbols. */
761 for (unsigned i = 0; i < u->binary->num_parts; ++i) {
762 struct ac_rtld_part *part = &u->binary->parts[i];
763
764 Elf_Scn *section = NULL;
765 while ((section = elf_nextscn(part->elf, section))) {
766 Elf64_Shdr *shdr = elf64_getshdr(section);
767 struct ac_rtld_section *s = &part->sections[elf_ndxscn(section)];
768
769 if (!s->is_rx)
770 continue;
771
772 report_if(shdr->sh_type != SHT_PROGBITS);
773
774 Elf_Data *data = elf_getdata(section, NULL);
775 report_elf_if(!data || data->d_size != shdr->sh_size);
776 memcpy(u->rx_ptr + s->offset, data->d_buf, shdr->sh_size);
777 }
778 }
779
780 if (u->binary->rx_end_markers) {
781 uint32_t *dst = (uint32_t *)(u->rx_ptr + u->binary->rx_end_markers);
782 for (unsigned i = 0; i < DEBUGGER_NUM_MARKERS; ++i)
783 *dst++ = util_cpu_to_le32(DEBUGGER_END_OF_CODE_MARKER);
784 }
785
786 /* Second pass: handle relocations, overwriting uploaded data where
787 * appropriate. */
788 for (unsigned i = 0; i < u->binary->num_parts; ++i) {
789 struct ac_rtld_part *part = &u->binary->parts[i];
790 Elf_Scn *section = NULL;
791 while ((section = elf_nextscn(part->elf, section))) {
792 Elf64_Shdr *shdr = elf64_getshdr(section);
793 if (shdr->sh_type == SHT_REL) {
794 Elf_Data *relocs = elf_getdata(section, NULL);
795 report_elf_if(!relocs || relocs->d_size != shdr->sh_size);
796 if (!apply_relocs(u, i, shdr, relocs))
797 return false;
798 } else if (shdr->sh_type == SHT_RELA) {
799 report_errorf("SHT_RELA not supported");
800 return false;
801 }
802 }
803 }
804
805 return true;
806
807 #undef report_if
808 #undef report_elf_if
809 }