radeonsi: remove the always_nir option
[mesa.git] / src / gallium / drivers / radeonsi / si_compute.c
1 /*
2 * Copyright 2013 Advanced Micro Devices, Inc.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * on the rights to use, copy, modify, merge, publish, distribute, sub
9 * license, and/or sell copies of the Software, and to permit persons to whom
10 * the Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
23 *
24 */
25
26 #include "nir/tgsi_to_nir.h"
27 #include "tgsi/tgsi_parse.h"
28 #include "util/u_async_debug.h"
29 #include "util/u_memory.h"
30 #include "util/u_upload_mgr.h"
31
32 #include "ac_rtld.h"
33 #include "amd_kernel_code_t.h"
34 #include "si_build_pm4.h"
35 #include "si_compute.h"
36
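/* Debug logging gated on the "compute" debug flag; used throughout this file,
 * e.g.:
 *   COMPUTE_DBG(sscreen, "input %u : %u\n", i, kernel_args[i]);
 */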
37 #define COMPUTE_DBG(sscreen, fmt, args...) \
38 do { \
39 if ((sscreen->debug_flags & DBG(COMPUTE))) fprintf(stderr, fmt, ##args); \
40 } while (0)
41
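/* Mirrors hsa_kernel_dispatch_packet_t from the HSA ABI; native
 * (PIPE_SHADER_IR_NATIVE) kernels that request a dispatch pointer are given
 * the GPU address of one of these (see si_setup_user_sgprs_co_v2). */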
42 struct dispatch_packet {
43 uint16_t header;
44 uint16_t setup;
45 uint16_t workgroup_size_x;
46 uint16_t workgroup_size_y;
47 uint16_t workgroup_size_z;
48 uint16_t reserved0;
49 uint32_t grid_size_x;
50 uint32_t grid_size_y;
51 uint32_t grid_size_z;
52 uint32_t private_segment_size;
53 uint32_t group_segment_size;
54 uint64_t kernel_object;
55 uint64_t kernarg_address;
56 uint64_t reserved2;
57 };
58
59 static const amd_kernel_code_t *si_compute_get_code_object(
60 const struct si_compute *program,
61 uint64_t symbol_offset)
62 {
63 const struct si_shader_selector *sel = &program->sel;
64
65 if (program->ir_type != PIPE_SHADER_IR_NATIVE)
66 return NULL;
67
68 struct ac_rtld_binary rtld;
69 if (!ac_rtld_open(&rtld, (struct ac_rtld_open_info){
70 .info = &sel->screen->info,
71 .shader_type = MESA_SHADER_COMPUTE,
72 .wave_size = sel->screen->compute_wave_size,
73 .num_parts = 1,
74 .elf_ptrs = &program->shader.binary.elf_buffer,
75 .elf_sizes = &program->shader.binary.elf_size }))
76 return NULL;
77
78 const amd_kernel_code_t *result = NULL;
79 const char *text;
80 size_t size;
81 if (!ac_rtld_get_section_by_name(&rtld, ".text", &text, &size))
82 goto out;
83
84 if (symbol_offset + sizeof(amd_kernel_code_t) > size)
85 goto out;
86
87 result = (const amd_kernel_code_t*)(text + symbol_offset);
88
89 out:
90 ac_rtld_close(&rtld);
91 return result;
92 }
93
94 static void code_object_to_config(const amd_kernel_code_t *code_object,
95 				  struct ac_shader_config *out_config)
{
97 uint32_t rsrc1 = code_object->compute_pgm_resource_registers;
98 uint32_t rsrc2 = code_object->compute_pgm_resource_registers >> 32;
99 out_config->num_sgprs = code_object->wavefront_sgpr_count;
100 out_config->num_vgprs = code_object->workitem_vgpr_count;
101 out_config->float_mode = G_00B028_FLOAT_MODE(rsrc1);
102 out_config->rsrc1 = rsrc1;
103 out_config->lds_size = MAX2(out_config->lds_size, G_00B84C_LDS_SIZE(rsrc2));
104 out_config->rsrc2 = rsrc2;
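	/* The code object stores scratch per work-item; scale by the 64 lanes
	 * of a wave and round up to the hardware's 1 KiB per-wave granularity. */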
105 out_config->scratch_bytes_per_wave =
106 align(code_object->workitem_private_segment_byte_size * 64, 1024);
107 }
108
109 /* Asynchronous compute shader compilation. */
110 static void si_create_compute_state_async(void *job, int thread_index)
111 {
112 struct si_compute *program = (struct si_compute *)job;
113 struct si_shader_selector *sel = &program->sel;
114 struct si_shader *shader = &program->shader;
115 struct ac_llvm_compiler *compiler;
116 struct pipe_debug_callback *debug = &sel->compiler_ctx_state.debug;
117 struct si_screen *sscreen = sel->screen;
118
119 assert(!debug->debug_message || debug->async);
120 assert(thread_index >= 0);
121 assert(thread_index < ARRAY_SIZE(sscreen->compiler));
122 compiler = &sscreen->compiler[thread_index];
123
124 if (program->ir_type == PIPE_SHADER_IR_TGSI) {
125 tgsi_scan_shader(sel->tokens, &sel->info);
126 } else {
127 assert(program->ir_type == PIPE_SHADER_IR_NIR);
128
129 si_nir_opts(sel->nir);
130 si_nir_scan_shader(sel->nir, &sel->info);
131 si_lower_nir(sel, sscreen->compute_wave_size);
132 }
133
134 /* Store the declared LDS size into tgsi_shader_info for the shader
135 * cache to include it.
136 */
137 sel->info.properties[TGSI_PROPERTY_CS_LOCAL_SIZE] = program->local_size;
138
139 si_get_active_slot_masks(&sel->info,
140 &sel->active_const_and_shader_buffers,
141 &sel->active_samplers_and_images);
142
143 program->shader.is_monolithic = true;
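	/* The block size must be passed in user SGPRs when the shader uses it
	 * but no fixed block width was declared at compile time. */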
144 program->reads_variable_block_size =
145 sel->info.uses_block_size &&
146 sel->info.properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] == 0;
147 program->num_cs_user_data_dwords =
148 sel->info.properties[TGSI_PROPERTY_CS_USER_DATA_COMPONENTS_AMD];
149
150 void *ir_binary = si_get_ir_binary(sel);
151
152 /* Try to load the shader from the shader cache. */
153 mtx_lock(&sscreen->shader_cache_mutex);
154
155 if (ir_binary &&
156 si_shader_cache_load_shader(sscreen, ir_binary, shader)) {
157 mtx_unlock(&sscreen->shader_cache_mutex);
158
159 si_shader_dump_stats_for_shader_db(sscreen, shader, debug);
160 si_shader_dump(sscreen, shader, debug, stderr, true);
161
162 if (!si_shader_binary_upload(sscreen, shader, 0))
163 program->shader.compilation_failed = true;
164 } else {
165 mtx_unlock(&sscreen->shader_cache_mutex);
166
167 if (!si_shader_create(sscreen, compiler, &program->shader, debug)) {
168 program->shader.compilation_failed = true;
169
170 if (program->ir_type == PIPE_SHADER_IR_TGSI)
171 FREE(sel->tokens);
172 return;
173 }
174
175 bool scratch_enabled = shader->config.scratch_bytes_per_wave > 0;
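		/* User SGPR layout, matching si_setup_tgsi_user_data(): resource
		 * descriptors first, then an optional 3-dword grid size, an
		 * optional 3-dword block size, then num_cs_user_data_dwords of
		 * internal user data. */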
176 unsigned user_sgprs = SI_NUM_RESOURCE_SGPRS +
177 (sel->info.uses_grid_size ? 3 : 0) +
178 (program->reads_variable_block_size ? 3 : 0) +
179 program->num_cs_user_data_dwords;
180
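		/* VGPRS is in allocation granules: 8 VGPRs per unit in wave32
		 * mode, 4 per unit in wave64 mode. */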
181 shader->config.rsrc1 =
182 S_00B848_VGPRS((shader->config.num_vgprs - 1) /
183 (sscreen->compute_wave_size == 32 ? 8 : 4)) |
184 S_00B848_DX10_CLAMP(1) |
185 S_00B848_MEM_ORDERED(sscreen->info.chip_class >= GFX10) |
186 S_00B848_WGP_MODE(sscreen->info.chip_class >= GFX10) |
187 S_00B848_FLOAT_MODE(shader->config.float_mode);
188
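		/* GFX10+ allocates a fixed number of SGPRs per wave, so RSRC1 no
		 * longer carries an SGPRS field. */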
189 if (sscreen->info.chip_class < GFX10) {
190 shader->config.rsrc1 |=
191 S_00B848_SGPRS((shader->config.num_sgprs - 1) / 8);
192 }
193
194 shader->config.rsrc2 =
195 S_00B84C_USER_SGPR(user_sgprs) |
196 S_00B84C_SCRATCH_EN(scratch_enabled) |
197 S_00B84C_TGID_X_EN(sel->info.uses_block_id[0]) |
198 S_00B84C_TGID_Y_EN(sel->info.uses_block_id[1]) |
199 S_00B84C_TGID_Z_EN(sel->info.uses_block_id[2]) |
200 S_00B84C_TIDIG_COMP_CNT(sel->info.uses_thread_id[2] ? 2 :
201 sel->info.uses_thread_id[1] ? 1 : 0) |
202 S_00B84C_LDS_SIZE(shader->config.lds_size);
203
204 if (ir_binary) {
205 mtx_lock(&sscreen->shader_cache_mutex);
206 if (!si_shader_cache_insert_shader(sscreen, ir_binary, shader, true))
207 FREE(ir_binary);
208 mtx_unlock(&sscreen->shader_cache_mutex);
209 }
210 }
211
212 if (program->ir_type == PIPE_SHADER_IR_TGSI)
213 FREE(sel->tokens);
214 }
215
216 static void *si_create_compute_state(
217 struct pipe_context *ctx,
218 const struct pipe_compute_state *cso)
219 {
220 struct si_context *sctx = (struct si_context *)ctx;
221 struct si_screen *sscreen = (struct si_screen *)ctx->screen;
222 	struct si_compute *program = CALLOC_STRUCT(si_compute);
	if (!program)
		return NULL;
223 	struct si_shader_selector *sel = &program->sel;
224
225 pipe_reference_init(&sel->reference, 1);
226 sel->type = PIPE_SHADER_COMPUTE;
227 sel->screen = sscreen;
228 program->shader.selector = &program->sel;
229 program->ir_type = cso->ir_type;
230 program->local_size = cso->req_local_mem;
231 program->private_size = cso->req_private_mem;
232 program->input_size = cso->req_input_mem;
233
234 if (cso->ir_type != PIPE_SHADER_IR_NATIVE) {
235 if (sscreen->options.enable_nir &&
236 cso->ir_type == PIPE_SHADER_IR_TGSI) {
237 program->ir_type = PIPE_SHADER_IR_NIR;
238 sel->nir = tgsi_to_nir(cso->prog, ctx->screen);
239 } else if (cso->ir_type == PIPE_SHADER_IR_TGSI) {
240 sel->tokens = tgsi_dup_tokens(cso->prog);
241 if (!sel->tokens) {
242 FREE(program);
243 return NULL;
244 }
245 } else {
246 assert(cso->ir_type == PIPE_SHADER_IR_NIR);
247 sel->nir = (struct nir_shader *) cso->prog;
248 }
249
250 sel->compiler_ctx_state.debug = sctx->debug;
251 sel->compiler_ctx_state.is_debug_context = sctx->is_debug;
252 p_atomic_inc(&sscreen->num_shaders_created);
253
254 si_schedule_initial_compile(sctx, PIPE_SHADER_COMPUTE,
255 &sel->ready,
256 &sel->compiler_ctx_state,
257 program, si_create_compute_state_async);
258 } else {
259 const struct pipe_llvm_program_header *header;
260 const char *code;
261 header = cso->prog;
262 code = cso->prog + sizeof(struct pipe_llvm_program_header);
263
264 program->shader.binary.elf_size = header->num_bytes;
265 program->shader.binary.elf_buffer = malloc(header->num_bytes);
266 if (!program->shader.binary.elf_buffer) {
267 FREE(program);
268 return NULL;
269 }
270 memcpy((void *)program->shader.binary.elf_buffer, code, header->num_bytes);
271
272 		const amd_kernel_code_t *code_object =
273 			si_compute_get_code_object(program, 0);
		if (!code_object) {
			fprintf(stderr, "Failed to read the amd_kernel_code_t header\n");
			free((void *)program->shader.binary.elf_buffer);
			FREE(program);
			return NULL;
		}
274 		code_object_to_config(code_object, &program->shader.config);
275
276 si_shader_dump(sctx->screen, &program->shader, &sctx->debug, stderr, true);
277 if (!si_shader_binary_upload(sctx->screen, &program->shader, 0)) {
278 fprintf(stderr, "LLVM failed to upload shader\n");
279 free((void *)program->shader.binary.elf_buffer);
280 FREE(program);
281 return NULL;
282 }
283 }
284
285 return program;
286 }
287
288 static void si_bind_compute_state(struct pipe_context *ctx, void *state)
289 {
290 struct si_context *sctx = (struct si_context*)ctx;
291 struct si_compute *program = (struct si_compute*)state;
292 struct si_shader_selector *sel = &program->sel;
293
294 sctx->cs_shader_state.program = program;
295 if (!program)
296 return;
297
298 /* Wait because we need active slot usage masks. */
299 if (program->ir_type != PIPE_SHADER_IR_NATIVE)
300 util_queue_fence_wait(&sel->ready);
301
302 si_set_active_descriptors(sctx,
303 SI_DESCS_FIRST_COMPUTE +
304 SI_SHADER_DESCS_CONST_AND_SHADER_BUFFERS,
305 sel->active_const_and_shader_buffers);
306 si_set_active_descriptors(sctx,
307 SI_DESCS_FIRST_COMPUTE +
308 SI_SHADER_DESCS_SAMPLERS_AND_IMAGES,
309 sel->active_samplers_and_images);
310 }
311
312 static void si_set_global_binding(
313 struct pipe_context *ctx, unsigned first, unsigned n,
314 struct pipe_resource **resources,
315 uint32_t **handles)
316 {
317 unsigned i;
318 struct si_context *sctx = (struct si_context*)ctx;
319 struct si_compute *program = sctx->cs_shader_state.program;
320
321 assert(first + n <= MAX_GLOBAL_BUFFERS);
322
323 if (!resources) {
324 for (i = 0; i < n; i++) {
325 pipe_resource_reference(&program->global_buffers[first + i], NULL);
326 }
327 return;
328 }
329
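	/* Handles are in/out: each arrives holding a little-endian 32-bit
	 * offset into its resource and is rewritten with the little-endian
	 * 64-bit GPU virtual address of that location. */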
330 for (i = 0; i < n; i++) {
331 uint64_t va;
332 uint32_t offset;
333 pipe_resource_reference(&program->global_buffers[first + i], resources[i]);
334 va = si_resource(resources[i])->gpu_address;
335 offset = util_le32_to_cpu(*handles[i]);
336 va += offset;
337 va = util_cpu_to_le64(va);
338 memcpy(handles[i], &va, sizeof(va));
339 }
340 }
341
342 void si_emit_initial_compute_regs(struct si_context *sctx, struct radeon_cmdbuf *cs)
343 {
344 uint64_t bc_va;
345
346 radeon_set_sh_reg_seq(cs, R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE0, 2);
347 /* R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE0 / SE1,
348 * renamed COMPUTE_DESTINATION_EN_SEn on gfx10. */
349 radeon_emit(cs, S_00B858_SH0_CU_EN(0xffff) | S_00B858_SH1_CU_EN(0xffff));
350 radeon_emit(cs, S_00B858_SH0_CU_EN(0xffff) | S_00B858_SH1_CU_EN(0xffff));
351
352 if (sctx->chip_class >= GFX7) {
353 /* Also set R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE2 / SE3 */
354 radeon_set_sh_reg_seq(cs,
355 R_00B864_COMPUTE_STATIC_THREAD_MGMT_SE2, 2);
356 radeon_emit(cs, S_00B858_SH0_CU_EN(0xffff) |
357 S_00B858_SH1_CU_EN(0xffff));
358 radeon_emit(cs, S_00B858_SH0_CU_EN(0xffff) |
359 S_00B858_SH1_CU_EN(0xffff));
360 }
361
362 if (sctx->chip_class >= GFX10)
363 radeon_set_sh_reg(cs, R_00B8A0_COMPUTE_PGM_RSRC3, 0);
364
365 /* This register has been moved to R_00CD20_COMPUTE_MAX_WAVE_ID
366 * and is now per pipe, so it should be handled in the
367 * kernel if we want to use something other than the default value,
368 * which is now 0x22f.
369 */
370 if (sctx->chip_class <= GFX6) {
371 /* XXX: This should be:
372 * (number of compute units) * 4 * (waves per simd) - 1 */
373
374 radeon_set_sh_reg(cs, R_00B82C_COMPUTE_MAX_WAVE_ID,
375 0x190 /* Default value */);
376 }
377
378 /* Set the pointer to border colors. */
379 bc_va = sctx->border_color_buffer->gpu_address;
380
381 if (sctx->chip_class >= GFX7) {
382 radeon_set_uconfig_reg_seq(cs, R_030E00_TA_CS_BC_BASE_ADDR, 2);
383 radeon_emit(cs, bc_va >> 8); /* R_030E00_TA_CS_BC_BASE_ADDR */
384 radeon_emit(cs, S_030E04_ADDRESS(bc_va >> 40)); /* R_030E04_TA_CS_BC_BASE_ADDR_HI */
385 } else {
386 if (sctx->screen->info.si_TA_CS_BC_BASE_ADDR_allowed) {
387 radeon_set_config_reg(cs, R_00950C_TA_CS_BC_BASE_ADDR,
388 bc_va >> 8);
389 }
390 }
391 }
392
393 static bool si_setup_compute_scratch_buffer(struct si_context *sctx,
394 struct si_shader *shader,
395 struct ac_shader_config *config)
396 {
397 uint64_t scratch_bo_size, scratch_needed;
398 scratch_bo_size = 0;
399 scratch_needed = config->scratch_bytes_per_wave * sctx->scratch_waves;
400 if (sctx->compute_scratch_buffer)
401 scratch_bo_size = sctx->compute_scratch_buffer->b.b.width0;
402
403 if (scratch_bo_size < scratch_needed) {
404 si_resource_reference(&sctx->compute_scratch_buffer, NULL);
405
406 sctx->compute_scratch_buffer =
407 si_aligned_buffer_create(&sctx->screen->b,
408 SI_RESOURCE_FLAG_UNMAPPABLE,
409 PIPE_USAGE_DEFAULT,
410 scratch_needed, 256);
411
412 if (!sctx->compute_scratch_buffer)
413 return false;
414 }
415
416 if (sctx->compute_scratch_buffer != shader->scratch_bo && scratch_needed) {
417 uint64_t scratch_va = sctx->compute_scratch_buffer->gpu_address;
418
419 if (!si_shader_binary_upload(sctx->screen, shader, scratch_va))
420 return false;
421
422 si_resource_reference(&shader->scratch_bo,
423 sctx->compute_scratch_buffer);
424 }
425
426 return true;
427 }
428
429 static bool si_switch_compute_shader(struct si_context *sctx,
430 struct si_compute *program,
431 struct si_shader *shader,
432 const amd_kernel_code_t *code_object,
433 unsigned offset)
434 {
435 struct radeon_cmdbuf *cs = sctx->gfx_cs;
436 struct ac_shader_config inline_config = {0};
437 struct ac_shader_config *config;
438 uint64_t shader_va;
439
440 if (sctx->cs_shader_state.emitted_program == program &&
441 sctx->cs_shader_state.offset == offset)
442 return true;
443
444 if (program->ir_type != PIPE_SHADER_IR_NATIVE) {
445 config = &shader->config;
446 } else {
447 unsigned lds_blocks;
448
449 config = &inline_config;
450 code_object_to_config(code_object, config);
451
452 lds_blocks = config->lds_size;
453 /* XXX: We are over allocating LDS. For GFX6, the shader reports
454 * LDS in blocks of 256 bytes, so if there are 4 bytes lds
455 * allocated in the shader and 4 bytes allocated by the state
456 * tracker, then we will set LDS_SIZE to 512 bytes rather than 256.
457 */
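		/* e.g. on GFX6, local_size = 4 -> align(4, 256) >> 8 adds a whole
		 * extra 256-byte block on top of the block the shader declared. */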
458 if (sctx->chip_class <= GFX6) {
459 lds_blocks += align(program->local_size, 256) >> 8;
460 } else {
461 lds_blocks += align(program->local_size, 512) >> 9;
462 }
463
464 /* TODO: use si_multiwave_lds_size_workaround */
465 assert(lds_blocks <= 0xFF);
466
467 config->rsrc2 &= C_00B84C_LDS_SIZE;
468 config->rsrc2 |= S_00B84C_LDS_SIZE(lds_blocks);
469 }
470
471 if (!si_setup_compute_scratch_buffer(sctx, shader, config))
472 return false;
473
474 if (shader->scratch_bo) {
475 COMPUTE_DBG(sctx->screen, "Waves: %u; Scratch per wave: %u bytes; "
476 "Total Scratch: %u bytes\n", sctx->scratch_waves,
477 config->scratch_bytes_per_wave,
478 config->scratch_bytes_per_wave *
479 sctx->scratch_waves);
480
481 radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
482 shader->scratch_bo, RADEON_USAGE_READWRITE,
483 RADEON_PRIO_SCRATCH_BUFFER);
484 }
485
486 /* Prefetch the compute shader to TC L2.
487 *
488 * We should also prefetch graphics shaders if a compute dispatch was
489 * the last command, and the compute shader if a draw call was the last
490 * command. However, that would add more complexity and we're likely
491 * to get a shader state change in that case anyway.
492 */
493 if (sctx->chip_class >= GFX7) {
494 cik_prefetch_TC_L2_async(sctx, &program->shader.bo->b.b,
495 0, program->shader.bo->b.b.width0);
496 }
497
498 shader_va = shader->bo->gpu_address + offset;
499 if (program->ir_type == PIPE_SHADER_IR_NATIVE) {
500 /* Shader code is placed after the amd_kernel_code_t
501 * struct. */
502 shader_va += sizeof(amd_kernel_code_t);
503 }
504
505 radeon_add_to_buffer_list(sctx, sctx->gfx_cs, shader->bo,
506 RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);
507
508 radeon_set_sh_reg_seq(cs, R_00B830_COMPUTE_PGM_LO, 2);
509 radeon_emit(cs, shader_va >> 8);
510 radeon_emit(cs, S_00B834_DATA(shader_va >> 40));
511
512 radeon_set_sh_reg_seq(cs, R_00B848_COMPUTE_PGM_RSRC1, 2);
513 radeon_emit(cs, config->rsrc1);
514 radeon_emit(cs, config->rsrc2);
515
516 COMPUTE_DBG(sctx->screen, "COMPUTE_PGM_RSRC1: 0x%08x "
517 "COMPUTE_PGM_RSRC2: 0x%08x\n", config->rsrc1, config->rsrc2);
518
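	/* WAVESIZE is expressed in 256-dword (1 KiB) units, hence the >> 10. */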
519 radeon_set_sh_reg(cs, R_00B860_COMPUTE_TMPRING_SIZE,
520 S_00B860_WAVES(sctx->scratch_waves)
521 | S_00B860_WAVESIZE(config->scratch_bytes_per_wave >> 10));
522
523 sctx->cs_shader_state.emitted_program = program;
524 sctx->cs_shader_state.offset = offset;
525 sctx->cs_shader_state.uses_scratch =
526 config->scratch_bytes_per_wave != 0;
527
528 return true;
529 }
530
531 static void setup_scratch_rsrc_user_sgprs(struct si_context *sctx,
532 const amd_kernel_code_t *code_object,
533 unsigned user_sgpr)
534 {
535 struct radeon_cmdbuf *cs = sctx->gfx_cs;
536 uint64_t scratch_va = sctx->compute_scratch_buffer->gpu_address;
537
538 unsigned max_private_element_size = AMD_HSA_BITS_GET(
539 code_object->code_properties,
540 AMD_CODE_PROPERTY_PRIVATE_ELEMENT_SIZE);
541
542 uint32_t scratch_dword0 = scratch_va & 0xffffffff;
543 uint32_t scratch_dword1 =
544 S_008F04_BASE_ADDRESS_HI(scratch_va >> 32) |
545 S_008F04_SWIZZLE_ENABLE(1);
546
547 /* Disable address clamping */
548 uint32_t scratch_dword2 = 0xffffffff;
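	/* INDEX_STRIDE=3 selects a 64-record swizzle stride (one record per
	 * lane of a wave64) and ADD_TID_ENABLE makes each lane address its own
	 * slice of the scratch buffer. */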
549 uint32_t scratch_dword3 =
550 S_008F0C_INDEX_STRIDE(3) |
551 S_008F0C_ADD_TID_ENABLE(1);
552
553 if (sctx->chip_class >= GFX9) {
554 assert(max_private_element_size == 1); /* always 4 bytes on GFX9 */
555 } else {
556 scratch_dword3 |= S_008F0C_ELEMENT_SIZE(max_private_element_size);
557
558 if (sctx->chip_class < GFX8) {
559 /* BUF_DATA_FORMAT is ignored, but it cannot be
560 * BUF_DATA_FORMAT_INVALID. */
561 scratch_dword3 |=
562 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_8);
563 }
564 }
565
566 radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0 +
567 (user_sgpr * 4), 4);
568 radeon_emit(cs, scratch_dword0);
569 radeon_emit(cs, scratch_dword1);
570 radeon_emit(cs, scratch_dword2);
571 radeon_emit(cs, scratch_dword3);
572 }
573
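/* Program the user SGPRs mandated by the amd_kernel_code_t ("code object v2")
 * ABI for natively compiled kernels. */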
574 static void si_setup_user_sgprs_co_v2(struct si_context *sctx,
575 const amd_kernel_code_t *code_object,
576 const struct pipe_grid_info *info,
577 uint64_t kernel_args_va)
578 {
579 struct si_compute *program = sctx->cs_shader_state.program;
580 struct radeon_cmdbuf *cs = sctx->gfx_cs;
581
582 static const enum amd_code_property_mask_t workgroup_count_masks [] = {
583 AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_X,
584 AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Y,
585 AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Z
586 };
587
588 unsigned i, user_sgpr = 0;
589 if (AMD_HSA_BITS_GET(code_object->code_properties,
590 AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER)) {
591 if (code_object->workitem_private_segment_byte_size > 0) {
592 setup_scratch_rsrc_user_sgprs(sctx, code_object,
593 user_sgpr);
594 }
595 user_sgpr += 4;
596 }
597
598 if (AMD_HSA_BITS_GET(code_object->code_properties,
599 AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR)) {
600 struct dispatch_packet dispatch;
601 unsigned dispatch_offset;
602 struct si_resource *dispatch_buf = NULL;
603 uint64_t dispatch_va;
604
605 /* Upload dispatch ptr */
606 memset(&dispatch, 0, sizeof(dispatch));
607
608 dispatch.workgroup_size_x = util_cpu_to_le16(info->block[0]);
609 dispatch.workgroup_size_y = util_cpu_to_le16(info->block[1]);
610 dispatch.workgroup_size_z = util_cpu_to_le16(info->block[2]);
611
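		/* HSA grid sizes are in work-items; gallium grids are in blocks. */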
612 dispatch.grid_size_x = util_cpu_to_le32(info->grid[0] * info->block[0]);
613 dispatch.grid_size_y = util_cpu_to_le32(info->grid[1] * info->block[1]);
614 dispatch.grid_size_z = util_cpu_to_le32(info->grid[2] * info->block[2]);
615
616 dispatch.private_segment_size = util_cpu_to_le32(program->private_size);
617 dispatch.group_segment_size = util_cpu_to_le32(program->local_size);
618
619 dispatch.kernarg_address = util_cpu_to_le64(kernel_args_va);
620
621 u_upload_data(sctx->b.const_uploader, 0, sizeof(dispatch),
622 256, &dispatch, &dispatch_offset,
623 (struct pipe_resource**)&dispatch_buf);
624
625 		if (!dispatch_buf) {
626 			fprintf(stderr, "Error: Failed to allocate dispatch "
627 					"packet.\n");
			return;
628 		}
629 radeon_add_to_buffer_list(sctx, sctx->gfx_cs, dispatch_buf,
630 RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER);
631
632 dispatch_va = dispatch_buf->gpu_address + dispatch_offset;
633
634 radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0 +
635 (user_sgpr * 4), 2);
636 radeon_emit(cs, dispatch_va);
637 radeon_emit(cs, S_008F04_BASE_ADDRESS_HI(dispatch_va >> 32) |
638 S_008F04_STRIDE(0));
639
640 si_resource_reference(&dispatch_buf, NULL);
641 user_sgpr += 2;
642 }
643
644 if (AMD_HSA_BITS_GET(code_object->code_properties,
645 AMD_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR)) {
646 radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0 +
647 (user_sgpr * 4), 2);
648 radeon_emit(cs, kernel_args_va);
649 radeon_emit(cs, S_008F04_BASE_ADDRESS_HI (kernel_args_va >> 32) |
650 S_008F04_STRIDE(0));
651 user_sgpr += 2;
652 }
653
654 for (i = 0; i < 3 && user_sgpr < 16; i++) {
655 if (code_object->code_properties & workgroup_count_masks[i]) {
656 radeon_set_sh_reg_seq(cs,
657 R_00B900_COMPUTE_USER_DATA_0 +
658 (user_sgpr * 4), 1);
659 radeon_emit(cs, info->grid[i]);
660 user_sgpr += 1;
661 }
662 }
663 }
664
665 static bool si_upload_compute_input(struct si_context *sctx,
666 const amd_kernel_code_t *code_object,
667 const struct pipe_grid_info *info)
668 {
669 struct si_compute *program = sctx->cs_shader_state.program;
670 struct si_resource *input_buffer = NULL;
671 uint32_t kernel_args_offset = 0;
672 uint32_t *kernel_args;
673 void *kernel_args_ptr;
674 uint64_t kernel_args_va;
675
676 u_upload_alloc(sctx->b.const_uploader, 0, program->input_size,
677 sctx->screen->info.tcc_cache_line_size,
678 &kernel_args_offset,
679 (struct pipe_resource**)&input_buffer, &kernel_args_ptr);
680
681 if (unlikely(!kernel_args_ptr))
682 return false;
683
684 kernel_args = (uint32_t*)kernel_args_ptr;
685 kernel_args_va = input_buffer->gpu_address + kernel_args_offset;
686
687 memcpy(kernel_args, info->input, program->input_size);
688
689 for (unsigned i = 0; i < program->input_size / 4; i++) {
690 COMPUTE_DBG(sctx->screen, "input %u : %u\n", i,
691 kernel_args[i]);
692 }
693
694 radeon_add_to_buffer_list(sctx, sctx->gfx_cs, input_buffer,
695 RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER);
696
697 si_setup_user_sgprs_co_v2(sctx, code_object, info, kernel_args_va);
698 si_resource_reference(&input_buffer, NULL);
699 return true;
700 }
701
702 static void si_setup_tgsi_user_data(struct si_context *sctx,
703 const struct pipe_grid_info *info)
704 {
705 struct si_compute *program = sctx->cs_shader_state.program;
706 struct si_shader_selector *sel = &program->sel;
707 struct radeon_cmdbuf *cs = sctx->gfx_cs;
708 unsigned grid_size_reg = R_00B900_COMPUTE_USER_DATA_0 +
709 4 * SI_NUM_RESOURCE_SGPRS;
710 unsigned block_size_reg = grid_size_reg +
711 /* 12 bytes = 3 dwords. */
712 12 * sel->info.uses_grid_size;
713 unsigned cs_user_data_reg = block_size_reg +
714 12 * program->reads_variable_block_size;
715
716 if (info->indirect) {
717 if (sel->info.uses_grid_size) {
718 for (unsigned i = 0; i < 3; ++i) {
719 si_cp_copy_data(sctx, sctx->gfx_cs,
720 COPY_DATA_REG, NULL, (grid_size_reg >> 2) + i,
721 COPY_DATA_SRC_MEM, si_resource(info->indirect),
722 info->indirect_offset + 4 * i);
723 }
724 }
725 } else {
726 if (sel->info.uses_grid_size) {
727 radeon_set_sh_reg_seq(cs, grid_size_reg, 3);
728 radeon_emit(cs, info->grid[0]);
729 radeon_emit(cs, info->grid[1]);
730 radeon_emit(cs, info->grid[2]);
731 }
732 if (program->reads_variable_block_size) {
733 radeon_set_sh_reg_seq(cs, block_size_reg, 3);
734 radeon_emit(cs, info->block[0]);
735 radeon_emit(cs, info->block[1]);
736 radeon_emit(cs, info->block[2]);
737 }
738 }
739
740 if (program->num_cs_user_data_dwords) {
741 radeon_set_sh_reg_seq(cs, cs_user_data_reg, program->num_cs_user_data_dwords);
742 radeon_emit_array(cs, sctx->cs_user_data, program->num_cs_user_data_dwords);
743 }
744 }
745
746 static void si_emit_dispatch_packets(struct si_context *sctx,
747 const struct pipe_grid_info *info)
748 {
749 struct si_screen *sscreen = sctx->screen;
750 struct radeon_cmdbuf *cs = sctx->gfx_cs;
751 bool render_cond_bit = sctx->render_cond && !sctx->render_cond_force_off;
752 unsigned threads_per_threadgroup =
753 info->block[0] * info->block[1] * info->block[2];
754 unsigned waves_per_threadgroup =
755 DIV_ROUND_UP(threads_per_threadgroup, sscreen->compute_wave_size);
756 unsigned threadgroups_per_cu = 1;
757
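	/* GFX10 packs two single-wave threadgroups per CU; the assumption here
	 * is that this keeps more of the CU busy when threadgroups are tiny. */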
758 if (sctx->chip_class >= GFX10 && waves_per_threadgroup == 1)
759 threadgroups_per_cu = 2;
760
761 radeon_set_sh_reg(cs, R_00B854_COMPUTE_RESOURCE_LIMITS,
762 ac_get_compute_resource_limits(&sscreen->info,
763 waves_per_threadgroup,
764 sctx->cs_max_waves_per_sh,
765 threadgroups_per_cu));
766
767 unsigned dispatch_initiator =
768 S_00B800_COMPUTE_SHADER_EN(1) |
769 S_00B800_FORCE_START_AT_000(1) |
770 /* If the KMD allows it (there is a KMD hw register for it),
771 * allow launching waves out-of-order. (same as Vulkan) */
772 S_00B800_ORDER_MODE(sctx->chip_class >= GFX7) |
773 S_00B800_CS_W32_EN(sscreen->compute_wave_size == 32);
774
775 const uint *last_block = info->last_block;
776 bool partial_block_en = last_block[0] || last_block[1] || last_block[2];
777
778 radeon_set_sh_reg_seq(cs, R_00B81C_COMPUTE_NUM_THREAD_X, 3);
779
780 if (partial_block_en) {
781 unsigned partial[3];
782
783 		/* Dimensions with no partial block must be programmed with the
		 * full block size, not 0. */
784 partial[0] = last_block[0] ? last_block[0] : info->block[0];
785 partial[1] = last_block[1] ? last_block[1] : info->block[1];
786 partial[2] = last_block[2] ? last_block[2] : info->block[2];
787
788 radeon_emit(cs, S_00B81C_NUM_THREAD_FULL(info->block[0]) |
789 S_00B81C_NUM_THREAD_PARTIAL(partial[0]));
790 radeon_emit(cs, S_00B820_NUM_THREAD_FULL(info->block[1]) |
791 S_00B820_NUM_THREAD_PARTIAL(partial[1]));
792 radeon_emit(cs, S_00B824_NUM_THREAD_FULL(info->block[2]) |
793 S_00B824_NUM_THREAD_PARTIAL(partial[2]));
794
795 dispatch_initiator |= S_00B800_PARTIAL_TG_EN(1);
796 } else {
797 radeon_emit(cs, S_00B81C_NUM_THREAD_FULL(info->block[0]));
798 radeon_emit(cs, S_00B820_NUM_THREAD_FULL(info->block[1]));
799 radeon_emit(cs, S_00B824_NUM_THREAD_FULL(info->block[2]));
800 }
801
802 if (info->indirect) {
803 uint64_t base_va = si_resource(info->indirect)->gpu_address;
804
805 radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
806 si_resource(info->indirect),
807 RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);
808
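		/* SET_BASE with base index 1 programs the indirect base address;
		 * the DISPATCH_INDIRECT packet's offset below is relative to it. */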
809 radeon_emit(cs, PKT3(PKT3_SET_BASE, 2, 0) |
810 PKT3_SHADER_TYPE_S(1));
811 radeon_emit(cs, 1);
812 radeon_emit(cs, base_va);
813 radeon_emit(cs, base_va >> 32);
814
815 radeon_emit(cs, PKT3(PKT3_DISPATCH_INDIRECT, 1, render_cond_bit) |
816 PKT3_SHADER_TYPE_S(1));
817 radeon_emit(cs, info->indirect_offset);
818 radeon_emit(cs, dispatch_initiator);
819 } else {
820 radeon_emit(cs, PKT3(PKT3_DISPATCH_DIRECT, 3, render_cond_bit) |
821 PKT3_SHADER_TYPE_S(1));
822 radeon_emit(cs, info->grid[0]);
823 radeon_emit(cs, info->grid[1]);
824 radeon_emit(cs, info->grid[2]);
825 radeon_emit(cs, dispatch_initiator);
826 }
827 }
828
829
830 static void si_launch_grid(
831 struct pipe_context *ctx, const struct pipe_grid_info *info)
832 {
833 struct si_context *sctx = (struct si_context*)ctx;
834 struct si_compute *program = sctx->cs_shader_state.program;
835 const amd_kernel_code_t *code_object =
836 si_compute_get_code_object(program, info->pc);
837 int i;
838 /* HW bug workaround when CS threadgroups > 256 threads and async
839 * compute isn't used, i.e. only one compute job can run at a time.
840 * If async compute is possible, the threadgroup size must be limited
841 * to 256 threads on all queues to avoid the bug.
842 * Only GFX6 and certain GFX7 chips are affected.
843 */
844 bool cs_regalloc_hang =
845 (sctx->chip_class == GFX6 ||
846 sctx->family == CHIP_BONAIRE ||
847 sctx->family == CHIP_KABINI) &&
848 info->block[0] * info->block[1] * info->block[2] > 256;
849
850 if (cs_regalloc_hang)
851 sctx->flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
852 SI_CONTEXT_CS_PARTIAL_FLUSH;
853
854 if (program->ir_type != PIPE_SHADER_IR_NATIVE &&
855 program->shader.compilation_failed)
856 return;
857
858 if (sctx->has_graphics) {
859 if (sctx->last_num_draw_calls != sctx->num_draw_calls) {
860 si_update_fb_dirtiness_after_rendering(sctx);
861 sctx->last_num_draw_calls = sctx->num_draw_calls;
862 }
863
864 si_decompress_textures(sctx, 1 << PIPE_SHADER_COMPUTE);
865 }
866
867 /* Add buffer sizes for memory checking in need_cs_space. */
868 si_context_add_resource_size(sctx, &program->shader.bo->b.b);
869 /* TODO: add the scratch buffer */
870
871 if (info->indirect) {
872 si_context_add_resource_size(sctx, info->indirect);
873
874 /* Indirect buffers use TC L2 on GFX9, but not older hw. */
875 if (sctx->chip_class <= GFX8 &&
876 si_resource(info->indirect)->TC_L2_dirty) {
877 sctx->flags |= SI_CONTEXT_WB_L2;
878 si_resource(info->indirect)->TC_L2_dirty = false;
879 }
880 }
881
882 si_need_gfx_cs_space(sctx);
883
884 if (sctx->bo_list_add_all_compute_resources)
885 si_compute_resources_add_all_to_bo_list(sctx);
886
887 if (!sctx->cs_shader_state.initialized) {
888 si_emit_initial_compute_regs(sctx, sctx->gfx_cs);
889
890 sctx->cs_shader_state.emitted_program = NULL;
891 sctx->cs_shader_state.initialized = true;
892 }
893
894 if (sctx->flags)
895 sctx->emit_cache_flush(sctx);
896
897 if (!si_switch_compute_shader(sctx, program, &program->shader,
898 code_object, info->pc))
899 return;
900
901 si_upload_compute_shader_descriptors(sctx);
902 si_emit_compute_shader_pointers(sctx);
903
904 if (sctx->has_graphics &&
905 si_is_atom_dirty(sctx, &sctx->atoms.s.render_cond)) {
906 sctx->atoms.s.render_cond.emit(sctx);
907 si_set_atom_dirty(sctx, &sctx->atoms.s.render_cond, false);
908 }
909
910 if (program->ir_type == PIPE_SHADER_IR_NATIVE &&
911 unlikely(!si_upload_compute_input(sctx, code_object, info)))
912 return;
913
914 /* Global buffers */
915 for (i = 0; i < MAX_GLOBAL_BUFFERS; i++) {
916 struct si_resource *buffer =
917 si_resource(program->global_buffers[i]);
918 if (!buffer) {
919 continue;
920 }
921 radeon_add_to_buffer_list(sctx, sctx->gfx_cs, buffer,
922 RADEON_USAGE_READWRITE,
923 RADEON_PRIO_COMPUTE_GLOBAL);
924 }
925
926 if (program->ir_type != PIPE_SHADER_IR_NATIVE)
927 si_setup_tgsi_user_data(sctx, info);
928
929 si_emit_dispatch_packets(sctx, info);
930
931 if (unlikely(sctx->current_saved_cs)) {
932 si_trace_emit(sctx);
933 si_log_compute_state(sctx, sctx->log);
934 }
935
936 sctx->compute_is_busy = true;
937 sctx->num_compute_calls++;
938 if (sctx->cs_shader_state.uses_scratch)
939 sctx->num_spill_compute_calls++;
940
941 if (cs_regalloc_hang)
942 sctx->flags |= SI_CONTEXT_CS_PARTIAL_FLUSH;
943 }
944
945 void si_destroy_compute(struct si_compute *program)
946 {
947 struct si_shader_selector *sel = &program->sel;
948
949 if (program->ir_type != PIPE_SHADER_IR_NATIVE) {
950 util_queue_drop_job(&sel->screen->shader_compiler_queue,
951 &sel->ready);
952 util_queue_fence_destroy(&sel->ready);
953 }
954
955 si_shader_destroy(&program->shader);
956 ralloc_free(program->sel.nir);
957 FREE(program);
958 }
959
960 static void si_delete_compute_state(struct pipe_context *ctx, void *state)
{
961 struct si_compute *program = (struct si_compute *)state;
962 struct si_context *sctx = (struct si_context*)ctx;
963
964 if (!state)
965 return;
966
967 if (program == sctx->cs_shader_state.program)
968 sctx->cs_shader_state.program = NULL;
969
970 if (program == sctx->cs_shader_state.emitted_program)
971 sctx->cs_shader_state.emitted_program = NULL;
972
973 si_compute_reference(&program, NULL);
974 }
975
/* Intentionally a no-op: radeonsi doesn't bind compute resources through
 * pipe_surface handles. */
976 static void si_set_compute_resources(struct pipe_context *ctx_,
977 		unsigned start, unsigned count,
978 		struct pipe_surface **surfaces) { }
979
980 void si_init_compute_functions(struct si_context *sctx)
981 {
982 sctx->b.create_compute_state = si_create_compute_state;
983 sctx->b.delete_compute_state = si_delete_compute_state;
984 sctx->b.bind_compute_state = si_bind_compute_state;
985 sctx->b.set_compute_resources = si_set_compute_resources;
986 sctx->b.set_global_binding = si_set_global_binding;
987 sctx->b.launch_grid = si_launch_grid;
988 }