/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "si_compute.h"

#include "ac_rtld.h"
#include "amd_kernel_code_t.h"
#include "nir/tgsi_to_nir.h"
#include "si_build_pm4.h"
#include "util/u_async_debug.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"

#define COMPUTE_DBG(sscreen, fmt, args...)                                   \
   do {                                                                      \
      if ((sscreen->debug_flags & DBG(COMPUTE)))                             \
         fprintf(stderr, fmt, ##args);                                       \
   } while (0) /* no trailing semicolon, so the macro composes with if/else */

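/* Mirrors the start of the HSA AQL kernel dispatch packet. It is uploaded
 * so that native (PIPE_SHADER_IR_NATIVE) kernels can read their launch
 * parameters through the dispatch pointer; see si_setup_user_sgprs_co_v2.
 */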
struct dispatch_packet {
   uint16_t header;
   uint16_t setup;
   uint16_t workgroup_size_x;
   uint16_t workgroup_size_y;
   uint16_t workgroup_size_z;
   uint16_t reserved0;
   uint32_t grid_size_x;
   uint32_t grid_size_y;
   uint32_t grid_size_z;
   uint32_t private_segment_size;
   uint32_t group_segment_size;
   uint64_t kernel_object;
   uint64_t kernarg_address;
   uint64_t reserved2;
};

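/* Return the amd_kernel_code_t header at the given offset into the .text
 * section of a native (HSA) compute binary, or NULL if the shader is not
 * native or the header cannot be read.
 */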
static const amd_kernel_code_t *si_compute_get_code_object(const struct si_compute *program,
                                                           uint64_t symbol_offset)
{
   const struct si_shader_selector *sel = &program->sel;

   if (program->ir_type != PIPE_SHADER_IR_NATIVE)
      return NULL;

   struct ac_rtld_binary rtld;
   if (!ac_rtld_open(&rtld,
                     (struct ac_rtld_open_info){.info = &sel->screen->info,
                                                .shader_type = MESA_SHADER_COMPUTE,
                                                .wave_size = sel->screen->compute_wave_size,
                                                .num_parts = 1,
                                                .elf_ptrs = &program->shader.binary.elf_buffer,
                                                .elf_sizes = &program->shader.binary.elf_size}))
      return NULL;

   const amd_kernel_code_t *result = NULL;
   const char *text;
   size_t size;
   if (!ac_rtld_get_section_by_name(&rtld, ".text", &text, &size))
      goto out;

   if (symbol_offset + sizeof(amd_kernel_code_t) > size)
      goto out;

   result = (const amd_kernel_code_t *)(text + symbol_offset);

out:
   ac_rtld_close(&rtld);
   return result;
}

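/* Translate the resource registers and segment sizes from an
 * amd_kernel_code_t header into the common ac_shader_config layout.
 */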
static void code_object_to_config(const amd_kernel_code_t *code_object,
                                  struct ac_shader_config *out_config)
{
   uint32_t rsrc1 = code_object->compute_pgm_resource_registers;
   uint32_t rsrc2 = code_object->compute_pgm_resource_registers >> 32;
   out_config->num_sgprs = code_object->wavefront_sgpr_count;
   out_config->num_vgprs = code_object->workitem_vgpr_count;
   out_config->float_mode = G_00B028_FLOAT_MODE(rsrc1);
   out_config->rsrc1 = rsrc1;
   out_config->lds_size = MAX2(out_config->lds_size, G_00B84C_LDS_SIZE(rsrc2));
   out_config->rsrc2 = rsrc2;
   out_config->scratch_bytes_per_wave =
      align(code_object->workitem_private_segment_byte_size * 64, 1024);
}

/* Asynchronous compute shader compilation. */
static void si_create_compute_state_async(void *job, int thread_index)
{
   struct si_compute *program = (struct si_compute *)job;
   struct si_shader_selector *sel = &program->sel;
   struct si_shader *shader = &program->shader;
   struct ac_llvm_compiler *compiler;
   struct pipe_debug_callback *debug = &sel->compiler_ctx_state.debug;
   struct si_screen *sscreen = sel->screen;

   assert(!debug->debug_message || debug->async);
   assert(thread_index >= 0);
   assert(thread_index < ARRAY_SIZE(sscreen->compiler));
   compiler = &sscreen->compiler[thread_index];

   if (!compiler->passes)
      si_init_compiler(sscreen, compiler);

   assert(program->ir_type == PIPE_SHADER_IR_NIR);
   si_nir_scan_shader(sel->nir, &sel->info);

   sel->info.base.cs.shared_size = program->local_size;
   si_get_active_slot_masks(&sel->info, &sel->active_const_and_shader_buffers,
                            &sel->active_samplers_and_images);

   program->shader.is_monolithic = true;
   program->reads_variable_block_size =
      sel->info.uses_block_size && sel->info.base.cs.local_size[0] == 0;
   program->num_cs_user_data_dwords =
      sel->info.base.cs.user_data_components_amd;

   unsigned user_sgprs = SI_NUM_RESOURCE_SGPRS + (sel->info.uses_grid_size ? 3 : 0) +
                         (program->reads_variable_block_size ? 3 : 0) +
                         program->num_cs_user_data_dwords;

   /* Fast path for compute shaders - some descriptors passed via user SGPRs. */
   /* Shader buffers in user SGPRs. */
   for (unsigned i = 0; i < MIN2(3, sel->info.base.num_ssbos) && user_sgprs <= 12; i++) {
      user_sgprs = align(user_sgprs, 4);
      if (i == 0)
         sel->cs_shaderbufs_sgpr_index = user_sgprs;
      user_sgprs += 4;
      sel->cs_num_shaderbufs_in_user_sgprs++;
   }

   /* Images in user SGPRs. */
   unsigned non_msaa_images = sel->info.images_declared & ~sel->info.base.msaa_images;

   for (unsigned i = 0; i < 3 && non_msaa_images & (1 << i); i++) {
      unsigned num_sgprs = sel->info.base.image_buffers & (1 << i) ? 4 : 8;

      if (align(user_sgprs, num_sgprs) + num_sgprs > 16)
         break;

      user_sgprs = align(user_sgprs, num_sgprs);
      if (i == 0)
         sel->cs_images_sgpr_index = user_sgprs;
      user_sgprs += num_sgprs;
      sel->cs_num_images_in_user_sgprs++;
   }
   sel->cs_images_num_sgprs = user_sgprs - sel->cs_images_sgpr_index;
   assert(user_sgprs <= 16);

   unsigned char ir_sha1_cache_key[20];
   si_get_ir_cache_key(sel, false, false, ir_sha1_cache_key);

   /* Try to load the shader from the shader cache. */
   simple_mtx_lock(&sscreen->shader_cache_mutex);

   if (si_shader_cache_load_shader(sscreen, ir_sha1_cache_key, shader)) {
      simple_mtx_unlock(&sscreen->shader_cache_mutex);

      si_shader_dump_stats_for_shader_db(sscreen, shader, debug);
      si_shader_dump(sscreen, shader, debug, stderr, true);

      if (!si_shader_binary_upload(sscreen, shader, 0))
         program->shader.compilation_failed = true;
   } else {
      simple_mtx_unlock(&sscreen->shader_cache_mutex);

      if (!si_create_shader_variant(sscreen, compiler, &program->shader, debug)) {
         program->shader.compilation_failed = true;
         return;
      }

      bool scratch_enabled = shader->config.scratch_bytes_per_wave > 0;

      shader->config.rsrc1 = S_00B848_VGPRS((shader->config.num_vgprs - 1) /
                                            (sscreen->compute_wave_size == 32 ? 8 : 4)) |
                             S_00B848_DX10_CLAMP(1) |
                             S_00B848_MEM_ORDERED(sscreen->info.chip_class >= GFX10) |
                             S_00B848_WGP_MODE(sscreen->info.chip_class >= GFX10) |
                             S_00B848_FLOAT_MODE(shader->config.float_mode);

      if (sscreen->info.chip_class < GFX10) {
         shader->config.rsrc1 |= S_00B848_SGPRS((shader->config.num_sgprs - 1) / 8);
      }

      shader->config.rsrc2 = S_00B84C_USER_SGPR(user_sgprs) | S_00B84C_SCRATCH_EN(scratch_enabled) |
                             S_00B84C_TGID_X_EN(sel->info.uses_block_id[0]) |
                             S_00B84C_TGID_Y_EN(sel->info.uses_block_id[1]) |
                             S_00B84C_TGID_Z_EN(sel->info.uses_block_id[2]) |
                             S_00B84C_TG_SIZE_EN(sel->info.uses_subgroup_info) |
                             S_00B84C_TIDIG_COMP_CNT(sel->info.uses_thread_id[2]
                                                        ? 2
                                                        : sel->info.uses_thread_id[1] ? 1 : 0) |
                             S_00B84C_LDS_SIZE(shader->config.lds_size);

      simple_mtx_lock(&sscreen->shader_cache_mutex);
      si_shader_cache_insert_shader(sscreen, ir_sha1_cache_key, shader, true);
      simple_mtx_unlock(&sscreen->shader_cache_mutex);
   }

   ralloc_free(sel->nir);
   sel->nir = NULL;
}

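/* pipe_context::create_compute_state. TGSI is translated to NIR and
 * compiled asynchronously; native (HSA) binaries are copied, parsed for
 * their amd_kernel_code_t header, and uploaded immediately.
 */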
static void *si_create_compute_state(struct pipe_context *ctx, const struct pipe_compute_state *cso)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_screen *sscreen = (struct si_screen *)ctx->screen;
   struct si_compute *program = CALLOC_STRUCT(si_compute);
   struct si_shader_selector *sel;

   if (!program)
      return NULL;

   sel = &program->sel;
   pipe_reference_init(&sel->base.reference, 1);
   sel->info.stage = MESA_SHADER_COMPUTE;
   sel->screen = sscreen;
   sel->const_and_shader_buf_descriptors_index =
      si_const_and_shader_buffer_descriptors_idx(PIPE_SHADER_COMPUTE);
   sel->sampler_and_images_descriptors_index =
      si_sampler_and_image_descriptors_idx(PIPE_SHADER_COMPUTE);
   program->shader.selector = &program->sel;
   program->ir_type = cso->ir_type;
   program->local_size = cso->req_local_mem;
   program->private_size = cso->req_private_mem;
   program->input_size = cso->req_input_mem;

   if (cso->ir_type != PIPE_SHADER_IR_NATIVE) {
      if (cso->ir_type == PIPE_SHADER_IR_TGSI) {
         program->ir_type = PIPE_SHADER_IR_NIR;
         sel->nir = tgsi_to_nir(cso->prog, ctx->screen, true);
      } else {
         assert(cso->ir_type == PIPE_SHADER_IR_NIR);
         sel->nir = (struct nir_shader *)cso->prog;
      }

      sel->compiler_ctx_state.debug = sctx->debug;
      sel->compiler_ctx_state.is_debug_context = sctx->is_debug;
      p_atomic_inc(&sscreen->num_shaders_created);

      si_schedule_initial_compile(sctx, MESA_SHADER_COMPUTE, &sel->ready, &sel->compiler_ctx_state,
                                  program, si_create_compute_state_async);
   } else {
      const struct pipe_binary_program_header *header;
      header = cso->prog;

      program->shader.binary.elf_size = header->num_bytes;
      program->shader.binary.elf_buffer = malloc(header->num_bytes);
      if (!program->shader.binary.elf_buffer) {
         FREE(program);
         return NULL;
      }
      memcpy((void *)program->shader.binary.elf_buffer, header->blob, header->num_bytes);

      const amd_kernel_code_t *code_object = si_compute_get_code_object(program, 0);
      if (!code_object) {
         /* Bail out before code_object_to_config dereferences NULL. */
         fprintf(stderr, "radeonsi: failed to parse native compute binary\n");
         free((void *)program->shader.binary.elf_buffer);
         FREE(program);
         return NULL;
      }
      code_object_to_config(code_object, &program->shader.config);

      si_shader_dump(sctx->screen, &program->shader, &sctx->debug, stderr, true);
      if (!si_shader_binary_upload(sctx->screen, &program->shader, 0)) {
         fprintf(stderr, "LLVM failed to upload shader\n");
         free((void *)program->shader.binary.elf_buffer);
         FREE(program);
         return NULL;
      }
   }

   return program;
}

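/* pipe_context::bind_compute_state */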
static void si_bind_compute_state(struct pipe_context *ctx, void *state)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_compute *program = (struct si_compute *)state;
   struct si_shader_selector *sel = &program->sel;

   sctx->cs_shader_state.program = program;
   if (!program)
      return;

   /* Wait because we need active slot usage masks. */
   if (program->ir_type != PIPE_SHADER_IR_NATIVE)
      util_queue_fence_wait(&sel->ready);

   si_set_active_descriptors(sctx,
                             SI_DESCS_FIRST_COMPUTE + SI_SHADER_DESCS_CONST_AND_SHADER_BUFFERS,
                             sel->active_const_and_shader_buffers);
   si_set_active_descriptors(sctx, SI_DESCS_FIRST_COMPUTE + SI_SHADER_DESCS_SAMPLERS_AND_IMAGES,
                             sel->active_samplers_and_images);

   sctx->compute_shaderbuf_sgprs_dirty = true;
   sctx->compute_image_sgprs_dirty = true;
}

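/* pipe_context::set_global_binding for OpenCL global buffers. Each handle
 * is patched in place with the little-endian 64-bit GPU address of the
 * bound buffer plus the offset that was stored in the handle.
 */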
static void si_set_global_binding(struct pipe_context *ctx, unsigned first, unsigned n,
                                  struct pipe_resource **resources, uint32_t **handles)
{
   unsigned i;
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_compute *program = sctx->cs_shader_state.program;

   if (first + n > program->max_global_buffers) {
      unsigned old_max = program->max_global_buffers;
      /* Use a temporary so the old array isn't leaked if realloc fails. */
      struct pipe_resource **new_buffers =
         realloc(program->global_buffers, (first + n) * sizeof(program->global_buffers[0]));
      if (!new_buffers) {
         fprintf(stderr, "radeonsi: failed to allocate compute global_buffers\n");
         return;
      }
      program->global_buffers = new_buffers;
      program->max_global_buffers = first + n;

      memset(&program->global_buffers[old_max], 0,
             (program->max_global_buffers - old_max) * sizeof(program->global_buffers[0]));
   }

   if (!resources) {
      for (i = 0; i < n; i++) {
         pipe_resource_reference(&program->global_buffers[first + i], NULL);
      }
      return;
   }

   for (i = 0; i < n; i++) {
      uint64_t va;
      uint32_t offset;
      pipe_resource_reference(&program->global_buffers[first + i], resources[i]);
      va = si_resource(resources[i])->gpu_address;
      offset = util_le32_to_cpu(*handles[i]);
      va += offset;
      va = util_cpu_to_le64(va);
      memcpy(handles[i], &va, sizeof(va));
   }
}

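/* Emit the one-time compute register state for a command stream. This can
 * run on the gfx queue or on a compute queue; some registers are only
 * written when not on a graphics queue.
 */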
void si_emit_initial_compute_regs(struct si_context *sctx, struct radeon_cmdbuf *cs)
{
   uint64_t bc_va = sctx->border_color_buffer->gpu_address;

   radeon_set_sh_reg_seq(cs, R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE0, 2);
   /* R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE0 / SE1,
    * renamed COMPUTE_DESTINATION_EN_SEn on gfx10. */
   radeon_emit(cs, S_00B858_SH0_CU_EN(0xffff) | S_00B858_SH1_CU_EN(0xffff));
   radeon_emit(cs, S_00B858_SH0_CU_EN(0xffff) | S_00B858_SH1_CU_EN(0xffff));

   if (sctx->chip_class == GFX6) {
      /* This register has been moved to R_00CD20_COMPUTE_MAX_WAVE_ID
       * and is now per pipe, so it should be handled in the
       * kernel if we want to use something other than the default value.
       *
       * TODO: This should be:
       * (number of compute units) * 4 * (waves per simd) - 1
       */
      radeon_set_sh_reg(cs, R_00B82C_COMPUTE_MAX_WAVE_ID, 0x190 /* Default value */);

      if (sctx->screen->info.si_TA_CS_BC_BASE_ADDR_allowed)
         radeon_set_config_reg(cs, R_00950C_TA_CS_BC_BASE_ADDR, bc_va >> 8);
   }

   if (sctx->chip_class >= GFX7) {
      /* Also set R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE2 / SE3 */
      radeon_set_sh_reg_seq(cs, R_00B864_COMPUTE_STATIC_THREAD_MGMT_SE2, 2);
      radeon_emit(cs, S_00B858_SH0_CU_EN(0xffff) | S_00B858_SH1_CU_EN(0xffff));
      radeon_emit(cs, S_00B858_SH0_CU_EN(0xffff) | S_00B858_SH1_CU_EN(0xffff));

      /* Disable profiling on compute queues. */
      if (cs != sctx->gfx_cs || !sctx->screen->info.has_graphics) {
         radeon_set_sh_reg(cs, R_00B82C_COMPUTE_PERFCOUNT_ENABLE, 0);
         radeon_set_sh_reg(cs, R_00B878_COMPUTE_THREAD_TRACE_ENABLE, 0);
      }

      /* Set the pointer to border colors. */
      radeon_set_uconfig_reg_seq(cs, R_030E00_TA_CS_BC_BASE_ADDR, 2);
      radeon_emit(cs, bc_va >> 8);                    /* R_030E00_TA_CS_BC_BASE_ADDR */
      radeon_emit(cs, S_030E04_ADDRESS(bc_va >> 40)); /* R_030E04_TA_CS_BC_BASE_ADDR_HI */
   }

   /* cs_preamble_state initializes this for the gfx queue, so only do this
    * if we are on a compute queue.
    */
   if (sctx->chip_class >= GFX9 &&
       (cs != sctx->gfx_cs || !sctx->screen->info.has_graphics)) {
      radeon_set_uconfig_reg(cs, R_0301EC_CP_COHER_START_DELAY,
                             sctx->chip_class >= GFX10 ? 0x20 : 0);
   }

   if (sctx->chip_class >= GFX10) {
      radeon_set_sh_reg(cs, R_00B890_COMPUTE_USER_ACCUM_0, 0);
      radeon_set_sh_reg(cs, R_00B894_COMPUTE_USER_ACCUM_1, 0);
      radeon_set_sh_reg(cs, R_00B898_COMPUTE_USER_ACCUM_2, 0);
      radeon_set_sh_reg(cs, R_00B89C_COMPUTE_USER_ACCUM_3, 0);
      radeon_set_sh_reg(cs, R_00B8A0_COMPUTE_PGM_RSRC3, 0);
      radeon_set_sh_reg(cs, R_00B9F4_COMPUTE_DISPATCH_TUNNEL, 0);
   }
}

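/* Grow the per-context compute scratch buffer if the current shader needs
 * more scratch than is allocated, and re-upload the shader with the new
 * scratch address if the buffer changed.
 */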
static bool si_setup_compute_scratch_buffer(struct si_context *sctx, struct si_shader *shader,
                                            struct ac_shader_config *config)
{
   uint64_t scratch_bo_size, scratch_needed;
   scratch_bo_size = 0;
   scratch_needed = config->scratch_bytes_per_wave * sctx->scratch_waves;
   if (sctx->compute_scratch_buffer)
      scratch_bo_size = sctx->compute_scratch_buffer->b.b.width0;

   if (scratch_bo_size < scratch_needed) {
      si_resource_reference(&sctx->compute_scratch_buffer, NULL);

      sctx->compute_scratch_buffer =
         si_aligned_buffer_create(&sctx->screen->b, SI_RESOURCE_FLAG_UNMAPPABLE, PIPE_USAGE_DEFAULT,
                                  scratch_needed, sctx->screen->info.pte_fragment_size);

      if (!sctx->compute_scratch_buffer)
         return false;
   }

   if (sctx->compute_scratch_buffer != shader->scratch_bo && scratch_needed) {
      uint64_t scratch_va = sctx->compute_scratch_buffer->gpu_address;

      if (!si_shader_binary_upload(sctx->screen, shader, scratch_va))
         return false;

      si_resource_reference(&shader->scratch_bo, sctx->compute_scratch_buffer);
   }

   return true;
}

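/* Emit the registers needed to switch to a different compute shader
 * (PGM address, RSRC1/RSRC2, TMPRING_SIZE), including scratch setup and a
 * TC L2 prefetch of the shader binary on GFX7+.
 */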
static bool si_switch_compute_shader(struct si_context *sctx, struct si_compute *program,
                                     struct si_shader *shader, const amd_kernel_code_t *code_object,
                                     unsigned offset)
{
   struct radeon_cmdbuf *cs = sctx->gfx_cs;
   struct ac_shader_config inline_config = {0};
   struct ac_shader_config *config;
   uint64_t shader_va;

   if (sctx->cs_shader_state.emitted_program == program && sctx->cs_shader_state.offset == offset)
      return true;

   if (program->ir_type != PIPE_SHADER_IR_NATIVE) {
      config = &shader->config;
   } else {
      unsigned lds_blocks;

      config = &inline_config;
      code_object_to_config(code_object, config);

      lds_blocks = config->lds_size;
      /* XXX: We are over allocating LDS. For GFX6, the shader reports
       * LDS in blocks of 256 bytes, so if there are 4 bytes lds
       * allocated in the shader and 4 bytes allocated by the state
       * tracker, then we will set LDS_SIZE to 512 bytes rather than 256.
       */
      if (sctx->chip_class <= GFX6) {
         lds_blocks += align(program->local_size, 256) >> 8;
      } else {
         lds_blocks += align(program->local_size, 512) >> 9;
      }

      /* TODO: use si_multiwave_lds_size_workaround */
      assert(lds_blocks <= 0xFF);

      config->rsrc2 &= C_00B84C_LDS_SIZE;
      config->rsrc2 |= S_00B84C_LDS_SIZE(lds_blocks);
   }

   if (!si_setup_compute_scratch_buffer(sctx, shader, config))
      return false;

   if (shader->scratch_bo) {
      COMPUTE_DBG(sctx->screen,
                  "Waves: %u; Scratch per wave: %u bytes; "
                  "Total Scratch: %u bytes\n",
                  sctx->scratch_waves, config->scratch_bytes_per_wave,
                  config->scratch_bytes_per_wave * sctx->scratch_waves);

      radeon_add_to_buffer_list(sctx, sctx->gfx_cs, shader->scratch_bo, RADEON_USAGE_READWRITE,
                                RADEON_PRIO_SCRATCH_BUFFER);
   }

   /* Prefetch the compute shader to TC L2.
    *
    * We should also prefetch graphics shaders if a compute dispatch was
    * the last command, and the compute shader if a draw call was the last
    * command. However, that would add more complexity and we're likely
    * to get a shader state change in that case anyway.
    */
   if (sctx->chip_class >= GFX7) {
      cik_prefetch_TC_L2_async(sctx, &program->shader.bo->b.b, 0, program->shader.bo->b.b.width0);
   }

   shader_va = shader->bo->gpu_address + offset;
   if (program->ir_type == PIPE_SHADER_IR_NATIVE) {
      /* Shader code is placed after the amd_kernel_code_t
       * struct. */
      shader_va += sizeof(amd_kernel_code_t);
   }

   radeon_add_to_buffer_list(sctx, sctx->gfx_cs, shader->bo, RADEON_USAGE_READ,
                             RADEON_PRIO_SHADER_BINARY);

   radeon_set_sh_reg_seq(cs, R_00B830_COMPUTE_PGM_LO, 2);
   radeon_emit(cs, shader_va >> 8);
   radeon_emit(cs, S_00B834_DATA(shader_va >> 40));

   radeon_set_sh_reg_seq(cs, R_00B848_COMPUTE_PGM_RSRC1, 2);
   radeon_emit(cs, config->rsrc1);
   radeon_emit(cs, config->rsrc2);

   COMPUTE_DBG(sctx->screen,
               "COMPUTE_PGM_RSRC1: 0x%08x "
               "COMPUTE_PGM_RSRC2: 0x%08x\n",
               config->rsrc1, config->rsrc2);

   sctx->max_seen_compute_scratch_bytes_per_wave =
      MAX2(sctx->max_seen_compute_scratch_bytes_per_wave, config->scratch_bytes_per_wave);

   radeon_set_sh_reg(cs, R_00B860_COMPUTE_TMPRING_SIZE,
                     S_00B860_WAVES(sctx->scratch_waves) |
                        S_00B860_WAVESIZE(sctx->max_seen_compute_scratch_bytes_per_wave >> 10));

   sctx->cs_shader_state.emitted_program = program;
   sctx->cs_shader_state.offset = offset;
   sctx->cs_shader_state.uses_scratch = config->scratch_bytes_per_wave != 0;

   return true;
}

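/* Write the 4-dword scratch buffer resource descriptor for a native kernel
 * into COMPUTE_USER_DATA_0 + user_sgpr.
 */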
static void setup_scratch_rsrc_user_sgprs(struct si_context *sctx,
                                          const amd_kernel_code_t *code_object, unsigned user_sgpr)
{
   struct radeon_cmdbuf *cs = sctx->gfx_cs;
   uint64_t scratch_va = sctx->compute_scratch_buffer->gpu_address;

   unsigned max_private_element_size =
      AMD_HSA_BITS_GET(code_object->code_properties, AMD_CODE_PROPERTY_PRIVATE_ELEMENT_SIZE);

   uint32_t scratch_dword0 = scratch_va & 0xffffffff;
   uint32_t scratch_dword1 =
      S_008F04_BASE_ADDRESS_HI(scratch_va >> 32) | S_008F04_SWIZZLE_ENABLE(1);

   /* Disable address clamping */
   uint32_t scratch_dword2 = 0xffffffff;
   uint32_t scratch_dword3 = S_008F0C_INDEX_STRIDE(3) | S_008F0C_ADD_TID_ENABLE(1);

   if (sctx->chip_class >= GFX9) {
      assert(max_private_element_size == 1); /* always 4 bytes on GFX9 */
   } else {
      scratch_dword3 |= S_008F0C_ELEMENT_SIZE(max_private_element_size);

      if (sctx->chip_class < GFX8) {
         /* BUF_DATA_FORMAT is ignored, but it cannot be
          * BUF_DATA_FORMAT_INVALID. */
         scratch_dword3 |= S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_8);
      }
   }

   radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0 + (user_sgpr * 4), 4);
   radeon_emit(cs, scratch_dword0);
   radeon_emit(cs, scratch_dword1);
   radeon_emit(cs, scratch_dword2);
   radeon_emit(cs, scratch_dword3);
}

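/* Set up the user SGPRs required by the HSA code object v2 ABI (scratch
 * rsrc, dispatch pointer, kernarg pointer, workgroup counts), as selected
 * by the code_properties flags in the kernel header.
 */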
static void si_setup_user_sgprs_co_v2(struct si_context *sctx, const amd_kernel_code_t *code_object,
                                      const struct pipe_grid_info *info, uint64_t kernel_args_va)
{
   struct si_compute *program = sctx->cs_shader_state.program;
   struct radeon_cmdbuf *cs = sctx->gfx_cs;

   static const enum amd_code_property_mask_t workgroup_count_masks[] = {
      AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_X,
      AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Y,
      AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Z};

   unsigned i, user_sgpr = 0;
   if (AMD_HSA_BITS_GET(code_object->code_properties,
                        AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER)) {
      if (code_object->workitem_private_segment_byte_size > 0) {
         setup_scratch_rsrc_user_sgprs(sctx, code_object, user_sgpr);
      }
      user_sgpr += 4;
   }

   if (AMD_HSA_BITS_GET(code_object->code_properties, AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR)) {
      struct dispatch_packet dispatch;
      unsigned dispatch_offset;
      struct si_resource *dispatch_buf = NULL;
      uint64_t dispatch_va;

      /* Upload dispatch ptr */
      memset(&dispatch, 0, sizeof(dispatch));

      dispatch.workgroup_size_x = util_cpu_to_le16(info->block[0]);
      dispatch.workgroup_size_y = util_cpu_to_le16(info->block[1]);
      dispatch.workgroup_size_z = util_cpu_to_le16(info->block[2]);

      dispatch.grid_size_x = util_cpu_to_le32(info->grid[0] * info->block[0]);
      dispatch.grid_size_y = util_cpu_to_le32(info->grid[1] * info->block[1]);
      dispatch.grid_size_z = util_cpu_to_le32(info->grid[2] * info->block[2]);

      dispatch.private_segment_size = util_cpu_to_le32(program->private_size);
      dispatch.group_segment_size = util_cpu_to_le32(program->local_size);

      dispatch.kernarg_address = util_cpu_to_le64(kernel_args_va);

      u_upload_data(sctx->b.const_uploader, 0, sizeof(dispatch), 256, &dispatch, &dispatch_offset,
                    (struct pipe_resource **)&dispatch_buf);

      if (!dispatch_buf) {
         fprintf(stderr, "Error: Failed to allocate dispatch packet.\n");
         /* dispatch_buf would be dereferenced below, so bail out. */
         return;
      }
      radeon_add_to_buffer_list(sctx, sctx->gfx_cs, dispatch_buf, RADEON_USAGE_READ,
                                RADEON_PRIO_CONST_BUFFER);

      dispatch_va = dispatch_buf->gpu_address + dispatch_offset;

      radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0 + (user_sgpr * 4), 2);
      radeon_emit(cs, dispatch_va);
      radeon_emit(cs, S_008F04_BASE_ADDRESS_HI(dispatch_va >> 32) | S_008F04_STRIDE(0));

      si_resource_reference(&dispatch_buf, NULL);
      user_sgpr += 2;
   }

   if (AMD_HSA_BITS_GET(code_object->code_properties,
                        AMD_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR)) {
      radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0 + (user_sgpr * 4), 2);
      radeon_emit(cs, kernel_args_va);
      radeon_emit(cs, S_008F04_BASE_ADDRESS_HI(kernel_args_va >> 32) | S_008F04_STRIDE(0));
      user_sgpr += 2;
   }

   for (i = 0; i < 3 && user_sgpr < 16; i++) {
      if (code_object->code_properties & workgroup_count_masks[i]) {
         radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0 + (user_sgpr * 4), 1);
         radeon_emit(cs, info->grid[i]);
         user_sgpr += 1;
      }
   }
}

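/* Upload the kernel arguments for a native kernel and set up the user
 * SGPRs that point at them. Returns false on allocation failure.
 */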
static bool si_upload_compute_input(struct si_context *sctx, const amd_kernel_code_t *code_object,
                                    const struct pipe_grid_info *info)
{
   struct si_compute *program = sctx->cs_shader_state.program;
   struct si_resource *input_buffer = NULL;
   uint32_t kernel_args_offset = 0;
   uint32_t *kernel_args;
   void *kernel_args_ptr;
   uint64_t kernel_args_va;

   u_upload_alloc(sctx->b.const_uploader, 0, program->input_size,
                  sctx->screen->info.tcc_cache_line_size, &kernel_args_offset,
                  (struct pipe_resource **)&input_buffer, &kernel_args_ptr);

   if (unlikely(!kernel_args_ptr))
      return false;

   kernel_args = (uint32_t *)kernel_args_ptr;
   kernel_args_va = input_buffer->gpu_address + kernel_args_offset;

   memcpy(kernel_args, info->input, program->input_size);

   for (unsigned i = 0; i < program->input_size / 4; i++) {
      COMPUTE_DBG(sctx->screen, "input %u : %u\n", i, kernel_args[i]);
   }

   radeon_add_to_buffer_list(sctx, sctx->gfx_cs, input_buffer, RADEON_USAGE_READ,
                             RADEON_PRIO_CONST_BUFFER);

   si_setup_user_sgprs_co_v2(sctx, code_object, info, kernel_args_va);
   si_resource_reference(&input_buffer, NULL);
   return true;
}

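/* Write the grid size, the variable block size, and the AMD user data
 * SGPRs for NIR compute shaders. The register offsets must match the user
 * SGPR layout computed in si_create_compute_state_async.
 */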
static void si_setup_nir_user_data(struct si_context *sctx, const struct pipe_grid_info *info)
{
   struct si_compute *program = sctx->cs_shader_state.program;
   struct si_shader_selector *sel = &program->sel;
   struct radeon_cmdbuf *cs = sctx->gfx_cs;
   unsigned grid_size_reg = R_00B900_COMPUTE_USER_DATA_0 + 4 * SI_NUM_RESOURCE_SGPRS;
   unsigned block_size_reg = grid_size_reg +
                             /* 12 bytes = 3 dwords. */
                             12 * sel->info.uses_grid_size;
   unsigned cs_user_data_reg = block_size_reg + 12 * program->reads_variable_block_size;

   if (info->indirect) {
      if (sel->info.uses_grid_size) {
         for (unsigned i = 0; i < 3; ++i) {
            si_cp_copy_data(sctx, sctx->gfx_cs, COPY_DATA_REG, NULL, (grid_size_reg >> 2) + i,
                            COPY_DATA_SRC_MEM, si_resource(info->indirect),
                            info->indirect_offset + 4 * i);
         }
      }
   } else {
      if (sel->info.uses_grid_size) {
         radeon_set_sh_reg_seq(cs, grid_size_reg, 3);
         radeon_emit(cs, info->grid[0]);
         radeon_emit(cs, info->grid[1]);
         radeon_emit(cs, info->grid[2]);
      }
      if (program->reads_variable_block_size) {
         radeon_set_sh_reg_seq(cs, block_size_reg, 3);
         radeon_emit(cs, info->block[0]);
         radeon_emit(cs, info->block[1]);
         radeon_emit(cs, info->block[2]);
      }
   }

   if (program->num_cs_user_data_dwords) {
      radeon_set_sh_reg_seq(cs, cs_user_data_reg, program->num_cs_user_data_dwords);
      radeon_emit_array(cs, sctx->cs_user_data, program->num_cs_user_data_dwords);
   }
}

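/* Program the thread counts and resource limits, then emit the
 * DISPATCH_DIRECT or DISPATCH_INDIRECT packet that launches the grid.
 */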
static void si_emit_dispatch_packets(struct si_context *sctx, const struct pipe_grid_info *info)
{
   struct si_screen *sscreen = sctx->screen;
   struct radeon_cmdbuf *cs = sctx->gfx_cs;
   bool render_cond_bit = sctx->render_cond && !sctx->render_cond_force_off;
   unsigned threads_per_threadgroup = info->block[0] * info->block[1] * info->block[2];
   unsigned waves_per_threadgroup =
      DIV_ROUND_UP(threads_per_threadgroup, sscreen->compute_wave_size);
   unsigned threadgroups_per_cu = 1;

   if (sctx->chip_class >= GFX10 && waves_per_threadgroup == 1)
      threadgroups_per_cu = 2;

   radeon_set_sh_reg(
      cs, R_00B854_COMPUTE_RESOURCE_LIMITS,
      ac_get_compute_resource_limits(&sscreen->info, waves_per_threadgroup,
                                     sctx->cs_max_waves_per_sh, threadgroups_per_cu));

   unsigned dispatch_initiator = S_00B800_COMPUTE_SHADER_EN(1) | S_00B800_FORCE_START_AT_000(1) |
                                 /* If the KMD allows it (there is a KMD hw register for it),
                                  * allow launching waves out-of-order. (same as Vulkan) */
                                 S_00B800_ORDER_MODE(sctx->chip_class >= GFX7) |
                                 S_00B800_CS_W32_EN(sscreen->compute_wave_size == 32);

   const uint *last_block = info->last_block;
   bool partial_block_en = last_block[0] || last_block[1] || last_block[2];

   radeon_set_sh_reg_seq(cs, R_00B81C_COMPUTE_NUM_THREAD_X, 3);

   if (partial_block_en) {
      unsigned partial[3];

      /* For dimensions without a partial block, use the full block size, not 0. */
      partial[0] = last_block[0] ? last_block[0] : info->block[0];
      partial[1] = last_block[1] ? last_block[1] : info->block[1];
      partial[2] = last_block[2] ? last_block[2] : info->block[2];

      radeon_emit(
         cs, S_00B81C_NUM_THREAD_FULL(info->block[0]) | S_00B81C_NUM_THREAD_PARTIAL(partial[0]));
      radeon_emit(
         cs, S_00B820_NUM_THREAD_FULL(info->block[1]) | S_00B820_NUM_THREAD_PARTIAL(partial[1]));
      radeon_emit(
         cs, S_00B824_NUM_THREAD_FULL(info->block[2]) | S_00B824_NUM_THREAD_PARTIAL(partial[2]));

      dispatch_initiator |= S_00B800_PARTIAL_TG_EN(1);
   } else {
      radeon_emit(cs, S_00B81C_NUM_THREAD_FULL(info->block[0]));
      radeon_emit(cs, S_00B820_NUM_THREAD_FULL(info->block[1]));
      radeon_emit(cs, S_00B824_NUM_THREAD_FULL(info->block[2]));
   }

   if (info->indirect) {
      uint64_t base_va = si_resource(info->indirect)->gpu_address;

      radeon_add_to_buffer_list(sctx, sctx->gfx_cs, si_resource(info->indirect), RADEON_USAGE_READ,
                                RADEON_PRIO_DRAW_INDIRECT);

      radeon_emit(cs, PKT3(PKT3_SET_BASE, 2, 0) | PKT3_SHADER_TYPE_S(1));
      radeon_emit(cs, 1);
      radeon_emit(cs, base_va);
      radeon_emit(cs, base_va >> 32);

      radeon_emit(cs, PKT3(PKT3_DISPATCH_INDIRECT, 1, render_cond_bit) | PKT3_SHADER_TYPE_S(1));
      radeon_emit(cs, info->indirect_offset);
      radeon_emit(cs, dispatch_initiator);
   } else {
      radeon_emit(cs, PKT3(PKT3_DISPATCH_DIRECT, 3, render_cond_bit) | PKT3_SHADER_TYPE_S(1));
      radeon_emit(cs, info->grid[0]);
      radeon_emit(cs, info->grid[1]);
      radeon_emit(cs, info->grid[2]);
      radeon_emit(cs, dispatch_initiator);
   }
}

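/* pipe_context::launch_grid: flush caches, switch shaders, set up user
 * data and descriptors, then emit the dispatch.
 */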
static void si_launch_grid(struct pipe_context *ctx, const struct pipe_grid_info *info)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_compute *program = sctx->cs_shader_state.program;
   const amd_kernel_code_t *code_object = si_compute_get_code_object(program, info->pc);
   int i;
   /* HW bug workaround when CS threadgroups > 256 threads and async
    * compute isn't used, i.e. only one compute job can run at a time.
    * If async compute is possible, the threadgroup size must be limited
    * to 256 threads on all queues to avoid the bug.
    * Only GFX6 and certain GFX7 chips are affected.
    */
   bool cs_regalloc_hang =
      (sctx->chip_class == GFX6 || sctx->family == CHIP_BONAIRE || sctx->family == CHIP_KABINI) &&
      info->block[0] * info->block[1] * info->block[2] > 256;

   if (cs_regalloc_hang)
      sctx->flags |= SI_CONTEXT_PS_PARTIAL_FLUSH | SI_CONTEXT_CS_PARTIAL_FLUSH;

   if (program->ir_type != PIPE_SHADER_IR_NATIVE && program->shader.compilation_failed)
      return;

   if (sctx->has_graphics) {
      if (sctx->last_num_draw_calls != sctx->num_draw_calls) {
         si_update_fb_dirtiness_after_rendering(sctx);
         sctx->last_num_draw_calls = sctx->num_draw_calls;
      }

      si_decompress_textures(sctx, 1 << PIPE_SHADER_COMPUTE);
   }

   /* Add buffer sizes for memory checking in need_cs_space. */
   si_context_add_resource_size(sctx, &program->shader.bo->b.b);
   /* TODO: add the scratch buffer */

   if (info->indirect) {
      si_context_add_resource_size(sctx, info->indirect);

      /* Indirect buffers use TC L2 on GFX9, but not older hw. */
      if (sctx->chip_class <= GFX8 && si_resource(info->indirect)->TC_L2_dirty) {
         sctx->flags |= SI_CONTEXT_WB_L2;
         si_resource(info->indirect)->TC_L2_dirty = false;
      }
   }

   si_need_gfx_cs_space(sctx);

   /* If we're using a secure context, determine if cs must be secure or not */
   if (unlikely(sctx->ws->ws_is_secure(sctx->ws))) {
      bool secure = si_compute_resources_check_encrypted(sctx);
      if (secure != sctx->ws->cs_is_secure(sctx->gfx_cs)) {
         si_flush_gfx_cs(sctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
         sctx->ws->cs_set_secure(sctx->gfx_cs, secure);
      }
   }

   if (sctx->bo_list_add_all_compute_resources)
      si_compute_resources_add_all_to_bo_list(sctx);

   if (!sctx->cs_shader_state.initialized) {
      si_emit_initial_compute_regs(sctx, sctx->gfx_cs);

      sctx->cs_shader_state.emitted_program = NULL;
      sctx->cs_shader_state.initialized = true;
   }

   if (sctx->flags)
      sctx->emit_cache_flush(sctx);

   if (!si_switch_compute_shader(sctx, program, &program->shader, code_object, info->pc))
      return;

   si_upload_compute_shader_descriptors(sctx);
   si_emit_compute_shader_pointers(sctx);

   if (sctx->has_graphics && si_is_atom_dirty(sctx, &sctx->atoms.s.render_cond)) {
      sctx->atoms.s.render_cond.emit(sctx);
      si_set_atom_dirty(sctx, &sctx->atoms.s.render_cond, false);
   }

   if (program->ir_type == PIPE_SHADER_IR_NATIVE &&
       unlikely(!si_upload_compute_input(sctx, code_object, info)))
      return;

   /* Global buffers */
   for (i = 0; i < program->max_global_buffers; i++) {
      struct si_resource *buffer = si_resource(program->global_buffers[i]);
      if (!buffer) {
         continue;
      }
      radeon_add_to_buffer_list(sctx, sctx->gfx_cs, buffer, RADEON_USAGE_READWRITE,
                                RADEON_PRIO_COMPUTE_GLOBAL);
   }

   if (program->ir_type != PIPE_SHADER_IR_NATIVE)
      si_setup_nir_user_data(sctx, info);

   si_emit_dispatch_packets(sctx, info);

   if (unlikely(sctx->current_saved_cs)) {
      si_trace_emit(sctx);
      si_log_compute_state(sctx, sctx->log);
   }

   sctx->compute_is_busy = true;
   sctx->num_compute_calls++;
   if (sctx->cs_shader_state.uses_scratch)
      sctx->num_spill_compute_calls++;

   if (cs_regalloc_hang)
      sctx->flags |= SI_CONTEXT_CS_PARTIAL_FLUSH;
}

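/* Free a compute program once its reference count reaches zero (see
 * si_compute_reference); drops any pending async compile job first.
 */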
void si_destroy_compute(struct si_compute *program)
{
   struct si_shader_selector *sel = &program->sel;

   if (program->ir_type != PIPE_SHADER_IR_NATIVE) {
      util_queue_drop_job(&sel->screen->shader_compiler_queue, &sel->ready);
      util_queue_fence_destroy(&sel->ready);
   }

   for (unsigned i = 0; i < program->max_global_buffers; i++)
      pipe_resource_reference(&program->global_buffers[i], NULL);
   FREE(program->global_buffers);

   si_shader_destroy(&program->shader);
   ralloc_free(program->sel.nir);
   FREE(program);
}

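/* pipe_context::delete_compute_state */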
static void si_delete_compute_state(struct pipe_context *ctx, void *state)
{
   struct si_compute *program = (struct si_compute *)state;
   struct si_context *sctx = (struct si_context *)ctx;

   if (!state)
      return;

   if (program == sctx->cs_shader_state.program)
      sctx->cs_shader_state.program = NULL;

   if (program == sctx->cs_shader_state.emitted_program)
      sctx->cs_shader_state.emitted_program = NULL;

   si_compute_reference(&program, NULL);
}

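/* pipe_context::set_compute_resources. Left empty; radeonsi binds compute
 * resources through its constant/shader buffer and image descriptor paths
 * instead of pipe_surface bindings.
 */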
static void si_set_compute_resources(struct pipe_context *ctx_, unsigned start, unsigned count,
                                     struct pipe_surface **surfaces)
{
}

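/* Hook up the compute entry points on context creation. */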
void si_init_compute_functions(struct si_context *sctx)
{
   sctx->b.create_compute_state = si_create_compute_state;
   sctx->b.delete_compute_state = si_delete_compute_state;
   sctx->b.bind_compute_state = si_bind_compute_state;
   sctx->b.set_compute_resources = si_set_compute_resources;
   sctx->b.set_global_binding = si_set_global_binding;
   sctx->b.launch_grid = si_launch_grid;
}