/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "tgsi/tgsi_parse.h"
#include "util/u_async_debug.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"

#include "ac_rtld.h"
#include "amd_kernel_code_t.h"
#include "si_build_pm4.h"
#include "si_compute.h"

#define COMPUTE_DBG(sscreen, fmt, args...) \
        do { \
                if ((sscreen->debug_flags & DBG(COMPUTE))) fprintf(stderr, fmt, ##args); \
        } while (0)
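
/* This layout appears to mirror the HSA AQL kernel dispatch packet (an
 * assumption based on the field names and their use in
 * si_setup_user_sgprs_co_v2 below); only the fields written there are
 * meaningful here, the rest stay zeroed. */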
struct dispatch_packet {
        uint16_t header;
        uint16_t setup;
        uint16_t workgroup_size_x;
        uint16_t workgroup_size_y;
        uint16_t workgroup_size_z;
        uint16_t reserved0;
        uint32_t grid_size_x;
        uint32_t grid_size_y;
        uint32_t grid_size_z;
        uint32_t private_segment_size;
        uint32_t group_segment_size;
        uint64_t kernel_object;
        uint64_t kernarg_address;
        uint64_t reserved2;
};

static const amd_kernel_code_t *si_compute_get_code_object(
        const struct si_compute *program,
        uint64_t symbol_offset)
{
        if (!program->use_code_object_v2) {
                return NULL;
        }

        struct ac_rtld_binary rtld;
        if (!ac_rtld_open(&rtld, (struct ac_rtld_open_info){
                        .info = &program->screen->info,
                        .shader_type = MESA_SHADER_COMPUTE,
                        .num_parts = 1,
                        .elf_ptrs = &program->shader.binary.elf_buffer,
                        .elf_sizes = &program->shader.binary.elf_size }))
                return NULL;

        const amd_kernel_code_t *result = NULL;
        const char *text;
        size_t size;
        if (!ac_rtld_get_section_by_name(&rtld, ".text", &text, &size))
                goto out;

        if (symbol_offset + sizeof(amd_kernel_code_t) > size)
                goto out;

        result = (const amd_kernel_code_t *)(text + symbol_offset);

out:
        ac_rtld_close(&rtld);
        return result;
}

static void code_object_to_config(const amd_kernel_code_t *code_object,
                                  struct ac_shader_config *out_config)
{
        uint32_t rsrc1 = code_object->compute_pgm_resource_registers;
        uint32_t rsrc2 = code_object->compute_pgm_resource_registers >> 32;
        out_config->num_sgprs = code_object->wavefront_sgpr_count;
        out_config->num_vgprs = code_object->workitem_vgpr_count;
        out_config->float_mode = G_00B028_FLOAT_MODE(rsrc1);
        out_config->rsrc1 = rsrc1;
        out_config->lds_size = MAX2(out_config->lds_size, G_00B84C_LDS_SIZE(rsrc2));
        out_config->rsrc2 = rsrc2;
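        /* Scratch is sized per wave: each of the 64 lanes in a wavefront gets
         * workitem_private_segment_byte_size bytes, rounded up to a 1 KiB
         * multiple. E.g. 16 bytes of private segment per work-item gives
         * align(16 * 64, 1024) = 1024 bytes per wave. */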
        out_config->scratch_bytes_per_wave =
                align(code_object->workitem_private_segment_byte_size * 64, 1024);
}

/* Asynchronous compute shader compilation. */
static void si_create_compute_state_async(void *job, int thread_index)
{
        struct si_compute *program = (struct si_compute *)job;
        struct si_shader *shader = &program->shader;
        struct si_shader_selector sel;
        struct ac_llvm_compiler *compiler;
        struct pipe_debug_callback *debug = &program->compiler_ctx_state.debug;
        struct si_screen *sscreen = program->screen;

        assert(!debug->debug_message || debug->async);
        assert(thread_index >= 0);
        assert(thread_index < ARRAY_SIZE(sscreen->compiler));
        compiler = &sscreen->compiler[thread_index];

        memset(&sel, 0, sizeof(sel));

        sel.screen = sscreen;

        if (program->ir_type == PIPE_SHADER_IR_TGSI) {
                tgsi_scan_shader(program->ir.tgsi, &sel.info);
                sel.tokens = program->ir.tgsi;
        } else {
                assert(program->ir_type == PIPE_SHADER_IR_NIR);
                sel.nir = program->ir.nir;

                si_nir_opts(sel.nir);
                si_nir_scan_shader(sel.nir, &sel.info);
                si_lower_nir(&sel);
        }

        /* Store the declared LDS size into tgsi_shader_info for the shader
         * cache to include it. */
        sel.info.properties[TGSI_PROPERTY_CS_LOCAL_SIZE] = program->local_size;

        sel.type = PIPE_SHADER_COMPUTE;
        si_get_active_slot_masks(&sel.info,
                                 &program->active_const_and_shader_buffers,
                                 &program->active_samplers_and_images);

        program->shader.selector = &sel;
        program->shader.is_monolithic = true;
        program->uses_grid_size = sel.info.uses_grid_size;
        program->uses_bindless_samplers = sel.info.uses_bindless_samplers;
        program->uses_bindless_images = sel.info.uses_bindless_images;
        program->reads_variable_block_size =
                sel.info.uses_block_size &&
                sel.info.properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] == 0;
        program->num_cs_user_data_dwords =
                sel.info.properties[TGSI_PROPERTY_CS_USER_DATA_DWORDS];

        void *ir_binary = si_get_ir_binary(&sel);

        /* Try to load the shader from the shader cache. */
        mtx_lock(&sscreen->shader_cache_mutex);

        if (ir_binary &&
            si_shader_cache_load_shader(sscreen, ir_binary, shader)) {
                mtx_unlock(&sscreen->shader_cache_mutex);

                si_shader_dump_stats_for_shader_db(sscreen, shader, debug);
                si_shader_dump(sscreen, shader, debug, stderr, true);

                if (!si_shader_binary_upload(sscreen, shader, 0))
                        program->shader.compilation_failed = true;
        } else {
                mtx_unlock(&sscreen->shader_cache_mutex);

                if (!si_shader_create(sscreen, compiler, &program->shader, debug)) {
                        program->shader.compilation_failed = true;

                        if (program->ir_type == PIPE_SHADER_IR_TGSI)
                                FREE(program->ir.tgsi);
                        program->shader.selector = NULL;
                        return;
                }

                bool scratch_enabled = shader->config.scratch_bytes_per_wave > 0;
                unsigned user_sgprs = SI_NUM_RESOURCE_SGPRS +
                                      (sel.info.uses_grid_size ? 3 : 0) +
                                      (program->reads_variable_block_size ? 3 : 0) +
                                      program->num_cs_user_data_dwords;
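
                /* COMPUTE_PGM_RSRC1/RSRC2 encode register counts in hardware
                 * granularity: the VGPRS field is in units of 4 VGPRs and the
                 * SGPRS field in units of 8 SGPRs, hence (count - 1) / 4 and
                 * (count - 1) / 8 below. */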
                shader->config.rsrc1 =
                        S_00B848_VGPRS((shader->config.num_vgprs - 1) / 4) |
                        S_00B848_DX10_CLAMP(1) |
                        S_00B848_MEM_ORDERED(sscreen->info.chip_class >= GFX10) |
                        S_00B848_FLOAT_MODE(shader->config.float_mode);

                if (program->screen->info.chip_class < GFX10) {
                        shader->config.rsrc1 |=
                                S_00B848_SGPRS((shader->config.num_sgprs - 1) / 8);
                }

                shader->config.rsrc2 =
                        S_00B84C_USER_SGPR(user_sgprs) |
                        S_00B84C_SCRATCH_EN(scratch_enabled) |
                        S_00B84C_TGID_X_EN(sel.info.uses_block_id[0]) |
                        S_00B84C_TGID_Y_EN(sel.info.uses_block_id[1]) |
                        S_00B84C_TGID_Z_EN(sel.info.uses_block_id[2]) |
                        S_00B84C_TIDIG_COMP_CNT(sel.info.uses_thread_id[2] ? 2 :
                                                sel.info.uses_thread_id[1] ? 1 : 0) |
                        S_00B84C_LDS_SIZE(shader->config.lds_size);

                mtx_lock(&sscreen->shader_cache_mutex);
                if (!si_shader_cache_insert_shader(sscreen, ir_binary, shader, true))
                        FREE(ir_binary);
                mtx_unlock(&sscreen->shader_cache_mutex);
        }

        if (program->ir_type == PIPE_SHADER_IR_TGSI)
                FREE(program->ir.tgsi);

        program->shader.selector = NULL;
}
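
/* Create the compute state object. TGSI and NIR shaders are compiled
 * asynchronously on a compiler thread; PIPE_SHADER_IR_NATIVE inputs are a
 * pre-compiled code object whose config is parsed from its
 * amd_kernel_code_t header instead. */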
static void *si_create_compute_state(
        struct pipe_context *ctx,
        const struct pipe_compute_state *cso)
{
        struct si_context *sctx = (struct si_context *)ctx;
        struct si_screen *sscreen = (struct si_screen *)ctx->screen;
        struct si_compute *program = CALLOC_STRUCT(si_compute);

        pipe_reference_init(&program->reference, 1);
        program->screen = (struct si_screen *)ctx->screen;
        program->ir_type = cso->ir_type;
        program->local_size = cso->req_local_mem;
        program->private_size = cso->req_private_mem;
        program->input_size = cso->req_input_mem;
        program->use_code_object_v2 = cso->ir_type == PIPE_SHADER_IR_NATIVE;

        if (cso->ir_type != PIPE_SHADER_IR_NATIVE) {
                if (cso->ir_type == PIPE_SHADER_IR_TGSI) {
                        program->ir.tgsi = tgsi_dup_tokens(cso->prog);
                        if (!program->ir.tgsi) {
                                FREE(program);
                                return NULL;
                        }
                } else {
                        assert(cso->ir_type == PIPE_SHADER_IR_NIR);
                        program->ir.nir = (struct nir_shader *) cso->prog;
                }

                program->compiler_ctx_state.debug = sctx->debug;
                program->compiler_ctx_state.is_debug_context = sctx->is_debug;
                p_atomic_inc(&sscreen->num_shaders_created);

                si_schedule_initial_compile(sctx, PIPE_SHADER_COMPUTE,
                                            &program->ready,
                                            &program->compiler_ctx_state,
                                            program, si_create_compute_state_async);
        } else {
                const struct pipe_llvm_program_header *header;
                const char *code;
                header = cso->prog;
                code = cso->prog + sizeof(struct pipe_llvm_program_header);

                program->shader.binary.elf_size = header->num_bytes;
                program->shader.binary.elf_buffer = malloc(header->num_bytes);
                if (!program->shader.binary.elf_buffer) {
                        FREE(program);
                        return NULL;
                }
                memcpy((void *)program->shader.binary.elf_buffer, code, header->num_bytes);

                const amd_kernel_code_t *code_object =
                        si_compute_get_code_object(program, 0);
                code_object_to_config(code_object, &program->shader.config);

                si_shader_dump(sctx->screen, &program->shader, &sctx->debug, stderr, true);
                if (!si_shader_binary_upload(sctx->screen, &program->shader, 0)) {
                        fprintf(stderr, "LLVM failed to upload shader\n");
                        free((void *)program->shader.binary.elf_buffer);
                        FREE(program);
                        return NULL;
                }
        }

        return program;
}

static void si_bind_compute_state(struct pipe_context *ctx, void *state)
{
        struct si_context *sctx = (struct si_context *)ctx;
        struct si_compute *program = (struct si_compute *)state;

        sctx->cs_shader_state.program = program;
        if (!program)
                return;

        /* Wait because we need active slot usage masks. */
        if (program->ir_type != PIPE_SHADER_IR_NATIVE)
                util_queue_fence_wait(&program->ready);

        si_set_active_descriptors(sctx,
                                  SI_DESCS_FIRST_COMPUTE +
                                  SI_SHADER_DESCS_CONST_AND_SHADER_BUFFERS,
                                  program->active_const_and_shader_buffers);
        si_set_active_descriptors(sctx,
                                  SI_DESCS_FIRST_COMPUTE +
                                  SI_SHADER_DESCS_SAMPLERS_AND_IMAGES,
                                  program->active_samplers_and_images);
}
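
/* Bind global (OpenCL) buffers. Each entry in "handles" initially holds a
 * little-endian 32-bit offset into its buffer; it is rewritten in place
 * with the little-endian 64-bit GPU virtual address of that location, so
 * the kernel can consume the pointer directly. */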
static void si_set_global_binding(
        struct pipe_context *ctx, unsigned first, unsigned n,
        struct pipe_resource **resources,
        uint32_t **handles)
{
        unsigned i;
        struct si_context *sctx = (struct si_context *)ctx;
        struct si_compute *program = sctx->cs_shader_state.program;

        assert(first + n <= MAX_GLOBAL_BUFFERS);

        if (!resources) {
                for (i = 0; i < n; i++) {
                        pipe_resource_reference(&program->global_buffers[first + i], NULL);
                }
                return;
        }

        for (i = 0; i < n; i++) {
                uint64_t va;
                uint32_t offset;
                pipe_resource_reference(&program->global_buffers[first + i], resources[i]);
                va = si_resource(resources[i])->gpu_address;
                offset = util_le32_to_cpu(*handles[i]);
                va += offset;
                va = util_cpu_to_le64(va);
                memcpy(handles[i], &va, sizeof(va));
        }
}

void si_emit_initial_compute_regs(struct si_context *sctx, struct radeon_cmdbuf *cs)
{
        uint64_t bc_va;

        radeon_set_sh_reg_seq(cs, R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE0, 2);
        /* R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE0 / SE1,
         * renamed COMPUTE_DESTINATION_EN_SEn on gfx10. */
        radeon_emit(cs, S_00B858_SH0_CU_EN(0xffff) | S_00B858_SH1_CU_EN(0xffff));
        radeon_emit(cs, S_00B858_SH0_CU_EN(0xffff) | S_00B858_SH1_CU_EN(0xffff));

        if (sctx->chip_class >= GFX7) {
                /* Also set R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE2 / SE3 */
                radeon_set_sh_reg_seq(cs,
                                      R_00B864_COMPUTE_STATIC_THREAD_MGMT_SE2, 2);
                radeon_emit(cs, S_00B858_SH0_CU_EN(0xffff) |
                                S_00B858_SH1_CU_EN(0xffff));
                radeon_emit(cs, S_00B858_SH0_CU_EN(0xffff) |
                                S_00B858_SH1_CU_EN(0xffff));
        }

        if (sctx->chip_class >= GFX10)
                radeon_set_sh_reg(cs, R_00B8A0_COMPUTE_PGM_RSRC3, 0);

        /* This register has been moved to R_00CD20_COMPUTE_MAX_WAVE_ID
         * and is now per pipe, so it should be handled in the
         * kernel if we want to use something other than the default value,
         * which is now 0x22f.
         */
        if (sctx->chip_class <= GFX6) {
                /* XXX: This should be:
                 * (number of compute units) * 4 * (waves per simd) - 1 */
                radeon_set_sh_reg(cs, R_00B82C_COMPUTE_MAX_WAVE_ID,
                                  0x190 /* Default value */);
        }

        /* Set the pointer to border colors. */
        bc_va = sctx->border_color_buffer->gpu_address;

        if (sctx->chip_class >= GFX7) {
                radeon_set_uconfig_reg_seq(cs, R_030E00_TA_CS_BC_BASE_ADDR, 2);
                radeon_emit(cs, bc_va >> 8);  /* R_030E00_TA_CS_BC_BASE_ADDR */
                radeon_emit(cs, S_030E04_ADDRESS(bc_va >> 40)); /* R_030E04_TA_CS_BC_BASE_ADDR_HI */
        } else {
                if (sctx->screen->info.si_TA_CS_BC_BASE_ADDR_allowed) {
                        radeon_set_config_reg(cs, R_00950C_TA_CS_BC_BASE_ADDR,
                                              bc_va >> 8);
                }
        }
}
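
/* Make sure the compute scratch buffer can hold
 * config->scratch_bytes_per_wave * scratch_waves bytes, reallocating it and
 * re-uploading the shader (so the new scratch address can be patched in)
 * when it has to grow. */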
static bool si_setup_compute_scratch_buffer(struct si_context *sctx,
                                            struct si_shader *shader,
                                            struct ac_shader_config *config)
{
        uint64_t scratch_bo_size, scratch_needed;
        scratch_bo_size = 0;
        scratch_needed = config->scratch_bytes_per_wave * sctx->scratch_waves;
        if (sctx->compute_scratch_buffer)
                scratch_bo_size = sctx->compute_scratch_buffer->b.b.width0;

        if (scratch_bo_size < scratch_needed) {
                si_resource_reference(&sctx->compute_scratch_buffer, NULL);

                sctx->compute_scratch_buffer =
                        si_aligned_buffer_create(&sctx->screen->b,
                                                 SI_RESOURCE_FLAG_UNMAPPABLE,
                                                 PIPE_USAGE_DEFAULT,
                                                 scratch_needed, 256);

                if (!sctx->compute_scratch_buffer)
                        return false;
        }

        if (sctx->compute_scratch_buffer != shader->scratch_bo && scratch_needed) {
                uint64_t scratch_va = sctx->compute_scratch_buffer->gpu_address;

                if (!si_shader_binary_upload(sctx->screen, shader, scratch_va))
                        return false;

                si_resource_reference(&shader->scratch_bo,
                                      sctx->compute_scratch_buffer);
        }

        return true;
}
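
/* Emit the state for the currently bound compute shader: program address,
 * RSRC1/RSRC2 and the scratch (TMPRING) setup, skipping the emit when the
 * same program and offset were emitted last. For native code objects the
 * config is reconstructed from the amd_kernel_code_t header and the LDS
 * size is patched into RSRC2 here. */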
static bool si_switch_compute_shader(struct si_context *sctx,
                                     struct si_compute *program,
                                     struct si_shader *shader,
                                     const amd_kernel_code_t *code_object,
                                     unsigned offset)
{
        struct radeon_cmdbuf *cs = sctx->gfx_cs;
        struct ac_shader_config inline_config = {0};
        struct ac_shader_config *config;
        uint64_t shader_va;

        if (sctx->cs_shader_state.emitted_program == program &&
            sctx->cs_shader_state.offset == offset)
                return true;

        if (program->ir_type != PIPE_SHADER_IR_NATIVE) {
                config = &shader->config;
        } else {
                unsigned lds_blocks;

                config = &inline_config;
                code_object_to_config(code_object, config);

                lds_blocks = config->lds_size;
                /* XXX: We are over allocating LDS. For GFX6, the shader reports
                 * LDS in blocks of 256 bytes, so if there are 4 bytes lds
                 * allocated in the shader and 4 bytes allocated by the state
                 * tracker, then we will set LDS_SIZE to 512 bytes rather than 256.
                 */
                if (sctx->chip_class <= GFX6) {
                        lds_blocks += align(program->local_size, 256) >> 8;
                } else {
                        lds_blocks += align(program->local_size, 512) >> 9;
                }

                /* TODO: use si_multiwave_lds_size_workaround */
                assert(lds_blocks <= 0xFF);

                config->rsrc2 &= C_00B84C_LDS_SIZE;
                config->rsrc2 |= S_00B84C_LDS_SIZE(lds_blocks);
        }

        if (!si_setup_compute_scratch_buffer(sctx, shader, config))
                return false;

        if (shader->scratch_bo) {
                COMPUTE_DBG(sctx->screen, "Waves: %u; Scratch per wave: %u bytes; "
                            "Total Scratch: %u bytes\n", sctx->scratch_waves,
                            config->scratch_bytes_per_wave,
                            config->scratch_bytes_per_wave *
                            sctx->scratch_waves);

                radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
                                          shader->scratch_bo, RADEON_USAGE_READWRITE,
                                          RADEON_PRIO_SCRATCH_BUFFER);
        }

        /* Prefetch the compute shader to TC L2.
         *
         * We should also prefetch graphics shaders if a compute dispatch was
         * the last command, and the compute shader if a draw call was the last
         * command. However, that would add more complexity and we're likely
         * to get a shader state change in that case anyway.
         */
        if (sctx->chip_class >= GFX7) {
                cik_prefetch_TC_L2_async(sctx, &program->shader.bo->b.b,
                                         0, program->shader.bo->b.b.width0);
        }

        shader_va = shader->bo->gpu_address + offset;
        if (program->use_code_object_v2) {
                /* Shader code is placed after the amd_kernel_code_t
                 * struct. */
                shader_va += sizeof(amd_kernel_code_t);
        }

        radeon_add_to_buffer_list(sctx, sctx->gfx_cs, shader->bo,
                                  RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);

        radeon_set_sh_reg_seq(cs, R_00B830_COMPUTE_PGM_LO, 2);
        radeon_emit(cs, shader_va >> 8);
        radeon_emit(cs, S_00B834_DATA(shader_va >> 40));

        radeon_set_sh_reg_seq(cs, R_00B848_COMPUTE_PGM_RSRC1, 2);
        radeon_emit(cs, config->rsrc1);
        radeon_emit(cs, config->rsrc2);

        COMPUTE_DBG(sctx->screen, "COMPUTE_PGM_RSRC1: 0x%08x "
                    "COMPUTE_PGM_RSRC2: 0x%08x\n", config->rsrc1, config->rsrc2);

        radeon_set_sh_reg(cs, R_00B860_COMPUTE_TMPRING_SIZE,
                          S_00B860_WAVES(sctx->scratch_waves)
                          | S_00B860_WAVESIZE(config->scratch_bytes_per_wave >> 10));

        sctx->cs_shader_state.emitted_program = program;
        sctx->cs_shader_state.offset = offset;
        sctx->cs_shader_state.uses_scratch =
                config->scratch_bytes_per_wave != 0;

        return true;
}
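
/* Build a 4-dword buffer resource descriptor for the scratch ring (base
 * address, swizzle enable, num_records and the stride/format bits) and
 * write it to four consecutive COMPUTE_USER_DATA SGPRs, for code object v2
 * kernels that use a private segment. */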
static void setup_scratch_rsrc_user_sgprs(struct si_context *sctx,
                                          const amd_kernel_code_t *code_object,
                                          unsigned user_sgpr)
{
        struct radeon_cmdbuf *cs = sctx->gfx_cs;
        uint64_t scratch_va = sctx->compute_scratch_buffer->gpu_address;

        unsigned max_private_element_size = AMD_HSA_BITS_GET(
                        code_object->code_properties,
                        AMD_CODE_PROPERTY_PRIVATE_ELEMENT_SIZE);

        uint32_t scratch_dword0 = scratch_va & 0xffffffff;
        uint32_t scratch_dword1 =
                S_008F04_BASE_ADDRESS_HI(scratch_va >> 32) |
                S_008F04_SWIZZLE_ENABLE(1);

        /* Disable address clamping */
        uint32_t scratch_dword2 = 0xffffffff;
        uint32_t scratch_dword3 =
                S_008F0C_INDEX_STRIDE(3) |
                S_008F0C_ADD_TID_ENABLE(1);

        if (sctx->chip_class >= GFX9) {
                assert(max_private_element_size == 1); /* always 4 bytes on GFX9 */
        } else {
                scratch_dword3 |= S_008F0C_ELEMENT_SIZE(max_private_element_size);

                if (sctx->chip_class < GFX8) {
                        /* BUF_DATA_FORMAT is ignored, but it cannot be
                         * BUF_DATA_FORMAT_INVALID. */
                        scratch_dword3 |=
                                S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_8);
                }
        }

        radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0 +
                                  (user_sgpr * 4), 4);
        radeon_emit(cs, scratch_dword0);
        radeon_emit(cs, scratch_dword1);
        radeon_emit(cs, scratch_dword2);
        radeon_emit(cs, scratch_dword3);
}
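
/* Fill the user SGPRs a code object v2 kernel asks for via its
 * code_properties bits, in the order this function emits them: an optional
 * 4-dword private segment (scratch) descriptor, an optional 2-dword
 * dispatch packet pointer, an optional 2-dword kernarg pointer, and one
 * optional SGPR per enabled grid workgroup count. */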
static void si_setup_user_sgprs_co_v2(struct si_context *sctx,
                                      const amd_kernel_code_t *code_object,
                                      const struct pipe_grid_info *info,
                                      uint64_t kernel_args_va)
{
        struct si_compute *program = sctx->cs_shader_state.program;
        struct radeon_cmdbuf *cs = sctx->gfx_cs;

        static const enum amd_code_property_mask_t workgroup_count_masks[] = {
                AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_X,
                AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Y,
                AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Z
        };

        unsigned i, user_sgpr = 0;
        if (AMD_HSA_BITS_GET(code_object->code_properties,
                        AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER)) {
                if (code_object->workitem_private_segment_byte_size > 0) {
                        setup_scratch_rsrc_user_sgprs(sctx, code_object,
                                                      user_sgpr);
                }
                user_sgpr += 4;
        }

        if (AMD_HSA_BITS_GET(code_object->code_properties,
                        AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR)) {
                struct dispatch_packet dispatch;
                unsigned dispatch_offset;
                struct si_resource *dispatch_buf = NULL;
                uint64_t dispatch_va;

                /* Upload dispatch ptr */
                memset(&dispatch, 0, sizeof(dispatch));

                dispatch.workgroup_size_x = util_cpu_to_le16(info->block[0]);
                dispatch.workgroup_size_y = util_cpu_to_le16(info->block[1]);
                dispatch.workgroup_size_z = util_cpu_to_le16(info->block[2]);

                dispatch.grid_size_x = util_cpu_to_le32(info->grid[0] * info->block[0]);
                dispatch.grid_size_y = util_cpu_to_le32(info->grid[1] * info->block[1]);
                dispatch.grid_size_z = util_cpu_to_le32(info->grid[2] * info->block[2]);

                dispatch.private_segment_size = util_cpu_to_le32(program->private_size);
                dispatch.group_segment_size = util_cpu_to_le32(program->local_size);

                dispatch.kernarg_address = util_cpu_to_le64(kernel_args_va);

                u_upload_data(sctx->b.const_uploader, 0, sizeof(dispatch),
                              256, &dispatch, &dispatch_offset,
                              (struct pipe_resource **)&dispatch_buf);

                if (!dispatch_buf) {
                        fprintf(stderr, "Error: Failed to allocate dispatch "
                                        "packet.");
                }
                radeon_add_to_buffer_list(sctx, sctx->gfx_cs, dispatch_buf,
                                          RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER);

                dispatch_va = dispatch_buf->gpu_address + dispatch_offset;

                radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0 +
                                          (user_sgpr * 4), 2);
                radeon_emit(cs, dispatch_va);
                radeon_emit(cs, S_008F04_BASE_ADDRESS_HI(dispatch_va >> 32) |
                                S_008F04_STRIDE(0));

                si_resource_reference(&dispatch_buf, NULL);
                user_sgpr += 2;
        }

        if (AMD_HSA_BITS_GET(code_object->code_properties,
                        AMD_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR)) {
                radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0 +
                                          (user_sgpr * 4), 2);
                radeon_emit(cs, kernel_args_va);
                radeon_emit(cs, S_008F04_BASE_ADDRESS_HI(kernel_args_va >> 32) |
                                S_008F04_STRIDE(0));
                user_sgpr += 2;
        }

        for (i = 0; i < 3 && user_sgpr < 16; i++) {
                if (code_object->code_properties & workgroup_count_masks[i]) {
                        radeon_set_sh_reg_seq(cs,
                                              R_00B900_COMPUTE_USER_DATA_0 +
                                              (user_sgpr * 4), 1);
                        radeon_emit(cs, info->grid[i]);
                        user_sgpr += 1;
                }
        }
}
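
/* Upload kernel arguments to a constant buffer. For non-code-object-v2
 * shaders, a 36-byte (9-dword) prefix is prepended: dwords 0-2 hold the
 * grid size in blocks, dwords 3-5 the global size in threads (grid *
 * block), and dwords 6-8 the block size, matching the loop below. */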
static bool si_upload_compute_input(struct si_context *sctx,
                                    const amd_kernel_code_t *code_object,
                                    const struct pipe_grid_info *info)
{
        struct radeon_cmdbuf *cs = sctx->gfx_cs;
        struct si_compute *program = sctx->cs_shader_state.program;
        struct si_resource *input_buffer = NULL;
        unsigned kernel_args_size;
        unsigned num_work_size_bytes = program->use_code_object_v2 ? 0 : 36;
        uint32_t kernel_args_offset = 0;
        uint32_t *kernel_args;
        void *kernel_args_ptr;
        uint64_t kernel_args_va;
        unsigned i;

        /* The extra num_work_size_bytes are for work group / work item size information */
        kernel_args_size = program->input_size + num_work_size_bytes;

        u_upload_alloc(sctx->b.const_uploader, 0, kernel_args_size,
                       sctx->screen->info.tcc_cache_line_size,
                       &kernel_args_offset,
                       (struct pipe_resource **)&input_buffer, &kernel_args_ptr);

        if (unlikely(!kernel_args_ptr))
                return false;

        kernel_args = (uint32_t *)kernel_args_ptr;
        kernel_args_va = input_buffer->gpu_address + kernel_args_offset;

        if (!code_object) {
                for (i = 0; i < 3; i++) {
                        kernel_args[i] = util_cpu_to_le32(info->grid[i]);
                        kernel_args[i + 3] = util_cpu_to_le32(info->grid[i] * info->block[i]);
                        kernel_args[i + 6] = util_cpu_to_le32(info->block[i]);
                }
        }

        memcpy(kernel_args + (num_work_size_bytes / 4), info->input,
               program->input_size);

        for (i = 0; i < (kernel_args_size / 4); i++) {
                COMPUTE_DBG(sctx->screen, "input %u : %u\n", i,
                            kernel_args[i]);
        }

        radeon_add_to_buffer_list(sctx, sctx->gfx_cs, input_buffer,
                                  RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER);

        if (code_object) {
                si_setup_user_sgprs_co_v2(sctx, code_object, info, kernel_args_va);
        } else {
                radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0, 2);
                radeon_emit(cs, kernel_args_va);
                radeon_emit(cs, S_008F04_BASE_ADDRESS_HI(kernel_args_va >> 32) |
                                S_008F04_STRIDE(0));
        }

        si_resource_reference(&input_buffer, NULL);

        return true;
}
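
/* Write the TGSI/NIR user SGPRs that follow the resource SGPRs: an
 * optional 3-dword grid size, an optional 3-dword variable block size,
 * then num_cs_user_data_dwords of user data. For indirect dispatches the
 * grid size is copied from the indirect buffer by the CP instead. */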
static void si_setup_tgsi_user_data(struct si_context *sctx,
                                    const struct pipe_grid_info *info)
{
        struct si_compute *program = sctx->cs_shader_state.program;
        struct radeon_cmdbuf *cs = sctx->gfx_cs;
        unsigned grid_size_reg = R_00B900_COMPUTE_USER_DATA_0 +
                                 4 * SI_NUM_RESOURCE_SGPRS;
        unsigned block_size_reg = grid_size_reg +
                                 /* 12 bytes = 3 dwords. */
                                 12 * program->uses_grid_size;
        unsigned cs_user_data_reg = block_size_reg +
                                    12 * program->reads_variable_block_size;

        if (info->indirect) {
                if (program->uses_grid_size) {
                        for (unsigned i = 0; i < 3; ++i) {
                                si_cp_copy_data(sctx, sctx->gfx_cs,
                                                COPY_DATA_REG, NULL, (grid_size_reg >> 2) + i,
                                                COPY_DATA_SRC_MEM, si_resource(info->indirect),
                                                info->indirect_offset + 4 * i);
                        }
                }
        } else {
                if (program->uses_grid_size) {
                        radeon_set_sh_reg_seq(cs, grid_size_reg, 3);
                        radeon_emit(cs, info->grid[0]);
                        radeon_emit(cs, info->grid[1]);
                        radeon_emit(cs, info->grid[2]);
                }
                if (program->reads_variable_block_size) {
                        radeon_set_sh_reg_seq(cs, block_size_reg, 3);
                        radeon_emit(cs, info->block[0]);
                        radeon_emit(cs, info->block[1]);
                        radeon_emit(cs, info->block[2]);
                }
        }

        if (program->num_cs_user_data_dwords) {
                radeon_set_sh_reg_seq(cs, cs_user_data_reg, program->num_cs_user_data_dwords);
                radeon_emit_array(cs, sctx->cs_user_data, program->num_cs_user_data_dwords);
        }
}
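
/* Compute COMPUTE_RESOURCE_LIMITS. waves_per_threadgroup counts waves of
 * 64 threads, e.g. a 16x16x1 block is 256 threads = 4 waves, which allows
 * SIMD_DEST_CNTL to spread the threadgroup evenly across the 4 SIMDs of a
 * CU. */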
unsigned si_get_compute_resource_limits(struct si_screen *sscreen,
                                        unsigned waves_per_threadgroup,
                                        unsigned max_waves_per_sh,
                                        unsigned threadgroups_per_cu)
{
        unsigned compute_resource_limits =
                S_00B854_SIMD_DEST_CNTL(waves_per_threadgroup % 4 == 0);

        if (sscreen->info.chip_class >= GFX7) {
                unsigned num_cu_per_se = sscreen->info.num_good_compute_units /
                                         sscreen->info.max_se;

                /* Force even distribution on all SIMDs in CU if the workgroup
                 * size is 64. This has shown some good improvements if # of CUs
                 * per SE is not a multiple of 4.
                 */
                if (num_cu_per_se % 4 && waves_per_threadgroup == 1)
                        compute_resource_limits |= S_00B854_FORCE_SIMD_DIST(1);

                assert(threadgroups_per_cu >= 1 && threadgroups_per_cu <= 8);
                compute_resource_limits |= S_00B854_WAVES_PER_SH(max_waves_per_sh) |
                                           S_00B854_CU_GROUP_COUNT(threadgroups_per_cu - 1);
        } else {
                if (max_waves_per_sh) {
                        unsigned limit_div16 = DIV_ROUND_UP(max_waves_per_sh, 16);
                        compute_resource_limits |= S_00B854_WAVES_PER_SH_SI(limit_div16);
                }
        }

        return compute_resource_limits;
}
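
/* Emit the dispatch itself: resource limits, NUM_THREAD_X/Y/Z (using
 * NUM_THREAD_PARTIAL when info->last_block describes a ragged edge of the
 * grid), and the DISPATCH_DIRECT or DISPATCH_INDIRECT packet. */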
static void si_emit_dispatch_packets(struct si_context *sctx,
                                     const struct pipe_grid_info *info)
{
        struct si_screen *sscreen = sctx->screen;
        struct radeon_cmdbuf *cs = sctx->gfx_cs;
        bool render_cond_bit = sctx->render_cond && !sctx->render_cond_force_off;
        unsigned waves_per_threadgroup =
                DIV_ROUND_UP(info->block[0] * info->block[1] * info->block[2], 64);

        radeon_set_sh_reg(cs, R_00B854_COMPUTE_RESOURCE_LIMITS,
                          si_get_compute_resource_limits(sscreen, waves_per_threadgroup,
                                                         sctx->cs_max_waves_per_sh, 1));

        unsigned dispatch_initiator =
                S_00B800_COMPUTE_SHADER_EN(1) |
                S_00B800_FORCE_START_AT_000(1) |
                /* If the KMD allows it (there is a KMD hw register for it),
                 * allow launching waves out-of-order. (same as Vulkan) */
                S_00B800_ORDER_MODE(sctx->chip_class >= GFX7);

        const uint *last_block = info->last_block;
        bool partial_block_en = last_block[0] || last_block[1] || last_block[2];

        radeon_set_sh_reg_seq(cs, R_00B81C_COMPUTE_NUM_THREAD_X, 3);

        if (partial_block_en) {
                unsigned partial[3];

                /* If no partial_block, these should be an entire block size, not 0. */
                partial[0] = last_block[0] ? last_block[0] : info->block[0];
                partial[1] = last_block[1] ? last_block[1] : info->block[1];
                partial[2] = last_block[2] ? last_block[2] : info->block[2];

                radeon_emit(cs, S_00B81C_NUM_THREAD_FULL(info->block[0]) |
                                S_00B81C_NUM_THREAD_PARTIAL(partial[0]));
                radeon_emit(cs, S_00B820_NUM_THREAD_FULL(info->block[1]) |
                                S_00B820_NUM_THREAD_PARTIAL(partial[1]));
                radeon_emit(cs, S_00B824_NUM_THREAD_FULL(info->block[2]) |
                                S_00B824_NUM_THREAD_PARTIAL(partial[2]));

                dispatch_initiator |= S_00B800_PARTIAL_TG_EN(1);
        } else {
                radeon_emit(cs, S_00B81C_NUM_THREAD_FULL(info->block[0]));
                radeon_emit(cs, S_00B820_NUM_THREAD_FULL(info->block[1]));
                radeon_emit(cs, S_00B824_NUM_THREAD_FULL(info->block[2]));
        }

        if (info->indirect) {
                uint64_t base_va = si_resource(info->indirect)->gpu_address;

                radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
                                          si_resource(info->indirect),
                                          RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);

                radeon_emit(cs, PKT3(PKT3_SET_BASE, 2, 0) |
                                PKT3_SHADER_TYPE_S(1));
                radeon_emit(cs, 1);
                radeon_emit(cs, base_va);
                radeon_emit(cs, base_va >> 32);

                radeon_emit(cs, PKT3(PKT3_DISPATCH_INDIRECT, 1, render_cond_bit) |
                                PKT3_SHADER_TYPE_S(1));
                radeon_emit(cs, info->indirect_offset);
                radeon_emit(cs, dispatch_initiator);
        } else {
                radeon_emit(cs, PKT3(PKT3_DISPATCH_DIRECT, 3, render_cond_bit) |
                                PKT3_SHADER_TYPE_S(1));
                radeon_emit(cs, info->grid[0]);
                radeon_emit(cs, info->grid[1]);
                radeon_emit(cs, info->grid[2]);
                radeon_emit(cs, dispatch_initiator);
        }
}
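
/* Top-level compute dispatch: flush and decompress as needed, switch the
 * shader, upload descriptors and kernel inputs, set up user SGPRs, then
 * emit the dispatch packets. */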
static void si_launch_grid(
                struct pipe_context *ctx, const struct pipe_grid_info *info)
{
        struct si_context *sctx = (struct si_context *)ctx;
        struct si_compute *program = sctx->cs_shader_state.program;
        const amd_kernel_code_t *code_object =
                si_compute_get_code_object(program, info->pc);
        int i;
        /* HW bug workaround when CS threadgroups > 256 threads and async
         * compute isn't used, i.e. only one compute job can run at a time.
         * If async compute is possible, the threadgroup size must be limited
         * to 256 threads on all queues to avoid the bug.
         * Only GFX6 and certain GFX7 chips are affected.
         */
        bool cs_regalloc_hang =
                (sctx->chip_class == GFX6 ||
                 sctx->family == CHIP_BONAIRE ||
                 sctx->family == CHIP_KABINI) &&
                info->block[0] * info->block[1] * info->block[2] > 256;

        if (cs_regalloc_hang)
                sctx->flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
                               SI_CONTEXT_CS_PARTIAL_FLUSH;

        if (program->ir_type != PIPE_SHADER_IR_NATIVE &&
            program->shader.compilation_failed)
                return;

        if (sctx->has_graphics) {
                if (sctx->last_num_draw_calls != sctx->num_draw_calls) {
                        si_update_fb_dirtiness_after_rendering(sctx);
                        sctx->last_num_draw_calls = sctx->num_draw_calls;
                }

                si_decompress_textures(sctx, 1 << PIPE_SHADER_COMPUTE);
        }

        /* Add buffer sizes for memory checking in need_cs_space. */
        si_context_add_resource_size(sctx, &program->shader.bo->b.b);
        /* TODO: add the scratch buffer */

        if (info->indirect) {
                si_context_add_resource_size(sctx, info->indirect);

                /* Indirect buffers use TC L2 on GFX9, but not older hw. */
                if (sctx->chip_class <= GFX8 &&
                    si_resource(info->indirect)->TC_L2_dirty) {
                        sctx->flags |= SI_CONTEXT_WB_L2;
                        si_resource(info->indirect)->TC_L2_dirty = false;
                }
        }

        si_need_gfx_cs_space(sctx);

        if (sctx->bo_list_add_all_compute_resources)
                si_compute_resources_add_all_to_bo_list(sctx);

        if (!sctx->cs_shader_state.initialized) {
                si_emit_initial_compute_regs(sctx, sctx->gfx_cs);

                sctx->cs_shader_state.emitted_program = NULL;
                sctx->cs_shader_state.initialized = true;
        }

        if (sctx->flags)
                sctx->emit_cache_flush(sctx);

        if (!si_switch_compute_shader(sctx, program, &program->shader,
                                      code_object, info->pc))
                return;

        si_upload_compute_shader_descriptors(sctx);
        si_emit_compute_shader_pointers(sctx);

        if (sctx->has_graphics &&
            si_is_atom_dirty(sctx, &sctx->atoms.s.render_cond)) {
                sctx->atoms.s.render_cond.emit(sctx);
                si_set_atom_dirty(sctx, &sctx->atoms.s.render_cond, false);
        }

        if ((program->input_size ||
             program->ir_type == PIPE_SHADER_IR_NATIVE) &&
            unlikely(!si_upload_compute_input(sctx, code_object, info))) {
                return;
        }

        /* Global buffers */
        for (i = 0; i < MAX_GLOBAL_BUFFERS; i++) {
                struct si_resource *buffer =
                        si_resource(program->global_buffers[i]);
                if (!buffer) {
                        continue;
                }
                radeon_add_to_buffer_list(sctx, sctx->gfx_cs, buffer,
                                          RADEON_USAGE_READWRITE,
                                          RADEON_PRIO_COMPUTE_GLOBAL);
        }

        if (program->ir_type != PIPE_SHADER_IR_NATIVE)
                si_setup_tgsi_user_data(sctx, info);

        si_emit_dispatch_packets(sctx, info);

        if (unlikely(sctx->current_saved_cs)) {
                si_trace_emit(sctx);
                si_log_compute_state(sctx, sctx->log);
        }

        sctx->compute_is_busy = true;
        sctx->num_compute_calls++;
        if (sctx->cs_shader_state.uses_scratch)
                sctx->num_spill_compute_calls++;

        if (cs_regalloc_hang)
                sctx->flags |= SI_CONTEXT_CS_PARTIAL_FLUSH;
}

void si_destroy_compute(struct si_compute *program)
{
        if (program->ir_type != PIPE_SHADER_IR_NATIVE) {
                util_queue_drop_job(&program->screen->shader_compiler_queue,
                                    &program->ready);
                util_queue_fence_destroy(&program->ready);
        }

        si_shader_destroy(&program->shader);
        FREE(program);
}

static void si_delete_compute_state(struct pipe_context *ctx, void *state)
{
        struct si_compute *program = (struct si_compute *)state;
        struct si_context *sctx = (struct si_context *)ctx;

        if (!state)
                return;

        if (program == sctx->cs_shader_state.program)
                sctx->cs_shader_state.program = NULL;

        if (program == sctx->cs_shader_state.emitted_program)
                sctx->cs_shader_state.emitted_program = NULL;

        si_compute_reference(&program, NULL);
}

static void si_set_compute_resources(struct pipe_context *ctx_,
                                     unsigned start, unsigned count,
                                     struct pipe_surface **surfaces) { }

void si_init_compute_functions(struct si_context *sctx)
{
        sctx->b.create_compute_state = si_create_compute_state;
        sctx->b.delete_compute_state = si_delete_compute_state;
        sctx->b.bind_compute_state = si_bind_compute_state;
        sctx->b.set_compute_resources = si_set_compute_resources;
        sctx->b.set_global_binding = si_set_global_binding;
        sctx->b.launch_grid = si_launch_grid;
}