/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
25 #include "tgsi/tgsi_parse.h"
26 #include "util/u_async_debug.h"
27 #include "util/u_memory.h"
28 #include "util/u_upload_mgr.h"
30 #include "amd_kernel_code_t.h"
31 #include "radeon/r600_cs.h"
33 #include "si_compute.h"
#define COMPUTE_DBG(rscreen, fmt, args...) \
	do { \
		if ((rscreen->debug_flags & DBG(COMPUTE))) fprintf(stderr, fmt, ##args); \
	} while (0);
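
/* Illustrative usage (assumes an si_screen *sscreen and the named locals
 * are in scope):
 *   COMPUTE_DBG(sscreen, "input %u : %u\n", i, kernel_args[i]);
 * The message is printed only when the COMPUTE debug flag is enabled. */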
struct dispatch_packet {
	uint16_t header;
	uint16_t setup;
	uint16_t workgroup_size_x;
	uint16_t workgroup_size_y;
	uint16_t workgroup_size_z;
	uint16_t reserved0;
	uint32_t grid_size_x;
	uint32_t grid_size_y;
	uint32_t grid_size_z;
	uint32_t private_segment_size;
	uint32_t group_segment_size;
	uint64_t kernel_object;
	uint64_t kernarg_address;
	uint64_t reserved2;
};
static const amd_kernel_code_t *si_compute_get_code_object(
	const struct si_compute *program,
	uint64_t symbol_offset)
{
	if (!program->use_code_object_v2) {
		return NULL;
	}
	return (const amd_kernel_code_t*)
		(program->shader.binary.code + symbol_offset);
}
static void code_object_to_config(const amd_kernel_code_t *code_object,
				  struct si_shader_config *out_config)
{
	uint32_t rsrc1 = code_object->compute_pgm_resource_registers;
	uint32_t rsrc2 = code_object->compute_pgm_resource_registers >> 32;
	out_config->num_sgprs = code_object->wavefront_sgpr_count;
	out_config->num_vgprs = code_object->workitem_vgpr_count;
	out_config->float_mode = G_00B028_FLOAT_MODE(rsrc1);
	out_config->rsrc1 = rsrc1;
	out_config->lds_size = MAX2(out_config->lds_size, G_00B84C_LDS_SIZE(rsrc2));
	out_config->rsrc2 = rsrc2;
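	/* Illustrative arithmetic: a kernel using 24 bytes of private memory
	 * per work-item needs 24 * 64 = 1536 bytes per 64-lane wave, which
	 * the align() below rounds up to 2048. */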
	out_config->scratch_bytes_per_wave =
		align(code_object->workitem_private_segment_byte_size * 64, 1024);
}
/* Asynchronous compute shader compilation. */
static void si_create_compute_state_async(void *job, int thread_index)
{
	struct si_compute *program = (struct si_compute *)job;
	struct si_shader *shader = &program->shader;
	struct si_shader_selector sel;
	LLVMTargetMachineRef tm;
	struct pipe_debug_callback *debug = &program->compiler_ctx_state.debug;

	assert(!debug->debug_message || debug->async);
	assert(thread_index >= 0);
	assert(thread_index < ARRAY_SIZE(program->screen->tm));
	tm = program->screen->tm[thread_index];

	memset(&sel, 0, sizeof(sel));

	sel.screen = program->screen;
	tgsi_scan_shader(program->tokens, &sel.info);
	sel.tokens = program->tokens;
	sel.type = PIPE_SHADER_COMPUTE;
	sel.local_size = program->local_size;
	si_get_active_slot_masks(&sel.info,
				 &program->active_const_and_shader_buffers,
				 &program->active_samplers_and_images);

	program->shader.selector = &sel;
	program->shader.is_monolithic = true;
	program->uses_grid_size = sel.info.uses_grid_size;
	program->uses_block_size = sel.info.uses_block_size;
	program->uses_bindless_samplers = sel.info.uses_bindless_samplers;
	program->uses_bindless_images = sel.info.uses_bindless_images;

	if (si_shader_create(program->screen, tm, &program->shader, debug)) {
		program->shader.compilation_failed = true;
	} else {
		bool scratch_enabled = shader->config.scratch_bytes_per_wave > 0;
		unsigned user_sgprs = SI_NUM_RESOURCE_SGPRS +
				      (sel.info.uses_grid_size ? 3 : 0) +
				      (sel.info.uses_block_size ? 3 : 0);

		shader->config.rsrc1 =
			S_00B848_VGPRS((shader->config.num_vgprs - 1) / 4) |
			S_00B848_SGPRS((shader->config.num_sgprs - 1) / 8) |
			S_00B848_DX10_CLAMP(1) |
			S_00B848_FLOAT_MODE(shader->config.float_mode);
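		/* Note: the hardware allocates VGPRs in granules of 4 and
		 * SGPRs in granules of 8, hence the (count - 1) / granule
		 * encoding above; e.g. num_vgprs = 24 encodes as
		 * (24 - 1) / 4 = 5. */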
		shader->config.rsrc2 =
			S_00B84C_USER_SGPR(user_sgprs) |
			S_00B84C_SCRATCH_EN(scratch_enabled) |
			S_00B84C_TGID_X_EN(sel.info.uses_block_id[0]) |
			S_00B84C_TGID_Y_EN(sel.info.uses_block_id[1]) |
			S_00B84C_TGID_Z_EN(sel.info.uses_block_id[2]) |
			S_00B84C_TIDIG_COMP_CNT(sel.info.uses_thread_id[2] ? 2 :
						sel.info.uses_thread_id[1] ? 1 : 0) |
			S_00B84C_LDS_SIZE(shader->config.lds_size);

		program->variable_group_size =
			sel.info.properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] == 0;
	}

	FREE(program->tokens);
	program->shader.selector = NULL;
}
static void *si_create_compute_state(
	struct pipe_context *ctx,
	const struct pipe_compute_state *cso)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_screen *sscreen = (struct si_screen *)ctx->screen;
	struct si_compute *program = CALLOC_STRUCT(si_compute);

	pipe_reference_init(&program->reference, 1);
	program->screen = (struct si_screen *)ctx->screen;
	program->ir_type = cso->ir_type;
	program->local_size = cso->req_local_mem;
	program->private_size = cso->req_private_mem;
	program->input_size = cso->req_input_mem;
	program->use_code_object_v2 = HAVE_LLVM >= 0x0400 &&
				      cso->ir_type == PIPE_SHADER_IR_NATIVE;

	if (cso->ir_type == PIPE_SHADER_IR_TGSI) {
		program->tokens = tgsi_dup_tokens(cso->prog);
		if (!program->tokens) {
			FREE(program);
			return NULL;
		}

		program->compiler_ctx_state.debug = sctx->debug;
		program->compiler_ctx_state.is_debug_context = sctx->is_debug;
		p_atomic_inc(&sscreen->num_shaders_created);
		util_queue_fence_init(&program->ready);

		struct util_async_debug_callback async_debug;
		bool wait =
			(sctx->debug.debug_message && !sctx->debug.async) ||
			sctx->is_debug ||
			si_can_dump_shader(sscreen, PIPE_SHADER_COMPUTE);

		if (wait) {
			u_async_debug_init(&async_debug);
			program->compiler_ctx_state.debug = async_debug.base;
		}

		util_queue_add_job(&sscreen->shader_compiler_queue,
				   program, &program->ready,
				   si_create_compute_state_async, NULL);

		if (wait) {
			util_queue_fence_wait(&program->ready);
			u_async_debug_drain(&async_debug, &sctx->debug);
			u_async_debug_cleanup(&async_debug);
		}
	} else {
		const struct pipe_llvm_program_header *header;
		const char *code;
		header = cso->prog;
		code = cso->prog + sizeof(struct pipe_llvm_program_header);

		ac_elf_read(code, header->num_bytes, &program->shader.binary);
		if (program->use_code_object_v2) {
			const amd_kernel_code_t *code_object =
				si_compute_get_code_object(program, 0);
			code_object_to_config(code_object, &program->shader.config);
		} else {
			si_shader_binary_read_config(&program->shader.binary,
						     &program->shader.config, 0);
		}
		si_shader_dump(sctx->screen, &program->shader, &sctx->debug,
			       PIPE_SHADER_COMPUTE, stderr, true);
		if (si_shader_binary_upload(sctx->screen, &program->shader) < 0) {
			fprintf(stderr, "LLVM failed to upload shader\n");
			FREE(program);
			return NULL;
		}
	}

	return program;
}
static void si_bind_compute_state(struct pipe_context *ctx, void *state)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_compute *program = (struct si_compute *)state;

	sctx->cs_shader_state.program = program;
	if (!program)
		return;

	/* Wait because we need active slot usage masks. */
	if (program->ir_type == PIPE_SHADER_IR_TGSI)
		util_queue_fence_wait(&program->ready);

	si_set_active_descriptors(sctx,
				  SI_DESCS_FIRST_COMPUTE +
				  SI_SHADER_DESCS_CONST_AND_SHADER_BUFFERS,
				  program->active_const_and_shader_buffers);
	si_set_active_descriptors(sctx,
				  SI_DESCS_FIRST_COMPUTE +
				  SI_SHADER_DESCS_SAMPLERS_AND_IMAGES,
				  program->active_samplers_and_images);
}
static void si_set_global_binding(
	struct pipe_context *ctx, unsigned first, unsigned n,
	struct pipe_resource **resources,
	uint32_t **handles)
{
	unsigned i;
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_compute *program = sctx->cs_shader_state.program;

	assert(first + n <= MAX_GLOBAL_BUFFERS);
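
	/* Each handles[i] points at the location in the kernel arguments where
	 * the 64-bit GPU address of resources[i] is patched in, in
	 * little-endian byte order (see the loop below). */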
	if (!resources) {
		for (i = 0; i < n; i++) {
			pipe_resource_reference(&program->global_buffers[first + i], NULL);
		}
		return;
	}

	for (i = 0; i < n; i++) {
		uint64_t va;
		uint32_t offset;
		pipe_resource_reference(&program->global_buffers[first + i], resources[i]);
		va = r600_resource(resources[i])->gpu_address;
		offset = util_le32_to_cpu(*handles[i]);
		va += offset;
		va = util_cpu_to_le64(va);
		memcpy(handles[i], &va, sizeof(va));
	}
}
static void si_initialize_compute(struct si_context *sctx)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
	uint64_t bc_va;

	radeon_set_sh_reg_seq(cs, R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE0, 2);
	/* R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE0 / SE1 */
	radeon_emit(cs, S_00B858_SH0_CU_EN(0xffff) | S_00B858_SH1_CU_EN(0xffff));
	radeon_emit(cs, S_00B85C_SH0_CU_EN(0xffff) | S_00B85C_SH1_CU_EN(0xffff));

	if (sctx->b.chip_class >= CIK) {
		/* Also set R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE2 / SE3 */
		radeon_set_sh_reg_seq(cs,
				      R_00B864_COMPUTE_STATIC_THREAD_MGMT_SE2, 2);
		radeon_emit(cs, S_00B864_SH0_CU_EN(0xffff) |
				S_00B864_SH1_CU_EN(0xffff));
		radeon_emit(cs, S_00B868_SH0_CU_EN(0xffff) |
				S_00B868_SH1_CU_EN(0xffff));
	}

	/* This register has been moved to R_00CD20_COMPUTE_MAX_WAVE_ID
	 * and is now per pipe, so it should be handled in the
	 * kernel if we want to use something other than the default value,
	 * which is now 0x22f.
	 */
	if (sctx->b.chip_class <= SI) {
		/* XXX: This should be:
		 * (number of compute units) * 4 * (waves per simd) - 1 */
		radeon_set_sh_reg(cs, R_00B82C_COMPUTE_MAX_WAVE_ID,
				  0x190 /* Default value */);
	}

	/* Set the pointer to border colors. */
	bc_va = sctx->border_color_buffer->gpu_address;

	if (sctx->b.chip_class >= CIK) {
		radeon_set_uconfig_reg_seq(cs, R_030E00_TA_CS_BC_BASE_ADDR, 2);
		radeon_emit(cs, bc_va >> 8);  /* R_030E00_TA_CS_BC_BASE_ADDR */
		radeon_emit(cs, bc_va >> 40); /* R_030E04_TA_CS_BC_BASE_ADDR_HI */
	} else {
		if (sctx->screen->info.drm_major == 3 ||
		    (sctx->screen->info.drm_major == 2 &&
		     sctx->screen->info.drm_minor >= 48)) {
			radeon_set_config_reg(cs, R_00950C_TA_CS_BC_BASE_ADDR,
					      bc_va >> 8);
		}
	}

	sctx->cs_shader_state.emitted_program = NULL;
	sctx->cs_shader_state.initialized = true;
}
static bool si_setup_compute_scratch_buffer(struct si_context *sctx,
					    struct si_shader *shader,
					    struct si_shader_config *config)
{
	uint64_t scratch_bo_size, scratch_needed;
	scratch_bo_size = 0;
	scratch_needed = config->scratch_bytes_per_wave * sctx->scratch_waves;
	if (sctx->compute_scratch_buffer)
		scratch_bo_size = sctx->compute_scratch_buffer->b.b.width0;

	if (scratch_bo_size < scratch_needed) {
		r600_resource_reference(&sctx->compute_scratch_buffer, NULL);

		sctx->compute_scratch_buffer = (struct r600_resource *)
			si_aligned_buffer_create(&sctx->screen->b,
						 R600_RESOURCE_FLAG_UNMAPPABLE,
						 PIPE_USAGE_DEFAULT,
						 scratch_needed, 256);

		if (!sctx->compute_scratch_buffer)
			return false;
	}

	if (sctx->compute_scratch_buffer != shader->scratch_bo && scratch_needed) {
		uint64_t scratch_va = sctx->compute_scratch_buffer->gpu_address;

		si_shader_apply_scratch_relocs(shader, scratch_va);

		if (si_shader_binary_upload(sctx->screen, shader))
			return false;

		r600_resource_reference(&shader->scratch_bo,
					sctx->compute_scratch_buffer);
	}

	return true;
}
static bool si_switch_compute_shader(struct si_context *sctx,
				     struct si_compute *program,
				     struct si_shader *shader,
				     const amd_kernel_code_t *code_object,
				     unsigned offset)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
	struct si_shader_config inline_config = {0};
	struct si_shader_config *config;
	uint64_t shader_va;

	if (sctx->cs_shader_state.emitted_program == program &&
	    sctx->cs_shader_state.offset == offset)
		return true;

	if (program->ir_type == PIPE_SHADER_IR_TGSI) {
		config = &shader->config;
	} else {
		unsigned lds_blocks;

		config = &inline_config;
		if (code_object) {
			code_object_to_config(code_object, config);
		} else {
			si_shader_binary_read_config(&shader->binary, config, offset);
		}

		lds_blocks = config->lds_size;
		/* XXX: We are over-allocating LDS. For SI, the shader reports
		 * LDS in blocks of 256 bytes, so if there are 4 bytes of LDS
		 * allocated in the shader and 4 bytes allocated by the state
		 * tracker, then we will set LDS_SIZE to 512 bytes rather than 256.
		 */
		if (sctx->b.chip_class <= SI) {
			lds_blocks += align(program->local_size, 256) >> 8;
		} else {
			lds_blocks += align(program->local_size, 512) >> 9;
		}

		/* TODO: use si_multiwave_lds_size_workaround */
		assert(lds_blocks <= 0xFF);

		config->rsrc2 &= C_00B84C_LDS_SIZE;
		config->rsrc2 |= S_00B84C_LDS_SIZE(lds_blocks);
	}

	if (!si_setup_compute_scratch_buffer(sctx, shader, config))
		return false;

	if (shader->scratch_bo) {
		COMPUTE_DBG(sctx->screen, "Waves: %u; Scratch per wave: %u bytes; "
			    "Total Scratch: %u bytes\n", sctx->scratch_waves,
			    config->scratch_bytes_per_wave,
			    config->scratch_bytes_per_wave *
			    sctx->scratch_waves);

		radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
					  shader->scratch_bo, RADEON_USAGE_READWRITE,
					  RADEON_PRIO_SCRATCH_BUFFER);
	}

	/* Prefetch the compute shader to TC L2.
	 *
	 * We should also prefetch graphics shaders if a compute dispatch was
	 * the last command, and the compute shader if a draw call was the last
	 * command. However, that would add more complexity and we're likely
	 * to get a shader state change in that case anyway.
	 */
	if (sctx->b.chip_class >= CIK) {
		cik_prefetch_TC_L2_async(sctx, &program->shader.bo->b.b,
					 0, program->shader.bo->b.b.width0);
	}

	shader_va = shader->bo->gpu_address + offset;
	if (program->use_code_object_v2) {
		/* Shader code is placed after the amd_kernel_code_t
		 * struct. */
		shader_va += sizeof(amd_kernel_code_t);
	}

	radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, shader->bo,
				  RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);

	radeon_set_sh_reg_seq(cs, R_00B830_COMPUTE_PGM_LO, 2);
	radeon_emit(cs, shader_va >> 8);
	radeon_emit(cs, shader_va >> 40);

	radeon_set_sh_reg_seq(cs, R_00B848_COMPUTE_PGM_RSRC1, 2);
	radeon_emit(cs, config->rsrc1);
	radeon_emit(cs, config->rsrc2);

	COMPUTE_DBG(sctx->screen, "COMPUTE_PGM_RSRC1: 0x%08x "
		    "COMPUTE_PGM_RSRC2: 0x%08x\n", config->rsrc1, config->rsrc2);
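
	/* WAVESIZE below is expressed in 256-dword (1024-byte) granules,
	 * hence scratch_bytes_per_wave >> 10. */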
	radeon_set_sh_reg(cs, R_00B860_COMPUTE_TMPRING_SIZE,
			  S_00B860_WAVES(sctx->scratch_waves)
			  | S_00B860_WAVESIZE(config->scratch_bytes_per_wave >> 10));

	sctx->cs_shader_state.emitted_program = program;
	sctx->cs_shader_state.offset = offset;
	sctx->cs_shader_state.uses_scratch =
		config->scratch_bytes_per_wave != 0;

	return true;
}
static void setup_scratch_rsrc_user_sgprs(struct si_context *sctx,
					  const amd_kernel_code_t *code_object,
					  unsigned user_sgpr)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
	uint64_t scratch_va = sctx->compute_scratch_buffer->gpu_address;
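
	/* The four dwords computed below form a buffer resource descriptor
	 * (V#) for the scratch ring; ADD_TID_ENABLE offsets each lane's
	 * accesses by its hardware thread id. */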
	unsigned max_private_element_size = AMD_HSA_BITS_GET(
			code_object->code_properties,
			AMD_CODE_PROPERTY_PRIVATE_ELEMENT_SIZE);

	uint32_t scratch_dword0 = scratch_va & 0xffffffff;
	uint32_t scratch_dword1 =
		S_008F04_BASE_ADDRESS_HI(scratch_va >> 32) |
		S_008F04_SWIZZLE_ENABLE(1);

	/* Disable address clamping */
	uint32_t scratch_dword2 = 0xffffffff;
	uint32_t scratch_dword3 =
		S_008F0C_INDEX_STRIDE(3) |
		S_008F0C_ADD_TID_ENABLE(1);

	if (sctx->b.chip_class >= GFX9) {
		assert(max_private_element_size == 1); /* always 4 bytes on GFX9 */
	} else {
		scratch_dword3 |= S_008F0C_ELEMENT_SIZE(max_private_element_size);

		if (sctx->b.chip_class < VI) {
			/* BUF_DATA_FORMAT is ignored, but it cannot be
			 * BUF_DATA_FORMAT_INVALID. */
			scratch_dword3 |=
				S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_8);
		}
	}

	radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0 +
			      (user_sgpr * 4), 4);
	radeon_emit(cs, scratch_dword0);
	radeon_emit(cs, scratch_dword1);
	radeon_emit(cs, scratch_dword2);
	radeon_emit(cs, scratch_dword3);
}
static void si_setup_user_sgprs_co_v2(struct si_context *sctx,
				      const amd_kernel_code_t *code_object,
				      const struct pipe_grid_info *info,
				      uint64_t kernel_args_va)
{
	struct si_compute *program = sctx->cs_shader_state.program;
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;

	static const enum amd_code_property_mask_t workgroup_count_masks[] = {
		AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_X,
		AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Y,
		AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Z
	};

	unsigned i, user_sgpr = 0;
	if (AMD_HSA_BITS_GET(code_object->code_properties,
			AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER)) {
		if (code_object->workitem_private_segment_byte_size > 0) {
			setup_scratch_rsrc_user_sgprs(sctx, code_object,
						      user_sgpr);
		}
		user_sgpr += 4;
	}

	if (AMD_HSA_BITS_GET(code_object->code_properties,
			AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR)) {
		struct dispatch_packet dispatch;
		unsigned dispatch_offset;
		struct r600_resource *dispatch_buf = NULL;
		uint64_t dispatch_va;

		/* Upload dispatch ptr */
		memset(&dispatch, 0, sizeof(dispatch));

		dispatch.workgroup_size_x = info->block[0];
		dispatch.workgroup_size_y = info->block[1];
		dispatch.workgroup_size_z = info->block[2];

		dispatch.grid_size_x = info->grid[0] * info->block[0];
		dispatch.grid_size_y = info->grid[1] * info->block[1];
		dispatch.grid_size_z = info->grid[2] * info->block[2];
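
		/* The packet's grid size is expressed in work-items rather
		 * than workgroups, hence the multiplication by the block
		 * size above. */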
		dispatch.private_segment_size = program->private_size;
		dispatch.group_segment_size = program->local_size;

		dispatch.kernarg_address = kernel_args_va;

		u_upload_data(sctx->b.b.const_uploader, 0, sizeof(dispatch),
			      256, &dispatch, &dispatch_offset,
			      (struct pipe_resource**)&dispatch_buf);

		if (!dispatch_buf) {
			fprintf(stderr, "Error: Failed to allocate dispatch "
					"packet.");
		}
		radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, dispatch_buf,
					  RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER);

		dispatch_va = dispatch_buf->gpu_address + dispatch_offset;

		radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0 +
				      (user_sgpr * 4), 2);
		radeon_emit(cs, dispatch_va);
		radeon_emit(cs, S_008F04_BASE_ADDRESS_HI(dispatch_va >> 32) |
				S_008F04_STRIDE(0));

		r600_resource_reference(&dispatch_buf, NULL);
		user_sgpr += 2;
	}

	if (AMD_HSA_BITS_GET(code_object->code_properties,
			AMD_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR)) {
		radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0 +
				      (user_sgpr * 4), 2);
		radeon_emit(cs, kernel_args_va);
		radeon_emit(cs, S_008F04_BASE_ADDRESS_HI(kernel_args_va >> 32) |
				S_008F04_STRIDE(0));
		user_sgpr += 2;
	}

	for (i = 0; i < 3 && user_sgpr < 16; i++) {
		if (code_object->code_properties & workgroup_count_masks[i]) {
			radeon_set_sh_reg_seq(cs,
					      R_00B900_COMPUTE_USER_DATA_0 +
					      (user_sgpr * 4), 1);
			radeon_emit(cs, info->grid[i]);
			user_sgpr += 1;
		}
	}
}
static bool si_upload_compute_input(struct si_context *sctx,
				    const amd_kernel_code_t *code_object,
				    const struct pipe_grid_info *info)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
	struct si_compute *program = sctx->cs_shader_state.program;
	struct r600_resource *input_buffer = NULL;
	unsigned kernel_args_size;
	unsigned num_work_size_bytes = program->use_code_object_v2 ? 0 : 36;
	uint32_t kernel_args_offset = 0;
	uint32_t *kernel_args;
	void *kernel_args_ptr;
	uint64_t kernel_args_va;
	unsigned i;

	/* The extra num_work_size_bytes are for work group / work item size information */
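	/* (Without code object v2 these are 9 dwords: the grid size, the
	 * global size, and the block size, 3 dwords each, written in the
	 * loop below.) */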
	kernel_args_size = program->input_size + num_work_size_bytes;

	u_upload_alloc(sctx->b.b.const_uploader, 0, kernel_args_size,
		       sctx->screen->info.tcc_cache_line_size,
		       &kernel_args_offset,
		       (struct pipe_resource**)&input_buffer, &kernel_args_ptr);

	if (unlikely(!kernel_args_ptr))
		return false;

	kernel_args = (uint32_t*)kernel_args_ptr;
	kernel_args_va = input_buffer->gpu_address + kernel_args_offset;

	if (!code_object) {
		for (i = 0; i < 3; i++) {
			kernel_args[i] = info->grid[i];
			kernel_args[i + 3] = info->grid[i] * info->block[i];
			kernel_args[i + 6] = info->block[i];
		}
	}

	memcpy(kernel_args + (num_work_size_bytes / 4), info->input,
	       program->input_size);

	for (i = 0; i < (kernel_args_size / 4); i++) {
		COMPUTE_DBG(sctx->screen, "input %u : %u\n", i,
			    kernel_args[i]);
	}

	radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, input_buffer,
				  RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER);

	if (code_object) {
		si_setup_user_sgprs_co_v2(sctx, code_object, info, kernel_args_va);
	} else {
		radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0, 2);
		radeon_emit(cs, kernel_args_va);
		radeon_emit(cs, S_008F04_BASE_ADDRESS_HI(kernel_args_va >> 32) |
				S_008F04_STRIDE(0));
	}

	r600_resource_reference(&input_buffer, NULL);

	return true;
}
static void si_setup_tgsi_grid(struct si_context *sctx,
			       const struct pipe_grid_info *info)
{
	struct si_compute *program = sctx->cs_shader_state.program;
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
	unsigned grid_size_reg = R_00B900_COMPUTE_USER_DATA_0 +
				 4 * SI_NUM_RESOURCE_SGPRS;
	unsigned block_size_reg = grid_size_reg +
				  /* 12 bytes = 3 dwords. */
				  12 * program->uses_grid_size;

	if (info->indirect) {
		if (program->uses_grid_size) {
			uint64_t base_va = r600_resource(info->indirect)->gpu_address;
			uint64_t va = base_va + info->indirect_offset;
			unsigned i;

			radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
					 (struct r600_resource *)info->indirect,
					 RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);
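
			/* Have the CP copy the three grid dimensions from the
			 * indirect buffer into the grid-size user SGPRs. The
			 * destination register offset is given in dwords,
			 * hence grid_size_reg >> 2. */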
			for (i = 0; i < 3; ++i) {
				radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
				radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
						COPY_DATA_DST_SEL(COPY_DATA_REG));
				radeon_emit(cs, (va + 4 * i));
				radeon_emit(cs, (va + 4 * i) >> 32);
				radeon_emit(cs, (grid_size_reg >> 2) + i);
				radeon_emit(cs, 0);
			}
		}
	} else {
		if (program->uses_grid_size) {
			radeon_set_sh_reg_seq(cs, grid_size_reg, 3);
			radeon_emit(cs, info->grid[0]);
			radeon_emit(cs, info->grid[1]);
			radeon_emit(cs, info->grid[2]);
		}
		if (program->variable_group_size && program->uses_block_size) {
			radeon_set_sh_reg_seq(cs, block_size_reg, 3);
			radeon_emit(cs, info->block[0]);
			radeon_emit(cs, info->block[1]);
			radeon_emit(cs, info->block[2]);
		}
	}
}
static void si_emit_dispatch_packets(struct si_context *sctx,
				     const struct pipe_grid_info *info)
{
	struct si_screen *sscreen = sctx->screen;
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
	bool render_cond_bit = sctx->b.render_cond && !sctx->b.render_cond_force_off;
	unsigned waves_per_threadgroup =
		DIV_ROUND_UP(info->block[0] * info->block[1] * info->block[2], 64);
	unsigned compute_resource_limits =
		S_00B854_SIMD_DEST_CNTL(waves_per_threadgroup % 4 == 0);
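
	/* Illustrative: a 16x16x1 block is 256 threads, i.e. 4 waves of 64,
	 * so SIMD_DEST_CNTL is set (4 % 4 == 0); an 8x8x1 block is a single
	 * wave and leaves it clear. */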
	if (sctx->b.chip_class >= CIK) {
		unsigned num_cu_per_se = sscreen->info.num_good_compute_units /
					 sscreen->info.max_se;

		/* Force even distribution on all SIMDs in CU if the workgroup
		 * size is 64. This has shown some good improvements if # of CUs
		 * per SE is not a multiple of 4.
		 */
		if (num_cu_per_se % 4 && waves_per_threadgroup == 1)
			compute_resource_limits |= S_00B854_FORCE_SIMD_DIST(1);
	}

	radeon_set_sh_reg(cs, R_00B854_COMPUTE_RESOURCE_LIMITS,
			  compute_resource_limits);

	radeon_set_sh_reg_seq(cs, R_00B81C_COMPUTE_NUM_THREAD_X, 3);
	radeon_emit(cs, S_00B81C_NUM_THREAD_FULL(info->block[0]));
	radeon_emit(cs, S_00B820_NUM_THREAD_FULL(info->block[1]));
	radeon_emit(cs, S_00B824_NUM_THREAD_FULL(info->block[2]));

	unsigned dispatch_initiator =
		S_00B800_COMPUTE_SHADER_EN(1) |
		S_00B800_FORCE_START_AT_000(1) |
		/* If the KMD allows it (there is a KMD hw register for it),
		 * allow launching waves out-of-order. (same as Vulkan) */
		S_00B800_ORDER_MODE(sctx->b.chip_class >= CIK);

	if (info->indirect) {
		uint64_t base_va = r600_resource(info->indirect)->gpu_address;

		radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
					  (struct r600_resource *)info->indirect,
					  RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);

		radeon_emit(cs, PKT3(PKT3_SET_BASE, 2, 0) |
				PKT3_SHADER_TYPE_S(1));
		radeon_emit(cs, 1);
		radeon_emit(cs, base_va);
		radeon_emit(cs, base_va >> 32);

		radeon_emit(cs, PKT3(PKT3_DISPATCH_INDIRECT, 1, render_cond_bit) |
				PKT3_SHADER_TYPE_S(1));
		radeon_emit(cs, info->indirect_offset);
		radeon_emit(cs, dispatch_initiator);
	} else {
		radeon_emit(cs, PKT3(PKT3_DISPATCH_DIRECT, 3, render_cond_bit) |
				PKT3_SHADER_TYPE_S(1));
		radeon_emit(cs, info->grid[0]);
		radeon_emit(cs, info->grid[1]);
		radeon_emit(cs, info->grid[2]);
		radeon_emit(cs, dispatch_initiator);
	}
}
static void si_launch_grid(
		struct pipe_context *ctx, const struct pipe_grid_info *info)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_compute *program = sctx->cs_shader_state.program;
	const amd_kernel_code_t *code_object =
		si_compute_get_code_object(program, info->pc);
	int i;
	/* HW bug workaround when CS threadgroups > 256 threads and async
	 * compute isn't used, i.e. only one compute job can run at a time.
	 * If async compute is possible, the threadgroup size must be limited
	 * to 256 threads on all queues to avoid the bug.
	 * Only SI and certain CIK chips are affected.
	 */
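	/* (For example, an 8x8x8 threadgroup is 512 threads and trips the
	 * workaround below, while 16x16x1 at exactly 256 threads does not.) */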
	bool cs_regalloc_hang =
		(sctx->b.chip_class == SI ||
		 sctx->b.family == CHIP_BONAIRE ||
		 sctx->b.family == CHIP_KABINI) &&
		info->block[0] * info->block[1] * info->block[2] > 256;

	if (cs_regalloc_hang)
		sctx->b.flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
				 SI_CONTEXT_CS_PARTIAL_FLUSH;

	if (program->ir_type == PIPE_SHADER_IR_TGSI &&
	    program->shader.compilation_failed)
		return;

	if (sctx->b.last_num_draw_calls != sctx->b.num_draw_calls) {
		si_update_fb_dirtiness_after_rendering(sctx);
		sctx->b.last_num_draw_calls = sctx->b.num_draw_calls;
	}

	si_decompress_textures(sctx, 1 << PIPE_SHADER_COMPUTE);

	/* Add buffer sizes for memory checking in need_cs_space. */
	si_context_add_resource_size(ctx, &program->shader.bo->b.b);
	/* TODO: add the scratch buffer */

	if (info->indirect) {
		si_context_add_resource_size(ctx, info->indirect);

		/* Indirect buffers use TC L2 on GFX9, but not older hw. */
		if (sctx->b.chip_class <= VI &&
		    r600_resource(info->indirect)->TC_L2_dirty) {
			sctx->b.flags |= SI_CONTEXT_WRITEBACK_GLOBAL_L2;
			r600_resource(info->indirect)->TC_L2_dirty = false;
		}
	}

	si_need_cs_space(sctx);

	if (!sctx->cs_shader_state.initialized)
		si_initialize_compute(sctx);

	if (sctx->b.flags)
		si_emit_cache_flush(sctx);

	if (!si_switch_compute_shader(sctx, program, &program->shader,
				      code_object, info->pc))
		return;

	si_upload_compute_shader_descriptors(sctx);
	si_emit_compute_shader_pointers(sctx);

	if (si_is_atom_dirty(sctx, sctx->atoms.s.render_cond)) {
		sctx->atoms.s.render_cond->emit(&sctx->b,
						sctx->atoms.s.render_cond);
		si_set_atom_dirty(sctx, sctx->atoms.s.render_cond, false);
	}

	if ((program->input_size ||
	     program->ir_type == PIPE_SHADER_IR_NATIVE) &&
	    unlikely(!si_upload_compute_input(sctx, code_object, info))) {
		return;
	}

	/* Global buffers */
	for (i = 0; i < MAX_GLOBAL_BUFFERS; i++) {
		struct r600_resource *buffer =
			(struct r600_resource *)program->global_buffers[i];
		if (!buffer) {
			continue;
		}
		radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, buffer,
					  RADEON_USAGE_READWRITE,
					  RADEON_PRIO_COMPUTE_GLOBAL);
	}

	if (program->ir_type == PIPE_SHADER_IR_TGSI)
		si_setup_tgsi_grid(sctx, info);

	si_emit_dispatch_packets(sctx, info);

	if (unlikely(sctx->current_saved_cs)) {
		si_trace_emit(sctx);
		si_log_compute_state(sctx, sctx->b.log);
	}

	sctx->compute_is_busy = true;
	sctx->b.num_compute_calls++;
	if (sctx->cs_shader_state.uses_scratch)
		sctx->b.num_spill_compute_calls++;

	if (cs_regalloc_hang)
		sctx->b.flags |= SI_CONTEXT_CS_PARTIAL_FLUSH;
}
void si_destroy_compute(struct si_compute *program)
{
	if (program->ir_type == PIPE_SHADER_IR_TGSI) {
		util_queue_drop_job(&program->screen->shader_compiler_queue,
				    &program->ready);
		util_queue_fence_destroy(&program->ready);
	}

	si_shader_destroy(&program->shader);
	FREE(program);
}
static void si_delete_compute_state(struct pipe_context *ctx, void *state)
{
	struct si_compute *program = (struct si_compute *)state;
	struct si_context *sctx = (struct si_context *)ctx;

	if (!state)
		return;

	if (program == sctx->cs_shader_state.program)
		sctx->cs_shader_state.program = NULL;

	if (program == sctx->cs_shader_state.emitted_program)
		sctx->cs_shader_state.emitted_program = NULL;

	si_compute_reference(&program, NULL);
}
static void si_set_compute_resources(struct pipe_context *ctx_,
				     unsigned start, unsigned count,
				     struct pipe_surface **surfaces) { }
void si_init_compute_functions(struct si_context *sctx)
{
	sctx->b.b.create_compute_state = si_create_compute_state;
	sctx->b.b.delete_compute_state = si_delete_compute_state;
	sctx->b.b.bind_compute_state = si_bind_compute_state;
	/* ctx->context.create_sampler_view = evergreen_compute_create_sampler_view; */
	sctx->b.b.set_compute_resources = si_set_compute_resources;
	sctx->b.b.set_global_binding = si_set_global_binding;
	sctx->b.b.launch_grid = si_launch_grid;
}