/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "si_compute.h"

#include "ac_rtld.h"
#include "amd_kernel_code_t.h"
#include "nir/tgsi_to_nir.h"
#include "si_build_pm4.h"
#include "util/u_async_debug.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"

#define COMPUTE_DBG(sscreen, fmt, args...)                     \
   do {                                                        \
      if ((sscreen->debug_flags & DBG(COMPUTE)))               \
         fprintf(stderr, fmt, ##args);                         \
   } while (0);
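/* Kernel dispatch descriptor uploaded for native (PIPE_SHADER_IR_NATIVE) kernels
 * that request a dispatch pointer; the layout follows the HSA kernel dispatch
 * packet (see si_setup_user_sgprs_co_v2 below). */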
struct dispatch_packet {
   uint16_t header;
   uint16_t setup;
   uint16_t workgroup_size_x;
   uint16_t workgroup_size_y;
   uint16_t workgroup_size_z;
   uint16_t reserved0;
   uint32_t grid_size_x;
   uint32_t grid_size_y;
   uint32_t grid_size_z;
   uint32_t private_segment_size;
   uint32_t group_segment_size;
   uint64_t kernel_object;
   uint64_t kernarg_address;
   uint64_t reserved2;
};
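/* Return a pointer to the amd_kernel_code_t header embedded in the ELF .text
 * section of a native (HSA code object) kernel, or NULL for compiled NIR shaders. */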
static const amd_kernel_code_t *si_compute_get_code_object(const struct si_compute *program,
                                                           uint64_t symbol_offset)
{
   const struct si_shader_selector *sel = &program->sel;

   if (program->ir_type != PIPE_SHADER_IR_NATIVE)
      return NULL;

   struct ac_rtld_binary rtld;
   if (!ac_rtld_open(&rtld,
                     (struct ac_rtld_open_info){.info = &sel->screen->info,
                                                .shader_type = MESA_SHADER_COMPUTE,
                                                .wave_size = sel->screen->compute_wave_size,
                                                .num_parts = 1,
                                                .elf_ptrs = &program->shader.binary.elf_buffer,
                                                .elf_sizes = &program->shader.binary.elf_size}))
      return NULL;

   const amd_kernel_code_t *result = NULL;
   const char *text;
   size_t size;
   if (!ac_rtld_get_section_by_name(&rtld, ".text", &text, &size))
      goto out;

   if (symbol_offset + sizeof(amd_kernel_code_t) > size)
      goto out;

   result = (const amd_kernel_code_t *)(text + symbol_offset);

out:
   ac_rtld_close(&rtld);
   return result;
}
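/* Translate the register fields of an amd_kernel_code_t header into an
 * ac_shader_config (RSRC1/RSRC2, SGPR/VGPR counts, LDS and scratch sizes). */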
static void code_object_to_config(const amd_kernel_code_t *code_object,
                                  struct ac_shader_config *out_config)
{
   uint32_t rsrc1 = code_object->compute_pgm_resource_registers;
   uint32_t rsrc2 = code_object->compute_pgm_resource_registers >> 32;
   out_config->num_sgprs = code_object->wavefront_sgpr_count;
   out_config->num_vgprs = code_object->workitem_vgpr_count;
   out_config->float_mode = G_00B028_FLOAT_MODE(rsrc1);
   out_config->rsrc1 = rsrc1;
   out_config->lds_size = MAX2(out_config->lds_size, G_00B84C_LDS_SIZE(rsrc2));
   out_config->rsrc2 = rsrc2;
   out_config->scratch_bytes_per_wave =
      align(code_object->workitem_private_segment_byte_size * 64, 1024);
}
/* Asynchronous compute shader compilation. */
static void si_create_compute_state_async(void *job, int thread_index)
{
   struct si_compute *program = (struct si_compute *)job;
   struct si_shader_selector *sel = &program->sel;
   struct si_shader *shader = &program->shader;
   struct ac_llvm_compiler *compiler;
   struct pipe_debug_callback *debug = &sel->compiler_ctx_state.debug;
   struct si_screen *sscreen = sel->screen;

   assert(!debug->debug_message || debug->async);
   assert(thread_index >= 0);
   assert(thread_index < ARRAY_SIZE(sscreen->compiler));
   compiler = &sscreen->compiler[thread_index];

   if (!compiler->passes)
      si_init_compiler(sscreen, compiler);

   assert(program->ir_type == PIPE_SHADER_IR_NIR);
   si_nir_scan_shader(sel->nir, &sel->info);

   /* Store the declared LDS size into si_shader_info for the shader
    * cache to include it.
    */
   sel->info.properties[TGSI_PROPERTY_CS_LOCAL_SIZE] = program->local_size;

   si_get_active_slot_masks(&sel->info, &sel->active_const_and_shader_buffers,
                            &sel->active_samplers_and_images);

   program->shader.is_monolithic = true;
   program->reads_variable_block_size =
      sel->info.uses_block_size && sel->info.properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] == 0;
   program->num_cs_user_data_dwords =
      sel->info.properties[TGSI_PROPERTY_CS_USER_DATA_COMPONENTS_AMD];

   unsigned user_sgprs = SI_NUM_RESOURCE_SGPRS + (sel->info.uses_grid_size ? 3 : 0) +
                         (program->reads_variable_block_size ? 3 : 0) +
                         program->num_cs_user_data_dwords;

   /* Fast path for compute shaders - some descriptors passed via user SGPRs. */
   /* Shader buffers in user SGPRs. */
   for (unsigned i = 0; i < 3 && user_sgprs <= 12 && sel->info.shader_buffers_declared & (1 << i);
        i++) {
      user_sgprs = align(user_sgprs, 4);
      if (i == 0)
         sel->cs_shaderbufs_sgpr_index = user_sgprs;
      user_sgprs += 4;
      sel->cs_num_shaderbufs_in_user_sgprs++;
   }

   /* Images in user SGPRs. */
   unsigned non_msaa_images = sel->info.images_declared & ~sel->info.msaa_images_declared;

   for (unsigned i = 0; i < 3 && non_msaa_images & (1 << i); i++) {
      unsigned num_sgprs = sel->info.image_buffers & (1 << i) ? 4 : 8;

      if (align(user_sgprs, num_sgprs) + num_sgprs > 16)
         break;

      user_sgprs = align(user_sgprs, num_sgprs);
      if (i == 0)
         sel->cs_images_sgpr_index = user_sgprs;
      user_sgprs += num_sgprs;
      sel->cs_num_images_in_user_sgprs++;
   }
   sel->cs_images_num_sgprs = user_sgprs - sel->cs_images_sgpr_index;
   assert(user_sgprs <= 16);

   unsigned char ir_sha1_cache_key[20];
   si_get_ir_cache_key(sel, false, false, ir_sha1_cache_key);

   /* Try to load the shader from the shader cache. */
   simple_mtx_lock(&sscreen->shader_cache_mutex);

   if (si_shader_cache_load_shader(sscreen, ir_sha1_cache_key, shader)) {
      simple_mtx_unlock(&sscreen->shader_cache_mutex);

      si_shader_dump_stats_for_shader_db(sscreen, shader, debug);
      si_shader_dump(sscreen, shader, debug, stderr, true);

      if (!si_shader_binary_upload(sscreen, shader, 0))
         program->shader.compilation_failed = true;
   } else {
      simple_mtx_unlock(&sscreen->shader_cache_mutex);

      if (!si_create_shader_variant(sscreen, compiler, &program->shader, debug)) {
         program->shader.compilation_failed = true;
         return;
      }

      bool scratch_enabled = shader->config.scratch_bytes_per_wave > 0;

      shader->config.rsrc1 = S_00B848_VGPRS((shader->config.num_vgprs - 1) /
                                            (sscreen->compute_wave_size == 32 ? 8 : 4)) |
                             S_00B848_DX10_CLAMP(1) |
                             S_00B848_MEM_ORDERED(sscreen->info.chip_class >= GFX10) |
                             S_00B848_WGP_MODE(sscreen->info.chip_class >= GFX10) |
                             S_00B848_FLOAT_MODE(shader->config.float_mode);

      if (sscreen->info.chip_class < GFX10) {
         shader->config.rsrc1 |= S_00B848_SGPRS((shader->config.num_sgprs - 1) / 8);
      }

      shader->config.rsrc2 = S_00B84C_USER_SGPR(user_sgprs) | S_00B84C_SCRATCH_EN(scratch_enabled) |
                             S_00B84C_TGID_X_EN(sel->info.uses_block_id[0]) |
                             S_00B84C_TGID_Y_EN(sel->info.uses_block_id[1]) |
                             S_00B84C_TGID_Z_EN(sel->info.uses_block_id[2]) |
                             S_00B84C_TG_SIZE_EN(sel->info.uses_subgroup_info) |
                             S_00B84C_TIDIG_COMP_CNT(sel->info.uses_thread_id[2]
                                                        ? 2
                                                        : sel->info.uses_thread_id[1] ? 1 : 0) |
                             S_00B84C_LDS_SIZE(shader->config.lds_size);

      simple_mtx_lock(&sscreen->shader_cache_mutex);
      si_shader_cache_insert_shader(sscreen, ir_sha1_cache_key, shader, true);
      simple_mtx_unlock(&sscreen->shader_cache_mutex);
   }

   ralloc_free(sel->nir);
   sel->nir = NULL;
}
static void *si_create_compute_state(struct pipe_context *ctx, const struct pipe_compute_state *cso)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_screen *sscreen = (struct si_screen *)ctx->screen;
   struct si_compute *program = CALLOC_STRUCT(si_compute);
   struct si_shader_selector *sel = &program->sel;

   pipe_reference_init(&sel->base.reference, 1);
   sel->type = PIPE_SHADER_COMPUTE;
   sel->screen = sscreen;
   program->shader.selector = &program->sel;
   program->ir_type = cso->ir_type;
   program->local_size = cso->req_local_mem;
   program->private_size = cso->req_private_mem;
   program->input_size = cso->req_input_mem;

   if (cso->ir_type != PIPE_SHADER_IR_NATIVE) {
      if (cso->ir_type == PIPE_SHADER_IR_TGSI) {
         program->ir_type = PIPE_SHADER_IR_NIR;
         sel->nir = tgsi_to_nir(cso->prog, ctx->screen, true);
      } else {
         assert(cso->ir_type == PIPE_SHADER_IR_NIR);
         sel->nir = (struct nir_shader *)cso->prog;
      }

      sel->compiler_ctx_state.debug = sctx->debug;
      sel->compiler_ctx_state.is_debug_context = sctx->is_debug;
      p_atomic_inc(&sscreen->num_shaders_created);

      si_schedule_initial_compile(sctx, PIPE_SHADER_COMPUTE, &sel->ready, &sel->compiler_ctx_state,
                                  program, si_create_compute_state_async);
   } else {
      const struct pipe_binary_program_header *header;
      header = cso->prog;

      program->shader.binary.elf_size = header->num_bytes;
      program->shader.binary.elf_buffer = malloc(header->num_bytes);
      if (!program->shader.binary.elf_buffer) {
         FREE(program);
         return NULL;
      }
      memcpy((void *)program->shader.binary.elf_buffer, header->blob, header->num_bytes);

      const amd_kernel_code_t *code_object = si_compute_get_code_object(program, 0);
      code_object_to_config(code_object, &program->shader.config);

      si_shader_dump(sctx->screen, &program->shader, &sctx->debug, stderr, true);
      if (!si_shader_binary_upload(sctx->screen, &program->shader, 0)) {
         fprintf(stderr, "LLVM failed to upload shader\n");
         free((void *)program->shader.binary.elf_buffer);
         FREE(program);
         return NULL;
      }
   }

   return program;
}
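/* pipe_context::bind_compute_state: make the shader current and update the
 * active descriptor slot masks (waiting for the async compile if needed). */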
static void si_bind_compute_state(struct pipe_context *ctx, void *state)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_compute *program = (struct si_compute *)state;
   struct si_shader_selector *sel = &program->sel;

   sctx->cs_shader_state.program = program;
   if (!program)
      return;

   /* Wait because we need active slot usage masks. */
   if (program->ir_type != PIPE_SHADER_IR_NATIVE)
      util_queue_fence_wait(&sel->ready);

   si_set_active_descriptors(sctx,
                             SI_DESCS_FIRST_COMPUTE + SI_SHADER_DESCS_CONST_AND_SHADER_BUFFERS,
                             sel->active_const_and_shader_buffers);
   si_set_active_descriptors(sctx, SI_DESCS_FIRST_COMPUTE + SI_SHADER_DESCS_SAMPLERS_AND_IMAGES,
                             sel->active_samplers_and_images);

   sctx->compute_shaderbuf_sgprs_dirty = true;
   sctx->compute_image_sgprs_dirty = true;
}
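/* Bind global buffers used by native (OpenCL) kernels and patch each 32-bit
 * handle with the 64-bit GPU virtual address of its buffer. */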
static void si_set_global_binding(struct pipe_context *ctx, unsigned first, unsigned n,
                                  struct pipe_resource **resources, uint32_t **handles)
{
   unsigned i;
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_compute *program = sctx->cs_shader_state.program;

   if (first + n > program->max_global_buffers) {
      unsigned old_max = program->max_global_buffers;
      program->max_global_buffers = first + n;
      program->global_buffers = realloc(
         program->global_buffers, program->max_global_buffers * sizeof(program->global_buffers[0]));
      if (!program->global_buffers) {
         fprintf(stderr, "radeonsi: failed to allocate compute global_buffers\n");
         return;
      }

      memset(&program->global_buffers[old_max], 0,
             (program->max_global_buffers - old_max) * sizeof(program->global_buffers[0]));
   }

   if (!resources) {
      for (i = 0; i < n; i++) {
         pipe_resource_reference(&program->global_buffers[first + i], NULL);
      }
      return;
   }

   for (i = 0; i < n; i++) {
      uint64_t va;
      uint32_t offset;
      pipe_resource_reference(&program->global_buffers[first + i], resources[i]);
      va = si_resource(resources[i])->gpu_address;
      offset = util_le32_to_cpu(*handles[i]);
      va += offset;
      va = util_cpu_to_le64(va);
      memcpy(handles[i], &va, sizeof(va));
   }
}
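/* Emit the compute register state that only needs to be set once per command
 * stream (CU masks, border color pointer, per-generation defaults). */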
void si_emit_initial_compute_regs(struct si_context *sctx, struct radeon_cmdbuf *cs)
{
   uint64_t bc_va = sctx->border_color_buffer->gpu_address;

   radeon_set_sh_reg_seq(cs, R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE0, 2);
   /* R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE0 / SE1,
    * renamed COMPUTE_DESTINATION_EN_SEn on gfx10. */
   radeon_emit(cs, S_00B858_SH0_CU_EN(0xffff) | S_00B858_SH1_CU_EN(0xffff));
   radeon_emit(cs, S_00B858_SH0_CU_EN(0xffff) | S_00B858_SH1_CU_EN(0xffff));

   if (sctx->chip_class == GFX6) {
      /* This register has been moved to R_00CD20_COMPUTE_MAX_WAVE_ID
       * and is now per pipe, so it should be handled in the
       * kernel if we want to use something other than the default value.
       *
       * TODO: This should be:
       * (number of compute units) * 4 * (waves per simd) - 1
       */
      radeon_set_sh_reg(cs, R_00B82C_COMPUTE_MAX_WAVE_ID, 0x190 /* Default value */);

      if (sctx->screen->info.si_TA_CS_BC_BASE_ADDR_allowed)
         radeon_set_config_reg(cs, R_00950C_TA_CS_BC_BASE_ADDR, bc_va >> 8);
   }

   if (sctx->chip_class >= GFX7) {
      /* Also set R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE2 / SE3 */
      radeon_set_sh_reg_seq(cs, R_00B864_COMPUTE_STATIC_THREAD_MGMT_SE2, 2);
      radeon_emit(cs, S_00B858_SH0_CU_EN(0xffff) | S_00B858_SH1_CU_EN(0xffff));
      radeon_emit(cs, S_00B858_SH0_CU_EN(0xffff) | S_00B858_SH1_CU_EN(0xffff));

      /* Disable profiling on compute queues. */
      if (cs != sctx->gfx_cs || !sctx->screen->info.has_graphics) {
         radeon_set_sh_reg(cs, R_00B82C_COMPUTE_PERFCOUNT_ENABLE, 0);
         radeon_set_sh_reg(cs, R_00B878_COMPUTE_THREAD_TRACE_ENABLE, 0);
      }

      /* Set the pointer to border colors. */
      radeon_set_uconfig_reg_seq(cs, R_030E00_TA_CS_BC_BASE_ADDR, 2);
      radeon_emit(cs, bc_va >> 8);                    /* R_030E00_TA_CS_BC_BASE_ADDR */
      radeon_emit(cs, S_030E04_ADDRESS(bc_va >> 40)); /* R_030E04_TA_CS_BC_BASE_ADDR_HI */
   }

   /* cs_preamble_state initializes this for the gfx queue, so only do this
    * if we are on a compute queue.
    */
   if (sctx->chip_class >= GFX9 &&
       (cs != sctx->gfx_cs || !sctx->screen->info.has_graphics)) {
      radeon_set_uconfig_reg(cs, R_0301EC_CP_COHER_START_DELAY,
                             sctx->chip_class >= GFX10 ? 0x20 : 0);
   }

   if (sctx->chip_class >= GFX10) {
      radeon_set_sh_reg(cs, R_00B890_COMPUTE_USER_ACCUM_0, 0);
      radeon_set_sh_reg(cs, R_00B894_COMPUTE_USER_ACCUM_1, 0);
      radeon_set_sh_reg(cs, R_00B898_COMPUTE_USER_ACCUM_2, 0);
      radeon_set_sh_reg(cs, R_00B89C_COMPUTE_USER_ACCUM_3, 0);
      radeon_set_sh_reg(cs, R_00B8A0_COMPUTE_PGM_RSRC3, 0);
      radeon_set_sh_reg(cs, R_00B9F4_COMPUTE_DISPATCH_TUNNEL, 0);
   }
}
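/* Grow the per-context compute scratch buffer if it is too small for this shader
 * and re-upload the shader so that it picks up the new scratch address. */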
static bool si_setup_compute_scratch_buffer(struct si_context *sctx, struct si_shader *shader,
                                            struct ac_shader_config *config)
{
   uint64_t scratch_bo_size, scratch_needed;
   scratch_bo_size = 0;
   scratch_needed = config->scratch_bytes_per_wave * sctx->scratch_waves;
   if (sctx->compute_scratch_buffer)
      scratch_bo_size = sctx->compute_scratch_buffer->b.b.width0;

   if (scratch_bo_size < scratch_needed) {
      si_resource_reference(&sctx->compute_scratch_buffer, NULL);

      sctx->compute_scratch_buffer =
         si_aligned_buffer_create(&sctx->screen->b, SI_RESOURCE_FLAG_UNMAPPABLE, PIPE_USAGE_DEFAULT,
                                  scratch_needed, sctx->screen->info.pte_fragment_size);

      if (!sctx->compute_scratch_buffer)
         return false;
   }

   if (sctx->compute_scratch_buffer != shader->scratch_bo && scratch_needed) {
      uint64_t scratch_va = sctx->compute_scratch_buffer->gpu_address;

      if (!si_shader_binary_upload(sctx->screen, shader, scratch_va))
         return false;

      si_resource_reference(&shader->scratch_bo, sctx->compute_scratch_buffer);
   }

   return true;
}
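/* Emit PGM_LO/PGM_RSRC1/PGM_RSRC2 and TMPRING_SIZE for the given shader if it
 * differs from the one currently bound. Returns false if scratch setup fails. */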
static bool si_switch_compute_shader(struct si_context *sctx, struct si_compute *program,
                                     struct si_shader *shader, const amd_kernel_code_t *code_object,
                                     unsigned offset)
{
   struct radeon_cmdbuf *cs = sctx->gfx_cs;
   struct ac_shader_config inline_config = {0};
   struct ac_shader_config *config;
   uint64_t shader_va;

   if (sctx->cs_shader_state.emitted_program == program && sctx->cs_shader_state.offset == offset)
      return true;

   if (program->ir_type != PIPE_SHADER_IR_NATIVE) {
      config = &shader->config;
   } else {
      unsigned lds_blocks;

      config = &inline_config;
      code_object_to_config(code_object, config);

      lds_blocks = config->lds_size;
      /* XXX: We are over allocating LDS. For GFX6, the shader reports
       * LDS in blocks of 256 bytes, so if there are 4 bytes lds
       * allocated in the shader and 4 bytes allocated by the state
       * tracker, then we will set LDS_SIZE to 512 bytes rather than 256.
       */
      if (sctx->chip_class <= GFX6) {
         lds_blocks += align(program->local_size, 256) >> 8;
      } else {
         lds_blocks += align(program->local_size, 512) >> 9;
      }

      /* TODO: use si_multiwave_lds_size_workaround */
      assert(lds_blocks <= 0xFF);

      config->rsrc2 &= C_00B84C_LDS_SIZE;
      config->rsrc2 |= S_00B84C_LDS_SIZE(lds_blocks);
   }

   if (!si_setup_compute_scratch_buffer(sctx, shader, config))
      return false;

   if (shader->scratch_bo) {
      COMPUTE_DBG(sctx->screen,
                  "Waves: %u; Scratch per wave: %u bytes; "
                  "Total Scratch: %u bytes\n",
                  sctx->scratch_waves, config->scratch_bytes_per_wave,
                  config->scratch_bytes_per_wave * sctx->scratch_waves);

      radeon_add_to_buffer_list(sctx, sctx->gfx_cs, shader->scratch_bo, RADEON_USAGE_READWRITE,
                                RADEON_PRIO_SCRATCH_BUFFER);
   }

   /* Prefetch the compute shader to TC L2.
    *
    * We should also prefetch graphics shaders if a compute dispatch was
    * the last command, and the compute shader if a draw call was the last
    * command. However, that would add more complexity and we're likely
    * to get a shader state change in that case anyway.
    */
   if (sctx->chip_class >= GFX7) {
      cik_prefetch_TC_L2_async(sctx, &program->shader.bo->b.b, 0, program->shader.bo->b.b.width0);
   }

   shader_va = shader->bo->gpu_address + offset;
   if (program->ir_type == PIPE_SHADER_IR_NATIVE) {
      /* Shader code is placed after the amd_kernel_code_t
       * struct. */
      shader_va += sizeof(amd_kernel_code_t);
   }

   radeon_add_to_buffer_list(sctx, sctx->gfx_cs, shader->bo, RADEON_USAGE_READ,
                             RADEON_PRIO_SHADER_BINARY);

   radeon_set_sh_reg_seq(cs, R_00B830_COMPUTE_PGM_LO, 2);
   radeon_emit(cs, shader_va >> 8);
   radeon_emit(cs, S_00B834_DATA(shader_va >> 40));

   radeon_set_sh_reg_seq(cs, R_00B848_COMPUTE_PGM_RSRC1, 2);
   radeon_emit(cs, config->rsrc1);
   radeon_emit(cs, config->rsrc2);

   COMPUTE_DBG(sctx->screen,
               "COMPUTE_PGM_RSRC1: 0x%08x "
               "COMPUTE_PGM_RSRC2: 0x%08x\n",
               config->rsrc1, config->rsrc2);

   sctx->max_seen_compute_scratch_bytes_per_wave =
      MAX2(sctx->max_seen_compute_scratch_bytes_per_wave, config->scratch_bytes_per_wave);

   radeon_set_sh_reg(cs, R_00B860_COMPUTE_TMPRING_SIZE,
                     S_00B860_WAVES(sctx->scratch_waves) |
                        S_00B860_WAVESIZE(sctx->max_seen_compute_scratch_bytes_per_wave >> 10));

   sctx->cs_shader_state.emitted_program = program;
   sctx->cs_shader_state.offset = offset;
   sctx->cs_shader_state.uses_scratch = config->scratch_bytes_per_wave != 0;

   return true;
}
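/* Write a scratch buffer resource descriptor into four consecutive
 * COMPUTE_USER_DATA SGPRs starting at user_sgpr. */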
static void setup_scratch_rsrc_user_sgprs(struct si_context *sctx,
                                          const amd_kernel_code_t *code_object, unsigned user_sgpr)
{
   struct radeon_cmdbuf *cs = sctx->gfx_cs;
   uint64_t scratch_va = sctx->compute_scratch_buffer->gpu_address;

   unsigned max_private_element_size =
      AMD_HSA_BITS_GET(code_object->code_properties, AMD_CODE_PROPERTY_PRIVATE_ELEMENT_SIZE);

   uint32_t scratch_dword0 = scratch_va & 0xffffffff;
   uint32_t scratch_dword1 =
      S_008F04_BASE_ADDRESS_HI(scratch_va >> 32) | S_008F04_SWIZZLE_ENABLE(1);

   /* Disable address clamping */
   uint32_t scratch_dword2 = 0xffffffff;
   uint32_t scratch_dword3 = S_008F0C_INDEX_STRIDE(3) | S_008F0C_ADD_TID_ENABLE(1);

   if (sctx->chip_class >= GFX9) {
      assert(max_private_element_size == 1); /* always 4 bytes on GFX9 */
   } else {
      scratch_dword3 |= S_008F0C_ELEMENT_SIZE(max_private_element_size);

      if (sctx->chip_class < GFX8) {
         /* BUF_DATA_FORMAT is ignored, but it cannot be
          * BUF_DATA_FORMAT_INVALID. */
         scratch_dword3 |= S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_8);
      }
   }

   radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0 + (user_sgpr * 4), 4);
   radeon_emit(cs, scratch_dword0);
   radeon_emit(cs, scratch_dword1);
   radeon_emit(cs, scratch_dword2);
   radeon_emit(cs, scratch_dword3);
}
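/* Set up the user SGPRs requested by a code object v2 kernel: scratch rsrc,
 * dispatch packet pointer, kernarg pointer and workgroup counts. */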
static void si_setup_user_sgprs_co_v2(struct si_context *sctx, const amd_kernel_code_t *code_object,
                                      const struct pipe_grid_info *info, uint64_t kernel_args_va)
{
   struct si_compute *program = sctx->cs_shader_state.program;
   struct radeon_cmdbuf *cs = sctx->gfx_cs;

   static const enum amd_code_property_mask_t workgroup_count_masks[] = {
      AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_X,
      AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Y,
      AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Z};

   unsigned i, user_sgpr = 0;
   if (AMD_HSA_BITS_GET(code_object->code_properties,
                        AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER)) {
      if (code_object->workitem_private_segment_byte_size > 0) {
         setup_scratch_rsrc_user_sgprs(sctx, code_object, user_sgpr);
      }
      user_sgpr += 4;
   }

   if (AMD_HSA_BITS_GET(code_object->code_properties, AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR)) {
      struct dispatch_packet dispatch;
      unsigned dispatch_offset;
      struct si_resource *dispatch_buf = NULL;
      uint64_t dispatch_va;

      /* Upload dispatch ptr */
      memset(&dispatch, 0, sizeof(dispatch));

      dispatch.workgroup_size_x = util_cpu_to_le16(info->block[0]);
      dispatch.workgroup_size_y = util_cpu_to_le16(info->block[1]);
      dispatch.workgroup_size_z = util_cpu_to_le16(info->block[2]);

      dispatch.grid_size_x = util_cpu_to_le32(info->grid[0] * info->block[0]);
      dispatch.grid_size_y = util_cpu_to_le32(info->grid[1] * info->block[1]);
      dispatch.grid_size_z = util_cpu_to_le32(info->grid[2] * info->block[2]);

      dispatch.private_segment_size = util_cpu_to_le32(program->private_size);
      dispatch.group_segment_size = util_cpu_to_le32(program->local_size);

      dispatch.kernarg_address = util_cpu_to_le64(kernel_args_va);

      u_upload_data(sctx->b.const_uploader, 0, sizeof(dispatch), 256, &dispatch, &dispatch_offset,
                    (struct pipe_resource **)&dispatch_buf);

      if (!dispatch_buf) {
         fprintf(stderr, "Error: Failed to allocate dispatch "
                         "packet.");
      }
      radeon_add_to_buffer_list(sctx, sctx->gfx_cs, dispatch_buf, RADEON_USAGE_READ,
                                RADEON_PRIO_CONST_BUFFER);

      dispatch_va = dispatch_buf->gpu_address + dispatch_offset;

      radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0 + (user_sgpr * 4), 2);
      radeon_emit(cs, dispatch_va);
      radeon_emit(cs, S_008F04_BASE_ADDRESS_HI(dispatch_va >> 32) | S_008F04_STRIDE(0));

      si_resource_reference(&dispatch_buf, NULL);
      user_sgpr += 2;
   }

   if (AMD_HSA_BITS_GET(code_object->code_properties,
                        AMD_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR)) {
      radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0 + (user_sgpr * 4), 2);
      radeon_emit(cs, kernel_args_va);
      radeon_emit(cs, S_008F04_BASE_ADDRESS_HI(kernel_args_va >> 32) | S_008F04_STRIDE(0));
      user_sgpr += 2;
   }

   for (i = 0; i < 3 && user_sgpr < 16; i++) {
      if (code_object->code_properties & workgroup_count_masks[i]) {
         radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0 + (user_sgpr * 4), 1);
         radeon_emit(cs, info->grid[i]);
         user_sgpr += 1;
      }
   }
}
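/* Upload the kernel arguments of a native kernel through the const uploader
 * and point the code object user SGPRs at them. */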
static bool si_upload_compute_input(struct si_context *sctx, const amd_kernel_code_t *code_object,
                                    const struct pipe_grid_info *info)
{
   struct si_compute *program = sctx->cs_shader_state.program;
   struct si_resource *input_buffer = NULL;
   uint32_t kernel_args_offset = 0;
   uint32_t *kernel_args;
   void *kernel_args_ptr;
   uint64_t kernel_args_va;

   u_upload_alloc(sctx->b.const_uploader, 0, program->input_size,
                  sctx->screen->info.tcc_cache_line_size, &kernel_args_offset,
                  (struct pipe_resource **)&input_buffer, &kernel_args_ptr);

   if (unlikely(!kernel_args_ptr))
      return false;

   kernel_args = (uint32_t *)kernel_args_ptr;
   kernel_args_va = input_buffer->gpu_address + kernel_args_offset;

   memcpy(kernel_args, info->input, program->input_size);

   for (unsigned i = 0; i < program->input_size / 4; i++) {
      COMPUTE_DBG(sctx->screen, "input %u : %u\n", i, kernel_args[i]);
   }

   radeon_add_to_buffer_list(sctx, sctx->gfx_cs, input_buffer, RADEON_USAGE_READ,
                             RADEON_PRIO_CONST_BUFFER);

   si_setup_user_sgprs_co_v2(sctx, code_object, info, kernel_args_va);
   si_resource_reference(&input_buffer, NULL);
   return true;
}
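/* Load grid size, variable block size and app-supplied user data into the user
 * SGPRs expected by shaders compiled from NIR. */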
static void si_setup_nir_user_data(struct si_context *sctx, const struct pipe_grid_info *info)
{
   struct si_compute *program = sctx->cs_shader_state.program;
   struct si_shader_selector *sel = &program->sel;
   struct radeon_cmdbuf *cs = sctx->gfx_cs;
   unsigned grid_size_reg = R_00B900_COMPUTE_USER_DATA_0 + 4 * SI_NUM_RESOURCE_SGPRS;
   unsigned block_size_reg = grid_size_reg +
                             /* 12 bytes = 3 dwords. */
                             12 * sel->info.uses_grid_size;
   unsigned cs_user_data_reg = block_size_reg + 12 * program->reads_variable_block_size;

   if (info->indirect) {
      if (sel->info.uses_grid_size) {
         for (unsigned i = 0; i < 3; ++i) {
            si_cp_copy_data(sctx, sctx->gfx_cs, COPY_DATA_REG, NULL, (grid_size_reg >> 2) + i,
                            COPY_DATA_SRC_MEM, si_resource(info->indirect),
                            info->indirect_offset + 4 * i);
         }
      }
   } else {
      if (sel->info.uses_grid_size) {
         radeon_set_sh_reg_seq(cs, grid_size_reg, 3);
         radeon_emit(cs, info->grid[0]);
         radeon_emit(cs, info->grid[1]);
         radeon_emit(cs, info->grid[2]);
      }
      if (program->reads_variable_block_size) {
         radeon_set_sh_reg_seq(cs, block_size_reg, 3);
         radeon_emit(cs, info->block[0]);
         radeon_emit(cs, info->block[1]);
         radeon_emit(cs, info->block[2]);
      }
   }

   if (program->num_cs_user_data_dwords) {
      radeon_set_sh_reg_seq(cs, cs_user_data_reg, program->num_cs_user_data_dwords);
      radeon_emit_array(cs, sctx->cs_user_data, program->num_cs_user_data_dwords);
   }
}
static void si_emit_dispatch_packets(struct si_context *sctx, const struct pipe_grid_info *info)
{
   struct si_screen *sscreen = sctx->screen;
   struct radeon_cmdbuf *cs = sctx->gfx_cs;
   bool render_cond_bit = sctx->render_cond && !sctx->render_cond_force_off;
   unsigned threads_per_threadgroup = info->block[0] * info->block[1] * info->block[2];
   unsigned waves_per_threadgroup =
      DIV_ROUND_UP(threads_per_threadgroup, sscreen->compute_wave_size);
   unsigned threadgroups_per_cu = 1;

   if (sctx->chip_class >= GFX10 && waves_per_threadgroup == 1)
      threadgroups_per_cu = 2;

   radeon_set_sh_reg(
      cs, R_00B854_COMPUTE_RESOURCE_LIMITS,
      ac_get_compute_resource_limits(&sscreen->info, waves_per_threadgroup,
                                     sctx->cs_max_waves_per_sh, threadgroups_per_cu));

   unsigned dispatch_initiator = S_00B800_COMPUTE_SHADER_EN(1) | S_00B800_FORCE_START_AT_000(1) |
                                 /* If the KMD allows it (there is a KMD hw register for it),
                                  * allow launching waves out-of-order. (same as Vulkan) */
                                 S_00B800_ORDER_MODE(sctx->chip_class >= GFX7) |
                                 S_00B800_CS_W32_EN(sscreen->compute_wave_size == 32);

   const uint *last_block = info->last_block;
   bool partial_block_en = last_block[0] || last_block[1] || last_block[2];

   radeon_set_sh_reg_seq(cs, R_00B81C_COMPUTE_NUM_THREAD_X, 3);

   if (partial_block_en) {
      unsigned partial[3];

      /* If no partial_block, these should be an entire block size, not 0. */
      partial[0] = last_block[0] ? last_block[0] : info->block[0];
      partial[1] = last_block[1] ? last_block[1] : info->block[1];
      partial[2] = last_block[2] ? last_block[2] : info->block[2];

      radeon_emit(
         cs, S_00B81C_NUM_THREAD_FULL(info->block[0]) | S_00B81C_NUM_THREAD_PARTIAL(partial[0]));
      radeon_emit(
         cs, S_00B820_NUM_THREAD_FULL(info->block[1]) | S_00B820_NUM_THREAD_PARTIAL(partial[1]));
      radeon_emit(
         cs, S_00B824_NUM_THREAD_FULL(info->block[2]) | S_00B824_NUM_THREAD_PARTIAL(partial[2]));

      dispatch_initiator |= S_00B800_PARTIAL_TG_EN(1);
   } else {
      radeon_emit(cs, S_00B81C_NUM_THREAD_FULL(info->block[0]));
      radeon_emit(cs, S_00B820_NUM_THREAD_FULL(info->block[1]));
      radeon_emit(cs, S_00B824_NUM_THREAD_FULL(info->block[2]));
   }

   if (info->indirect) {
      uint64_t base_va = si_resource(info->indirect)->gpu_address;

      radeon_add_to_buffer_list(sctx, sctx->gfx_cs, si_resource(info->indirect), RADEON_USAGE_READ,
                                RADEON_PRIO_DRAW_INDIRECT);

      radeon_emit(cs, PKT3(PKT3_SET_BASE, 2, 0) | PKT3_SHADER_TYPE_S(1));
      radeon_emit(cs, 1);
      radeon_emit(cs, base_va);
      radeon_emit(cs, base_va >> 32);

      radeon_emit(cs, PKT3(PKT3_DISPATCH_INDIRECT, 1, render_cond_bit) | PKT3_SHADER_TYPE_S(1));
      radeon_emit(cs, info->indirect_offset);
      radeon_emit(cs, dispatch_initiator);
   } else {
      radeon_emit(cs, PKT3(PKT3_DISPATCH_DIRECT, 3, render_cond_bit) | PKT3_SHADER_TYPE_S(1));
      radeon_emit(cs, info->grid[0]);
      radeon_emit(cs, info->grid[1]);
      radeon_emit(cs, info->grid[2]);
      radeon_emit(cs, dispatch_initiator);
   }
}
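/* pipe_context::launch_grid: flush/decompress as needed, switch the compute
 * shader, set up user data and emit the dispatch packet. */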
static void si_launch_grid(struct pipe_context *ctx, const struct pipe_grid_info *info)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_compute *program = sctx->cs_shader_state.program;
   const amd_kernel_code_t *code_object = si_compute_get_code_object(program, info->pc);
   int i;
   /* HW bug workaround when CS threadgroups > 256 threads and async
    * compute isn't used, i.e. only one compute job can run at a time.
    * If async compute is possible, the threadgroup size must be limited
    * to 256 threads on all queues to avoid the bug.
    * Only GFX6 and certain GFX7 chips are affected.
    */
   bool cs_regalloc_hang =
      (sctx->chip_class == GFX6 || sctx->family == CHIP_BONAIRE || sctx->family == CHIP_KABINI) &&
      info->block[0] * info->block[1] * info->block[2] > 256;

   if (cs_regalloc_hang)
      sctx->flags |= SI_CONTEXT_PS_PARTIAL_FLUSH | SI_CONTEXT_CS_PARTIAL_FLUSH;

   if (program->ir_type != PIPE_SHADER_IR_NATIVE && program->shader.compilation_failed)
      return;

   if (sctx->has_graphics) {
      if (sctx->last_num_draw_calls != sctx->num_draw_calls) {
         si_update_fb_dirtiness_after_rendering(sctx);
         sctx->last_num_draw_calls = sctx->num_draw_calls;
      }

      si_decompress_textures(sctx, 1 << PIPE_SHADER_COMPUTE);
   }

   /* Add buffer sizes for memory checking in need_cs_space. */
   si_context_add_resource_size(sctx, &program->shader.bo->b.b);
   /* TODO: add the scratch buffer */

   if (info->indirect) {
      si_context_add_resource_size(sctx, info->indirect);

      /* Indirect buffers use TC L2 on GFX9, but not older hw. */
      if (sctx->chip_class <= GFX8 && si_resource(info->indirect)->TC_L2_dirty) {
         sctx->flags |= SI_CONTEXT_WB_L2;
         si_resource(info->indirect)->TC_L2_dirty = false;
      }
   }

   si_need_gfx_cs_space(sctx);

   /* If we're using a secure context, determine if cs must be secure or not */
   if (unlikely(sctx->ws->ws_is_secure(sctx->ws))) {
      bool secure = si_compute_resources_check_encrypted(sctx);
      if (secure != sctx->ws->cs_is_secure(sctx->gfx_cs)) {
         si_flush_gfx_cs(sctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
         sctx->ws->cs_set_secure(sctx->gfx_cs, secure);
      }
   }

   if (sctx->bo_list_add_all_compute_resources)
      si_compute_resources_add_all_to_bo_list(sctx);

   if (!sctx->cs_shader_state.initialized) {
      si_emit_initial_compute_regs(sctx, sctx->gfx_cs);

      sctx->cs_shader_state.emitted_program = NULL;
      sctx->cs_shader_state.initialized = true;
   }

   if (sctx->flags)
      sctx->emit_cache_flush(sctx);

   if (!si_switch_compute_shader(sctx, program, &program->shader, code_object, info->pc))
      return;

   si_upload_compute_shader_descriptors(sctx);
   si_emit_compute_shader_pointers(sctx);

   if (sctx->has_graphics && si_is_atom_dirty(sctx, &sctx->atoms.s.render_cond)) {
      sctx->atoms.s.render_cond.emit(sctx);
      si_set_atom_dirty(sctx, &sctx->atoms.s.render_cond, false);
   }

   if (program->ir_type == PIPE_SHADER_IR_NATIVE &&
       unlikely(!si_upload_compute_input(sctx, code_object, info)))
      return;

   /* Global buffers */
   for (i = 0; i < program->max_global_buffers; i++) {
      struct si_resource *buffer = si_resource(program->global_buffers[i]);
      if (!buffer) {
         continue;
      }
      radeon_add_to_buffer_list(sctx, sctx->gfx_cs, buffer, RADEON_USAGE_READWRITE,
                                RADEON_PRIO_COMPUTE_GLOBAL);
   }

   if (program->ir_type != PIPE_SHADER_IR_NATIVE)
      si_setup_nir_user_data(sctx, info);

   si_emit_dispatch_packets(sctx, info);

   if (unlikely(sctx->current_saved_cs)) {
      si_trace_emit(sctx);
      si_log_compute_state(sctx, sctx->log);
   }

   sctx->compute_is_busy = true;
   sctx->num_compute_calls++;
   if (sctx->cs_shader_state.uses_scratch)
      sctx->num_spill_compute_calls++;

   if (cs_regalloc_hang)
      sctx->flags |= SI_CONTEXT_CS_PARTIAL_FLUSH;
}
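/* Free everything owned by a compute state object; called once its reference
 * count drops to zero (see si_compute_reference). */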
void si_destroy_compute(struct si_compute *program)
{
   struct si_shader_selector *sel = &program->sel;

   if (program->ir_type != PIPE_SHADER_IR_NATIVE) {
      util_queue_drop_job(&sel->screen->shader_compiler_queue, &sel->ready);
      util_queue_fence_destroy(&sel->ready);
   }

   for (unsigned i = 0; i < program->max_global_buffers; i++)
      pipe_resource_reference(&program->global_buffers[i], NULL);
   FREE(program->global_buffers);

   si_shader_destroy(&program->shader);
   ralloc_free(program->sel.nir);
   FREE(program);
}
static void si_delete_compute_state(struct pipe_context *ctx, void *state)
{
   struct si_compute *program = (struct si_compute *)state;
   struct si_context *sctx = (struct si_context *)ctx;

   if (!state)
      return;

   if (program == sctx->cs_shader_state.program)
      sctx->cs_shader_state.program = NULL;

   if (program == sctx->cs_shader_state.emitted_program)
      sctx->cs_shader_state.emitted_program = NULL;

   si_compute_reference(&program, NULL);
}
static void si_set_compute_resources(struct pipe_context *ctx_, unsigned start, unsigned count,
                                     struct pipe_surface **surfaces)
{
}

void si_init_compute_functions(struct si_context *sctx)
{
   sctx->b.create_compute_state = si_create_compute_state;
   sctx->b.delete_compute_state = si_delete_compute_state;
   sctx->b.bind_compute_state = si_bind_compute_state;
   sctx->b.set_compute_resources = si_set_compute_resources;
   sctx->b.set_global_binding = si_set_global_binding;
   sctx->b.launch_grid = si_launch_grid;
}