/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "tgsi/tgsi_parse.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
#include "radeon/radeon_elf_util.h"

#include "amd_kernel_code_t.h"
#include "radeon/r600_cs.h"
#include "si_pipe.h"   /* struct si_context, si_screen */
#include "si_shader.h" /* struct si_shader, si_shader_config */
#include "sid.h"       /* SI register definitions */

#define MAX_GLOBAL_BUFFERS 20
struct si_compute {
	unsigned ir_type;
	unsigned local_size;
	unsigned private_size;
	unsigned input_size;
	struct si_shader shader;

	struct pipe_resource *global_buffers[MAX_GLOBAL_BUFFERS];
	bool use_code_object_v2;
};
struct dispatch_packet {
	uint16_t header;
	uint16_t setup;
	uint16_t workgroup_size_x;
	uint16_t workgroup_size_y;
	uint16_t workgroup_size_z;
	uint16_t reserved0;
	uint32_t grid_size_x;
	uint32_t grid_size_y;
	uint32_t grid_size_z;
	uint32_t private_segment_size;
	uint32_t group_segment_size;
	uint64_t kernel_object;
	uint64_t kernarg_address;
	uint64_t reserved2;
};
static const amd_kernel_code_t *si_compute_get_code_object(
	const struct si_compute *program,
	uint64_t symbol_offset)
{
	if (!program->use_code_object_v2) {
		return NULL;
	}
	return (const amd_kernel_code_t*)
		(program->shader.binary.code + symbol_offset);
}
static void code_object_to_config(const amd_kernel_code_t *code_object,
				  struct si_shader_config *out_config) {

	uint32_t rsrc1 = code_object->compute_pgm_resource_registers;
	uint32_t rsrc2 = code_object->compute_pgm_resource_registers >> 32;
	out_config->num_sgprs = code_object->wavefront_sgpr_count;
	out_config->num_vgprs = code_object->workitem_vgpr_count;
	out_config->float_mode = G_00B028_FLOAT_MODE(rsrc1);
	out_config->rsrc1 = rsrc1;
	out_config->lds_size = MAX2(out_config->lds_size, G_00B84C_LDS_SIZE(rsrc2));
	out_config->rsrc2 = rsrc2;
	out_config->scratch_bytes_per_wave =
		align(code_object->workitem_private_segment_byte_size * 64, 1024);
}
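/* scratch_bytes_per_wave above: a GCN wave has 64 lanes, so per-wave
 * scratch is 64x the per-work-item private segment size. The 1024-byte
 * alignment matches the granularity of COMPUTE_TMPRING_SIZE.WAVESIZE
 * (the value is programmed as bytes >> 10 when the shader is switched). */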
static void *si_create_compute_state(
	struct pipe_context *ctx,
	const struct pipe_compute_state *cso)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_screen *sscreen = (struct si_screen *)ctx->screen;
	struct si_compute *program = CALLOC_STRUCT(si_compute);
	struct si_shader *shader = &program->shader;

	program->ir_type = cso->ir_type;
	program->local_size = cso->req_local_mem;
	program->private_size = cso->req_private_mem;
	program->input_size = cso->req_input_mem;
	program->use_code_object_v2 = HAVE_LLVM >= 0x0400 &&
					cso->ir_type == PIPE_SHADER_IR_NATIVE;

	if (cso->ir_type == PIPE_SHADER_IR_TGSI) {
		struct si_shader_selector sel;
		bool scratch_enabled;

		memset(&sel, 0, sizeof(sel));

		sel.tokens = tgsi_dup_tokens(cso->prog);
		if (!sel.tokens) {
			FREE(program);
			return NULL;
		}

		tgsi_scan_shader(cso->prog, &sel.info);
		sel.type = PIPE_SHADER_COMPUTE;
		sel.local_size = cso->req_local_mem;

		p_atomic_inc(&sscreen->b.num_shaders_created);

		program->shader.selector = &sel;

		if (si_shader_create(sscreen, sctx->tm, &program->shader,
		                     &sctx->b.debug)) {
			FREE(sel.tokens);
			FREE(program);
			return NULL;
		}

		scratch_enabled = shader->config.scratch_bytes_per_wave > 0;

		shader->config.rsrc1 =
			S_00B848_VGPRS((shader->config.num_vgprs - 1) / 4) |
			S_00B848_SGPRS((shader->config.num_sgprs - 1) / 8) |
			S_00B848_DX10_CLAMP(1) |
			S_00B848_FLOAT_MODE(shader->config.float_mode);

		shader->config.rsrc2 = S_00B84C_USER_SGPR(SI_CS_NUM_USER_SGPR) |
			S_00B84C_SCRATCH_EN(scratch_enabled) |
			S_00B84C_TGID_X_EN(1) | S_00B84C_TGID_Y_EN(1) |
			S_00B84C_TGID_Z_EN(1) | S_00B84C_TIDIG_COMP_CNT(2) |
			S_00B84C_LDS_SIZE(shader->config.lds_size);

		FREE(sel.tokens);
	} else {
		const struct pipe_llvm_program_header *header;
		const char *code;

		header = cso->prog;
		code = cso->prog + sizeof(struct pipe_llvm_program_header);

		radeon_elf_read(code, header->num_bytes, &program->shader.binary);
		if (program->use_code_object_v2) {
			const amd_kernel_code_t *code_object =
				si_compute_get_code_object(program, 0);
			code_object_to_config(code_object, &program->shader.config);
		} else {
			si_shader_binary_read_config(&program->shader.binary,
				     &program->shader.config, 0);
		}
		si_shader_dump(sctx->screen, &program->shader, &sctx->b.debug,
			       PIPE_SHADER_COMPUTE, stderr);
		si_shader_binary_upload(sctx->screen, &program->shader);
	}

	return program;
}
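/* COMPUTE_PGM_RSRC1 above encodes register counts in hardware granules:
 * VGPRs are allocated in units of 4 and SGPRs in units of 8, hence the
 * (count - 1) / 4 and (count - 1) / 8 encodings. */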
static void si_bind_compute_state(struct pipe_context *ctx, void *state)
{
	struct si_context *sctx = (struct si_context*)ctx;
	sctx->cs_shader_state.program = (struct si_compute*)state;
}
static void si_set_global_binding(
	struct pipe_context *ctx, unsigned first, unsigned n,
	struct pipe_resource **resources,
	uint32_t **handles)
{
	unsigned i;
	struct si_context *sctx = (struct si_context*)ctx;
	struct si_compute *program = sctx->cs_shader_state.program;

	if (!resources) {
		for (i = first; i < first + n; i++) {
			pipe_resource_reference(&program->global_buffers[i], NULL);
		}
		return;
	}

	for (i = first; i < first + n; i++) {
		uint64_t va;
		uint32_t offset;
		pipe_resource_reference(&program->global_buffers[i], resources[i]);
		va = r600_resource(resources[i])->gpu_address;
		offset = util_le32_to_cpu(*handles[i]);
		va += offset;
		va = util_cpu_to_le64(va);
		memcpy(handles[i], &va, sizeof(va));
	}
}
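/* Each handle is patched in place: the caller passes a 32-bit offset in
 * little-endian form, and we overwrite it with the 64-bit little-endian
 * GPU virtual address of the buffer plus that offset. */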
static void si_initialize_compute(struct si_context *sctx)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
	uint64_t bc_va;

	radeon_set_sh_reg_seq(cs, R_00B810_COMPUTE_START_X, 3);
	radeon_emit(cs, 0);
	radeon_emit(cs, 0);
	radeon_emit(cs, 0);

	radeon_set_sh_reg_seq(cs, R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE0, 2);
	/* R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE0 / SE1 */
	radeon_emit(cs, S_00B858_SH0_CU_EN(0xffff) | S_00B858_SH1_CU_EN(0xffff));
	radeon_emit(cs, S_00B85C_SH0_CU_EN(0xffff) | S_00B85C_SH1_CU_EN(0xffff));

	if (sctx->b.chip_class >= CIK) {
		/* Also set R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE2 / SE3 */
		radeon_set_sh_reg_seq(cs,
				      R_00B864_COMPUTE_STATIC_THREAD_MGMT_SE2, 2);
		radeon_emit(cs, S_00B864_SH0_CU_EN(0xffff) |
				S_00B864_SH1_CU_EN(0xffff));
		radeon_emit(cs, S_00B868_SH0_CU_EN(0xffff) |
				S_00B868_SH1_CU_EN(0xffff));
	}

	/* This register has been moved to R_00CD20_COMPUTE_MAX_WAVE_ID
	 * and is now per pipe, so it should be handled in the
	 * kernel if we want to use something other than the default value,
	 * which is now 0x22f.
	 */
	if (sctx->b.chip_class <= SI) {
		/* XXX: This should be:
		 * (number of compute units) * 4 * (waves per simd) - 1 */
		radeon_set_sh_reg(cs, R_00B82C_COMPUTE_MAX_WAVE_ID,
				  0x190 /* Default value */);
	}

	/* Set the pointer to border colors. */
	bc_va = sctx->border_color_buffer->gpu_address;

	if (sctx->b.chip_class >= CIK) {
		radeon_set_uconfig_reg_seq(cs, R_030E00_TA_CS_BC_BASE_ADDR, 2);
		radeon_emit(cs, bc_va >> 8);  /* R_030E00_TA_CS_BC_BASE_ADDR */
		radeon_emit(cs, bc_va >> 40); /* R_030E04_TA_CS_BC_BASE_ADDR_HI */
	} else {
		if (sctx->screen->b.info.drm_major == 3 ||
		    (sctx->screen->b.info.drm_major == 2 &&
		     sctx->screen->b.info.drm_minor >= 48)) {
			radeon_set_config_reg(cs, R_00950C_TA_CS_BC_BASE_ADDR,
					      bc_va >> 8);
		}
	}

	sctx->cs_shader_state.emitted_program = NULL;
	sctx->cs_shader_state.initialized = true;
}
static bool si_setup_compute_scratch_buffer(struct si_context *sctx,
					    struct si_shader *shader,
					    struct si_shader_config *config)
{
	uint64_t scratch_bo_size, scratch_needed;
	scratch_bo_size = 0;
	scratch_needed = config->scratch_bytes_per_wave * sctx->scratch_waves;
	if (sctx->compute_scratch_buffer)
		scratch_bo_size = sctx->compute_scratch_buffer->b.b.width0;

	if (scratch_bo_size < scratch_needed) {
		r600_resource_reference(&sctx->compute_scratch_buffer, NULL);

		sctx->compute_scratch_buffer =
			si_resource_create_custom(&sctx->screen->b.b,
						  PIPE_USAGE_DEFAULT, scratch_needed);

		if (!sctx->compute_scratch_buffer)
			return false;
	}

	if (sctx->compute_scratch_buffer != shader->scratch_bo && scratch_needed) {
		uint64_t scratch_va = sctx->compute_scratch_buffer->gpu_address;

		si_shader_apply_scratch_relocs(sctx, shader, config, scratch_va);

		if (si_shader_binary_upload(sctx->screen, shader))
			return false;

		r600_resource_reference(&shader->scratch_bo,
					sctx->compute_scratch_buffer);
	}

	return true;
}
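/* The scratch buffer is sized for the maximum number of waves the context
 * can have in flight (sctx->scratch_waves). When a shader needs more, the
 * old buffer is dropped, a larger one is allocated, and the shader binary
 * is re-uploaded with its scratch relocations patched to the new address. */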
static bool si_switch_compute_shader(struct si_context *sctx,
				     struct si_compute *program,
				     struct si_shader *shader,
				     const amd_kernel_code_t *code_object,
				     unsigned offset)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
	struct si_shader_config inline_config = {0};
	struct si_shader_config *config;
	uint64_t shader_va;

	if (sctx->cs_shader_state.emitted_program == program &&
	    sctx->cs_shader_state.offset == offset)
		return true;

	if (program->ir_type == PIPE_SHADER_IR_TGSI) {
		config = &shader->config;
	} else {
		unsigned lds_blocks;

		config = &inline_config;
		if (code_object) {
			code_object_to_config(code_object, config);
		} else {
			si_shader_binary_read_config(&shader->binary, config, offset);
		}

		lds_blocks = config->lds_size;
		/* XXX: We are over allocating LDS. For SI, the shader reports
		 * LDS in blocks of 256 bytes, so if there are 4 bytes lds
		 * allocated in the shader and 4 bytes allocated by the state
		 * tracker, then we will set LDS_SIZE to 512 bytes rather than 256.
		 */
		if (sctx->b.chip_class <= SI) {
			lds_blocks += align(program->local_size, 256) >> 8;
		} else {
			lds_blocks += align(program->local_size, 512) >> 9;
		}

		assert(lds_blocks <= 0xFF);

		config->rsrc2 &= C_00B84C_LDS_SIZE;
		config->rsrc2 |= S_00B84C_LDS_SIZE(lds_blocks);
	}

	if (!si_setup_compute_scratch_buffer(sctx, shader, config))
		return false;

	if (shader->scratch_bo) {
		COMPUTE_DBG(sctx->screen, "Waves: %u; Scratch per wave: %u bytes; "
			    "Total Scratch: %u bytes\n", sctx->scratch_waves,
			    config->scratch_bytes_per_wave,
			    config->scratch_bytes_per_wave *
			    sctx->scratch_waves);

		radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
					  shader->scratch_bo, RADEON_USAGE_READWRITE,
					  RADEON_PRIO_SCRATCH_BUFFER);
	}

	shader_va = shader->bo->gpu_address + offset;
	if (program->use_code_object_v2) {
		/* Shader code is placed after the amd_kernel_code_t
		 * struct. */
		shader_va += sizeof(amd_kernel_code_t);
	}

	radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, shader->bo,
				  RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);

	radeon_set_sh_reg_seq(cs, R_00B830_COMPUTE_PGM_LO, 2);
	radeon_emit(cs, shader_va >> 8);
	radeon_emit(cs, shader_va >> 40);

	radeon_set_sh_reg_seq(cs, R_00B848_COMPUTE_PGM_RSRC1, 2);
	radeon_emit(cs, config->rsrc1);
	radeon_emit(cs, config->rsrc2);

	COMPUTE_DBG(sctx->screen, "COMPUTE_PGM_RSRC1: 0x%08x "
		    "COMPUTE_PGM_RSRC2: 0x%08x\n", config->rsrc1, config->rsrc2);

	radeon_set_sh_reg(cs, R_00B860_COMPUTE_TMPRING_SIZE,
			  S_00B860_WAVES(sctx->scratch_waves)
			  | S_00B860_WAVESIZE(config->scratch_bytes_per_wave >> 10));

	sctx->cs_shader_state.emitted_program = program;
	sctx->cs_shader_state.offset = offset;
	sctx->cs_shader_state.uses_scratch =
		config->scratch_bytes_per_wave != 0;

	return true;
}
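/* COMPUTE_PGM_LO/HI take the shader address in 256-byte units, which is
 * why shader_va is written as (va >> 8) and (va >> 40). For code object
 * v2 binaries the entry point sits right after the amd_kernel_code_t
 * header, hence the sizeof(amd_kernel_code_t) offset above. */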
static void setup_scratch_rsrc_user_sgprs(struct si_context *sctx,
					  const amd_kernel_code_t *code_object,
					  unsigned user_sgpr)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
	uint64_t scratch_va = sctx->compute_scratch_buffer->gpu_address;

	unsigned max_private_element_size = AMD_HSA_BITS_GET(
			code_object->code_properties,
			AMD_CODE_PROPERTY_PRIVATE_ELEMENT_SIZE);

	uint32_t scratch_dword0 = scratch_va & 0xffffffff;
	uint32_t scratch_dword1 =
		S_008F04_BASE_ADDRESS_HI(scratch_va >> 32) |
		S_008F04_SWIZZLE_ENABLE(1);

	/* Disable address clamping */
	uint32_t scratch_dword2 = 0xffffffff;
	uint32_t scratch_dword3 =
		S_008F0C_ELEMENT_SIZE(max_private_element_size) |
		S_008F0C_INDEX_STRIDE(3) |
		S_008F0C_ADD_TID_ENABLE(1);

	if (sctx->screen->b.chip_class < VI) {
		/* BUF_DATA_FORMAT is ignored, but it cannot be
		 * BUF_DATA_FORMAT_INVALID. */
		scratch_dword3 |=
			S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_8);
	}

	radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0 +
				  (user_sgpr * 4), 4);
	radeon_emit(cs, scratch_dword0);
	radeon_emit(cs, scratch_dword1);
	radeon_emit(cs, scratch_dword2);
	radeon_emit(cs, scratch_dword3);
}
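/* The four dwords above form a GCN buffer resource descriptor (V#):
 * base address, base-hi/stride/swizzle, num_records, and format/stride
 * flags. ADD_TID_ENABLE makes each lane's accesses implicitly offset by
 * its thread ID, which is how per-lane scratch is addressed. */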
static void si_setup_user_sgprs_co_v2(struct si_context *sctx,
				      const amd_kernel_code_t *code_object,
				      const struct pipe_grid_info *info,
				      uint64_t kernel_args_va)
{
	struct si_compute *program = sctx->cs_shader_state.program;
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;

	static const enum amd_code_property_mask_t workgroup_count_masks [] = {
		AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_X,
		AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Y,
		AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Z
	};

	unsigned i, user_sgpr = 0;
	if (AMD_HSA_BITS_GET(code_object->code_properties,
			AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER)) {
		if (code_object->workitem_private_segment_byte_size > 0) {
			setup_scratch_rsrc_user_sgprs(sctx, code_object,
						      user_sgpr);
		}
		user_sgpr += 4;
	}

	if (AMD_HSA_BITS_GET(code_object->code_properties,
			AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR)) {
		struct dispatch_packet dispatch;
		unsigned dispatch_offset;
		struct r600_resource *dispatch_buf = NULL;
		uint64_t dispatch_va;

		/* Upload dispatch ptr */
		memset(&dispatch, 0, sizeof(dispatch));

		dispatch.workgroup_size_x = info->block[0];
		dispatch.workgroup_size_y = info->block[1];
		dispatch.workgroup_size_z = info->block[2];

		dispatch.grid_size_x = info->grid[0] * info->block[0];
		dispatch.grid_size_y = info->grid[1] * info->block[1];
		dispatch.grid_size_z = info->grid[2] * info->block[2];

		dispatch.private_segment_size = program->private_size;
		dispatch.group_segment_size = program->local_size;

		dispatch.kernarg_address = kernel_args_va;

		u_upload_data(sctx->b.uploader, 0, sizeof(dispatch), 256,
			      &dispatch, &dispatch_offset,
			      (struct pipe_resource**)&dispatch_buf);

		if (!dispatch_buf) {
			fprintf(stderr, "Error: Failed to allocate dispatch "
					"packet.");
		}
		radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, dispatch_buf,
					  RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER);

		dispatch_va = dispatch_buf->gpu_address + dispatch_offset;

		radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0 +
					  (user_sgpr * 4), 2);
		radeon_emit(cs, dispatch_va);
		radeon_emit(cs, S_008F04_BASE_ADDRESS_HI(dispatch_va >> 32) |
				S_008F04_STRIDE(0));

		r600_resource_reference(&dispatch_buf, NULL);
		user_sgpr += 2;
	}

	if (AMD_HSA_BITS_GET(code_object->code_properties,
			AMD_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR)) {
		radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0 +
					  (user_sgpr * 4), 2);
		radeon_emit(cs, kernel_args_va);
		radeon_emit(cs, S_008F04_BASE_ADDRESS_HI (kernel_args_va >> 32) |
				S_008F04_STRIDE(0));
		user_sgpr += 2;
	}

	for (i = 0; i < 3 && user_sgpr < 16; i++) {
		if (code_object->code_properties & workgroup_count_masks[i]) {
			radeon_set_sh_reg_seq(cs,
					      R_00B900_COMPUTE_USER_DATA_0 +
					      (user_sgpr * 4), 1);
			radeon_emit(cs, info->grid[i]);
			user_sgpr += 1;
		}
	}
}
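/* User SGPRs are assigned in the order the code-object properties request
 * them: 4 for the private segment buffer resource, 2 for the dispatch
 * packet pointer, 2 for the kernarg pointer, then 1 per enabled grid
 * workgroup count, all within the 16 available user SGPRs. */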
static void si_upload_compute_input(struct si_context *sctx,
				    const amd_kernel_code_t *code_object,
				    const struct pipe_grid_info *info)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
	struct si_compute *program = sctx->cs_shader_state.program;
	struct r600_resource *input_buffer = NULL;
	unsigned kernel_args_size;
	unsigned num_work_size_bytes = program->use_code_object_v2 ? 0 : 36;
	uint32_t kernel_args_offset = 0;
	uint32_t *kernel_args;
	void *kernel_args_ptr;
	uint64_t kernel_args_va;
	unsigned i;

	/* The extra num_work_size_bytes are for work group / work item size information */
	kernel_args_size = program->input_size + num_work_size_bytes;

	u_upload_alloc(sctx->b.uploader, 0, kernel_args_size, 256,
		       &kernel_args_offset,
		       (struct pipe_resource**)&input_buffer, &kernel_args_ptr);

	kernel_args = (uint32_t*)kernel_args_ptr;
	kernel_args_va = input_buffer->gpu_address + kernel_args_offset;

	if (!code_object) {
		for (i = 0; i < 3; i++) {
			kernel_args[i] = info->grid[i];
			kernel_args[i + 3] = info->grid[i] * info->block[i];
			kernel_args[i + 6] = info->block[i];
		}
	}

	memcpy(kernel_args + (num_work_size_bytes / 4), info->input,
	       program->input_size);

	for (i = 0; i < (kernel_args_size / 4); i++) {
		COMPUTE_DBG(sctx->screen, "input %u : %u\n", i,
			    kernel_args[i]);
	}

	radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, input_buffer,
				  RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER);

	if (code_object) {
		si_setup_user_sgprs_co_v2(sctx, code_object, info, kernel_args_va);
	} else {
		radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0, 2);
		radeon_emit(cs, kernel_args_va);
		radeon_emit(cs, S_008F04_BASE_ADDRESS_HI (kernel_args_va >> 32) |
				S_008F04_STRIDE(0));
	}

	r600_resource_reference(&input_buffer, NULL);
}
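/* For the non-code-object path, the 36-byte prefix holds nine dwords of
 * work-size data: grid dimensions in kernel_args[0..2], global sizes
 * (grid * block) in [3..5], and block dimensions in [6..8]; the actual
 * kernel arguments follow at byte offset 36. */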
static void si_setup_tgsi_grid(struct si_context *sctx,
			       const struct pipe_grid_info *info)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
	unsigned grid_size_reg = R_00B900_COMPUTE_USER_DATA_0 +
				 4 * SI_SGPR_GRID_SIZE;

	if (info->indirect) {
		uint64_t base_va = r600_resource(info->indirect)->gpu_address;
		uint64_t va = base_va + info->indirect_offset;
		int i;

		radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
					  (struct r600_resource *)info->indirect,
					  RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);

		for (i = 0; i < 3; ++i) {
			radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
			radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
					COPY_DATA_DST_SEL(COPY_DATA_REG));
			radeon_emit(cs, (va + 4 * i));
			radeon_emit(cs, (va + 4 * i) >> 32);
			radeon_emit(cs, (grid_size_reg >> 2) + i);
			radeon_emit(cs, 0);
		}
	} else {
		struct si_compute *program = sctx->cs_shader_state.program;
		bool variable_group_size =
			program->shader.selector->info.properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] == 0;

		radeon_set_sh_reg_seq(cs, grid_size_reg, variable_group_size ? 6 : 3);
		radeon_emit(cs, info->grid[0]);
		radeon_emit(cs, info->grid[1]);
		radeon_emit(cs, info->grid[2]);
		if (variable_group_size) {
			radeon_emit(cs, info->block[0]);
			radeon_emit(cs, info->block[1]);
			radeon_emit(cs, info->block[2]);
		}
	}
}
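/* Each COPY_DATA packet above copies one dword of the indirect grid size
 * from GPU memory straight into a COMPUTE_USER_DATA register; the
 * destination is addressed by register offset in dwords, hence
 * (grid_size_reg >> 2) + i. */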
static void si_emit_dispatch_packets(struct si_context *sctx,
				     const struct pipe_grid_info *info)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
	bool render_cond_bit = sctx->b.render_cond && !sctx->b.render_cond_force_off;
	unsigned waves_per_threadgroup =
		DIV_ROUND_UP(info->block[0] * info->block[1] * info->block[2], 64);

	radeon_set_sh_reg(cs, R_00B854_COMPUTE_RESOURCE_LIMITS,
			  S_00B854_SIMD_DEST_CNTL(waves_per_threadgroup % 4 == 0));

	radeon_set_sh_reg_seq(cs, R_00B81C_COMPUTE_NUM_THREAD_X, 3);
	radeon_emit(cs, S_00B81C_NUM_THREAD_FULL(info->block[0]));
	radeon_emit(cs, S_00B820_NUM_THREAD_FULL(info->block[1]));
	radeon_emit(cs, S_00B824_NUM_THREAD_FULL(info->block[2]));

	if (info->indirect) {
		uint64_t base_va = r600_resource(info->indirect)->gpu_address;

		radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
					  (struct r600_resource *)info->indirect,
					  RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);

		radeon_emit(cs, PKT3(PKT3_SET_BASE, 2, 0) |
				PKT3_SHADER_TYPE_S(1));
		radeon_emit(cs, 1);
		radeon_emit(cs, base_va);
		radeon_emit(cs, base_va >> 32);

		radeon_emit(cs, PKT3(PKT3_DISPATCH_INDIRECT, 1, render_cond_bit) |
				PKT3_SHADER_TYPE_S(1));
		radeon_emit(cs, info->indirect_offset);
		radeon_emit(cs, 1);
	} else {
		radeon_emit(cs, PKT3(PKT3_DISPATCH_DIRECT, 3, render_cond_bit) |
				PKT3_SHADER_TYPE_S(1));
		radeon_emit(cs, info->grid[0]);
		radeon_emit(cs, info->grid[1]);
		radeon_emit(cs, info->grid[2]);
		radeon_emit(cs, 1);
	}
}
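/* The trailing "1" emitted in each dispatch path above is the dispatch
 * initiator dword (COMPUTE_SHADER_EN); PKT3_SHADER_TYPE_S(1) marks the
 * packets as compute rather than graphics. */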
static void si_launch_grid(
		struct pipe_context *ctx, const struct pipe_grid_info *info)
{
	struct si_context *sctx = (struct si_context*)ctx;
	struct si_compute *program = sctx->cs_shader_state.program;
	const amd_kernel_code_t *code_object =
		si_compute_get_code_object(program, info->pc);
	int i;
	/* HW bug workaround when CS threadgroups > 256 threads and async
	 * compute isn't used, i.e. only one compute job can run at a time.
	 * If async compute is possible, the threadgroup size must be limited
	 * to 256 threads on all queues to avoid the bug.
	 * Only SI and certain CIK chips are affected.
	 */
	bool cs_regalloc_hang =
		(sctx->b.chip_class == SI ||
		 sctx->b.family == CHIP_BONAIRE ||
		 sctx->b.family == CHIP_KABINI) &&
		info->block[0] * info->block[1] * info->block[2] > 256;

	if (cs_regalloc_hang)
		sctx->b.flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
				 SI_CONTEXT_CS_PARTIAL_FLUSH;

	si_decompress_compute_textures(sctx);

	/* Add buffer sizes for memory checking in need_cs_space. */
	r600_context_add_resource_size(ctx, &program->shader.bo->b.b);
	/* TODO: add the scratch buffer */

	if (info->indirect) {
		r600_context_add_resource_size(ctx, info->indirect);

		/* The hw doesn't read the indirect buffer via TC L2. */
		if (r600_resource(info->indirect)->TC_L2_dirty) {
			sctx->b.flags |= SI_CONTEXT_WRITEBACK_GLOBAL_L2;
			r600_resource(info->indirect)->TC_L2_dirty = false;
		}
	}

	si_need_cs_space(sctx);

	if (!sctx->cs_shader_state.initialized)
		si_initialize_compute(sctx);

	if (sctx->b.flags)
		si_emit_cache_flush(sctx);

	if (!si_switch_compute_shader(sctx, program, &program->shader,
				      code_object, info->pc))
		return;

	si_upload_compute_shader_descriptors(sctx);
	si_emit_compute_shader_userdata(sctx);

	if (si_is_atom_dirty(sctx, sctx->atoms.s.render_cond)) {
		sctx->atoms.s.render_cond->emit(&sctx->b,
						sctx->atoms.s.render_cond);
		si_set_atom_dirty(sctx, sctx->atoms.s.render_cond, false);
	}

	if (program->input_size || program->ir_type == PIPE_SHADER_IR_NATIVE)
		si_upload_compute_input(sctx, code_object, info);

	/* Global buffers */
	for (i = 0; i < MAX_GLOBAL_BUFFERS; i++) {
		struct r600_resource *buffer =
			(struct r600_resource*)program->global_buffers[i];
		if (!buffer) {
			continue;
		}
		radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, buffer,
					  RADEON_USAGE_READWRITE,
					  RADEON_PRIO_COMPUTE_GLOBAL);
	}

	if (program->ir_type == PIPE_SHADER_IR_TGSI)
		si_setup_tgsi_grid(sctx, info);

	si_ce_pre_draw_synchronization(sctx);

	si_emit_dispatch_packets(sctx, info);

	si_ce_post_draw_synchronization(sctx);

	sctx->compute_is_busy = true;
	sctx->b.num_compute_calls++;
	if (sctx->cs_shader_state.uses_scratch)
		sctx->b.num_spill_compute_calls++;

	if (cs_regalloc_hang)
		sctx->b.flags |= SI_CONTEXT_CS_PARTIAL_FLUSH;
}
static void si_delete_compute_state(struct pipe_context *ctx, void* state){
	struct si_compute *program = (struct si_compute *)state;
	struct si_context *sctx = (struct si_context*)ctx;

	if (!state) {
		return;
	}

	if (program == sctx->cs_shader_state.program)
		sctx->cs_shader_state.program = NULL;

	if (program == sctx->cs_shader_state.emitted_program)
		sctx->cs_shader_state.emitted_program = NULL;

	si_shader_destroy(&program->shader);
	FREE(program);
}
static void si_set_compute_resources(struct pipe_context * ctx_,
				     unsigned start, unsigned count,
				     struct pipe_surface ** surfaces) { }
void si_init_compute_functions(struct si_context *sctx)
{
	sctx->b.b.create_compute_state = si_create_compute_state;
	sctx->b.b.delete_compute_state = si_delete_compute_state;
	sctx->b.b.bind_compute_state = si_bind_compute_state;
	/* ctx->context.create_sampler_view = evergreen_compute_create_sampler_view; */
	sctx->b.b.set_compute_resources = si_set_compute_resources;
	sctx->b.b.set_global_binding = si_set_global_binding;
	sctx->b.b.launch_grid = si_launch_grid;
}