/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "util/u_memory.h"
#include "radeon/r600_pipe_common.h"
#include "radeon/radeon_elf_util.h"
#include "radeon/radeon_llvm_util.h"

#include "radeon/r600_cs.h"
#include "si_pipe.h"
#include "si_shader.h"
#include "sid.h"
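/* Upper bound on the number of resources bound through
 * pipe_context::set_global_binding; it sizes the global_buffers array in
 * struct si_compute below. */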
#define MAX_GLOBAL_BUFFERS 20
/* XXX: Even though we don't pass the scratch buffer via user sgprs any more
 * LLVM still expects that we specify 4 USER_SGPRS so it can remain compatible
 * with older mesa. */
#define NUM_USER_SGPRS 4
struct si_compute {
        struct si_context *ctx;

        unsigned local_size;
        unsigned private_size;
        unsigned input_size;
        struct si_shader shader;
        unsigned num_user_sgprs;

        struct r600_resource *input_buffer;
        struct pipe_resource *global_buffers[MAX_GLOBAL_BUFFERS];

#if HAVE_LLVM < 0x0306
        unsigned num_kernels;
        struct si_shader *kernels;
        LLVMContextRef llvm_ctx;
#endif
};
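/* Allocate one scratch buffer sized for the worst-case kernel in the binary
 * and patch the shader code with its address. Sizing for the maximum wave
 * count up front means the buffer never has to be reallocated at launch
 * time. */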
static void init_scratch_buffer(struct si_context *sctx, struct si_compute *program)
{
        unsigned scratch_bytes = 0;
        uint64_t scratch_buffer_va;
        unsigned i;

        /* Compute the scratch buffer size using the maximum number of waves.
         * This way we don't need to recompute it for each kernel launch. */
        unsigned scratch_waves = 32 * sctx->screen->b.info.max_compute_units;
        for (i = 0; i < program->shader.binary.global_symbol_count; i++) {
                unsigned offset =
                        program->shader.binary.global_symbol_offsets[i];
                unsigned scratch_bytes_needed;

                si_shader_binary_read_config(sctx->screen,
                                &program->shader, offset);
                scratch_bytes_needed = program->shader.scratch_bytes_per_wave;
                scratch_bytes = MAX2(scratch_bytes, scratch_bytes_needed);
        }

        if (scratch_bytes == 0)
                return;

        program->shader.scratch_bo = (struct r600_resource *)
                si_resource_create_custom(sctx->b.b.screen,
                                PIPE_USAGE_DEFAULT,
                                scratch_bytes * scratch_waves);

        scratch_buffer_va = program->shader.scratch_bo->gpu_address;

        /* apply_scratch_relocs needs scratch_bytes_per_wave to be set
         * to the maximum bytes needed, so it can compute the stride
         * correctly.
         */
        program->shader.scratch_bytes_per_wave = scratch_bytes;

        /* Patch the shader with the scratch buffer address. */
        si_shader_apply_scratch_relocs(sctx,
                        &program->shader, scratch_buffer_va);
}
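/* Create a compute state object from the LLVM bytecode handed to us by the
 * state tracker. With LLVM older than 3.6 every kernel in the module is
 * compiled up front; otherwise the precompiled ELF binary is read and the
 * shared scratch buffer is set up before the code is uploaded. */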
static void *si_create_compute_state(
        struct pipe_context *ctx,
        const struct pipe_compute_state *cso)
{
        struct si_context *sctx = (struct si_context *)ctx;
        struct si_compute *program = CALLOC_STRUCT(si_compute);
        const struct pipe_llvm_program_header *header;
        const char *code;

        header = cso->prog;
        code = cso->prog + sizeof(struct pipe_llvm_program_header);

        program->ctx = sctx;
        program->local_size = cso->req_local_mem;
        program->private_size = cso->req_private_mem;
        program->input_size = cso->req_input_mem;

#if HAVE_LLVM < 0x0306
        {
                unsigned i;
                program->llvm_ctx = LLVMContextCreate();
                program->num_kernels = radeon_llvm_get_num_kernels(program->llvm_ctx,
                                code, header->num_bytes);
                program->kernels = CALLOC(sizeof(struct si_shader),
                                program->num_kernels);
                for (i = 0; i < program->num_kernels; i++) {
                        LLVMModuleRef mod = radeon_llvm_get_kernel_module(program->llvm_ctx, i,
                                        code, header->num_bytes);
                        si_compile_llvm(sctx->screen, &program->kernels[i], sctx->tm,
                                        mod);
                        LLVMDisposeModule(mod);
                }
        }
#else
        radeon_elf_read(code, header->num_bytes, &program->shader.binary);

        /* init_scratch_buffer patches the shader code with the scratch address,
         * so we need to call it before si_shader_binary_read() which uploads
         * the shader code to the GPU.
         */
        init_scratch_buffer(sctx, program);
        si_shader_binary_read(sctx->screen, &program->shader);
#endif

        program->input_buffer = si_resource_create_custom(sctx->b.b.screen,
                        PIPE_USAGE_IMMUTABLE, program->input_size);

        return program;
}
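/* Binding is trivial for compute: just remember the program so
 * si_launch_grid can find it later. */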
static void si_bind_compute_state(struct pipe_context *ctx, void *state)
{
        struct si_context *sctx = (struct si_context *)ctx;
        sctx->cs_shader_state.program = (struct si_compute *)state;
}
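/* Bind (or unbind, when resources is NULL) the buffers backing OpenCL global
 * pointers. Each handle is rewritten in place: the 32-bit offset the state
 * tracker stored there is replaced with the full 64-bit GPU address of the
 * buffer plus that offset. */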
static void si_set_global_binding(
        struct pipe_context *ctx, unsigned first, unsigned n,
        struct pipe_resource **resources,
        uint32_t **handles)
{
        unsigned i;
        struct si_context *sctx = (struct si_context *)ctx;
        struct si_compute *program = sctx->cs_shader_state.program;

        if (!resources) {
                for (i = first; i < first + n; i++) {
                        pipe_resource_reference(&program->global_buffers[i], NULL);
                }
                return;
        }

        for (i = first; i < first + n; i++) {
                uint64_t va;
                uint32_t offset;
                pipe_resource_reference(&program->global_buffers[i], resources[i]);
                va = r600_resource(resources[i])->gpu_address;
                offset = util_le32_to_cpu(*handles[i]);
                va += offset;
                va = util_cpu_to_le64(va);
                memcpy(handles[i], &va, sizeof(va));
        }
}
/**
 * This function computes the value for R_00B860_COMPUTE_TMPRING_SIZE.WAVES.
 * \p block_layout is the number of threads in each work group.
 * \p grid_layout is the number of work groups.
 */
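/* Worked example (hypothetical chip with num_se = 2 and num_sh = 2, i.e.
 * four shader arrays): a 100x1x1 grid of 256x1x1 blocks gives
 * threads_per_block = 256, so waves_per_block = 4 and waves = 400.
 * waves_per_sh = align(400, 4) / 4 = 100, so scratch_waves = 400. For a
 * single-block grid, waves = 4 and waves_per_sh = 1, which would leave only
 * one wave of scratch per shader array; the waves_per_block > waves_per_sh
 * check below bumps scratch_waves to 4 * 4 = 16 so that whichever array the
 * block lands on has room for all of its waves. */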
static unsigned compute_num_waves_for_scratch(
                const struct radeon_info *info,
                const uint *block_layout,
                const uint *grid_layout)
{
        unsigned num_sh = MAX2(info->max_sh_per_se, 1);
        unsigned num_se = MAX2(info->max_se, 1);
        unsigned num_blocks = 1;
        unsigned threads_per_block = 1;
        unsigned waves_per_block;
        unsigned waves_per_sh;
        unsigned waves;
        unsigned scratch_waves;
        unsigned i;

        for (i = 0; i < 3; i++) {
                threads_per_block *= block_layout[i];
                num_blocks *= grid_layout[i];
        }

        waves_per_block = align(threads_per_block, 64) / 64;
        waves = waves_per_block * num_blocks;
        waves_per_sh = align(waves, num_sh * num_se) / (num_sh * num_se);
        scratch_waves = waves_per_sh * num_sh * num_se;

        if (waves_per_block > waves_per_sh) {
                scratch_waves = waves_per_block * num_sh * num_se;
        }

        return scratch_waves;
}
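/* Emit the PM4 state and DISPATCH_DIRECT packet for one kernel launch:
 * flush caches, upload the kernel arguments (the first 36 bytes hold the
 * grid/global/block sizes as nine 32-bit values), point the user SGPRs at
 * the argument and scratch buffers, program the block/grid dimensions and
 * resource limits, then dispatch. */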
static void si_launch_grid(
                struct pipe_context *ctx,
                const uint *block_layout, const uint *grid_layout,
                uint32_t pc, const void *input)
{
        struct si_context *sctx = (struct si_context *)ctx;
        struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
        struct si_compute *program = sctx->cs_shader_state.program;
        struct si_pm4_state *pm4 = CALLOC_STRUCT(si_pm4_state);
        struct r600_resource *input_buffer = program->input_buffer;
        unsigned kernel_args_size;
        unsigned num_work_size_bytes = 36;
        uint32_t kernel_args_offset = 0;
        uint32_t *kernel_args;
        uint64_t kernel_args_va;
        uint64_t scratch_buffer_va = 0;
        uint64_t shader_va;
        unsigned arg_user_sgpr_count = NUM_USER_SGPRS;
        unsigned i;
        struct si_shader *shader = &program->shader;
        unsigned lds_blocks;
        unsigned num_waves_for_scratch;

#if HAVE_LLVM < 0x0306
        shader = &program->kernels[pc];
#endif

        radeon_emit(cs, PKT3(PKT3_CONTEXT_CONTROL, 1, 0) | PKT3_SHADER_TYPE_S(1));
        radeon_emit(cs, 0x80000000);
        radeon_emit(cs, 0x80000000);

        sctx->b.flags |= SI_CONTEXT_INV_TC_L1 |
                         SI_CONTEXT_INV_TC_L2 |
                         SI_CONTEXT_INV_ICACHE |
                         SI_CONTEXT_INV_KCACHE |
                         SI_CONTEXT_FLUSH_WITH_INV_L2 |
                         SI_CONTEXT_FLAG_COMPUTE;
        si_emit_cache_flush(sctx, NULL);

        pm4->compute_pkt = true;

#if HAVE_LLVM >= 0x0306
        /* Read the config information */
        si_shader_binary_read_config(sctx->screen, shader, pc);
#endif

        /* Upload the kernel arguments */

        /* The extra num_work_size_bytes are for work group / work item size information */
        kernel_args_size = program->input_size + num_work_size_bytes + 8 /* For scratch va */;

        kernel_args = sctx->b.ws->buffer_map(input_buffer->cs_buf,
                        sctx->b.rings.gfx.cs, PIPE_TRANSFER_WRITE);
        for (i = 0; i < 3; i++) {
                kernel_args[i] = grid_layout[i];
                kernel_args[i + 3] = grid_layout[i] * block_layout[i];
                kernel_args[i + 6] = block_layout[i];
        }

        num_waves_for_scratch = compute_num_waves_for_scratch(
                &sctx->screen->b.info, block_layout, grid_layout);

        memcpy(kernel_args + (num_work_size_bytes / 4), input, program->input_size);

        if (shader->scratch_bytes_per_wave > 0) {
                COMPUTE_DBG(sctx->screen, "Waves: %u; Scratch per wave: %u bytes; "
                            "Total Scratch: %u bytes\n", num_waves_for_scratch,
                            shader->scratch_bytes_per_wave,
                            shader->scratch_bytes_per_wave *
                            num_waves_for_scratch);

                radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx,
                                shader->scratch_bo,
                                RADEON_USAGE_READWRITE,
                                RADEON_PRIO_SCRATCH_BUFFER);

                scratch_buffer_va = shader->scratch_bo->gpu_address;
        }

        for (i = 0; i < (kernel_args_size / 4); i++) {
                COMPUTE_DBG(sctx->screen, "input %u : %u\n", i,
                            kernel_args[i]);
        }

        kernel_args_va = input_buffer->gpu_address;
        kernel_args_va += kernel_args_offset;

        radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx, input_buffer,
                        RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER);

        si_pm4_set_reg(pm4, R_00B900_COMPUTE_USER_DATA_0, kernel_args_va);
        si_pm4_set_reg(pm4, R_00B900_COMPUTE_USER_DATA_0 + 4, S_008F04_BASE_ADDRESS_HI(kernel_args_va >> 32) | S_008F04_STRIDE(0));
        si_pm4_set_reg(pm4, R_00B900_COMPUTE_USER_DATA_0 + 8, scratch_buffer_va);
        si_pm4_set_reg(pm4, R_00B900_COMPUTE_USER_DATA_0 + 12,
                S_008F04_BASE_ADDRESS_HI(scratch_buffer_va >> 32)
                | S_008F04_STRIDE(shader->scratch_bytes_per_wave / 64));

        si_pm4_set_reg(pm4, R_00B810_COMPUTE_START_X, 0);
        si_pm4_set_reg(pm4, R_00B814_COMPUTE_START_Y, 0);
        si_pm4_set_reg(pm4, R_00B818_COMPUTE_START_Z, 0);

        si_pm4_set_reg(pm4, R_00B81C_COMPUTE_NUM_THREAD_X,
                        S_00B81C_NUM_THREAD_FULL(block_layout[0]));
        si_pm4_set_reg(pm4, R_00B820_COMPUTE_NUM_THREAD_Y,
                        S_00B820_NUM_THREAD_FULL(block_layout[1]));
        si_pm4_set_reg(pm4, R_00B824_COMPUTE_NUM_THREAD_Z,
                        S_00B824_NUM_THREAD_FULL(block_layout[2]));

        /* Global buffers */
        for (i = 0; i < MAX_GLOBAL_BUFFERS; i++) {
                struct r600_resource *buffer =
                        (struct r600_resource *)program->global_buffers[i];
                if (!buffer) {
                        continue;
                }
                radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx, buffer,
                                RADEON_USAGE_READWRITE,
                                RADEON_PRIO_COMPUTE_GLOBAL);
        }

        /* This register has been moved to R_00CD20_COMPUTE_MAX_WAVE_ID
         * and is now per pipe, so it should be handled in the
         * kernel if we want to use something other than the default value,
         * which is now 0x22f.
         */
        if (sctx->b.chip_class <= SI) {
                /* XXX: This should be:
                 * (number of compute units) * 4 * (waves per simd) - 1 */
                si_pm4_set_reg(pm4, R_00B82C_COMPUTE_MAX_WAVE_ID,
                               0x190 /* Default value */);
        }

        shader_va = shader->bo->gpu_address;
#if HAVE_LLVM >= 0x0306
        shader_va += pc;
#endif
        radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx, shader->bo,
                        RADEON_USAGE_READ, RADEON_PRIO_USER_SHADER);
        si_pm4_set_reg(pm4, R_00B830_COMPUTE_PGM_LO, shader_va >> 8);
        si_pm4_set_reg(pm4, R_00B834_COMPUTE_PGM_HI, shader_va >> 40);

        si_pm4_set_reg(pm4, R_00B848_COMPUTE_PGM_RSRC1,
                /* We always use at least 3 VGPRS, these come from
                 * TIDIG_COMP_CNT.
                 * XXX: The compiler should account for this.
                 */
                S_00B848_VGPRS((MAX2(3, shader->num_vgprs) - 1) / 4)
                /* We always use at least 4 + arg_user_sgpr_count. The 4 extra
                 * sgprs are from TGID_X_EN, TGID_Y_EN, TGID_Z_EN, TG_SIZE_EN.
                 * XXX: The compiler should account for this.
                 */
                | S_00B848_SGPRS(((MAX2(4 + arg_user_sgpr_count,
                                        shader->num_sgprs)) - 1) / 8)
                | S_00B028_FLOAT_MODE(shader->float_mode));

        lds_blocks = shader->lds_size;
        /* XXX: We are over allocating LDS. For SI, the shader reports LDS in
         * blocks of 256 bytes, so if there are 4 bytes lds allocated in
         * the shader and 4 bytes allocated by the state tracker, then
         * we will set LDS_SIZE to 512 bytes rather than 256.
         */
        if (sctx->b.chip_class <= SI) {
                lds_blocks += align(program->local_size, 256) >> 8;
        } else {
                lds_blocks += align(program->local_size, 512) >> 9;
        }

        assert(lds_blocks <= 0xFF);

        si_pm4_set_reg(pm4, R_00B84C_COMPUTE_PGM_RSRC2,
                S_00B84C_SCRATCH_EN(shader->scratch_bytes_per_wave > 0)
                | S_00B84C_USER_SGPR(arg_user_sgpr_count)
                | S_00B84C_TGID_X_EN(1)
                | S_00B84C_TGID_Y_EN(1)
                | S_00B84C_TGID_Z_EN(1)
                | S_00B84C_TG_SIZE_EN(1)
                | S_00B84C_TIDIG_COMP_CNT(2)
                | S_00B84C_LDS_SIZE(lds_blocks)
                | S_00B84C_EXCP_EN(0));

        si_pm4_set_reg(pm4, R_00B854_COMPUTE_RESOURCE_LIMITS, 0);

        si_pm4_set_reg(pm4, R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE0,
                S_00B858_SH0_CU_EN(0xffff /* Default value */)
                | S_00B858_SH1_CU_EN(0xffff /* Default value */));

        si_pm4_set_reg(pm4, R_00B85C_COMPUTE_STATIC_THREAD_MGMT_SE1,
                S_00B85C_SH0_CU_EN(0xffff /* Default value */)
                | S_00B85C_SH1_CU_EN(0xffff /* Default value */));

        num_waves_for_scratch =
                MIN2(num_waves_for_scratch,
                     32 * sctx->screen->b.info.max_compute_units);
        si_pm4_set_reg(pm4, R_00B860_COMPUTE_TMPRING_SIZE,
                /* The maximum value for WAVES is 32 * num CU.
                 * If you program this value incorrectly, the GPU will hang if
                 * COMPUTE_PGM_RSRC2.SCRATCH_EN is enabled.
                 */
                S_00B860_WAVES(num_waves_for_scratch)
                | S_00B860_WAVESIZE(shader->scratch_bytes_per_wave >> 10));

        si_pm4_cmd_begin(pm4, PKT3_DISPATCH_DIRECT);
        si_pm4_cmd_add(pm4, grid_layout[0]); /* Thread groups DIM_X */
        si_pm4_cmd_add(pm4, grid_layout[1]); /* Thread groups DIM_Y */
        si_pm4_cmd_add(pm4, grid_layout[2]); /* Thread groups DIM_Z */
        si_pm4_cmd_add(pm4, 1); /* DISPATCH_INITIATOR */
        si_pm4_cmd_end(pm4, false);

        si_pm4_emit(sctx, pm4);

#if 0
        fprintf(stderr, "cdw: %i\n", sctx->cs->cdw);
        for (i = 0; i < sctx->cs->cdw; i++) {
                fprintf(stderr, "%4i : 0x%08X\n", i, sctx->cs->buf[i]);
        }
#endif

        si_pm4_free_state(sctx, pm4, ~0);

        sctx->b.flags |= SI_CONTEXT_CS_PARTIAL_FLUSH |
                         SI_CONTEXT_INV_TC_L1 |
                         SI_CONTEXT_INV_TC_L2 |
                         SI_CONTEXT_INV_ICACHE |
                         SI_CONTEXT_INV_KCACHE |
                         SI_CONTEXT_FLAG_COMPUTE;
        si_emit_cache_flush(sctx, NULL);
}
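/* Destroy a compute state object: release the per-kernel shaders (pre-3.6
 * LLVM path) or the ELF binary data, then drop the input buffer. */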
static void si_delete_compute_state(struct pipe_context *ctx, void *state)
{
        struct si_compute *program = (struct si_compute *)state;

        if (!state) {
                return;
        }

#if HAVE_LLVM < 0x0306
        if (program->kernels) {
                for (int i = 0; i < program->num_kernels; i++) {
                        if (program->kernels[i].bo) {
                                si_shader_destroy(&program->kernels[i]);
                        }
                }
                FREE(program->kernels);
        }

        if (program->llvm_ctx) {
                LLVMContextDispose(program->llvm_ctx);
        }
#else
        FREE(program->shader.binary.config);
        FREE(program->shader.binary.rodata);
        FREE(program->shader.binary.global_symbol_offsets);
        si_shader_destroy(&program->shader);
#endif

        pipe_resource_reference(
                (struct pipe_resource **)&program->input_buffer, NULL);

        FREE(program);
}
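/* Compute resources (pipe_surfaces) are not used by this driver; the hook
 * only exists so state trackers can call it. */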
static void si_set_compute_resources(struct pipe_context *ctx_,
                unsigned start, unsigned count,
                struct pipe_surface **surfaces) { }
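/* Hook the compute entry points into the pipe_context vtable. */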
void si_init_compute_functions(struct si_context *sctx)
{
        sctx->b.b.create_compute_state = si_create_compute_state;
        sctx->b.b.delete_compute_state = si_delete_compute_state;
        sctx->b.b.bind_compute_state = si_bind_compute_state;
/*      ctx->context.create_sampler_view = evergreen_compute_create_sampler_view; */
        sctx->b.b.set_compute_resources = si_set_compute_resources;
        sctx->b.b.set_global_binding = si_set_global_binding;
        sctx->b.b.launch_grid = si_launch_grid;
}