/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "tgsi/tgsi_parse.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"

#include "radeon/r600_pipe_common.h"
#include "radeon/radeon_elf_util.h"
#include "radeon/radeon_llvm_util.h"

#include "radeon/r600_cs.h"
#include "si_pipe.h"
#include "si_shader.h"
#include "sid.h"
#define MAX_GLOBAL_BUFFERS 20
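
/* Per-CSO compute state object. The compiled shader lives in "shader";
 * global_buffers holds resources bound through set_global_binding for
 * OpenCL-style global memory access.
 */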
struct si_compute {
	unsigned ir_type;
	unsigned local_size;
	unsigned private_size;
	unsigned input_size;
	struct si_shader shader;

	struct pipe_resource *global_buffers[MAX_GLOBAL_BUFFERS];
};
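
/* Create a compute state object from either TGSI tokens or a native
 * (LLVM-compiled ELF) binary, depending on cso->ir_type. For TGSI the
 * shader is compiled immediately and the RSRC1/RSRC2 register values are
 * derived from the shader config; for native code the config is read
 * back out of the ELF binary.
 */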
static void *si_create_compute_state(
	struct pipe_context *ctx,
	const struct pipe_compute_state *cso)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_screen *sscreen = (struct si_screen *)ctx->screen;
	struct si_compute *program = CALLOC_STRUCT(si_compute);
	struct si_shader *shader = &program->shader;

	program->ir_type = cso->ir_type;
	program->local_size = cso->req_local_mem;
	program->private_size = cso->req_private_mem;
	program->input_size = cso->req_input_mem;

	if (cso->ir_type == PIPE_SHADER_IR_TGSI) {
		struct si_shader_selector sel;
		bool scratch_enabled;

		memset(&sel, 0, sizeof(sel));

		sel.tokens = tgsi_dup_tokens(cso->prog);
		if (!sel.tokens) {
			FREE(program);
			return NULL;
		}

		tgsi_scan_shader(cso->prog, &sel.info);
		sel.type = PIPE_SHADER_COMPUTE;
		sel.local_size = cso->req_local_mem;

		p_atomic_inc(&sscreen->b.num_shaders_created);

		program->shader.selector = &sel;

		if (si_shader_create(sscreen, sctx->tm, &program->shader,
		                     &sctx->b.debug)) {
			FREE(sel.tokens);
			FREE(program);
			return NULL;
		}

		scratch_enabled = shader->config.scratch_bytes_per_wave > 0;

		shader->config.rsrc1 =
			S_00B848_VGPRS((shader->config.num_vgprs - 1) / 4) |
			S_00B848_SGPRS((shader->config.num_sgprs - 1) / 8) |
			S_00B848_DX10_CLAMP(1) |
			S_00B848_FLOAT_MODE(shader->config.float_mode);

		shader->config.rsrc2 = S_00B84C_USER_SGPR(SI_CS_NUM_USER_SGPR) |
			S_00B84C_SCRATCH_EN(scratch_enabled) |
			S_00B84C_TGID_X_EN(1) | S_00B84C_TGID_Y_EN(1) |
			S_00B84C_TGID_Z_EN(1) | S_00B84C_TIDIG_COMP_CNT(2) |
			S_00B84C_LDS_SIZE(shader->config.lds_size);

		FREE(sel.tokens);
	} else {
		const struct pipe_llvm_program_header *header;
		const char *code;

		header = cso->prog;
		code = cso->prog + sizeof(struct pipe_llvm_program_header);

		radeon_elf_read(code, header->num_bytes, &program->shader.binary);
		si_shader_binary_read_config(&program->shader.binary,
		                             &program->shader.config, 0);
		si_shader_dump(sctx->screen, &program->shader, &sctx->b.debug,
		               PIPE_SHADER_COMPUTE, stderr);
		si_shader_binary_upload(sctx->screen, &program->shader);
	}

	return program;
}
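
/* Bind the compute state; the shader registers are emitted lazily in
 * launch_grid via si_switch_compute_shader().
 */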
static void si_bind_compute_state(struct pipe_context *ctx, void *state)
{
	struct si_context *sctx = (struct si_context *)ctx;
	sctx->cs_shader_state.program = (struct si_compute *)state;
}
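
/* Bind global buffers and patch the caller's handles in place: each
 * handle slot initially holds a 32-bit little-endian offset and is
 * overwritten with the 64-bit GPU virtual address of the buffer plus
 * that offset.
 */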
static void si_set_global_binding(
	struct pipe_context *ctx, unsigned first, unsigned n,
	struct pipe_resource **resources,
	uint32_t **handles)
{
	unsigned i;
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_compute *program = sctx->cs_shader_state.program;

	if (!resources) {
		for (i = first; i < first + n; i++) {
			pipe_resource_reference(&program->global_buffers[i], NULL);
		}
		return;
	}

	for (i = first; i < first + n; i++) {
		uint64_t va;
		uint32_t offset;
		pipe_resource_reference(&program->global_buffers[i], resources[i]);
		va = r600_resource(resources[i])->gpu_address;
		offset = util_le32_to_cpu(*handles[i]);
		va += offset;
		va = util_cpu_to_le64(va);
		memcpy(handles[i], &va, sizeof(va));
	}
}
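
/* One-time compute register setup, emitted before the first dispatch on
 * this context.
 */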
static void si_initialize_compute(struct si_context *sctx)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;

	/* COMPUTE_START_X/Y/Z = 0 */
	radeon_set_sh_reg_seq(cs, R_00B810_COMPUTE_START_X, 3);
	radeon_emit(cs, 0);
	radeon_emit(cs, 0);
	radeon_emit(cs, 0);

	radeon_set_sh_reg_seq(cs, R_00B854_COMPUTE_RESOURCE_LIMITS, 3);
	/* R_00B854_COMPUTE_RESOURCE_LIMITS */
	radeon_emit(cs, 0);
	/* R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE0 / SE1 */
	radeon_emit(cs, S_00B858_SH0_CU_EN(0xffff) | S_00B858_SH1_CU_EN(0xffff));
	radeon_emit(cs, S_00B85C_SH0_CU_EN(0xffff) | S_00B85C_SH1_CU_EN(0xffff));

	if (sctx->b.chip_class >= CIK) {
		/* Also set COMPUTE_STATIC_THREAD_MGMT_SE2 / SE3 */
		radeon_set_sh_reg_seq(cs,
		                      R_00B864_COMPUTE_STATIC_THREAD_MGMT_SE2, 2);
		radeon_emit(cs, S_00B864_SH0_CU_EN(0xffff) |
		                S_00B864_SH1_CU_EN(0xffff));
		radeon_emit(cs, S_00B868_SH0_CU_EN(0xffff) |
		                S_00B868_SH1_CU_EN(0xffff));
	}

	/* This register has been moved to R_00CD20_COMPUTE_MAX_WAVE_ID
	 * and is now per pipe, so it should be handled in the
	 * kernel if we want to use something other than the default value,
	 * which is now 0x22f.
	 */
	if (sctx->b.chip_class <= SI) {
		/* XXX: This should be:
		 * (number of compute units) * 4 * (waves per simd) - 1 */

		radeon_set_sh_reg(cs, R_00B82C_COMPUTE_MAX_WAVE_ID,
		                  0x190 /* Default value */);
	}

	sctx->cs_shader_state.emitted_program = NULL;
	sctx->cs_shader_state.initialized = true;
}
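
/* Make sure the compute scratch buffer can hold
 * config->scratch_bytes_per_wave * sctx->scratch_waves bytes, growing it
 * if necessary, and re-relocate + re-upload the shader binary whenever
 * its scratch buffer changes.
 */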
static bool si_setup_compute_scratch_buffer(struct si_context *sctx,
                                            struct si_shader *shader,
                                            struct si_shader_config *config)
{
	uint64_t scratch_bo_size, scratch_needed;

	scratch_bo_size = 0;
	scratch_needed = config->scratch_bytes_per_wave * sctx->scratch_waves;
	if (sctx->compute_scratch_buffer)
		scratch_bo_size = sctx->compute_scratch_buffer->b.b.width0;

	if (scratch_bo_size < scratch_needed) {
		pipe_resource_reference(
			(struct pipe_resource **)&sctx->compute_scratch_buffer,
			NULL);

		sctx->compute_scratch_buffer =
			si_resource_create_custom(&sctx->screen->b.b,
			                          PIPE_USAGE_DEFAULT, scratch_needed);

		if (!sctx->compute_scratch_buffer)
			return false;
	}

	if (sctx->compute_scratch_buffer != shader->scratch_bo && scratch_needed) {
		uint64_t scratch_va = sctx->compute_scratch_buffer->gpu_address;

		si_shader_apply_scratch_relocs(sctx, shader, config, scratch_va);

		if (si_shader_binary_upload(sctx->screen, shader))
			return false;

		r600_resource_reference(&shader->scratch_bo,
		                        sctx->compute_scratch_buffer);
	}

	return true;
}
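
/* Emit the per-shader compute registers (PGM_LO/HI, PGM_RSRC1/RSRC2,
 * TMPRING_SIZE) if the bound program or its entry-point offset changed
 * since the last dispatch.
 */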
static bool si_switch_compute_shader(struct si_context *sctx,
                                     struct si_compute *program,
                                     struct si_shader *shader, unsigned offset)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
	struct si_shader_config inline_config = {0};
	struct si_shader_config *config;
	uint64_t shader_va;

	if (sctx->cs_shader_state.emitted_program == program &&
	    sctx->cs_shader_state.offset == offset)
		return true;

	if (program->ir_type == PIPE_SHADER_IR_TGSI) {
		config = &shader->config;
	} else {
		unsigned lds_blocks;

		config = &inline_config;
		si_shader_binary_read_config(&shader->binary, config, offset);

		lds_blocks = config->lds_size;
		/* XXX: We are over allocating LDS. For SI, the shader reports
		 * LDS in blocks of 256 bytes, so if there are 4 bytes lds
		 * allocated in the shader and 4 bytes allocated by the state
		 * tracker, then we will set LDS_SIZE to 512 bytes rather than 256.
		 */
		if (sctx->b.chip_class <= SI) {
			lds_blocks += align(program->local_size, 256) >> 8;
		} else {
			lds_blocks += align(program->local_size, 512) >> 9;
		}

		assert(lds_blocks <= 0xFF);

		config->rsrc2 &= C_00B84C_LDS_SIZE;
		config->rsrc2 |= S_00B84C_LDS_SIZE(lds_blocks);
	}

	if (!si_setup_compute_scratch_buffer(sctx, shader, config))
		return false;

	if (shader->scratch_bo) {
		COMPUTE_DBG(sctx->screen, "Waves: %u; Scratch per wave: %u bytes; "
		            "Total Scratch: %u bytes\n", sctx->scratch_waves,
		            config->scratch_bytes_per_wave,
		            config->scratch_bytes_per_wave *
		            sctx->scratch_waves);

		radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
		                          shader->scratch_bo, RADEON_USAGE_READWRITE,
		                          RADEON_PRIO_SCRATCH_BUFFER);
	}

	shader_va = shader->bo->gpu_address + offset;

	radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, shader->bo,
	                          RADEON_USAGE_READ, RADEON_PRIO_USER_SHADER);

	radeon_set_sh_reg_seq(cs, R_00B830_COMPUTE_PGM_LO, 2);
	radeon_emit(cs, shader_va >> 8);
	radeon_emit(cs, shader_va >> 40);

	radeon_set_sh_reg_seq(cs, R_00B848_COMPUTE_PGM_RSRC1, 2);
	radeon_emit(cs, config->rsrc1);
	radeon_emit(cs, config->rsrc2);

	radeon_set_sh_reg(cs, R_00B860_COMPUTE_TMPRING_SIZE,
	                  S_00B860_WAVES(sctx->scratch_waves) |
	                  S_00B860_WAVESIZE(config->scratch_bytes_per_wave >> 10));

	sctx->cs_shader_state.emitted_program = program;
	sctx->cs_shader_state.offset = offset;
	sctx->cs_shader_state.uses_scratch =
		config->scratch_bytes_per_wave != 0;

	return true;
}
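
/* Upload the kernel arguments, prefixed by a 36-byte (9 dword) header:
 * grid size in work groups, global size in work items, and work-group
 * size, three dwords each. The buffer address is passed to the shader
 * in COMPUTE_USER_DATA_0/1.
 */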
static void si_upload_compute_input(struct si_context *sctx,
                                    const struct pipe_grid_info *info)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
	struct si_compute *program = sctx->cs_shader_state.program;
	struct r600_resource *input_buffer = NULL;
	unsigned kernel_args_size;
	unsigned num_work_size_bytes = 36;
	uint32_t kernel_args_offset = 0;
	uint32_t *kernel_args;
	void *kernel_args_ptr;
	uint64_t kernel_args_va;
	unsigned i;

	/* The extra num_work_size_bytes are for work group / work item size information */
	kernel_args_size = program->input_size + num_work_size_bytes;

	u_upload_alloc(sctx->b.uploader, 0, kernel_args_size, 256,
	               &kernel_args_offset,
	               (struct pipe_resource **)&input_buffer, &kernel_args_ptr);

	kernel_args = (uint32_t *)kernel_args_ptr;
	for (i = 0; i < 3; i++) {
		kernel_args[i] = info->grid[i];
		kernel_args[i + 3] = info->grid[i] * info->block[i];
		kernel_args[i + 6] = info->block[i];
	}

	memcpy(kernel_args + (num_work_size_bytes / 4), info->input,
	       program->input_size);

	for (i = 0; i < (kernel_args_size / 4); i++) {
		COMPUTE_DBG(sctx->screen, "input %u : %u\n", i,
		            kernel_args[i]);
	}

	kernel_args_va = input_buffer->gpu_address + kernel_args_offset;

	radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, input_buffer,
	                          RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER);

	radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0, 2);
	radeon_emit(cs, kernel_args_va);
	radeon_emit(cs, S_008F04_BASE_ADDRESS_HI(kernel_args_va >> 32) |
	                S_008F04_STRIDE(0));

	pipe_resource_reference((struct pipe_resource **)&input_buffer, NULL);
}
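
/* Write the grid size into the GRID_SIZE user SGPRs, either directly or,
 * for indirect dispatch, with COPY_DATA packets that read it out of the
 * indirect buffer at dispatch time.
 */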
static void si_setup_tgsi_grid(struct si_context *sctx,
                               const struct pipe_grid_info *info)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
	unsigned grid_size_reg = R_00B900_COMPUTE_USER_DATA_0 +
	                         4 * SI_SGPR_GRID_SIZE;

	if (info->indirect) {
		uint64_t base_va = r600_resource(info->indirect)->gpu_address;
		uint64_t va = base_va + info->indirect_offset;
		unsigned i;

		radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
		                          (struct r600_resource *)info->indirect,
		                          RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);

		for (i = 0; i < 3; ++i) {
			/* Copy one dword of the grid size per dimension from
			 * memory into the GRID_SIZE user SGPRs. */
			radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
			radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
			                COPY_DATA_DST_SEL(COPY_DATA_REG));
			radeon_emit(cs, (va + 4 * i));
			radeon_emit(cs, (va + 4 * i) >> 32);
			radeon_emit(cs, (grid_size_reg >> 2) + i);
			radeon_emit(cs, 0);
		}
	} else {
		radeon_set_sh_reg_seq(cs, grid_size_reg, 3);
		radeon_emit(cs, info->grid[0]);
		radeon_emit(cs, info->grid[1]);
		radeon_emit(cs, info->grid[2]);
	}
}
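
/* Emit the thread-group size registers and the DISPATCH_DIRECT or
 * DISPATCH_INDIRECT packet, honoring the render condition if one is set.
 */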
static void si_emit_dispatch_packets(struct si_context *sctx,
                                     const struct pipe_grid_info *info)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
	bool render_cond_bit = sctx->b.render_cond && !sctx->b.render_cond_force_off;

	radeon_set_sh_reg_seq(cs, R_00B81C_COMPUTE_NUM_THREAD_X, 3);
	radeon_emit(cs, S_00B81C_NUM_THREAD_FULL(info->block[0]));
	radeon_emit(cs, S_00B820_NUM_THREAD_FULL(info->block[1]));
	radeon_emit(cs, S_00B824_NUM_THREAD_FULL(info->block[2]));

	if (info->indirect) {
		uint64_t base_va = r600_resource(info->indirect)->gpu_address;

		radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
		                          (struct r600_resource *)info->indirect,
		                          RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);

		radeon_emit(cs, PKT3(PKT3_SET_BASE, 2, 0) |
		                PKT3_SHADER_TYPE_S(1));
		radeon_emit(cs, 1); /* base index for indirect dispatch */
		radeon_emit(cs, base_va);
		radeon_emit(cs, base_va >> 32);

		radeon_emit(cs, PKT3(PKT3_DISPATCH_INDIRECT, 1, render_cond_bit) |
		                PKT3_SHADER_TYPE_S(1));
		radeon_emit(cs, info->indirect_offset);
		radeon_emit(cs, 1); /* dispatch initiator: COMPUTE_SHADER_EN */
	} else {
		radeon_emit(cs, PKT3(PKT3_DISPATCH_DIRECT, 3, render_cond_bit) |
		                PKT3_SHADER_TYPE_S(1));
		radeon_emit(cs, info->grid[0]);
		radeon_emit(cs, info->grid[1]);
		radeon_emit(cs, info->grid[2]);
		radeon_emit(cs, 1); /* dispatch initiator: COMPUTE_SHADER_EN */
	}
}
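
/* Top-level compute dispatch: flush caches, switch shaders, upload
 * descriptors and kernel inputs, add all referenced buffers to the CS
 * buffer list, then emit the dispatch packets.
 */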
static void si_launch_grid(
		struct pipe_context *ctx, const struct pipe_grid_info *info)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_compute *program = sctx->cs_shader_state.program;
	int i;
	/* HW bug workaround when CS threadgroups > 256 threads and async
	 * compute isn't used, i.e. only one compute job can run at a time.
	 * If async compute is possible, the threadgroup size must be limited
	 * to 256 threads on all queues to avoid the bug.
	 * Only SI and certain CIK chips are affected.
	 */
	bool cs_regalloc_hang =
		(sctx->b.chip_class == SI ||
		 sctx->b.family == CHIP_BONAIRE ||
		 sctx->b.family == CHIP_KABINI) &&
		info->block[0] * info->block[1] * info->block[2] > 256;

	if (cs_regalloc_hang)
		sctx->b.flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
				 SI_CONTEXT_CS_PARTIAL_FLUSH;

	si_decompress_compute_textures(sctx);

	si_need_cs_space(sctx);

	if (!sctx->cs_shader_state.initialized)
		si_initialize_compute(sctx);

	if (sctx->b.flags)
		si_emit_cache_flush(sctx, NULL);

	if (!si_switch_compute_shader(sctx, program, &program->shader, info->pc))
		return;

	si_upload_compute_shader_descriptors(sctx);
	si_emit_compute_shader_userdata(sctx);

	if (si_is_atom_dirty(sctx, sctx->atoms.s.render_cond)) {
		sctx->atoms.s.render_cond->emit(&sctx->b,
		                                sctx->atoms.s.render_cond);
		si_set_atom_dirty(sctx, sctx->atoms.s.render_cond, false);
	}

	if (program->input_size || program->ir_type == PIPE_SHADER_IR_NATIVE)
		si_upload_compute_input(sctx, info);

	/* Global buffers */
	for (i = 0; i < MAX_GLOBAL_BUFFERS; i++) {
		struct r600_resource *buffer =
			(struct r600_resource *)program->global_buffers[i];
		if (!buffer)
			continue;
		radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, buffer,
		                          RADEON_USAGE_READWRITE,
		                          RADEON_PRIO_COMPUTE_GLOBAL);
	}

	if (program->ir_type == PIPE_SHADER_IR_TGSI)
		si_setup_tgsi_grid(sctx, info);

	si_ce_pre_draw_synchronization(sctx);

	si_emit_dispatch_packets(sctx, info);

	si_ce_post_draw_synchronization(sctx);

	sctx->b.num_compute_calls++;
	if (sctx->cs_shader_state.uses_scratch)
		sctx->b.num_spill_compute_calls++;

	if (cs_regalloc_hang)
		sctx->b.flags |= SI_CONTEXT_CS_PARTIAL_FLUSH;
}
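
/* Destroy a compute state object, clearing any dangling references to it
 * in cs_shader_state first.
 */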
static void si_delete_compute_state(struct pipe_context *ctx, void *state)
{
	struct si_compute *program = (struct si_compute *)state;
	struct si_context *sctx = (struct si_context *)ctx;

	if (!state)
		return;

	if (program == sctx->cs_shader_state.program)
		sctx->cs_shader_state.program = NULL;

	if (program == sctx->cs_shader_state.emitted_program)
		sctx->cs_shader_state.emitted_program = NULL;

	si_shader_destroy(&program->shader);
	FREE(program);
}
static void si_set_compute_resources(struct pipe_context *ctx_,
                                     unsigned start, unsigned count,
                                     struct pipe_surface **surfaces) { }
void si_init_compute_functions(struct si_context *sctx)
{
	sctx->b.b.create_compute_state = si_create_compute_state;
	sctx->b.b.delete_compute_state = si_delete_compute_state;
	sctx->b.b.bind_compute_state = si_bind_compute_state;
	/* ctx->context.create_sampler_view = evergreen_compute_create_sampler_view; */
	sctx->b.b.set_compute_resources = si_set_compute_resources;
	sctx->b.b.set_global_binding = si_set_global_binding;
	sctx->b.b.launch_grid = si_launch_grid;
}