/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "util/u_memory.h"

#include "radeon/r600_cs.h"
#include "si_pipe.h"
#include "si_shader.h"
#include "sid.h"

#include "radeon/radeon_llvm_util.h"

#define MAX_GLOBAL_BUFFERS 20
#if HAVE_LLVM < 0x0305
#define NUM_USER_SGPRS 2
#else
#define NUM_USER_SGPRS 4
#endif

struct si_compute {
	struct si_context *ctx;

	unsigned local_size;
	unsigned private_size;
	unsigned input_size;
	struct si_shader *kernels;
	unsigned num_kernels;
	unsigned num_user_sgprs;

	struct r600_resource *input_buffer;
	struct pipe_resource *global_buffers[MAX_GLOBAL_BUFFERS];

	LLVMContextRef llvm_ctx;
};
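
/* Overview (a summary of the code below): si_create_compute_state() compiles
 * every kernel in the incoming LLVM binary up front, si_bind_compute_state()
 * selects the current program, si_launch_grid() uploads the arguments and
 * emits the dispatch, and si_delete_compute_state() tears it all down. */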

static void *si_create_compute_state(
	struct pipe_context *ctx,
	const struct pipe_compute_state *cso)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_compute *program = CALLOC_STRUCT(si_compute);
	const struct pipe_llvm_program_header *header;
	const unsigned char *code;
	unsigned i;

	program->llvm_ctx = LLVMContextCreate();

	/* The program blob starts with a header describing the LLVM bitcode
	 * that follows it. */
	header = cso->prog;
	code = cso->prog + sizeof(struct pipe_llvm_program_header);

	program->ctx = sctx;
	program->local_size = cso->req_local_mem;
	program->private_size = cso->req_private_mem;
	program->input_size = cso->req_input_mem;

	/* Compile each kernel in the binary into its own si_shader. */
	program->num_kernels = radeon_llvm_get_num_kernels(program->llvm_ctx, code,
				header->num_bytes);
	program->kernels = CALLOC(sizeof(struct si_shader),
				program->num_kernels);
	for (i = 0; i < program->num_kernels; i++) {
		LLVMModuleRef mod = radeon_llvm_get_kernel_module(program->llvm_ctx, i,
				code, header->num_bytes);
		si_compile_llvm(sctx->screen, &program->kernels[i], mod);
		LLVMDisposeModule(mod);
	}

	program->input_buffer = si_resource_create_custom(sctx->b.b.screen,
		PIPE_USAGE_IMMUTABLE, program->input_size);

	return program;
}

static void si_bind_compute_state(struct pipe_context *ctx, void *state)
{
	struct si_context *sctx = (struct si_context *)ctx;
	sctx->cs_shader_state.program = (struct si_compute *)state;
}
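
/* Bind global buffers and relocate their handles: on entry each handle slot
 * holds a little-endian 32-bit offset into its buffer; it is rewritten in
 * place with the little-endian 64-bit GPU address of buffer + offset.
 * Passing resources == NULL unbinds the range instead. */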

static void si_set_global_binding(
	struct pipe_context *ctx, unsigned first, unsigned n,
	struct pipe_resource **resources,
	uint32_t **handles)
{
	unsigned i;
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_compute *program = sctx->cs_shader_state.program;

	if (!resources) {
		for (i = first; i < first + n; i++) {
			pipe_resource_reference(&program->global_buffers[i], NULL);
		}
		return;
	}

	for (i = first; i < first + n; i++) {
		uint64_t va;
		uint32_t offset;
		pipe_resource_reference(&program->global_buffers[i], resources[i]);
		va = r600_resource(resources[i])->gpu_address;
		offset = util_le32_to_cpu(*handles[i]);
		va += offset;
		va = util_cpu_to_le64(va);
		memcpy(handles[i], &va, sizeof(va));
	}
}

/**
 * This function computes the value for R_00B860_COMPUTE_TMPRING_SIZE.WAVES.
 * \p block_layout is the number of threads in each work group.
 * \p grid_layout is the number of work groups.
 */
static unsigned compute_num_waves_for_scratch(
	const struct radeon_info *info,
	const uint *block_layout,
	const uint *grid_layout)
{
	unsigned num_sh = MAX2(info->max_sh_per_se, 1);
	unsigned num_se = MAX2(info->max_se, 1);
	unsigned num_blocks = 1;
	unsigned threads_per_block = 1;
	unsigned waves_per_block;
	unsigned waves_per_sh;
	unsigned waves;
	unsigned scratch_waves;
	unsigned i;

	/* Count threads per work group and work groups in the grid. */
	for (i = 0; i < 3; i++) {
		threads_per_block *= block_layout[i];
		num_blocks *= grid_layout[i];
	}

	waves_per_block = align(threads_per_block, 64) / 64;
	waves = waves_per_block * num_blocks;
	waves_per_sh = align(waves, num_sh * num_se) / (num_sh * num_se);
	scratch_waves = waves_per_sh * num_sh * num_se;

	if (waves_per_block > waves_per_sh) {
		scratch_waves = waves_per_block * num_sh * num_se;
	}

	return scratch_waves;
}
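
/* Worked example (illustrative numbers only): a 16x16x1 work group is 256
 * threads, i.e. align(256, 64) / 64 = 4 waves per block. An 8x8x1 grid has
 * 64 blocks, so 256 waves total. With num_se = 2 and num_sh = 2 that rounds
 * to align(256, 4) / 4 = 64 waves per SH, so scratch_waves = 64 * 4 = 256. */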

static void si_launch_grid(
		struct pipe_context *ctx,
		const uint *block_layout, const uint *grid_layout,
		uint32_t pc, const void *input)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
	struct si_compute *program = sctx->cs_shader_state.program;
	struct si_pm4_state *pm4 = CALLOC_STRUCT(si_pm4_state);
	struct r600_resource *input_buffer = program->input_buffer;
	unsigned kernel_args_size;
	unsigned num_work_size_bytes = 36;
	uint32_t kernel_args_offset = 0;
	uint32_t *kernel_args;
	uint64_t kernel_args_va;
	uint64_t scratch_buffer_va = 0;
	uint64_t shader_va;
	unsigned i;
	unsigned arg_user_sgpr_count = NUM_USER_SGPRS;
	struct si_shader *shader = &program->kernels[pc];
	unsigned lds_blocks;
	unsigned num_waves_for_scratch;

	radeon_emit(cs, PKT3(PKT3_CONTEXT_CONTROL, 1, 0) | PKT3_SHADER_TYPE_S(1));
	radeon_emit(cs, 0x80000000);
	radeon_emit(cs, 0x80000000);

	sctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE |
			R600_CONTEXT_INV_SHADER_CACHE |
			R600_CONTEXT_INV_CONST_CACHE |
			R600_CONTEXT_FLUSH_WITH_INV_L2 |
			R600_CONTEXT_FLAG_COMPUTE;
	si_emit_cache_flush(&sctx->b, NULL);

	pm4->compute_pkt = true;

	/* Upload the kernel arguments. */

	/* The extra num_work_size_bytes are for work group / work item size
	 * information. */
	kernel_args_size = program->input_size + num_work_size_bytes +
			   8 /* For scratch va */;

	kernel_args = sctx->b.ws->buffer_map(input_buffer->cs_buf,
			sctx->b.rings.gfx.cs, PIPE_TRANSFER_WRITE);
	for (i = 0; i < 3; i++) {
		kernel_args[i] = grid_layout[i];
		kernel_args[i + 3] = grid_layout[i] * block_layout[i];
		kernel_args[i + 6] = block_layout[i];
	}
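
	/* The 9 dwords just written form the 36-byte work-size header that
	 * precedes the user's arguments: dwords 0-2 are the grid size in
	 * work groups, dwords 3-5 the global size in work items, and
	 * dwords 6-8 the work-group size. */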

	num_waves_for_scratch = compute_num_waves_for_scratch(
		&sctx->screen->b.info, block_layout, grid_layout);

	memcpy(kernel_args + (num_work_size_bytes / 4), input,
	       program->input_size);

	if (shader->scratch_bytes_per_wave > 0) {
		unsigned scratch_bytes = shader->scratch_bytes_per_wave *
						num_waves_for_scratch;

		COMPUTE_DBG(sctx->screen, "Waves: %u; Scratch per wave: %u bytes; "
			    "Total Scratch: %u bytes\n", num_waves_for_scratch,
			    shader->scratch_bytes_per_wave, scratch_bytes);
		if (!shader->scratch_bo) {
			shader->scratch_bo = (struct r600_resource *)
				si_resource_create_custom(sctx->b.b.screen,
				PIPE_USAGE_DEFAULT, scratch_bytes);
		}
		scratch_buffer_va = shader->scratch_bo->gpu_address;
		si_pm4_add_bo(pm4, shader->scratch_bo,
				RADEON_USAGE_READWRITE,
				RADEON_PRIO_SHADER_RESOURCE_RW);
	}

	for (i = 0; i < (kernel_args_size / 4); i++) {
		COMPUTE_DBG(sctx->screen, "input %u : %u\n", i,
			    kernel_args[i]);
	}

	sctx->b.ws->buffer_unmap(input_buffer->cs_buf);

	kernel_args_va = input_buffer->gpu_address;
	kernel_args_va += kernel_args_offset;

	si_pm4_add_bo(pm4, input_buffer, RADEON_USAGE_READ,
		      RADEON_PRIO_SHADER_DATA);

	si_pm4_set_reg(pm4, R_00B900_COMPUTE_USER_DATA_0, kernel_args_va);
	si_pm4_set_reg(pm4, R_00B900_COMPUTE_USER_DATA_0 + 4,
		S_008F04_BASE_ADDRESS_HI(kernel_args_va >> 32) | S_008F04_STRIDE(0));
	si_pm4_set_reg(pm4, R_00B900_COMPUTE_USER_DATA_0 + 8, scratch_buffer_va);
	si_pm4_set_reg(pm4, R_00B900_COMPUTE_USER_DATA_0 + 12,
		S_008F04_BASE_ADDRESS_HI(scratch_buffer_va >> 32)
		| S_008F04_STRIDE(shader->scratch_bytes_per_wave / 64));
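
	/* Four USER_DATA dwords are written: the kernel-argument address as
	 * a low/high pair, then the scratch address as a low/high pair with
	 * a per-wave stride. Presumably, when NUM_USER_SGPRS is 2 (older
	 * LLVM), the kernel only consumes the first pair. */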

	si_pm4_set_reg(pm4, R_00B810_COMPUTE_START_X, 0);
	si_pm4_set_reg(pm4, R_00B814_COMPUTE_START_Y, 0);
	si_pm4_set_reg(pm4, R_00B818_COMPUTE_START_Z, 0);

	si_pm4_set_reg(pm4, R_00B81C_COMPUTE_NUM_THREAD_X,
			S_00B81C_NUM_THREAD_FULL(block_layout[0]));
	si_pm4_set_reg(pm4, R_00B820_COMPUTE_NUM_THREAD_Y,
			S_00B820_NUM_THREAD_FULL(block_layout[1]));
	si_pm4_set_reg(pm4, R_00B824_COMPUTE_NUM_THREAD_Z,
			S_00B824_NUM_THREAD_FULL(block_layout[2]));

	for (i = 0; i < MAX_GLOBAL_BUFFERS; i++) {
		struct r600_resource *buffer =
			(struct r600_resource *)program->global_buffers[i];
		if (!buffer) {
			continue;
		}
		si_pm4_add_bo(pm4, buffer, RADEON_USAGE_READWRITE,
			      RADEON_PRIO_SHADER_RESOURCE_RW);
	}

	/* This register has been moved to R_00CD20_COMPUTE_MAX_WAVE_ID
	 * and is now per pipe, so it should be handled in the
	 * kernel if we want to use something other than the default value,
	 * which is now 0x22f.
	 */
	if (sctx->b.chip_class <= SI) {
		/* XXX: This should be:
		 * (number of compute units) * 4 * (waves per simd) - 1 */
		si_pm4_set_reg(pm4, R_00B82C_COMPUTE_MAX_WAVE_ID,
			       0x190 /* Default value */);
	}

	shader_va = shader->bo->gpu_address;
	si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_DATA);
	si_pm4_set_reg(pm4, R_00B830_COMPUTE_PGM_LO, (shader_va >> 8) & 0xffffffff);
	si_pm4_set_reg(pm4, R_00B834_COMPUTE_PGM_HI, shader_va >> 40);

	si_pm4_set_reg(pm4, R_00B848_COMPUTE_PGM_RSRC1,
		/* We always use at least 3 VGPRs; these come from
		 * TIDIG_COMP_CNT.
		 * XXX: The compiler should account for this.
		 */
		S_00B848_VGPRS((MAX2(3, shader->num_vgprs) - 1) / 4)
		/* We always use at least 4 + arg_user_sgpr_count SGPRs. The
		 * 4 extra SGPRs are from TGID_X_EN, TGID_Y_EN, TGID_Z_EN and
		 * TG_SIZE_EN.
		 * XXX: The compiler should account for this.
		 */
		| S_00B848_SGPRS(((MAX2(4 + arg_user_sgpr_count,
					shader->num_sgprs)) - 1) / 8));

	lds_blocks = shader->lds_size;
	/* XXX: We are over-allocating LDS. For SI, the shader reports LDS in
	 * blocks of 256 bytes, so if there are 4 bytes of LDS allocated in
	 * the shader and 4 bytes allocated by the state tracker, then
	 * we will set LDS_SIZE to 512 bytes rather than 256.
	 */
	if (sctx->b.chip_class <= SI) {
		lds_blocks += align(program->local_size, 256) >> 8;
	} else {
		lds_blocks += align(program->local_size, 512) >> 9;
	}

	assert(lds_blocks <= 0xFF);

	si_pm4_set_reg(pm4, R_00B84C_COMPUTE_PGM_RSRC2,
		S_00B84C_SCRATCH_EN(shader->scratch_bytes_per_wave > 0)
		| S_00B84C_USER_SGPR(arg_user_sgpr_count)
		| S_00B84C_TGID_X_EN(1)
		| S_00B84C_TGID_Y_EN(1)
		| S_00B84C_TGID_Z_EN(1)
		| S_00B84C_TG_SIZE_EN(1)
		| S_00B84C_TIDIG_COMP_CNT(2)
		| S_00B84C_LDS_SIZE(lds_blocks)
		| S_00B84C_EXCP_EN(0));

	si_pm4_set_reg(pm4, R_00B854_COMPUTE_RESOURCE_LIMITS, 0);

	si_pm4_set_reg(pm4, R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE0,
		S_00B858_SH0_CU_EN(0xffff /* Default value */)
		| S_00B858_SH1_CU_EN(0xffff /* Default value */));

	si_pm4_set_reg(pm4, R_00B85C_COMPUTE_STATIC_THREAD_MGMT_SE1,
		S_00B85C_SH0_CU_EN(0xffff /* Default value */)
		| S_00B85C_SH1_CU_EN(0xffff /* Default value */));

	si_pm4_set_reg(pm4, R_00B860_COMPUTE_TMPRING_SIZE,
		/* The maximum value for WAVES is 32 * num CU.
		 * If you program this value incorrectly, the GPU will hang if
		 * COMPUTE_PGM_RSRC2.SCRATCH_EN is enabled.
		 */
		S_00B860_WAVES(num_waves_for_scratch)
		| S_00B860_WAVESIZE(shader->scratch_bytes_per_wave >> 10));

	si_pm4_cmd_begin(pm4, PKT3_DISPATCH_DIRECT);
	si_pm4_cmd_add(pm4, grid_layout[0]); /* Thread groups DIM_X */
	si_pm4_cmd_add(pm4, grid_layout[1]); /* Thread groups DIM_Y */
	si_pm4_cmd_add(pm4, grid_layout[2]); /* Thread groups DIM_Z */
	si_pm4_cmd_add(pm4, 1); /* DISPATCH_INITIATOR */
	si_pm4_cmd_end(pm4, false);

	si_pm4_emit(sctx, pm4);

#if 0
	fprintf(stderr, "cdw: %i\n", sctx->cs->cdw);
	for (i = 0; i < sctx->cs->cdw; i++) {
		fprintf(stderr, "%4i : 0x%08X\n", i, sctx->cs->buf[i]);
	}
#endif

	si_pm4_free_state(sctx, pm4, ~0);

	sctx->b.flags |= R600_CONTEXT_CS_PARTIAL_FLUSH |
			R600_CONTEXT_INV_TEX_CACHE |
			R600_CONTEXT_INV_SHADER_CACHE |
			R600_CONTEXT_INV_CONST_CACHE |
			R600_CONTEXT_FLAG_COMPUTE;
	si_emit_cache_flush(&sctx->b, NULL);
}

static void si_delete_compute_state(struct pipe_context *ctx, void *state)
{
	struct si_compute *program = (struct si_compute *)state;

	if (!state) {
		return;
	}

	if (program->kernels) {
		for (int i = 0; i < program->num_kernels; i++) {
			if (program->kernels[i].bo) {
				si_shader_destroy(ctx, &program->kernels[i]);
			}
		}
		FREE(program->kernels);
	}

	if (program->llvm_ctx) {
		LLVMContextDispose(program->llvm_ctx);
	}
	pipe_resource_reference(
		(struct pipe_resource **)&program->input_buffer, NULL);

	/* And then free the program itself. */
	FREE(program);
}
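
/* Note the teardown order above: the per-kernel shaders are destroyed
 * before the LLVM context that owned their modules is disposed. */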

static void si_set_compute_resources(struct pipe_context *ctx_,
		unsigned start, unsigned count,
		struct pipe_surface **surfaces) { }

void si_init_compute_functions(struct si_context *sctx)
{
	sctx->b.b.create_compute_state = si_create_compute_state;
	sctx->b.b.delete_compute_state = si_delete_compute_state;
	sctx->b.b.bind_compute_state = si_bind_compute_state;
/*	ctx->context.create_sampler_view = evergreen_compute_create_sampler_view; */
	sctx->b.b.set_compute_resources = si_set_compute_resources;
	sctx->b.b.set_global_binding = si_set_global_binding;
	sctx->b.b.launch_grid = si_launch_grid;
}