radeonsi: use R600_RESOURCE_FLAG_UNMAPPABLE where it's desirable
[mesa.git] src/gallium/drivers/radeonsi/si_compute.c
/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "tgsi/tgsi_parse.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
#include "radeon/radeon_elf_util.h"

#include "amd_kernel_code_t.h"
#include "radeon/r600_cs.h"
#include "si_pipe.h"
#include "sid.h"

#define MAX_GLOBAL_BUFFERS 22

struct si_compute {
	unsigned ir_type;
	unsigned local_size;
	unsigned private_size;
	unsigned input_size;
	struct si_shader shader;

	struct pipe_resource *global_buffers[MAX_GLOBAL_BUFFERS];
	unsigned use_code_object_v2 : 1;
	unsigned variable_group_size : 1;
};

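/* Note: this mirrors the layout of the HSA kernel dispatch packet that a
 * code-object-v2 kernel reads through its dispatch pointer (assumption:
 * field order and widths match the HSA runtime ABI). */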
struct dispatch_packet {
	uint16_t header;
	uint16_t setup;
	uint16_t workgroup_size_x;
	uint16_t workgroup_size_y;
	uint16_t workgroup_size_z;
	uint16_t reserved0;
	uint32_t grid_size_x;
	uint32_t grid_size_y;
	uint32_t grid_size_z;
	uint32_t private_segment_size;
	uint32_t group_segment_size;
	uint64_t kernel_object;
	uint64_t kernarg_address;
	uint64_t reserved2;
};

static const amd_kernel_code_t *si_compute_get_code_object(
	const struct si_compute *program,
	uint64_t symbol_offset)
{
	if (!program->use_code_object_v2) {
		return NULL;
	}
	return (const amd_kernel_code_t*)
		(program->shader.binary.code + symbol_offset);
}

static void code_object_to_config(const amd_kernel_code_t *code_object,
				  struct si_shader_config *out_config) {

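	/* compute_pgm_resource_registers packs COMPUTE_PGM_RSRC1 in the low
	 * dword and COMPUTE_PGM_RSRC2 in the high dword, so the two register
	 * values can be split off by shifting. */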
	uint32_t rsrc1 = code_object->compute_pgm_resource_registers;
	uint32_t rsrc2 = code_object->compute_pgm_resource_registers >> 32;
	out_config->num_sgprs = code_object->wavefront_sgpr_count;
	out_config->num_vgprs = code_object->workitem_vgpr_count;
	out_config->float_mode = G_00B028_FLOAT_MODE(rsrc1);
	out_config->rsrc1 = rsrc1;
	out_config->lds_size = MAX2(out_config->lds_size, G_00B84C_LDS_SIZE(rsrc2));
	out_config->rsrc2 = rsrc2;
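	/* Scratch is per-lane in the code object but per-wave in the config:
	 * e.g. 16 bytes per work item becomes 16 * 64 = 1024 bytes for a
	 * 64-lane wave, rounded up to a 1 KiB granule. */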
	out_config->scratch_bytes_per_wave =
		align(code_object->workitem_private_segment_byte_size * 64, 1024);
}

static void *si_create_compute_state(
	struct pipe_context *ctx,
	const struct pipe_compute_state *cso)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_screen *sscreen = (struct si_screen *)ctx->screen;
	struct si_compute *program = CALLOC_STRUCT(si_compute);
	struct si_shader *shader;

	if (!program)
		return NULL;

	shader = &program->shader;

	program->ir_type = cso->ir_type;
	program->local_size = cso->req_local_mem;
	program->private_size = cso->req_private_mem;
	program->input_size = cso->req_input_mem;
	program->use_code_object_v2 = HAVE_LLVM >= 0x0400 &&
					cso->ir_type == PIPE_SHADER_IR_NATIVE;

	if (cso->ir_type == PIPE_SHADER_IR_TGSI) {
		struct si_shader_selector sel;
		bool scratch_enabled;

		memset(&sel, 0, sizeof(sel));

		sel.tokens = tgsi_dup_tokens(cso->prog);
		if (!sel.tokens) {
			FREE(program);
			return NULL;
		}

		tgsi_scan_shader(cso->prog, &sel.info);
		sel.type = PIPE_SHADER_COMPUTE;
		sel.local_size = cso->req_local_mem;

		p_atomic_inc(&sscreen->b.num_shaders_created);

		program->shader.selector = &sel;
		program->shader.is_monolithic = true;

		if (si_shader_create(sscreen, sctx->tm, &program->shader,
		                     &sctx->b.debug)) {
			FREE(sel.tokens);
			FREE(program);
			return NULL;
		}

		scratch_enabled = shader->config.scratch_bytes_per_wave > 0;

		shader->config.rsrc1 =
			S_00B848_VGPRS((shader->config.num_vgprs - 1) / 4) |
			S_00B848_SGPRS((shader->config.num_sgprs - 1) / 8) |
			S_00B848_DX10_CLAMP(1) |
			S_00B848_FLOAT_MODE(shader->config.float_mode);

		shader->config.rsrc2 = S_00B84C_USER_SGPR(SI_CS_NUM_USER_SGPR) |
			S_00B84C_SCRATCH_EN(scratch_enabled) |
			S_00B84C_TGID_X_EN(1) | S_00B84C_TGID_Y_EN(1) |
			S_00B84C_TGID_Z_EN(1) | S_00B84C_TIDIG_COMP_CNT(2) |
			S_00B84C_LDS_SIZE(shader->config.lds_size);

		program->variable_group_size =
			sel.info.properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] == 0;

		FREE(sel.tokens);
		program->shader.selector = NULL;
	} else {
		const struct pipe_llvm_program_header *header;
		const char *code;
		header = cso->prog;
		code = cso->prog + sizeof(struct pipe_llvm_program_header);

		radeon_elf_read(code, header->num_bytes, &program->shader.binary);
		if (program->use_code_object_v2) {
			const amd_kernel_code_t *code_object =
				si_compute_get_code_object(program, 0);
			code_object_to_config(code_object, &program->shader.config);
		} else {
			si_shader_binary_read_config(&program->shader.binary,
				     &program->shader.config, 0);
		}
		si_shader_dump(sctx->screen, &program->shader, &sctx->b.debug,
			       PIPE_SHADER_COMPUTE, stderr, true);
		if (si_shader_binary_upload(sctx->screen, &program->shader) < 0) {
			fprintf(stderr, "LLVM failed to upload shader\n");
			FREE(program);
			return NULL;
		}
	}

	return program;
}

static void si_bind_compute_state(struct pipe_context *ctx, void *state)
{
	struct si_context *sctx = (struct si_context*)ctx;
	sctx->cs_shader_state.program = (struct si_compute*)state;
}

static void si_set_global_binding(
	struct pipe_context *ctx, unsigned first, unsigned n,
	struct pipe_resource **resources,
	uint32_t **handles)
{
	unsigned i;
	struct si_context *sctx = (struct si_context*)ctx;
	struct si_compute *program = sctx->cs_shader_state.program;

	assert(first + n <= MAX_GLOBAL_BUFFERS);

	if (!resources) {
		for (i = 0; i < n; i++) {
			pipe_resource_reference(&program->global_buffers[first + i], NULL);
		}
		return;
	}

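	/* Each handle points at a CPU-visible 32-bit offset into the buffer;
	 * it is rewritten in place with the full 64-bit GPU virtual address
	 * (little-endian) so the kernel can load it directly. */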
	for (i = 0; i < n; i++) {
		uint64_t va;
		uint32_t offset;
		pipe_resource_reference(&program->global_buffers[first + i], resources[i]);
		va = r600_resource(resources[i])->gpu_address;
		offset = util_le32_to_cpu(*handles[i]);
		va += offset;
		va = util_cpu_to_le64(va);
		memcpy(handles[i], &va, sizeof(va));
	}
}

static void si_initialize_compute(struct si_context *sctx)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
	uint64_t bc_va;

	radeon_set_sh_reg_seq(cs, R_00B810_COMPUTE_START_X, 3);
	radeon_emit(cs, 0);
	radeon_emit(cs, 0);
	radeon_emit(cs, 0);

	radeon_set_sh_reg_seq(cs, R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE0, 2);
	/* R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE0 / SE1 */
	radeon_emit(cs, S_00B858_SH0_CU_EN(0xffff) | S_00B858_SH1_CU_EN(0xffff));
	radeon_emit(cs, S_00B85C_SH0_CU_EN(0xffff) | S_00B85C_SH1_CU_EN(0xffff));

	if (sctx->b.chip_class >= CIK) {
		/* Also set R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE2 / SE3 */
		radeon_set_sh_reg_seq(cs,
				      R_00B864_COMPUTE_STATIC_THREAD_MGMT_SE2, 2);
		radeon_emit(cs, S_00B864_SH0_CU_EN(0xffff) |
				S_00B864_SH1_CU_EN(0xffff));
		radeon_emit(cs, S_00B868_SH0_CU_EN(0xffff) |
				S_00B868_SH1_CU_EN(0xffff));
	}

	/* This register has been moved to R_00CD20_COMPUTE_MAX_WAVE_ID
	 * and is now per pipe, so it should be handled in the
	 * kernel if we want to use something other than the default value,
	 * which is now 0x22f.
	 */
	if (sctx->b.chip_class <= SI) {
		/* XXX: This should be:
		 * (number of compute units) * 4 * (waves per simd) - 1 */

		radeon_set_sh_reg(cs, R_00B82C_COMPUTE_MAX_WAVE_ID,
				  0x190 /* Default value */);
	}

	/* Set the pointer to border colors. */
	bc_va = sctx->border_color_buffer->gpu_address;

	if (sctx->b.chip_class >= CIK) {
		radeon_set_uconfig_reg_seq(cs, R_030E00_TA_CS_BC_BASE_ADDR, 2);
		radeon_emit(cs, bc_va >> 8);  /* R_030E00_TA_CS_BC_BASE_ADDR */
		radeon_emit(cs, bc_va >> 40); /* R_030E04_TA_CS_BC_BASE_ADDR_HI */
	} else {
		if (sctx->screen->b.info.drm_major == 3 ||
		    (sctx->screen->b.info.drm_major == 2 &&
		     sctx->screen->b.info.drm_minor >= 48)) {
			radeon_set_config_reg(cs, R_00950C_TA_CS_BC_BASE_ADDR,
					      bc_va >> 8);
		}
	}

	sctx->cs_shader_state.emitted_program = NULL;
	sctx->cs_shader_state.initialized = true;
}

static bool si_setup_compute_scratch_buffer(struct si_context *sctx,
                                            struct si_shader *shader,
                                            struct si_shader_config *config)
{
	uint64_t scratch_bo_size, scratch_needed;
	scratch_bo_size = 0;
	scratch_needed = config->scratch_bytes_per_wave * sctx->scratch_waves;
	if (sctx->compute_scratch_buffer)
		scratch_bo_size = sctx->compute_scratch_buffer->b.b.width0;

	if (scratch_bo_size < scratch_needed) {
		r600_resource_reference(&sctx->compute_scratch_buffer, NULL);

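		/* The scratch buffer is only ever accessed by the GPU, so
		 * R600_RESOURCE_FLAG_UNMAPPABLE (the flag this commit starts
		 * using here) tells the winsys that no CPU mapping is needed
		 * and the buffer can live in unmappable VRAM. */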
		sctx->compute_scratch_buffer = (struct r600_resource*)
			r600_aligned_buffer_create(&sctx->screen->b.b,
						   R600_RESOURCE_FLAG_UNMAPPABLE,
						   PIPE_USAGE_DEFAULT,
						   scratch_needed, 256);

		if (!sctx->compute_scratch_buffer)
			return false;
	}

	if (sctx->compute_scratch_buffer != shader->scratch_bo && scratch_needed) {
		uint64_t scratch_va = sctx->compute_scratch_buffer->gpu_address;

		si_shader_apply_scratch_relocs(sctx, shader, config, scratch_va);

		if (si_shader_binary_upload(sctx->screen, shader))
			return false;

		r600_resource_reference(&shader->scratch_bo,
					sctx->compute_scratch_buffer);
	}

	return true;
}

static bool si_switch_compute_shader(struct si_context *sctx,
                                     struct si_compute *program,
                                     struct si_shader *shader,
                                     const amd_kernel_code_t *code_object,
                                     unsigned offset)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
	struct si_shader_config inline_config = {0};
	struct si_shader_config *config;
	uint64_t shader_va;

	if (sctx->cs_shader_state.emitted_program == program &&
	    sctx->cs_shader_state.offset == offset)
		return true;

	if (program->ir_type == PIPE_SHADER_IR_TGSI) {
		config = &shader->config;
	} else {
		unsigned lds_blocks;

		config = &inline_config;
		if (code_object) {
			code_object_to_config(code_object, config);
		} else {
			si_shader_binary_read_config(&shader->binary, config, offset);
		}

		lds_blocks = config->lds_size;
		/* XXX: We are over allocating LDS. For SI, the shader reports
		 * LDS in blocks of 256 bytes, so if there are 4 bytes lds
		 * allocated in the shader and 4 bytes allocated by the state
		 * tracker, then we will set LDS_SIZE to 512 bytes rather than 256.
		 */
		if (sctx->b.chip_class <= SI) {
			lds_blocks += align(program->local_size, 256) >> 8;
		} else {
			lds_blocks += align(program->local_size, 512) >> 9;
		}

		/* TODO: use si_multiwave_lds_size_workaround */
		assert(lds_blocks <= 0xFF);

		config->rsrc2 &= C_00B84C_LDS_SIZE;
		config->rsrc2 |= S_00B84C_LDS_SIZE(lds_blocks);
	}

	if (!si_setup_compute_scratch_buffer(sctx, shader, config))
		return false;

	if (shader->scratch_bo) {
		COMPUTE_DBG(sctx->screen, "Waves: %u; Scratch per wave: %u bytes; "
			    "Total Scratch: %u bytes\n", sctx->scratch_waves,
			    config->scratch_bytes_per_wave,
			    config->scratch_bytes_per_wave *
			    sctx->scratch_waves);

		radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
					  shader->scratch_bo, RADEON_USAGE_READWRITE,
					  RADEON_PRIO_SCRATCH_BUFFER);
	}

	/* Prefetch the compute shader to TC L2.
	 *
	 * We should also prefetch graphics shaders if a compute dispatch was
	 * the last command, and the compute shader if a draw call was the last
	 * command. However, that would add more complexity and we're likely
	 * to get a shader state change in that case anyway.
	 */
	if (sctx->b.chip_class >= CIK) {
		cik_prefetch_TC_L2_async(sctx, &program->shader.bo->b.b,
					 0, program->shader.bo->b.b.width0);
	}

	shader_va = shader->bo->gpu_address + offset;
	if (program->use_code_object_v2) {
		/* Shader code is placed after the amd_kernel_code_t
		 * struct. */
		shader_va += sizeof(amd_kernel_code_t);
	}

	radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, shader->bo,
				  RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);

	radeon_set_sh_reg_seq(cs, R_00B830_COMPUTE_PGM_LO, 2);
	radeon_emit(cs, shader_va >> 8);
	radeon_emit(cs, shader_va >> 40);

	radeon_set_sh_reg_seq(cs, R_00B848_COMPUTE_PGM_RSRC1, 2);
	radeon_emit(cs, config->rsrc1);
	radeon_emit(cs, config->rsrc2);

	COMPUTE_DBG(sctx->screen, "COMPUTE_PGM_RSRC1: 0x%08x "
		    "COMPUTE_PGM_RSRC2: 0x%08x\n", config->rsrc1, config->rsrc2);

	radeon_set_sh_reg(cs, R_00B860_COMPUTE_TMPRING_SIZE,
			  S_00B860_WAVES(sctx->scratch_waves)
			  | S_00B860_WAVESIZE(config->scratch_bytes_per_wave >> 10));

	sctx->cs_shader_state.emitted_program = program;
	sctx->cs_shader_state.offset = offset;
	sctx->cs_shader_state.uses_scratch =
		config->scratch_bytes_per_wave != 0;

	return true;
}

static void setup_scratch_rsrc_user_sgprs(struct si_context *sctx,
                                          const amd_kernel_code_t *code_object,
                                          unsigned user_sgpr)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
	uint64_t scratch_va = sctx->compute_scratch_buffer->gpu_address;

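	/* The four dwords built below form a buffer resource descriptor
	 * (a V#) for the scratch buffer; it is written into four consecutive
	 * user SGPRs as the kernel's private segment buffer. */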
	unsigned max_private_element_size = AMD_HSA_BITS_GET(
		code_object->code_properties,
		AMD_CODE_PROPERTY_PRIVATE_ELEMENT_SIZE);

	uint32_t scratch_dword0 = scratch_va & 0xffffffff;
	uint32_t scratch_dword1 =
		S_008F04_BASE_ADDRESS_HI(scratch_va >> 32) |
		S_008F04_SWIZZLE_ENABLE(1);

	/* Disable address clamping */
	uint32_t scratch_dword2 = 0xffffffff;
	uint32_t scratch_dword3 =
		S_008F0C_ELEMENT_SIZE(max_private_element_size) |
		S_008F0C_INDEX_STRIDE(3) |
		S_008F0C_ADD_TID_ENABLE(1);

	if (sctx->screen->b.chip_class < VI) {
		/* BUF_DATA_FORMAT is ignored, but it cannot be
		 * BUF_DATA_FORMAT_INVALID. */
		scratch_dword3 |=
			S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_8);
	}

	radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0 +
			      (user_sgpr * 4), 4);
	radeon_emit(cs, scratch_dword0);
	radeon_emit(cs, scratch_dword1);
	radeon_emit(cs, scratch_dword2);
	radeon_emit(cs, scratch_dword3);
}

static void si_setup_user_sgprs_co_v2(struct si_context *sctx,
                                      const amd_kernel_code_t *code_object,
                                      const struct pipe_grid_info *info,
                                      uint64_t kernel_args_va)
{
	struct si_compute *program = sctx->cs_shader_state.program;
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;

	static const enum amd_code_property_mask_t workgroup_count_masks [] = {
		AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_X,
		AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Y,
		AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Z
	};

	unsigned i, user_sgpr = 0;
	if (AMD_HSA_BITS_GET(code_object->code_properties,
			AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER)) {
		if (code_object->workitem_private_segment_byte_size > 0) {
			setup_scratch_rsrc_user_sgprs(sctx, code_object,
						      user_sgpr);
		}
		user_sgpr += 4;
	}

	if (AMD_HSA_BITS_GET(code_object->code_properties,
			AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR)) {
		struct dispatch_packet dispatch;
		unsigned dispatch_offset;
		struct r600_resource *dispatch_buf = NULL;
		uint64_t dispatch_va;

		/* Upload dispatch ptr */
		memset(&dispatch, 0, sizeof(dispatch));

		dispatch.workgroup_size_x = info->block[0];
		dispatch.workgroup_size_y = info->block[1];
		dispatch.workgroup_size_z = info->block[2];

		dispatch.grid_size_x = info->grid[0] * info->block[0];
		dispatch.grid_size_y = info->grid[1] * info->block[1];
		dispatch.grid_size_z = info->grid[2] * info->block[2];

		dispatch.private_segment_size = program->private_size;
		dispatch.group_segment_size = program->local_size;

		dispatch.kernarg_address = kernel_args_va;

		u_upload_data(sctx->b.b.const_uploader, 0, sizeof(dispatch),
			      256, &dispatch, &dispatch_offset,
			      (struct pipe_resource**)&dispatch_buf);

		/* Bail out on a failed upload; dereferencing a NULL
		 * dispatch_buf below would crash. */
		if (!dispatch_buf) {
			fprintf(stderr, "Error: Failed to allocate dispatch "
					"packet.\n");
			return;
		}
		radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, dispatch_buf,
					  RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER);

		dispatch_va = dispatch_buf->gpu_address + dispatch_offset;

		radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0 +
				      (user_sgpr * 4), 2);
		radeon_emit(cs, dispatch_va);
		radeon_emit(cs, S_008F04_BASE_ADDRESS_HI(dispatch_va >> 32) |
				S_008F04_STRIDE(0));

		r600_resource_reference(&dispatch_buf, NULL);
		user_sgpr += 2;
	}

	if (AMD_HSA_BITS_GET(code_object->code_properties,
			AMD_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR)) {
		radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0 +
				      (user_sgpr * 4), 2);
		radeon_emit(cs, kernel_args_va);
		radeon_emit(cs, S_008F04_BASE_ADDRESS_HI(kernel_args_va >> 32) |
				S_008F04_STRIDE(0));
		user_sgpr += 2;
	}

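	/* Only the first 16 COMPUTE_USER_DATA registers are available as
	 * user SGPRs, so stop handing out workgroup-count SGPRs once that
	 * budget is exhausted. */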
	for (i = 0; i < 3 && user_sgpr < 16; i++) {
		if (code_object->code_properties & workgroup_count_masks[i]) {
			radeon_set_sh_reg_seq(cs,
					      R_00B900_COMPUTE_USER_DATA_0 +
					      (user_sgpr * 4), 1);
			radeon_emit(cs, info->grid[i]);
			user_sgpr += 1;
		}
	}
}

static void si_upload_compute_input(struct si_context *sctx,
                                    const amd_kernel_code_t *code_object,
                                    const struct pipe_grid_info *info)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
	struct si_compute *program = sctx->cs_shader_state.program;
	struct r600_resource *input_buffer = NULL;
	unsigned kernel_args_size;
	unsigned num_work_size_bytes = program->use_code_object_v2 ? 0 : 36;
	uint32_t kernel_args_offset = 0;
	uint32_t *kernel_args;
	void *kernel_args_ptr;
	uint64_t kernel_args_va;
	unsigned i;

	/* The extra num_work_size_bytes are for work group / work item size
	 * information: for the old (non code-object-v2) ABI, 9 dwords (grid
	 * size, global size and block size, 36 bytes total) are prepended
	 * to the kernel arguments. */
	kernel_args_size = program->input_size + num_work_size_bytes;

	u_upload_alloc(sctx->b.b.const_uploader, 0, kernel_args_size,
		       sctx->screen->b.info.tcc_cache_line_size,
		       &kernel_args_offset,
		       (struct pipe_resource**)&input_buffer, &kernel_args_ptr);

	kernel_args = (uint32_t*)kernel_args_ptr;
	kernel_args_va = input_buffer->gpu_address + kernel_args_offset;

	if (!code_object) {
		for (i = 0; i < 3; i++) {
			kernel_args[i] = info->grid[i];
			kernel_args[i + 3] = info->grid[i] * info->block[i];
			kernel_args[i + 6] = info->block[i];
		}
	}

	memcpy(kernel_args + (num_work_size_bytes / 4), info->input,
	       program->input_size);

	for (i = 0; i < (kernel_args_size / 4); i++) {
		COMPUTE_DBG(sctx->screen, "input %u : %u\n", i,
			    kernel_args[i]);
	}

	radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, input_buffer,
				  RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER);

	if (code_object) {
		si_setup_user_sgprs_co_v2(sctx, code_object, info, kernel_args_va);
	} else {
		radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0, 2);
		radeon_emit(cs, kernel_args_va);
		radeon_emit(cs, S_008F04_BASE_ADDRESS_HI(kernel_args_va >> 32) |
				S_008F04_STRIDE(0));
	}

	r600_resource_reference(&input_buffer, NULL);
}

static void si_setup_tgsi_grid(struct si_context *sctx,
                               const struct pipe_grid_info *info)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
	unsigned grid_size_reg = R_00B900_COMPUTE_USER_DATA_0 +
				 4 * SI_SGPR_GRID_SIZE;

	if (info->indirect) {
		uint64_t base_va = r600_resource(info->indirect)->gpu_address;
		uint64_t va = base_va + info->indirect_offset;
		int i;

		radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
					  (struct r600_resource *)info->indirect,
					  RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);

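		/* Copy the three 32-bit grid dimensions from the indirect
		 * buffer into the user SGPRs backing SI_SGPR_GRID_SIZE, one
		 * COPY_DATA packet (memory -> register) per dword. */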
		for (i = 0; i < 3; ++i) {
			radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
			radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
					COPY_DATA_DST_SEL(COPY_DATA_REG));
			radeon_emit(cs, (va + 4 * i));
			radeon_emit(cs, (va + 4 * i) >> 32);
			radeon_emit(cs, (grid_size_reg >> 2) + i);
			radeon_emit(cs, 0);
		}
	} else {
		struct si_compute *program = sctx->cs_shader_state.program;

		radeon_set_sh_reg_seq(cs, grid_size_reg, program->variable_group_size ? 6 : 3);
		radeon_emit(cs, info->grid[0]);
		radeon_emit(cs, info->grid[1]);
		radeon_emit(cs, info->grid[2]);
		if (program->variable_group_size) {
			radeon_emit(cs, info->block[0]);
			radeon_emit(cs, info->block[1]);
			radeon_emit(cs, info->block[2]);
		}
	}
}

static void si_emit_dispatch_packets(struct si_context *sctx,
                                     const struct pipe_grid_info *info)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
	bool render_cond_bit = sctx->b.render_cond && !sctx->b.render_cond_force_off;
	unsigned waves_per_threadgroup =
		DIV_ROUND_UP(info->block[0] * info->block[1] * info->block[2], 64);

	radeon_set_sh_reg(cs, R_00B854_COMPUTE_RESOURCE_LIMITS,
			  S_00B854_SIMD_DEST_CNTL(waves_per_threadgroup % 4 == 0));

	radeon_set_sh_reg_seq(cs, R_00B81C_COMPUTE_NUM_THREAD_X, 3);
	radeon_emit(cs, S_00B81C_NUM_THREAD_FULL(info->block[0]));
	radeon_emit(cs, S_00B820_NUM_THREAD_FULL(info->block[1]));
	radeon_emit(cs, S_00B824_NUM_THREAD_FULL(info->block[2]));

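	/* PKT3_SHADER_TYPE_S(1) in the packet headers below marks the
	 * dispatch packets as compute rather than graphics. */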
	if (info->indirect) {
		uint64_t base_va = r600_resource(info->indirect)->gpu_address;

		radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
					  (struct r600_resource *)info->indirect,
					  RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);

		radeon_emit(cs, PKT3(PKT3_SET_BASE, 2, 0) |
				PKT3_SHADER_TYPE_S(1));
		radeon_emit(cs, 1);
		radeon_emit(cs, base_va);
		radeon_emit(cs, base_va >> 32);

		radeon_emit(cs, PKT3(PKT3_DISPATCH_INDIRECT, 1, render_cond_bit) |
				PKT3_SHADER_TYPE_S(1));
		radeon_emit(cs, info->indirect_offset);
		radeon_emit(cs, 1);
	} else {
		radeon_emit(cs, PKT3(PKT3_DISPATCH_DIRECT, 3, render_cond_bit) |
				PKT3_SHADER_TYPE_S(1));
		radeon_emit(cs, info->grid[0]);
		radeon_emit(cs, info->grid[1]);
		radeon_emit(cs, info->grid[2]);
		radeon_emit(cs, 1);
	}
}

static void si_launch_grid(
		struct pipe_context *ctx, const struct pipe_grid_info *info)
{
	struct si_context *sctx = (struct si_context*)ctx;
	struct si_compute *program = sctx->cs_shader_state.program;
	const amd_kernel_code_t *code_object =
		si_compute_get_code_object(program, info->pc);
	int i;
	/* HW bug workaround when CS threadgroups > 256 threads and async
	 * compute isn't used, i.e. only one compute job can run at a time.
	 * If async compute is possible, the threadgroup size must be limited
	 * to 256 threads on all queues to avoid the bug.
	 * Only SI and certain CIK chips are affected.
	 */
	bool cs_regalloc_hang =
		(sctx->b.chip_class == SI ||
		 sctx->b.family == CHIP_BONAIRE ||
		 sctx->b.family == CHIP_KABINI) &&
		info->block[0] * info->block[1] * info->block[2] > 256;

	if (cs_regalloc_hang)
		sctx->b.flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
				 SI_CONTEXT_CS_PARTIAL_FLUSH;

	si_decompress_compute_textures(sctx);

	/* Add buffer sizes for memory checking in need_cs_space. */
	r600_context_add_resource_size(ctx, &program->shader.bo->b.b);
	/* TODO: add the scratch buffer */

	if (info->indirect) {
		r600_context_add_resource_size(ctx, info->indirect);

		/* The hw doesn't read the indirect buffer via TC L2. */
		if (r600_resource(info->indirect)->TC_L2_dirty) {
			sctx->b.flags |= SI_CONTEXT_WRITEBACK_GLOBAL_L2;
			r600_resource(info->indirect)->TC_L2_dirty = false;
		}
	}

	si_need_cs_space(sctx);

	if (!sctx->cs_shader_state.initialized)
		si_initialize_compute(sctx);

	if (sctx->b.flags)
		si_emit_cache_flush(sctx);

	if (!si_switch_compute_shader(sctx, program, &program->shader,
				      code_object, info->pc))
		return;

	si_upload_compute_shader_descriptors(sctx);
	si_emit_compute_shader_userdata(sctx);

	if (si_is_atom_dirty(sctx, sctx->atoms.s.render_cond)) {
		sctx->atoms.s.render_cond->emit(&sctx->b,
						sctx->atoms.s.render_cond);
		si_set_atom_dirty(sctx, sctx->atoms.s.render_cond, false);
	}

	if (program->input_size || program->ir_type == PIPE_SHADER_IR_NATIVE)
		si_upload_compute_input(sctx, code_object, info);

	/* Global buffers */
	for (i = 0; i < MAX_GLOBAL_BUFFERS; i++) {
		struct r600_resource *buffer =
			(struct r600_resource*)program->global_buffers[i];
		if (!buffer) {
			continue;
		}
		radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, buffer,
					  RADEON_USAGE_READWRITE,
					  RADEON_PRIO_COMPUTE_GLOBAL);
	}

	if (program->ir_type == PIPE_SHADER_IR_TGSI)
		si_setup_tgsi_grid(sctx, info);

	si_ce_pre_draw_synchronization(sctx);

	si_emit_dispatch_packets(sctx, info);

	si_ce_post_draw_synchronization(sctx);

	sctx->compute_is_busy = true;
	sctx->b.num_compute_calls++;
	if (sctx->cs_shader_state.uses_scratch)
		sctx->b.num_spill_compute_calls++;

	if (cs_regalloc_hang)
		sctx->b.flags |= SI_CONTEXT_CS_PARTIAL_FLUSH;
}

static void si_delete_compute_state(struct pipe_context *ctx, void *state)
{
	struct si_compute *program = (struct si_compute *)state;
	struct si_context *sctx = (struct si_context*)ctx;

	if (!state) {
		return;
	}

	if (program == sctx->cs_shader_state.program)
		sctx->cs_shader_state.program = NULL;

	if (program == sctx->cs_shader_state.emitted_program)
		sctx->cs_shader_state.emitted_program = NULL;

	si_shader_destroy(&program->shader);
	FREE(program);
}

static void si_set_compute_resources(struct pipe_context *ctx_,
                                     unsigned start, unsigned count,
                                     struct pipe_surface **surfaces) { }

void si_init_compute_functions(struct si_context *sctx)
{
	sctx->b.b.create_compute_state = si_create_compute_state;
	sctx->b.b.delete_compute_state = si_delete_compute_state;
	sctx->b.b.bind_compute_state = si_bind_compute_state;
	/* ctx->context.create_sampler_view = evergreen_compute_create_sampler_view; */
	sctx->b.b.set_compute_resources = si_set_compute_resources;
	sctx->b.b.set_global_binding = si_set_global_binding;
	sctx->b.b.launch_grid = si_launch_grid;
}