radeonsi: remove unnecessary #includes
[mesa.git] / src / gallium / drivers / radeonsi / si_compute.c
/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "tgsi/tgsi_parse.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
#include "radeon/radeon_elf_util.h"

#include "amd_kernel_code_t.h"
#include "radeon/r600_cs.h"
#include "si_pipe.h"
#include "sid.h"

#define MAX_GLOBAL_BUFFERS 20

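/* A bound compute shader: the compiled si_shader plus the sizes requested
 * through pipe_compute_state and any OpenCL global buffers bound with
 * set_global_binding. */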
struct si_compute {
        unsigned ir_type;
        unsigned local_size;
        unsigned private_size;
        unsigned input_size;
        struct si_shader shader;

        struct pipe_resource *global_buffers[MAX_GLOBAL_BUFFERS];
        bool use_code_object_v2;
};

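/* Matches the layout of the HSA kernel dispatch packet
 * (hsa_kernel_dispatch_packet_t).  A pointer to one of these is passed to
 * code-object-v2 kernels that request a dispatch-pointer user SGPR. */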
struct dispatch_packet {
        uint16_t header;
        uint16_t setup;
        uint16_t workgroup_size_x;
        uint16_t workgroup_size_y;
        uint16_t workgroup_size_z;
        uint16_t reserved0;
        uint32_t grid_size_x;
        uint32_t grid_size_y;
        uint32_t grid_size_z;
        uint32_t private_segment_size;
        uint32_t group_segment_size;
        uint64_t kernel_object;
        uint64_t kernarg_address;
        uint64_t reserved2;
};

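/* Return the amd_kernel_code_t header stored at symbol_offset inside the
 * shader binary, or NULL for binaries that don't use code object v2. */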
static const amd_kernel_code_t *si_compute_get_code_object(
        const struct si_compute *program,
        uint64_t symbol_offset)
{
        if (!program->use_code_object_v2) {
                return NULL;
        }
        return (const amd_kernel_code_t*)
                (program->shader.binary.code + symbol_offset);
}

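/* Translate the resource registers and scratch/LDS sizes recorded in an
 * amd_kernel_code_t header into our si_shader_config representation. */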
static void code_object_to_config(const amd_kernel_code_t *code_object,
                                  struct si_shader_config *out_config)
{
        uint32_t rsrc1 = code_object->compute_pgm_resource_registers;
        uint32_t rsrc2 = code_object->compute_pgm_resource_registers >> 32;
        out_config->num_sgprs = code_object->wavefront_sgpr_count;
        out_config->num_vgprs = code_object->workitem_vgpr_count;
        out_config->float_mode = G_00B028_FLOAT_MODE(rsrc1);
        out_config->rsrc1 = rsrc1;
        out_config->lds_size = MAX2(out_config->lds_size, G_00B84C_LDS_SIZE(rsrc2));
        out_config->rsrc2 = rsrc2;
        out_config->scratch_bytes_per_wave =
                align(code_object->workitem_private_segment_byte_size * 64, 1024);
}

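/* Create a compute state object.  TGSI shaders are compiled here through
 * the common si_shader path; native (HSA code object) binaries are read
 * from the ELF and uploaded directly. */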
static void *si_create_compute_state(
        struct pipe_context *ctx,
        const struct pipe_compute_state *cso)
{
        struct si_context *sctx = (struct si_context *)ctx;
        struct si_screen *sscreen = (struct si_screen *)ctx->screen;
        struct si_compute *program = CALLOC_STRUCT(si_compute);
        struct si_shader *shader;

        /* CALLOC_STRUCT can fail. */
        if (!program)
                return NULL;

        shader = &program->shader;
        program->ir_type = cso->ir_type;
        program->local_size = cso->req_local_mem;
        program->private_size = cso->req_private_mem;
        program->input_size = cso->req_input_mem;
        program->use_code_object_v2 = HAVE_LLVM >= 0x0400 &&
                                      cso->ir_type == PIPE_SHADER_IR_NATIVE;

        if (cso->ir_type == PIPE_SHADER_IR_TGSI) {
                struct si_shader_selector sel;
                bool scratch_enabled;

                memset(&sel, 0, sizeof(sel));

                sel.tokens = tgsi_dup_tokens(cso->prog);
                if (!sel.tokens) {
                        FREE(program);
                        return NULL;
                }

                tgsi_scan_shader(cso->prog, &sel.info);
                sel.type = PIPE_SHADER_COMPUTE;
                sel.local_size = cso->req_local_mem;

                p_atomic_inc(&sscreen->b.num_shaders_created);

                program->shader.selector = &sel;

                if (si_shader_create(sscreen, sctx->tm, &program->shader,
                                     &sctx->b.debug)) {
                        FREE(sel.tokens);
                        FREE(program);
                        return NULL;
                }

                scratch_enabled = shader->config.scratch_bytes_per_wave > 0;

                shader->config.rsrc1 =
                        S_00B848_VGPRS((shader->config.num_vgprs - 1) / 4) |
                        S_00B848_SGPRS((shader->config.num_sgprs - 1) / 8) |
                        S_00B848_DX10_CLAMP(1) |
                        S_00B848_FLOAT_MODE(shader->config.float_mode);

                shader->config.rsrc2 = S_00B84C_USER_SGPR(SI_CS_NUM_USER_SGPR) |
                        S_00B84C_SCRATCH_EN(scratch_enabled) |
                        S_00B84C_TGID_X_EN(1) | S_00B84C_TGID_Y_EN(1) |
                        S_00B84C_TGID_Z_EN(1) | S_00B84C_TIDIG_COMP_CNT(2) |
                        S_00B84C_LDS_SIZE(shader->config.lds_size);

                FREE(sel.tokens);
        } else {
                const struct pipe_llvm_program_header *header;
                const char *code;
                header = cso->prog;
                code = cso->prog + sizeof(struct pipe_llvm_program_header);

                radeon_elf_read(code, header->num_bytes, &program->shader.binary);
                if (program->use_code_object_v2) {
                        const amd_kernel_code_t *code_object =
                                si_compute_get_code_object(program, 0);
                        code_object_to_config(code_object, &program->shader.config);
                } else {
                        si_shader_binary_read_config(&program->shader.binary,
                                                     &program->shader.config, 0);
                }
                si_shader_dump(sctx->screen, &program->shader, &sctx->b.debug,
                               PIPE_SHADER_COMPUTE, stderr);
                si_shader_binary_upload(sctx->screen, &program->shader);
        }

        return program;
}

static void si_bind_compute_state(struct pipe_context *ctx, void *state)
{
        struct si_context *sctx = (struct si_context*)ctx;
        sctx->cs_shader_state.program = (struct si_compute*)state;
}

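/* Bind OpenCL global buffers.  On input each handle contains a
 * buffer-relative offset; it is rewritten in place with the final
 * little-endian 64-bit GPU virtual address. */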
static void si_set_global_binding(
        struct pipe_context *ctx, unsigned first, unsigned n,
        struct pipe_resource **resources,
        uint32_t **handles)
{
        unsigned i;
        struct si_context *sctx = (struct si_context*)ctx;
        struct si_compute *program = sctx->cs_shader_state.program;

        if (!resources) {
                for (i = first; i < first + n; i++) {
                        pipe_resource_reference(&program->global_buffers[i], NULL);
                }
                return;
        }

        for (i = first; i < first + n; i++) {
                uint64_t va;
                uint32_t offset;
                pipe_resource_reference(&program->global_buffers[i], resources[i]);
                va = r600_resource(resources[i])->gpu_address;
                offset = util_le32_to_cpu(*handles[i]);
                va += offset;
                va = util_cpu_to_le64(va);
                memcpy(handles[i], &va, sizeof(va));
        }
}

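/* Emit the one-time compute register setup: start offsets, CU masks for
 * all shader engines and, on SI, the maximum wave ID. */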
static void si_initialize_compute(struct si_context *sctx)
{
        struct radeon_winsys_cs *cs = sctx->b.gfx.cs;

        radeon_set_sh_reg_seq(cs, R_00B810_COMPUTE_START_X, 3);
        radeon_emit(cs, 0);
        radeon_emit(cs, 0);
        radeon_emit(cs, 0);

        radeon_set_sh_reg_seq(cs, R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE0, 2);
        /* R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE0 / SE1 */
        radeon_emit(cs, S_00B858_SH0_CU_EN(0xffff) | S_00B858_SH1_CU_EN(0xffff));
        radeon_emit(cs, S_00B85C_SH0_CU_EN(0xffff) | S_00B85C_SH1_CU_EN(0xffff));

        if (sctx->b.chip_class >= CIK) {
                /* Also set R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE2 / SE3 */
                radeon_set_sh_reg_seq(cs,
                                      R_00B864_COMPUTE_STATIC_THREAD_MGMT_SE2, 2);
                radeon_emit(cs, S_00B864_SH0_CU_EN(0xffff) |
                                S_00B864_SH1_CU_EN(0xffff));
                radeon_emit(cs, S_00B868_SH0_CU_EN(0xffff) |
                                S_00B868_SH1_CU_EN(0xffff));
        }

        /* This register has been moved to R_00CD20_COMPUTE_MAX_WAVE_ID
         * and is now per pipe, so it should be handled in the
         * kernel if we want to use something other than the default value,
         * which is now 0x22f.
         */
        if (sctx->b.chip_class <= SI) {
                /* XXX: This should be:
                 * (number of compute units) * 4 * (waves per simd) - 1 */

                radeon_set_sh_reg(cs, R_00B82C_COMPUTE_MAX_WAVE_ID,
                                  0x190 /* Default value */);
        }

        sctx->cs_shader_state.emitted_program = NULL;
        sctx->cs_shader_state.initialized = true;
}

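/* Grow (or allocate) the per-context compute scratch buffer so it covers
 * scratch_bytes_per_wave for every wave that can be in flight, and patch
 * the shader with the new scratch address when the buffer changes. */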
static bool si_setup_compute_scratch_buffer(struct si_context *sctx,
                                            struct si_shader *shader,
                                            struct si_shader_config *config)
{
        uint64_t scratch_bo_size, scratch_needed;
        scratch_bo_size = 0;
        scratch_needed = config->scratch_bytes_per_wave * sctx->scratch_waves;
        if (sctx->compute_scratch_buffer)
                scratch_bo_size = sctx->compute_scratch_buffer->b.b.width0;

        if (scratch_bo_size < scratch_needed) {
                r600_resource_reference(&sctx->compute_scratch_buffer, NULL);

                sctx->compute_scratch_buffer =
                        si_resource_create_custom(&sctx->screen->b.b,
                                                  PIPE_USAGE_DEFAULT, scratch_needed);

                if (!sctx->compute_scratch_buffer)
                        return false;
        }

        if (sctx->compute_scratch_buffer != shader->scratch_bo && scratch_needed) {
                uint64_t scratch_va = sctx->compute_scratch_buffer->gpu_address;

                si_shader_apply_scratch_relocs(sctx, shader, config, scratch_va);

                if (si_shader_binary_upload(sctx->screen, shader))
                        return false;

                r600_resource_reference(&shader->scratch_bo,
                                        sctx->compute_scratch_buffer);
        }

        return true;
}

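/* Emit all state needed to switch to a different compute shader: program
 * address, RSRC1/RSRC2 (including the combined shader + state tracker LDS
 * size) and the scratch ring setup.  Skipped when the same program and
 * kernel offset are already bound. */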
static bool si_switch_compute_shader(struct si_context *sctx,
                                     struct si_compute *program,
                                     struct si_shader *shader,
                                     const amd_kernel_code_t *code_object,
                                     unsigned offset)
{
        struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
        struct si_shader_config inline_config = {0};
        struct si_shader_config *config;
        uint64_t shader_va;

        if (sctx->cs_shader_state.emitted_program == program &&
            sctx->cs_shader_state.offset == offset)
                return true;

        if (program->ir_type == PIPE_SHADER_IR_TGSI) {
                config = &shader->config;
        } else {
                unsigned lds_blocks;

                config = &inline_config;
                if (code_object) {
                        code_object_to_config(code_object, config);
                } else {
                        si_shader_binary_read_config(&shader->binary, config, offset);
                }

                lds_blocks = config->lds_size;
                /* XXX: We are over allocating LDS. For SI, the shader reports
                 * LDS in blocks of 256 bytes, so if there are 4 bytes lds
                 * allocated in the shader and 4 bytes allocated by the state
                 * tracker, then we will set LDS_SIZE to 512 bytes rather than 256.
                 */
                if (sctx->b.chip_class <= SI) {
                        lds_blocks += align(program->local_size, 256) >> 8;
                } else {
                        lds_blocks += align(program->local_size, 512) >> 9;
                }

                assert(lds_blocks <= 0xFF);

                config->rsrc2 &= C_00B84C_LDS_SIZE;
                config->rsrc2 |= S_00B84C_LDS_SIZE(lds_blocks);
        }

        if (!si_setup_compute_scratch_buffer(sctx, shader, config))
                return false;

        if (shader->scratch_bo) {
                COMPUTE_DBG(sctx->screen, "Waves: %u; Scratch per wave: %u bytes; "
                            "Total Scratch: %u bytes\n", sctx->scratch_waves,
                            config->scratch_bytes_per_wave,
                            config->scratch_bytes_per_wave *
                            sctx->scratch_waves);

                radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
                                          shader->scratch_bo, RADEON_USAGE_READWRITE,
                                          RADEON_PRIO_SCRATCH_BUFFER);
        }

        shader_va = shader->bo->gpu_address + offset;
        if (program->use_code_object_v2) {
                /* Shader code is placed after the amd_kernel_code_t
                 * struct. */
                shader_va += sizeof(amd_kernel_code_t);
        }

        radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, shader->bo,
                                  RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);

        radeon_set_sh_reg_seq(cs, R_00B830_COMPUTE_PGM_LO, 2);
        radeon_emit(cs, shader_va >> 8);
        radeon_emit(cs, shader_va >> 40);

        radeon_set_sh_reg_seq(cs, R_00B848_COMPUTE_PGM_RSRC1, 2);
        radeon_emit(cs, config->rsrc1);
        radeon_emit(cs, config->rsrc2);

        COMPUTE_DBG(sctx->screen, "COMPUTE_PGM_RSRC1: 0x%08x "
                    "COMPUTE_PGM_RSRC2: 0x%08x\n", config->rsrc1, config->rsrc2);

        radeon_set_sh_reg(cs, R_00B860_COMPUTE_TMPRING_SIZE,
                          S_00B860_WAVES(sctx->scratch_waves)
                          | S_00B860_WAVESIZE(config->scratch_bytes_per_wave >> 10));

        sctx->cs_shader_state.emitted_program = program;
        sctx->cs_shader_state.offset = offset;
        sctx->cs_shader_state.uses_scratch =
                config->scratch_bytes_per_wave != 0;

        return true;
}

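/* Write the four-dword scratch buffer resource descriptor for a
 * code-object-v2 kernel into the user SGPRs starting at user_sgpr. */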
static void setup_scratch_rsrc_user_sgprs(struct si_context *sctx,
                                          const amd_kernel_code_t *code_object,
                                          unsigned user_sgpr)
{
        struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
        uint64_t scratch_va = sctx->compute_scratch_buffer->gpu_address;

        unsigned max_private_element_size = AMD_HSA_BITS_GET(
                code_object->code_properties,
                AMD_CODE_PROPERTY_PRIVATE_ELEMENT_SIZE);

        uint32_t scratch_dword0 = scratch_va & 0xffffffff;
        uint32_t scratch_dword1 =
                S_008F04_BASE_ADDRESS_HI(scratch_va >> 32) |
                S_008F04_SWIZZLE_ENABLE(1);

        /* Disable address clamping */
        uint32_t scratch_dword2 = 0xffffffff;
        uint32_t scratch_dword3 =
                S_008F0C_ELEMENT_SIZE(max_private_element_size) |
                S_008F0C_INDEX_STRIDE(3) |
                S_008F0C_ADD_TID_ENABLE(1);

        if (sctx->screen->b.chip_class < VI) {
                /* BUF_DATA_FORMAT is ignored, but it cannot be
                 * BUF_DATA_FORMAT_INVALID. */
                scratch_dword3 |=
                        S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_8);
        }

        radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0 +
                              (user_sgpr * 4), 4);
        radeon_emit(cs, scratch_dword0);
        radeon_emit(cs, scratch_dword1);
        radeon_emit(cs, scratch_dword2);
        radeon_emit(cs, scratch_dword3);
}

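/* Populate the user SGPRs that a code-object-v2 kernel requests through
 * its code_properties flags: private segment (scratch) descriptor,
 * dispatch packet pointer, kernarg pointer and per-axis workgroup counts. */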
static void si_setup_user_sgprs_co_v2(struct si_context *sctx,
                                      const amd_kernel_code_t *code_object,
                                      const struct pipe_grid_info *info,
                                      uint64_t kernel_args_va)
{
        struct si_compute *program = sctx->cs_shader_state.program;
        struct radeon_winsys_cs *cs = sctx->b.gfx.cs;

        static const enum amd_code_property_mask_t workgroup_count_masks[] = {
                AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_X,
                AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Y,
                AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Z
        };

        unsigned i, user_sgpr = 0;
        if (AMD_HSA_BITS_GET(code_object->code_properties,
                             AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER)) {
                if (code_object->workitem_private_segment_byte_size > 0) {
                        setup_scratch_rsrc_user_sgprs(sctx, code_object,
                                                      user_sgpr);
                }
                user_sgpr += 4;
        }

        if (AMD_HSA_BITS_GET(code_object->code_properties,
                             AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR)) {
                struct dispatch_packet dispatch;
                unsigned dispatch_offset;
                struct r600_resource *dispatch_buf = NULL;
                uint64_t dispatch_va;

                /* Upload dispatch ptr */
                memset(&dispatch, 0, sizeof(dispatch));

                dispatch.workgroup_size_x = info->block[0];
                dispatch.workgroup_size_y = info->block[1];
                dispatch.workgroup_size_z = info->block[2];

                dispatch.grid_size_x = info->grid[0] * info->block[0];
                dispatch.grid_size_y = info->grid[1] * info->block[1];
                dispatch.grid_size_z = info->grid[2] * info->block[2];

                dispatch.private_segment_size = program->private_size;
                dispatch.group_segment_size = program->local_size;

                dispatch.kernarg_address = kernel_args_va;

                u_upload_data(sctx->b.uploader, 0, sizeof(dispatch), 256,
                              &dispatch, &dispatch_offset,
                              (struct pipe_resource**)&dispatch_buf);

                if (!dispatch_buf) {
                        fprintf(stderr, "Error: Failed to allocate dispatch "
                                        "packet.");
                }
                radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, dispatch_buf,
                                          RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER);

                dispatch_va = dispatch_buf->gpu_address + dispatch_offset;

                radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0 +
                                      (user_sgpr * 4), 2);
                radeon_emit(cs, dispatch_va);
                radeon_emit(cs, S_008F04_BASE_ADDRESS_HI(dispatch_va >> 32) |
                                S_008F04_STRIDE(0));

                r600_resource_reference(&dispatch_buf, NULL);
                user_sgpr += 2;
        }

        if (AMD_HSA_BITS_GET(code_object->code_properties,
                             AMD_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR)) {
                radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0 +
                                      (user_sgpr * 4), 2);
                radeon_emit(cs, kernel_args_va);
                radeon_emit(cs, S_008F04_BASE_ADDRESS_HI(kernel_args_va >> 32) |
                                S_008F04_STRIDE(0));
                user_sgpr += 2;
        }

        for (i = 0; i < 3 && user_sgpr < 16; i++) {
                if (code_object->code_properties & workgroup_count_masks[i]) {
                        radeon_set_sh_reg_seq(cs,
                                              R_00B900_COMPUTE_USER_DATA_0 +
                                              (user_sgpr * 4), 1);
                        radeon_emit(cs, info->grid[i]);
                        user_sgpr += 1;
                }
        }
}

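/* Upload the kernel arguments (optionally prefixed with 36 bytes of grid /
 * block size information for the non-code-object-v2 ABI) and point the
 * first two user SGPRs at them. */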
static void si_upload_compute_input(struct si_context *sctx,
                                    const amd_kernel_code_t *code_object,
                                    const struct pipe_grid_info *info)
{
        struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
        struct si_compute *program = sctx->cs_shader_state.program;
        struct r600_resource *input_buffer = NULL;
        unsigned kernel_args_size;
        unsigned num_work_size_bytes = program->use_code_object_v2 ? 0 : 36;
        uint32_t kernel_args_offset = 0;
        uint32_t *kernel_args;
        void *kernel_args_ptr;
        uint64_t kernel_args_va;
        unsigned i;

        /* The extra num_work_size_bytes are for work group / work item size information */
        kernel_args_size = program->input_size + num_work_size_bytes;

        u_upload_alloc(sctx->b.uploader, 0, kernel_args_size, 256,
                       &kernel_args_offset,
                       (struct pipe_resource**)&input_buffer, &kernel_args_ptr);

        kernel_args = (uint32_t*)kernel_args_ptr;
        kernel_args_va = input_buffer->gpu_address + kernel_args_offset;

        if (!code_object) {
                for (i = 0; i < 3; i++) {
                        kernel_args[i] = info->grid[i];
                        kernel_args[i + 3] = info->grid[i] * info->block[i];
                        kernel_args[i + 6] = info->block[i];
                }
        }

        memcpy(kernel_args + (num_work_size_bytes / 4), info->input,
               program->input_size);

        for (i = 0; i < (kernel_args_size / 4); i++) {
                COMPUTE_DBG(sctx->screen, "input %u : %u\n", i,
                            kernel_args[i]);
        }

        radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, input_buffer,
                                  RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER);

        if (code_object) {
                si_setup_user_sgprs_co_v2(sctx, code_object, info, kernel_args_va);
        } else {
                radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0, 2);
                radeon_emit(cs, kernel_args_va);
                radeon_emit(cs, S_008F04_BASE_ADDRESS_HI(kernel_args_va >> 32) |
                                S_008F04_STRIDE(0));
        }

        r600_resource_reference(&input_buffer, NULL);
}

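/* Load the grid size user SGPRs for TGSI shaders, either directly or with
 * COPY_DATA packets when the dispatch is indirect. */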
static void si_setup_tgsi_grid(struct si_context *sctx,
                               const struct pipe_grid_info *info)
{
        struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
        unsigned grid_size_reg = R_00B900_COMPUTE_USER_DATA_0 +
                                 4 * SI_SGPR_GRID_SIZE;

        if (info->indirect) {
                uint64_t base_va = r600_resource(info->indirect)->gpu_address;
                uint64_t va = base_va + info->indirect_offset;
                int i;

                radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
                                          (struct r600_resource *)info->indirect,
                                          RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);

                for (i = 0; i < 3; ++i) {
                        radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
                        radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
                                        COPY_DATA_DST_SEL(COPY_DATA_REG));
                        radeon_emit(cs, (va + 4 * i));
                        radeon_emit(cs, (va + 4 * i) >> 32);
                        radeon_emit(cs, (grid_size_reg >> 2) + i);
                        radeon_emit(cs, 0);
                }
        } else {
                radeon_set_sh_reg_seq(cs, grid_size_reg, 3);
                radeon_emit(cs, info->grid[0]);
                radeon_emit(cs, info->grid[1]);
                radeon_emit(cs, info->grid[2]);
        }
}

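/* Emit the thread group size and the DISPATCH_DIRECT or DISPATCH_INDIRECT
 * packet that actually launches the grid. */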
static void si_emit_dispatch_packets(struct si_context *sctx,
                                     const struct pipe_grid_info *info)
{
        struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
        bool render_cond_bit = sctx->b.render_cond && !sctx->b.render_cond_force_off;
        unsigned waves_per_threadgroup =
                DIV_ROUND_UP(info->block[0] * info->block[1] * info->block[2], 64);

        radeon_set_sh_reg(cs, R_00B854_COMPUTE_RESOURCE_LIMITS,
                          S_00B854_SIMD_DEST_CNTL(waves_per_threadgroup % 4 == 0));

        radeon_set_sh_reg_seq(cs, R_00B81C_COMPUTE_NUM_THREAD_X, 3);
        radeon_emit(cs, S_00B81C_NUM_THREAD_FULL(info->block[0]));
        radeon_emit(cs, S_00B820_NUM_THREAD_FULL(info->block[1]));
        radeon_emit(cs, S_00B824_NUM_THREAD_FULL(info->block[2]));

        if (info->indirect) {
                uint64_t base_va = r600_resource(info->indirect)->gpu_address;

                radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
                                          (struct r600_resource *)info->indirect,
                                          RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);

                radeon_emit(cs, PKT3(PKT3_SET_BASE, 2, 0) |
                                PKT3_SHADER_TYPE_S(1));
                radeon_emit(cs, 1);
                radeon_emit(cs, base_va);
                radeon_emit(cs, base_va >> 32);

                radeon_emit(cs, PKT3(PKT3_DISPATCH_INDIRECT, 1, render_cond_bit) |
                                PKT3_SHADER_TYPE_S(1));
                radeon_emit(cs, info->indirect_offset);
                radeon_emit(cs, 1);
        } else {
                radeon_emit(cs, PKT3(PKT3_DISPATCH_DIRECT, 3, render_cond_bit) |
                                PKT3_SHADER_TYPE_S(1));
                radeon_emit(cs, info->grid[0]);
                radeon_emit(cs, info->grid[1]);
                radeon_emit(cs, info->grid[2]);
                radeon_emit(cs, 1);
        }
}

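/* Top-level grid launch: decompress sampled textures, flush caches, switch
 * shaders, upload inputs and descriptors, then emit the dispatch. */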
static void si_launch_grid(
        struct pipe_context *ctx, const struct pipe_grid_info *info)
{
        struct si_context *sctx = (struct si_context*)ctx;
        struct si_compute *program = sctx->cs_shader_state.program;
        const amd_kernel_code_t *code_object =
                si_compute_get_code_object(program, info->pc);
        int i;
        /* HW bug workaround when CS threadgroups > 256 threads and async
         * compute isn't used, i.e. only one compute job can run at a time.
         * If async compute is possible, the threadgroup size must be limited
         * to 256 threads on all queues to avoid the bug.
         * Only SI and certain CIK chips are affected.
         */
        bool cs_regalloc_hang =
                (sctx->b.chip_class == SI ||
                 sctx->b.family == CHIP_BONAIRE ||
                 sctx->b.family == CHIP_KABINI) &&
                info->block[0] * info->block[1] * info->block[2] > 256;

        if (cs_regalloc_hang)
                sctx->b.flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
                                 SI_CONTEXT_CS_PARTIAL_FLUSH;

        si_decompress_compute_textures(sctx);

        /* Add buffer sizes for memory checking in need_cs_space. */
        r600_context_add_resource_size(ctx, &program->shader.bo->b.b);
        /* TODO: add the scratch buffer */

        if (info->indirect) {
                r600_context_add_resource_size(ctx, info->indirect);

                /* The hw doesn't read the indirect buffer via TC L2. */
                if (r600_resource(info->indirect)->TC_L2_dirty) {
                        sctx->b.flags |= SI_CONTEXT_INV_GLOBAL_L2;
                        r600_resource(info->indirect)->TC_L2_dirty = false;
                }
        }

        si_need_cs_space(sctx);

        if (!sctx->cs_shader_state.initialized)
                si_initialize_compute(sctx);

        if (sctx->b.flags)
                si_emit_cache_flush(sctx);

        if (!si_switch_compute_shader(sctx, program, &program->shader,
                                      code_object, info->pc))
                return;

        si_upload_compute_shader_descriptors(sctx);
        si_emit_compute_shader_userdata(sctx);

        if (si_is_atom_dirty(sctx, sctx->atoms.s.render_cond)) {
                sctx->atoms.s.render_cond->emit(&sctx->b,
                                                sctx->atoms.s.render_cond);
                si_set_atom_dirty(sctx, sctx->atoms.s.render_cond, false);
        }

        if (program->input_size || program->ir_type == PIPE_SHADER_IR_NATIVE)
                si_upload_compute_input(sctx, code_object, info);

        /* Global buffers */
        for (i = 0; i < MAX_GLOBAL_BUFFERS; i++) {
                struct r600_resource *buffer =
                        (struct r600_resource*)program->global_buffers[i];
                if (!buffer) {
                        continue;
                }
                radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, buffer,
                                          RADEON_USAGE_READWRITE,
                                          RADEON_PRIO_COMPUTE_GLOBAL);
        }

        if (program->ir_type == PIPE_SHADER_IR_TGSI)
                si_setup_tgsi_grid(sctx, info);

        si_ce_pre_draw_synchronization(sctx);

        si_emit_dispatch_packets(sctx, info);

        si_ce_post_draw_synchronization(sctx);

        sctx->compute_is_busy = true;
        sctx->b.num_compute_calls++;
        if (sctx->cs_shader_state.uses_scratch)
                sctx->b.num_spill_compute_calls++;

        if (cs_regalloc_hang)
                sctx->b.flags |= SI_CONTEXT_CS_PARTIAL_FLUSH;
}

static void si_delete_compute_state(struct pipe_context *ctx, void *state)
{
        struct si_compute *program = (struct si_compute *)state;
        struct si_context *sctx = (struct si_context*)ctx;

        if (!state) {
                return;
        }

        if (program == sctx->cs_shader_state.program)
                sctx->cs_shader_state.program = NULL;

        if (program == sctx->cs_shader_state.emitted_program)
                sctx->cs_shader_state.emitted_program = NULL;

        si_shader_destroy(&program->shader);
        FREE(program);
}

static void si_set_compute_resources(struct pipe_context *ctx_,
                                     unsigned start, unsigned count,
                                     struct pipe_surface **surfaces) { }

void si_init_compute_functions(struct si_context *sctx)
{
        sctx->b.b.create_compute_state = si_create_compute_state;
        sctx->b.b.delete_compute_state = si_delete_compute_state;
        sctx->b.b.bind_compute_state = si_bind_compute_state;
        /* ctx->context.create_sampler_view = evergreen_compute_create_sampler_view; */
        sctx->b.b.set_compute_resources = si_set_compute_resources;
        sctx->b.b.set_global_binding = si_set_global_binding;
        sctx->b.b.launch_grid = si_launch_grid;
}