r600g/compute: Fix input buffer size calculation
[mesa.git] / src / gallium / drivers / r600 / evergreen_compute.c
1 /*
2 * Copyright 2011 Adam Rak <adam.rak@streamnovation.com>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Adam Rak <adam.rak@streamnovation.com>
25 */
26
27 #include <stdio.h>
28 #include <errno.h>
29 #include "pipe/p_defines.h"
30 #include "pipe/p_state.h"
31 #include "pipe/p_context.h"
32 #include "util/u_blitter.h"
33 #include "util/u_double_list.h"
34 #include "util/u_transfer.h"
35 #include "util/u_surface.h"
36 #include "util/u_pack_color.h"
37 #include "util/u_memory.h"
38 #include "util/u_inlines.h"
39 #include "util/u_framebuffer.h"
40 #include "pipebuffer/pb_buffer.h"
41 #include "evergreend.h"
42 #include "r600_resource.h"
43 #include "r600_shader.h"
44 #include "r600_pipe.h"
45 #include "r600_formats.h"
46 #include "evergreen_compute.h"
47 #include "evergreen_compute_internal.h"
48 #include "compute_memory_pool.h"
49 #ifdef HAVE_OPENCL
50 #include "radeon_llvm_util.h"
51 #endif
52
53 /**
54 RAT0 is for global binding write
55 VTX1 is for global binding read
56
57 for writing images RAT1...
58 for reading images TEX2...
59 TEX2-RAT1 is paired
60
61 TEX2... consumes the same fetch resources that VTX2... would consume
62
63 CONST0 and VTX0 are for parameters
64 CONST0 is for binding a smaller input parameter buffer and for constant
65 indexing; it is also constant cached
66 VTX0 is for indirect/non-constant indexing, or for when the input is bigger
67 than what the constant cache can handle
68
69 RATs are limited to 12, so we can bind at most 11 textures for writing
70 because we reserve RAT0 for global bindings. With byte addressing enabled,
71 we should reserve another one too => 10 image bindings for writing max.
72
73 from Nvidia OpenCL:
74 CL_DEVICE_MAX_READ_IMAGE_ARGS: 128
75 CL_DEVICE_MAX_WRITE_IMAGE_ARGS: 8
76
77 so 10 for writing is enough. 176 is the max for reading according to the docs
78
79 writable images should be listed first (id < 10), so their id corresponds to RAT(id+1)
80 writable images will consume TEX slots, and VTX slots too, because of linear indexing
81
82 */
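/* Concrete example of the mapping implemented below: the surface bound at
 * index i by evergreen_set_compute_resources() is written through RAT(i+1)
 * and read through vertex buffer slot i+2, and the sampler view bound at
 * index i by evergreen_set_cs_sampler_view() uses texture fetch slot i+2,
 * because fetch slots 0 and 1 are reserved for the kernel parameters and the
 * global memory pool. */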
83
84 static void evergreen_cs_set_vertex_buffer(
85 struct r600_context * rctx,
86 unsigned vb_index,
87 unsigned offset,
88 struct pipe_resource * buffer)
89 {
90 struct r600_vertexbuf_state *state = &rctx->cs_vertex_buffer_state;
91 struct pipe_vertex_buffer *vb = &state->vb[vb_index];
92 vb->stride = 1;
93 vb->buffer_offset = offset;
94 vb->buffer = buffer;
95 vb->user_buffer = NULL;
96
97 /* The vertex instructions in the compute shaders use the texture cache,
98 * so we need to invalidate it. */
99 rctx->flags |= R600_CONTEXT_INVAL_READ_CACHES;
100 state->enabled_mask |= 1 << vb_index;
101 state->dirty_mask |= 1 << vb_index;
102 state->atom.dirty = true;
103 }
104
105 static const struct u_resource_vtbl r600_global_buffer_vtbl =
106 {
107 u_default_resource_get_handle, /* get_handle */
108 r600_compute_global_buffer_destroy, /* resource_destroy */
109 r600_compute_global_transfer_map, /* transfer_map */
110 r600_compute_global_transfer_flush_region,/* transfer_flush_region */
111 r600_compute_global_transfer_unmap, /* transfer_unmap */
112 r600_compute_global_transfer_inline_write /* transfer_inline_write */
113 };
114
115
116 void *evergreen_create_compute_state(
117 struct pipe_context *ctx_,
118 const struct pipe_compute_state *cso)
119 {
120 struct r600_context *ctx = (struct r600_context *)ctx_;
121 struct r600_pipe_compute *shader = CALLOC_STRUCT(r600_pipe_compute);
122
123 #ifdef HAVE_OPENCL
124 const struct pipe_llvm_program_header * header;
125 const unsigned char * code;
126 unsigned i;
127
128 COMPUTE_DBG(ctx->screen, "*** evergreen_create_compute_state\n");
129
130 header = cso->prog;
131 code = cso->prog + sizeof(struct pipe_llvm_program_header);
132 #endif
133
134 shader->ctx = (struct r600_context*)ctx;
135 shader->resources = (struct evergreen_compute_resource*)
136 CALLOC(sizeof(struct evergreen_compute_resource),
137 get_compute_resource_num());
138 shader->local_size = cso->req_local_mem; ///TODO: assert it
139 shader->private_size = cso->req_private_mem;
140 shader->input_size = cso->req_input_mem;
141
142 #ifdef HAVE_OPENCL
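/* cso->prog is a pipe_llvm_program_header followed by LLVM bitcode; each
 * kernel gets its own module here and is compiled lazily on first launch
 * in evergreen_launch_grid(). */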
143 shader->num_kernels = radeon_llvm_get_num_kernels(code, header->num_bytes);
144 shader->kernels = CALLOC(sizeof(struct r600_kernel), shader->num_kernels);
145
146 for (i = 0; i < shader->num_kernels; i++) {
147 struct r600_kernel *kernel = &shader->kernels[i];
148 kernel->llvm_module = radeon_llvm_get_kernel_module(i, code,
149 header->num_bytes);
150 }
151 #endif
152 return shader;
153 }
154
155 void evergreen_delete_compute_state(struct pipe_context *ctx, void* state)
156 {
157 struct r600_pipe_compute *shader = (struct r600_pipe_compute *)state;
158
159 free(shader->resources);
160 free(shader);
161 }
162
163 static void evergreen_bind_compute_state(struct pipe_context *ctx_, void *state)
164 {
165 struct r600_context *ctx = (struct r600_context *)ctx_;
166
167 COMPUTE_DBG(ctx->screen, "*** evergreen_bind_compute_state\n");
168
169 ctx->cs_shader_state.shader = (struct r600_pipe_compute *)state;
170 }
171
172 /* The kernel parameters are stored in a vertex buffer (ID=0). Besides the
173 * explicit kernel parameters, there are implicit parameters that need to be
174 * stored in the vertex buffer as well. Here is how these parameters are
175 * organized in the buffer:
176 *
177 * DWORDS 0-2: Number of work groups in each dimension (x,y,z)
178 * DWORDS 3-5: Number of global work items in each dimension (x,y,z)
179 * DWORDS 6-8: Number of work items within each work group in each dimension
180 * (x,y,z)
181 * DWORDS 9+ : Kernel parameters
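 *
 * The nine implicit DWORDs occupy 36 bytes, so the buffer allocated below is
 * shader->input_size + 36 bytes long. As an illustration only (the driver
 * does not define such a struct), the implicit part is laid out like:
 *
 *   struct implicit_params {
 *       uint32_t num_work_groups[3]; // DWORDS 0-2
 *       uint32_t global_size[3];     // DWORDS 3-5
 *       uint32_t local_size[3];      // DWORDS 6-8
 *   }; // 36 bytes, followed by the kernel parameters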
182 */
183 void evergreen_compute_upload_input(
184 struct pipe_context *ctx_,
185 const uint *block_layout,
186 const uint *grid_layout,
187 const void *input)
188 {
189 struct r600_context *ctx = (struct r600_context *)ctx_;
190 struct r600_pipe_compute *shader = ctx->cs_shader_state.shader;
191 int i;
192 unsigned kernel_parameters_offset_bytes = 36; /* 9 implicit DWORDs * 4 bytes */
193 uint32_t * num_work_groups_start;
194 uint32_t * global_size_start;
195 uint32_t * local_size_start;
196 uint32_t * kernel_parameters_start;
197
198 if (shader->input_size == 0) {
199 return;
200 }
201
202 if (!shader->kernel_param) {
203 unsigned buffer_size = shader->input_size;
204
205 /* Add space for the grid dimensions */
206 buffer_size += kernel_parameters_offset_bytes;
207 shader->kernel_param = r600_compute_buffer_alloc_vram(
208 ctx->screen, buffer_size);
209 }
210
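/* The three implicit sections are each three uint32 values long, so the
 * section pointers set up below are spaced 3 elements (12 bytes) apart. */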
211 num_work_groups_start = r600_buffer_mmap_sync_with_rings(ctx, shader->kernel_param, PIPE_TRANSFER_WRITE);
212 global_size_start = num_work_groups_start + (3 * (sizeof(uint) / 4));
213 local_size_start = global_size_start + (3 * (sizeof(uint) / 4));
214 kernel_parameters_start = local_size_start + (3 * (sizeof(uint) / 4));
215
216 /* Copy the number of work groups */
217 memcpy(num_work_groups_start, grid_layout, 3 * sizeof(uint));
218
219 /* Copy the global size */
220 for (i = 0; i < 3; i++) {
221 global_size_start[i] = grid_layout[i] * block_layout[i];
222 }
223
224 /* Copy the local dimensions */
225 memcpy(local_size_start, block_layout, 3 * sizeof(uint));
226
227 /* Copy the kernel inputs */
228 memcpy(kernel_parameters_start, input, shader->input_size);
229
230 for (i = 0; i < (kernel_parameters_offset_bytes / 4) +
231 (shader->input_size / 4); i++) {
232 COMPUTE_DBG(ctx->screen, "input %i : %i\n", i,
233 ((unsigned*)num_work_groups_start)[i]);
234 }
235
236 ctx->ws->buffer_unmap(shader->kernel_param->cs_buf);
237
238 ///ID=0 is reserved for the parameters
239 evergreen_cs_set_vertex_buffer(ctx, 0, 0,
240 (struct pipe_resource*)shader->kernel_param);
241 ///ID=0 is reserved for parameters
242 evergreen_set_const_cache(shader, 0, shader->kernel_param,
243 shader->input_size, 0);
244 }
245
246 static void evergreen_emit_direct_dispatch(
247 struct r600_context *rctx,
248 const uint *block_layout, const uint *grid_layout)
249 {
250 int i;
251 struct radeon_winsys_cs *cs = rctx->rings.gfx.cs;
252 unsigned num_waves;
253 unsigned num_pipes = rctx->screen->info.r600_max_pipes;
254 unsigned wave_divisor = (16 * num_pipes);
255 int group_size = 1;
256 int grid_size = 1;
257 /* XXX: Enable lds and get size from cs_shader_state */
258 unsigned lds_size = 0;
259
260 /* Calculate group_size/grid_size */
261 for (i = 0; i < 3; i++) {
262 group_size *= block_layout[i];
263 }
264
265 for (i = 0; i < 3; i++) {
266 grid_size *= grid_layout[i];
267 }
268
269 /* num_waves = ceil((tg_size.x * tg_size.y * tg_size.z) / (16 * num_pipes)) */
270 num_waves = (block_layout[0] * block_layout[1] * block_layout[2] +
271 wave_divisor - 1) / wave_divisor;
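/* e.g. an 8x8x8 thread block on a 2-pipe chip: wave_divisor = 32,
 * num_waves = ceil(512 / 32) = 16 wavefronts per thread block. */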
272
273 COMPUTE_DBG(rctx->screen, "Using %u pipes, there are %u wavefronts per thread block\n",
274 num_pipes, num_waves);
275
276 /* XXX: Partition the LDS between PS/CS. By default half (4096 dwords
277 * on Evergreen) goes to Pixel Shaders and half goes to Compute Shaders.
278 * We may need to allocate the entire LDS space for Compute Shaders.
279 *
280 * EG: R_008E2C_SQ_LDS_RESOURCE_MGMT := S_008E2C_NUM_LS_LDS(lds_dwords)
281 * CM: CM_R_0286FC_SPI_LDS_MGMT := S_0286FC_NUM_LS_LDS(lds_dwords)
282 */
283
284 r600_write_config_reg(cs, R_008970_VGT_NUM_INDICES, group_size);
285
286 r600_write_config_reg_seq(cs, R_00899C_VGT_COMPUTE_START_X, 3);
287 r600_write_value(cs, 0); /* R_00899C_VGT_COMPUTE_START_X */
288 r600_write_value(cs, 0); /* R_0089A0_VGT_COMPUTE_START_Y */
289 r600_write_value(cs, 0); /* R_0089A4_VGT_COMPUTE_START_Z */
290
291 r600_write_config_reg(cs, R_0089AC_VGT_COMPUTE_THREAD_GROUP_SIZE,
292 group_size);
293
294 r600_write_compute_context_reg_seq(cs, R_0286EC_SPI_COMPUTE_NUM_THREAD_X, 3);
295 r600_write_value(cs, block_layout[0]); /* R_0286EC_SPI_COMPUTE_NUM_THREAD_X */
296 r600_write_value(cs, block_layout[1]); /* R_0286F0_SPI_COMPUTE_NUM_THREAD_Y */
297 r600_write_value(cs, block_layout[2]); /* R_0286F4_SPI_COMPUTE_NUM_THREAD_Z */
298
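/* As written here, SQ_LDS_ALLOC packs the LDS size in the low bits and the
 * wavefront count at bit 14; lds_size stays 0 until LDS use is enabled
 * (see the XXX above). */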
299 r600_write_compute_context_reg(cs, CM_R_0288E8_SQ_LDS_ALLOC,
300 lds_size | (num_waves << 14));
301
302 /* Dispatch packet */
303 r600_write_value(cs, PKT3C(PKT3_DISPATCH_DIRECT, 3, 0));
304 r600_write_value(cs, grid_layout[0]);
305 r600_write_value(cs, grid_layout[1]);
306 r600_write_value(cs, grid_layout[2]);
307 /* VGT_DISPATCH_INITIATOR = COMPUTE_SHADER_EN */
308 r600_write_value(cs, 1);
309 }
310
311 static void compute_emit_cs(struct r600_context *ctx, const uint *block_layout,
312 const uint *grid_layout)
313 {
314 struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;
315 unsigned flush_flags = 0;
316 int i;
317 struct r600_resource *onebo = NULL;
318 struct evergreen_compute_resource *resources =
319 ctx->cs_shader_state.shader->resources;
320
321 /* make sure that the gfx ring is the only one active */
322 if (ctx->rings.dma.cs) {
323 ctx->rings.dma.flush(ctx, RADEON_FLUSH_ASYNC);
324 }
325
326 /* Initialize all the compute-related registers.
327 *
328 * See evergreen_init_atom_start_compute_cs() in this file for the list
329 * of registers initialized by the start_compute_cs_cmd atom.
330 */
331 r600_emit_command_buffer(cs, &ctx->start_compute_cs_cmd);
332
333 ctx->flags |= R600_CONTEXT_WAIT_3D_IDLE | R600_CONTEXT_FLUSH_AND_INV;
334 r600_flush_emit(ctx);
335
336 /* Emit colorbuffers. */
337 for (i = 0; i < ctx->framebuffer.state.nr_cbufs; i++) {
338 struct r600_surface *cb = (struct r600_surface*)ctx->framebuffer.state.cbufs[i];
339 unsigned reloc = r600_context_bo_reloc(ctx, &ctx->rings.gfx,
340 (struct r600_resource*)cb->base.texture,
341 RADEON_USAGE_READWRITE);
342
343 r600_write_compute_context_reg_seq(cs, R_028C60_CB_COLOR0_BASE + i * 0x3C, 7);
344 r600_write_value(cs, cb->cb_color_base); /* R_028C60_CB_COLOR0_BASE */
345 r600_write_value(cs, cb->cb_color_pitch); /* R_028C64_CB_COLOR0_PITCH */
346 r600_write_value(cs, cb->cb_color_slice); /* R_028C68_CB_COLOR0_SLICE */
347 r600_write_value(cs, cb->cb_color_view); /* R_028C6C_CB_COLOR0_VIEW */
348 r600_write_value(cs, cb->cb_color_info); /* R_028C70_CB_COLOR0_INFO */
349 r600_write_value(cs, cb->cb_color_attrib); /* R_028C74_CB_COLOR0_ATTRIB */
350 r600_write_value(cs, cb->cb_color_dim); /* R_028C78_CB_COLOR0_DIM */
351
352 r600_write_value(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C60_CB_COLOR0_BASE */
353 r600_write_value(cs, reloc);
354
355 if (!ctx->keep_tiling_flags) {
356 r600_write_value(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C70_CB_COLOR0_INFO */
357 r600_write_value(cs, reloc);
358 }
359
360 r600_write_value(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C74_CB_COLOR0_ATTRIB */
361 r600_write_value(cs, reloc);
362 }
363
364 /* Set CB_TARGET_MASK XXX: Use cb_misc_state */
365 r600_write_compute_context_reg(cs, R_028238_CB_TARGET_MASK,
366 ctx->compute_cb_target_mask);
367
368
369 /* Emit vertex buffer state */
370 ctx->cs_vertex_buffer_state.atom.num_dw = 12 * util_bitcount(ctx->cs_vertex_buffer_state.dirty_mask);
371 r600_emit_atom(ctx, &ctx->cs_vertex_buffer_state.atom);
372
373 /* Emit compute shader state */
374 r600_emit_atom(ctx, &ctx->cs_shader_state.atom);
375
376 for (i = 0; i < get_compute_resource_num(); i++) {
377 if (resources[i].enabled) {
378 int j;
379 COMPUTE_DBG(ctx->screen, "resnum: %i, cdw: %i\n", i, cs->cdw);
380
381 for (j = 0; j < resources[i].cs_end; j++) {
382 if (resources[i].do_reloc[j]) {
383 assert(resources[i].bo);
384 evergreen_emit_ctx_reloc(ctx,
385 resources[i].bo,
386 resources[i].usage);
387 }
388
389 cs->buf[cs->cdw++] = resources[i].cs[j];
390 }
391
392 if (resources[i].bo) {
393 onebo = resources[i].bo;
394 evergreen_emit_ctx_reloc(ctx,
395 resources[i].bo,
396 resources[i].usage);
397
398 ///special case for textures
399 if (resources[i].do_reloc
400 [resources[i].cs_end] == 2) {
401 evergreen_emit_ctx_reloc(ctx,
402 resources[i].bo,
403 resources[i].usage);
404 }
405 }
406 }
407 }
408
409 /* Emit dispatch state and dispatch packet */
410 evergreen_emit_direct_dispatch(ctx, block_layout, grid_layout);
411
412 /* XXX evergreen_flush_emit() hardcodes the CP_COHER_SIZE to 0xffffffff
413 */
414 ctx->flags |= R600_CONTEXT_INVAL_READ_CACHES;
415 r600_flush_emit(ctx);
416
417 #if 0
418 COMPUTE_DBG(ctx->screen, "cdw: %i\n", cs->cdw);
419 for (i = 0; i < cs->cdw; i++) {
420 COMPUTE_DBG(ctx->screen, "%4i : 0x%08X\n", i, cs->buf[i]);
421 }
422 #endif
423
424 flush_flags = RADEON_FLUSH_ASYNC | RADEON_FLUSH_COMPUTE;
425 if (ctx->keep_tiling_flags) {
426 flush_flags |= RADEON_FLUSH_KEEP_TILING_FLAGS;
427 }
428
429 ctx->ws->cs_flush(ctx->rings.gfx.cs, flush_flags);
430
431 ctx->flags = 0;
432
433 COMPUTE_DBG(ctx->screen, "shader started\n");
434
435 if (onebo) ctx->ws->buffer_wait(onebo->buf, 0);
436
437 COMPUTE_DBG(ctx->screen, "...\n");
438 }
439
440
441 /**
442 * Emit function for r600_cs_shader_state atom
443 */
444 void evergreen_emit_cs_shader(
445 struct r600_context *rctx,
446 struct r600_atom *atom)
447 {
448 struct r600_cs_shader_state *state =
449 (struct r600_cs_shader_state*)atom;
450 struct r600_pipe_compute *shader = state->shader;
451 struct r600_kernel *kernel = &shader->kernels[state->kernel_index];
452 struct radeon_winsys_cs *cs = rctx->rings.gfx.cs;
453 uint64_t va;
454
455 va = r600_resource_va(&rctx->screen->screen, &kernel->code_bo->b.b);
456
457 r600_write_compute_context_reg_seq(cs, R_0288D0_SQ_PGM_START_LS, 3);
458 r600_write_value(cs, va >> 8); /* R_0288D0_SQ_PGM_START_LS */
459 r600_write_value(cs, /* R_0288D4_SQ_PGM_RESOURCES_LS */
460 S_0288D4_NUM_GPRS(kernel->bc.ngpr)
461 | S_0288D4_STACK_SIZE(kernel->bc.nstack));
462 r600_write_value(cs, 0); /* R_0288D8_SQ_PGM_RESOURCES_LS_2 */
463
464 r600_write_value(cs, PKT3C(PKT3_NOP, 0, 0));
465 r600_write_value(cs, r600_context_bo_reloc(rctx, &rctx->rings.gfx,
466 kernel->code_bo, RADEON_USAGE_READ));
467
468 rctx->flags |= R600_CONTEXT_INVAL_READ_CACHES;
469 }
470
471 static void evergreen_launch_grid(
472 struct pipe_context *ctx_,
473 const uint *block_layout, const uint *grid_layout,
474 uint32_t pc, const void *input)
475 {
476 struct r600_context *ctx = (struct r600_context *)ctx_;
477
478 #ifdef HAVE_OPENCL
479 COMPUTE_DBG(ctx->screen, "*** evergreen_launch_grid: pc = %u\n", pc);
480
481 struct r600_pipe_compute *shader = ctx->cs_shader_state.shader;
482 if (!shader->kernels[pc].code_bo) {
483 void *p;
484 struct r600_kernel *kernel = &shader->kernels[pc];
485 r600_compute_shader_create(ctx_, kernel->llvm_module, &kernel->bc);
486 kernel->code_bo = r600_compute_buffer_alloc_vram(ctx->screen,
487 kernel->bc.ndw * 4);
488 p = r600_buffer_mmap_sync_with_rings(ctx, kernel->code_bo, PIPE_TRANSFER_WRITE);
489 memcpy(p, kernel->bc.bytecode, kernel->bc.ndw * 4);
490 ctx->ws->buffer_unmap(kernel->code_bo->cs_buf);
491 }
492 #endif
493
494 ctx->cs_shader_state.kernel_index = pc;
495 evergreen_compute_upload_input(ctx_, block_layout, grid_layout, input);
496 compute_emit_cs(ctx, block_layout, grid_layout);
497 }
498
499 static void evergreen_set_compute_resources(struct pipe_context * ctx_,
500 unsigned start, unsigned count,
501 struct pipe_surface ** surfaces)
502 {
503 struct r600_context *ctx = (struct r600_context *)ctx_;
504 struct r600_surface **resources = (struct r600_surface **)surfaces;
505
506 COMPUTE_DBG(ctx->screen, "*** evergreen_set_compute_resources: start = %u count = %u\n",
507 start, count);
508
509 for (int i = 0; i < count; i++) {
510 /* The first two vertex buffers are reserved for parameters and
511 * global buffers. */
512 unsigned vtx_id = 2 + i;
513 if (resources[i]) {
514 struct r600_resource_global *buffer =
515 (struct r600_resource_global*)
516 resources[i]->base.texture;
517 if (resources[i]->base.writable) {
518 assert(i+1 < 12);
519
520 evergreen_set_rat(ctx->cs_shader_state.shader, i+1,
521 (struct r600_resource *)resources[i]->base.texture,
522 buffer->chunk->start_in_dw*4,
523 resources[i]->base.texture->width0);
524 }
525
526 evergreen_cs_set_vertex_buffer(ctx, vtx_id,
527 buffer->chunk->start_in_dw * 4,
528 resources[i]->base.texture);
529 }
530 }
531 }
532
533 static void evergreen_set_cs_sampler_view(struct pipe_context *ctx_,
534 unsigned start_slot, unsigned count,
535 struct pipe_sampler_view **views)
536 {
537 struct r600_context *ctx = (struct r600_context *)ctx_;
538 struct r600_pipe_sampler_view **resource =
539 (struct r600_pipe_sampler_view **)views;
540
541 for (int i = 0; i < count; i++) {
542 if (resource[i]) {
543 assert(i+1 < 12);
544 ///FETCH0 = VTX0 (param buffer),
545 //FETCH1 = VTX1 (global buffer pool), FETCH2... = TEX
546 evergreen_set_tex_resource(ctx->cs_shader_state.shader, resource[i], i+2);
547 }
548 }
549 }
550
551 static void evergreen_bind_compute_sampler_states(
552 struct pipe_context *ctx_,
553 unsigned start_slot,
554 unsigned num_samplers,
555 void **samplers_)
556 {
557 struct r600_context *ctx = (struct r600_context *)ctx_;
558 struct compute_sampler_state ** samplers =
559 (struct compute_sampler_state **)samplers_;
560
561 for (int i = 0; i < num_samplers; i++) {
562 if (samplers[i]) {
563 evergreen_set_sampler_resource(
564 ctx->cs_shader_state.shader, samplers[i], i);
565 }
566 }
567 }
568
569 static void evergreen_set_global_binding(
570 struct pipe_context *ctx_, unsigned first, unsigned n,
571 struct pipe_resource **resources,
572 uint32_t **handles)
573 {
574 struct r600_context *ctx = (struct r600_context *)ctx_;
575 struct compute_memory_pool *pool = ctx->screen->global_pool;
576 struct r600_resource_global **buffers =
577 (struct r600_resource_global **)resources;
578
579 COMPUTE_DBG(ctx->screen, "*** evergreen_set_global_binding first = %u n = %u\n",
580 first, n);
581
582 if (!resources) {
583 /* XXX: Unset */
584 return;
585 }
586
587 compute_memory_finalize_pending(pool, ctx_);
588
589 for (int i = 0; i < n; i++)
590 {
591 assert(resources[i]->target == PIPE_BUFFER);
592 assert(resources[i]->bind & PIPE_BIND_GLOBAL);
593
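/* The handle returned to the state tracker is the buffer's byte offset
 * inside the global memory pool (start_in_dw is in dwords). */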
594 *(handles[i]) = buffers[i]->chunk->start_in_dw * 4;
595 }
596
597 evergreen_set_rat(ctx->cs_shader_state.shader, 0, pool->bo, 0, pool->size_in_dw * 4);
598 evergreen_cs_set_vertex_buffer(ctx, 1, 0,
599 (struct pipe_resource*)pool->bo);
600 }
601
602 /**
603 * This function initializes all the compute specific registers that need to
604 * be initialized for each compute command stream. Registers that are common
605 * to both compute and 3D will be initialized at the beginning of each compute
606 * command stream by the start_cs_cmd atom. However, since the SET_CONTEXT_REG
607 * packet requires that the shader type bit be set, we must initialize all
608 * context registers needed for compute in this function. The registers
609 * initialized by the start_cs_cmd atom can be found in evergreen_state.c in the
610 * functions evergreen_init_atom_start_cs or cayman_init_atom_start_cs depending
611 * on the GPU family.
612 */
613 void evergreen_init_atom_start_compute_cs(struct r600_context *ctx)
614 {
615 struct r600_command_buffer *cb = &ctx->start_compute_cs_cmd;
616 int num_threads;
617 int num_stack_entries;
618
619 /* since all required registers are initialised in the
620 * start_compute_cs_cmd atom, we can EMIT_EARLY here.
621 */
622 r600_init_command_buffer(cb, 256);
623 cb->pkt_flags = RADEON_CP_PACKET3_COMPUTE_MODE;
624
625 /* This must be first. */
626 r600_store_value(cb, PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
627 r600_store_value(cb, 0x80000000);
628 r600_store_value(cb, 0x80000000);
629
630 /* We're setting config registers here. */
631 r600_store_value(cb, PKT3(PKT3_EVENT_WRITE, 0, 0));
632 r600_store_value(cb, EVENT_TYPE(EVENT_TYPE_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
633
634 switch (ctx->family) {
635 case CHIP_CEDAR:
636 default:
637 num_threads = 128;
638 num_stack_entries = 256;
639 break;
640 case CHIP_REDWOOD:
641 num_threads = 128;
642 num_stack_entries = 256;
643 break;
644 case CHIP_JUNIPER:
645 num_threads = 128;
646 num_stack_entries = 512;
647 break;
648 case CHIP_CYPRESS:
649 case CHIP_HEMLOCK:
650 num_threads = 128;
651 num_stack_entries = 512;
652 break;
653 case CHIP_PALM:
654 num_threads = 128;
655 num_stack_entries = 256;
656 break;
657 case CHIP_SUMO:
658 num_threads = 128;
659 num_stack_entries = 256;
660 break;
661 case CHIP_SUMO2:
662 num_threads = 128;
663 num_stack_entries = 512;
664 break;
665 case CHIP_BARTS:
666 num_threads = 128;
667 num_stack_entries = 512;
668 break;
669 case CHIP_TURKS:
670 num_threads = 128;
671 num_stack_entries = 256;
672 break;
673 case CHIP_CAICOS:
674 num_threads = 128;
675 num_stack_entries = 256;
676 break;
677 }
678
679 /* Config Registers */
680 if (ctx->chip_class < CAYMAN)
681 evergreen_init_common_regs(cb, ctx->chip_class, ctx->family,
682 ctx->screen->info.drm_minor);
683 else
684 cayman_init_common_regs(cb, ctx->chip_class, ctx->family,
685 ctx->screen->info.drm_minor);
686
687 /* The primitive type always needs to be POINTLIST for compute. */
688 r600_store_config_reg(cb, R_008958_VGT_PRIMITIVE_TYPE,
689 V_008958_DI_PT_POINTLIST);
690
691 if (ctx->chip_class < CAYMAN) {
692
693 /* These registers control which simds can be used by each stage.
694 * The default for these registers is 0xffffffff, which means
695 * all simds are available for each stage. It's possible we may
696 * want to play around with these in the future, but for now
697 * the default value is fine.
698 *
699 * R_008E20_SQ_STATIC_THREAD_MGMT1
700 * R_008E24_SQ_STATIC_THREAD_MGMT2
701 * R_008E28_SQ_STATIC_THREAD_MGMT3
702 */
703
704 /* XXX: We may need to adjust the thread and stack resource
705 * values for 3D/compute interop */
706
707 r600_store_config_reg_seq(cb, R_008C18_SQ_THREAD_RESOURCE_MGMT_1, 5);
708
709 /* R_008C18_SQ_THREAD_RESOURCE_MGMT_1
710 * Set the number of threads used by the PS/VS/GS/ES stage to
711 * 0.
712 */
713 r600_store_value(cb, 0);
714
715 /* R_008C1C_SQ_THREAD_RESOURCE_MGMT_2
716 * Set the number of threads used by the CS (aka LS) stage to
717 * the maximum number of threads and set the number of threads
718 * for the HS stage to 0. */
719 r600_store_value(cb, S_008C1C_NUM_LS_THREADS(num_threads));
720
721 /* R_008C20_SQ_STACK_RESOURCE_MGMT_1
722 * Set the Control Flow stack entries to 0 for PS/VS stages */
723 r600_store_value(cb, 0);
724
725 /* R_008C24_SQ_STACK_RESOURCE_MGMT_2
726 * Set the Control Flow stack entries to 0 for GS/ES stages */
727 r600_store_value(cb, 0);
728
729 /* R_008C28_SQ_STACK_RESOURCE_MGMT_3
730 * Set the Control Flow stack entries to 0 for the HS stage, and
731 * set it to the maximum value for the CS (aka LS) stage. */
732 r600_store_value(cb,
733 S_008C28_NUM_LS_STACK_ENTRIES(num_stack_entries));
734 }
735
736 /* Context Registers */
737
738 if (ctx->chip_class < CAYMAN) {
739 /* workaround for hw issues with dyn gpr - must set all limits
740 * to 240 instead of 0, 0x1e == 240 / 8
741 */
742 r600_store_context_reg(cb, R_028838_SQ_DYN_GPR_RESOURCE_LIMIT_1,
743 S_028838_PS_GPRS(0x1e) |
744 S_028838_VS_GPRS(0x1e) |
745 S_028838_GS_GPRS(0x1e) |
746 S_028838_ES_GPRS(0x1e) |
747 S_028838_HS_GPRS(0x1e) |
748 S_028838_LS_GPRS(0x1e));
749 }
750
751 /* XXX: Investigate setting bit 15, which is FAST_COMPUTE_MODE */
752 r600_store_context_reg(cb, R_028A40_VGT_GS_MODE,
753 S_028A40_COMPUTE_MODE(1) | S_028A40_PARTIAL_THD_AT_EOI(1));
754
755 r600_store_context_reg(cb, R_028B54_VGT_SHADER_STAGES_EN, 2/*CS_ON*/);
756
757 r600_store_context_reg(cb, R_0286E8_SPI_COMPUTE_INPUT_CNTL,
758 S_0286E8_TID_IN_GROUP_ENA
759 | S_0286E8_TGID_ENA
760 | S_0286E8_DISABLE_INDEX_PACK)
761 ;
762
763 /* The LOOP_CONST registers are an optimization for loops that allows
764 * you to store the initial counter, increment value, and maximum
765 * counter value in a register so that the hardware can calculate the
766 * correct number of iterations for the loop, so that you don't need
767 * to keep the loop counter in your shader code. We don't currently use
768 * this optimization, so we must keep track of the counter in the
769 * shader and use a break instruction to exit loops. However, the
770 * hardware still uses this register to determine when to exit a
771 * loop, so we need to initialize the counter to 0, set the increment
772 * value to 1, and set the maximum counter value to 4095 (0xfff), which
773 * is the maximum value allowed. This gives us a maximum of 4096
774 * iterations for our loops, but hopefully our break instruction will
775 * execute some time before the 4096th iteration.
776 */
777 eg_store_loop_const(cb, R_03A200_SQ_LOOP_CONST_0 + (160 * 4), 0x1000FFF);
778 }
779
780 void evergreen_init_compute_state_functions(struct r600_context *ctx)
781 {
782 ctx->context.create_compute_state = evergreen_create_compute_state;
783 ctx->context.delete_compute_state = evergreen_delete_compute_state;
784 ctx->context.bind_compute_state = evergreen_bind_compute_state;
785 // ctx->context.create_sampler_view = evergreen_compute_create_sampler_view;
786 ctx->context.set_compute_resources = evergreen_set_compute_resources;
787 ctx->context.set_compute_sampler_views = evergreen_set_cs_sampler_view;
788 ctx->context.bind_compute_sampler_states = evergreen_bind_compute_sampler_states;
789 ctx->context.set_global_binding = evergreen_set_global_binding;
790 ctx->context.launch_grid = evergreen_launch_grid;
791
792 /* We always use at least two vertex buffers for compute, one for
793 * parameters and one for global memory */
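/* 1 | 2 sets bits 0 and 1, i.e. vertex buffer 0 (kernel parameters, see
 * evergreen_compute_upload_input) and vertex buffer 1 (the global memory
 * pool, see evergreen_set_global_binding). */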
794 ctx->cs_vertex_buffer_state.enabled_mask =
795 ctx->cs_vertex_buffer_state.dirty_mask = 1 | 2;
796 }
797
798
799 struct pipe_resource *r600_compute_global_buffer_create(
800 struct pipe_screen *screen,
801 const struct pipe_resource *templ)
802 {
803 struct r600_resource_global* result = NULL;
804 struct r600_screen* rscreen = NULL;
805 int size_in_dw = 0;
806
807 assert(templ->target == PIPE_BUFFER);
808 assert(templ->bind & PIPE_BIND_GLOBAL);
809 assert(templ->array_size == 1 || templ->array_size == 0);
810 assert(templ->depth0 == 1 || templ->depth0 == 0);
811 assert(templ->height0 == 1 || templ->height0 == 0);
812
813 result = (struct r600_resource_global*)
814 CALLOC(sizeof(struct r600_resource_global), 1);
815 rscreen = (struct r600_screen*)screen;
816
817 COMPUTE_DBG(rscreen, "*** r600_compute_global_buffer_create\n");
818 COMPUTE_DBG(rscreen, "width = %u array_size = %u\n", templ->width0,
819 templ->array_size);
820
821 result->base.b.vtbl = &r600_global_buffer_vtbl;
822 result->base.b.b.screen = screen;
823 result->base.b.b = *templ;
824 pipe_reference_init(&result->base.b.b.reference, 1);
825
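/* The compute memory pool allocates in dwords, so round the requested byte
 * size up to a whole number of dwords. */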
826 size_in_dw = (templ->width0+3) / 4;
827
828 result->chunk = compute_memory_alloc(rscreen->global_pool, size_in_dw);
829
830 if (result->chunk == NULL)
831 {
832 free(result);
833 return NULL;
834 }
835
836 return &result->base.b.b;
837 }
838
839 void r600_compute_global_buffer_destroy(
840 struct pipe_screen *screen,
841 struct pipe_resource *res)
842 {
843 struct r600_resource_global* buffer = NULL;
844 struct r600_screen* rscreen = NULL;
845
846 assert(res->target == PIPE_BUFFER);
847 assert(res->bind & PIPE_BIND_GLOBAL);
848
849 buffer = (struct r600_resource_global*)res;
850 rscreen = (struct r600_screen*)screen;
851
852 compute_memory_free(rscreen->global_pool, buffer->chunk->id);
853
854 buffer->chunk = NULL;
855 free(res);
856 }
857
858 void *r600_compute_global_transfer_map(
859 struct pipe_context *ctx_,
860 struct pipe_resource *resource,
861 unsigned level,
862 unsigned usage,
863 const struct pipe_box *box,
864 struct pipe_transfer **ptransfer)
865 {
866 struct r600_context *rctx = (struct r600_context*)ctx_;
867 struct compute_memory_pool *pool = rctx->screen->global_pool;
868 struct pipe_transfer *transfer = util_slab_alloc(&rctx->pool_transfers);
869 struct r600_resource_global* buffer =
870 (struct r600_resource_global*)resource;
871 uint32_t* map;
872
873 compute_memory_finalize_pending(pool, ctx_);
874
875 assert(resource->target == PIPE_BUFFER);
876
877 COMPUTE_DBG(rctx->screen, "* r600_compute_global_transfer_map()\n"
878 "level = %u, usage = %u, box(x = %u, y = %u, z = %u "
879 "width = %u, height = %u, depth = %u)\n", level, usage,
880 box->x, box->y, box->z, box->width, box->height,
881 box->depth);
882
883 transfer->resource = resource;
884 transfer->level = level;
885 transfer->usage = usage;
886 transfer->box = *box;
887 transfer->stride = 0;
888 transfer->layer_stride = 0;
889
890 assert(transfer->resource->target == PIPE_BUFFER);
891 assert(transfer->resource->bind & PIPE_BIND_GLOBAL);
892 assert(transfer->box.x >= 0);
893 assert(transfer->box.y == 0);
894 assert(transfer->box.z == 0);
895
896 ///TODO: do it better, mapping is not possible if the pool is too big
897
898 COMPUTE_DBG(rctx->screen, "* r600_compute_global_transfer_map()\n");
899
900 if (!(map = r600_buffer_mmap_sync_with_rings(rctx, buffer->chunk->pool->bo, transfer->usage))) {
901 util_slab_free(&rctx->pool_transfers, transfer);
902 return NULL;
903 }
904
905 *ptransfer = transfer;
906
907 COMPUTE_DBG(rctx->screen, "Buffer: %p + %u (buffer offset in global memory) "
908 "+ %u (box.x)\n", map, buffer->chunk->start_in_dw, transfer->box.x);
909 return ((char*)(map + buffer->chunk->start_in_dw)) + transfer->box.x;
910 }
911
912 void r600_compute_global_transfer_unmap(
913 struct pipe_context *ctx_,
914 struct pipe_transfer* transfer)
915 {
916 struct r600_context *ctx = NULL;
917 struct r600_resource_global* buffer = NULL;
918
919 assert(transfer->resource->target == PIPE_BUFFER);
920 assert(transfer->resource->bind & PIPE_BIND_GLOBAL);
921
922 ctx = (struct r600_context *)ctx_;
923 buffer = (struct r600_resource_global*)transfer->resource;
924
925 COMPUTE_DBG(ctx->screen, "* r600_compute_global_transfer_unmap()\n");
926
927 ctx->ws->buffer_unmap(buffer->chunk->pool->bo->cs_buf);
928 util_slab_free(&ctx->pool_transfers, transfer);
929 }
930
931 void r600_compute_global_transfer_flush_region(
932 struct pipe_context *ctx_,
933 struct pipe_transfer *transfer,
934 const struct pipe_box *box)
935 {
936 assert(0 && "TODO");
937 }
938
939 void r600_compute_global_transfer_inline_write(
940 struct pipe_context *pipe,
941 struct pipe_resource *resource,
942 unsigned level,
943 unsigned usage,
944 const struct pipe_box *box,
945 const void *data,
946 unsigned stride,
947 unsigned layer_stride)
948 {
949 assert(0 && "TODO");
950 }