1 /*
2 * Copyright 2011 Adam Rak <adam.rak@streamnovation.com>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Adam Rak <adam.rak@streamnovation.com>
25 */
26
27 #include <stdio.h>
28 #include <errno.h>
29 #include "pipe/p_defines.h"
30 #include "pipe/p_state.h"
31 #include "pipe/p_context.h"
32 #include "util/u_blitter.h"
33 #include "util/u_double_list.h"
34 #include "util/u_transfer.h"
35 #include "util/u_surface.h"
36 #include "util/u_pack_color.h"
37 #include "util/u_memory.h"
38 #include "util/u_inlines.h"
39 #include "util/u_framebuffer.h"
40 #include "pipebuffer/pb_buffer.h"
41 #include "evergreend.h"
42 #include "r600_resource.h"
43 #include "r600_shader.h"
44 #include "r600_pipe.h"
45 #include "r600_formats.h"
46 #include "evergreen_compute.h"
47 #include "evergreen_compute_internal.h"
48 #include "compute_memory_pool.h"
49 #include "sb/sb_public.h"
50 #ifdef HAVE_OPENCL
51 #include "radeon_llvm_util.h"
52 #endif
53
54 /**
55 RAT0 is for global binding writes
56 VTX1 is for global binding reads
57
58 For writing images: RAT1...
59 For reading images: TEX2...
60 TEX2 and RAT1 are paired
61
62 TEX2... consumes the same fetch resources that VTX2... would consume
63
64 CONST0 and VTX0 are for parameters
65 CONST0 binds the smaller input parameter buffer and is used for constant
66 indexing; it is also constant cached
67 VTX0 is for indirect/non-constant indexing, or when the input is bigger than
68 the constant cache can handle
69
70 RATs are limited to 12, so we can bind at most 11 textures for writing
71 because we reserve RAT0 for global bindings. With byte addressing enabled,
72 we should reserve another one too => at most 10 image bindings for writing.
73
74 From NVIDIA OpenCL:
75 CL_DEVICE_MAX_READ_IMAGE_ARGS: 128
76 CL_DEVICE_MAX_WRITE_IMAGE_ARGS: 8
77
78 so 10 for writing is enough. 176 is the max for reading according to the docs.
79
80 Writable images should be listed first (< 10), so their id corresponds to RAT(id+1).
81 Writable images also consume TEX slots, and VTX slots because of linear indexing.
82
83 */
84
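/* Illustrative sketch, not driver code: how a surface index i from
 * set_compute_resources() maps onto the slots described above, assuming
 * RAT0 stays reserved for the global pool and VTX0/VTX1 for the parameter
 * buffer and the global buffer pool (see evergreen_set_compute_resources()
 * further down in this file).
 */
#if 0
static unsigned example_rat_id(unsigned surface_index)
{
	return surface_index + 1;	/* writable image i -> RAT(i + 1) */
}

static unsigned example_vtx_id(unsigned surface_index)
{
	return 2 + surface_index;	/* VTX0 = params, VTX1 = global pool */
}
#endif
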
85 struct r600_resource* r600_compute_buffer_alloc_vram(
86 struct r600_screen *screen,
87 unsigned size)
88 {
89 struct pipe_resource * buffer = NULL;
90 assert(size);
91
92 buffer = pipe_buffer_create(
93 (struct pipe_screen*) screen,
94 PIPE_BIND_CUSTOM,
95 PIPE_USAGE_IMMUTABLE,
96 size);
97
98 return (struct r600_resource *)buffer;
99 }
100
101
102 static void evergreen_set_rat(
103 struct r600_pipe_compute *pipe,
104 int id,
105 struct r600_resource* bo,
106 int start,
107 int size)
108 {
109 struct pipe_surface rat_templ;
110 struct r600_surface *surf = NULL;
111 struct r600_context *rctx = NULL;
112
113 assert(id < 12);
114 assert((size & 3) == 0);
115 assert((start & 0xFF) == 0);
116
117 rctx = pipe->ctx;
118
119 COMPUTE_DBG(rctx->screen, "bind rat: %i \n", id);
120
121 /* Create the RAT surface */
122 memset(&rat_templ, 0, sizeof(rat_templ));
123 rat_templ.format = PIPE_FORMAT_R32_UINT;
124 rat_templ.u.tex.level = 0;
125 rat_templ.u.tex.first_layer = 0;
126 rat_templ.u.tex.last_layer = 0;
127
128 /* Add the RAT to the list of color buffers */
129 pipe->ctx->framebuffer.state.cbufs[id] = pipe->ctx->b.b.create_surface(
130 (struct pipe_context *)pipe->ctx,
131 (struct pipe_resource *)bo, &rat_templ);
132
133 /* Update the number of color buffers */
134 pipe->ctx->framebuffer.state.nr_cbufs =
135 MAX2(id + 1, pipe->ctx->framebuffer.state.nr_cbufs);
136
137 /* Update the cb_target_mask
138 * XXX: I think this is a potential spot for bugs once we start doing
139 * GL interop. cb_target_mask may be modified in the 3D sections
140 * of this driver. */
141 pipe->ctx->compute_cb_target_mask |= (0xf << (id * 4));
142
143 surf = (struct r600_surface*)pipe->ctx->framebuffer.state.cbufs[id];
144 evergreen_init_color_surface_rat(rctx, surf);
145 }
146
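/* Worked example (illustrative): binding RAT id = 2 bumps nr_cbufs to at
 * least 3 and ORs 0xf << (2 * 4) = 0x00000F00 into compute_cb_target_mask,
 * enabling all four channels of color buffer 2 for the compute dispatch.
 */
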
147 static void evergreen_cs_set_vertex_buffer(
148 struct r600_context * rctx,
149 unsigned vb_index,
150 unsigned offset,
151 struct pipe_resource * buffer)
152 {
153 struct r600_vertexbuf_state *state = &rctx->cs_vertex_buffer_state;
154 struct pipe_vertex_buffer *vb = &state->vb[vb_index];
155 vb->stride = 1;
156 vb->buffer_offset = offset;
157 vb->buffer = buffer;
158 vb->user_buffer = NULL;
159
160 /* The vertex instructions in the compute shaders use the texture cache,
161 * so we need to invalidate it. */
162 rctx->b.flags |= R600_CONTEXT_INV_VERTEX_CACHE;
163 state->enabled_mask |= 1 << vb_index;
164 state->dirty_mask |= 1 << vb_index;
165 state->atom.dirty = true;
166 }
167
168 static void evergreen_cs_set_constant_buffer(
169 struct r600_context * rctx,
170 unsigned cb_index,
171 unsigned offset,
172 unsigned size,
173 struct pipe_resource * buffer)
174 {
175 struct pipe_constant_buffer cb;
176 cb.buffer_size = size;
177 cb.buffer_offset = offset;
178 cb.buffer = buffer;
179 cb.user_buffer = NULL;
180
181 rctx->b.b.set_constant_buffer(&rctx->b.b, PIPE_SHADER_COMPUTE, cb_index, &cb);
182 }
183
184 static const struct u_resource_vtbl r600_global_buffer_vtbl =
185 {
186 u_default_resource_get_handle, /* get_handle */
187 r600_compute_global_buffer_destroy, /* resource_destroy */
188 r600_compute_global_transfer_map, /* transfer_map */
189 r600_compute_global_transfer_flush_region,/* transfer_flush_region */
190 r600_compute_global_transfer_unmap, /* transfer_unmap */
191 r600_compute_global_transfer_inline_write /* transfer_inline_write */
192 };
193
194
195 void *evergreen_create_compute_state(
196 struct pipe_context *ctx_,
197 const struct pipe_compute_state *cso)
198 {
199 struct r600_context *ctx = (struct r600_context *)ctx_;
200 struct r600_pipe_compute *shader = CALLOC_STRUCT(r600_pipe_compute);
201
202 #ifdef HAVE_OPENCL
203 const struct pipe_llvm_program_header * header;
204 const unsigned char * code;
205 unsigned i;
206
207 shader->llvm_ctx = LLVMContextCreate();
208
209 COMPUTE_DBG(ctx->screen, "*** evergreen_create_compute_state\n");
210
211 header = cso->prog;
212 code = cso->prog + sizeof(struct pipe_llvm_program_header);
213 #endif
214
215 shader->ctx = (struct r600_context*)ctx;
216 shader->local_size = cso->req_local_mem;
217 shader->private_size = cso->req_private_mem;
218 shader->input_size = cso->req_input_mem;
219
220 #ifdef HAVE_OPENCL
221 shader->num_kernels = radeon_llvm_get_num_kernels(shader->llvm_ctx, code,
222 header->num_bytes);
223 shader->kernels = CALLOC(sizeof(struct r600_kernel), shader->num_kernels);
224
225 for (i = 0; i < shader->num_kernels; i++) {
226 struct r600_kernel *kernel = &shader->kernels[i];
227 kernel->llvm_module = radeon_llvm_get_kernel_module(shader->llvm_ctx, i,
228 code, header->num_bytes);
229 }
230 #endif
231 return shader;
232 }
233
234 void evergreen_delete_compute_state(struct pipe_context *ctx, void* state)
235 {
236 struct r600_pipe_compute *shader = (struct r600_pipe_compute *)state;
237
238 if (!shader)
239 return;
240
241 FREE(shader->kernels);
242
243 #ifdef HAVE_OPENCL
244 if (shader->llvm_ctx){
245 LLVMContextDispose(shader->llvm_ctx);
246 }
247 #endif
248
249 FREE(shader);
250 }
251
252 static void evergreen_bind_compute_state(struct pipe_context *ctx_, void *state)
253 {
254 struct r600_context *ctx = (struct r600_context *)ctx_;
255
256 COMPUTE_DBG(ctx->screen, "*** evergreen_bind_compute_state\n");
257
258 ctx->cs_shader_state.shader = (struct r600_pipe_compute *)state;
259 }
260
261 /* The kernel parameters are stored in a vtx buffer (ID=0). Besides the
262 * explicit kernel parameters, there are implicit parameters that need to be
263 * stored in the vertex buffer as well. Here is how these parameters are organized in
264 * the buffer:
265 *
266 * DWORDS 0-2: Number of work groups in each dimension (x,y,z)
267 * DWORDS 3-5: Number of global work items in each dimension (x,y,z)
268 * DWORDS 6-8: Number of work items within each work group in each dimension
269 * (x,y,z)
270 * DWORDS 9+ : Kernel parameters
271 */
272 void evergreen_compute_upload_input(
273 struct pipe_context *ctx_,
274 const uint *block_layout,
275 const uint *grid_layout,
276 const void *input)
277 {
278 struct r600_context *ctx = (struct r600_context *)ctx_;
279 struct r600_pipe_compute *shader = ctx->cs_shader_state.shader;
280 int i;
281 /* We need to reserve 9 dwords (36 bytes) for implicit kernel
282 * parameters.
283 */
284 unsigned input_size = shader->input_size + 36;
285 uint32_t * num_work_groups_start;
286 uint32_t * global_size_start;
287 uint32_t * local_size_start;
288 uint32_t * kernel_parameters_start;
289 struct pipe_box box;
290 struct pipe_transfer *transfer = NULL;
291
292 if (shader->input_size == 0) {
293 return;
294 }
295
296 if (!shader->kernel_param) {
297 /* Add space for the grid dimensions */
298 shader->kernel_param = (struct r600_resource *)
299 pipe_buffer_create(ctx_->screen, PIPE_BIND_CUSTOM,
300 PIPE_USAGE_IMMUTABLE, input_size);
301 }
302
303 u_box_1d(0, input_size, &box);
304 num_work_groups_start = ctx_->transfer_map(ctx_,
305 (struct pipe_resource*)shader->kernel_param,
306 0, PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD_RANGE,
307 &box, &transfer);
308 global_size_start = num_work_groups_start + (3 * (sizeof(uint) / 4));
309 local_size_start = global_size_start + (3 * (sizeof(uint) / 4));
310 kernel_parameters_start = local_size_start + (3 * (sizeof(uint) / 4));
311
312 /* Copy the work group size */
313 memcpy(num_work_groups_start, grid_layout, 3 * sizeof(uint));
314
315 /* Copy the global size */
316 for (i = 0; i < 3; i++) {
317 global_size_start[i] = grid_layout[i] * block_layout[i];
318 }
319
320 /* Copy the local dimensions */
321 memcpy(local_size_start, block_layout, 3 * sizeof(uint));
322
323 /* Copy the kernel inputs */
324 memcpy(kernel_parameters_start, input, shader->input_size);
325
326 for (i = 0; i < (input_size / 4); i++) {
327 COMPUTE_DBG(ctx->screen, "input %i : %i\n", i,
328 ((unsigned*)num_work_groups_start)[i]);
329 }
330
331 ctx_->transfer_unmap(ctx_, transfer);
332
333 /* ID=0 is reserved for the parameters */
334 evergreen_cs_set_constant_buffer(ctx, 0, 0, input_size,
335 (struct pipe_resource*)shader->kernel_param);
336 }
337
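/* Illustrative sketch (an assumption, not part of the driver): the buffer
 * written by evergreen_compute_upload_input(), viewed as a C struct.  The
 * field order matches the dword layout documented above the function.
 */
#if 0
struct example_compute_input {
	uint32_t num_work_groups[3];	/* DWORDS 0-2: grid_layout[]         */
	uint32_t global_size[3];	/* DWORDS 3-5: grid * block per axis */
	uint32_t local_size[3];		/* DWORDS 6-8: block_layout[]        */
	/* DWORDS 9+: the kernel parameters passed in by the state tracker. */
};
#endif
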
338 static void evergreen_emit_direct_dispatch(
339 struct r600_context *rctx,
340 const uint *block_layout, const uint *grid_layout)
341 {
342 int i;
343 struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
344 struct r600_pipe_compute *shader = rctx->cs_shader_state.shader;
345 unsigned num_waves;
346 unsigned num_pipes = rctx->screen->b.info.r600_max_pipes;
347 unsigned wave_divisor = (16 * num_pipes);
348 int group_size = 1;
349 int grid_size = 1;
350 unsigned lds_size = shader->local_size / 4 + shader->active_kernel->bc.nlds_dw;
351
352 /* Calculate group_size/grid_size */
353 for (i = 0; i < 3; i++) {
354 group_size *= block_layout[i];
355 }
356
357 for (i = 0; i < 3; i++) {
358 grid_size *= grid_layout[i];
359 }
360
361 /* num_waves = ceil((tg_size.x * tg_size.y * tg_size.z) / (16 * num_pipes)) */
362 num_waves = (block_layout[0] * block_layout[1] * block_layout[2] +
363 wave_divisor - 1) / wave_divisor;
364
365 COMPUTE_DBG(rctx->screen, "Using %u pipes, "
366 "%u wavefronts per thread block, "
367 "allocating %u dwords lds.\n",
368 num_pipes, num_waves, lds_size);
369
370 r600_write_config_reg(cs, R_008970_VGT_NUM_INDICES, group_size);
371
372 r600_write_config_reg_seq(cs, R_00899C_VGT_COMPUTE_START_X, 3);
373 radeon_emit(cs, 0); /* R_00899C_VGT_COMPUTE_START_X */
374 radeon_emit(cs, 0); /* R_0089A0_VGT_COMPUTE_START_Y */
375 radeon_emit(cs, 0); /* R_0089A4_VGT_COMPUTE_START_Z */
376
377 r600_write_config_reg(cs, R_0089AC_VGT_COMPUTE_THREAD_GROUP_SIZE,
378 group_size);
379
380 r600_write_compute_context_reg_seq(cs, R_0286EC_SPI_COMPUTE_NUM_THREAD_X, 3);
381 radeon_emit(cs, block_layout[0]); /* R_0286EC_SPI_COMPUTE_NUM_THREAD_X */
382 radeon_emit(cs, block_layout[1]); /* R_0286F0_SPI_COMPUTE_NUM_THREAD_Y */
383 radeon_emit(cs, block_layout[2]); /* R_0286F4_SPI_COMPUTE_NUM_THREAD_Z */
384
385 if (rctx->b.chip_class < CAYMAN) {
386 assert(lds_size <= 8192);
387 } else {
388 /* Cayman appears to have a slightly smaller limit, see the
389 * value of CM_R_0286FC_SPI_LDS_MGMT.NUM_LS_LDS */
390 assert(lds_size <= 8160);
391 }
392
393 r600_write_compute_context_reg(cs, CM_R_0288E8_SQ_LDS_ALLOC,
394 lds_size | (num_waves << 14));
395
396 /* Dispatch packet */
397 radeon_emit(cs, PKT3C(PKT3_DISPATCH_DIRECT, 3, 0));
398 radeon_emit(cs, grid_layout[0]);
399 radeon_emit(cs, grid_layout[1]);
400 radeon_emit(cs, grid_layout[2]);
401 /* VGT_DISPATCH_INITIATOR = COMPUTE_SHADER_EN */
402 radeon_emit(cs, 1);
403 }
404
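/* Worked example (illustrative): with block_layout = {16, 16, 1} and
 * r600_max_pipes = 8, wave_divisor = 16 * 8 = 128 and the thread group has
 * 16 * 16 * 1 = 256 items, so num_waves = ceil(256 / 128) = 2 wavefronts
 * per thread group.
 */
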
405 static void compute_emit_cs(struct r600_context *ctx, const uint *block_layout,
406 const uint *grid_layout)
407 {
408 struct radeon_winsys_cs *cs = ctx->b.rings.gfx.cs;
409 int i;
410
411 /* make sure that the gfx ring is the only one active */
412 if (ctx->b.rings.dma.cs) {
413 ctx->b.rings.dma.flush(ctx, RADEON_FLUSH_ASYNC);
414 }
415
416 /* Initialize all the compute-related registers.
417 *
418 * See evergreen_init_atom_start_compute_cs() in this file for the list
419 * of registers initialized by the start_compute_cs_cmd atom.
420 */
421 r600_emit_command_buffer(cs, &ctx->start_compute_cs_cmd);
422
423 ctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE | R600_CONTEXT_FLUSH_AND_INV;
424 r600_flush_emit(ctx);
425
426 /* Emit colorbuffers. */
427 /* XXX support more than 8 colorbuffers (the offsets are not a multiple of 0x3C for CB8-11) */
428 for (i = 0; i < 8 && i < ctx->framebuffer.state.nr_cbufs; i++) {
429 struct r600_surface *cb = (struct r600_surface*)ctx->framebuffer.state.cbufs[i];
430 unsigned reloc = r600_context_bo_reloc(&ctx->b, &ctx->b.rings.gfx,
431 (struct r600_resource*)cb->base.texture,
432 RADEON_USAGE_READWRITE);
433
434 r600_write_compute_context_reg_seq(cs, R_028C60_CB_COLOR0_BASE + i * 0x3C, 7);
435 radeon_emit(cs, cb->cb_color_base); /* R_028C60_CB_COLOR0_BASE */
436 radeon_emit(cs, cb->cb_color_pitch); /* R_028C64_CB_COLOR0_PITCH */
437 radeon_emit(cs, cb->cb_color_slice); /* R_028C68_CB_COLOR0_SLICE */
438 radeon_emit(cs, cb->cb_color_view); /* R_028C6C_CB_COLOR0_VIEW */
439 radeon_emit(cs, cb->cb_color_info); /* R_028C70_CB_COLOR0_INFO */
440 radeon_emit(cs, cb->cb_color_attrib); /* R_028C74_CB_COLOR0_ATTRIB */
441 radeon_emit(cs, cb->cb_color_dim); /* R_028C78_CB_COLOR0_DIM */
442
443 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C60_CB_COLOR0_BASE */
444 radeon_emit(cs, reloc);
445
446 if (!ctx->keep_tiling_flags) {
447 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C70_CB_COLOR0_INFO */
448 radeon_emit(cs, reloc);
449 }
450
451 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C74_CB_COLOR0_ATTRIB */
452 radeon_emit(cs, reloc);
453 }
454 if (ctx->keep_tiling_flags) {
455 for (; i < 8 ; i++) {
456 r600_write_compute_context_reg(cs, R_028C70_CB_COLOR0_INFO + i * 0x3C,
457 S_028C70_FORMAT(V_028C70_COLOR_INVALID));
458 }
459 for (; i < 12; i++) {
460 r600_write_compute_context_reg(cs, R_028E50_CB_COLOR8_INFO + (i - 8) * 0x1C,
461 S_028C70_FORMAT(V_028C70_COLOR_INVALID));
462 }
463 }
464
465 /* Set CB_TARGET_MASK XXX: Use cb_misc_state */
466 r600_write_compute_context_reg(cs, R_028238_CB_TARGET_MASK,
467 ctx->compute_cb_target_mask);
468
469
470 /* Emit vertex buffer state */
471 ctx->cs_vertex_buffer_state.atom.num_dw = 12 * util_bitcount(ctx->cs_vertex_buffer_state.dirty_mask);
472 r600_emit_atom(ctx, &ctx->cs_vertex_buffer_state.atom);
473
474 /* Emit constant buffer state */
475 r600_emit_atom(ctx, &ctx->constbuf_state[PIPE_SHADER_COMPUTE].atom);
476
477 /* Emit compute shader state */
478 r600_emit_atom(ctx, &ctx->cs_shader_state.atom);
479
480 /* Emit dispatch state and dispatch packet */
481 evergreen_emit_direct_dispatch(ctx, block_layout, grid_layout);
482
483 /* XXX evergreen_flush_emit() hardcodes the CP_COHER_SIZE to 0xffffffff
484 */
485 ctx->b.flags |= R600_CONTEXT_INV_CONST_CACHE |
486 R600_CONTEXT_INV_VERTEX_CACHE |
487 R600_CONTEXT_INV_TEX_CACHE;
488 r600_flush_emit(ctx);
489 ctx->b.flags = 0;
490
491 if (ctx->b.chip_class >= CAYMAN) {
492 cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
493 cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_CS_PARTIAL_FLUSH) | EVENT_INDEX(4);
494 /* DEALLOC_STATE prevents the GPU from hanging when a
495 * SURFACE_SYNC packet is emitted some time after a DISPATCH_DIRECT
496 * with any of the CB*_DEST_BASE_ENA or DB_DEST_BASE_ENA bits set.
497 */
498 cs->buf[cs->cdw++] = PKT3C(PKT3_DEALLOC_STATE, 0, 0);
499 cs->buf[cs->cdw++] = 0;
500 }
501
502 #if 0
503 COMPUTE_DBG(ctx->screen, "cdw: %i\n", cs->cdw);
504 for (i = 0; i < cs->cdw; i++) {
505 COMPUTE_DBG(ctx->screen, "%4i : 0x%08X\n", i, cs->buf[i]);
506 }
507 #endif
508
509 }
510
511
512 /**
513 * Emit function for r600_cs_shader_state atom
514 */
515 void evergreen_emit_cs_shader(
516 struct r600_context *rctx,
517 struct r600_atom *atom)
518 {
519 struct r600_cs_shader_state *state =
520 (struct r600_cs_shader_state*)atom;
521 struct r600_pipe_compute *shader = state->shader;
522 struct r600_kernel *kernel = &shader->kernels[state->kernel_index];
523 struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
524 uint64_t va;
525
526 va = r600_resource_va(&rctx->screen->b.b, &kernel->code_bo->b.b);
527
528 r600_write_compute_context_reg_seq(cs, R_0288D0_SQ_PGM_START_LS, 3);
529 radeon_emit(cs, va >> 8); /* R_0288D0_SQ_PGM_START_LS */
530 radeon_emit(cs, /* R_0288D4_SQ_PGM_RESOURCES_LS */
531 S_0288D4_NUM_GPRS(kernel->bc.ngpr)
532 | S_0288D4_STACK_SIZE(kernel->bc.nstack));
533 radeon_emit(cs, 0); /* R_0288D8_SQ_PGM_RESOURCES_LS_2 */
534
535 radeon_emit(cs, PKT3C(PKT3_NOP, 0, 0));
536 radeon_emit(cs, r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx,
537 kernel->code_bo, RADEON_USAGE_READ));
538 }
539
540 static void evergreen_launch_grid(
541 struct pipe_context *ctx_,
542 const uint *block_layout, const uint *grid_layout,
543 uint32_t pc, const void *input)
544 {
545 struct r600_context *ctx = (struct r600_context *)ctx_;
546
547 struct r600_pipe_compute *shader = ctx->cs_shader_state.shader;
548 struct r600_kernel *kernel = &shader->kernels[pc];
549
550 COMPUTE_DBG(ctx->screen, "*** evergreen_launch_grid: pc = %u\n", pc);
551
552 #ifdef HAVE_OPENCL
553
554 if (!kernel->code_bo) {
555 void *p;
556 struct r600_bytecode *bc = &kernel->bc;
557 LLVMModuleRef mod = kernel->llvm_module;
558 boolean use_kill = false;
559 bool dump = (ctx->screen->b.debug_flags & DBG_CS) != 0;
560 unsigned use_sb = ctx->screen->b.debug_flags & DBG_SB_CS;
561 unsigned sb_disasm = use_sb ||
562 (ctx->screen->b.debug_flags & DBG_SB_DISASM);
563
564 r600_bytecode_init(bc, ctx->b.chip_class, ctx->b.family,
565 ctx->screen->has_compressed_msaa_texturing);
566 bc->type = TGSI_PROCESSOR_COMPUTE;
567 bc->isa = ctx->isa;
568 r600_llvm_compile(mod, ctx->b.family, bc, &use_kill, dump);
569
570 if (dump && !sb_disasm) {
571 r600_bytecode_disasm(bc);
572 } else if ((dump && sb_disasm) || use_sb) {
573 if (r600_sb_bytecode_process(ctx, bc, NULL, dump, use_sb))
574 R600_ERR("r600_sb_bytecode_process failed!\n");
575 }
576
577 kernel->code_bo = r600_compute_buffer_alloc_vram(ctx->screen,
578 kernel->bc.ndw * 4);
579 p = r600_buffer_map_sync_with_rings(&ctx->b, kernel->code_bo, PIPE_TRANSFER_WRITE);
580 memcpy(p, kernel->bc.bytecode, kernel->bc.ndw * 4);
581 ctx->b.ws->buffer_unmap(kernel->code_bo->cs_buf);
582 }
583 #endif
584 shader->active_kernel = kernel;
585 ctx->cs_shader_state.kernel_index = pc;
586 evergreen_compute_upload_input(ctx_, block_layout, grid_layout, input);
587 compute_emit_cs(ctx, block_layout, grid_layout);
588 }
589
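/* Minimal usage sketch (an assumption, not state-tracker code): dispatching
 * kernel index 0 with a 16x16x1 work-group over an 8x8x1 grid.  The helper
 * name is hypothetical; only the launch_grid() signature comes from the
 * gallium interface implemented above.
 */
#if 0
static void example_launch(struct pipe_context *pipe, const void *kernel_args)
{
	const uint block[3] = { 16, 16, 1 };
	const uint grid[3]  = { 8, 8, 1 };

	pipe->launch_grid(pipe, block, grid, 0 /* pc = kernel index */,
			  kernel_args);
}
#endif
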
590 static void evergreen_set_compute_resources(struct pipe_context * ctx_,
591 unsigned start, unsigned count,
592 struct pipe_surface ** surfaces)
593 {
594 struct r600_context *ctx = (struct r600_context *)ctx_;
595 struct r600_surface **resources = (struct r600_surface **)surfaces;
596
597 COMPUTE_DBG(ctx->screen, "*** evergreen_set_compute_resources: start = %u count = %u\n",
598 start, count);
599
600 for (int i = 0; i < count; i++) {
601 /* The first two vertex buffers are reserved for parameters and
602 * global buffers. */
603 unsigned vtx_id = 2 + i;
604 if (resources[i]) {
605 struct r600_resource_global *buffer =
606 (struct r600_resource_global*)
607 resources[i]->base.texture;
608 if (resources[i]->base.writable) {
609 assert(i+1 < 12);
610
611 evergreen_set_rat(ctx->cs_shader_state.shader, i+1,
612 (struct r600_resource *)resources[i]->base.texture,
613 buffer->chunk->start_in_dw*4,
614 resources[i]->base.texture->width0);
615 }
616
617 evergreen_cs_set_vertex_buffer(ctx, vtx_id,
618 buffer->chunk->start_in_dw * 4,
619 resources[i]->base.texture);
620 }
621 }
622 }
623
624 void evergreen_set_cs_sampler_view(struct pipe_context *ctx_,
625 unsigned start_slot, unsigned count,
626 struct pipe_sampler_view **views)
627 {
628 struct r600_pipe_sampler_view **resource =
629 (struct r600_pipe_sampler_view **)views;
630
631 for (int i = 0; i < count; i++) {
632 if (resource[i]) {
633 assert(i+1 < 12);
634 /* XXX: Implement */
635 assert(!"Compute samplers not implemented.");
636 /* FETCH0 = VTX0 (param buffer),
637  * FETCH1 = VTX1 (global buffer pool), FETCH2... = TEX */
638 }
639 }
640 }
641
642
643 static void evergreen_set_global_binding(
644 struct pipe_context *ctx_, unsigned first, unsigned n,
645 struct pipe_resource **resources,
646 uint32_t **handles)
647 {
648 struct r600_context *ctx = (struct r600_context *)ctx_;
649 struct compute_memory_pool *pool = ctx->screen->global_pool;
650 struct r600_resource_global **buffers =
651 (struct r600_resource_global **)resources;
652
653 COMPUTE_DBG(ctx->screen, "*** evergreen_set_global_binding first = %u n = %u\n",
654 first, n);
655
656 if (!resources) {
657 /* XXX: Unset */
658 return;
659 }
660
661 compute_memory_finalize_pending(pool, ctx_);
662
663 for (int i = 0; i < n; i++)
664 {
665 uint32_t buffer_offset;
666 uint32_t handle;
667 assert(resources[i]->target == PIPE_BUFFER);
668 assert(resources[i]->bind & PIPE_BIND_GLOBAL);
669
670 buffer_offset = util_le32_to_cpu(*(handles[i]));
671 handle = buffer_offset + buffers[i]->chunk->start_in_dw * 4;
672
673 *(handles[i]) = util_cpu_to_le32(handle);
674 }
675
676 evergreen_set_rat(ctx->cs_shader_state.shader, 0, pool->bo, 0, pool->size_in_dw * 4);
677 evergreen_cs_set_vertex_buffer(ctx, 1, 0,
678 (struct pipe_resource*)pool->bo);
679 }
680
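/* Minimal usage sketch (an assumption, not clover code): binding one global
 * buffer and reading back its pool offset through handles[].  On return,
 * *handles[0] holds the requested offset plus chunk->start_in_dw * 4, stored
 * little-endian, which is what the kernel then uses as the buffer address.
 */
#if 0
static uint32_t example_bind_one_global(struct pipe_context *pipe,
					struct pipe_resource *res)
{
	uint32_t handle = 0;	/* in: offset within the buffer itself */
	uint32_t *handles[1] = { &handle };
	struct pipe_resource *resources[1] = { res };

	pipe->set_global_binding(pipe, 0, 1, resources, handles);
	return util_le32_to_cpu(handle);
}
#endif
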
681 /**
682 * This function initializes all the compute specific registers that need to
683 * be initialized for each compute command stream. Registers that are common
684 * to both compute and 3D will be initialized at the beginning of each compute
685 * command stream by the start_cs_cmd atom. However, since the SET_CONTEXT_REG
686 * packet requires that the shader type bit be set, we must initialize all
687 * context registers needed for compute in this function. The registers
688 * initialized by the start_cs_cmd atom can be found in evergreen_state.c in the
689 * functions evergreen_init_atom_start_cs or cayman_init_atom_start_cs depending
690 * on the GPU family.
691 */
692 void evergreen_init_atom_start_compute_cs(struct r600_context *ctx)
693 {
694 struct r600_command_buffer *cb = &ctx->start_compute_cs_cmd;
695 int num_threads;
696 int num_stack_entries;
697
698 /* since all required registers are initialised in the
699 * start_compute_cs_cmd atom, we can EMIT_EARLY here.
700 */
701 r600_init_command_buffer(cb, 256);
702 cb->pkt_flags = RADEON_CP_PACKET3_COMPUTE_MODE;
703
704 /* This must be first. */
705 r600_store_value(cb, PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
706 r600_store_value(cb, 0x80000000);
707 r600_store_value(cb, 0x80000000);
708
709 /* We're setting config registers here. */
710 r600_store_value(cb, PKT3(PKT3_EVENT_WRITE, 0, 0));
711 r600_store_value(cb, EVENT_TYPE(EVENT_TYPE_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
712
713 switch (ctx->b.family) {
714 case CHIP_CEDAR:
715 default:
716 num_threads = 128;
717 num_stack_entries = 256;
718 break;
719 case CHIP_REDWOOD:
720 num_threads = 128;
721 num_stack_entries = 256;
722 break;
723 case CHIP_JUNIPER:
724 num_threads = 128;
725 num_stack_entries = 512;
726 break;
727 case CHIP_CYPRESS:
728 case CHIP_HEMLOCK:
729 num_threads = 128;
730 num_stack_entries = 512;
731 break;
732 case CHIP_PALM:
733 num_threads = 128;
734 num_stack_entries = 256;
735 break;
736 case CHIP_SUMO:
737 num_threads = 128;
738 num_stack_entries = 256;
739 break;
740 case CHIP_SUMO2:
741 num_threads = 128;
742 num_stack_entries = 512;
743 break;
744 case CHIP_BARTS:
745 num_threads = 128;
746 num_stack_entries = 512;
747 break;
748 case CHIP_TURKS:
749 num_threads = 128;
750 num_stack_entries = 256;
751 break;
752 case CHIP_CAICOS:
753 num_threads = 128;
754 num_stack_entries = 256;
755 break;
756 }
757
758 /* Config Registers */
759 if (ctx->b.chip_class < CAYMAN)
760 evergreen_init_common_regs(cb, ctx->b.chip_class, ctx->b.family,
761 ctx->screen->b.info.drm_minor);
762 else
763 cayman_init_common_regs(cb, ctx->b.chip_class, ctx->b.family,
764 ctx->screen->b.info.drm_minor);
765
766 /* The primitive type always needs to be POINTLIST for compute. */
767 r600_store_config_reg(cb, R_008958_VGT_PRIMITIVE_TYPE,
768 V_008958_DI_PT_POINTLIST);
769
770 if (ctx->b.chip_class < CAYMAN) {
771
772 /* These registers control which simds can be used by each stage.
773 * The default for these registers is 0xffffffff, which means
774 * all simds are available for each stage. It's possible we may
775 * want to play around with these in the future, but for now
776 * the default value is fine.
777 *
778 * R_008E20_SQ_STATIC_THREAD_MGMT1
779 * R_008E24_SQ_STATIC_THREAD_MGMT2
780 * R_008E28_SQ_STATIC_THREAD_MGMT3
781 */
782
783 /* XXX: We may need to adjust the thread and stack resource
784 * values for 3D/compute interop */
785
786 r600_store_config_reg_seq(cb, R_008C18_SQ_THREAD_RESOURCE_MGMT_1, 5);
787
788 /* R_008C18_SQ_THREAD_RESOURCE_MGMT_1
789 * Set the number of threads used by the PS/VS/GS/ES stage to
790 * 0.
791 */
792 r600_store_value(cb, 0);
793
794 /* R_008C1C_SQ_THREAD_RESOURCE_MGMT_2
795 * Set the number of threads used by the CS (aka LS) stage to
796 * the maximum number of threads and set the number of threads
797 * for the HS stage to 0. */
798 r600_store_value(cb, S_008C1C_NUM_LS_THREADS(num_threads));
799
800 /* R_008C20_SQ_STACK_RESOURCE_MGMT_1
801 * Set the Control Flow stack entries to 0 for PS/VS stages */
802 r600_store_value(cb, 0);
803
804 /* R_008C24_SQ_STACK_RESOURCE_MGMT_2
805 * Set the Control Flow stack entries to 0 for GS/ES stages */
806 r600_store_value(cb, 0);
807
808 /* R_008C28_SQ_STACK_RESOURCE_MGMT_3
809 * Set the Control Flow stack entries to 0 for the HS stage, and
810 * set it to the maximum value for the CS (aka LS) stage. */
811 r600_store_value(cb,
812 S_008C28_NUM_LS_STACK_ENTRIES(num_stack_entries));
813 }
814 /* Give the compute shader all the available LDS space.
815 * NOTE: This only sets the maximum number of dwords that a compute
816 * shader can allocate. When a shader is executed, we still need to
817 * allocate the appropriate amount of LDS dwords using the
818 * CM_R_0288E8_SQ_LDS_ALLOC register.
819 */
820 if (ctx->b.chip_class < CAYMAN) {
821 r600_store_config_reg(cb, R_008E2C_SQ_LDS_RESOURCE_MGMT,
822 S_008E2C_NUM_PS_LDS(0x0000) | S_008E2C_NUM_LS_LDS(8192));
823 } else {
824 r600_store_context_reg(cb, CM_R_0286FC_SPI_LDS_MGMT,
825 S_0286FC_NUM_PS_LDS(0) |
826 S_0286FC_NUM_LS_LDS(255)); /* 255 * 32 = 8160 dwords */
827 }
828
829 /* Context Registers */
830
831 if (ctx->b.chip_class < CAYMAN) {
832 /* workaround for hw issues with dyn gpr - must set all limits
833 * to 240 instead of 0, 0x1e == 240 / 8
834 */
835 r600_store_context_reg(cb, R_028838_SQ_DYN_GPR_RESOURCE_LIMIT_1,
836 S_028838_PS_GPRS(0x1e) |
837 S_028838_VS_GPRS(0x1e) |
838 S_028838_GS_GPRS(0x1e) |
839 S_028838_ES_GPRS(0x1e) |
840 S_028838_HS_GPRS(0x1e) |
841 S_028838_LS_GPRS(0x1e));
842 }
843
844 /* XXX: Investigate setting bit 15, which is FAST_COMPUTE_MODE */
845 r600_store_context_reg(cb, R_028A40_VGT_GS_MODE,
846 S_028A40_COMPUTE_MODE(1) | S_028A40_PARTIAL_THD_AT_EOI(1));
847
848 r600_store_context_reg(cb, R_028B54_VGT_SHADER_STAGES_EN, 2/*CS_ON*/);
849
850 r600_store_context_reg(cb, R_0286E8_SPI_COMPUTE_INPUT_CNTL,
851 S_0286E8_TID_IN_GROUP_ENA
852 | S_0286E8_TGID_ENA
853 | S_0286E8_DISABLE_INDEX_PACK);
854
855
856 /* The LOOP_CONST registers are an optimization for loops that allows
857 * you to store the initial counter, increment value, and maximum
858 * counter value in a register so that the hardware can calculate the
859 * correct number of iterations for the loop, and you don't need
860 * to keep the loop counter in your shader code. We don't currently use
861 * this optimization, so we must keep track of the counter in the
862 * shader and use a break instruction to exit loops. However, the
863 * hardware still uses this register to determine when to exit a
864 * loop, so we need to initialize the counter to 0, set the increment
865 * value to 1 and the maximum counter value to 4095 (0xfff), which
866 * is the maximum value allowed. This gives us a maximum of 4096
867 * iterations for our loops, but hopefully our break instruction will
868 * execute some time before the 4096th iteration.
869 */
870 eg_store_loop_const(cb, R_03A200_SQ_LOOP_CONST_0 + (160 * 4), 0x1000FFF);
871 }
872
873 void evergreen_init_compute_state_functions(struct r600_context *ctx)
874 {
875 ctx->b.b.create_compute_state = evergreen_create_compute_state;
876 ctx->b.b.delete_compute_state = evergreen_delete_compute_state;
877 ctx->b.b.bind_compute_state = evergreen_bind_compute_state;
878 // ctx->context.create_sampler_view = evergreen_compute_create_sampler_view;
879 ctx->b.b.set_compute_resources = evergreen_set_compute_resources;
880 ctx->b.b.set_global_binding = evergreen_set_global_binding;
881 ctx->b.b.launch_grid = evergreen_launch_grid;
882
883 /* We always use at least one vertex buffer for parameters (id = 1) */
884 ctx->cs_vertex_buffer_state.enabled_mask =
885 ctx->cs_vertex_buffer_state.dirty_mask = 0x2;
886 }
887
888 struct pipe_resource *r600_compute_global_buffer_create(
889 struct pipe_screen *screen,
890 const struct pipe_resource *templ)
891 {
892 struct r600_resource_global* result = NULL;
893 struct r600_screen* rscreen = NULL;
894 int size_in_dw = 0;
895
896 assert(templ->target == PIPE_BUFFER);
897 assert(templ->bind & PIPE_BIND_GLOBAL);
898 assert(templ->array_size == 1 || templ->array_size == 0);
899 assert(templ->depth0 == 1 || templ->depth0 == 0);
900 assert(templ->height0 == 1 || templ->height0 == 0);
901
902 result = (struct r600_resource_global*)
903 CALLOC(sizeof(struct r600_resource_global), 1);
904 rscreen = (struct r600_screen*)screen;
905
906 COMPUTE_DBG(rscreen, "*** r600_compute_global_buffer_create\n");
907 COMPUTE_DBG(rscreen, "width = %u array_size = %u\n", templ->width0,
908 templ->array_size);
909
910 result->base.b.vtbl = &r600_global_buffer_vtbl;
911 result->base.b.b.screen = screen;
912 result->base.b.b = *templ;
913 pipe_reference_init(&result->base.b.b.reference, 1);
914
915 size_in_dw = (templ->width0+3) / 4;
916
917 result->chunk = compute_memory_alloc(rscreen->global_pool, size_in_dw);
918
919 if (result->chunk == NULL)
920 {
921 free(result);
922 return NULL;
923 }
924
925 return &result->base.b.b;
926 }
927
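/* Minimal usage sketch (an assumption): how a caller would request a global
 * buffer that is expected to reach this function through the screen's
 * resource_create() hook.  The helper name is hypothetical.
 */
#if 0
static struct pipe_resource *example_create_global(struct pipe_screen *screen,
						   unsigned bytes)
{
	struct pipe_resource templ;

	memset(&templ, 0, sizeof(templ));
	templ.target = PIPE_BUFFER;
	templ.bind = PIPE_BIND_GLOBAL;
	templ.format = PIPE_FORMAT_R8_UNORM;
	templ.width0 = bytes;
	templ.height0 = 1;
	templ.depth0 = 1;
	templ.array_size = 1;

	return screen->resource_create(screen, &templ);
}
#endif
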
928 void r600_compute_global_buffer_destroy(
929 struct pipe_screen *screen,
930 struct pipe_resource *res)
931 {
932 struct r600_resource_global* buffer = NULL;
933 struct r600_screen* rscreen = NULL;
934
935 assert(res->target == PIPE_BUFFER);
936 assert(res->bind & PIPE_BIND_GLOBAL);
937
938 buffer = (struct r600_resource_global*)res;
939 rscreen = (struct r600_screen*)screen;
940
941 compute_memory_free(rscreen->global_pool, buffer->chunk->id);
942
943 buffer->chunk = NULL;
944 free(res);
945 }
946
947 void *r600_compute_global_transfer_map(
948 struct pipe_context *ctx_,
949 struct pipe_resource *resource,
950 unsigned level,
951 unsigned usage,
952 const struct pipe_box *box,
953 struct pipe_transfer **ptransfer)
954 {
955 struct r600_context *rctx = (struct r600_context*)ctx_;
956 struct compute_memory_pool *pool = rctx->screen->global_pool;
957 struct r600_resource_global* buffer =
958 (struct r600_resource_global*)resource;
959
960 COMPUTE_DBG(rctx->screen, "* r600_compute_global_transfer_map()\n"
961 "level = %u, usage = %u, box(x = %u, y = %u, z = %u "
962 "width = %u, height = %u, depth = %u)\n", level, usage,
963 box->x, box->y, box->z, box->width, box->height,
964 box->depth);
965 COMPUTE_DBG(rctx->screen, "Buffer id = %u offset = "
966 "%u (box.x)\n", buffer->chunk->id, box->x);
967
968
969 compute_memory_finalize_pending(pool, ctx_);
970
971 assert(resource->target == PIPE_BUFFER);
972 assert(resource->bind & PIPE_BIND_GLOBAL);
973 assert(box->x >= 0);
974 assert(box->y == 0);
975 assert(box->z == 0);
976
977 ///TODO: do it better, mapping is not possible if the pool is too big
978 return pipe_buffer_map_range(ctx_, (struct pipe_resource*)buffer->chunk->pool->bo,
979 box->x + (buffer->chunk->start_in_dw * 4),
980 box->width, usage, ptransfer);
981 }
982
983 void r600_compute_global_transfer_unmap(
984 struct pipe_context *ctx_,
985 struct pipe_transfer* transfer)
986 {
987 /* struct r600_resource_global are not real resources, they just map
988 * to an offset within the compute memory pool. The function
989 * r600_compute_global_transfer_map() maps the memory pool
990 * resource rather than the struct r600_resource_global passed to
991 * it as an argument and then initializes ptransfer->resource with
992 * the memory pool resource (via pipe_buffer_map_range).
993 * When transfer_unmap is called it uses the memory pool's
994 * vtable which calls r600_buffer_transfer_unmap() rather than
995 * this function.
996 */
997 assert (!"This function should not be called");
998 }
999
1000 void r600_compute_global_transfer_flush_region(
1001 struct pipe_context *ctx_,
1002 struct pipe_transfer *transfer,
1003 const struct pipe_box *box)
1004 {
1005 assert(0 && "TODO");
1006 }
1007
1008 void r600_compute_global_transfer_inline_write(
1009 struct pipe_context *pipe,
1010 struct pipe_resource *resource,
1011 unsigned level,
1012 unsigned usage,
1013 const struct pipe_box *box,
1014 const void *data,
1015 unsigned stride,
1016 unsigned layer_stride)
1017 {
1018 assert(0 && "TODO");
1019 }