src/gallium/drivers/r600/evergreen_compute.c
1 /*
2 * Copyright 2011 Adam Rak <adam.rak@streamnovation.com>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Adam Rak <adam.rak@streamnovation.com>
25 */
26
27 #include <stdio.h>
28 #include <errno.h>
29 #include "pipe/p_defines.h"
30 #include "pipe/p_state.h"
31 #include "pipe/p_context.h"
32 #include "util/u_blitter.h"
33 #include "util/u_double_list.h"
34 #include "util/u_transfer.h"
35 #include "util/u_surface.h"
36 #include "util/u_pack_color.h"
37 #include "util/u_memory.h"
38 #include "util/u_inlines.h"
39 #include "util/u_framebuffer.h"
40 #include "pipebuffer/pb_buffer.h"
41 #include "evergreend.h"
42 #include "r600_resource.h"
43 #include "r600_shader.h"
44 #include "r600_pipe.h"
45 #include "r600_formats.h"
46 #include "evergreen_compute.h"
47 #include "evergreen_compute_internal.h"
48 #include "compute_memory_pool.h"
49 #ifdef HAVE_OPENCL
50 #include "radeon_llvm_util.h"
51 #endif
52
53 /**
54 RAT0 is for global binding write
55 VTX1 is for global binding read
56
57 for writing images RAT1...
58 for reading images TEX2...
59 TEX2-RAT1 is paired
60
61 TEX2... consumes the same fetch resources that VTX2... would consume
62
63 CONST0 and VTX0 are for parameters
64 CONST0 binds the smaller input parameter buffer, is used for constant indexing,
65 and is also constant cached
66 VTX0 is for indirect/non-constant indexing, or if the input is bigger than
67 the constant cache can handle
68
69 RATs are limited to 12, so we can bind at most 11 textures for writing
70 because we reserve RAT0 for global bindings. With byte addressing enabled,
71 we should reserve another one too => at most 10 image bindings for writing.
72
73 from Nvidia OpenCL:
74 CL_DEVICE_MAX_READ_IMAGE_ARGS: 128
75 CL_DEVICE_MAX_WRITE_IMAGE_ARGS: 8
76
77 so 10 for writing is enough. 176 is the max for reading according to the docs
78
79 writable images should be listed first (< 10), so their id corresponds to RAT(id+1)
80 writable images also consume TEX slots, and VTX slots too, because of linear indexing
81
82 */
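/*
 * Purely illustrative sketch, not used by the driver: restating the mapping
 * described above, a writable image with user-visible index `id` (id < 10)
 * lands in RAT(id + 1), since RAT0 stays reserved for the global binding.
 * The helper name below is hypothetical.
 */
#if 0
static inline unsigned writable_image_to_rat(unsigned image_id)
{
	return image_id + 1; /* RAT0 is reserved for global bindings */
}
#endif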
83
84 struct r600_resource* r600_compute_buffer_alloc_vram(
85 struct r600_screen *screen,
86 unsigned size)
87 {
88 struct pipe_resource * buffer = NULL;
89 assert(size);
90
91 buffer = pipe_buffer_create(
92 (struct pipe_screen*) screen,
93 PIPE_BIND_CUSTOM,
94 PIPE_USAGE_IMMUTABLE,
95 size);
96
97 return (struct r600_resource *)buffer;
98 }
99
100
101 static void evergreen_set_rat(
102 struct r600_pipe_compute *pipe,
103 int id,
104 struct r600_resource* bo,
105 int start,
106 int size)
107 {
108 struct pipe_surface rat_templ;
109 struct r600_surface *surf = NULL;
110 struct r600_context *rctx = NULL;
111
112 assert(id < 12);
113 assert((size & 3) == 0);
114 assert((start & 0xFF) == 0);
115
116 rctx = pipe->ctx;
117
118 COMPUTE_DBG(rctx->screen, "bind rat: %i \n", id);
119
120 /* Create the RAT surface */
121 memset(&rat_templ, 0, sizeof(rat_templ));
122 rat_templ.format = PIPE_FORMAT_R32_UINT;
123 rat_templ.u.tex.level = 0;
124 rat_templ.u.tex.first_layer = 0;
125 rat_templ.u.tex.last_layer = 0;
126
127 /* Add the RAT to the list of color buffers */
128 pipe->ctx->framebuffer.state.cbufs[id] = pipe->ctx->context.create_surface(
129 (struct pipe_context *)pipe->ctx,
130 (struct pipe_resource *)bo, &rat_templ);
131
132 /* Update the number of color buffers */
133 pipe->ctx->framebuffer.state.nr_cbufs =
134 MAX2(id + 1, pipe->ctx->framebuffer.state.nr_cbufs);
135
136 /* Update the cb_target_mask
137 * XXX: I think this is a potential spot for bugs once we start doing
138 * GL interop. cb_target_mask may be modified in the 3D sections
139 * of this driver. */
140 pipe->ctx->compute_cb_target_mask |= (0xf << (id * 4));
141
142 surf = (struct r600_surface*)pipe->ctx->framebuffer.state.cbufs[id];
143 evergreen_init_color_surface_rat(rctx, surf);
144 }
145
146 static void evergreen_cs_set_vertex_buffer(
147 struct r600_context * rctx,
148 unsigned vb_index,
149 unsigned offset,
150 struct pipe_resource * buffer)
151 {
152 struct r600_vertexbuf_state *state = &rctx->cs_vertex_buffer_state;
153 struct pipe_vertex_buffer *vb = &state->vb[vb_index];
154 vb->stride = 1;
155 vb->buffer_offset = offset;
156 vb->buffer = buffer;
157 vb->user_buffer = NULL;
158
159 /* The vertex instructions in the compute shaders use the texture cache,
160 * so we need to invalidate it. */
161 rctx->flags |= R600_CONTEXT_INVAL_READ_CACHES;
162 state->enabled_mask |= 1 << vb_index;
163 state->dirty_mask |= 1 << vb_index;
164 state->atom.dirty = true;
165 }
166
167 static void evergreen_cs_set_constant_buffer(
168 struct r600_context * rctx,
169 unsigned cb_index,
170 unsigned offset,
171 unsigned size,
172 struct pipe_resource * buffer)
173 {
174 struct pipe_constant_buffer cb;
175 cb.buffer_size = size;
176 cb.buffer_offset = offset;
177 cb.buffer = buffer;
178 cb.user_buffer = NULL;
179
180 rctx->context.set_constant_buffer(&rctx->context, PIPE_SHADER_COMPUTE, cb_index, &cb);
181 }
182
183 static const struct u_resource_vtbl r600_global_buffer_vtbl =
184 {
185 u_default_resource_get_handle, /* get_handle */
186 r600_compute_global_buffer_destroy, /* resource_destroy */
187 r600_compute_global_transfer_map, /* transfer_map */
188 r600_compute_global_transfer_flush_region,/* transfer_flush_region */
189 r600_compute_global_transfer_unmap, /* transfer_unmap */
190 r600_compute_global_transfer_inline_write /* transfer_inline_write */
191 };
192
193
194 void *evergreen_create_compute_state(
195 struct pipe_context *ctx_,
196 const struct pipe_compute_state *cso)
197 {
198 struct r600_context *ctx = (struct r600_context *)ctx_;
199 struct r600_pipe_compute *shader = CALLOC_STRUCT(r600_pipe_compute);
200
201 #ifdef HAVE_OPENCL
202 const struct pipe_llvm_program_header * header;
203 const unsigned char * code;
204 unsigned i;
205
206 COMPUTE_DBG(ctx->screen, "*** evergreen_create_compute_state\n");
207
208 header = cso->prog;
209 code = cso->prog + sizeof(struct pipe_llvm_program_header);
210 #endif
211
212 shader->ctx = (struct r600_context*)ctx;
213 shader->local_size = cso->req_local_mem; ///TODO: assert it
214 shader->private_size = cso->req_private_mem;
215 shader->input_size = cso->req_input_mem;
216
217 #ifdef HAVE_OPENCL
218 shader->num_kernels = radeon_llvm_get_num_kernels(code, header->num_bytes);
219 shader->kernels = CALLOC(sizeof(struct r600_kernel), shader->num_kernels);
220
221 for (i = 0; i < shader->num_kernels; i++) {
222 struct r600_kernel *kernel = &shader->kernels[i];
223 kernel->llvm_module = radeon_llvm_get_kernel_module(i, code,
224 header->num_bytes);
225 }
226 #endif
227 return shader;
228 }
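/*
 * Hypothetical caller-side sketch (not part of this file): a state tracker
 * hands in a blob that starts with a struct pipe_llvm_program_header and is
 * immediately followed by header->num_bytes of LLVM bitcode, roughly:
 *
 *   struct pipe_compute_state cso = {
 *           .prog            = blob,        // header + bitcode
 *           .req_local_mem   = local_mem,
 *           .req_private_mem = private_mem,
 *           .req_input_mem   = input_mem,
 *   };
 *   void *shader = pipe->create_compute_state(pipe, &cso);
 *
 * blob, local_mem, private_mem and input_mem are placeholder names.
 */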
229
230 void evergreen_delete_compute_state(struct pipe_context *ctx, void* state)
231 {
232 struct r600_pipe_compute *shader = (struct r600_pipe_compute *)state;
233
234 free(shader);
235 }
236
237 static void evergreen_bind_compute_state(struct pipe_context *ctx_, void *state)
238 {
239 struct r600_context *ctx = (struct r600_context *)ctx_;
240
241 COMPUTE_DBG(ctx->screen, "*** evergreen_bind_compute_state\n");
242
243 ctx->cs_shader_state.shader = (struct r600_pipe_compute *)state;
244 }
245
246 /* The kernel parameters are stored in a vtx buffer (ID=0); besides the explicit
247 * kernel parameters there are implicit parameters that need to be stored
248 * in the vertex buffer as well. Here is how these parameters are organized in
249 * the buffer:
250 *
251 * DWORDS 0-2: Number of work groups in each dimension (x,y,z)
252 * DWORDS 3-5: Number of global work items in each dimension (x,y,z)
253 * DWORDS 6-8: Number of work items within each work group in each dimension
254 * (x,y,z)
255 * DWORDS 9+ : Kernel parameters
256 */
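/*
 * For illustration only (hypothetical, not used by the driver): the implicit
 * block described above, followed by the user-supplied kernel arguments,
 * can be pictured as
 *
 *   struct implicit_kernel_params {
 *           uint32_t num_work_groups[3];   // DWORDs 0-2
 *           uint32_t global_size[3];       // DWORDs 3-5
 *           uint32_t local_size[3];        // DWORDs 6-8
 *   };                                     // DWORDs 9+ hold the kernel args
 */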
257 void evergreen_compute_upload_input(
258 struct pipe_context *ctx_,
259 const uint *block_layout,
260 const uint *grid_layout,
261 const void *input)
262 {
263 struct r600_context *ctx = (struct r600_context *)ctx_;
264 struct r600_pipe_compute *shader = ctx->cs_shader_state.shader;
265 int i;
266 /* We need to reserve 9 dwords (36 bytes) for implicit kernel
267 * parameters.
268 */
269 unsigned input_size = shader->input_size + 36;
270 uint32_t * num_work_groups_start;
271 uint32_t * global_size_start;
272 uint32_t * local_size_start;
273 uint32_t * kernel_parameters_start;
274
275 if (shader->input_size == 0) {
276 return;
277 }
278
279 if (!shader->kernel_param) {
280 /* Add space for the grid dimensions */
281 shader->kernel_param = r600_compute_buffer_alloc_vram(
282 ctx->screen, input_size);
283 }
284
285 num_work_groups_start = r600_buffer_mmap_sync_with_rings(ctx, shader->kernel_param, PIPE_TRANSFER_WRITE);
286 global_size_start = num_work_groups_start + (3 * (sizeof(uint)) / 4);
287 local_size_start = global_size_start + (3 * (sizeof(uint)) / 4);
288 kernel_parameters_start = local_size_start + (3 * (sizeof(uint)) / 4);
289
290 /* Copy the grid layout (number of work groups) */
291 memcpy(num_work_groups_start, grid_layout, 3 * sizeof(uint));
292
293 /* Copy the global size */
294 for (i = 0; i < 3; i++) {
295 global_size_start[i] = grid_layout[i] * block_layout[i];
296 }
297
298 /* Copy the local dimensions */
299 memcpy(local_size_start, block_layout, 3 * sizeof(uint));
300
301 /* Copy the kernel inputs */
302 memcpy(kernel_parameters_start, input, shader->input_size);
303
304 for (i = 0; i < (input_size / 4); i++) {
305 COMPUTE_DBG(ctx->screen, "input %i : %i\n", i,
306 ((unsigned*)num_work_groups_start)[i]);
307 }
308
309 ctx->ws->buffer_unmap(shader->kernel_param->cs_buf);
310
311 /* ID=0 is reserved for the parameters */
312 evergreen_cs_set_constant_buffer(ctx, 0, 0, input_size,
313 (struct pipe_resource*)shader->kernel_param);
314 }
315
316 static void evergreen_emit_direct_dispatch(
317 struct r600_context *rctx,
318 const uint *block_layout, const uint *grid_layout)
319 {
320 int i;
321 struct radeon_winsys_cs *cs = rctx->rings.gfx.cs;
322 unsigned num_waves;
323 unsigned num_pipes = rctx->screen->info.r600_max_pipes;
324 unsigned wave_divisor = (16 * num_pipes);
325 int group_size = 1;
326 int grid_size = 1;
327 /* XXX: Enable lds and get size from cs_shader_state */
328 unsigned lds_size = 0;
329
330 /* Calculate group_size/grid_size */
331 for (i = 0; i < 3; i++) {
332 group_size *= block_layout[i];
333 }
334
335 for (i = 0; i < 3; i++) {
336 grid_size *= grid_layout[i];
337 }
338
339 /* num_waves = ceil((tg_size.x * tg_size.y * tg_size.z) / (16 * num_pipes)) */
340 num_waves = (block_layout[0] * block_layout[1] * block_layout[2] +
341 wave_divisor - 1) / wave_divisor;
342
343 COMPUTE_DBG(rctx->screen, "Using %u pipes, there are %u wavefronts per thread block\n",
344 num_pipes, num_waves);
345
346 /* XXX: Partition the LDS between PS/CS. By default half (4096 dwords
347 * on Evergreen) goes to Pixel Shaders and half goes to Compute Shaders.
348 * We may need to allocate the entire LDS space for Compute Shaders.
349 *
350 * EG: R_008E2C_SQ_LDS_RESOURCE_MGMT := S_008E2C_NUM_LS_LDS(lds_dwords)
351 * CM: CM_R_0286FC_SPI_LDS_MGMT := S_0286FC_NUM_LS_LDS(lds_dwords)
352 */
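/*
 * Hedged sketch, not enabled: if the whole LDS were given to compute, the
 * Evergreen split could presumably be programmed with the register named
 * above, where lds_dwords would be the number of LDS dwords reserved for
 * the LS stage (lds_dwords is an assumed variable, not computed here):
 *
 *   r600_write_config_reg(cs, R_008E2C_SQ_LDS_RESOURCE_MGMT,
 *                         S_008E2C_NUM_LS_LDS(lds_dwords));
 */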
353
354 r600_write_config_reg(cs, R_008970_VGT_NUM_INDICES, group_size);
355
356 r600_write_config_reg_seq(cs, R_00899C_VGT_COMPUTE_START_X, 3);
357 r600_write_value(cs, 0); /* R_00899C_VGT_COMPUTE_START_X */
358 r600_write_value(cs, 0); /* R_0089A0_VGT_COMPUTE_START_Y */
359 r600_write_value(cs, 0); /* R_0089A4_VGT_COMPUTE_START_Z */
360
361 r600_write_config_reg(cs, R_0089AC_VGT_COMPUTE_THREAD_GROUP_SIZE,
362 group_size);
363
364 r600_write_compute_context_reg_seq(cs, R_0286EC_SPI_COMPUTE_NUM_THREAD_X, 3);
365 r600_write_value(cs, block_layout[0]); /* R_0286EC_SPI_COMPUTE_NUM_THREAD_X */
366 r600_write_value(cs, block_layout[1]); /* R_0286F0_SPI_COMPUTE_NUM_THREAD_Y */
367 r600_write_value(cs, block_layout[2]); /* R_0286F4_SPI_COMPUTE_NUM_THREAD_Z */
368
369 r600_write_compute_context_reg(cs, CM_R_0288E8_SQ_LDS_ALLOC,
370 lds_size | (num_waves << 14));
371
372 /* Dispatch packet */
373 r600_write_value(cs, PKT3C(PKT3_DISPATCH_DIRECT, 3, 0));
374 r600_write_value(cs, grid_layout[0]);
375 r600_write_value(cs, grid_layout[1]);
376 r600_write_value(cs, grid_layout[2]);
377 /* VGT_DISPATCH_INITIATOR = COMPUTE_SHADER_EN */
378 r600_write_value(cs, 1);
379 }
380
381 static void compute_emit_cs(struct r600_context *ctx, const uint *block_layout,
382 const uint *grid_layout)
383 {
384 struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;
385 unsigned flush_flags = 0;
386 int i;
387
388 /* make sure that the gfx ring is the only active one */
389 if (ctx->rings.dma.cs) {
390 ctx->rings.dma.flush(ctx, RADEON_FLUSH_ASYNC);
391 }
392
393 /* Initialize all the compute-related registers.
394 *
395 * See evergreen_init_atom_start_compute_cs() in this file for the list
396 * of registers initialized by the start_compute_cs_cmd atom.
397 */
398 r600_emit_command_buffer(cs, &ctx->start_compute_cs_cmd);
399
400 ctx->flags |= R600_CONTEXT_WAIT_3D_IDLE | R600_CONTEXT_FLUSH_AND_INV;
401 r600_flush_emit(ctx);
402
403 /* Emit colorbuffers. */
404 for (i = 0; i < ctx->framebuffer.state.nr_cbufs; i++) {
405 struct r600_surface *cb = (struct r600_surface*)ctx->framebuffer.state.cbufs[i];
406 unsigned reloc = r600_context_bo_reloc(ctx, &ctx->rings.gfx,
407 (struct r600_resource*)cb->base.texture,
408 RADEON_USAGE_READWRITE);
409
410 r600_write_compute_context_reg_seq(cs, R_028C60_CB_COLOR0_BASE + i * 0x3C, 7);
411 r600_write_value(cs, cb->cb_color_base); /* R_028C60_CB_COLOR0_BASE */
412 r600_write_value(cs, cb->cb_color_pitch); /* R_028C64_CB_COLOR0_PITCH */
413 r600_write_value(cs, cb->cb_color_slice); /* R_028C68_CB_COLOR0_SLICE */
414 r600_write_value(cs, cb->cb_color_view); /* R_028C6C_CB_COLOR0_VIEW */
415 r600_write_value(cs, cb->cb_color_info); /* R_028C70_CB_COLOR0_INFO */
416 r600_write_value(cs, cb->cb_color_attrib); /* R_028C74_CB_COLOR0_ATTRIB */
417 r600_write_value(cs, cb->cb_color_dim); /* R_028C78_CB_COLOR0_DIM */
418
419 r600_write_value(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C60_CB_COLOR0_BASE */
420 r600_write_value(cs, reloc);
421
422 if (!ctx->keep_tiling_flags) {
423 r600_write_value(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C70_CB_COLOR0_INFO */
424 r600_write_value(cs, reloc);
425 }
426
427 r600_write_value(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C74_CB_COLOR0_ATTRIB */
428 r600_write_value(cs, reloc);
429 }
430
431 /* Set CB_TARGET_MASK XXX: Use cb_misc_state */
432 r600_write_compute_context_reg(cs, R_028238_CB_TARGET_MASK,
433 ctx->compute_cb_target_mask);
434
435
436 /* Emit vertex buffer state */
437 ctx->cs_vertex_buffer_state.atom.num_dw = 12 * util_bitcount(ctx->cs_vertex_buffer_state.dirty_mask);
438 r600_emit_atom(ctx, &ctx->cs_vertex_buffer_state.atom);
439
440 /* Emit constant buffer state */
441 r600_emit_atom(ctx, &ctx->constbuf_state[PIPE_SHADER_COMPUTE].atom);
442
443 /* Emit compute shader state */
444 r600_emit_atom(ctx, &ctx->cs_shader_state.atom);
445
446 /* Emit dispatch state and dispatch packet */
447 evergreen_emit_direct_dispatch(ctx, block_layout, grid_layout);
448
449 /* XXX evergreen_flush_emit() hardcodes the CP_COHER_SIZE to 0xffffffff
450 */
451 ctx->flags |= R600_CONTEXT_INVAL_READ_CACHES;
452 r600_flush_emit(ctx);
453
454 #if 0
455 COMPUTE_DBG(ctx->screen, "cdw: %i\n", cs->cdw);
456 for (i = 0; i < cs->cdw; i++) {
457 COMPUTE_DBG(ctx->screen, "%4i : 0x%08X\n", i, ctx->cs->buf[i]);
458 }
459 #endif
460
461 flush_flags = RADEON_FLUSH_ASYNC | RADEON_FLUSH_COMPUTE;
462 if (ctx->keep_tiling_flags) {
463 flush_flags |= RADEON_FLUSH_KEEP_TILING_FLAGS;
464 }
465
466 ctx->ws->cs_flush(ctx->rings.gfx.cs, flush_flags, ctx->screen->cs_count++);
467
468 ctx->flags = 0;
469
470 COMPUTE_DBG(ctx->screen, "shader started\n");
471 }
472
473
474 /**
475 * Emit function for r600_cs_shader_state atom
476 */
477 void evergreen_emit_cs_shader(
478 struct r600_context *rctx,
479 struct r600_atom *atom)
480 {
481 struct r600_cs_shader_state *state =
482 (struct r600_cs_shader_state*)atom;
483 struct r600_pipe_compute *shader = state->shader;
484 struct r600_kernel *kernel = &shader->kernels[state->kernel_index];
485 struct radeon_winsys_cs *cs = rctx->rings.gfx.cs;
486 uint64_t va;
487
488 va = r600_resource_va(&rctx->screen->screen, &kernel->code_bo->b.b);
489
490 r600_write_compute_context_reg_seq(cs, R_0288D0_SQ_PGM_START_LS, 3);
491 r600_write_value(cs, va >> 8); /* R_0288D0_SQ_PGM_START_LS */
492 r600_write_value(cs, /* R_0288D4_SQ_PGM_RESOURCES_LS */
493 S_0288D4_NUM_GPRS(kernel->bc.ngpr)
494 | S_0288D4_STACK_SIZE(kernel->bc.nstack));
495 r600_write_value(cs, 0); /* R_0288D8_SQ_PGM_RESOURCES_LS_2 */
496
497 r600_write_value(cs, PKT3C(PKT3_NOP, 0, 0));
498 r600_write_value(cs, r600_context_bo_reloc(rctx, &rctx->rings.gfx,
499 kernel->code_bo, RADEON_USAGE_READ));
500
501 rctx->flags |= R600_CONTEXT_INVAL_READ_CACHES;
502 }
503
504 static void evergreen_launch_grid(
505 struct pipe_context *ctx_,
506 const uint *block_layout, const uint *grid_layout,
507 uint32_t pc, const void *input)
508 {
509 struct r600_context *ctx = (struct r600_context *)ctx_;
510
511 #ifdef HAVE_OPENCL
512 COMPUTE_DBG(ctx->screen, "*** evergreen_launch_grid: pc = %u\n", pc);
513
514 struct r600_pipe_compute *shader = ctx->cs_shader_state.shader;
515 if (!shader->kernels[pc].code_bo) {
516 void *p;
517 struct r600_kernel *kernel = &shader->kernels[pc];
518 r600_compute_shader_create(ctx_, kernel->llvm_module, &kernel->bc);
519 kernel->code_bo = r600_compute_buffer_alloc_vram(ctx->screen,
520 kernel->bc.ndw * 4);
521 p = r600_buffer_mmap_sync_with_rings(ctx, kernel->code_bo, PIPE_TRANSFER_WRITE);
522 memcpy(p, kernel->bc.bytecode, kernel->bc.ndw * 4);
523 ctx->ws->buffer_unmap(kernel->code_bo->cs_buf);
524 }
525 #endif
526
527 ctx->cs_shader_state.kernel_index = pc;
528 evergreen_compute_upload_input(ctx_, block_layout, grid_layout, input);
529 compute_emit_cs(ctx, block_layout, grid_layout);
530 }
531
532 static void evergreen_set_compute_resources(struct pipe_context * ctx_,
533 unsigned start, unsigned count,
534 struct pipe_surface ** surfaces)
535 {
536 struct r600_context *ctx = (struct r600_context *)ctx_;
537 struct r600_surface **resources = (struct r600_surface **)surfaces;
538
539 COMPUTE_DBG(ctx->screen, "*** evergreen_set_compute_resources: start = %u count = %u\n",
540 start, count);
541
542 for (int i = 0; i < count; i++) {
543 /* The first two vertex buffers are reserved for parameters and
544 * global buffers. */
545 unsigned vtx_id = 2 + i;
546 if (resources[i]) {
547 struct r600_resource_global *buffer =
548 (struct r600_resource_global*)
549 resources[i]->base.texture;
550 if (resources[i]->base.writable) {
551 assert(i+1 < 12);
552
553 evergreen_set_rat(ctx->cs_shader_state.shader, i+1,
554 (struct r600_resource *)resources[i]->base.texture,
555 buffer->chunk->start_in_dw*4,
556 resources[i]->base.texture->width0);
557 }
558
559 evergreen_cs_set_vertex_buffer(ctx, vtx_id,
560 buffer->chunk->start_in_dw * 4,
561 resources[i]->base.texture);
562 }
563 }
564 }
565
566 static void evergreen_set_cs_sampler_view(struct pipe_context *ctx_,
567 unsigned start_slot, unsigned count,
568 struct pipe_sampler_view **views)
569 {
570 struct r600_pipe_sampler_view **resource =
571 (struct r600_pipe_sampler_view **)views;
572
573 for (int i = 0; i < count; i++) {
574 if (resource[i]) {
575 assert(i+1 < 12);
576 /* XXX: Implement */
577 assert(!"Compute samplers not implemented.");
578 ///FETCH0 = VTX0 (param buffer),
579 //FETCH1 = VTX1 (global buffer pool), FETCH2... = TEX
580 }
581 }
582 }
583
584 static void evergreen_bind_compute_sampler_states(
585 struct pipe_context *ctx_,
586 unsigned start_slot,
587 unsigned num_samplers,
588 void **samplers_)
589 {
590 struct compute_sampler_state ** samplers =
591 (struct compute_sampler_state **)samplers_;
592
593 for (int i = 0; i < num_samplers; i++) {
594 if (samplers[i]) {
595 /* XXX: Implement */
596 assert(!"Compute samplers not implemented.");
597 }
598 }
599 }
600
601 static void evergreen_set_global_binding(
602 struct pipe_context *ctx_, unsigned first, unsigned n,
603 struct pipe_resource **resources,
604 uint32_t **handles)
605 {
606 struct r600_context *ctx = (struct r600_context *)ctx_;
607 struct compute_memory_pool *pool = ctx->screen->global_pool;
608 struct r600_resource_global **buffers =
609 (struct r600_resource_global **)resources;
610
611 COMPUTE_DBG(ctx->screen, "*** evergreen_set_global_binding first = %u n = %u\n",
612 first, n);
613
614 if (!resources) {
615 /* XXX: Unset */
616 return;
617 }
618
619 compute_memory_finalize_pending(pool, ctx_);
620
621 for (int i = 0; i < n; i++)
622 {
623 assert(resources[i]->target == PIPE_BUFFER);
624 assert(resources[i]->bind & PIPE_BIND_GLOBAL);
625
626 *(handles[i]) = buffers[i]->chunk->start_in_dw * 4;
627 }
628
629 evergreen_set_rat(ctx->cs_shader_state.shader, 0, pool->bo, 0, pool->size_in_dw * 4);
630 evergreen_cs_set_vertex_buffer(ctx, 1, 0,
631 (struct pipe_resource*)pool->bo);
632 }
633
634 /**
635 * This function initializes all the compute specific registers that need to
636 * be initialized for each compute command stream. Registers that are common
637 * to both compute and 3D will be initialized at the beginning of each compute
638 * command stream by the start_cs_cmd atom. However, since the SET_CONTEXT_REG
639 * packet requires that the shader type bit be set, we must initialize all
640 * context registers needed for compute in this function. The registers
641 * initialized by the start_cs_cmd atom can be found in evergreen_state.c in the
642 * functions evergreen_init_atom_start_cs or cayman_init_atom_start_cs depending
643 * on the GPU family.
644 */
645 void evergreen_init_atom_start_compute_cs(struct r600_context *ctx)
646 {
647 struct r600_command_buffer *cb = &ctx->start_compute_cs_cmd;
648 int num_threads;
649 int num_stack_entries;
650
651 /* since all required registers are initialised in the
652 * start_compute_cs_cmd atom, we can EMIT_EARLY here.
653 */
654 r600_init_command_buffer(cb, 256);
655 cb->pkt_flags = RADEON_CP_PACKET3_COMPUTE_MODE;
656
657 /* This must be first. */
658 r600_store_value(cb, PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
659 r600_store_value(cb, 0x80000000);
660 r600_store_value(cb, 0x80000000);
661
662 /* We're setting config registers here. */
663 r600_store_value(cb, PKT3(PKT3_EVENT_WRITE, 0, 0));
664 r600_store_value(cb, EVENT_TYPE(EVENT_TYPE_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
665
666 switch (ctx->family) {
667 case CHIP_CEDAR:
668 default:
669 num_threads = 128;
670 num_stack_entries = 256;
671 break;
672 case CHIP_REDWOOD:
673 num_threads = 128;
674 num_stack_entries = 256;
675 break;
676 case CHIP_JUNIPER:
677 num_threads = 128;
678 num_stack_entries = 512;
679 break;
680 case CHIP_CYPRESS:
681 case CHIP_HEMLOCK:
682 num_threads = 128;
683 num_stack_entries = 512;
684 break;
685 case CHIP_PALM:
686 num_threads = 128;
687 num_stack_entries = 256;
688 break;
689 case CHIP_SUMO:
690 num_threads = 128;
691 num_stack_entries = 256;
692 break;
693 case CHIP_SUMO2:
694 num_threads = 128;
695 num_stack_entries = 512;
696 break;
697 case CHIP_BARTS:
698 num_threads = 128;
699 num_stack_entries = 512;
700 break;
701 case CHIP_TURKS:
702 num_threads = 128;
703 num_stack_entries = 256;
704 break;
705 case CHIP_CAICOS:
706 num_threads = 128;
707 num_stack_entries = 256;
708 break;
709 }
710
711 /* Config Registers */
712 if (ctx->chip_class < CAYMAN)
713 evergreen_init_common_regs(cb, ctx->chip_class, ctx->family,
714 ctx->screen->info.drm_minor);
715 else
716 cayman_init_common_regs(cb, ctx->chip_class, ctx->family,
717 ctx->screen->info.drm_minor);
718
719 /* The primitive type always needs to be POINTLIST for compute. */
720 r600_store_config_reg(cb, R_008958_VGT_PRIMITIVE_TYPE,
721 V_008958_DI_PT_POINTLIST);
722
723 if (ctx->chip_class < CAYMAN) {
724
725 /* These registers control which simds can be used by each stage.
726 * The default for these registers is 0xffffffff, which means
727 * all simds are available for each stage. It's possible we may
728 * want to play around with these in the future, but for now
729 * the default value is fine.
730 *
731 * R_008E20_SQ_STATIC_THREAD_MGMT1
732 * R_008E24_SQ_STATIC_THREAD_MGMT2
733 * R_008E28_SQ_STATIC_THREAD_MGMT3
734 */
735
736 /* XXX: We may need to adjust the thread and stack resource
737 * values for 3D/compute interop */
738
739 r600_store_config_reg_seq(cb, R_008C18_SQ_THREAD_RESOURCE_MGMT_1, 5);
740
741 /* R_008C18_SQ_THREAD_RESOURCE_MGMT_1
742 * Set the number of threads used by the PS/VS/GS/ES stage to
743 * 0.
744 */
745 r600_store_value(cb, 0);
746
747 /* R_008C1C_SQ_THREAD_RESOURCE_MGMT_2
748 * Set the number of threads used by the CS (aka LS) stage to
749 * the maximum number of threads and set the number of threads
750 * for the HS stage to 0. */
751 r600_store_value(cb, S_008C1C_NUM_LS_THREADS(num_threads));
752
753 /* R_008C20_SQ_STACK_RESOURCE_MGMT_1
754 * Set the Control Flow stack entries to 0 for PS/VS stages */
755 r600_store_value(cb, 0);
756
757 /* R_008C24_SQ_STACK_RESOURCE_MGMT_2
758 * Set the Control Flow stack entries to 0 for GS/ES stages */
759 r600_store_value(cb, 0);
760
761 /* R_008C28_SQ_STACK_RESOURCE_MGMT_3
762 * Set the Control Flow stack entries to 0 for the HS stage, and
763 * set it to the maximum value for the CS (aka LS) stage. */
764 r600_store_value(cb,
765 S_008C28_NUM_LS_STACK_ENTRIES(num_stack_entries));
766 }
767
768 /* Context Registers */
769
770 if (ctx->chip_class < CAYMAN) {
771 /* workaround for hw issues with dyn gpr - must set all limits
772 * to 240 instead of 0, 0x1e == 240 / 8
773 */
774 r600_store_context_reg(cb, R_028838_SQ_DYN_GPR_RESOURCE_LIMIT_1,
775 S_028838_PS_GPRS(0x1e) |
776 S_028838_VS_GPRS(0x1e) |
777 S_028838_GS_GPRS(0x1e) |
778 S_028838_ES_GPRS(0x1e) |
779 S_028838_HS_GPRS(0x1e) |
780 S_028838_LS_GPRS(0x1e));
781 }
782
783 /* XXX: Investigate setting bit 15, which is FAST_COMPUTE_MODE */
784 r600_store_context_reg(cb, R_028A40_VGT_GS_MODE,
785 S_028A40_COMPUTE_MODE(1) | S_028A40_PARTIAL_THD_AT_EOI(1));
786
787 r600_store_context_reg(cb, R_028B54_VGT_SHADER_STAGES_EN, 2/*CS_ON*/);
788
789 r600_store_context_reg(cb, R_0286E8_SPI_COMPUTE_INPUT_CNTL,
790 S_0286E8_TID_IN_GROUP_ENA
791 | S_0286E8_TGID_ENA
792 | S_0286E8_DISABLE_INDEX_PACK)
793 ;
794
795 /* The LOOP_CONST registers are an optimization for loops that allows
796 * you to store the initial counter, increment value, and maximum
797 * counter value in a register so that hardware can calculate the
798 * correct number of iterations for the loop, so that you don't need
799 * to have the loop counter in your shader code. We don't currently use
800 * this optimization, so we must keep track of the counter in the
801 * shader and use a break instruction to exit loops. However, the
802 * hardware will still use this register to determine when to exit a
803 * loop, so we need to initialize the counter to 0, set the increment
804 * value to 1, and the maximum counter value to 4095 (0xfff), which
805 * is the maximum value allowed. This gives us a maximum of 4096
806 * iterations for our loops, but hopefully our break instruction will
807 * execute some time before the 4096th iteration.
808 */
809 eg_store_loop_const(cb, R_03A200_SQ_LOOP_CONST_0 + (160 * 4), 0x1000FFF);
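/* Presumed encoding of 0x1000FFF, matching the description above: bits
 * [11:0] hold the maximum count (0xFFF = 4095), bits [23:12] the initial
 * value (0) and bits [31:24] the increment (1). */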
810 }
811
812 void evergreen_init_compute_state_functions(struct r600_context *ctx)
813 {
814 ctx->context.create_compute_state = evergreen_create_compute_state;
815 ctx->context.delete_compute_state = evergreen_delete_compute_state;
816 ctx->context.bind_compute_state = evergreen_bind_compute_state;
817 // ctx->context.create_sampler_view = evergreen_compute_create_sampler_view;
818 ctx->context.set_compute_resources = evergreen_set_compute_resources;
819 ctx->context.set_compute_sampler_views = evergreen_set_cs_sampler_view;
820 ctx->context.bind_compute_sampler_states = evergreen_bind_compute_sampler_states;
821 ctx->context.set_global_binding = evergreen_set_global_binding;
822 ctx->context.launch_grid = evergreen_launch_grid;
823
824 /* We always use at least one vertex buffer for parameters (id = 1) */
825 ctx->cs_vertex_buffer_state.enabled_mask =
826 ctx->cs_vertex_buffer_state.dirty_mask = 0x2;
827 }
828
829
830 struct pipe_resource *r600_compute_global_buffer_create(
831 struct pipe_screen *screen,
832 const struct pipe_resource *templ)
833 {
834 struct r600_resource_global* result = NULL;
835 struct r600_screen* rscreen = NULL;
836 int size_in_dw = 0;
837
838 assert(templ->target == PIPE_BUFFER);
839 assert(templ->bind & PIPE_BIND_GLOBAL);
840 assert(templ->array_size == 1 || templ->array_size == 0);
841 assert(templ->depth0 == 1 || templ->depth0 == 0);
842 assert(templ->height0 == 1 || templ->height0 == 0);
843
844 result = (struct r600_resource_global*)
845 CALLOC(sizeof(struct r600_resource_global), 1);
846 rscreen = (struct r600_screen*)screen;
847
848 COMPUTE_DBG(rscreen, "*** r600_compute_global_buffer_create\n");
849 COMPUTE_DBG(rscreen, "width = %u array_size = %u\n", templ->width0,
850 templ->array_size);
851
852 result->base.b.vtbl = &r600_global_buffer_vtbl;
853 result->base.b.b.screen = screen;
854 result->base.b.b = *templ;
855 pipe_reference_init(&result->base.b.b.reference, 1);
856
857 size_in_dw = (templ->width0+3) / 4;
858
859 result->chunk = compute_memory_alloc(rscreen->global_pool, size_in_dw);
860
861 if (result->chunk == NULL)
862 {
863 free(result);
864 return NULL;
865 }
866
867 return &result->base.b.b;
868 }
869
870 void r600_compute_global_buffer_destroy(
871 struct pipe_screen *screen,
872 struct pipe_resource *res)
873 {
874 struct r600_resource_global* buffer = NULL;
875 struct r600_screen* rscreen = NULL;
876
877 assert(res->target == PIPE_BUFFER);
878 assert(res->bind & PIPE_BIND_GLOBAL);
879
880 buffer = (struct r600_resource_global*)res;
881 rscreen = (struct r600_screen*)screen;
882
883 compute_memory_free(rscreen->global_pool, buffer->chunk->id);
884
885 buffer->chunk = NULL;
886 free(res);
887 }
888
889 void *r600_compute_global_transfer_map(
890 struct pipe_context *ctx_,
891 struct pipe_resource *resource,
892 unsigned level,
893 unsigned usage,
894 const struct pipe_box *box,
895 struct pipe_transfer **ptransfer)
896 {
897 struct r600_context *rctx = (struct r600_context*)ctx_;
898 struct compute_memory_pool *pool = rctx->screen->global_pool;
899 struct pipe_transfer *transfer = util_slab_alloc(&rctx->pool_transfers);
900 struct r600_resource_global* buffer =
901 (struct r600_resource_global*)resource;
902 uint32_t* map;
903
904 compute_memory_finalize_pending(pool, ctx_);
905
906 assert(resource->target == PIPE_BUFFER);
907
908 COMPUTE_DBG(rctx->screen, "* r600_compute_global_transfer_map()\n"
909 "level = %u, usage = %u, box(x = %u, y = %u, z = %u "
910 "width = %u, height = %u, depth = %u)\n", level, usage,
911 box->x, box->y, box->z, box->width, box->height,
912 box->depth);
913
914 transfer->resource = resource;
915 transfer->level = level;
916 transfer->usage = usage;
917 transfer->box = *box;
918 transfer->stride = 0;
919 transfer->layer_stride = 0;
920
921 assert(transfer->resource->target == PIPE_BUFFER);
922 assert(transfer->resource->bind & PIPE_BIND_GLOBAL);
923 assert(transfer->box.x >= 0);
924 assert(transfer->box.y == 0);
925 assert(transfer->box.z == 0);
926
927 ///TODO: do it better, mapping is not possible if the pool is too big
928
929 COMPUTE_DBG(rctx->screen, "* r600_compute_global_transfer_map()\n");
930
931 if (!(map = r600_buffer_mmap_sync_with_rings(rctx, buffer->chunk->pool->bo, transfer->usage))) {
932 util_slab_free(&rctx->pool_transfers, transfer);
933 return NULL;
934 }
935
936 *ptransfer = transfer;
937
938 COMPUTE_DBG(rctx->screen, "Buffer: %p + %u (buffer offset in global memory) "
939 "+ %u (box.x)\n", map, buffer->chunk->start_in_dw, transfer->box.x);
940 return ((char*)(map + buffer->chunk->start_in_dw)) + transfer->box.x;
941 }
942
943 void r600_compute_global_transfer_unmap(
944 struct pipe_context *ctx_,
945 struct pipe_transfer* transfer)
946 {
947 struct r600_context *ctx = NULL;
948 struct r600_resource_global* buffer = NULL;
949
950 assert(transfer->resource->target == PIPE_BUFFER);
951 assert(transfer->resource->bind & PIPE_BIND_GLOBAL);
952
953 ctx = (struct r600_context *)ctx_;
954 buffer = (struct r600_resource_global*)transfer->resource;
955
956 COMPUTE_DBG(ctx->screen, "* r600_compute_global_transfer_unmap()\n");
957
958 ctx->ws->buffer_unmap(buffer->chunk->pool->bo->cs_buf);
959 util_slab_free(&ctx->pool_transfers, transfer);
960 }
961
962 void r600_compute_global_transfer_flush_region(
963 struct pipe_context *ctx_,
964 struct pipe_transfer *transfer,
965 const struct pipe_box *box)
966 {
967 assert(0 && "TODO");
968 }
969
970 void r600_compute_global_transfer_inline_write(
971 struct pipe_context *pipe,
972 struct pipe_resource *resource,
973 unsigned level,
974 unsigned usage,
975 const struct pipe_box *box,
976 const void *data,
977 unsigned stride,
978 unsigned layer_stride)
979 {
980 assert(0 && "TODO");
981 }