r600g/compute: Don't use radeon_winsys::buffer_wait() after dispatching a kernel
[mesa.git] / src / gallium / drivers / r600 / evergreen_compute.c
1 /*
2 * Copyright 2011 Adam Rak <adam.rak@streamnovation.com>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Adam Rak <adam.rak@streamnovation.com>
25 */
26
27 #include <stdio.h>
28 #include <errno.h>
29 #include "pipe/p_defines.h"
30 #include "pipe/p_state.h"
31 #include "pipe/p_context.h"
32 #include "util/u_blitter.h"
33 #include "util/u_double_list.h"
34 #include "util/u_transfer.h"
35 #include "util/u_surface.h"
36 #include "util/u_pack_color.h"
37 #include "util/u_memory.h"
38 #include "util/u_inlines.h"
39 #include "util/u_framebuffer.h"
40 #include "pipebuffer/pb_buffer.h"
41 #include "evergreend.h"
42 #include "r600_resource.h"
43 #include "r600_shader.h"
44 #include "r600_pipe.h"
45 #include "r600_formats.h"
46 #include "evergreen_compute.h"
47 #include "evergreen_compute_internal.h"
48 #include "compute_memory_pool.h"
49 #ifdef HAVE_OPENCL
50 #include "radeon_llvm_util.h"
51 #endif
52
53 /**
54 RAT0 is for global binding write
55 VTX1 is for global binding read
56
57 for writing images RAT1...
58 for reading images TEX2...
59 TEX2-RAT1 is paired
60
61 TEX2... consumes the same fetch resources that VTX2... would consume
62
63 CONST0 and VTX0 are for parameters
64 CONST0 is for binding a smaller input parameter buffer and for constant indexing,
65 and is also constant cached
66 VTX0 is for indirect/non-constant indexing, or for when the input is bigger than
67 the constant cache can handle
68
69 RATs are limited to 12, so we can only bind at most 11 textures for writing
70 because we reserve RAT0 for global bindings. With byte addressing enabled,
71 we should reserve another one too => 10 image bindings for writing max.
72
73 from Nvidia OpenCL:
74 CL_DEVICE_MAX_READ_IMAGE_ARGS: 128
75 CL_DEVICE_MAX_WRITE_IMAGE_ARGS: 8
76
77 so 10 for writing is enough. 176 is the max for reading according to the docs
78
79 writable images should be listed first (< 10), so their id corresponds to RAT(id+1)
80 writable images will consume TEX slots, and VTX slots too, because of linear indexing
81
82 */
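
/* Illustrative sketch only, not part of the driver: the index mapping implied
 * by the comment above and by evergreen_set_compute_resources() /
 * evergreen_set_cs_sampler_view() further down. The helper names are
 * hypothetical. */
static inline unsigned illustrative_writable_image_rat(unsigned image_id)
{
	/* RAT0 is reserved for the global binding write. */
	return image_id + 1;
}

static inline unsigned illustrative_sampled_resource_fetch(unsigned res_id)
{
	/* FETCH0 = VTX0 (parameters), FETCH1 = VTX1 (global buffer pool). */
	return res_id + 2;
}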
83
84 static void evergreen_cs_set_vertex_buffer(
85 struct r600_context * rctx,
86 unsigned vb_index,
87 unsigned offset,
88 struct pipe_resource * buffer)
89 {
90 struct r600_vertexbuf_state *state = &rctx->cs_vertex_buffer_state;
91 struct pipe_vertex_buffer *vb = &state->vb[vb_index];
92 vb->stride = 1;
93 vb->buffer_offset = offset;
94 vb->buffer = buffer;
95 vb->user_buffer = NULL;
96
97 /* The vertex instructions in the compute shaders use the texture cache,
98 * so we need to invalidate it. */
99 rctx->flags |= R600_CONTEXT_INVAL_READ_CACHES;
100 state->enabled_mask |= 1 << vb_index;
101 state->dirty_mask |= 1 << vb_index;
102 state->atom.dirty = true;
103 }
104
105 static const struct u_resource_vtbl r600_global_buffer_vtbl =
106 {
107 u_default_resource_get_handle, /* get_handle */
108 r600_compute_global_buffer_destroy, /* resource_destroy */
109 r600_compute_global_transfer_map, /* transfer_map */
110 r600_compute_global_transfer_flush_region,/* transfer_flush_region */
111 r600_compute_global_transfer_unmap, /* transfer_unmap */
112 r600_compute_global_transfer_inline_write /* transfer_inline_write */
113 };
114
115
116 void *evergreen_create_compute_state(
117 struct pipe_context *ctx_,
118 const struct pipe_compute_state *cso)
119 {
120 struct r600_context *ctx = (struct r600_context *)ctx_;
121 struct r600_pipe_compute *shader = CALLOC_STRUCT(r600_pipe_compute);
122
123 #ifdef HAVE_OPENCL
124 const struct pipe_llvm_program_header * header;
125 const unsigned char * code;
126 unsigned i;
127
128 COMPUTE_DBG(ctx->screen, "*** evergreen_create_compute_state\n");
129
130 header = cso->prog;
131 code = cso->prog + sizeof(struct pipe_llvm_program_header);
132 #endif
133
134 shader->ctx = (struct r600_context*)ctx;
135 shader->resources = (struct evergreen_compute_resource*)
136 CALLOC(sizeof(struct evergreen_compute_resource),
137 get_compute_resource_num());
138 shader->local_size = cso->req_local_mem; ///TODO: assert it
139 shader->private_size = cso->req_private_mem;
140 shader->input_size = cso->req_input_mem;
141
142 #ifdef HAVE_OPENCL
143 shader->num_kernels = radeon_llvm_get_num_kernels(code, header->num_bytes);
144 shader->kernels = CALLOC(sizeof(struct r600_kernel), shader->num_kernels);
145
146 for (i = 0; i < shader->num_kernels; i++) {
147 struct r600_kernel *kernel = &shader->kernels[i];
148 kernel->llvm_module = radeon_llvm_get_kernel_module(i, code,
149 header->num_bytes);
150 }
151 #endif
152 return shader;
153 }
154
155 void evergreen_delete_compute_state(struct pipe_context *ctx, void* state)
156 {
157 struct r600_pipe_compute *shader = (struct r600_pipe_compute *)state;
158
159 free(shader->resources);
160 free(shader);
161 }
162
163 static void evergreen_bind_compute_state(struct pipe_context *ctx_, void *state)
164 {
165 struct r600_context *ctx = (struct r600_context *)ctx_;
166
167 COMPUTE_DBG(ctx->screen, "*** evergreen_bind_compute_state\n");
168
169 ctx->cs_shader_state.shader = (struct r600_pipe_compute *)state;
170 }
171
172 /* The kernel parameters are stored in a vtx buffer (ID=0). Besides the explicit
173 * kernel parameters, there are implicit parameters that need to be stored
174 * in the vertex buffer as well. Here is how these parameters are organized in
175 * the buffer:
176 *
177 * DWORDS 0-2: Number of work groups in each dimension (x,y,z)
178 * DWORDS 3-5: Number of global work items in each dimension (x,y,z)
179 * DWORDS 6-8: Number of work items within each work group in each dimension
180 * (x,y,z)
181 * DWORDS 9+ : Kernel parameters
182 */
183 void evergreen_compute_upload_input(
184 struct pipe_context *ctx_,
185 const uint *block_layout,
186 const uint *grid_layout,
187 const void *input)
188 {
189 struct r600_context *ctx = (struct r600_context *)ctx_;
190 struct r600_pipe_compute *shader = ctx->cs_shader_state.shader;
191 int i;
192 unsigned kernel_parameters_offset_bytes = 36;
193 uint32_t * num_work_groups_start;
194 uint32_t * global_size_start;
195 uint32_t * local_size_start;
196 uint32_t * kernel_parameters_start;
197
198 if (shader->input_size == 0) {
199 return;
200 }
201
202 if (!shader->kernel_param) {
203 unsigned buffer_size = shader->input_size;
204
205 /* Add space for the grid dimensions */
206 buffer_size += kernel_parameters_offset_bytes;
207 shader->kernel_param = r600_compute_buffer_alloc_vram(
208 ctx->screen, buffer_size);
209 }
210
211 num_work_groups_start = r600_buffer_mmap_sync_with_rings(ctx, shader->kernel_param, PIPE_TRANSFER_WRITE);
212 global_size_start = num_work_groups_start + (3 * (sizeof(uint)) / 4);
213 local_size_start = global_size_start + (3 * (sizeof(uint)) / 4);
214 kernel_parameters_start = local_size_start + (3 * (sizeof(uint)) / 4);
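/* Each implicit vector above occupies 3 dwords (12 bytes), so the explicit
 * kernel parameters begin at byte offset 36, which is what
 * kernel_parameters_offset_bytes accounts for. */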
215
216 /* Copy the work group size */
217 memcpy(num_work_groups_start, grid_layout, 3 * sizeof(uint));
218
219 /* Copy the global size */
220 for (i = 0; i < 3; i++) {
221 global_size_start[i] = grid_layout[i] * block_layout[i];
222 }
223
224 /* Copy the local dimensions */
225 memcpy(local_size_start, block_layout, 3 * sizeof(uint));
226
227 /* Copy the kernel inputs */
228 memcpy(kernel_parameters_start, input, shader->input_size);
229
230 for (i = 0; i < (kernel_parameters_offset_bytes / 4) +
231 (shader->input_size / 4); i++) {
232 COMPUTE_DBG(ctx->screen, "input %i : %i\n", i,
233 ((unsigned*)num_work_groups_start)[i]);
234 }
235
236 ctx->ws->buffer_unmap(shader->kernel_param->cs_buf);
237
238 ///ID=0 is reserved for the parameters
239 evergreen_cs_set_vertex_buffer(ctx, 0, 0,
240 (struct pipe_resource*)shader->kernel_param);
241 ///ID=0 is reserved for parameters
242 evergreen_set_const_cache(shader, 0, shader->kernel_param,
243 shader->input_size, 0);
244 }
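
/* Illustrative only: a hypothetical struct (not used by the driver) showing
 * the 36-byte implicit-parameter block documented above
 * evergreen_compute_upload_input(). */
struct illustrative_compute_input_prefix {
	uint32_t num_work_groups[3]; /* DWORDS 0-2: work groups per dimension */
	uint32_t global_size[3];     /* DWORDS 3-5: global work items (x,y,z) */
	uint32_t local_size[3];      /* DWORDS 6-8: work items per work group */
	/* DWORDS 9+: the kernel's explicit parameters follow immediately. */
};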
245
246 static void evergreen_emit_direct_dispatch(
247 struct r600_context *rctx,
248 const uint *block_layout, const uint *grid_layout)
249 {
250 int i;
251 struct radeon_winsys_cs *cs = rctx->rings.gfx.cs;
252 unsigned num_waves;
253 unsigned num_pipes = rctx->screen->info.r600_max_pipes;
254 unsigned wave_divisor = (16 * num_pipes);
255 int group_size = 1;
256 int grid_size = 1;
257 /* XXX: Enable lds and get size from cs_shader_state */
258 unsigned lds_size = 0;
259
260 /* Calculate group_size/grid_size */
261 for (i = 0; i < 3; i++) {
262 group_size *= block_layout[i];
263 }
264
265 for (i = 0; i < 3; i++) {
266 grid_size *= grid_layout[i];
267 }
268
269 /* num_waves = ceil((tg_size.x * tg_size.y * tg_size.z) / (16 * num_pipes)) */
270 num_waves = (block_layout[0] * block_layout[1] * block_layout[2] +
271 wave_divisor - 1) / wave_divisor;
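/* Worked example with illustrative numbers: a 16x16x1 thread block on a part
 * with 4 pipes gives wave_divisor = 16 * 4 = 64, so
 * num_waves = ceil(256 / 64) = 4 wavefronts per thread block. */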
272
273 COMPUTE_DBG(rctx->screen, "Using %u pipes, there are %u wavefronts per thread block\n",
274 num_pipes, num_waves);
275
276 /* XXX: Partition the LDS between PS/CS. By default half (4096 dwords
277 * on Evergreen) goes to Pixel Shaders and half goes to Compute Shaders.
278 * We may need to allocate the entire LDS space for Compute Shaders.
279 *
280 * EG: R_008E2C_SQ_LDS_RESOURCE_MGMT := S_008E2C_NUM_LS_LDS(lds_dwords)
281 * CM: CM_R_0286FC_SPI_LDS_MGMT := S_0286FC_NUM_LS_LDS(lds_dwords)
282 */
283
284 r600_write_config_reg(cs, R_008970_VGT_NUM_INDICES, group_size);
285
286 r600_write_config_reg_seq(cs, R_00899C_VGT_COMPUTE_START_X, 3);
287 r600_write_value(cs, 0); /* R_00899C_VGT_COMPUTE_START_X */
288 r600_write_value(cs, 0); /* R_0089A0_VGT_COMPUTE_START_Y */
289 r600_write_value(cs, 0); /* R_0089A4_VGT_COMPUTE_START_Z */
290
291 r600_write_config_reg(cs, R_0089AC_VGT_COMPUTE_THREAD_GROUP_SIZE,
292 group_size);
293
294 r600_write_compute_context_reg_seq(cs, R_0286EC_SPI_COMPUTE_NUM_THREAD_X, 3);
295 r600_write_value(cs, block_layout[0]); /* R_0286EC_SPI_COMPUTE_NUM_THREAD_X */
296 r600_write_value(cs, block_layout[1]); /* R_0286F0_SPI_COMPUTE_NUM_THREAD_Y */
297 r600_write_value(cs, block_layout[2]); /* R_0286F4_SPI_COMPUTE_NUM_THREAD_Z */
298
299 r600_write_compute_context_reg(cs, CM_R_0288E8_SQ_LDS_ALLOC,
300 lds_size | (num_waves << 14));
301
302 /* Dispatch packet */
303 r600_write_value(cs, PKT3C(PKT3_DISPATCH_DIRECT, 3, 0));
304 r600_write_value(cs, grid_layout[0]);
305 r600_write_value(cs, grid_layout[1]);
306 r600_write_value(cs, grid_layout[2]);
307 /* VGT_DISPATCH_INITIATOR = COMPUTE_SHADER_EN */
308 r600_write_value(cs, 1);
309 }
310
311 static void compute_emit_cs(struct r600_context *ctx, const uint *block_layout,
312 const uint *grid_layout)
313 {
314 struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;
315 unsigned flush_flags = 0;
316 int i;
317 struct evergreen_compute_resource *resources =
318 ctx->cs_shader_state.shader->resources;
319
320 /* make sure that the gfx ring is the only one active */
321 if (ctx->rings.dma.cs) {
322 ctx->rings.dma.flush(ctx, RADEON_FLUSH_ASYNC);
323 }
324
325 /* Initialize all the compute-related registers.
326 *
327 * See evergreen_init_atom_start_compute_cs() in this file for the list
328 * of registers initialized by the start_compute_cs_cmd atom.
329 */
330 r600_emit_command_buffer(cs, &ctx->start_compute_cs_cmd);
331
332 ctx->flags |= R600_CONTEXT_WAIT_3D_IDLE | R600_CONTEXT_FLUSH_AND_INV;
333 r600_flush_emit(ctx);
334
335 /* Emit colorbuffers. */
336 for (i = 0; i < ctx->framebuffer.state.nr_cbufs; i++) {
337 struct r600_surface *cb = (struct r600_surface*)ctx->framebuffer.state.cbufs[i];
338 unsigned reloc = r600_context_bo_reloc(ctx, &ctx->rings.gfx,
339 (struct r600_resource*)cb->base.texture,
340 RADEON_USAGE_READWRITE);
341
342 r600_write_compute_context_reg_seq(cs, R_028C60_CB_COLOR0_BASE + i * 0x3C, 7);
343 r600_write_value(cs, cb->cb_color_base); /* R_028C60_CB_COLOR0_BASE */
344 r600_write_value(cs, cb->cb_color_pitch); /* R_028C64_CB_COLOR0_PITCH */
345 r600_write_value(cs, cb->cb_color_slice); /* R_028C68_CB_COLOR0_SLICE */
346 r600_write_value(cs, cb->cb_color_view); /* R_028C6C_CB_COLOR0_VIEW */
347 r600_write_value(cs, cb->cb_color_info); /* R_028C70_CB_COLOR0_INFO */
348 r600_write_value(cs, cb->cb_color_attrib); /* R_028C74_CB_COLOR0_ATTRIB */
349 r600_write_value(cs, cb->cb_color_dim); /* R_028C78_CB_COLOR0_DIM */
350
351 r600_write_value(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C60_CB_COLOR0_BASE */
352 r600_write_value(cs, reloc);
353
354 if (!ctx->keep_tiling_flags) {
355 r600_write_value(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C70_CB_COLOR0_INFO */
356 r600_write_value(cs, reloc);
357 }
358
359 r600_write_value(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C74_CB_COLOR0_ATTRIB */
360 r600_write_value(cs, reloc);
361 }
362
363 /* Set CB_TARGET_MASK XXX: Use cb_misc_state */
364 r600_write_compute_context_reg(cs, R_028238_CB_TARGET_MASK,
365 ctx->compute_cb_target_mask);
366
367
368 /* Emit vertex buffer state */
369 ctx->cs_vertex_buffer_state.atom.num_dw = 12 * util_bitcount(ctx->cs_vertex_buffer_state.dirty_mask);
370 r600_emit_atom(ctx, &ctx->cs_vertex_buffer_state.atom);
371
372 /* Emit compute shader state */
373 r600_emit_atom(ctx, &ctx->cs_shader_state.atom);
374
375 for (i = 0; i < get_compute_resource_num(); i++) {
376 if (resources[i].enabled) {
377 int j;
378 COMPUTE_DBG(ctx->screen, "resnum: %i, cdw: %i\n", i, cs->cdw);
379
380 for (j = 0; j < resources[i].cs_end; j++) {
381 if (resources[i].do_reloc[j]) {
382 assert(resources[i].bo);
383 evergreen_emit_ctx_reloc(ctx,
384 resources[i].bo,
385 resources[i].usage);
386 }
387
388 cs->buf[cs->cdw++] = resources[i].cs[j];
389 }
390
391 if (resources[i].bo) {
392 evergreen_emit_ctx_reloc(ctx,
393 resources[i].bo,
394 resources[i].usage);
395
396 ///special case for textures
397 if (resources[i].do_reloc
398 [resources[i].cs_end] == 2) {
399 evergreen_emit_ctx_reloc(ctx,
400 resources[i].bo,
401 resources[i].usage);
402 }
403 }
404 }
405 }
406
407 /* Emit dispatch state and dispatch packet */
408 evergreen_emit_direct_dispatch(ctx, block_layout, grid_layout);
409
410 /* XXX evergreen_flush_emit() hardcodes the CP_COHER_SIZE to 0xffffffff
411 */
412 ctx->flags |= R600_CONTEXT_INVAL_READ_CACHES;
413 r600_flush_emit(ctx);
414
415 #if 0
416 COMPUTE_DBG(ctx->screen, "cdw: %i\n", cs->cdw);
417 for (i = 0; i < cs->cdw; i++) {
418 COMPUTE_DBG(ctx->screen, "%4i : 0x%08X\n", i, cs->buf[i]);
419 }
420 #endif
421
422 flush_flags = RADEON_FLUSH_ASYNC | RADEON_FLUSH_COMPUTE;
423 if (ctx->keep_tiling_flags) {
424 flush_flags |= RADEON_FLUSH_KEEP_TILING_FLAGS;
425 }
426
427 ctx->ws->cs_flush(ctx->rings.gfx.cs, flush_flags);
428
429 ctx->flags = 0;
430
431 COMPUTE_DBG(ctx->screen, "shader started\n");
432 }
433
434
435 /**
436 * Emit function for r600_cs_shader_state atom
437 */
438 void evergreen_emit_cs_shader(
439 struct r600_context *rctx,
440 struct r600_atom *atom)
441 {
442 struct r600_cs_shader_state *state =
443 (struct r600_cs_shader_state*)atom;
444 struct r600_pipe_compute *shader = state->shader;
445 struct r600_kernel *kernel = &shader->kernels[state->kernel_index];
446 struct radeon_winsys_cs *cs = rctx->rings.gfx.cs;
447 uint64_t va;
448
449 va = r600_resource_va(&rctx->screen->screen, &kernel->code_bo->b.b);
450
451 r600_write_compute_context_reg_seq(cs, R_0288D0_SQ_PGM_START_LS, 3);
452 r600_write_value(cs, va >> 8); /* R_0288D0_SQ_PGM_START_LS */
453 r600_write_value(cs, /* R_0288D4_SQ_PGM_RESOURCES_LS */
454 S_0288D4_NUM_GPRS(kernel->bc.ngpr)
455 | S_0288D4_STACK_SIZE(kernel->bc.nstack));
456 r600_write_value(cs, 0); /* R_0288D8_SQ_PGM_RESOURCES_LS_2 */
457
458 r600_write_value(cs, PKT3C(PKT3_NOP, 0, 0));
459 r600_write_value(cs, r600_context_bo_reloc(rctx, &rctx->rings.gfx,
460 kernel->code_bo, RADEON_USAGE_READ));
461
462 rctx->flags |= R600_CONTEXT_INVAL_READ_CACHES;
463 }
464
465 static void evergreen_launch_grid(
466 struct pipe_context *ctx_,
467 const uint *block_layout, const uint *grid_layout,
468 uint32_t pc, const void *input)
469 {
470 struct r600_context *ctx = (struct r600_context *)ctx_;
471
472 #ifdef HAVE_OPENCL
473 COMPUTE_DBG(ctx->screen, "*** evergreen_launch_grid: pc = %u\n", pc);
474
475 struct r600_pipe_compute *shader = ctx->cs_shader_state.shader;
476 if (!shader->kernels[pc].code_bo) {
477 void *p;
478 struct r600_kernel *kernel = &shader->kernels[pc];
479 r600_compute_shader_create(ctx_, kernel->llvm_module, &kernel->bc);
480 kernel->code_bo = r600_compute_buffer_alloc_vram(ctx->screen,
481 kernel->bc.ndw * 4);
482 p = r600_buffer_mmap_sync_with_rings(ctx, kernel->code_bo, PIPE_TRANSFER_WRITE);
483 memcpy(p, kernel->bc.bytecode, kernel->bc.ndw * 4);
484 ctx->ws->buffer_unmap(kernel->code_bo->cs_buf);
485 }
486 #endif
487
488 ctx->cs_shader_state.kernel_index = pc;
489 evergreen_compute_upload_input(ctx_, block_layout, grid_layout, input);
490 compute_emit_cs(ctx, block_layout, grid_layout);
491 }
492
493 static void evergreen_set_compute_resources(struct pipe_context * ctx_,
494 unsigned start, unsigned count,
495 struct pipe_surface ** surfaces)
496 {
497 struct r600_context *ctx = (struct r600_context *)ctx_;
498 struct r600_surface **resources = (struct r600_surface **)surfaces;
499
500 COMPUTE_DBG(ctx->screen, "*** evergreen_set_compute_resources: start = %u count = %u\n",
501 start, count);
502
503 for (int i = 0; i < count; i++) {
504 /* The first two vertex buffers are reserved for parameters and
505 * global buffers. */
506 unsigned vtx_id = 2 + i;
507 if (resources[i]) {
508 struct r600_resource_global *buffer =
509 (struct r600_resource_global*)
510 resources[i]->base.texture;
511 if (resources[i]->base.writable) {
512 assert(i+1 < 12);
513
514 evergreen_set_rat(ctx->cs_shader_state.shader, i+1,
515 (struct r600_resource *)resources[i]->base.texture,
516 buffer->chunk->start_in_dw*4,
517 resources[i]->base.texture->width0);
518 }
519
520 evergreen_cs_set_vertex_buffer(ctx, vtx_id,
521 buffer->chunk->start_in_dw * 4,
522 resources[i]->base.texture);
523 }
524 }
525 }
526
527 static void evergreen_set_cs_sampler_view(struct pipe_context *ctx_,
528 unsigned start_slot, unsigned count,
529 struct pipe_sampler_view **views)
530 {
531 struct r600_context *ctx = (struct r600_context *)ctx_;
532 struct r600_pipe_sampler_view **resource =
533 (struct r600_pipe_sampler_view **)views;
534
535 for (int i = 0; i < count; i++) {
536 if (resource[i]) {
537 assert(i+1 < 12);
538 ///FETCH0 = VTX0 (param buffer),
539 //FETCH1 = VTX1 (global buffer pool), FETCH2... = TEX
540 evergreen_set_tex_resource(ctx->cs_shader_state.shader, resource[i], i+2);
541 }
542 }
543 }
544
545 static void evergreen_bind_compute_sampler_states(
546 struct pipe_context *ctx_,
547 unsigned start_slot,
548 unsigned num_samplers,
549 void **samplers_)
550 {
551 struct r600_context *ctx = (struct r600_context *)ctx_;
552 struct compute_sampler_state ** samplers =
553 (struct compute_sampler_state **)samplers_;
554
555 for (int i = 0; i < num_samplers; i++) {
556 if (samplers[i]) {
557 evergreen_set_sampler_resource(
558 ctx->cs_shader_state.shader, samplers[i], i);
559 }
560 }
561 }
562
563 static void evergreen_set_global_binding(
564 struct pipe_context *ctx_, unsigned first, unsigned n,
565 struct pipe_resource **resources,
566 uint32_t **handles)
567 {
568 struct r600_context *ctx = (struct r600_context *)ctx_;
569 struct compute_memory_pool *pool = ctx->screen->global_pool;
570 struct r600_resource_global **buffers =
571 (struct r600_resource_global **)resources;
572
573 COMPUTE_DBG(ctx->screen, "*** evergreen_set_global_binding first = %u n = %u\n",
574 first, n);
575
576 if (!resources) {
577 /* XXX: Unset */
578 return;
579 }
580
581 compute_memory_finalize_pending(pool, ctx_);
582
583 for (int i = 0; i < n; i++)
584 {
585 assert(resources[i]->target == PIPE_BUFFER);
586 assert(resources[i]->bind & PIPE_BIND_GLOBAL);
587
588 *(handles[i]) = buffers[i]->chunk->start_in_dw * 4;
589 }
590
591 evergreen_set_rat(ctx->cs_shader_state.shader, 0, pool->bo, 0, pool->size_in_dw * 4);
592 evergreen_cs_set_vertex_buffer(ctx, 1, 0,
593 (struct pipe_resource*)pool->bo);
594 }
595
596 /**
597 * This function initializes all the compute-specific registers that need to
598 * be initialized for each compute command stream. Registers that are common
599 * to both compute and 3D will be initialized at the beginning of each compute
600 * command stream by the start_cs_cmd atom. However, since the SET_CONTEXT_REG
601 * packet requires that the shader type bit be set, we must initialize all
602 * context registers needed for compute in this function. The registers
603 * initialized by the start_cs_cmd atom can be found in evergreen_state.c in the
604 * functions evergreen_init_atom_start_cs or cayman_init_atom_start_cs depending
605 * on the GPU family.
606 */
607 void evergreen_init_atom_start_compute_cs(struct r600_context *ctx)
608 {
609 struct r600_command_buffer *cb = &ctx->start_compute_cs_cmd;
610 int num_threads;
611 int num_stack_entries;
612
613 /* since all required registers are initialized in the
614 * start_compute_cs_cmd atom, we can EMIT_EARLY here.
615 */
616 r600_init_command_buffer(cb, 256);
617 cb->pkt_flags = RADEON_CP_PACKET3_COMPUTE_MODE;
618
619 /* This must be first. */
620 r600_store_value(cb, PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
621 r600_store_value(cb, 0x80000000);
622 r600_store_value(cb, 0x80000000);
623
624 /* We're setting config registers here. */
625 r600_store_value(cb, PKT3(PKT3_EVENT_WRITE, 0, 0));
626 r600_store_value(cb, EVENT_TYPE(EVENT_TYPE_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
627
628 switch (ctx->family) {
629 case CHIP_CEDAR:
630 default:
631 num_threads = 128;
632 num_stack_entries = 256;
633 break;
634 case CHIP_REDWOOD:
635 num_threads = 128;
636 num_stack_entries = 256;
637 break;
638 case CHIP_JUNIPER:
639 num_threads = 128;
640 num_stack_entries = 512;
641 break;
642 case CHIP_CYPRESS:
643 case CHIP_HEMLOCK:
644 num_threads = 128;
645 num_stack_entries = 512;
646 break;
647 case CHIP_PALM:
648 num_threads = 128;
649 num_stack_entries = 256;
650 break;
651 case CHIP_SUMO:
652 num_threads = 128;
653 num_stack_entries = 256;
654 break;
655 case CHIP_SUMO2:
656 num_threads = 128;
657 num_stack_entries = 512;
658 break;
659 case CHIP_BARTS:
660 num_threads = 128;
661 num_stack_entries = 512;
662 break;
663 case CHIP_TURKS:
664 num_threads = 128;
665 num_stack_entries = 256;
666 break;
667 case CHIP_CAICOS:
668 num_threads = 128;
669 num_stack_entries = 256;
670 break;
671 }
672
673 /* Config Registers */
674 if (ctx->chip_class < CAYMAN)
675 evergreen_init_common_regs(cb, ctx->chip_class, ctx->family,
676 ctx->screen->info.drm_minor);
677 else
678 cayman_init_common_regs(cb, ctx->chip_class, ctx->family,
679 ctx->screen->info.drm_minor);
680
681 /* The primitive type always needs to be POINTLIST for compute. */
682 r600_store_config_reg(cb, R_008958_VGT_PRIMITIVE_TYPE,
683 V_008958_DI_PT_POINTLIST);
684
685 if (ctx->chip_class < CAYMAN) {
686
687 /* These registers control which simds can be used by each stage.
688 * The default for these registers is 0xffffffff, which means
689 * all simds are available for each stage. It's possible we may
690 * want to play around with these in the future, but for now
691 * the default value is fine.
692 *
693 * R_008E20_SQ_STATIC_THREAD_MGMT1
694 * R_008E24_SQ_STATIC_THREAD_MGMT2
695 * R_008E28_SQ_STATIC_THREAD_MGMT3
696 */
697
698 /* XXX: We may need to adjust the thread and stack resource
699 * values for 3D/compute interop */
700
701 r600_store_config_reg_seq(cb, R_008C18_SQ_THREAD_RESOURCE_MGMT_1, 5);
702
703 /* R_008C18_SQ_THREAD_RESOURCE_MGMT_1
704 * Set the number of threads used by the PS/VS/GS/ES stage to
705 * 0.
706 */
707 r600_store_value(cb, 0);
708
709 /* R_008C1C_SQ_THREAD_RESOURCE_MGMT_2
710 * Set the number of threads used by the CS (aka LS) stage to
711 * the maximum number of threads and set the number of threads
712 * for the HS stage to 0. */
713 r600_store_value(cb, S_008C1C_NUM_LS_THREADS(num_threads));
714
715 /* R_008C20_SQ_STACK_RESOURCE_MGMT_1
716 * Set the Control Flow stack entries to 0 for PS/VS stages */
717 r600_store_value(cb, 0);
718
719 /* R_008C24_SQ_STACK_RESOURCE_MGMT_2
720 * Set the Control Flow stack entries to 0 for GS/ES stages */
721 r600_store_value(cb, 0);
722
723 /* R_008C28_SQ_STACK_RESOURCE_MGMT_3
724 * Set the Control Flow stack entries to 0 for the HS stage, and
725 * set it to the maximum value for the CS (aka LS) stage. */
726 r600_store_value(cb,
727 S_008C28_NUM_LS_STACK_ENTRIES(num_stack_entries));
728 }
729
730 /* Context Registers */
731
732 if (ctx->chip_class < CAYMAN) {
733 /* workaround for hw issues with dyn gpr - must set all limits
734 * to 240 instead of 0, 0x1e == 240 / 8
735 */
736 r600_store_context_reg(cb, R_028838_SQ_DYN_GPR_RESOURCE_LIMIT_1,
737 S_028838_PS_GPRS(0x1e) |
738 S_028838_VS_GPRS(0x1e) |
739 S_028838_GS_GPRS(0x1e) |
740 S_028838_ES_GPRS(0x1e) |
741 S_028838_HS_GPRS(0x1e) |
742 S_028838_LS_GPRS(0x1e));
743 }
744
745 /* XXX: Investigate setting bit 15, which is FAST_COMPUTE_MODE */
746 r600_store_context_reg(cb, R_028A40_VGT_GS_MODE,
747 S_028A40_COMPUTE_MODE(1) | S_028A40_PARTIAL_THD_AT_EOI(1));
748
749 r600_store_context_reg(cb, R_028B54_VGT_SHADER_STAGES_EN, 2/*CS_ON*/);
750
751 r600_store_context_reg(cb, R_0286E8_SPI_COMPUTE_INPUT_CNTL,
752 S_0286E8_TID_IN_GROUP_ENA
753 | S_0286E8_TGID_ENA
754 | S_0286E8_DISABLE_INDEX_PACK)
755 ;
756
757 /* The LOOP_CONST registers are an optimization for loops that allows
758 * you to store the initial counter, increment value, and maximum
759 * counter value in a register so that hardware can calculate the
760 * correct number of iterations for the loop, so that you don't need
761 * to have the loop counter in your shader code. We don't currently use
762 * this optimization, so we must keep track of the counter in the
763 * shader and use a break instruction to exit loops. However, the
764 * hardware will still use this register to determine when to exit a
765 * loop, so we need to initialize the counter to 0, set the increment
766 * value to 1 and the maximum counter value to 4095 (0xfff), which
767 * is the maximum value allowed. This gives us a maximum of 4096
768 * iterations for our loops, but hopefully our break instruction will
769 * execute some time before the 4096th iteration.
770 */
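/* Reading aid for the magic value below: per the comment above, 0x1000FFF
 * encodes an increment of 1, an initial counter of 0, and a maximum count
 * of 0xFFF (4095) in the SQ_LOOP_CONST word. */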
771 eg_store_loop_const(cb, R_03A200_SQ_LOOP_CONST_0 + (160 * 4), 0x1000FFF);
772 }
773
774 void evergreen_init_compute_state_functions(struct r600_context *ctx)
775 {
776 ctx->context.create_compute_state = evergreen_create_compute_state;
777 ctx->context.delete_compute_state = evergreen_delete_compute_state;
778 ctx->context.bind_compute_state = evergreen_bind_compute_state;
779 // ctx->context.create_sampler_view = evergreen_compute_create_sampler_view;
780 ctx->context.set_compute_resources = evergreen_set_compute_resources;
781 ctx->context.set_compute_sampler_views = evergreen_set_cs_sampler_view;
782 ctx->context.bind_compute_sampler_states = evergreen_bind_compute_sampler_states;
783 ctx->context.set_global_binding = evergreen_set_global_binding;
784 ctx->context.launch_grid = evergreen_launch_grid;
785
786 /* We always use at least two vertex buffers for compute, one for
787 * parameters and one for global memory */
788 ctx->cs_vertex_buffer_state.enabled_mask =
789 ctx->cs_vertex_buffer_state.dirty_mask = 1 | 2;
790 }
791
792
793 struct pipe_resource *r600_compute_global_buffer_create(
794 struct pipe_screen *screen,
795 const struct pipe_resource *templ)
796 {
797 struct r600_resource_global* result = NULL;
798 struct r600_screen* rscreen = NULL;
799 int size_in_dw = 0;
800
801 assert(templ->target == PIPE_BUFFER);
802 assert(templ->bind & PIPE_BIND_GLOBAL);
803 assert(templ->array_size == 1 || templ->array_size == 0);
804 assert(templ->depth0 == 1 || templ->depth0 == 0);
805 assert(templ->height0 == 1 || templ->height0 == 0);
806
807 result = (struct r600_resource_global*)
808 CALLOC(sizeof(struct r600_resource_global), 1);
809 rscreen = (struct r600_screen*)screen;
810
811 COMPUTE_DBG(rscreen, "*** r600_compute_global_buffer_create\n");
812 COMPUTE_DBG(rscreen, "width = %u array_size = %u\n", templ->width0,
813 templ->array_size);
814
815 result->base.b.vtbl = &r600_global_buffer_vtbl;
816 result->base.b.b.screen = screen;
817 result->base.b.b = *templ;
818 pipe_reference_init(&result->base.b.b.reference, 1);
819
820 size_in_dw = (templ->width0+3) / 4;
821
822 result->chunk = compute_memory_alloc(rscreen->global_pool, size_in_dw);
823
824 if (result->chunk == NULL)
825 {
826 free(result);
827 return NULL;
828 }
829
830 return &result->base.b.b;
831 }
832
833 void r600_compute_global_buffer_destroy(
834 struct pipe_screen *screen,
835 struct pipe_resource *res)
836 {
837 struct r600_resource_global* buffer = NULL;
838 struct r600_screen* rscreen = NULL;
839
840 assert(res->target == PIPE_BUFFER);
841 assert(res->bind & PIPE_BIND_GLOBAL);
842
843 buffer = (struct r600_resource_global*)res;
844 rscreen = (struct r600_screen*)screen;
845
846 compute_memory_free(rscreen->global_pool, buffer->chunk->id);
847
848 buffer->chunk = NULL;
849 free(res);
850 }
851
852 void *r600_compute_global_transfer_map(
853 struct pipe_context *ctx_,
854 struct pipe_resource *resource,
855 unsigned level,
856 unsigned usage,
857 const struct pipe_box *box,
858 struct pipe_transfer **ptransfer)
859 {
860 struct r600_context *rctx = (struct r600_context*)ctx_;
861 struct compute_memory_pool *pool = rctx->screen->global_pool;
862 struct pipe_transfer *transfer = util_slab_alloc(&rctx->pool_transfers);
863 struct r600_resource_global* buffer =
864 (struct r600_resource_global*)resource;
865 uint32_t* map;
866
867 compute_memory_finalize_pending(pool, ctx_);
868
869 assert(resource->target == PIPE_BUFFER);
870
871 COMPUTE_DBG(rctx->screen, "* r600_compute_global_get_transfer()\n"
872 "level = %u, usage = %u, box(x = %u, y = %u, z = %u "
873 "width = %u, height = %u, depth = %u)\n", level, usage,
874 box->x, box->y, box->z, box->width, box->height,
875 box->depth);
876
877 transfer->resource = resource;
878 transfer->level = level;
879 transfer->usage = usage;
880 transfer->box = *box;
881 transfer->stride = 0;
882 transfer->layer_stride = 0;
883
884 assert(transfer->resource->target == PIPE_BUFFER);
885 assert(transfer->resource->bind & PIPE_BIND_GLOBAL);
886 assert(transfer->box.x >= 0);
887 assert(transfer->box.y == 0);
888 assert(transfer->box.z == 0);
889
890 ///TODO: do it better, mapping is not possible if the pool is too big
891
892 COMPUTE_DBG(rctx->screen, "* r600_compute_global_transfer_map()\n");
893
894 if (!(map = r600_buffer_mmap_sync_with_rings(rctx, buffer->chunk->pool->bo, transfer->usage))) {
895 util_slab_free(&rctx->pool_transfers, transfer);
896 return NULL;
897 }
898
899 *ptransfer = transfer;
900
901 COMPUTE_DBG(rctx->screen, "Buffer: %p + %u (buffer offset in global memory) "
902 "+ %u (box.x)\n", map, buffer->chunk->start_in_dw, transfer->box.x);
903 return ((char*)(map + buffer->chunk->start_in_dw)) + transfer->box.x;
904 }
905
906 void r600_compute_global_transfer_unmap(
907 struct pipe_context *ctx_,
908 struct pipe_transfer* transfer)
909 {
910 struct r600_context *ctx = NULL;
911 struct r600_resource_global* buffer = NULL;
912
913 assert(transfer->resource->target == PIPE_BUFFER);
914 assert(transfer->resource->bind & PIPE_BIND_GLOBAL);
915
916 ctx = (struct r600_context *)ctx_;
917 buffer = (struct r600_resource_global*)transfer->resource;
918
919 COMPUTE_DBG(ctx->screen, "* r600_compute_global_transfer_unmap()\n");
920
921 ctx->ws->buffer_unmap(buffer->chunk->pool->bo->cs_buf);
922 util_slab_free(&ctx->pool_transfers, transfer);
923 }
924
925 void r600_compute_global_transfer_flush_region(
926 struct pipe_context *ctx_,
927 struct pipe_transfer *transfer,
928 const struct pipe_box *box)
929 {
930 assert(0 && "TODO");
931 }
932
933 void r600_compute_global_transfer_inline_write(
934 struct pipe_context *pipe,
935 struct pipe_resource *resource,
936 unsigned level,
937 unsigned usage,
938 const struct pipe_box *box,
939 const void *data,
940 unsigned stride,
941 unsigned layer_stride)
942 {
943 assert(0 && "TODO");
944 }