r600g: remove deprecated state management code
[mesa.git] / src / gallium / drivers / r600 / evergreen_compute.c
1 /*
2 * Copyright 2011 Adam Rak <adam.rak@streamnovation.com>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Adam Rak <adam.rak@streamnovation.com>
25 */
26
27 #include <stdio.h>
28 #include <errno.h>
29 #include "pipe/p_defines.h"
30 #include "pipe/p_state.h"
31 #include "pipe/p_context.h"
32 #include "util/u_blitter.h"
33 #include "util/u_double_list.h"
34 #include "util/u_transfer.h"
35 #include "util/u_surface.h"
36 #include "util/u_pack_color.h"
37 #include "util/u_memory.h"
38 #include "util/u_inlines.h"
39 #include "util/u_framebuffer.h"
40 #include "pipebuffer/pb_buffer.h"
41 #include "r600.h"
42 #include "evergreend.h"
43 #include "r600_resource.h"
44 #include "r600_shader.h"
45 #include "r600_pipe.h"
46 #include "r600_formats.h"
47 #include "evergreen_compute.h"
48 #include "r600_hw_context_priv.h"
49 #include "evergreen_compute_internal.h"
50 #include "compute_memory_pool.h"
51 #ifdef HAVE_OPENCL
52 #include "llvm_wrapper.h"
53 #endif
54
55 /**
56 RAT0 is for global binding write
57 VTX1 is for global binding read
58
59 for writing images RAT1...
60 for reading images TEX2...
61 TEX2-RAT1 is paired
62
63 TEX2... consumes the same fetch resources that VTX2... would consume
64
65 CONST0 and VTX0 are for parameters
66 CONST0 binds the smaller input parameter buffer and is used for constant indexing;
67 it is also constant cached
68 VTX0 is for indirect/non-constant indexing, or if the input is bigger than
69 the constant cache can handle
70
71 RATs are limited to 12, so we can bind at most 11 textures for writing
72 because we reserve RAT0 for global bindings. With byte addressing enabled,
73 we should reserve another one too => 10 writable image bindings max.
74
75 from Nvidia OpenCL:
76 CL_DEVICE_MAX_READ_IMAGE_ARGS: 128
77 CL_DEVICE_MAX_WRITE_IMAGE_ARGS: 8
78
79 so 10 for writing is enough. 176 is the max for reading according to the docs
80
81 writable images should be listed first, with ids < 10, so that image id corresponds to RAT(id+1)
82 writable images also consume TEX slots, and VTX slots, because of linear indexing
83
84 */
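/* Illustrative summary of how the functions below wire this up:
 *   kernel parameters  -> CONST0 and VTX0 (evergreen_compute_upload_input)
 *   global memory pool -> RAT0 for writes, VTX1 for reads
 *                         (evergreen_set_global_binding)
 *   compute surface i  -> RAT(i+1) when writable, vertex buffer 2+i
 *                         (evergreen_set_compute_resources)
 *   sampler view i     -> fetch resource 2+i (evergreen_set_cs_sampler_view)
 */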
85
86 static void evergreen_cs_set_vertex_buffer(
87 struct r600_context * rctx,
88 unsigned vb_index,
89 unsigned offset,
90 struct pipe_resource * buffer)
91 {
92 struct r600_vertexbuf_state *state = &rctx->cs_vertex_buffer_state;
93 struct pipe_vertex_buffer *vb = &state->vb[vb_index];
94 vb->stride = 1;
95 vb->buffer_offset = offset;
96 vb->buffer = buffer;
97 vb->user_buffer = NULL;
98
99 /* The vertex instructions in the compute shaders use the texture cache,
100 * so we need to invalidate it. */
101 rctx->flags |= R600_CONTEXT_INVAL_READ_CACHES;
102 state->enabled_mask |= 1 << vb_index;
103 state->dirty_mask |= 1 << vb_index;
104 state->atom.dirty = true;
105 }
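/* Vertex buffer slot usage in this file: slot 0 holds the kernel parameter
 * buffer (evergreen_compute_upload_input), slot 1 the global memory pool
 * (evergreen_set_global_binding), and slots 2+ the bound compute resources
 * (evergreen_set_compute_resources). */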
106
107 static const struct u_resource_vtbl r600_global_buffer_vtbl =
108 {
109 u_default_resource_get_handle, /* get_handle */
110 r600_compute_global_buffer_destroy, /* resource_destroy */
111 r600_compute_global_transfer_map, /* transfer_map */
112 r600_compute_global_transfer_flush_region,/* transfer_flush_region */
113 r600_compute_global_transfer_unmap, /* transfer_unmap */
114 r600_compute_global_transfer_inline_write /* transfer_inline_write */
115 };
116
117
118 void *evergreen_create_compute_state(
119 struct pipe_context *ctx_,
120 const struct pipe_compute_state *cso)
121 {
122 struct r600_context *ctx = (struct r600_context *)ctx_;
123 struct r600_pipe_compute *shader = CALLOC_STRUCT(r600_pipe_compute);
124
125 #ifdef HAVE_OPENCL
126 const struct pipe_llvm_program_header * header;
127 const unsigned char * code;
128 unsigned i;
129
130 COMPUTE_DBG(ctx->screen, "*** evergreen_create_compute_state\n");
131
132 header = cso->prog;
133 code = cso->prog + sizeof(struct pipe_llvm_program_header);
134 #endif
135
136 shader->ctx = (struct r600_context*)ctx;
137 shader->resources = (struct evergreen_compute_resource*)
138 CALLOC(sizeof(struct evergreen_compute_resource),
139 get_compute_resource_num());
140 shader->local_size = cso->req_local_mem; ///TODO: assert it
141 shader->private_size = cso->req_private_mem;
142 shader->input_size = cso->req_input_mem;
143
144 #ifdef HAVE_OPENCL
145 shader->num_kernels = llvm_get_num_kernels(code, header->num_bytes);
146 shader->kernels = CALLOC(sizeof(struct r600_kernel), shader->num_kernels);
147
148 for (i = 0; i < shader->num_kernels; i++) {
149 struct r600_kernel *kernel = &shader->kernels[i];
150 kernel->llvm_module = llvm_get_kernel_module(i, code,
151 header->num_bytes);
152 }
153 #endif
154 return shader;
155 }
156
157 void evergreen_delete_compute_state(struct pipe_context *ctx, void* state)
158 {
159 struct r600_pipe_compute *shader = (struct r600_pipe_compute *)state;
160
161 free(shader->resources);
162 free(shader);
163 }
164
165 static void evergreen_bind_compute_state(struct pipe_context *ctx_, void *state)
166 {
167 struct r600_context *ctx = (struct r600_context *)ctx_;
168
169 COMPUTE_DBG(ctx->screen, "*** evergreen_bind_compute_state\n");
170
171 ctx->cs_shader_state.shader = (struct r600_pipe_compute *)state;
172 }
173
174 /* The kernel parameters are stored in a vtx buffer (ID=0). Besides the explicit
175 * kernel parameters, there are implicit parameters that need to be stored
176 * in the vertex buffer as well. Here is how these parameters are organized in
177 * the buffer:
178 *
179 * DWORDS 0-2: Number of work groups in each dimension (x,y,z)
180 * DWORDS 3-5: Number of global work items in each dimension (x,y,z)
181 * DWORDS 6-8: Number of work items within each work group in each dimension
182 * (x,y,z)
183 * DWORDS 9+ : Kernel parameters
184 */
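/* Worked example (illustrative values): for grid_layout = {4, 2, 1} and
 * block_layout = {16, 16, 1} the buffer would start with
 *   DWORDS 0-2: 4, 2, 1     (work groups per dimension)
 *   DWORDS 3-5: 64, 32, 1   (global work items = grid * block)
 *   DWORDS 6-8: 16, 16, 1   (work items per work group)
 * followed by the caller's kernel arguments at DWORD 9.
 */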
185 void evergreen_compute_upload_input(
186 struct pipe_context *ctx_,
187 const uint *block_layout,
188 const uint *grid_layout,
189 const void *input)
190 {
191 struct r600_context *ctx = (struct r600_context *)ctx_;
192 struct r600_pipe_compute *shader = ctx->cs_shader_state.shader;
193 int i;
194 unsigned kernel_parameters_offset_bytes = 36;
195 uint32_t * num_work_groups_start;
196 uint32_t * global_size_start;
197 uint32_t * local_size_start;
198 uint32_t * kernel_parameters_start;
199
200 if (shader->input_size == 0) {
201 return;
202 }
203
204 if (!shader->kernel_param) {
205 unsigned buffer_size = shader->input_size;
206
207 /* Add space for the grid dimensions */
208 buffer_size += kernel_parameters_offset_bytes * sizeof(uint);
209 shader->kernel_param = r600_compute_buffer_alloc_vram(
210 ctx->screen, buffer_size);
211 }
212
213 num_work_groups_start = r600_buffer_mmap_sync_with_rings(ctx, shader->kernel_param, PIPE_TRANSFER_WRITE);
214 global_size_start = num_work_groups_start + (3 * (sizeof(uint) / 4));
215 local_size_start = global_size_start + (3 * (sizeof(uint) / 4));
216 kernel_parameters_start = local_size_start + (3 * (sizeof(uint) / 4));
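/* The three derived pointers above land at dword offsets 3, 6 and 9 of the
 * mapping; 9 dwords is exactly kernel_parameters_offset_bytes (36 bytes). */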
217
218 /* Copy the work group size */
219 memcpy(num_work_groups_start, grid_layout, 3 * sizeof(uint));
220
221 /* Copy the global size */
222 for (i = 0; i < 3; i++) {
223 global_size_start[i] = grid_layout[i] * block_layout[i];
224 }
225
226 /* Copy the local dimensions */
227 memcpy(local_size_start, block_layout, 3 * sizeof(uint));
228
229 /* Copy the kernel inputs */
230 memcpy(kernel_parameters_start, input, shader->input_size);
231
232 for (i = 0; i < (kernel_parameters_offset_bytes / 4) +
233 (shader->input_size / 4); i++) {
234 COMPUTE_DBG(ctx->screen, "input %i : %i\n", i,
235 ((unsigned*)num_work_groups_start)[i]);
236 }
237
238 ctx->ws->buffer_unmap(shader->kernel_param->cs_buf);
239
240 ///ID=0 is reserved for the parameters
241 evergreen_cs_set_vertex_buffer(ctx, 0, 0,
242 (struct pipe_resource*)shader->kernel_param);
243 ///ID=0 is reserved for parameters
244 evergreen_set_const_cache(shader, 0, shader->kernel_param,
245 shader->input_size, 0);
246 }
247
248 static void evergreen_emit_direct_dispatch(
249 struct r600_context *rctx,
250 const uint *block_layout, const uint *grid_layout)
251 {
252 int i;
253 struct radeon_winsys_cs *cs = rctx->rings.gfx.cs;
254 unsigned num_waves;
255 unsigned num_pipes = rctx->screen->info.r600_max_pipes;
256 unsigned wave_divisor = (16 * num_pipes);
257 int group_size = 1;
258 int grid_size = 1;
259 /* XXX: Enable lds and get size from cs_shader_state */
260 unsigned lds_size = 0;
261
262 /* Calculate group_size/grid_size */
263 for (i = 0; i < 3; i++) {
264 group_size *= block_layout[i];
265 }
266
267 for (i = 0; i < 3; i++) {
268 grid_size *= grid_layout[i];
269 }
270
271 /* num_waves = ceil((tg_size.x * tg_size.y * tg_size.z) / (16 * num_pipes)) */
272 num_waves = (block_layout[0] * block_layout[1] * block_layout[2] +
273 wave_divisor - 1) / wave_divisor;
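/* Example: block_layout = {16, 16, 1} (256 threads) on a chip reporting
 * 8 pipes gives num_waves = ceil(256 / 128) = 2. */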
274
275 COMPUTE_DBG(rctx->screen, "Using %u pipes, there are %u wavefronts per thread block\n",
276 num_pipes, num_waves);
277
278 /* XXX: Partition the LDS between PS/CS. By default half (4096 dwords
279 * on Evergreen) goes to Pixel Shaders and half goes to Compute Shaders.
280 * We may need to allocate the entire LDS space for Compute Shaders.
281 *
282 * EG: R_008E2C_SQ_LDS_RESOURCE_MGMT := S_008E2C_NUM_LS_LDS(lds_dwords)
283 * CM: CM_R_0286FC_SPI_LDS_MGMT := S_0286FC_NUM_LS_LDS(lds_dwords)
284 */
285
286 r600_write_config_reg(cs, R_008970_VGT_NUM_INDICES, group_size);
287
288 r600_write_config_reg_seq(cs, R_00899C_VGT_COMPUTE_START_X, 3);
289 r600_write_value(cs, 0); /* R_00899C_VGT_COMPUTE_START_X */
290 r600_write_value(cs, 0); /* R_0089A0_VGT_COMPUTE_START_Y */
291 r600_write_value(cs, 0); /* R_0089A4_VGT_COMPUTE_START_Z */
292
293 r600_write_config_reg(cs, R_0089AC_VGT_COMPUTE_THREAD_GROUP_SIZE,
294 group_size);
295
296 r600_write_compute_context_reg_seq(cs, R_0286EC_SPI_COMPUTE_NUM_THREAD_X, 3);
297 r600_write_value(cs, block_layout[0]); /* R_0286EC_SPI_COMPUTE_NUM_THREAD_X */
298 r600_write_value(cs, block_layout[1]); /* R_0286F0_SPI_COMPUTE_NUM_THREAD_Y */
299 r600_write_value(cs, block_layout[2]); /* R_0286F4_SPI_COMPUTE_NUM_THREAD_Z */
300
301 r600_write_compute_context_reg(cs, CM_R_0288E8_SQ_LDS_ALLOC,
302 lds_size | (num_waves << 14));
303
304 /* Dispatch packet */
305 r600_write_value(cs, PKT3C(PKT3_DISPATCH_DIRECT, 3, 0));
306 r600_write_value(cs, grid_layout[0]);
307 r600_write_value(cs, grid_layout[1]);
308 r600_write_value(cs, grid_layout[2]);
309 /* VGT_DISPATCH_INITIATOR = COMPUTE_SHADER_EN */
310 r600_write_value(cs, 1);
311 }
312
313 static void compute_emit_cs(struct r600_context *ctx, const uint *block_layout,
314 const uint *grid_layout)
315 {
316 struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;
317 unsigned flush_flags = 0;
318 int i;
319 struct r600_resource *onebo = NULL;
320 struct evergreen_compute_resource *resources =
321 ctx->cs_shader_state.shader->resources;
322
323 /* make sure that the gfx ring is the only one active */
324 if (ctx->rings.dma.cs) {
325 ctx->rings.dma.flush(ctx, RADEON_FLUSH_ASYNC);
326 }
327
328 /* Initialize all the compute-related registers.
329 *
330 * See evergreen_init_atom_start_compute_cs() in this file for the list
331 * of registers initialized by the start_compute_cs_cmd atom.
332 */
333 r600_emit_command_buffer(cs, &ctx->start_compute_cs_cmd);
334
335 ctx->flags |= R600_CONTEXT_WAIT_3D_IDLE | R600_CONTEXT_FLUSH_AND_INV;
336 r600_flush_emit(ctx);
337
338 /* Emit colorbuffers. */
339 for (i = 0; i < ctx->framebuffer.state.nr_cbufs; i++) {
340 struct r600_surface *cb = (struct r600_surface*)ctx->framebuffer.state.cbufs[i];
341 unsigned reloc = r600_context_bo_reloc(ctx, &ctx->rings.gfx,
342 (struct r600_resource*)cb->base.texture,
343 RADEON_USAGE_READWRITE);
344
345 r600_write_compute_context_reg_seq(cs, R_028C60_CB_COLOR0_BASE + i * 0x3C, 7);
346 r600_write_value(cs, cb->cb_color_base); /* R_028C60_CB_COLOR0_BASE */
347 r600_write_value(cs, cb->cb_color_pitch); /* R_028C64_CB_COLOR0_PITCH */
348 r600_write_value(cs, cb->cb_color_slice); /* R_028C68_CB_COLOR0_SLICE */
349 r600_write_value(cs, cb->cb_color_view); /* R_028C6C_CB_COLOR0_VIEW */
350 r600_write_value(cs, cb->cb_color_info); /* R_028C70_CB_COLOR0_INFO */
351 r600_write_value(cs, cb->cb_color_attrib); /* R_028C74_CB_COLOR0_ATTRIB */
352 r600_write_value(cs, cb->cb_color_dim); /* R_028C78_CB_COLOR0_DIM */
353
354 r600_write_value(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C60_CB_COLOR0_BASE */
355 r600_write_value(cs, reloc);
356
357 if (!ctx->keep_tiling_flags) {
358 r600_write_value(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C70_CB_COLOR0_INFO */
359 r600_write_value(cs, reloc);
360 }
361
362 r600_write_value(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C74_CB_COLOR0_ATTRIB */
363 r600_write_value(cs, reloc);
364 }
365
366 /* Set CB_TARGET_MASK XXX: Use cb_misc_state */
367 r600_write_compute_context_reg(cs, R_028238_CB_TARGET_MASK,
368 ctx->compute_cb_target_mask);
369
370
371 /* Emit vertex buffer state */
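/* Each dirty CS vertex buffer is worth 12 dwords here, presumably the
 * 8-dword fetch resource plus the SET_RESOURCE header and the relocation
 * NOP emitted by the vertex buffer atom. */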
372 ctx->cs_vertex_buffer_state.atom.num_dw = 12 * util_bitcount(ctx->cs_vertex_buffer_state.dirty_mask);
373 r600_emit_atom(ctx, &ctx->cs_vertex_buffer_state.atom);
374
375 /* Emit compute shader state */
376 r600_emit_atom(ctx, &ctx->cs_shader_state.atom);
377
378 for (i = 0; i < get_compute_resource_num(); i++) {
379 if (resources[i].enabled) {
380 int j;
381 COMPUTE_DBG(ctx->screen, "resnum: %i, cdw: %i\n", i, cs->cdw);
382
383 for (j = 0; j < resources[i].cs_end; j++) {
384 if (resources[i].do_reloc[j]) {
385 assert(resources[i].bo);
386 evergreen_emit_ctx_reloc(ctx,
387 resources[i].bo,
388 resources[i].usage);
389 }
390
391 cs->buf[cs->cdw++] = resources[i].cs[j];
392 }
393
394 if (resources[i].bo) {
395 onebo = resources[i].bo;
396 evergreen_emit_ctx_reloc(ctx,
397 resources[i].bo,
398 resources[i].usage);
399
400 ///special case for textures
401 if (resources[i].do_reloc
402 [resources[i].cs_end] == 2) {
403 evergreen_emit_ctx_reloc(ctx,
404 resources[i].bo,
405 resources[i].usage);
406 }
407 }
408 }
409 }
410
411 /* Emit dispatch state and dispatch packet */
412 evergreen_emit_direct_dispatch(ctx, block_layout, grid_layout);
413
414 /* XXX evergreen_flush_emit() hardcodes the CP_COHER_SIZE to 0xffffffff
415 */
416 ctx->flags |= R600_CONTEXT_INVAL_READ_CACHES;
417 r600_flush_emit(ctx);
418
419 #if 0
420 COMPUTE_DBG(ctx->screen, "cdw: %i\n", cs->cdw);
421 for (i = 0; i < cs->cdw; i++) {
422 COMPUTE_DBG(ctx->screen, "%4i : 0x%08X\n", i, cs->buf[i]);
423 }
424 #endif
425
426 flush_flags = RADEON_FLUSH_ASYNC | RADEON_FLUSH_COMPUTE;
427 if (ctx->keep_tiling_flags) {
428 flush_flags |= RADEON_FLUSH_KEEP_TILING_FLAGS;
429 }
430
431 ctx->ws->cs_flush(ctx->rings.gfx.cs, flush_flags);
432
433 ctx->flags = 0;
434
435 COMPUTE_DBG(ctx->screen, "shader started\n");
436
437 ctx->ws->buffer_wait(onebo->buf, 0);
438
439 COMPUTE_DBG(ctx->screen, "...\n");
440 }
441
442
443 /**
444 * Emit function for r600_cs_shader_state atom
445 */
446 void evergreen_emit_cs_shader(
447 struct r600_context *rctx,
448 struct r600_atom *atom)
449 {
450 struct r600_cs_shader_state *state =
451 (struct r600_cs_shader_state*)atom;
452 struct r600_pipe_compute *shader = state->shader;
453 struct r600_kernel *kernel = &shader->kernels[state->kernel_index];
454 struct radeon_winsys_cs *cs = rctx->rings.gfx.cs;
455 uint64_t va;
456
457 va = r600_resource_va(&rctx->screen->screen, &kernel->code_bo->b.b);
458
459 r600_write_compute_context_reg_seq(cs, R_0288D0_SQ_PGM_START_LS, 3);
460 r600_write_value(cs, va >> 8); /* R_0288D0_SQ_PGM_START_LS */
461 r600_write_value(cs, /* R_0288D4_SQ_PGM_RESOURCES_LS */
462 S_0288D4_NUM_GPRS(kernel->bc.ngpr)
463 | S_0288D4_STACK_SIZE(kernel->bc.nstack));
464 r600_write_value(cs, 0); /* R_0288D8_SQ_PGM_RESOURCES_LS_2 */
465
466 r600_write_value(cs, PKT3C(PKT3_NOP, 0, 0));
467 r600_write_value(cs, r600_context_bo_reloc(rctx, &rctx->rings.gfx,
468 kernel->code_bo, RADEON_USAGE_READ));
469
470 rctx->flags |= R600_CONTEXT_INVAL_READ_CACHES;
471 }
472
473 static void evergreen_launch_grid(
474 struct pipe_context *ctx_,
475 const uint *block_layout, const uint *grid_layout,
476 uint32_t pc, const void *input)
477 {
478 struct r600_context *ctx = (struct r600_context *)ctx_;
479
480 #ifdef HAVE_OPENCL
481 COMPUTE_DBG(ctx->screen, "*** evergreen_launch_grid: pc = %u\n", pc);
482
483 struct r600_pipe_compute *shader = ctx->cs_shader_state.shader;
484 if (!shader->kernels[pc].code_bo) {
485 void *p;
486 struct r600_kernel *kernel = &shader->kernels[pc];
487 r600_compute_shader_create(ctx_, kernel->llvm_module, &kernel->bc);
488 kernel->code_bo = r600_compute_buffer_alloc_vram(ctx->screen,
489 kernel->bc.ndw * 4);
490 p = r600_buffer_mmap_sync_with_rings(ctx, kernel->code_bo, PIPE_TRANSFER_WRITE);
491 memcpy(p, kernel->bc.bytecode, kernel->bc.ndw * 4);
492 ctx->ws->buffer_unmap(kernel->code_bo->cs_buf);
493 }
494 #endif
495
496 ctx->cs_shader_state.kernel_index = pc;
497 evergreen_compute_upload_input(ctx_, block_layout, grid_layout, input);
498 compute_emit_cs(ctx, block_layout, grid_layout);
499 }
500
501 static void evergreen_set_compute_resources(struct pipe_context * ctx_,
502 unsigned start, unsigned count,
503 struct pipe_surface ** surfaces)
504 {
505 struct r600_context *ctx = (struct r600_context *)ctx_;
506 struct r600_surface **resources = (struct r600_surface **)surfaces;
507
508 COMPUTE_DBG(ctx->screen, "*** evergreen_set_compute_resources: start = %u count = %u\n",
509 start, count);
510
511 for (int i = 0; i < count; i++) {
512 /* The first two vertex buffers are reserved for parameters and
513 * global buffers. */
514 unsigned vtx_id = 2 + i;
515 if (resources[i]) {
516 struct r600_resource_global *buffer =
517 (struct r600_resource_global*)
518 resources[i]->base.texture;
519 if (resources[i]->base.writable) {
520 assert(i+1 < 12);
521
522 evergreen_set_rat(ctx->cs_shader_state.shader, i+1,
523 (struct r600_resource *)resources[i]->base.texture,
524 buffer->chunk->start_in_dw*4,
525 resources[i]->base.texture->width0);
526 }
527
528 evergreen_cs_set_vertex_buffer(ctx, vtx_id,
529 buffer->chunk->start_in_dw * 4,
530 resources[i]->base.texture);
531 }
532 }
533 }
534
535 static void evergreen_set_cs_sampler_view(struct pipe_context *ctx_,
536 unsigned start_slot, unsigned count,
537 struct pipe_sampler_view **views)
538 {
539 struct r600_context *ctx = (struct r600_context *)ctx_;
540 struct r600_pipe_sampler_view **resource =
541 (struct r600_pipe_sampler_view **)views;
542
543 for (int i = 0; i < count; i++) {
544 if (resource[i]) {
545 assert(i+1 < 12);
546 ///FETCH0 = VTX0 (param buffer),
547 //FETCH1 = VTX1 (global buffer pool), FETCH2... = TEX
548 evergreen_set_tex_resource(ctx->cs_shader_state.shader, resource[i], i+2);
549 }
550 }
551 }
552
553 static void evergreen_bind_compute_sampler_states(
554 struct pipe_context *ctx_,
555 unsigned start_slot,
556 unsigned num_samplers,
557 void **samplers_)
558 {
559 struct r600_context *ctx = (struct r600_context *)ctx_;
560 struct compute_sampler_state ** samplers =
561 (struct compute_sampler_state **)samplers_;
562
563 for (int i = 0; i < num_samplers; i++) {
564 if (samplers[i]) {
565 evergreen_set_sampler_resource(
566 ctx->cs_shader_state.shader, samplers[i], i);
567 }
568 }
569 }
570
571 static void evergreen_set_global_binding(
572 struct pipe_context *ctx_, unsigned first, unsigned n,
573 struct pipe_resource **resources,
574 uint32_t **handles)
575 {
576 struct r600_context *ctx = (struct r600_context *)ctx_;
577 struct compute_memory_pool *pool = ctx->screen->global_pool;
578 struct r600_resource_global **buffers =
579 (struct r600_resource_global **)resources;
580
581 COMPUTE_DBG(ctx->screen, "*** evergreen_set_global_binding first = %u n = %u\n",
582 first, n);
583
584 if (!resources) {
585 /* XXX: Unset */
586 return;
587 }
588
589 compute_memory_finalize_pending(pool, ctx_);
590
591 for (int i = 0; i < n; i++)
592 {
593 assert(resources[i]->target == PIPE_BUFFER);
594 assert(resources[i]->bind & PIPE_BIND_GLOBAL);
595
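/* The handle reported back to the state tracker is the byte offset of
 * this buffer inside the global memory pool (start_in_dw * 4). */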
596 *(handles[i]) = buffers[i]->chunk->start_in_dw * 4;
597 }
598
599 evergreen_set_rat(ctx->cs_shader_state.shader, 0, pool->bo, 0, pool->size_in_dw * 4);
600 evergreen_cs_set_vertex_buffer(ctx, 1, 0,
601 (struct pipe_resource*)pool->bo);
602 }
603
604 /**
605 * This function initializes all the compute specific registers that need to
606 * be initialized for each compute command stream. Registers that are common
607 * to both compute and 3D will be initialized at the beginning of each compute
608 * command stream by the start_cs_cmd atom. However, since the SET_CONTEXT_REG
609 * packet requires that the shader type bit be set, we must initialize all
610 * context registers needed for compute in this function. The registers
611 * initialized by the start_cs_cmd atom can be found in evergreen_state.c in the
612 * functions evergreen_init_atom_start_cs or cayman_init_atom_start_cs depending
613 * on the GPU family.
614 */
615 void evergreen_init_atom_start_compute_cs(struct r600_context *ctx)
616 {
617 struct r600_command_buffer *cb = &ctx->start_compute_cs_cmd;
618 int num_threads;
619 int num_stack_entries;
620
621 /* since all required registers are initialised in the
622 * start_compute_cs_cmd atom, we can EMIT_EARLY here.
623 */
624 r600_init_command_buffer(cb, 256);
625 cb->pkt_flags = RADEON_CP_PACKET3_COMPUTE_MODE;
626
627 /* This must be first. */
628 r600_store_value(cb, PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
629 r600_store_value(cb, 0x80000000);
630 r600_store_value(cb, 0x80000000);
631
632 /* We're setting config registers here. */
633 r600_store_value(cb, PKT3(PKT3_EVENT_WRITE, 0, 0));
634 r600_store_value(cb, EVENT_TYPE(EVENT_TYPE_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
635
636 switch (ctx->family) {
637 case CHIP_CEDAR:
638 default:
639 num_threads = 128;
640 num_stack_entries = 256;
641 break;
642 case CHIP_REDWOOD:
643 num_threads = 128;
644 num_stack_entries = 256;
645 break;
646 case CHIP_JUNIPER:
647 num_threads = 128;
648 num_stack_entries = 512;
649 break;
650 case CHIP_CYPRESS:
651 case CHIP_HEMLOCK:
652 num_threads = 128;
653 num_stack_entries = 512;
654 break;
655 case CHIP_PALM:
656 num_threads = 128;
657 num_stack_entries = 256;
658 break;
659 case CHIP_SUMO:
660 num_threads = 128;
661 num_stack_entries = 256;
662 break;
663 case CHIP_SUMO2:
664 num_threads = 128;
665 num_stack_entries = 512;
666 break;
667 case CHIP_BARTS:
668 num_threads = 128;
669 num_stack_entries = 512;
670 break;
671 case CHIP_TURKS:
672 num_threads = 128;
673 num_stack_entries = 256;
674 break;
675 case CHIP_CAICOS:
676 num_threads = 128;
677 num_stack_entries = 256;
678 break;
679 }
680
681 /* Config Registers */
682 if (ctx->chip_class < CAYMAN)
683 evergreen_init_common_regs(cb, ctx->chip_class, ctx->family,
684 ctx->screen->info.drm_minor);
685 else
686 cayman_init_common_regs(cb, ctx->chip_class, ctx->family,
687 ctx->screen->info.drm_minor);
688
689 /* The primitive type always needs to be POINTLIST for compute. */
690 r600_store_config_reg(cb, R_008958_VGT_PRIMITIVE_TYPE,
691 V_008958_DI_PT_POINTLIST);
692
693 if (ctx->chip_class < CAYMAN) {
694
695 /* These registers control which simds can be used by each stage.
696 * The default for these registers is 0xffffffff, which means
697 * all simds are available for each stage. It's possible we may
698 * want to play around with these in the future, but for now
699 * the default value is fine.
700 *
701 * R_008E20_SQ_STATIC_THREAD_MGMT1
702 * R_008E24_SQ_STATIC_THREAD_MGMT2
703 * R_008E28_SQ_STATIC_THREAD_MGMT3
704 */
705
706 /* XXX: We may need to adjust the thread and stack resource
707 * values for 3D/compute interop */
708
709 r600_store_config_reg_seq(cb, R_008C18_SQ_THREAD_RESOURCE_MGMT_1, 5);
710
711 /* R_008C18_SQ_THREAD_RESOURCE_MGMT_1
712 * Set the number of threads used by the PS/VS/GS/ES stage to
713 * 0.
714 */
715 r600_store_value(cb, 0);
716
717 /* R_008C1C_SQ_THREAD_RESOURCE_MGMT_2
718 * Set the number of threads used by the CS (aka LS) stage to
719 * the maximum number of threads and set the number of threads
720 * for the HS stage to 0. */
721 r600_store_value(cb, S_008C1C_NUM_LS_THREADS(num_threads));
722
723 /* R_008C20_SQ_STACK_RESOURCE_MGMT_1
724 * Set the Control Flow stack entries to 0 for PS/VS stages */
725 r600_store_value(cb, 0);
726
727 /* R_008C24_SQ_STACK_RESOURCE_MGMT_2
728 * Set the Control Flow stack entries to 0 for GS/ES stages */
729 r600_store_value(cb, 0);
730
731 /* R_008C28_SQ_STACK_RESOURCE_MGMT_3
732 * Set the Control Flow stack entries to 0 for the HS stage, and
733 * set it to the maximum value for the CS (aka LS) stage. */
734 r600_store_value(cb,
735 S_008C28_NUM_LS_STACK_ENTRIES(num_stack_entries));
736 }
737
738 /* Context Registers */
739
740 if (ctx->chip_class < CAYMAN) {
741 /* workaround for hw issues with dyn gpr - must set all limits
742 * to 240 instead of 0, 0x1e == 240 / 8
743 */
744 r600_store_context_reg(cb, R_028838_SQ_DYN_GPR_RESOURCE_LIMIT_1,
745 S_028838_PS_GPRS(0x1e) |
746 S_028838_VS_GPRS(0x1e) |
747 S_028838_GS_GPRS(0x1e) |
748 S_028838_ES_GPRS(0x1e) |
749 S_028838_HS_GPRS(0x1e) |
750 S_028838_LS_GPRS(0x1e));
751 }
752
753 /* XXX: Investigate setting bit 15, which is FAST_COMPUTE_MODE */
754 r600_store_context_reg(cb, R_028A40_VGT_GS_MODE,
755 S_028A40_COMPUTE_MODE(1) | S_028A40_PARTIAL_THD_AT_EOI(1));
756
757 r600_store_context_reg(cb, R_028B54_VGT_SHADER_STAGES_EN, 2/*CS_ON*/);
758
759 r600_store_context_reg(cb, R_0286E8_SPI_COMPUTE_INPUT_CNTL,
760 S_0286E8_TID_IN_GROUP_ENA
761 | S_0286E8_TGID_ENA
762 | S_0286E8_DISABLE_INDEX_PACK)
763 ;
764
765 * The LOOP_CONST registers are an optimization for loops that allows
766 * you to store the initial counter, increment value, and maximum
767 * counter value in a register so that hardware can calculate the
768 * correct number of iterations for the loop, so that you don't need
769 * to have the loop counter in your shader code. We don't currently use
770 * this optimization, so we must keep track of the counter in the
771 * shader and use a break instruction to exit loops. However, the
772 * hardware will still use this register to determine when to exit a
773 * loop, so we need to initialize the counter to 0, set the increment
774 * value to 1 and the maximum counter value to 4095 (0xfff), which
775 * is the maximum value allowed. This gives us a maximum of 4096
776 * iterations for our loops, but hopefully our break instruction will
777 * execute some time before the 4096th iteration.
778 */
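/* 0x1000FFF appears to pack exactly that: increment 1 in the top byte,
 * initial value 0, and a maximum count of 0xFFF (4095), i.e.
 * (1 << 24) | 0xFFF. */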
779 eg_store_loop_const(cb, R_03A200_SQ_LOOP_CONST_0 + (160 * 4), 0x1000FFF);
780 }
781
782 void evergreen_init_compute_state_functions(struct r600_context *ctx)
783 {
784 ctx->context.create_compute_state = evergreen_create_compute_state;
785 ctx->context.delete_compute_state = evergreen_delete_compute_state;
786 ctx->context.bind_compute_state = evergreen_bind_compute_state;
787 // ctx->context.create_sampler_view = evergreen_compute_create_sampler_view;
788 ctx->context.set_compute_resources = evergreen_set_compute_resources;
789 ctx->context.set_compute_sampler_views = evergreen_set_cs_sampler_view;
790 ctx->context.bind_compute_sampler_states = evergreen_bind_compute_sampler_states;
791 ctx->context.set_global_binding = evergreen_set_global_binding;
792 ctx->context.launch_grid = evergreen_launch_grid;
793
794 /* We always use at least two vertex buffers for compute, one for
795 * parameters and one for global memory */
796 ctx->cs_vertex_buffer_state.enabled_mask =
797 ctx->cs_vertex_buffer_state.dirty_mask = 1 | 2;
798 }
799
800
801 struct pipe_resource *r600_compute_global_buffer_create(
802 struct pipe_screen *screen,
803 const struct pipe_resource *templ)
804 {
805 struct r600_resource_global* result = NULL;
806 struct r600_screen* rscreen = NULL;
807 int size_in_dw = 0;
808
809 assert(templ->target == PIPE_BUFFER);
810 assert(templ->bind & PIPE_BIND_GLOBAL);
811 assert(templ->array_size == 1 || templ->array_size == 0);
812 assert(templ->depth0 == 1 || templ->depth0 == 0);
813 assert(templ->height0 == 1 || templ->height0 == 0);
814
815 result = (struct r600_resource_global*)
816 CALLOC(sizeof(struct r600_resource_global), 1);
817 rscreen = (struct r600_screen*)screen;
818
819 COMPUTE_DBG(rscreen, "*** r600_compute_global_buffer_create\n");
820 COMPUTE_DBG(rscreen, "width = %u array_size = %u\n", templ->width0,
821 templ->array_size);
822
823 result->base.b.vtbl = &r600_global_buffer_vtbl;
824 result->base.b.b.screen = screen;
825 result->base.b.b = *templ;
826 pipe_reference_init(&result->base.b.b.reference, 1);
827
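/* width0 is in bytes; the global pool is managed in dwords, so round the
 * requested size up to a whole number of dwords. */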
828 size_in_dw = (templ->width0+3) / 4;
829
830 result->chunk = compute_memory_alloc(rscreen->global_pool, size_in_dw);
831
832 if (result->chunk == NULL)
833 {
834 free(result);
835 return NULL;
836 }
837
838 return &result->base.b.b;
839 }
840
841 void r600_compute_global_buffer_destroy(
842 struct pipe_screen *screen,
843 struct pipe_resource *res)
844 {
845 struct r600_resource_global* buffer = NULL;
846 struct r600_screen* rscreen = NULL;
847
848 assert(res->target == PIPE_BUFFER);
849 assert(res->bind & PIPE_BIND_GLOBAL);
850
851 buffer = (struct r600_resource_global*)res;
852 rscreen = (struct r600_screen*)screen;
853
854 compute_memory_free(rscreen->global_pool, buffer->chunk->id);
855
856 buffer->chunk = NULL;
857 free(res);
858 }
859
860 void *r600_compute_global_transfer_map(
861 struct pipe_context *ctx_,
862 struct pipe_resource *resource,
863 unsigned level,
864 unsigned usage,
865 const struct pipe_box *box,
866 struct pipe_transfer **ptransfer)
867 {
868 struct r600_context *rctx = (struct r600_context*)ctx_;
869 struct compute_memory_pool *pool = rctx->screen->global_pool;
870 struct pipe_transfer *transfer = util_slab_alloc(&rctx->pool_transfers);
871 struct r600_resource_global* buffer =
872 (struct r600_resource_global*)resource;
873 uint32_t* map;
874
875 compute_memory_finalize_pending(pool, ctx_);
876
877 assert(resource->target == PIPE_BUFFER);
878
879 COMPUTE_DBG(rctx->screen, "* r600_compute_global_transfer_map()\n"
880 "level = %u, usage = %u, box(x = %u, y = %u, z = %u "
881 "width = %u, height = %u, depth = %u)\n", level, usage,
882 box->x, box->y, box->z, box->width, box->height,
883 box->depth);
884
885 transfer->resource = resource;
886 transfer->level = level;
887 transfer->usage = usage;
888 transfer->box = *box;
889 transfer->stride = 0;
890 transfer->layer_stride = 0;
891
892 assert(transfer->resource->target == PIPE_BUFFER);
893 assert(transfer->resource->bind & PIPE_BIND_GLOBAL);
894 assert(transfer->box.x >= 0);
895 assert(transfer->box.y == 0);
896 assert(transfer->box.z == 0);
897
898 ///TODO: do it better, mapping is not possible if the pool is too big
899
900 COMPUTE_DBG(rctx->screen, "* r600_compute_global_transfer_map()\n");
901
902 if (!(map = r600_buffer_mmap_sync_with_rings(rctx, buffer->chunk->pool->bo, transfer->usage))) {
903 util_slab_free(&rctx->pool_transfers, transfer);
904 return NULL;
905 }
906
907 *ptransfer = transfer;
908
909 COMPUTE_DBG(rctx->screen, "Buffer: %p + %u (buffer offset in global memory) "
910 "+ %u (box.x)\n", map, buffer->chunk->start_in_dw, transfer->box.x);
911 return ((char*)(map + buffer->chunk->start_in_dw)) + transfer->box.x;
912 }
913
914 void r600_compute_global_transfer_unmap(
915 struct pipe_context *ctx_,
916 struct pipe_transfer* transfer)
917 {
918 struct r600_context *ctx = NULL;
919 struct r600_resource_global* buffer = NULL;
920
921 assert(transfer->resource->target == PIPE_BUFFER);
922 assert(transfer->resource->bind & PIPE_BIND_GLOBAL);
923
924 ctx = (struct r600_context *)ctx_;
925 buffer = (struct r600_resource_global*)transfer->resource;
926
927 COMPUTE_DBG(ctx->screen, "* r600_compute_global_transfer_unmap()\n");
928
929 ctx->ws->buffer_unmap(buffer->chunk->pool->bo->cs_buf);
930 util_slab_free(&ctx->pool_transfers, transfer);
931 }
932
933 void r600_compute_global_transfer_flush_region(
934 struct pipe_context *ctx_,
935 struct pipe_transfer *transfer,
936 const struct pipe_box *box)
937 {
938 assert(0 && "TODO");
939 }
940
941 void r600_compute_global_transfer_inline_write(
942 struct pipe_context *pipe,
943 struct pipe_resource *resource,
944 unsigned level,
945 unsigned usage,
946 const struct pipe_box *box,
947 const void *data,
948 unsigned stride,
949 unsigned layer_stride)
950 {
951 assert(0 && "TODO");
952 }