r600g/compute: always emit CONTEXT_CONTROL packet at start of CS
[mesa.git] src/gallium/drivers/r600/evergreen_compute.c
/*
 * Copyright 2011 Adam Rak <adam.rak@streamnovation.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Adam Rak <adam.rak@streamnovation.com>
 */

#include <stdio.h>
#include <errno.h>
#include "pipe/p_defines.h"
#include "pipe/p_state.h"
#include "pipe/p_context.h"
#include "util/u_blitter.h"
#include "util/u_double_list.h"
#include "util/u_transfer.h"
#include "util/u_surface.h"
#include "util/u_pack_color.h"
#include "util/u_memory.h"
#include "util/u_inlines.h"
#include "util/u_framebuffer.h"
#include "pipebuffer/pb_buffer.h"
#include "r600.h"
#include "evergreend.h"
#include "r600_resource.h"
#include "r600_shader.h"
#include "r600_pipe.h"
#include "r600_formats.h"
#include "evergreen_compute.h"
#include "r600_hw_context_priv.h"
#include "evergreen_compute_internal.h"
#include "compute_memory_pool.h"
#ifdef HAVE_OPENCL
#include "llvm_wrapper.h"
#endif

/**
 * RAT0 is for global binding writes.
 * VTX1 is for global binding reads.
 *
 * For writing images, RAT1... is used.
 * For reading images, TEX2... is used.
 * TEX2 and RAT1 are paired.
 *
 * TEX2... consumes the same fetch resources that VTX2... would consume.
 *
 * CONST0 and VTX0 are for parameters:
 * CONST0 binds the smaller input parameter buffer and is used for constant
 * indexing; it is also constant-cached.
 * VTX0 is for indirect/non-constant indexing, or if the input is bigger than
 * the constant cache can handle.
 *
 * RATs are limited to 12, so we can bind at most 11 textures for writing,
 * because we reserve RAT0 for global bindings. With byte addressing enabled,
 * we should reserve another one too, leaving a maximum of 10 image bindings
 * for writing.
 *
 * From NVIDIA OpenCL:
 *   CL_DEVICE_MAX_READ_IMAGE_ARGS:  128
 *   CL_DEVICE_MAX_WRITE_IMAGE_ARGS: 8
 *
 * So 10 for writing is enough. 176 is the maximum for reading according to
 * the docs.
 *
 * Writable images should be listed first (< 10), so their id corresponds to
 * RAT(id+1).
 * Writable images also consume TEX slots, and VTX slots too, because of
 * linear indexing.
 */
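
/* Informal summary of the slot assignments described above (derived from
 * the comment and the setters below, not from hardware documentation):
 *
 *   kernel parameters:  CONST0 (constant-cached path) and VTX0 (FETCH0)
 *   global buffer pool: RAT0 for writes, VTX1 (FETCH1) for reads
 *   image i:            RAT(i+1) for writes, TEX(i+2) (FETCH(i+2)) for reads
 */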

static void evergreen_cs_set_vertex_buffer(
	struct r600_context * rctx,
	unsigned vb_index,
	unsigned offset,
	struct pipe_resource * buffer)
{
	struct r600_vertexbuf_state *state = &rctx->cs_vertex_buffer_state;
	struct pipe_vertex_buffer *vb = &state->vb[vb_index];
	vb->stride = 1;
	vb->buffer_offset = offset;
	vb->buffer = buffer;
	vb->user_buffer = NULL;

	/* The vertex instructions in the compute shaders use the texture cache,
	 * so we need to invalidate it. */
	rctx->flags |= R600_CONTEXT_TEX_FLUSH;
	state->enabled_mask |= 1 << vb_index;
	state->dirty_mask |= 1 << vb_index;
	state->atom.dirty = true;
}

static const struct u_resource_vtbl r600_global_buffer_vtbl =
{
	u_default_resource_get_handle, /* get_handle */
	r600_compute_global_buffer_destroy, /* resource_destroy */
	r600_compute_global_transfer_map, /* transfer_map */
	r600_compute_global_transfer_flush_region,/* transfer_flush_region */
	r600_compute_global_transfer_unmap, /* transfer_unmap */
	r600_compute_global_transfer_inline_write /* transfer_inline_write */
};

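/* For HAVE_OPENCL builds, cso->prog is assumed (based on the parsing code
 * below) to be a struct pipe_llvm_program_header immediately followed by
 * header->num_bytes of LLVM bitcode containing the kernels. */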
void *evergreen_create_compute_state(
	struct pipe_context *ctx_,
	const struct pipe_compute_state *cso)
{
	struct r600_context *ctx = (struct r600_context *)ctx_;
	struct r600_pipe_compute *shader = CALLOC_STRUCT(r600_pipe_compute);

#ifdef HAVE_OPENCL
	const struct pipe_llvm_program_header * header;
	const unsigned char * code;
	unsigned i;

	COMPUTE_DBG("*** evergreen_create_compute_state\n");

	header = cso->prog;
	code = cso->prog + sizeof(struct pipe_llvm_program_header);
#endif

	shader->ctx = (struct r600_context*)ctx;
	shader->resources = (struct evergreen_compute_resource*)
		CALLOC(sizeof(struct evergreen_compute_resource),
		get_compute_resource_num());
	shader->local_size = cso->req_local_mem; ///TODO: assert it
	shader->private_size = cso->req_private_mem;
	shader->input_size = cso->req_input_mem;

#ifdef HAVE_OPENCL
	shader->num_kernels = llvm_get_num_kernels(code, header->num_bytes);
	shader->kernels = CALLOC(sizeof(struct r600_kernel), shader->num_kernels);

	for (i = 0; i < shader->num_kernels; i++) {
		struct r600_kernel *kernel = &shader->kernels[i];
		kernel->llvm_module = llvm_get_kernel_module(i, code,
							header->num_bytes);
	}
#endif
	return shader;
}

void evergreen_delete_compute_state(struct pipe_context *ctx, void* state)
{
	struct r600_pipe_compute *shader = (struct r600_pipe_compute *)state;

	free(shader->resources);
	free(shader);
}

static void evergreen_bind_compute_state(struct pipe_context *ctx_, void *state)
{
	struct r600_context *ctx = (struct r600_context *)ctx_;

	COMPUTE_DBG("*** evergreen_bind_compute_state\n");

	ctx->cs_shader_state.shader = (struct r600_pipe_compute *)state;
}

/* The kernel parameters are stored in a vtx buffer (ID=0). Besides the
 * explicit kernel parameters, there are implicit parameters that need to be
 * stored in the vertex buffer as well. Here is how these parameters are
 * organized in the buffer:
 *
 * DWORDS 0-2: Number of work groups in each dimension (x,y,z)
 * DWORDS 3-5: Number of global work items in each dimension (x,y,z)
 * DWORDS 6-8: Number of work items within each work group in each dimension
 *             (x,y,z)
 * DWORDS 9+ : Kernel parameters
 */
void evergreen_compute_upload_input(
	struct pipe_context *ctx_,
	const uint *block_layout,
	const uint *grid_layout,
	const void *input)
{
	struct r600_context *ctx = (struct r600_context *)ctx_;
	struct r600_pipe_compute *shader = ctx->cs_shader_state.shader;
	int i;
	unsigned kernel_parameters_offset_bytes = 36;
	uint32_t * num_work_groups_start;
	uint32_t * global_size_start;
	uint32_t * local_size_start;
	uint32_t * kernel_parameters_start;

	if (shader->input_size == 0) {
		return;
	}

	if (!shader->kernel_param) {
		unsigned buffer_size = shader->input_size;

		/* Add space for the grid dimensions.  The offset is already
		 * in bytes (9 implicit dwords = 36 bytes). */
		buffer_size += kernel_parameters_offset_bytes;
		shader->kernel_param = r600_compute_buffer_alloc_vram(
						ctx->screen, buffer_size);
	}

	num_work_groups_start = ctx->ws->buffer_map(
		shader->kernel_param->cs_buf, ctx->cs, PIPE_TRANSFER_WRITE);
	global_size_start = num_work_groups_start + (3 * (sizeof(uint) / 4));
	local_size_start = global_size_start + (3 * (sizeof(uint) / 4));
	kernel_parameters_start = local_size_start + (3 * (sizeof(uint) / 4));

	/* Copy the number of work groups */
	memcpy(num_work_groups_start, grid_layout, 3 * sizeof(uint));

	/* Copy the global size */
	for (i = 0; i < 3; i++) {
		global_size_start[i] = grid_layout[i] * block_layout[i];
	}

	/* Copy the local dimensions */
	memcpy(local_size_start, block_layout, 3 * sizeof(uint));

	/* Copy the kernel inputs */
	memcpy(kernel_parameters_start, input, shader->input_size);

	for (i = 0; i < (kernel_parameters_offset_bytes / 4) +
			(shader->input_size / 4); i++) {
		COMPUTE_DBG("input %i : %i\n", i,
			((unsigned*)num_work_groups_start)[i]);
	}

	ctx->ws->buffer_unmap(shader->kernel_param->cs_buf);

	/* ID=0 is reserved for the parameters */
	evergreen_cs_set_vertex_buffer(ctx, 0, 0,
			(struct pipe_resource*)shader->kernel_param);
	/* ID=0 is reserved for the parameters */
	evergreen_set_const_cache(shader, 0, shader->kernel_param,
						shader->input_size, 0);
}
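
/* Worked example of the buffer built above (hypothetical launch, values
 * chosen for illustration): for block_layout = {64, 1, 1} and
 * grid_layout = {4, 1, 1} with 8 bytes of kernel arguments, we get:
 *
 *   DWORDS 0-2:  4, 1, 1    (number of work groups)
 *   DWORDS 3-5:  256, 1, 1  (global work size = grid * block per dimension)
 *   DWORDS 6-8:  64, 1, 1   (local work size)
 *   DWORDS 9-10: the two dwords of kernel arguments
 */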

static void evergreen_emit_direct_dispatch(
		struct r600_context *rctx,
		const uint *block_layout, const uint *grid_layout)
{
	int i;
	struct radeon_winsys_cs *cs = rctx->cs;
	unsigned num_waves;
	unsigned num_pipes = rctx->screen->info.r600_max_pipes;
	unsigned wave_divisor = (16 * num_pipes);
	int group_size = 1;
	int grid_size = 1;
	/* XXX: Enable lds and get size from cs_shader_state */
	unsigned lds_size = 0;

	/* Calculate group_size/grid_size */
	for (i = 0; i < 3; i++) {
		group_size *= block_layout[i];
	}

	for (i = 0; i < 3; i++) {
		grid_size *= grid_layout[i];
	}

	/* num_waves = ceil((tg_size.x * tg_size.y * tg_size.z) / (16 * num_pipes)) */
	num_waves = (block_layout[0] * block_layout[1] * block_layout[2] +
			wave_divisor - 1) / wave_divisor;

	COMPUTE_DBG("Using %u pipes, there are %u wavefronts per thread block\n",
							num_pipes, num_waves);
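
	/* Worked example (illustrative numbers, not real hardware state):
	 * a 16x16x1 thread block on an 8-pipe part gives
	 * num_waves = ceil(256 / (16 * 8)) = ceil(256 / 128) = 2. */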

	/* XXX: Partition the LDS between PS/CS.  By default half (4096 dwords
	 * on Evergreen) goes to Pixel Shaders and half goes to Compute Shaders.
	 * We may need to allocate the entire LDS space for Compute Shaders.
	 *
	 * EG: R_008E2C_SQ_LDS_RESOURCE_MGMT := S_008E2C_NUM_LS_LDS(lds_dwords)
	 * CM: CM_R_0286FC_SPI_LDS_MGMT := S_0286FC_NUM_LS_LDS(lds_dwords)
	 */

	r600_write_config_reg(cs, R_008970_VGT_NUM_INDICES, group_size);

	r600_write_config_reg_seq(cs, R_00899C_VGT_COMPUTE_START_X, 3);
	r600_write_value(cs, 0); /* R_00899C_VGT_COMPUTE_START_X */
	r600_write_value(cs, 0); /* R_0089A0_VGT_COMPUTE_START_Y */
	r600_write_value(cs, 0); /* R_0089A4_VGT_COMPUTE_START_Z */

	r600_write_config_reg(cs, R_0089AC_VGT_COMPUTE_THREAD_GROUP_SIZE,
								group_size);

	r600_write_compute_context_reg_seq(cs, R_0286EC_SPI_COMPUTE_NUM_THREAD_X, 3);
	r600_write_value(cs, block_layout[0]); /* R_0286EC_SPI_COMPUTE_NUM_THREAD_X */
	r600_write_value(cs, block_layout[1]); /* R_0286F0_SPI_COMPUTE_NUM_THREAD_Y */
	r600_write_value(cs, block_layout[2]); /* R_0286F4_SPI_COMPUTE_NUM_THREAD_Z */

	r600_write_compute_context_reg(cs, CM_R_0288E8_SQ_LDS_ALLOC,
					lds_size | (num_waves << 14));

	/* Dispatch packet */
	r600_write_value(cs, PKT3C(PKT3_DISPATCH_DIRECT, 3, 0));
	r600_write_value(cs, grid_layout[0]);
	r600_write_value(cs, grid_layout[1]);
	r600_write_value(cs, grid_layout[2]);
	/* VGT_DISPATCH_INITIATOR = COMPUTE_SHADER_EN */
	r600_write_value(cs, 1);
}

static void compute_emit_cs(struct r600_context *ctx, const uint *block_layout,
		const uint *grid_layout)
{
	struct radeon_winsys_cs *cs = ctx->cs;
	unsigned flush_flags = 0;
	int i;

	struct r600_resource *onebo = NULL;
	struct evergreen_compute_resource *resources =
			ctx->cs_shader_state.shader->resources;

	/* Initialize all the compute-related registers.
	 *
	 * See evergreen_init_atom_start_compute_cs() in this file for the list
	 * of registers initialized by the start_compute_cs_cmd atom.
	 */
	r600_emit_command_buffer(ctx->cs, &ctx->start_compute_cs_cmd);

	ctx->flags |= R600_CONTEXT_CB_FLUSH;
	r600_flush_emit(ctx);

	/* Emit colorbuffers. */
	for (i = 0; i < ctx->framebuffer.state.nr_cbufs; i++) {
		struct r600_surface *cb = (struct r600_surface*)ctx->framebuffer.state.cbufs[i];
		unsigned reloc = r600_context_bo_reloc(ctx, (struct r600_resource*)cb->base.texture,
						       RADEON_USAGE_READWRITE);

		r600_write_compute_context_reg_seq(cs, R_028C60_CB_COLOR0_BASE + i * 0x3C, 7);
		r600_write_value(cs, cb->cb_color_base);	/* R_028C60_CB_COLOR0_BASE */
		r600_write_value(cs, cb->cb_color_pitch);	/* R_028C64_CB_COLOR0_PITCH */
		r600_write_value(cs, cb->cb_color_slice);	/* R_028C68_CB_COLOR0_SLICE */
		r600_write_value(cs, cb->cb_color_view);	/* R_028C6C_CB_COLOR0_VIEW */
		r600_write_value(cs, cb->cb_color_info);	/* R_028C70_CB_COLOR0_INFO */
		r600_write_value(cs, cb->cb_color_attrib);	/* R_028C74_CB_COLOR0_ATTRIB */
		r600_write_value(cs, cb->cb_color_dim);		/* R_028C78_CB_COLOR0_DIM */

		r600_write_value(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C60_CB_COLOR0_BASE */
		r600_write_value(cs, reloc);

		if (!ctx->keep_tiling_flags) {
			r600_write_value(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C70_CB_COLOR0_INFO */
			r600_write_value(cs, reloc);
		}

		r600_write_value(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C74_CB_COLOR0_ATTRIB */
		r600_write_value(cs, reloc);
	}

	/* Set CB_TARGET_MASK  XXX: Use cb_misc_state */
	r600_write_compute_context_reg(cs, R_028238_CB_TARGET_MASK,
					ctx->compute_cb_target_mask);


	/* Emit vertex buffer state */
	ctx->cs_vertex_buffer_state.atom.num_dw = 12 * util_bitcount(ctx->cs_vertex_buffer_state.dirty_mask);
	r600_emit_atom(ctx, &ctx->cs_vertex_buffer_state.atom);

	/* Emit compute shader state */
	r600_emit_atom(ctx, &ctx->cs_shader_state.atom);

	for (i = 0; i < get_compute_resource_num(); i++) {
		if (resources[i].enabled) {
			int j;
			COMPUTE_DBG("resnum: %i, cdw: %i\n", i, cs->cdw);

			for (j = 0; j < resources[i].cs_end; j++) {
				if (resources[i].do_reloc[j]) {
					assert(resources[i].bo);
					evergreen_emit_ctx_reloc(ctx,
						resources[i].bo,
						resources[i].usage);
				}

				cs->buf[cs->cdw++] = resources[i].cs[j];
			}

			if (resources[i].bo) {
				onebo = resources[i].bo;
				evergreen_emit_ctx_reloc(ctx,
					resources[i].bo,
					resources[i].usage);

				/* Special case for textures: they need a
				 * second relocation. */
				if (resources[i].do_reloc
					[resources[i].cs_end] == 2) {
					evergreen_emit_ctx_reloc(ctx,
						resources[i].bo,
						resources[i].usage);
				}
			}
		}
	}

	/* Emit dispatch state and dispatch packet */
	evergreen_emit_direct_dispatch(ctx, block_layout, grid_layout);

	/* XXX: evergreen_flush_emit() hardcodes the CP_COHER_SIZE to
	 * 0xffffffff. */
	ctx->flags |= R600_CONTEXT_CB_FLUSH;
	r600_flush_emit(ctx);

#if 0
	COMPUTE_DBG("cdw: %i\n", cs->cdw);
	for (i = 0; i < cs->cdw; i++) {
		COMPUTE_DBG("%4i : 0x%08X\n", i, ctx->cs->buf[i]);
	}
#endif

	flush_flags = RADEON_FLUSH_ASYNC | RADEON_FLUSH_COMPUTE;
	if (ctx->keep_tiling_flags) {
		flush_flags |= RADEON_FLUSH_KEEP_TILING_FLAGS;
	}

	ctx->ws->cs_flush(ctx->cs, flush_flags);

	ctx->pm4_dirty_cdwords = 0;
	ctx->flags = 0;

	COMPUTE_DBG("shader started\n");

	/* Wait on the last resource bo that was emitted, if any. */
	if (onebo) {
		ctx->ws->buffer_wait(onebo->buf, 0);
	}

	COMPUTE_DBG("...\n");

	ctx->streamout_start = TRUE;
	ctx->streamout_append_bitmask = ~0;
}

/**
 * Emit function for r600_cs_shader_state atom
 */
void evergreen_emit_cs_shader(
		struct r600_context *rctx,
		struct r600_atom *atom)
{
	struct r600_cs_shader_state *state =
					(struct r600_cs_shader_state*)atom;
	struct r600_pipe_compute *shader = state->shader;
	struct r600_kernel *kernel = &shader->kernels[state->kernel_index];
	struct radeon_winsys_cs *cs = rctx->cs;
	uint64_t va;

	va = r600_resource_va(&rctx->screen->screen, &kernel->code_bo->b.b);

	r600_write_compute_context_reg_seq(cs, R_0288D0_SQ_PGM_START_LS, 3);
	r600_write_value(cs, va >> 8); /* R_0288D0_SQ_PGM_START_LS */
	r600_write_value(cs,           /* R_0288D4_SQ_PGM_RESOURCES_LS */
			S_0288D4_NUM_GPRS(kernel->bc.ngpr)
			| S_0288D4_STACK_SIZE(kernel->bc.nstack));
	r600_write_value(cs, 0);	/* R_0288D8_SQ_PGM_RESOURCES_LS_2 */

	r600_write_value(cs, PKT3C(PKT3_NOP, 0, 0));
	r600_write_value(cs, r600_context_bo_reloc(rctx, kernel->code_bo,
							RADEON_USAGE_READ));

	rctx->flags |= R600_CONTEXT_SHADERCONST_FLUSH;
}

static void evergreen_launch_grid(
		struct pipe_context *ctx_,
		const uint *block_layout, const uint *grid_layout,
		uint32_t pc, const void *input)
{
	struct r600_context *ctx = (struct r600_context *)ctx_;

#ifdef HAVE_OPENCL
	COMPUTE_DBG("*** evergreen_launch_grid: pc = %u\n", pc);

	struct r600_pipe_compute *shader = ctx->cs_shader_state.shader;
	if (!shader->kernels[pc].code_bo) {
		void *p;
		struct r600_kernel *kernel = &shader->kernels[pc];
		r600_compute_shader_create(ctx_, kernel->llvm_module, &kernel->bc);
		kernel->code_bo = r600_compute_buffer_alloc_vram(ctx->screen,
							kernel->bc.ndw * 4);
		p = ctx->ws->buffer_map(kernel->code_bo->cs_buf, ctx->cs,
							PIPE_TRANSFER_WRITE);
		memcpy(p, kernel->bc.bytecode, kernel->bc.ndw * 4);
		ctx->ws->buffer_unmap(kernel->code_bo->cs_buf);
	}
#endif

	ctx->cs_shader_state.kernel_index = pc;
	evergreen_compute_upload_input(ctx_, block_layout, grid_layout, input);
	compute_emit_cs(ctx, block_layout, grid_layout);
}
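
#if 0
/* A minimal sketch (not part of the driver) of how a state tracker would
 * reach evergreen_launch_grid() through the pipe_context hooks installed by
 * evergreen_init_compute_state_functions() below.  The compute state,
 * layouts, and input buffer are placeholders. */
static void example_launch(struct pipe_context *pipe,
			   const struct pipe_compute_state *cso)
{
	const uint block[3] = {64, 1, 1};	/* work items per group */
	const uint grid[3] = {4, 1, 1};		/* work groups */
	uint32_t args[2] = {0, 0};		/* cso->req_input_mem bytes of input */

	void *shader = pipe->create_compute_state(pipe, cso);
	pipe->bind_compute_state(pipe, shader);
	/* pc = 0 selects the first kernel in the program */
	pipe->launch_grid(pipe, block, grid, 0, args);
	pipe->delete_compute_state(pipe, shader);
}
#endif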

static void evergreen_set_compute_resources(struct pipe_context * ctx_,
		unsigned start, unsigned count,
		struct pipe_surface ** surfaces)
{
	struct r600_context *ctx = (struct r600_context *)ctx_;
	struct r600_surface **resources = (struct r600_surface **)surfaces;

	COMPUTE_DBG("*** evergreen_set_compute_resources: start = %u count = %u\n",
			start, count);

	for (int i = 0; i < count; i++) {
		/* The first two vertex buffers are reserved for parameters and
		 * global buffers. */
		unsigned vtx_id = 2 + i;
		if (resources[i]) {
			struct r600_resource_global *buffer =
				(struct r600_resource_global*)
				resources[i]->base.texture;
			if (resources[i]->base.writable) {
				assert(i+1 < 12);

				evergreen_set_rat(ctx->cs_shader_state.shader, i+1,
				(struct r600_resource *)resources[i]->base.texture,
				buffer->chunk->start_in_dw*4,
				resources[i]->base.texture->width0);
			}

			evergreen_cs_set_vertex_buffer(ctx, vtx_id,
					buffer->chunk->start_in_dw * 4,
					resources[i]->base.texture);
		}
	}
}

static void evergreen_set_cs_sampler_view(struct pipe_context *ctx_,
		unsigned start_slot, unsigned count,
		struct pipe_sampler_view **views)
{
	struct r600_context *ctx = (struct r600_context *)ctx_;
	struct r600_pipe_sampler_view **resource =
		(struct r600_pipe_sampler_view **)views;

	for (int i = 0; i < count; i++) {
		if (resource[i]) {
			assert(i+1 < 12);
			/* FETCH0 = VTX0 (param buffer),
			 * FETCH1 = VTX1 (global buffer pool),
			 * FETCH2... = TEX */
			evergreen_set_tex_resource(ctx->cs_shader_state.shader, resource[i], i+2);
		}
	}
}

static void evergreen_bind_compute_sampler_states(
	struct pipe_context *ctx_,
	unsigned start_slot,
	unsigned num_samplers,
	void **samplers_)
{
	struct r600_context *ctx = (struct r600_context *)ctx_;
	struct compute_sampler_state ** samplers =
		(struct compute_sampler_state **)samplers_;

	for (int i = 0; i < num_samplers; i++) {
		if (samplers[i]) {
			evergreen_set_sampler_resource(
				ctx->cs_shader_state.shader, samplers[i], i);
		}
	}
}

static void evergreen_set_global_binding(
	struct pipe_context *ctx_, unsigned first, unsigned n,
	struct pipe_resource **resources,
	uint32_t **handles)
{
	struct r600_context *ctx = (struct r600_context *)ctx_;
	struct compute_memory_pool *pool = ctx->screen->global_pool;
	struct r600_resource_global **buffers =
		(struct r600_resource_global **)resources;

	COMPUTE_DBG("*** evergreen_set_global_binding first = %u n = %u\n",
			first, n);

	if (!resources) {
		/* XXX: Unset */
		return;
	}

	compute_memory_finalize_pending(pool, ctx_);

	for (int i = 0; i < n; i++)
	{
		assert(resources[i]->target == PIPE_BUFFER);
		assert(resources[i]->bind & PIPE_BIND_GLOBAL);

		/* The handle is the byte offset of the buffer within the
		 * global memory pool. */
		*(handles[i]) = buffers[i]->chunk->start_in_dw * 4;
	}

	evergreen_set_rat(ctx->cs_shader_state.shader, 0, pool->bo, 0, pool->size_in_dw * 4);
	evergreen_cs_set_vertex_buffer(ctx, 1, 0,
				(struct pipe_resource*)pool->bo);
}

/**
 * This function initializes all the compute specific registers that need to
 * be initialized for each compute command stream.  Registers that are common
 * to both compute and 3D will be initialized at the beginning of each compute
 * command stream by the start_cs_cmd atom.  However, since the SET_CONTEXT_REG
 * packet requires that the shader type bit be set, we must initialize all
 * context registers needed for compute in this function.  The registers
 * initialized by the start_cs_cmd atom can be found in evergreen_state.c in the
 * functions evergreen_init_atom_start_cs or cayman_init_atom_start_cs depending
 * on the GPU family.
 */
void evergreen_init_atom_start_compute_cs(struct r600_context *ctx)
{
	struct r600_command_buffer *cb = &ctx->start_compute_cs_cmd;
	int num_threads;
	int num_stack_entries;

	/* since all required registers are initialised in the
	 * start_compute_cs_cmd atom, we can EMIT_EARLY here.
	 */
	r600_init_command_buffer(cb, 256);
	cb->pkt_flags = RADEON_CP_PACKET3_COMPUTE_MODE;

	/* This must be first. */
	r600_store_value(cb, PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
	r600_store_value(cb, 0x80000000);
	r600_store_value(cb, 0x80000000);

	/* We're setting config registers here. */
	r600_store_value(cb, PKT3(PKT3_EVENT_WRITE, 0, 0));
	r600_store_value(cb, EVENT_TYPE(EVENT_TYPE_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));

	switch (ctx->family) {
	case CHIP_CEDAR:
	default:
		num_threads = 128;
		num_stack_entries = 256;
		break;
	case CHIP_REDWOOD:
		num_threads = 128;
		num_stack_entries = 256;
		break;
	case CHIP_JUNIPER:
		num_threads = 128;
		num_stack_entries = 512;
		break;
	case CHIP_CYPRESS:
	case CHIP_HEMLOCK:
		num_threads = 128;
		num_stack_entries = 512;
		break;
	case CHIP_PALM:
		num_threads = 128;
		num_stack_entries = 256;
		break;
	case CHIP_SUMO:
		num_threads = 128;
		num_stack_entries = 256;
		break;
	case CHIP_SUMO2:
		num_threads = 128;
		num_stack_entries = 512;
		break;
	case CHIP_BARTS:
		num_threads = 128;
		num_stack_entries = 512;
		break;
	case CHIP_TURKS:
		num_threads = 128;
		num_stack_entries = 256;
		break;
	case CHIP_CAICOS:
		num_threads = 128;
		num_stack_entries = 256;
		break;
	}

	/* Config Registers */
	evergreen_init_common_regs(cb, ctx->chip_class,
				   ctx->family, ctx->screen->info.drm_minor);

	/* The primitive type always needs to be POINTLIST for compute. */
	r600_store_config_reg(cb, R_008958_VGT_PRIMITIVE_TYPE,
						V_008958_DI_PT_POINTLIST);

	if (ctx->chip_class < CAYMAN) {

		/* These registers control which simds can be used by each stage.
		 * The default for these registers is 0xffffffff, which means
		 * all simds are available for each stage.  It's possible we may
		 * want to play around with these in the future, but for now
		 * the default value is fine.
		 *
		 * R_008E20_SQ_STATIC_THREAD_MGMT1
		 * R_008E24_SQ_STATIC_THREAD_MGMT2
		 * R_008E28_SQ_STATIC_THREAD_MGMT3
		 */

		/* XXX: We may need to adjust the thread and stack resource
		 * values for 3D/compute interop */

		r600_store_config_reg_seq(cb, R_008C18_SQ_THREAD_RESOURCE_MGMT_1, 5);

		/* R_008C18_SQ_THREAD_RESOURCE_MGMT_1
		 * Set the number of threads used by the PS/VS/GS/ES stage to
		 * 0.
		 */
		r600_store_value(cb, 0);

		/* R_008C1C_SQ_THREAD_RESOURCE_MGMT_2
		 * Set the number of threads used by the CS (aka LS) stage to
		 * the maximum number of threads and set the number of threads
		 * for the HS stage to 0. */
		r600_store_value(cb, S_008C1C_NUM_LS_THREADS(num_threads));

		/* R_008C20_SQ_STACK_RESOURCE_MGMT_1
		 * Set the Control Flow stack entries to 0 for PS/VS stages */
		r600_store_value(cb, 0);

		/* R_008C24_SQ_STACK_RESOURCE_MGMT_2
		 * Set the Control Flow stack entries to 0 for GS/ES stages */
		r600_store_value(cb, 0);

		/* R_008C28_SQ_STACK_RESOURCE_MGMT_3
		 * Set the Control Flow stack entries to 0 for the HS stage, and
		 * set it to the maximum value for the CS (aka LS) stage. */
		r600_store_value(cb,
			S_008C28_NUM_LS_STACK_ENTRIES(num_stack_entries));
	}

	/* Context Registers */

	if (ctx->chip_class < CAYMAN) {
		/* workaround for hw issues with dyn gpr - must set all limits
		 * to 240 instead of 0, 0x1e == 240 / 8
		 */
		r600_store_context_reg(cb, R_028838_SQ_DYN_GPR_RESOURCE_LIMIT_1,
				S_028838_PS_GPRS(0x1e) |
				S_028838_VS_GPRS(0x1e) |
				S_028838_GS_GPRS(0x1e) |
				S_028838_ES_GPRS(0x1e) |
				S_028838_HS_GPRS(0x1e) |
				S_028838_LS_GPRS(0x1e));
	}

	/* XXX: Investigate setting bit 15, which is FAST_COMPUTE_MODE */
	r600_store_context_reg(cb, R_028A40_VGT_GS_MODE,
		S_028A40_COMPUTE_MODE(1) | S_028A40_PARTIAL_THD_AT_EOI(1));

	r600_store_context_reg(cb, R_028B54_VGT_SHADER_STAGES_EN, 2/*CS_ON*/);

	r600_store_context_reg(cb, R_0286E8_SPI_COMPUTE_INPUT_CNTL,
			       S_0286E8_TID_IN_GROUP_ENA
			       | S_0286E8_TGID_ENA
			       | S_0286E8_DISABLE_INDEX_PACK);

	/* The LOOP_CONST registers are an optimization for loops that allows
	 * you to store the initial counter, increment value, and maximum
	 * counter value in a register so that hardware can calculate the
	 * correct number of iterations for the loop, so that you don't need
	 * to have the loop counter in your shader code.  We don't currently
	 * use this optimization, so we must keep track of the counter in the
	 * shader and use a break instruction to exit loops.  However, the
	 * hardware will still use this register to determine when to exit a
	 * loop, so we need to initialize the counter to 0, set the increment
	 * value to 1, and set the maximum counter value to 4095 (0xfff),
	 * which is the maximum value allowed.  This gives us a maximum of
	 * 4096 iterations for our loops, but hopefully our break instruction
	 * will execute some time before the 4096th iteration.
	 */
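	/* The value 0x1000FFF is assumed, per the description above, to pack:
	 * increment = 1 (high byte), initial counter = 0 (middle bits), and
	 * maximum counter = 0xFFF (low 12 bits).  The exact bitfield layout
	 * is not spelled out here, so treat this decoding as informal. */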
	eg_store_loop_const(cb, R_03A200_SQ_LOOP_CONST_0 + (160 * 4), 0x1000FFF);
}

void evergreen_init_compute_state_functions(struct r600_context *ctx)
{
	ctx->context.create_compute_state = evergreen_create_compute_state;
	ctx->context.delete_compute_state = evergreen_delete_compute_state;
	ctx->context.bind_compute_state = evergreen_bind_compute_state;
//	ctx->context.create_sampler_view = evergreen_compute_create_sampler_view;
	ctx->context.set_compute_resources = evergreen_set_compute_resources;
	ctx->context.set_compute_sampler_views = evergreen_set_cs_sampler_view;
	ctx->context.bind_compute_sampler_states = evergreen_bind_compute_sampler_states;
	ctx->context.set_global_binding = evergreen_set_global_binding;
	ctx->context.launch_grid = evergreen_launch_grid;

	/* We always use at least two vertex buffers for compute, one for
	 * parameters and one for global memory */
	ctx->cs_vertex_buffer_state.enabled_mask =
	ctx->cs_vertex_buffer_state.dirty_mask = 1 | 2;
}


struct pipe_resource *r600_compute_global_buffer_create(
	struct pipe_screen *screen,
	const struct pipe_resource *templ)
{
	assert(templ->target == PIPE_BUFFER);
	assert(templ->bind & PIPE_BIND_GLOBAL);
	assert(templ->array_size == 1 || templ->array_size == 0);
	assert(templ->depth0 == 1 || templ->depth0 == 0);
	assert(templ->height0 == 1 || templ->height0 == 0);

	struct r600_resource_global* result = (struct r600_resource_global*)
		CALLOC(sizeof(struct r600_resource_global), 1);
	struct r600_screen* rscreen = (struct r600_screen*)screen;

	COMPUTE_DBG("*** r600_compute_global_buffer_create\n");
	COMPUTE_DBG("width = %u array_size = %u\n", templ->width0,
			templ->array_size);

	result->base.b.vtbl = &r600_global_buffer_vtbl;
	result->base.b.b = *templ;
	/* Set the screen after copying the template, so the copy does not
	 * clobber it. */
	result->base.b.b.screen = screen;
	pipe_reference_init(&result->base.b.b.reference, 1);

	/* Round the size up to a whole number of dwords. */
	int size_in_dw = (templ->width0 + 3) / 4;

	result->chunk = compute_memory_alloc(rscreen->global_pool, size_in_dw);

	if (result->chunk == NULL)
	{
		free(result);
		return NULL;
	}

	return &result->base.b.b;
}

void r600_compute_global_buffer_destroy(
	struct pipe_screen *screen,
	struct pipe_resource *res)
{
	assert(res->target == PIPE_BUFFER);
	assert(res->bind & PIPE_BIND_GLOBAL);

	struct r600_resource_global* buffer = (struct r600_resource_global*)res;
	struct r600_screen* rscreen = (struct r600_screen*)screen;

	compute_memory_free(rscreen->global_pool, buffer->chunk->id);

	buffer->chunk = NULL;
	free(res);
}

void *r600_compute_global_transfer_map(
	struct pipe_context *ctx_,
	struct pipe_resource *resource,
	unsigned level,
	unsigned usage,
	const struct pipe_box *box,
	struct pipe_transfer **ptransfer)
{
	struct r600_context *rctx = (struct r600_context*)ctx_;
	struct compute_memory_pool *pool = rctx->screen->global_pool;
	struct pipe_transfer *transfer = util_slab_alloc(&rctx->pool_transfers);
	struct r600_resource_global* buffer =
		(struct r600_resource_global*)resource;
	uint32_t* map;

	compute_memory_finalize_pending(pool, ctx_);

	assert(resource->target == PIPE_BUFFER);

	COMPUTE_DBG("* r600_compute_global_transfer_map()\n"
			"level = %u, usage = %u, box(x = %u, y = %u, z = %u "
			"width = %u, height = %u, depth = %u)\n", level, usage,
			box->x, box->y, box->z, box->width, box->height,
			box->depth);

	transfer->resource = resource;
	transfer->level = level;
	transfer->usage = usage;
	transfer->box = *box;
	transfer->stride = 0;
	transfer->layer_stride = 0;

	assert(transfer->resource->target == PIPE_BUFFER);
	assert(transfer->resource->bind & PIPE_BIND_GLOBAL);
	assert(transfer->box.x >= 0);
	assert(transfer->box.y == 0);
	assert(transfer->box.z == 0);

	///TODO: do it better, mapping is not possible if the pool is too big

	if (!(map = rctx->ws->buffer_map(buffer->chunk->pool->bo->cs_buf,
				rctx->cs, transfer->usage))) {
		util_slab_free(&rctx->pool_transfers, transfer);
		return NULL;
	}

	*ptransfer = transfer;

	COMPUTE_DBG("Buffer: %p + %u (buffer offset in global memory) "
		"+ %u (box.x)\n", map, buffer->chunk->start_in_dw, transfer->box.x);
	return ((char*)(map + buffer->chunk->start_in_dw)) + transfer->box.x;
}

void r600_compute_global_transfer_unmap(
	struct pipe_context *ctx_,
	struct pipe_transfer* transfer)
{
	assert(transfer->resource->target == PIPE_BUFFER);
	assert(transfer->resource->bind & PIPE_BIND_GLOBAL);

	struct r600_context *ctx = (struct r600_context *)ctx_;
	struct r600_resource_global* buffer =
		(struct r600_resource_global*)transfer->resource;

	COMPUTE_DBG("* r600_compute_global_transfer_unmap()\n");

	ctx->ws->buffer_unmap(buffer->chunk->pool->bo->cs_buf);
	util_slab_free(&ctx->pool_transfers, transfer);
}

void r600_compute_global_transfer_flush_region(
	struct pipe_context *ctx_,
	struct pipe_transfer *transfer,
	const struct pipe_box *box)
{
	assert(0 && "TODO");
}

void r600_compute_global_transfer_inline_write(
	struct pipe_context *pipe,
	struct pipe_resource *resource,
	unsigned level,
	unsigned usage,
	const struct pipe_box *box,
	const void *data,
	unsigned stride,
	unsigned layer_stride)
{
	assert(0 && "TODO");
}