r600g: atomize scissor state
[mesa.git] / src / gallium / drivers / r600 / evergreen_compute.c
1 /*
2 * Copyright 2011 Adam Rak <adam.rak@streamnovation.com>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Adam Rak <adam.rak@streamnovation.com>
25 */
26
27 #include <stdio.h>
28 #include <errno.h>
29 #include "pipe/p_defines.h"
30 #include "pipe/p_state.h"
31 #include "pipe/p_context.h"
32 #include "util/u_blitter.h"
33 #include "util/u_double_list.h"
34 #include "util/u_transfer.h"
35 #include "util/u_surface.h"
36 #include "util/u_pack_color.h"
37 #include "util/u_memory.h"
38 #include "util/u_inlines.h"
39 #include "util/u_framebuffer.h"
40 #include "pipebuffer/pb_buffer.h"
41 #include "r600.h"
42 #include "evergreend.h"
43 #include "r600_resource.h"
44 #include "r600_shader.h"
45 #include "r600_pipe.h"
46 #include "r600_formats.h"
47 #include "evergreen_compute.h"
48 #include "r600_hw_context_priv.h"
49 #include "evergreen_compute_internal.h"
50 #include "compute_memory_pool.h"
51 #ifdef HAVE_OPENCL
52 #include "llvm_wrapper.h"
53 #endif
54
55 /**
56 RAT0 is for global binding write
57 VTX1 is for global binding read
58
59 for writing images RAT1...
60 for reading images TEX2...
61 TEX2-RAT1 is paired
62
63 TEX2... consumes the same fetch resources that VTX2... would consume
64
65 CONST0 and VTX0 are for parameters
66 CONST0 binds smaller input parameter buffers and is used for constant
67 indexing; it is also constant cached
68 VTX0 is for indirect/non-constant indexing, or if the input is bigger than
69 the constant cache can handle
70
71 RATs are limited to 12, so we can bind at most 11 textures for writing
72 because we reserve RAT0 for global bindings. With byte addressing enabled,
73 we should reserve another one too => 10 image bindings for writing at most.
74
75 from Nvidia OpenCL:
76 CL_DEVICE_MAX_READ_IMAGE_ARGS: 128
77 CL_DEVICE_MAX_WRITE_IMAGE_ARGS: 8
78
79 so 10 for writing is enough. 176 is the max for reading according to the docs
80
81 writable images should be listed first (< 10), so their id corresponds to RAT(id+1)
82 writable images will consume TEX slots, and VTX slots too, because of linear indexing (example below)
83
84 */
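/* A worked mapping example (illustrative, derived from the binding code in
 * this file: evergreen_set_compute_resources(), evergreen_set_cs_sampler_view()
 * and evergreen_set_global_binding()):
 *
 *   global pool                  -> RAT0 and VTX1 (fetch slot 1)
 *   kernel parameters            -> CONST0 and VTX0 (fetch slot 0)
 *   compute resource i (surface) -> vertex buffer id 2 + i,
 *                                   RAT(i + 1) if it is writable
 *   sampler view i               -> fetch (TEX) slot i + 2
 *
 * so the first writable image pairs RAT1 with TEX fetch slot 2, matching the
 * TEX2-RAT1 pairing described above. */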
85
86 static void evergreen_cs_set_vertex_buffer(
87 struct r600_context * rctx,
88 unsigned vb_index,
89 unsigned offset,
90 struct pipe_resource * buffer)
91 {
92 struct r600_vertexbuf_state *state = &rctx->cs_vertex_buffer_state;
93 struct pipe_vertex_buffer *vb = &state->vb[vb_index];
94 vb->stride = 1;
95 vb->buffer_offset = offset;
96 vb->buffer = buffer;
97 vb->user_buffer = NULL;
98
99 /* The vertex instructions in the compute shaders use the texture cache,
100 * so we need to invalidate it. */
101 rctx->flags |= R600_CONTEXT_TEX_FLUSH;
102 state->enabled_mask |= 1 << vb_index;
103 state->dirty_mask |= 1 << vb_index;
104 state->atom.dirty = true;
105 }
106
107 const struct u_resource_vtbl r600_global_buffer_vtbl =
108 {
109 u_default_resource_get_handle, /* get_handle */
110 r600_compute_global_buffer_destroy, /* resource_destroy */
111 r600_compute_global_get_transfer, /* get_transfer */
112 r600_compute_global_transfer_destroy, /* transfer_destroy */
113 r600_compute_global_transfer_map, /* transfer_map */
114 r600_compute_global_transfer_flush_region,/* transfer_flush_region */
115 r600_compute_global_transfer_unmap, /* transfer_unmap */
116 r600_compute_global_transfer_inline_write /* transfer_inline_write */
117 };
118
119
120 void *evergreen_create_compute_state(
121 struct pipe_context *ctx_,
122 const struct pipe_compute_state *cso)
123 {
124 struct r600_context *ctx = (struct r600_context *)ctx_;
125 struct r600_pipe_compute *shader = CALLOC_STRUCT(r600_pipe_compute);
126
127 #ifdef HAVE_OPENCL
128 const struct pipe_llvm_program_header * header;
129 const unsigned char * code;
130 unsigned i;
131
132 COMPUTE_DBG("*** evergreen_create_compute_state\n");
133
134 header = cso->prog;
135 code = cso->prog + sizeof(struct pipe_llvm_program_header);
136 #endif
137
138 shader->ctx = (struct r600_context*)ctx;
139 shader->resources = (struct evergreen_compute_resource*)
140 CALLOC(sizeof(struct evergreen_compute_resource),
141 get_compute_resource_num());
142 shader->local_size = cso->req_local_mem; ///TODO: assert it
143 shader->private_size = cso->req_private_mem;
144 shader->input_size = cso->req_input_mem;
145
146 #ifdef HAVE_OPENCL
147 shader->num_kernels = llvm_get_num_kernels(code, header->num_bytes);
148 shader->kernels = CALLOC(sizeof(struct r600_kernel), shader->num_kernels);
149
150 for (i = 0; i < shader->num_kernels; i++) {
151 struct r600_kernel *kernel = &shader->kernels[i];
152 kernel->llvm_module = llvm_get_kernel_module(i, code,
153 header->num_bytes);
154 }
155 #endif
156 return shader;
157 }
158
159 void evergreen_delete_compute_state(struct pipe_context *ctx, void* state)
160 {
161 struct r600_pipe_compute *shader = (struct r600_pipe_compute *)state;
162
163 free(shader->resources);
164 free(shader);
165 }
166
167 static void evergreen_bind_compute_state(struct pipe_context *ctx_, void *state)
168 {
169 struct r600_context *ctx = (struct r600_context *)ctx_;
170
171 COMPUTE_DBG("*** evergreen_bind_compute_state\n");
172
173 ctx->cs_shader_state.shader = (struct r600_pipe_compute *)state;
174 }
175
176 /* The kernel parameters are stored in a vtx buffer (ID=0). Besides the explicit
177 * kernel parameters, there are implicit parameters that need to be stored
178 * in the vertex buffer as well. Here is how these parameters are organized in
179 * the buffer:
180 *
181 * DWORDS 0-2: Number of work groups in each dimension (x,y,z)
182 * DWORDS 3-5: Number of global work items in each dimension (x,y,z)
183 * DWORDS 6-8: Number of work items within each work group in each dimension
184 * (x,y,z)
185 * DWORDS 9+ : Kernel parameters
186 */
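/* Worked example (illustrative numbers only): for grid_layout = {4, 2, 1}
 * and block_layout = {16, 16, 1}, the buffer written below contains:
 *
 *   DWORDS 0-2:  4, 2, 1     number of work groups
 *   DWORDS 3-5: 64, 32, 1    global work items (grid * block)
 *   DWORDS 6-8: 16, 16, 1    work items per work group
 *   DWORDS 9+ : the kernel parameters copied from 'input'
 *
 * kernel_parameters_offset_bytes (36) is exactly these nine leading DWORDS. */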
187 void evergreen_compute_upload_input(
188 struct pipe_context *ctx_,
189 const uint *block_layout,
190 const uint *grid_layout,
191 const void *input)
192 {
193 struct r600_context *ctx = (struct r600_context *)ctx_;
194 struct r600_pipe_compute *shader = ctx->cs_shader_state.shader;
195 int i;
196 unsigned kernel_parameters_offset_bytes = 36;
197 uint32_t * num_work_groups_start;
198 uint32_t * global_size_start;
199 uint32_t * local_size_start;
200 uint32_t * kernel_parameters_start;
201
202 if (shader->input_size == 0) {
203 return;
204 }
205
206 if (!shader->kernel_param) {
207 unsigned buffer_size = shader->input_size;
208
209 /* Add space for the grid dimensions */
210 buffer_size += kernel_parameters_offset_bytes * sizeof(uint);
211 shader->kernel_param = r600_compute_buffer_alloc_vram(
212 ctx->screen, buffer_size);
213 }
214
215 num_work_groups_start = ctx->ws->buffer_map(
216 shader->kernel_param->cs_buf, ctx->cs, PIPE_TRANSFER_WRITE);
217 global_size_start = num_work_groups_start + (3 * (sizeof(uint) / 4));
218 local_size_start = global_size_start + (3 * (sizeof(uint) / 4));
219 kernel_parameters_start = local_size_start + (3 * (sizeof(uint) / 4));
220
221 /* Copy the work group size */
222 memcpy(num_work_groups_start, grid_layout, 3 * sizeof(uint));
223
224 /* Copy the global size */
225 for (i = 0; i < 3; i++) {
226 global_size_start[i] = grid_layout[i] * block_layout[i];
227 }
228
229 /* Copy the local dimensions */
230 memcpy(local_size_start, block_layout, 3 * sizeof(uint));
231
232 /* Copy the kernel inputs */
233 memcpy(kernel_parameters_start, input, shader->input_size);
234
235 for (i = 0; i < (kernel_parameters_offset_bytes / 4) +
236 (shader->input_size / 4); i++) {
237 COMPUTE_DBG("input %i : %i\n", i,
238 ((unsigned*)num_work_groups_start)[i]);
239 }
240
241 ctx->ws->buffer_unmap(shader->kernel_param->cs_buf);
242
243 ///ID=0 is reserved for the parameters
244 evergreen_cs_set_vertex_buffer(ctx, 0, 0,
245 (struct pipe_resource*)shader->kernel_param);
246 ///ID=0 is reserved for parameters
247 evergreen_set_const_cache(shader, 0, shader->kernel_param,
248 shader->input_size, 0);
249 }
250
251 static void evergreen_emit_direct_dispatch(
252 struct r600_context *rctx,
253 const uint *block_layout, const uint *grid_layout)
254 {
255 int i;
256 struct radeon_winsys_cs *cs = rctx->cs;
257 unsigned num_waves;
258 unsigned num_pipes = rctx->screen->info.r600_max_pipes;
259 unsigned wave_divisor = (16 * num_pipes);
260 int group_size = 1;
261 int grid_size = 1;
262 /* XXX: Enable lds and get size from cs_shader_state */
263 unsigned lds_size = 0;
264
265 /* Calculate group_size/grid_size */
266 for (i = 0; i < 3; i++) {
267 group_size *= block_layout[i];
268 }
269
270 for (i = 0; i < 3; i++) {
271 grid_size *= grid_layout[i];
272 }
273
274 /* num_waves = ceil((tg_size.x * tg_size.y * tg_size.z) / (16 * num_pipes)) */
275 num_waves = (block_layout[0] * block_layout[1] * block_layout[2] +
276 wave_divisor - 1) / wave_divisor;
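/* For example (illustrative only): a 16x16x1 thread block on a part reporting
 * r600_max_pipes = 8 gives num_waves = ceil(256 / 128) = 2 wavefronts per
 * thread block. */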
277
278 COMPUTE_DBG("Using %u pipes, there are %u wavefronts per thread block\n",
279 num_pipes, num_waves);
280
281 /* XXX: Partition the LDS between PS/CS. By default half (4096 dwords
282 * on Evergreen) goes to Pixel Shaders and half goes to Compute Shaders.
283 * We may need to allocate the entire LDS space for Compute Shaders.
284 *
285 * EG: R_008E2C_SQ_LDS_RESOURCE_MGMT := S_008E2C_NUM_LS_LDS(lds_dwords)
286 * CM: CM_R_0286FC_SPI_LDS_MGMT := S_0286FC_NUM_LS_LDS(lds_dwords)
287 */
288
289 r600_write_config_reg(cs, R_008970_VGT_NUM_INDICES, group_size);
290
291 r600_write_config_reg_seq(cs, R_00899C_VGT_COMPUTE_START_X, 3);
292 r600_write_value(cs, 0); /* R_00899C_VGT_COMPUTE_START_X */
293 r600_write_value(cs, 0); /* R_0089A0_VGT_COMPUTE_START_Y */
294 r600_write_value(cs, 0); /* R_0089A4_VGT_COMPUTE_START_Z */
295
296 r600_write_config_reg(cs, R_0089AC_VGT_COMPUTE_THREAD_GROUP_SIZE,
297 group_size);
298
299 r600_write_compute_context_reg_seq(cs, R_0286EC_SPI_COMPUTE_NUM_THREAD_X, 3);
300 r600_write_value(cs, block_layout[0]); /* R_0286EC_SPI_COMPUTE_NUM_THREAD_X */
301 r600_write_value(cs, block_layout[1]); /* R_0286F0_SPI_COMPUTE_NUM_THREAD_Y */
302 r600_write_value(cs, block_layout[2]); /* R_0286F4_SPI_COMPUTE_NUM_THREAD_Z */
303
304 r600_write_compute_context_reg(cs, CM_R_0288E8_SQ_LDS_ALLOC,
305 lds_size | (num_waves << 14));
306
307 /* Dispatch packet */
308 r600_write_value(cs, PKT3C(PKT3_DISPATCH_DIRECT, 3, 0));
309 r600_write_value(cs, grid_layout[0]);
310 r600_write_value(cs, grid_layout[1]);
311 r600_write_value(cs, grid_layout[2]);
312 /* VGT_DISPATCH_INITIATOR = COMPUTE_SHADER_EN */
313 r600_write_value(cs, 1);
314 }
315
316 static void compute_emit_cs(struct r600_context *ctx, const uint *block_layout,
317 const uint *grid_layout)
318 {
319 struct radeon_winsys_cs *cs = ctx->cs;
320 unsigned flush_flags = 0;
321 int i;
322
323 struct r600_resource *onebo = NULL;
324 struct evergreen_compute_resource *resources =
325 ctx->cs_shader_state.shader->resources;
326
327 /* Initialize all the compute-related registers.
328 *
329 * See evergreen_init_atom_start_compute_cs() in this file for the list
330 * of registers initialized by the start_compute_cs_cmd atom.
331 */
332 r600_emit_command_buffer(ctx->cs, &ctx->start_compute_cs_cmd);
333
334 ctx->flags |= R600_CONTEXT_CB_FLUSH;
335 r600_flush_emit(ctx);
336
337 /* Emit colorbuffers. */
338 for (i = 0; i < ctx->framebuffer.state.nr_cbufs; i++) {
339 struct r600_surface *cb = (struct r600_surface*)ctx->framebuffer.state.cbufs[i];
340 unsigned reloc = r600_context_bo_reloc(ctx, (struct r600_resource*)cb->base.texture,
341 RADEON_USAGE_READWRITE);
342
343 r600_write_compute_context_reg_seq(cs, R_028C60_CB_COLOR0_BASE + i * 0x3C, 7);
344 r600_write_value(cs, cb->cb_color_base); /* R_028C60_CB_COLOR0_BASE */
345 r600_write_value(cs, cb->cb_color_pitch); /* R_028C64_CB_COLOR0_PITCH */
346 r600_write_value(cs, cb->cb_color_slice); /* R_028C68_CB_COLOR0_SLICE */
347 r600_write_value(cs, cb->cb_color_view); /* R_028C6C_CB_COLOR0_VIEW */
348 r600_write_value(cs, cb->cb_color_info); /* R_028C70_CB_COLOR0_INFO */
349 r600_write_value(cs, cb->cb_color_attrib); /* R_028C74_CB_COLOR0_ATTRIB */
350 r600_write_value(cs, cb->cb_color_dim); /* R_028C78_CB_COLOR0_DIM */
351
352 r600_write_value(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C60_CB_COLOR0_BASE */
353 r600_write_value(cs, reloc);
354
355 if (!ctx->keep_tiling_flags) {
356 r600_write_value(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C70_CB_COLOR0_INFO */
357 r600_write_value(cs, reloc);
358 }
359
360 r600_write_value(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C74_CB_COLOR0_ATTRIB */
361 r600_write_value(cs, reloc);
362 }
363
364 /* Set CB_TARGET_MASK XXX: Use cb_misc_state */
365 r600_write_compute_context_reg(cs, R_028238_CB_TARGET_MASK,
366 ctx->compute_cb_target_mask);
367
368
369 /* Emit vertex buffer state */
370 ctx->cs_vertex_buffer_state.atom.num_dw = 12 * util_bitcount(ctx->cs_vertex_buffer_state.dirty_mask);
371 r600_emit_atom(ctx, &ctx->cs_vertex_buffer_state.atom);
372
373 /* Emit compute shader state */
374 r600_emit_atom(ctx, &ctx->cs_shader_state.atom);
375
376 for (i = 0; i < get_compute_resource_num(); i++) {
377 if (resources[i].enabled) {
378 int j;
379 COMPUTE_DBG("resnum: %i, cdw: %i\n", i, cs->cdw);
380
381 for (j = 0; j < resources[i].cs_end; j++) {
382 if (resources[i].do_reloc[j]) {
383 assert(resources[i].bo);
384 evergreen_emit_ctx_reloc(ctx,
385 resources[i].bo,
386 resources[i].usage);
387 }
388
389 cs->buf[cs->cdw++] = resources[i].cs[j];
390 }
391
392 if (resources[i].bo) {
393 onebo = resources[i].bo;
394 evergreen_emit_ctx_reloc(ctx,
395 resources[i].bo,
396 resources[i].usage);
397
398 ///special case for textures
399 if (resources[i].do_reloc
400 [resources[i].cs_end] == 2) {
401 evergreen_emit_ctx_reloc(ctx,
402 resources[i].bo,
403 resources[i].usage);
404 }
405 }
406 }
407 }
408
409 /* Emit dispatch state and dispatch packet */
410 evergreen_emit_direct_dispatch(ctx, block_layout, grid_layout);
411
412 /* XXX evergreen_flush_emit() hardcodes the CP_COHER_SIZE to 0xffffffff
413 */
414 ctx->flags |= R600_CONTEXT_CB_FLUSH;
415 r600_flush_emit(ctx);
416
417 #if 0
418 COMPUTE_DBG("cdw: %i\n", cs->cdw);
419 for (i = 0; i < cs->cdw; i++) {
420 COMPUTE_DBG("%4i : 0x%08X\n", i, ctx->cs->buf[i]);
421 }
422 #endif
423
424 flush_flags = RADEON_FLUSH_ASYNC | RADEON_FLUSH_COMPUTE;
425 if (ctx->keep_tiling_flags) {
426 flush_flags |= RADEON_FLUSH_KEEP_TILING_FLAGS;
427 }
428
429 ctx->ws->cs_flush(ctx->cs, flush_flags);
430
431 ctx->pm4_dirty_cdwords = 0;
432 ctx->flags = 0;
433
434 COMPUTE_DBG("shader started\n");
435
436 if (onebo) ctx->ws->buffer_wait(onebo->buf, 0);
437
438 COMPUTE_DBG("...\n");
439
440 ctx->streamout_start = TRUE;
441 ctx->streamout_append_bitmask = ~0;
442
443 }
444
445
446 /**
447 * Emit function for r600_cs_shader_state atom
448 */
449 void evergreen_emit_cs_shader(
450 struct r600_context *rctx,
451 struct r600_atom *atom)
452 {
453 struct r600_cs_shader_state *state =
454 (struct r600_cs_shader_state*)atom;
455 struct r600_pipe_compute *shader = state->shader;
456 struct r600_kernel *kernel = &shader->kernels[state->kernel_index];
457 struct radeon_winsys_cs *cs = rctx->cs;
458 uint64_t va;
459
460 va = r600_resource_va(&rctx->screen->screen, &kernel->code_bo->b.b);
461
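/* SQ_PGM_START_LS holds the shader address in 256-byte units, which is why
 * the VA is shifted right by 8 below (our reading of the register format,
 * not stated in this file). */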
462 r600_write_compute_context_reg_seq(cs, R_0288D0_SQ_PGM_START_LS, 3);
463 r600_write_value(cs, va >> 8); /* R_0288D0_SQ_PGM_START_LS */
464 r600_write_value(cs, /* R_0288D4_SQ_PGM_RESOURCES_LS */
465 S_0288D4_NUM_GPRS(kernel->bc.ngpr)
466 | S_0288D4_STACK_SIZE(kernel->bc.nstack));
467 r600_write_value(cs, 0); /* R_0288D8_SQ_PGM_RESOURCES_LS_2 */
468
469 r600_write_value(cs, PKT3C(PKT3_NOP, 0, 0));
470 r600_write_value(cs, r600_context_bo_reloc(rctx, kernel->code_bo,
471 RADEON_USAGE_READ));
472
473 rctx->flags |= R600_CONTEXT_SHADERCONST_FLUSH;
474 }
475
476 static void evergreen_launch_grid(
477 struct pipe_context *ctx_,
478 const uint *block_layout, const uint *grid_layout,
479 uint32_t pc, const void *input)
480 {
481 struct r600_context *ctx = (struct r600_context *)ctx_;
482
483 #ifdef HAVE_OPENCL
484 COMPUTE_DBG("*** evergreen_launch_grid: pc = %u\n", pc);
485
486 struct r600_pipe_compute *shader = ctx->cs_shader_state.shader;
487 if (!shader->kernels[pc].code_bo) {
488 void *p;
489 struct r600_kernel *kernel = &shader->kernels[pc];
490 r600_compute_shader_create(ctx_, kernel->llvm_module, &kernel->bc);
491 kernel->code_bo = r600_compute_buffer_alloc_vram(ctx->screen,
492 kernel->bc.ndw * 4);
493 p = ctx->ws->buffer_map(kernel->code_bo->cs_buf, ctx->cs,
494 PIPE_TRANSFER_WRITE);
495 memcpy(p, kernel->bc.bytecode, kernel->bc.ndw * 4);
496 ctx->ws->buffer_unmap(kernel->code_bo->cs_buf);
497 }
498 #endif
499
500 ctx->cs_shader_state.kernel_index = pc;
501 evergreen_compute_upload_input(ctx_, block_layout, grid_layout, input);
502 compute_emit_cs(ctx, block_layout, grid_layout);
503 }
504
505 static void evergreen_set_compute_resources(struct pipe_context * ctx_,
506 unsigned start, unsigned count,
507 struct pipe_surface ** surfaces)
508 {
509 struct r600_context *ctx = (struct r600_context *)ctx_;
510 struct r600_surface **resources = (struct r600_surface **)surfaces;
511
512 COMPUTE_DBG("*** evergreen_set_compute_resources: start = %u count = %u\n",
513 start, count);
514
515 for (int i = 0; i < count; i++) {
516 /* The first two vertex buffers are reserved for parameters and
517 * global buffers. */
518 unsigned vtx_id = 2 + i;
519 if (resources[i]) {
520 struct r600_resource_global *buffer =
521 (struct r600_resource_global*)
522 resources[i]->base.texture;
523 if (resources[i]->base.writable) {
524 assert(i+1 < 12);
525
526 evergreen_set_rat(ctx->cs_shader_state.shader, i+1,
527 (struct r600_resource *)resources[i]->base.texture,
528 buffer->chunk->start_in_dw*4,
529 resources[i]->base.texture->width0);
530 }
531
532 evergreen_cs_set_vertex_buffer(ctx, vtx_id,
533 buffer->chunk->start_in_dw * 4,
534 resources[i]->base.texture);
535 }
536 }
537 }
538
539 static void evergreen_set_cs_sampler_view(struct pipe_context *ctx_,
540 unsigned start_slot, unsigned count,
541 struct pipe_sampler_view **views)
542 {
543 struct r600_context *ctx = (struct r600_context *)ctx_;
544 struct r600_pipe_sampler_view **resource =
545 (struct r600_pipe_sampler_view **)views;
546
547 for (int i = 0; i < count; i++) {
548 if (resource[i]) {
549 assert(i+1 < 12);
550 /* FETCH0 = VTX0 (param buffer),
551 * FETCH1 = VTX1 (global buffer pool), FETCH2... = TEX */
552 evergreen_set_tex_resource(ctx->cs_shader_state.shader, resource[i], i+2);
553 }
554 }
555 }
556
557 static void evergreen_bind_compute_sampler_states(
558 struct pipe_context *ctx_,
559 unsigned start_slot,
560 unsigned num_samplers,
561 void **samplers_)
562 {
563 struct r600_context *ctx = (struct r600_context *)ctx_;
564 struct compute_sampler_state ** samplers =
565 (struct compute_sampler_state **)samplers_;
566
567 for (int i = 0; i < num_samplers; i++) {
568 if (samplers[i]) {
569 evergreen_set_sampler_resource(
570 ctx->cs_shader_state.shader, samplers[i], i);
571 }
572 }
573 }
574
575 static void evergreen_set_global_binding(
576 struct pipe_context *ctx_, unsigned first, unsigned n,
577 struct pipe_resource **resources,
578 uint32_t **handles)
579 {
580 struct r600_context *ctx = (struct r600_context *)ctx_;
581 struct compute_memory_pool *pool = ctx->screen->global_pool;
582 struct r600_resource_global **buffers =
583 (struct r600_resource_global **)resources;
584
585 COMPUTE_DBG("*** evergreen_set_global_binding first = %u n = %u\n",
586 first, n);
587
588 if (!resources) {
589 /* XXX: Unset */
590 return;
591 }
592
593 compute_memory_finalize_pending(pool, ctx_);
594
595 for (int i = 0; i < n; i++)
596 {
597 assert(resources[i]->target == PIPE_BUFFER);
598 assert(resources[i]->bind & PIPE_BIND_GLOBAL);
599
600 *(handles[i]) = buffers[i]->chunk->start_in_dw * 4;
601 }
602
603 evergreen_set_rat(ctx->cs_shader_state.shader, 0, pool->bo, 0, pool->size_in_dw * 4);
604 evergreen_cs_set_vertex_buffer(ctx, 1, 0,
605 (struct pipe_resource*)pool->bo);
606 }
607
608 /**
609 * This function initializes all the compute specific registers that need to
610 * be initialized for each compute command stream. Registers that are common
611 * to both compute and 3D will be initialized at the beginning of each compute
612 * command stream by the start_cs_cmd atom. However, since the SET_CONTEXT_REG
613 * packet requires that the shader type bit be set, we must initialize all
614 * context registers needed for compute in this function. The registers
615 * initialized by the start_cs_cmd atom can be found in evergreen_state.c in the
616 * functions evergreen_init_atom_start_cs or cayman_init_atom_start_cs depending
617 * on the GPU family.
618 */
619 void evergreen_init_atom_start_compute_cs(struct r600_context *ctx)
620 {
621 struct r600_command_buffer *cb = &ctx->start_compute_cs_cmd;
622 int num_threads;
623 int num_stack_entries;
624
625 /* since all required registers are initialised in the
626 * start_compute_cs_cmd atom, we can EMIT_EARLY here.
627 */
628 r600_init_command_buffer(cb, 256);
629 cb->pkt_flags = RADEON_CP_PACKET3_COMPUTE_MODE;
630
631 switch (ctx->family) {
632 case CHIP_CEDAR:
633 default:
634 num_threads = 128;
635 num_stack_entries = 256;
636 break;
637 case CHIP_REDWOOD:
638 num_threads = 128;
639 num_stack_entries = 256;
640 break;
641 case CHIP_JUNIPER:
642 num_threads = 128;
643 num_stack_entries = 512;
644 break;
645 case CHIP_CYPRESS:
646 case CHIP_HEMLOCK:
647 num_threads = 128;
648 num_stack_entries = 512;
649 break;
650 case CHIP_PALM:
651 num_threads = 128;
652 num_stack_entries = 256;
653 break;
654 case CHIP_SUMO:
655 num_threads = 128;
656 num_stack_entries = 256;
657 break;
658 case CHIP_SUMO2:
659 num_threads = 128;
660 num_stack_entries = 512;
661 break;
662 case CHIP_BARTS:
663 num_threads = 128;
664 num_stack_entries = 512;
665 break;
666 case CHIP_TURKS:
667 num_threads = 128;
668 num_stack_entries = 256;
669 break;
670 case CHIP_CAICOS:
671 num_threads = 128;
672 num_stack_entries = 256;
673 break;
674 }
675
676 /* Config Registers */
677 evergreen_init_common_regs(cb, ctx->chip_class,
678 ctx->family, ctx->screen->info.drm_minor);
679
680 /* The primitive type always needs to be POINTLIST for compute. */
681 r600_store_config_reg(cb, R_008958_VGT_PRIMITIVE_TYPE,
682 V_008958_DI_PT_POINTLIST);
683
684 if (ctx->chip_class < CAYMAN) {
685
686 /* These registers control which simds can be used by each stage.
687 * The default for these registers is 0xffffffff, which means
688 * all simds are available for each stage. It's possible we may
689 * want to play around with these in the future, but for now
690 * the default value is fine.
691 *
692 * R_008E20_SQ_STATIC_THREAD_MGMT1
693 * R_008E24_SQ_STATIC_THREAD_MGMT2
694 * R_008E28_SQ_STATIC_THREAD_MGMT3
695 */
696
697 /* XXX: We may need to adjust the thread and stack resource
698 * values for 3D/compute interop */
699
700 r600_store_config_reg_seq(cb, R_008C18_SQ_THREAD_RESOURCE_MGMT_1, 5);
701
702 /* R_008C18_SQ_THREAD_RESOURCE_MGMT_1
703 * Set the number of threads used by the PS/VS/GS/ES stage to
704 * 0.
705 */
706 r600_store_value(cb, 0);
707
708 /* R_008C1C_SQ_THREAD_RESOURCE_MGMT_2
709 * Set the number of threads used by the CS (aka LS) stage to
710 * the maximum number of threads and set the number of threads
711 * for the HS stage to 0. */
712 r600_store_value(cb, S_008C1C_NUM_LS_THREADS(num_threads));
713
714 /* R_008C20_SQ_STACK_RESOURCE_MGMT_1
715 * Set the Control Flow stack entries to 0 for PS/VS stages */
716 r600_store_value(cb, 0);
717
718 /* R_008C24_SQ_STACK_RESOURCE_MGMT_2
719 * Set the Control Flow stack entries to 0 for GS/ES stages */
720 r600_store_value(cb, 0);
721
722 /* R_008C28_SQ_STACK_RESOURCE_MGMT_3
723 * Set the Control Flow stack entries to 0 for the HS stage, and
724 * set it to the maximum value for the CS (aka LS) stage. */
725 r600_store_value(cb,
726 S_008C28_NUM_LS_STACK_ENTRIES(num_stack_entries));
727 }
728
729 /* Context Registers */
730
731 if (ctx->chip_class < CAYMAN) {
732 /* workaround for hw issues with dyn gpr - must set all limits
733 * to 240 instead of 0, 0x1e == 240 / 8
734 */
735 r600_store_context_reg(cb, R_028838_SQ_DYN_GPR_RESOURCE_LIMIT_1,
736 S_028838_PS_GPRS(0x1e) |
737 S_028838_VS_GPRS(0x1e) |
738 S_028838_GS_GPRS(0x1e) |
739 S_028838_ES_GPRS(0x1e) |
740 S_028838_HS_GPRS(0x1e) |
741 S_028838_LS_GPRS(0x1e));
742 }
743
744 /* XXX: Investigate setting bit 15, which is FAST_COMPUTE_MODE */
745 r600_store_context_reg(cb, R_028A40_VGT_GS_MODE,
746 S_028A40_COMPUTE_MODE(1) | S_028A40_PARTIAL_THD_AT_EOI(1));
747
748 r600_store_context_reg(cb, R_028B54_VGT_SHADER_STAGES_EN, 2/*CS_ON*/);
749
750 r600_store_context_reg(cb, R_0286E8_SPI_COMPUTE_INPUT_CNTL,
751 S_0286E8_TID_IN_GROUP_ENA
752 | S_0286E8_TGID_ENA
753 | S_0286E8_DISABLE_INDEX_PACK);
754
755
756 /* The LOOP_CONST registers are an optimization for loops that allows
757 * you to store the initial counter, increment value, and maximum
758 * counter value in a register so that the hardware can calculate the
759 * correct number of iterations for the loop, so that you don't need
760 * to have the loop counter in your shader code. We don't currently use
761 * this optimization, so we must keep track of the counter in the
762 * shader and use a break instruction to exit loops. However, the
763 * hardware will still use this register to determine when to exit a
764 * loop, so we need to initialize the counter to 0, set the increment
765 * value to 1 and the maximum counter value to 4095 (0xfff), which
766 * is the maximum value allowed. This gives us a maximum of 4096
767 * iterations for our loops, but hopefully our break instruction will
768 * execute some time before the 4096th iteration.
769 */
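/* Decoding 0x1000FFF against the description above: the low 12 bits (0xfff)
 * hold the maximum count, the middle field the initial value (0), and the
 * top byte the increment (1). The exact bit positions are our reading of
 * the loop-constant format rather than something this file spells out. */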
770 eg_store_loop_const(cb, R_03A200_SQ_LOOP_CONST_0 + (160 * 4), 0x1000FFF);
771 }
772
773 void evergreen_init_compute_state_functions(struct r600_context *ctx)
774 {
775 ctx->context.create_compute_state = evergreen_create_compute_state;
776 ctx->context.delete_compute_state = evergreen_delete_compute_state;
777 ctx->context.bind_compute_state = evergreen_bind_compute_state;
778 // ctx->context.create_sampler_view = evergreen_compute_create_sampler_view;
779 ctx->context.set_compute_resources = evergreen_set_compute_resources;
780 ctx->context.set_compute_sampler_views = evergreen_set_cs_sampler_view;
781 ctx->context.bind_compute_sampler_states = evergreen_bind_compute_sampler_states;
782 ctx->context.set_global_binding = evergreen_set_global_binding;
783 ctx->context.launch_grid = evergreen_launch_grid;
784
785 /* We always use at least two vertex buffers for compute, one for
786 * parameters and one for global memory */
787 ctx->cs_vertex_buffer_state.enabled_mask =
788 ctx->cs_vertex_buffer_state.dirty_mask = 1 | 2;
789 }
790
791
792 struct pipe_resource *r600_compute_global_buffer_create(
793 struct pipe_screen *screen,
794 const struct pipe_resource *templ)
795 {
796 assert(templ->target == PIPE_BUFFER);
797 assert(templ->bind & PIPE_BIND_GLOBAL);
798 assert(templ->array_size == 1 || templ->array_size == 0);
799 assert(templ->depth0 == 1 || templ->depth0 == 0);
800 assert(templ->height0 == 1 || templ->height0 == 0);
801
802 struct r600_resource_global* result = (struct r600_resource_global*)
803 CALLOC(sizeof(struct r600_resource_global), 1);
804 struct r600_screen* rscreen = (struct r600_screen*)screen;
805
806 COMPUTE_DBG("*** r600_compute_global_buffer_create\n");
807 COMPUTE_DBG("width = %u array_size = %u\n", templ->width0,
808 templ->array_size);
809
810 result->base.b.vtbl = &r600_global_buffer_vtbl;
811 result->base.b.b.screen = screen;
812 result->base.b.b = *templ;
813 pipe_reference_init(&result->base.b.b.reference, 1);
814
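/* Round the requested size in bytes up to a whole number of dwords. */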
815 int size_in_dw = (templ->width0+3) / 4;
816
817 result->chunk = compute_memory_alloc(rscreen->global_pool, size_in_dw);
818
819 if (result->chunk == NULL)
820 {
821 free(result);
822 return NULL;
823 }
824
825 return &result->base.b.b;
826 }
827
828 void r600_compute_global_buffer_destroy(
829 struct pipe_screen *screen,
830 struct pipe_resource *res)
831 {
832 assert(res->target == PIPE_BUFFER);
833 assert(res->bind & PIPE_BIND_GLOBAL);
834
835 struct r600_resource_global* buffer = (struct r600_resource_global*)res;
836 struct r600_screen* rscreen = (struct r600_screen*)screen;
837
838 compute_memory_free(rscreen->global_pool, buffer->chunk->id);
839
840 buffer->chunk = NULL;
841 free(res);
842 }
843
844 void* r600_compute_global_transfer_map(
845 struct pipe_context *ctx_,
846 struct pipe_transfer* transfer)
847 {
848 assert(transfer->resource->target == PIPE_BUFFER);
849 assert(transfer->resource->bind & PIPE_BIND_GLOBAL);
850 assert(transfer->box.x >= 0);
851 assert(transfer->box.y == 0);
852 assert(transfer->box.z == 0);
853
854 struct r600_context *ctx = (struct r600_context *)ctx_;
855 struct r600_resource_global* buffer =
856 (struct r600_resource_global*)transfer->resource;
857
858 uint32_t* map;
859 ///TODO: do it better, mapping is not possible if the pool is too big
860
861 COMPUTE_DBG("* r600_compute_global_transfer_map()\n");
862
863 if (!(map = ctx->ws->buffer_map(buffer->chunk->pool->bo->cs_buf,
864 ctx->cs, transfer->usage))) {
865 return NULL;
866 }
867
868 COMPUTE_DBG("Buffer: %p + %u (buffer offset in global memory) "
869 "+ %u (box.x)\n", map, buffer->chunk->start_in_dw, transfer->box.x);
870 return ((char*)(map + buffer->chunk->start_in_dw)) + transfer->box.x;
871 }
872
873 void r600_compute_global_transfer_unmap(
874 struct pipe_context *ctx_,
875 struct pipe_transfer* transfer)
876 {
877 assert(transfer->resource->target == PIPE_BUFFER);
878 assert(transfer->resource->bind & PIPE_BIND_GLOBAL);
879
880 struct r600_context *ctx = (struct r600_context *)ctx_;
881 struct r600_resource_global* buffer =
882 (struct r600_resource_global*)transfer->resource;
883
884 COMPUTE_DBG("* r600_compute_global_transfer_unmap()\n");
885
886 ctx->ws->buffer_unmap(buffer->chunk->pool->bo->cs_buf);
887 }
888
889 struct pipe_transfer * r600_compute_global_get_transfer(
890 struct pipe_context *ctx_,
891 struct pipe_resource *resource,
892 unsigned level,
893 unsigned usage,
894 const struct pipe_box *box)
895 {
896 struct r600_context *ctx = (struct r600_context *)ctx_;
897 struct compute_memory_pool *pool = ctx->screen->global_pool;
898
899 compute_memory_finalize_pending(pool, ctx_);
900
901 assert(resource->target == PIPE_BUFFER);
902 struct r600_context *rctx = (struct r600_context*)ctx_;
903 struct pipe_transfer *transfer = util_slab_alloc(&rctx->pool_transfers);
904
905 COMPUTE_DBG("* r600_compute_global_get_transfer()\n"
906 "level = %u, usage = %u, box(x = %u, y = %u, z = %u "
907 "width = %u, height = %u, depth = %u)\n", level, usage,
908 box->x, box->y, box->z, box->width, box->height,
909 box->depth);
910
911 transfer->resource = resource;
912 transfer->level = level;
913 transfer->usage = usage;
914 transfer->box = *box;
915 transfer->stride = 0;
916 transfer->layer_stride = 0;
917 transfer->data = NULL;
918
919 /* Note: strides are zero; this is OK for buffers, but not for
920 * textures 2D and higher, at least.
921 */
922 return transfer;
923 }
924
925 void r600_compute_global_transfer_destroy(
926 struct pipe_context *ctx_,
927 struct pipe_transfer *transfer)
928 {
929 struct r600_context *rctx = (struct r600_context*)ctx_;
930 util_slab_free(&rctx->pool_transfers, transfer);
931 }
932
933 void r600_compute_global_transfer_flush_region(
934 struct pipe_context *ctx_,
935 struct pipe_transfer *transfer,
936 const struct pipe_box *box)
937 {
938 assert(0 && "TODO");
939 }
940
941 void r600_compute_global_transfer_inline_write(
942 struct pipe_context *pipe,
943 struct pipe_resource *resource,
944 unsigned level,
945 unsigned usage,
946 const struct pipe_box *box,
947 const void *data,
948 unsigned stride,
949 unsigned layer_stride)
950 {
951 assert(0 && "TODO");
952 }