/*
 * Copyright 2011 Adam Rak <adam.rak@streamnovation.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Adam Rak <adam.rak@streamnovation.com>
 */

#include <stdio.h>
#include <errno.h>
#include "pipe/p_defines.h"
#include "pipe/p_state.h"
#include "pipe/p_context.h"
#include "util/u_blitter.h"
#include "util/u_double_list.h"
#include "util/u_transfer.h"
#include "util/u_surface.h"
#include "util/u_pack_color.h"
#include "util/u_memory.h"
#include "util/u_inlines.h"
#include "util/u_framebuffer.h"
#include "pipebuffer/pb_buffer.h"
#include "r600.h"
#include "evergreend.h"
#include "r600_resource.h"
#include "r600_shader.h"
#include "r600_pipe.h"
#include "r600_formats.h"
#include "evergreen_compute.h"
#include "r600_hw_context_priv.h"
#include "evergreen_compute_internal.h"
#include "compute_memory_pool.h"
#ifdef HAVE_OPENCL
#include "llvm_wrapper.h"
#endif

/**
RAT0 is for global binding write
VTX1 is for global binding read

for writing images: RAT1...
for reading images: TEX2...
  TEX2 and RAT1 are paired

TEX2... consumes the same fetch resources that VTX2... would consume

CONST0 and VTX0 are for parameters
  CONST0 binds the smaller input parameter buffer, and is used for constant
  indexing; it is also constant cached
  VTX0 is for indirect/non-constant indexing, or if the input is bigger than
  the constant cache can handle

RATs are limited to 12, so we can only bind at most 11 textures for writing,
because we reserve RAT0 for global bindings. With byte addressing enabled,
we should reserve another one too => 10 image bindings for writing max.

from Nvidia OpenCL:
  CL_DEVICE_MAX_READ_IMAGE_ARGS:      128
  CL_DEVICE_MAX_WRITE_IMAGE_ARGS:     8

so 10 for writing is enough. 176 is the max for reading according to the docs

writable images should be listed first (< 10), so their id corresponds to RAT(id+1)
writable images will consume TEX slots, and VTX slots too, because of linear indexing

*/

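/**
 * Bind \p buffer at \p offset to compute vertex buffer slot \p vb_index and
 * mark the CS vertex buffer atom dirty, so the new binding is emitted with
 * the next compute command stream.
 */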
static void evergreen_cs_set_vertex_buffer(
	struct r600_context * rctx,
	unsigned vb_index,
	unsigned offset,
	struct pipe_resource * buffer)
{
	struct r600_vertexbuf_state *state = &rctx->cs_vertex_buffer_state;
	struct pipe_vertex_buffer *vb = &state->vb[vb_index];
	vb->stride = 1;
	vb->buffer_offset = offset;
	vb->buffer = buffer;
	vb->user_buffer = NULL;

	rctx->flags |= rctx->has_vertex_cache ? R600_CONTEXT_VTX_FLUSH : R600_CONTEXT_TEX_FLUSH;
	state->enabled_mask |= 1 << vb_index;
	state->dirty_mask |= 1 << vb_index;
	r600_atom_dirty(rctx, &state->atom);
}

const struct u_resource_vtbl r600_global_buffer_vtbl =
{
	u_default_resource_get_handle, /* get_handle */
	r600_compute_global_buffer_destroy, /* resource_destroy */
	r600_compute_global_get_transfer, /* get_transfer */
	r600_compute_global_transfer_destroy, /* transfer_destroy */
	r600_compute_global_transfer_map, /* transfer_map */
	r600_compute_global_transfer_flush_region,/* transfer_flush_region */
	r600_compute_global_transfer_unmap, /* transfer_unmap */
	r600_compute_global_transfer_inline_write /* transfer_inline_write */
};

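/**
 * Create a compute shader state object: parse the LLVM bitcode (when
 * HAVE_OPENCL is defined), translate it to r600 bytecode and upload the
 * result into a VRAM buffer.
 */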
void *evergreen_create_compute_state(
	struct pipe_context *ctx_,
	const struct pipe_compute_state *cso)
{
	struct r600_context *ctx = (struct r600_context *)ctx_;
	struct r600_pipe_compute *shader = CALLOC_STRUCT(r600_pipe_compute);
	void *p;

#ifdef HAVE_OPENCL
	const struct pipe_llvm_program_header * header;
	const unsigned char * code;

	COMPUTE_DBG("*** evergreen_create_compute_state\n");

	header = cso->prog;
	code = cso->prog + sizeof(struct pipe_llvm_program_header);
#endif

	shader->ctx = (struct r600_context*)ctx;
	shader->resources = (struct evergreen_compute_resource*)
			CALLOC(sizeof(struct evergreen_compute_resource),
			get_compute_resource_num());
	shader->local_size = cso->req_local_mem; ///TODO: assert it
	shader->private_size = cso->req_private_mem;
	shader->input_size = cso->req_input_mem;

#ifdef HAVE_OPENCL
	shader->mod = llvm_parse_bitcode(code, header->num_bytes);

	r600_compute_shader_create(ctx_, shader->mod, &shader->bc);
#endif
	shader->shader_code_bo = r600_compute_buffer_alloc_vram(ctx->screen,
							shader->bc.ndw * 4);

	p = ctx->ws->buffer_map(shader->shader_code_bo->cs_buf, ctx->cs,
							PIPE_TRANSFER_WRITE);

	memcpy(p, shader->bc.bytecode, shader->bc.ndw * 4);
	ctx->ws->buffer_unmap(shader->shader_code_bo->cs_buf);
	return shader;
}

void evergreen_delete_compute_state(struct pipe_context *ctx, void* state)
{
	struct r600_pipe_compute *shader = (struct r600_pipe_compute *)state;

	free(shader->resources);
	free(shader);
}

static void evergreen_bind_compute_state(struct pipe_context *ctx_, void *state)
{
	struct r600_context *ctx = (struct r600_context *)ctx_;

	COMPUTE_DBG("*** evergreen_bind_compute_state\n");

	ctx->cs_shader_state.shader = (struct r600_pipe_compute *)state;
}

/* The kernel parameters are stored in a vtx buffer (ID=0).  Besides the
 * explicit kernel parameters, there are implicit parameters that need to be
 * stored in the vertex buffer as well.  Here is how these parameters are
 * organized in the buffer:
 *
 * DWORDS 0-2: Number of work groups in each dimension (x,y,z)
 * DWORDS 3-5: Number of global work items in each dimension (x,y,z)
 * DWORDS 6-8: Number of work items within each work group in each dimension
 *             (x,y,z)
 * DWORDS 9+ : Kernel parameters
 */
void evergreen_compute_upload_input(
	struct pipe_context *ctx_,
	const uint *block_layout,
	const uint *grid_layout,
	const void *input)
{
	struct r600_context *ctx = (struct r600_context *)ctx_;
	struct r600_pipe_compute *shader = ctx->cs_shader_state.shader;
	int i;
	unsigned kernel_parameters_offset_bytes = 36;
	uint32_t * num_work_groups_start;
	uint32_t * global_size_start;
	uint32_t * local_size_start;
	uint32_t * kernel_parameters_start;

	if (shader->input_size == 0) {
		return;
	}

	if (!shader->kernel_param) {
		unsigned buffer_size = shader->input_size;

		/* Add space for the grid dimensions */
		buffer_size += kernel_parameters_offset_bytes * sizeof(uint);
		shader->kernel_param = r600_compute_buffer_alloc_vram(
						ctx->screen, buffer_size);
	}

	num_work_groups_start = ctx->ws->buffer_map(
		shader->kernel_param->cs_buf, ctx->cs, PIPE_TRANSFER_WRITE);
	global_size_start = num_work_groups_start + (3 * (sizeof(uint) / 4));
	local_size_start = global_size_start + (3 * (sizeof(uint) / 4));
	kernel_parameters_start = local_size_start + (3 * (sizeof(uint) / 4));

	/* Copy the number of work groups */
	memcpy(num_work_groups_start, grid_layout, 3 * sizeof(uint));

	/* Copy the global size */
	for (i = 0; i < 3; i++) {
		global_size_start[i] = grid_layout[i] * block_layout[i];
	}

	/* Copy the local dimensions */
	memcpy(local_size_start, block_layout, 3 * sizeof(uint));

	/* Copy the kernel inputs */
	memcpy(kernel_parameters_start, input, shader->input_size);

	for (i = 0; i < (kernel_parameters_offset_bytes / 4) +
					(shader->input_size / 4); i++) {
		COMPUTE_DBG("input %i : %i\n", i,
			((unsigned*)num_work_groups_start)[i]);
	}

	ctx->ws->buffer_unmap(shader->kernel_param->cs_buf);

	///ID=0 is reserved for the parameters
	evergreen_cs_set_vertex_buffer(ctx, 0, 0,
			(struct pipe_resource*)shader->kernel_param);
	///ID=0 is reserved for parameters
	evergreen_set_const_cache(shader, 0, shader->kernel_param,
						shader->input_size, 0);
}

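/**
 * Program the thread-group and grid dimensions (VGT_COMPUTE_*,
 * SPI_COMPUTE_NUM_THREAD_*) and the LDS allocation / wavefront count, then
 * emit the DISPATCH_DIRECT packet that launches the kernel.
 */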
static void evergreen_emit_direct_dispatch(
		struct r600_context *rctx,
		const uint *block_layout, const uint *grid_layout)
{
	int i;
	struct radeon_winsys_cs *cs = rctx->cs;
	unsigned num_waves;
	unsigned num_pipes = rctx->screen->info.r600_max_pipes;
	unsigned wave_divisor = (16 * num_pipes);
	int group_size = 1;
	int grid_size = 1;
	/* XXX: Enable lds and get size from cs_shader_state */
	unsigned lds_size = 0;

	/* Calculate group_size/grid_size */
	for (i = 0; i < 3; i++) {
		group_size *= block_layout[i];
	}

	for (i = 0; i < 3; i++) {
		grid_size *= grid_layout[i];
	}

	/* num_waves = ceil((tg_size.x * tg_size.y * tg_size.z) / (16 * num_pipes)) */
	num_waves = (block_layout[0] * block_layout[1] * block_layout[2] +
			wave_divisor - 1) / wave_divisor;

	COMPUTE_DBG("Using %u pipes, there are %u wavefronts per thread block\n",
							num_pipes, num_waves);

	/* XXX: Partition the LDS between PS/CS.  By default half (4096 dwords
	 * on Evergreen) goes to Pixel Shaders and half goes to Compute Shaders.
	 * We may need to allocate the entire LDS space for Compute Shaders.
	 *
	 * EG: R_008E2C_SQ_LDS_RESOURCE_MGMT := S_008E2C_NUM_LS_LDS(lds_dwords)
	 * CM: CM_R_0286FC_SPI_LDS_MGMT := S_0286FC_NUM_LS_LDS(lds_dwords)
	 */

	r600_write_config_reg(cs, R_008970_VGT_NUM_INDICES, group_size);

	r600_write_config_reg_seq(cs, R_00899C_VGT_COMPUTE_START_X, 3);
	r600_write_value(cs, 0); /* R_00899C_VGT_COMPUTE_START_X */
	r600_write_value(cs, 0); /* R_0089A0_VGT_COMPUTE_START_Y */
	r600_write_value(cs, 0); /* R_0089A4_VGT_COMPUTE_START_Z */

	r600_write_config_reg(cs, R_0089AC_VGT_COMPUTE_THREAD_GROUP_SIZE,
								group_size);

	r600_write_compute_context_reg_seq(cs, R_0286EC_SPI_COMPUTE_NUM_THREAD_X, 3);
	r600_write_value(cs, block_layout[0]); /* R_0286EC_SPI_COMPUTE_NUM_THREAD_X */
	r600_write_value(cs, block_layout[1]); /* R_0286F0_SPI_COMPUTE_NUM_THREAD_Y */
	r600_write_value(cs, block_layout[2]); /* R_0286F4_SPI_COMPUTE_NUM_THREAD_Z */

	r600_write_compute_context_reg(cs, CM_R_0288E8_SQ_LDS_ALLOC,
					lds_size | (num_waves << 14));

	/* Dispatch packet */
	r600_write_value(cs, PKT3C(PKT3_DISPATCH_DIRECT, 3, 0));
	r600_write_value(cs, grid_layout[0]);
	r600_write_value(cs, grid_layout[1]);
	r600_write_value(cs, grid_layout[2]);
	/* VGT_DISPATCH_INITIATOR = COMPUTE_SHADER_EN */
	r600_write_value(cs, 1);
}

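/**
 * Build and submit the compute command stream: emit the start-of-CS state,
 * framebuffer and vertex buffer state, the compute shader, all enabled
 * compute resources and the dispatch packet, then flush the CS and wait for
 * one of the buffers used by the kernel to become idle.
 */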
static void compute_emit_cs(struct r600_context *ctx, const uint *block_layout,
		const uint *grid_layout)
{
	struct radeon_winsys_cs *cs = ctx->cs;
	int i;

	struct r600_resource *onebo = NULL;
	struct r600_pipe_state *cb_state;
	struct evergreen_compute_resource *resources =
					ctx->cs_shader_state.shader->resources;

	/* Initialize all the compute-related registers.
	 *
	 * See evergreen_init_atom_start_compute_cs() in this file for the list
	 * of registers initialized by the start_compute_cs_cmd atom.
	 */
	r600_emit_atom(ctx, &ctx->start_compute_cs_cmd.atom);

	ctx->flags |= R600_CONTEXT_CB_FLUSH;
	r600_flush_emit(ctx);

	/* Emit cb_state */
	cb_state = ctx->states[R600_PIPE_STATE_FRAMEBUFFER];
	r600_context_pipe_state_emit(ctx, cb_state, RADEON_CP_PACKET3_COMPUTE_MODE);

	/* Set CB_TARGET_MASK  XXX: Use cb_misc_state */
	r600_write_compute_context_reg(cs, R_028238_CB_TARGET_MASK,
					ctx->compute_cb_target_mask);

	/* Emit vertex buffer state */
	ctx->cs_vertex_buffer_state.atom.num_dw = 12 * util_bitcount(ctx->cs_vertex_buffer_state.dirty_mask);
	r600_emit_atom(ctx, &ctx->cs_vertex_buffer_state.atom);

	/* Emit compute shader state */
	r600_emit_atom(ctx, &ctx->cs_shader_state.atom);

	for (i = 0; i < get_compute_resource_num(); i++) {
		if (resources[i].enabled) {
			int j;
			COMPUTE_DBG("resnum: %i, cdw: %i\n", i, cs->cdw);

			for (j = 0; j < resources[i].cs_end; j++) {
				if (resources[i].do_reloc[j]) {
					assert(resources[i].bo);
					evergreen_emit_ctx_reloc(ctx,
						resources[i].bo,
						resources[i].usage);
				}

				cs->buf[cs->cdw++] = resources[i].cs[j];
			}

			if (resources[i].bo) {
				onebo = resources[i].bo;
				evergreen_emit_ctx_reloc(ctx,
					resources[i].bo,
					resources[i].usage);

				///special case for textures
				if (resources[i].do_reloc[resources[i].cs_end] == 2) {
					evergreen_emit_ctx_reloc(ctx,
						resources[i].bo,
						resources[i].usage);
				}
			}
		}
	}

	/* Emit dispatch state and dispatch packet */
	evergreen_emit_direct_dispatch(ctx, block_layout, grid_layout);

	/* XXX evergreen_flush_emit() hardcodes the CP_COHER_SIZE to 0xffffffff
	 */
	ctx->flags |= R600_CONTEXT_CB_FLUSH;
	r600_flush_emit(ctx);

#if 0
	COMPUTE_DBG("cdw: %i\n", cs->cdw);
	for (i = 0; i < cs->cdw; i++) {
		COMPUTE_DBG("%4i : 0x%08X\n", i, ctx->cs->buf[i]);
	}
#endif

	ctx->ws->cs_flush(ctx->cs, RADEON_FLUSH_ASYNC | RADEON_FLUSH_COMPUTE);

	ctx->pm4_dirty_cdwords = 0;
	ctx->flags = 0;

	COMPUTE_DBG("shader started\n");

	ctx->ws->buffer_wait(onebo->buf, 0);

	COMPUTE_DBG("...\n");

	ctx->streamout_start = TRUE;
	ctx->streamout_append_bitmask = ~0;
}

/**
 * Emit function for r600_cs_shader_state atom
 */
void evergreen_emit_cs_shader(
		struct r600_context *rctx,
		struct r600_atom *atom)
{
	struct r600_cs_shader_state *state =
					(struct r600_cs_shader_state*)atom;
	struct r600_pipe_compute *shader = state->shader;
	struct radeon_winsys_cs *cs = rctx->cs;
	uint64_t va;

	va = r600_resource_va(&rctx->screen->screen, &shader->shader_code_bo->b.b);

	r600_write_compute_context_reg_seq(cs, R_0288D0_SQ_PGM_START_LS, 3);
	r600_write_value(cs, va >> 8); /* R_0288D0_SQ_PGM_START_LS */
	r600_write_value(cs,           /* R_0288D4_SQ_PGM_RESOURCES_LS */
			S_0288D4_NUM_GPRS(shader->bc.ngpr)
			| S_0288D4_STACK_SIZE(shader->bc.nstack));
	r600_write_value(cs, 0); /* R_0288D8_SQ_PGM_RESOURCES_LS_2 */

	r600_write_value(cs, PKT3C(PKT3_NOP, 0, 0));
	r600_write_value(cs, r600_context_bo_reloc(rctx, shader->shader_code_bo,
							RADEON_USAGE_READ));

	rctx->flags |= R600_CONTEXT_SHADERCONST_FLUSH;
}

static void evergreen_launch_grid(
		struct pipe_context *ctx_,
		const uint *block_layout, const uint *grid_layout,
		uint32_t pc, const void *input)
{
	struct r600_context *ctx = (struct r600_context *)ctx_;

	COMPUTE_DBG("PC: %i\n", pc);

	evergreen_compute_upload_input(ctx_, block_layout, grid_layout, input);
	compute_emit_cs(ctx, block_layout, grid_layout);
}

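/**
 * Bind a range of compute resources (surfaces backed by global-memory
 * buffers).  Writable surfaces are additionally bound as RATs; every surface
 * is also bound as a vertex buffer so the kernel can read it with linear
 * indexing.
 */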
static void evergreen_set_compute_resources(struct pipe_context * ctx_,
		unsigned start, unsigned count,
		struct pipe_surface ** surfaces)
{
	struct r600_context *ctx = (struct r600_context *)ctx_;
	struct r600_surface **resources = (struct r600_surface **)surfaces;

	COMPUTE_DBG("*** evergreen_set_compute_resources: start = %u count = %u\n",
			start, count);

	for (int i = 0; i < count; i++) {
		/* The first two vertex buffers are reserved for parameters and
		 * global buffers. */
		unsigned vtx_id = 2 + i;
		if (resources[i]) {
			struct r600_resource_global *buffer =
				(struct r600_resource_global*)
				resources[i]->base.texture;
			if (resources[i]->base.writable) {
				assert(i+1 < 12);

				evergreen_set_rat(ctx->cs_shader_state.shader, i+1,
				(struct r600_resource *)resources[i]->base.texture,
				buffer->chunk->start_in_dw*4,
				resources[i]->base.texture->width0);
			}

			evergreen_cs_set_vertex_buffer(ctx, vtx_id,
					buffer->chunk->start_in_dw * 4,
					resources[i]->base.texture);
		}
	}
}

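/**
 * Bind sampler views for the compute shader.  Fetch slots 0 and 1 are
 * reserved (parameter buffer and global buffer pool), so view i goes to
 * texture fetch resource i+2.
 */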
static void evergreen_set_cs_sampler_view(struct pipe_context *ctx_,
		unsigned start_slot, unsigned count,
		struct pipe_sampler_view **views)
{
	struct r600_context *ctx = (struct r600_context *)ctx_;
	struct r600_pipe_sampler_view **resource =
		(struct r600_pipe_sampler_view **)views;

	for (int i = 0; i < count; i++) {
		if (resource[i]) {
			assert(i+1 < 12);
			///FETCH0 = VTX0 (param buffer),
			///FETCH1 = VTX1 (global buffer pool), FETCH2... = TEX
			evergreen_set_tex_resource(ctx->cs_shader_state.shader, resource[i], i+2);
		}
	}
}

static void evergreen_bind_compute_sampler_states(
	struct pipe_context *ctx_,
	unsigned start_slot,
	unsigned num_samplers,
	void **samplers_)
{
	struct r600_context *ctx = (struct r600_context *)ctx_;
	struct compute_sampler_state ** samplers =
		(struct compute_sampler_state **)samplers_;

	for (int i = 0; i < num_samplers; i++) {
		if (samplers[i]) {
			evergreen_set_sampler_resource(
				ctx->cs_shader_state.shader, samplers[i], i);
		}
	}
}

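/**
 * Bind the global memory pool for the compute shader.  Each buffer's handle
 * is filled with its byte offset inside the pool; the whole pool is then
 * bound as RAT0 (for writes) and as vertex buffer 1 (for reads).
 */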
static void evergreen_set_global_binding(
	struct pipe_context *ctx_, unsigned first, unsigned n,
	struct pipe_resource **resources,
	uint32_t **handles)
{
	struct r600_context *ctx = (struct r600_context *)ctx_;
	struct compute_memory_pool *pool = ctx->screen->global_pool;
	struct r600_resource_global **buffers =
		(struct r600_resource_global **)resources;

	COMPUTE_DBG("*** evergreen_set_global_binding first = %u n = %u\n",
			first, n);

	if (!resources) {
		/* XXX: Unset */
		return;
	}

	compute_memory_finalize_pending(pool, ctx_);

	for (int i = 0; i < n; i++)
	{
		assert(resources[i]->target == PIPE_BUFFER);
		assert(resources[i]->bind & PIPE_BIND_GLOBAL);

		*(handles[i]) = buffers[i]->chunk->start_in_dw * 4;
	}

	evergreen_set_rat(ctx->cs_shader_state.shader, 0, pool->bo, 0, pool->size_in_dw * 4);
	evergreen_cs_set_vertex_buffer(ctx, 1, 0,
				(struct pipe_resource*)pool->bo);
}

/**
 * This function initializes all the compute specific registers that need to
 * be initialized for each compute command stream.  Registers that are common
 * to both compute and 3D will be initialized at the beginning of each compute
 * command stream by the start_cs_cmd atom.  However, since the SET_CONTEXT_REG
 * packet requires that the shader type bit be set, we must initialize all
 * context registers needed for compute in this function.  The registers
 * initialized by the start_cs_cmd atom can be found in evergreen_state.c in
 * the functions evergreen_init_atom_start_cs or cayman_init_atom_start_cs,
 * depending on the GPU family.
 */
void evergreen_init_atom_start_compute_cs(struct r600_context *ctx)
{
	struct r600_command_buffer *cb = &ctx->start_compute_cs_cmd;
	int num_threads;
	int num_stack_entries;

	/* since all required registers are initialised in the
	 * start_compute_cs_cmd atom, we can EMIT_EARLY here.
	 */
	r600_init_command_buffer(ctx, cb, 1, 256);
	cb->pkt_flags = RADEON_CP_PACKET3_COMPUTE_MODE;

	switch (ctx->family) {
	case CHIP_CEDAR:
	default:
		num_threads = 128;
		num_stack_entries = 256;
		break;
	case CHIP_REDWOOD:
		num_threads = 128;
		num_stack_entries = 256;
		break;
	case CHIP_JUNIPER:
		num_threads = 128;
		num_stack_entries = 512;
		break;
	case CHIP_CYPRESS:
	case CHIP_HEMLOCK:
		num_threads = 128;
		num_stack_entries = 512;
		break;
	case CHIP_PALM:
		num_threads = 128;
		num_stack_entries = 256;
		break;
	case CHIP_SUMO:
		num_threads = 128;
		num_stack_entries = 256;
		break;
	case CHIP_SUMO2:
		num_threads = 128;
		num_stack_entries = 512;
		break;
	case CHIP_BARTS:
		num_threads = 128;
		num_stack_entries = 512;
		break;
	case CHIP_TURKS:
		num_threads = 128;
		num_stack_entries = 256;
		break;
	case CHIP_CAICOS:
		num_threads = 128;
		num_stack_entries = 256;
		break;
	}

	/* Config Registers */
	evergreen_init_common_regs(cb, ctx->chip_class, ctx->family,
					ctx->screen->info.drm_minor);

	/* The primitive type always needs to be POINTLIST for compute. */
	r600_store_config_reg(cb, R_008958_VGT_PRIMITIVE_TYPE,
						V_008958_DI_PT_POINTLIST);

	if (ctx->chip_class < CAYMAN) {

		/* These registers control which simds can be used by each stage.
		 * The default for these registers is 0xffffffff, which means
		 * all simds are available for each stage.  It's possible we may
		 * want to play around with these in the future, but for now
		 * the default value is fine.
		 *
		 * R_008E20_SQ_STATIC_THREAD_MGMT1
		 * R_008E24_SQ_STATIC_THREAD_MGMT2
		 * R_008E28_SQ_STATIC_THREAD_MGMT3
		 */

		/* XXX: We may need to adjust the thread and stack resource
		 * values for 3D/compute interop */

		r600_store_config_reg_seq(cb, R_008C18_SQ_THREAD_RESOURCE_MGMT_1, 5);

		/* R_008C18_SQ_THREAD_RESOURCE_MGMT_1
		 * Set the number of threads used by the PS/VS/GS/ES stage to
		 * 0.
		 */
		r600_store_value(cb, 0);

		/* R_008C1C_SQ_THREAD_RESOURCE_MGMT_2
		 * Set the number of threads used by the CS (aka LS) stage to
		 * the maximum number of threads and set the number of threads
		 * for the HS stage to 0. */
		r600_store_value(cb, S_008C1C_NUM_LS_THREADS(num_threads));

		/* R_008C20_SQ_STACK_RESOURCE_MGMT_1
		 * Set the Control Flow stack entries to 0 for PS/VS stages */
		r600_store_value(cb, 0);

		/* R_008C24_SQ_STACK_RESOURCE_MGMT_2
		 * Set the Control Flow stack entries to 0 for GS/ES stages */
		r600_store_value(cb, 0);

		/* R_008C28_SQ_STACK_RESOURCE_MGMT_3
		 * Set the Control Flow stack entries to 0 for the HS stage, and
		 * set it to the maximum value for the CS (aka LS) stage. */
		r600_store_value(cb,
			S_008C28_NUM_LS_STACK_ENTRIES(num_stack_entries));
	}

	/* Context Registers */

	if (ctx->chip_class < CAYMAN) {
		/* workaround for hw issues with dyn gpr - must set all limits
		 * to 240 instead of 0, 0x1e == 240 / 8
		 */
		r600_store_context_reg(cb, R_028838_SQ_DYN_GPR_RESOURCE_LIMIT_1,
				S_028838_PS_GPRS(0x1e) |
				S_028838_VS_GPRS(0x1e) |
				S_028838_GS_GPRS(0x1e) |
				S_028838_ES_GPRS(0x1e) |
				S_028838_HS_GPRS(0x1e) |
				S_028838_LS_GPRS(0x1e));
	}

	/* XXX: Investigate setting bit 15, which is FAST_COMPUTE_MODE */
	r600_store_context_reg(cb, R_028A40_VGT_GS_MODE,
		S_028A40_COMPUTE_MODE(1) | S_028A40_PARTIAL_THD_AT_EOI(1));

	r600_store_context_reg(cb, R_028B54_VGT_SHADER_STAGES_EN, 2/*CS_ON*/);

	r600_store_context_reg(cb, R_0286E8_SPI_COMPUTE_INPUT_CNTL,
						S_0286E8_TID_IN_GROUP_ENA
						| S_0286E8_TGID_ENA
						| S_0286E8_DISABLE_INDEX_PACK);

	/* The LOOP_CONST registers are an optimization for loops that allows
	 * you to store the initial counter, increment value, and maximum
	 * counter value in a register so that hardware can calculate the
	 * correct number of iterations for the loop, so that you don't need
	 * to have the loop counter in your shader code.  We don't currently
	 * use this optimization, so we must keep track of the counter in the
	 * shader and use a break instruction to exit loops.  However, the
	 * hardware will still use this register to determine when to exit a
	 * loop, so we need to initialize the counter to 0, set the increment
	 * value to 1 and the maximum counter value to 4095 (0xfff), which is
	 * the maximum value allowed.  This gives us a maximum of 4096
	 * iterations for our loops, but hopefully our break instruction will
	 * execute some time before the 4096th iteration.
	 */
	eg_store_loop_const(cb, R_03A200_SQ_LOOP_CONST_0 + (160 * 4), 0x1000FFF);
}

void evergreen_init_compute_state_functions(struct r600_context *ctx)
{
	ctx->context.create_compute_state = evergreen_create_compute_state;
	ctx->context.delete_compute_state = evergreen_delete_compute_state;
	ctx->context.bind_compute_state = evergreen_bind_compute_state;
//	ctx->context.create_sampler_view = evergreen_compute_create_sampler_view;
	ctx->context.set_compute_resources = evergreen_set_compute_resources;
	ctx->context.set_compute_sampler_views = evergreen_set_cs_sampler_view;
	ctx->context.bind_compute_sampler_states = evergreen_bind_compute_sampler_states;
	ctx->context.set_global_binding = evergreen_set_global_binding;
	ctx->context.launch_grid = evergreen_launch_grid;

	/* We always use at least two vertex buffers for compute, one for
	 * parameters and one for global memory */
	ctx->cs_vertex_buffer_state.enabled_mask =
	ctx->cs_vertex_buffer_state.dirty_mask = 1 | 2;
}

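/**
 * Create a PIPE_BIND_GLOBAL buffer by allocating a chunk from the screen's
 * compute memory pool.  The size is rounded up to a whole number of dwords.
 */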
struct pipe_resource *r600_compute_global_buffer_create(
	struct pipe_screen *screen,
	const struct pipe_resource *templ)
{
	assert(templ->target == PIPE_BUFFER);
	assert(templ->bind & PIPE_BIND_GLOBAL);
	assert(templ->array_size == 1 || templ->array_size == 0);
	assert(templ->depth0 == 1 || templ->depth0 == 0);
	assert(templ->height0 == 1 || templ->height0 == 0);

	struct r600_resource_global* result = (struct r600_resource_global*)
		CALLOC(sizeof(struct r600_resource_global), 1);
	struct r600_screen* rscreen = (struct r600_screen*)screen;

	COMPUTE_DBG("*** r600_compute_global_buffer_create\n");
	COMPUTE_DBG("width = %u array_size = %u\n", templ->width0,
			templ->array_size);

	/* Copy the template first so the screen pointer set below is not
	 * overwritten by it. */
	result->base.b.b = *templ;
	result->base.b.vtbl = &r600_global_buffer_vtbl;
	result->base.b.b.screen = screen;
	pipe_reference_init(&result->base.b.b.reference, 1);

	int size_in_dw = (templ->width0+3) / 4;

	result->chunk = compute_memory_alloc(rscreen->global_pool, size_in_dw);

	if (result->chunk == NULL)
	{
		free(result);
		return NULL;
	}

	return &result->base.b.b;
}

void r600_compute_global_buffer_destroy(
	struct pipe_screen *screen,
	struct pipe_resource *res)
{
	assert(res->target == PIPE_BUFFER);
	assert(res->bind & PIPE_BIND_GLOBAL);

	struct r600_resource_global* buffer = (struct r600_resource_global*)res;
	struct r600_screen* rscreen = (struct r600_screen*)screen;

	compute_memory_free(rscreen->global_pool, buffer->chunk->id);

	buffer->chunk = NULL;
	free(res);
}

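/**
 * Map a global buffer by mapping the underlying pool buffer object and
 * returning a pointer offset to this buffer's chunk plus the transfer box
 * offset.
 */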
void* r600_compute_global_transfer_map(
	struct pipe_context *ctx_,
	struct pipe_transfer* transfer)
{
	assert(transfer->resource->target == PIPE_BUFFER);
	assert(transfer->resource->bind & PIPE_BIND_GLOBAL);
	assert(transfer->box.x >= 0);
	assert(transfer->box.y == 0);
	assert(transfer->box.z == 0);

	struct r600_context *ctx = (struct r600_context *)ctx_;
	struct r600_resource_global* buffer =
		(struct r600_resource_global*)transfer->resource;

	uint32_t* map;
	///TODO: do it better, mapping is not possible if the pool is too big

	if (!(map = ctx->ws->buffer_map(buffer->chunk->pool->bo->cs_buf,
						ctx->cs, transfer->usage))) {
		return NULL;
	}

	COMPUTE_DBG("buffer start: %lli\n", buffer->chunk->start_in_dw);
	return ((char*)(map + buffer->chunk->start_in_dw)) + transfer->box.x;
}

void r600_compute_global_transfer_unmap(
	struct pipe_context *ctx_,
	struct pipe_transfer* transfer)
{
	assert(transfer->resource->target == PIPE_BUFFER);
	assert(transfer->resource->bind & PIPE_BIND_GLOBAL);

	struct r600_context *ctx = (struct r600_context *)ctx_;
	struct r600_resource_global* buffer =
		(struct r600_resource_global*)transfer->resource;

	ctx->ws->buffer_unmap(buffer->chunk->pool->bo->cs_buf);
}

struct pipe_transfer * r600_compute_global_get_transfer(
	struct pipe_context *ctx_,
	struct pipe_resource *resource,
	unsigned level,
	unsigned usage,
	const struct pipe_box *box)
{
	struct r600_context *ctx = (struct r600_context *)ctx_;
	struct compute_memory_pool *pool = ctx->screen->global_pool;

	compute_memory_finalize_pending(pool, ctx_);

	assert(resource->target == PIPE_BUFFER);
	struct r600_context *rctx = (struct r600_context*)ctx_;
	struct pipe_transfer *transfer = util_slab_alloc(&rctx->pool_transfers);

	transfer->resource = resource;
	transfer->level = level;
	transfer->usage = usage;
	transfer->box = *box;
	transfer->stride = 0;
	transfer->layer_stride = 0;
	transfer->data = NULL;

	/* Note strides are zero, this is ok for buffers, but not for
	 * textures 2d & higher at least.
	 */
	return transfer;
}

void r600_compute_global_transfer_destroy(
	struct pipe_context *ctx_,
	struct pipe_transfer *transfer)
{
	struct r600_context *rctx = (struct r600_context*)ctx_;
	util_slab_free(&rctx->pool_transfers, transfer);
}

void r600_compute_global_transfer_flush_region(
	struct pipe_context *ctx_,
	struct pipe_transfer *transfer,
	const struct pipe_box *box)
{
	assert(0 && "TODO");
}

void r600_compute_global_transfer_inline_write(
	struct pipe_context *pipe,
	struct pipe_resource *resource,
	unsigned level,
	unsigned usage,
	const struct pipe_box *box,
	const void *data,
	unsigned stride,
	unsigned layer_stride)
{
	assert(0 && "TODO");
}