[mesa.git] / src / gallium / drivers / r600 / evergreen_compute.c
1 /*
2 * Copyright 2011 Adam Rak <adam.rak@streamnovation.com>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Adam Rak <adam.rak@streamnovation.com>
25 */
26
27 #include <stdio.h>
28 #include <errno.h>
29 #include "pipe/p_defines.h"
30 #include "pipe/p_state.h"
31 #include "pipe/p_context.h"
32 #include "util/u_blitter.h"
33 #include "util/u_double_list.h"
34 #include "util/u_transfer.h"
35 #include "util/u_surface.h"
36 #include "util/u_pack_color.h"
37 #include "util/u_memory.h"
38 #include "util/u_inlines.h"
39 #include "util/u_framebuffer.h"
40 #include "pipebuffer/pb_buffer.h"
41 #include "r600.h"
42 #include "evergreend.h"
43 #include "r600_resource.h"
44 #include "r600_shader.h"
45 #include "r600_pipe.h"
46 #include "r600_formats.h"
47 #include "evergreen_compute.h"
48 #include "r600_hw_context_priv.h"
49 #include "evergreen_compute_internal.h"
50 #include "compute_memory_pool.h"
51 #ifdef HAVE_OPENCL
52 #include "llvm_wrapper.h"
53 #endif
54
55 /**
56 RAT0 is for global binding write
57 VTX1 is for global binding read
58
59 for writing images RAT1...
60 for reading images TEX2...
61 TEX2-RAT1 is paired
62
63 TEX2... consumes the same fetch resources that VTX2... would consume
64
65 CONST0 and VTX0 are for parameters
66 CONST0 binds the smaller input parameter buffer, is used for constant indexing,
67 and is also constant cached
68 VTX0 is for indirect/non-constant indexing, or if the input is bigger than
69 the constant cache can handle
70
71 RATs are limited to 12, so we can bind at most 11 textures for writing
72 because we reserve RAT0 for global bindings. With byte addressing enabled,
73 we should reserve another one too, so at most 10 images can be bound for writing.
74
75 from Nvidia OpenCL:
76 CL_DEVICE_MAX_READ_IMAGE_ARGS: 128
77 CL_DEVICE_MAX_WRITE_IMAGE_ARGS: 8
78
79 so 10 for writing is enough. 176 is the max for reading according to the docs
80
81 writable images should be listed first (id < 10), so their id corresponds to RAT(id+1)
82 writable images also consume TEX slots, and VTX slots too because of linear indexing
83
84 */
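/* Illustrative example of the binding scheme described above.  The kernel
 * and slot numbers here are hypothetical and only follow from the rules
 * above, not from any code in this file.  For a kernel with two writable
 * images and one read-only image:
 *
 *   global buffer (write)   -> RAT0
 *   global buffer (read)    -> VTX1
 *   kernel parameters       -> CONST0 / VTX0
 *   writable image id 0     -> RAT1 (paired with TEX2)
 *   writable image id 1     -> RAT2 (paired with TEX3)
 *   read-only image id 2    -> TEX4
 */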
85
86 static void evergreen_cs_set_vertex_buffer(
87 struct r600_context * rctx,
88 unsigned vb_index,
89 unsigned offset,
90 struct pipe_resource * buffer)
91 {
92 struct r600_vertexbuf_state *state = &rctx->cs_vertex_buffer_state;
93 struct pipe_vertex_buffer *vb = &state->vb[vb_index];
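/* pipe_vertex_buffer::stride is in bytes; a stride of 1 means the
 * buffer is addressed byte by byte rather than as an array of
 * fixed-size vertices. */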
94 vb->stride = 1;
95 vb->buffer_offset = offset;
96 vb->buffer = buffer;
97 vb->user_buffer = NULL;
98
99 /* The vertex instructions in the compute shaders use the texture cache,
100 * so we need to invalidate it. */
101 rctx->flags |= R600_CONTEXT_TEX_FLUSH;
102 state->enabled_mask |= 1 << vb_index;
103 state->dirty_mask |= 1 << vb_index;
104 r600_atom_dirty(rctx, &state->atom);
105 }
106
107 const struct u_resource_vtbl r600_global_buffer_vtbl =
108 {
109 u_default_resource_get_handle, /* get_handle */
110 r600_compute_global_buffer_destroy, /* resource_destroy */
111 r600_compute_global_get_transfer, /* get_transfer */
112 r600_compute_global_transfer_destroy, /* transfer_destroy */
113 r600_compute_global_transfer_map, /* transfer_map */
114 r600_compute_global_transfer_flush_region,/* transfer_flush_region */
115 r600_compute_global_transfer_unmap, /* transfer_unmap */
116 r600_compute_global_transfer_inline_write /* transfer_inline_write */
117 };
118
119
120 void *evergreen_create_compute_state(
121 struct pipe_context *ctx_,
122 const struct pipe_compute_state *cso)
123 {
124 struct r600_context *ctx = (struct r600_context *)ctx_;
125 struct r600_pipe_compute *shader = CALLOC_STRUCT(r600_pipe_compute);
126 void *p;
127
128 #ifdef HAVE_OPENCL
129 const struct pipe_llvm_program_header * header;
130 const unsigned char * code;
131
132 COMPUTE_DBG("*** evergreen_create_compute_state\n");
133
134 header = cso->prog;
135 code = cso->prog + sizeof(struct pipe_llvm_program_header);
136 #endif
137
138 shader->ctx = (struct r600_context*)ctx;
139 shader->resources = (struct evergreen_compute_resource*)
140 CALLOC(sizeof(struct evergreen_compute_resource),
141 get_compute_resource_num());
142 shader->local_size = cso->req_local_mem; ///TODO: assert it
143 shader->private_size = cso->req_private_mem;
144 shader->input_size = cso->req_input_mem;
145
146 #ifdef HAVE_OPENCL
147 shader->mod = llvm_parse_bitcode(code, header->num_bytes);
148
149 r600_compute_shader_create(ctx_, shader->mod, &shader->bc);
150 #endif
151 shader->shader_code_bo = r600_compute_buffer_alloc_vram(ctx->screen,
152 shader->bc.ndw * 4);
153
154 p = ctx->ws->buffer_map(shader->shader_code_bo->cs_buf, ctx->cs,
155 PIPE_TRANSFER_WRITE);
156
157 memcpy(p, shader->bc.bytecode, shader->bc.ndw * 4);
158 ctx->ws->buffer_unmap(shader->shader_code_bo->cs_buf);
159 return shader;
160 }
161
162 void evergreen_delete_compute_state(struct pipe_context *ctx, void* state)
163 {
164 struct r600_pipe_compute *shader = (struct r600_pipe_compute *)state;
165
166 free(shader->resources);
167 free(shader);
168 }
169
170 static void evergreen_bind_compute_state(struct pipe_context *ctx_, void *state)
171 {
172 struct r600_context *ctx = (struct r600_context *)ctx_;
173
174 COMPUTE_DBG("*** evergreen_bind_compute_state\n");
175
176 ctx->cs_shader_state.shader = (struct r600_pipe_compute *)state;
177 }
178
179 /* The kernel parameters are stored in a vtx buffer (ID=0). Besides the
180 * explicit kernel parameters, there are implicit parameters that need to be
181 * stored in the vertex buffer as well. Here is how these parameters are organized in
182 * the buffer:
183 *
184 * DWORDS 0-2: Number of work groups in each dimension (x,y,z)
185 * DWORDS 3-5: Number of global work items in each dimension (x,y,z)
186 * DWORDS 6-8: Number of work items within each work group in each dimension
187 * (x,y,z)
188 * DWORDS 9+ : Kernel parameters
189 */
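/* Worked example of the layout above (hypothetical launch, numbers chosen
 * purely for illustration), with grid_layout = (4, 2, 1) and
 * block_layout = (16, 16, 1):
 *
 *   DWORDS 0-2:  4, 2, 1    (work groups per dimension)
 *   DWORDS 3-5: 64, 32, 1   (global work items = grid * block)
 *   DWORDS 6-8: 16, 16, 1   (work items per work group)
 *   DWORDS 9+ : the kernel parameters passed in via 'input'
 */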
190 void evergreen_compute_upload_input(
191 struct pipe_context *ctx_,
192 const uint *block_layout,
193 const uint *grid_layout,
194 const void *input)
195 {
196 struct r600_context *ctx = (struct r600_context *)ctx_;
197 struct r600_pipe_compute *shader = ctx->cs_shader_state.shader;
198 int i;
199 unsigned kernel_parameters_offset_bytes = 36;
200 uint32_t * num_work_groups_start;
201 uint32_t * global_size_start;
202 uint32_t * local_size_start;
203 uint32_t * kernel_parameters_start;
204
205 if (shader->input_size == 0) {
206 return;
207 }
208
209 if (!shader->kernel_param) {
210 unsigned buffer_size = shader->input_size;
211
212 /* Add space for the grid dimensions */
213 buffer_size += kernel_parameters_offset_bytes * sizeof(uint);
214 shader->kernel_param = r600_compute_buffer_alloc_vram(
215 ctx->screen, buffer_size);
216 }
217
218 num_work_groups_start = ctx->ws->buffer_map(
219 shader->kernel_param->cs_buf, ctx->cs, PIPE_TRANSFER_WRITE);
220 global_size_start = num_work_groups_start + (3 * (sizeof(uint) / 4));
221 local_size_start = global_size_start + (3 * (sizeof(uint) / 4));
222 kernel_parameters_start = local_size_start + (3 * (sizeof(uint) / 4));
223
224 /* Copy the grid layout (number of work groups in each dimension) */
225 memcpy(num_work_groups_start, grid_layout, 3 * sizeof(uint));
226
227 /* Copy the global size */
228 for (i = 0; i < 3; i++) {
229 global_size_start[i] = grid_layout[i] * block_layout[i];
230 }
231
232 /* Copy the local dimensions */
233 memcpy(local_size_start, block_layout, 3 * sizeof(uint));
234
235 /* Copy the kernel inputs */
236 memcpy(kernel_parameters_start, input, shader->input_size);
237
238 for (i = 0; i < (kernel_parameters_offset_bytes / 4) +
239 (shader->input_size / 4); i++) {
240 COMPUTE_DBG("input %i : %i\n", i,
241 ((unsigned*)num_work_groups_start)[i]);
242 }
243
244 ctx->ws->buffer_unmap(shader->kernel_param->cs_buf);
245
246 ///Vertex buffer ID=0 is reserved for the parameters
247 evergreen_cs_set_vertex_buffer(ctx, 0, 0,
248 (struct pipe_resource*)shader->kernel_param);
249 ///Const cache ID=0 is reserved for the parameters
250 evergreen_set_const_cache(shader, 0, shader->kernel_param,
251 shader->input_size, 0);
252 }
253
254 static void evergreen_emit_direct_dispatch(
255 struct r600_context *rctx,
256 const uint *block_layout, const uint *grid_layout)
257 {
258 int i;
259 struct radeon_winsys_cs *cs = rctx->cs;
260 unsigned num_waves;
261 unsigned num_pipes = rctx->screen->info.r600_max_pipes;
262 unsigned wave_divisor = (16 * num_pipes);
263 int group_size = 1;
264 int grid_size = 1;
265 /* XXX: Enable lds and get size from cs_shader_state */
266 unsigned lds_size = 0;
267
268 /* Calculate group_size/grid_size */
269 for (i = 0; i < 3; i++) {
270 group_size *= block_layout[i];
271 }
272
273 for (i = 0; i < 3; i++) {
274 grid_size *= grid_layout[i];
275 }
276
277 /* num_waves = ceil((tg_size.x * tg_size.y * tg_size.z) / (16 * num_pipes)) */
278 num_waves = (block_layout[0] * block_layout[1] * block_layout[2] +
279 wave_divisor - 1) / wave_divisor;
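/* Illustrative numbers only: a 16x16x1 block on a chip with 8 pipes
 * gives 256 threads / (16 * 8) = 2 wavefronts per thread group. */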
280
281 COMPUTE_DBG("Using %u pipes, there are %u wavefronts per thread block\n",
282 num_pipes, num_waves);
283
284 /* XXX: Partition the LDS between PS/CS. By default half (4096 dwords
285 * on Evergreen) goes to Pixel Shaders and half goes to Compute Shaders.
286 * We may need to allocate the entire LDS space for Compute Shaders.
287 *
288 * EG: R_008E2C_SQ_LDS_RESOURCE_MGMT := S_008E2C_NUM_LS_LDS(lds_dwords)
289 * CM: CM_R_0286FC_SPI_LDS_MGMT := S_0286FC_NUM_LS_LDS(lds_dwords)
290 */
291
292 r600_write_config_reg(cs, R_008970_VGT_NUM_INDICES, group_size);
293
294 r600_write_config_reg_seq(cs, R_00899C_VGT_COMPUTE_START_X, 3);
295 r600_write_value(cs, 0); /* R_00899C_VGT_COMPUTE_START_X */
296 r600_write_value(cs, 0); /* R_0089A0_VGT_COMPUTE_START_Y */
297 r600_write_value(cs, 0); /* R_0089A4_VGT_COMPUTE_START_Z */
298
299 r600_write_config_reg(cs, R_0089AC_VGT_COMPUTE_THREAD_GROUP_SIZE,
300 group_size);
301
302 r600_write_compute_context_reg_seq(cs, R_0286EC_SPI_COMPUTE_NUM_THREAD_X, 3);
303 r600_write_value(cs, block_layout[0]); /* R_0286EC_SPI_COMPUTE_NUM_THREAD_X */
304 r600_write_value(cs, block_layout[1]); /* R_0286F0_SPI_COMPUTE_NUM_THREAD_Y */
305 r600_write_value(cs, block_layout[2]); /* R_0286F4_SPI_COMPUTE_NUM_THREAD_Z */
306
307 r600_write_compute_context_reg(cs, CM_R_0288E8_SQ_LDS_ALLOC,
308 lds_size | (num_waves << 14));
309
310 /* Dispatch packet */
311 r600_write_value(cs, PKT3C(PKT3_DISPATCH_DIRECT, 3, 0));
312 r600_write_value(cs, grid_layout[0]);
313 r600_write_value(cs, grid_layout[1]);
314 r600_write_value(cs, grid_layout[2]);
315 /* VGT_DISPATCH_INITIATOR = COMPUTE_SHADER_EN */
316 r600_write_value(cs, 1);
317 }
318
319 static void compute_emit_cs(struct r600_context *ctx, const uint *block_layout,
320 const uint *grid_layout)
321 {
322 struct radeon_winsys_cs *cs = ctx->cs;
323 int i;
324
325 struct r600_resource *onebo = NULL;
326 struct r600_pipe_state *cb_state;
327 struct evergreen_compute_resource *resources =
328 ctx->cs_shader_state.shader->resources;
329
330 /* Initialize all the compute-related registers.
331 *
332 * See evergreen_init_atom_start_compute_cs() in this file for the list
333 * of registers initialized by the start_compute_cs_cmd atom.
334 */
335 r600_emit_atom(ctx, &ctx->start_compute_cs_cmd.atom);
336
337 ctx->flags |= R600_CONTEXT_CB_FLUSH;
338 r600_flush_emit(ctx);
339
340 /* Emit cb_state */
341 cb_state = ctx->states[R600_PIPE_STATE_FRAMEBUFFER];
342 r600_context_pipe_state_emit(ctx, cb_state, RADEON_CP_PACKET3_COMPUTE_MODE);
343
344 /* Set CB_TARGET_MASK XXX: Use cb_misc_state */
345 r600_write_compute_context_reg(cs, R_028238_CB_TARGET_MASK,
346 ctx->compute_cb_target_mask);
347
348
349 /* Emit vertex buffer state */
350 ctx->cs_vertex_buffer_state.atom.num_dw = 12 * util_bitcount(ctx->cs_vertex_buffer_state.dirty_mask);
351 r600_emit_atom(ctx, &ctx->cs_vertex_buffer_state.atom);
352
353 /* Emit compute shader state */
354 r600_emit_atom(ctx, &ctx->cs_shader_state.atom);
355
356 for (i = 0; i < get_compute_resource_num(); i++) {
357 if (resources[i].enabled) {
358 int j;
359 COMPUTE_DBG("resnum: %i, cdw: %i\n", i, cs->cdw);
360
361 for (j = 0; j < resources[i].cs_end; j++) {
362 if (resources[i].do_reloc[j]) {
363 assert(resources[i].bo);
364 evergreen_emit_ctx_reloc(ctx,
365 resources[i].bo,
366 resources[i].usage);
367 }
368
369 cs->buf[cs->cdw++] = resources[i].cs[j];
370 }
371
372 if (resources[i].bo) {
373 onebo = resources[i].bo;
374 evergreen_emit_ctx_reloc(ctx,
375 resources[i].bo,
376 resources[i].usage);
377
378 ///special case for textures
379 if (resources[i].do_reloc
380 [resources[i].cs_end] == 2) {
381 evergreen_emit_ctx_reloc(ctx,
382 resources[i].bo,
383 resources[i].usage);
384 }
385 }
386 }
387 }
388
389 /* Emit dispatch state and dispatch packet */
390 evergreen_emit_direct_dispatch(ctx, block_layout, grid_layout);
391
392 /* XXX evergreen_flush_emit() hardcodes the CP_COHER_SIZE to 0xffffffff
393 */
394 ctx->flags |= R600_CONTEXT_CB_FLUSH;
395 r600_flush_emit(ctx);
396
397 #if 0
398 COMPUTE_DBG("cdw: %i\n", cs->cdw);
399 for (i = 0; i < cs->cdw; i++) {
400 COMPUTE_DBG("%4i : 0x%08X\n", i, ctx->cs->buf[i]);
401 }
402 #endif
403
404 ctx->ws->cs_flush(ctx->cs, RADEON_FLUSH_ASYNC | RADEON_FLUSH_COMPUTE);
405
406 ctx->pm4_dirty_cdwords = 0;
407 ctx->flags = 0;
408
409 COMPUTE_DBG("shader started\n");
410
411 ctx->ws->buffer_wait(onebo->buf, 0);
412
413 COMPUTE_DBG("...\n");
414
415 ctx->streamout_start = TRUE;
416 ctx->streamout_append_bitmask = ~0;
417
418 }
419
420
421 /**
422 * Emit function for r600_cs_shader_state atom
423 */
424 void evergreen_emit_cs_shader(
425 struct r600_context *rctx,
426 struct r600_atom *atom)
427 {
428 struct r600_cs_shader_state *state =
429 (struct r600_cs_shader_state*)atom;
430 struct r600_pipe_compute *shader = state->shader;
431 struct radeon_winsys_cs *cs = rctx->cs;
432 uint64_t va;
433
434 va = r600_resource_va(&rctx->screen->screen, &shader->shader_code_bo->b.b);
435
436 r600_write_compute_context_reg_seq(cs, R_0288D0_SQ_PGM_START_LS, 3);
437 r600_write_value(cs, va >> 8); /* R_0288D0_SQ_PGM_START_LS */
438 r600_write_value(cs, /* R_0288D4_SQ_PGM_RESOURCES_LS */
439 S_0288D4_NUM_GPRS(shader->bc.ngpr)
440 | S_0288D4_STACK_SIZE(shader->bc.nstack));
441 r600_write_value(cs, 0); /* R_0288D8_SQ_PGM_RESOURCES_LS_2 */
442
443 r600_write_value(cs, PKT3C(PKT3_NOP, 0, 0));
444 r600_write_value(cs, r600_context_bo_reloc(rctx, shader->shader_code_bo,
445 RADEON_USAGE_READ));
446
447 rctx->flags |= R600_CONTEXT_SHADERCONST_FLUSH;
448 }
449
450 static void evergreen_launch_grid(
451 struct pipe_context *ctx_,
452 const uint *block_layout, const uint *grid_layout,
453 uint32_t pc, const void *input)
454 {
455 struct r600_context *ctx = (struct r600_context *)ctx_;
456
457 COMPUTE_DBG("*** evergreen_launch_grid: pc = %u\n", pc);
458
459 evergreen_compute_upload_input(ctx_, block_layout, grid_layout, input);
460 compute_emit_cs(ctx, block_layout, grid_layout);
461 }
462
463 static void evergreen_set_compute_resources(struct pipe_context * ctx_,
464 unsigned start, unsigned count,
465 struct pipe_surface ** surfaces)
466 {
467 struct r600_context *ctx = (struct r600_context *)ctx_;
468 struct r600_surface **resources = (struct r600_surface **)surfaces;
469
470 COMPUTE_DBG("*** evergreen_set_compute_resources: start = %u count = %u\n",
471 start, count);
472
473 for (int i = 0; i < count; i++) {
474 /* The first two vertex buffers are reserved for parameters and
475 * global buffers. */
476 unsigned vtx_id = 2 + i;
477 if (resources[i]) {
478 struct r600_resource_global *buffer =
479 (struct r600_resource_global*)
480 resources[i]->base.texture;
481 if (resources[i]->base.writable) {
482 assert(i+1 < 12);
483
484 evergreen_set_rat(ctx->cs_shader_state.shader, i+1,
485 (struct r600_resource *)resources[i]->base.texture,
486 buffer->chunk->start_in_dw*4,
487 resources[i]->base.texture->width0);
488 }
489
490 evergreen_cs_set_vertex_buffer(ctx, vtx_id,
491 buffer->chunk->start_in_dw * 4,
492 resources[i]->base.texture);
493 }
494 }
495 }
496
497 static void evergreen_set_cs_sampler_view(struct pipe_context *ctx_,
498 unsigned start_slot, unsigned count,
499 struct pipe_sampler_view **views)
500 {
501 struct r600_context *ctx = (struct r600_context *)ctx_;
502 struct r600_pipe_sampler_view **resource =
503 (struct r600_pipe_sampler_view **)views;
504
505 for (int i = 0; i < count; i++) {
506 if (resource[i]) {
507 assert(i+1 < 12);
508 ///FETCH0 = VTX0 (param buffer),
509 //FETCH1 = VTX1 (global buffer pool), FETCH2... = TEX
510 evergreen_set_tex_resource(ctx->cs_shader_state.shader, resource[i], i+2);
511 }
512 }
513 }
514
515 static void evergreen_bind_compute_sampler_states(
516 struct pipe_context *ctx_,
517 unsigned start_slot,
518 unsigned num_samplers,
519 void **samplers_)
520 {
521 struct r600_context *ctx = (struct r600_context *)ctx_;
522 struct compute_sampler_state ** samplers =
523 (struct compute_sampler_state **)samplers_;
524
525 for (int i = 0; i < num_samplers; i++) {
526 if (samplers[i]) {
527 evergreen_set_sampler_resource(
528 ctx->cs_shader_state.shader, samplers[i], i);
529 }
530 }
531 }
532
533 static void evergreen_set_global_binding(
534 struct pipe_context *ctx_, unsigned first, unsigned n,
535 struct pipe_resource **resources,
536 uint32_t **handles)
537 {
538 struct r600_context *ctx = (struct r600_context *)ctx_;
539 struct compute_memory_pool *pool = ctx->screen->global_pool;
540 struct r600_resource_global **buffers =
541 (struct r600_resource_global **)resources;
542
543 COMPUTE_DBG("*** evergreen_set_global_binding first = %u n = %u\n",
544 first, n);
545
546 if (!resources) {
547 /* XXX: Unset */
548 return;
549 }
550
551 compute_memory_finalize_pending(pool, ctx_);
552
553 for (int i = 0; i < n; i++)
554 {
555 assert(resources[i]->target == PIPE_BUFFER);
556 assert(resources[i]->bind & PIPE_BIND_GLOBAL);
557
558 *(handles[i]) = buffers[i]->chunk->start_in_dw * 4;
559 }
560
561 evergreen_set_rat(ctx->cs_shader_state.shader, 0, pool->bo, 0, pool->size_in_dw * 4);
562 evergreen_cs_set_vertex_buffer(ctx, 1, 0,
563 (struct pipe_resource*)pool->bo);
564 }
565
566 /**
567 * This function initializes all the compute specific registers that need to
568 * be initialized for each compute command stream. Registers that are common
569 * to both compute and 3D will be initialized at the beginning of each compute
570 * command stream by the start_cs_cmd atom. However, since the SET_CONTEXT_REG
571 * packet requires that the shader type bit be set, we must initialize all
572 * context registers needed for compute in this function. The registers
573 * initialized by the start_cs_cmd atom can be found in evergreen_state.c in the
574 * functions evergreen_init_atom_start_cs or cayman_init_atom_start_cs depending
575 * on the GPU family.
576 */
577 void evergreen_init_atom_start_compute_cs(struct r600_context *ctx)
578 {
579 struct r600_command_buffer *cb = &ctx->start_compute_cs_cmd;
580 int num_threads;
581 int num_stack_entries;
582
583 /* since all required registers are initialised in the
584 * start_compute_cs_cmd atom, we can EMIT_EARLY here.
585 */
586 r600_init_command_buffer(ctx, cb, 1, 256);
587 cb->pkt_flags = RADEON_CP_PACKET3_COMPUTE_MODE;
588
589 switch (ctx->family) {
590 case CHIP_CEDAR:
591 default:
592 num_threads = 128;
593 num_stack_entries = 256;
594 break;
595 case CHIP_REDWOOD:
596 num_threads = 128;
597 num_stack_entries = 256;
598 break;
599 case CHIP_JUNIPER:
600 num_threads = 128;
601 num_stack_entries = 512;
602 break;
603 case CHIP_CYPRESS:
604 case CHIP_HEMLOCK:
605 num_threads = 128;
606 num_stack_entries = 512;
607 break;
608 case CHIP_PALM:
609 num_threads = 128;
610 num_stack_entries = 256;
611 break;
612 case CHIP_SUMO:
613 num_threads = 128;
614 num_stack_entries = 256;
615 break;
616 case CHIP_SUMO2:
617 num_threads = 128;
618 num_stack_entries = 512;
619 break;
620 case CHIP_BARTS:
621 num_threads = 128;
622 num_stack_entries = 512;
623 break;
624 case CHIP_TURKS:
625 num_threads = 128;
626 num_stack_entries = 256;
627 break;
628 case CHIP_CAICOS:
629 num_threads = 128;
630 num_stack_entries = 256;
631 break;
632 }
633
634 /* Config Registers */
635 evergreen_init_common_regs(cb, ctx->chip_class
636 , ctx->family, ctx->screen->info.drm_minor);
637
638 /* The primitive type always needs to be POINTLIST for compute. */
639 r600_store_config_reg(cb, R_008958_VGT_PRIMITIVE_TYPE,
640 V_008958_DI_PT_POINTLIST);
641
642 if (ctx->chip_class < CAYMAN) {
643
644 /* These registers control which simds can be used by each stage.
645 * The default for these registers is 0xffffffff, which means
646 * all simds are available for each stage. It's possible we may
647 * want to play around with these in the future, but for now
648 * the default value is fine.
649 *
650 * R_008E20_SQ_STATIC_THREAD_MGMT1
651 * R_008E24_SQ_STATIC_THREAD_MGMT2
652 * R_008E28_SQ_STATIC_THREAD_MGMT3
653 */
654
655 /* XXX: We may need to adjust the thread and stack resource
656 * values for 3D/compute interop */
657
658 r600_store_config_reg_seq(cb, R_008C18_SQ_THREAD_RESOURCE_MGMT_1, 5);
659
660 /* R_008C18_SQ_THREAD_RESOURCE_MGMT_1
661 * Set the number of threads used by the PS/VS/GS/ES stage to
662 * 0.
663 */
664 r600_store_value(cb, 0);
665
666 /* R_008C1C_SQ_THREAD_RESOURCE_MGMT_2
667 * Set the number of threads used by the CS (aka LS) stage to
668 * the maximum number of threads and set the number of threads
669 * for the HS stage to 0. */
670 r600_store_value(cb, S_008C1C_NUM_LS_THREADS(num_threads));
671
672 /* R_008C20_SQ_STACK_RESOURCE_MGMT_1
673 * Set the Control Flow stack entries to 0 for PS/VS stages */
674 r600_store_value(cb, 0);
675
676 /* R_008C24_SQ_STACK_RESOURCE_MGMT_2
677 * Set the Control Flow stack entries to 0 for GS/ES stages */
678 r600_store_value(cb, 0);
679
680 /* R_008C28_SQ_STACK_RESOURCE_MGMT_3
681 * Set the Control Flow stack entries to 0 for the HS stage, and
682 * set it to the maximum value for the CS (aka LS) stage. */
683 r600_store_value(cb,
684 S_008C28_NUM_LS_STACK_ENTRIES(num_stack_entries));
685 }
686
687 /* Context Registers */
688
689 if (ctx->chip_class < CAYMAN) {
690 /* workaround for hw issues with dyn gpr - must set all limits
691 * to 240 instead of 0, 0x1e == 240 / 8
692 */
693 r600_store_context_reg(cb, R_028838_SQ_DYN_GPR_RESOURCE_LIMIT_1,
694 S_028838_PS_GPRS(0x1e) |
695 S_028838_VS_GPRS(0x1e) |
696 S_028838_GS_GPRS(0x1e) |
697 S_028838_ES_GPRS(0x1e) |
698 S_028838_HS_GPRS(0x1e) |
699 S_028838_LS_GPRS(0x1e));
700 }
701
702 /* XXX: Investigate setting bit 15, which is FAST_COMPUTE_MODE */
703 r600_store_context_reg(cb, R_028A40_VGT_GS_MODE,
704 S_028A40_COMPUTE_MODE(1) | S_028A40_PARTIAL_THD_AT_EOI(1));
705
706 r600_store_context_reg(cb, R_028B54_VGT_SHADER_STAGES_EN, 2/*CS_ON*/);
707
708 r600_store_context_reg(cb, R_0286E8_SPI_COMPUTE_INPUT_CNTL,
709 S_0286E8_TID_IN_GROUP_ENA
710 | S_0286E8_TGID_ENA
711 | S_0286E8_DISABLE_INDEX_PACK)
712 ;
713
714 /* The LOOP_CONST registers are an optimization for loops that allows
715 * you to store the initial counter, increment value, and maximum
716 * counter value in a register so that hardware can calculate the
717 * correct number of iterations for the loop, so that you don't need
718 * to have the loop counter in your shader code. We don't currently use
719 * this optimization, so we must keep track of the counter in the
720 * shader and use a break instruction to exit loops. However, the
721 * hardware still uses this register to determine when to exit a
722 * loop, so we need to initialize the counter to 0, set the increment
723 * value to 1 and the maximum counter value to 4095 (0xfff), which
724 * is the maximum value allowed. This gives us a maximum of 4096
725 * iterations for our loops, but hopefully our break instruction will
726 * execute some time before the 4096th iteration.
727 */
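/* The 0x1000FFF value below packs those three fields.  Assuming the usual
 * r600 loop-constant layout (count in bits [11:0], init in bits [23:12],
 * increment in bits [31:24]), it decodes to count = 0xfff, init = 0,
 * increment = 1, matching the description above. */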
728 eg_store_loop_const(cb, R_03A200_SQ_LOOP_CONST_0 + (160 * 4), 0x1000FFF);
729 }
730
731 void evergreen_init_compute_state_functions(struct r600_context *ctx)
732 {
733 ctx->context.create_compute_state = evergreen_create_compute_state;
734 ctx->context.delete_compute_state = evergreen_delete_compute_state;
735 ctx->context.bind_compute_state = evergreen_bind_compute_state;
736 // ctx->context.create_sampler_view = evergreen_compute_create_sampler_view;
737 ctx->context.set_compute_resources = evergreen_set_compute_resources;
738 ctx->context.set_compute_sampler_views = evergreen_set_cs_sampler_view;
739 ctx->context.bind_compute_sampler_states = evergreen_bind_compute_sampler_states;
740 ctx->context.set_global_binding = evergreen_set_global_binding;
741 ctx->context.launch_grid = evergreen_launch_grid;
742
743 /* We always use at least two vertex buffers for compute, one for
744 * parameters and one for global memory */
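/* 1 | 2 sets bits 0 and 1, i.e. vertex buffer 0 (kernel parameters,
 * see evergreen_compute_upload_input) and vertex buffer 1 (the global
 * memory pool, see evergreen_set_global_binding). */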
745 ctx->cs_vertex_buffer_state.enabled_mask =
746 ctx->cs_vertex_buffer_state.dirty_mask = 1 | 2;
747 }
748
749
750 struct pipe_resource *r600_compute_global_buffer_create(
751 struct pipe_screen *screen,
752 const struct pipe_resource *templ)
753 {
754 assert(templ->target == PIPE_BUFFER);
755 assert(templ->bind & PIPE_BIND_GLOBAL);
756 assert(templ->array_size == 1 || templ->array_size == 0);
757 assert(templ->depth0 == 1 || templ->depth0 == 0);
758 assert(templ->height0 == 1 || templ->height0 == 0);
759
760 struct r600_resource_global* result = (struct r600_resource_global*)
761 CALLOC(sizeof(struct r600_resource_global), 1);
762 struct r600_screen* rscreen = (struct r600_screen*)screen;
763
764 COMPUTE_DBG("*** r600_compute_global_buffer_create\n");
765 COMPUTE_DBG("width = %u array_size = %u\n", templ->width0,
766 templ->array_size);
767
768 result->base.b.vtbl = &r600_global_buffer_vtbl;
769 result->base.b.b.screen = screen;
770 result->base.b.b = *templ;
771 pipe_reference_init(&result->base.b.b.reference, 1);
772
773 int size_in_dw = (templ->width0+3) / 4;
774
775 result->chunk = compute_memory_alloc(rscreen->global_pool, size_in_dw);
776
777 if (result->chunk == NULL)
778 {
779 free(result);
780 return NULL;
781 }
782
783 return &result->base.b.b;
784 }
785
786 void r600_compute_global_buffer_destroy(
787 struct pipe_screen *screen,
788 struct pipe_resource *res)
789 {
790 assert(res->target == PIPE_BUFFER);
791 assert(res->bind & PIPE_BIND_GLOBAL);
792
793 struct r600_resource_global* buffer = (struct r600_resource_global*)res;
794 struct r600_screen* rscreen = (struct r600_screen*)screen;
795
796 compute_memory_free(rscreen->global_pool, buffer->chunk->id);
797
798 buffer->chunk = NULL;
799 free(res);
800 }
801
802 void* r600_compute_global_transfer_map(
803 struct pipe_context *ctx_,
804 struct pipe_transfer* transfer)
805 {
806 assert(transfer->resource->target == PIPE_BUFFER);
807 assert(transfer->resource->bind & PIPE_BIND_GLOBAL);
808 assert(transfer->box.x >= 0);
809 assert(transfer->box.y == 0);
810 assert(transfer->box.z == 0);
811
812 struct r600_context *ctx = (struct r600_context *)ctx_;
813 struct r600_resource_global* buffer =
814 (struct r600_resource_global*)transfer->resource;
815
816 uint32_t* map;
817 ///TODO: do it better, mapping is not possible if the pool is too big
818
819 COMPUTE_DBG("* r600_compute_global_transfer_map()\n");
820
821 if (!(map = ctx->ws->buffer_map(buffer->chunk->pool->bo->cs_buf,
822 ctx->cs, transfer->usage))) {
823 return NULL;
824 }
825
826 COMPUTE_DBG("Buffer: %p + %u (buffer offset in global memory) "
827 "+ %u (box.x)\n", map, buffer->chunk->start_in_dw, transfer->box.x);
828 return ((char*)(map + buffer->chunk->start_in_dw)) + transfer->box.x;
829 }
830
831 void r600_compute_global_transfer_unmap(
832 struct pipe_context *ctx_,
833 struct pipe_transfer* transfer)
834 {
835 assert(transfer->resource->target == PIPE_BUFFER);
836 assert(transfer->resource->bind & PIPE_BIND_GLOBAL);
837
838 struct r600_context *ctx = (struct r600_context *)ctx_;
839 struct r600_resource_global* buffer =
840 (struct r600_resource_global*)transfer->resource;
841
842 COMPUTE_DBG("* r600_compute_global_transfer_unmap()\n");
843
844 ctx->ws->buffer_unmap(buffer->chunk->pool->bo->cs_buf);
845 }
846
847 struct pipe_transfer * r600_compute_global_get_transfer(
848 struct pipe_context *ctx_,
849 struct pipe_resource *resource,
850 unsigned level,
851 unsigned usage,
852 const struct pipe_box *box)
853 {
854 struct r600_context *ctx = (struct r600_context *)ctx_;
855 struct compute_memory_pool *pool = ctx->screen->global_pool;
856
857 compute_memory_finalize_pending(pool, ctx_);
858
859 assert(resource->target == PIPE_BUFFER);
860 struct r600_context *rctx = (struct r600_context*)ctx_;
861 struct pipe_transfer *transfer = util_slab_alloc(&rctx->pool_transfers);
862
863 COMPUTE_DBG("* r600_compute_global_get_transfer()\n"
864 "level = %u, usage = %u, box(x = %u, y = %u, z = %u "
865 "width = %u, height = %u, depth = %u)\n", level, usage,
866 box->x, box->y, box->z, box->width, box->height,
867 box->depth);
868
869 transfer->resource = resource;
870 transfer->level = level;
871 transfer->usage = usage;
872 transfer->box = *box;
873 transfer->stride = 0;
874 transfer->layer_stride = 0;
875 transfer->data = NULL;
876
877 /* Note the strides are zero; this is OK for buffers, but not for
878 * 2D and higher-dimension textures.
879 */
880 return transfer;
881 }
882
883 void r600_compute_global_transfer_destroy(
884 struct pipe_context *ctx_,
885 struct pipe_transfer *transfer)
886 {
887 struct r600_context *rctx = (struct r600_context*)ctx_;
888 util_slab_free(&rctx->pool_transfers, transfer);
889 }
890
891 void r600_compute_global_transfer_flush_region(
892 struct pipe_context *ctx_,
893 struct pipe_transfer *transfer,
894 const struct pipe_box *box)
895 {
896 assert(0 && "TODO");
897 }
898
899 void r600_compute_global_transfer_inline_write(
900 struct pipe_context *pipe,
901 struct pipe_resource *resource,
902 unsigned level,
903 unsigned usage,
904 const struct pipe_box *box,
905 const void *data,
906 unsigned stride,
907 unsigned layer_stride)
908 {
909 assert(0 && "TODO");
910 }