r600g: only set the index type if drawing is indexed
[mesa.git] / src / gallium / drivers / r600 / evergreen_compute.c
1 /*
2 * Copyright 2011 Adam Rak <adam.rak@streamnovation.com>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Adam Rak <adam.rak@streamnovation.com>
25 */
26
27 #include <stdio.h>
28 #include <errno.h>
29 #include "pipe/p_defines.h"
30 #include "pipe/p_state.h"
31 #include "pipe/p_context.h"
32 #include "util/u_blitter.h"
33 #include "util/u_double_list.h"
34 #include "util/u_transfer.h"
35 #include "util/u_surface.h"
36 #include "util/u_pack_color.h"
37 #include "util/u_memory.h"
38 #include "util/u_inlines.h"
39 #include "util/u_framebuffer.h"
40 #include "pipebuffer/pb_buffer.h"
41 #include "r600.h"
42 #include "evergreend.h"
43 #include "r600_resource.h"
44 #include "r600_shader.h"
45 #include "r600_pipe.h"
46 #include "r600_formats.h"
47 #include "evergreen_compute.h"
48 #include "r600_hw_context_priv.h"
49 #include "evergreen_compute_internal.h"
50 #include "compute_memory_pool.h"
51 #ifdef HAVE_OPENCL
52 #include "llvm_wrapper.h"
53 #endif
54
55 /**
56 RAT0 is for global binding write
57 VTX1 is for global binding read
58
59 for writing images, RAT1... is used
60 for reading images, TEX2... is used
61 TEX2 and RAT1 are paired, and so on
62
63 TEX2... consumes the same fetch resources that VTX2... would consume
64
65 CONST0 and VTX0 are for parameters
66 CONST0 binds the smaller input parameter buffer and is used for constant
67 indexing; it is also constant cached
68 VTX0 is for indirect/non-constant indexing, or if the input is bigger than
69 the constant cache can handle
70
71 RATs are limited to 12, so we can bind at most 11 textures for writing
72 because we reserve RAT0 for global bindings. With byte addressing enabled,
73 we should reserve another one too => 10 image bindings for writing max.
74
75 from Nvidia OpenCL:
76 CL_DEVICE_MAX_READ_IMAGE_ARGS: 128
77 CL_DEVICE_MAX_WRITE_IMAGE_ARGS: 8
78
79 so 10 for writing is enough. 176 is the max for reading according to the docs
80
81 writable images should be listed first (< 10), so their id corresponds to RAT(id+1)
82 writable images also consume TEX and VTX slots because of linear indexing
83
84 */
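/* Summarizing the scheme above as a slot table (derived from the comments
 * here and from how the slots are programmed later in this file):
 *
 *   CONST0 / VTX0       - kernel input parameters
 *   RAT0   / VTX1       - global memory pool (write / read)
 *   RAT(i+1)            - writable compute resource i
 *   VTX(i+2) / TEX(i+2) - compute resource / sampler view i for reading
 */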
85
86 static void evergreen_cs_set_vertex_buffer(
87 struct r600_context * rctx,
88 unsigned vb_index,
89 unsigned offset,
90 struct pipe_resource * buffer)
91 {
92 struct r600_vertexbuf_state *state = &rctx->cs_vertex_buffer_state;
93 struct pipe_vertex_buffer *vb = &state->vb[vb_index];
94 vb->stride = 1;
95 vb->buffer_offset = offset;
96 vb->buffer = buffer;
97 vb->user_buffer = NULL;
98
99 r600_inval_vertex_cache(rctx);
100 state->enabled_mask |= 1 << vb_index;
101 state->dirty_mask |= 1 << vb_index;
102 r600_atom_dirty(rctx, &state->atom);
103 }
104
105 const struct u_resource_vtbl r600_global_buffer_vtbl =
106 {
107 u_default_resource_get_handle, /* get_handle */
108 r600_compute_global_buffer_destroy, /* resource_destroy */
109 r600_compute_global_get_transfer, /* get_transfer */
110 r600_compute_global_transfer_destroy, /* transfer_destroy */
111 r600_compute_global_transfer_map, /* transfer_map */
112 r600_compute_global_transfer_flush_region,/* transfer_flush_region */
113 r600_compute_global_transfer_unmap, /* transfer_unmap */
114 r600_compute_global_transfer_inline_write /* transfer_inline_write */
115 };
116
117
118 void *evergreen_create_compute_state(
119 struct pipe_context *ctx_,
120 const struct pipe_compute_state *cso)
121 {
122 struct r600_context *ctx = (struct r600_context *)ctx_;
123
124 #ifdef HAVE_OPENCL
125 const struct pipe_llvm_program_header * header;
126 const unsigned char * code;
127
128 COMPUTE_DBG("*** evergreen_create_compute_state\n");
129
130 header = cso->prog;
131 code = cso->prog + sizeof(struct pipe_llvm_program_header);
132 #endif
133
134 if (!ctx->screen->screen.get_param(&ctx->screen->screen,
135 PIPE_CAP_COMPUTE)) {
136 fprintf(stderr, "Compute is not supported\n");
137 return NULL;
138 }
139 struct r600_pipe_compute *shader = CALLOC_STRUCT(r600_pipe_compute);
140
141 shader->ctx = (struct r600_context*)ctx;
142 shader->resources = (struct evergreen_compute_resource*)
143 CALLOC(sizeof(struct evergreen_compute_resource),
144 get_compute_resource_num());
145 shader->local_size = cso->req_local_mem; ///TODO: assert it
146 shader->private_size = cso->req_private_mem;
147 shader->input_size = cso->req_input_mem;
148
149 #ifdef HAVE_OPENCL
150 shader->mod = llvm_parse_bitcode(code, header->num_bytes);
151
152 r600_compute_shader_create(ctx_, shader->mod, &shader->bc);
153 #endif
154 return shader;
155 }
156
157 void evergreen_delete_compute_state(struct pipe_context *ctx, void* state)
158 {
159 struct r600_pipe_compute *shader = (struct r600_pipe_compute *)state;
160
161 free(shader->resources);
162 free(shader);
163 }
164
165 static void evergreen_bind_compute_state(struct pipe_context *ctx_, void *state)
166 {
167 struct r600_context *ctx = (struct r600_context *)ctx_;
168
169 COMPUTE_DBG("*** evergreen_bind_compute_state\n");
170
171 ctx->cs_shader = (struct r600_pipe_compute *)state;
172
173 if (!ctx->cs_shader->shader_code_bo) {
174
175 ctx->cs_shader->shader_code_bo =
176 r600_compute_buffer_alloc_vram(ctx->screen,
177 ctx->cs_shader->bc.ndw * 4);
178
179 void *p = ctx->ws->buffer_map(
180 ctx->cs_shader->shader_code_bo->cs_buf,
181 ctx->cs, PIPE_TRANSFER_WRITE);
182
183 memcpy(p, ctx->cs_shader->bc.bytecode, ctx->cs_shader->bc.ndw * 4);
184
185 ctx->ws->buffer_unmap(ctx->cs_shader->shader_code_bo->cs_buf);
186
187 }
188
189 struct evergreen_compute_resource* res = get_empty_res(ctx->cs_shader,
190 COMPUTE_RESOURCE_SHADER, 0);
191
192 if (ctx->chip_class < CAYMAN) {
193 evergreen_reg_set(res, R_008C0C_SQ_GPR_RESOURCE_MGMT_3,
194 S_008C0C_NUM_LS_GPRS(ctx->cs_shader->bc.ngpr));
195 }
196
197 ///maybe we can use it later
198 evergreen_reg_set(res, R_0286C8_SPI_THREAD_GROUPING, 0);
199 ///maybe we can use it later
200 evergreen_reg_set(res, R_008C14_SQ_GLOBAL_GPR_RESOURCE_MGMT_2, 0);
201
202 evergreen_reg_set(res, R_0288D4_SQ_PGM_RESOURCES_LS,
203 S_0288D4_NUM_GPRS(ctx->cs_shader->bc.ngpr)
204 | S_0288D4_STACK_SIZE(ctx->cs_shader->bc.nstack));
205 evergreen_reg_set(res, R_0288D8_SQ_PGM_RESOURCES_LS_2, 0);
206
207 evergreen_reg_set(res, R_0288D0_SQ_PGM_START_LS, 0);
208 res->bo = ctx->cs_shader->shader_code_bo;
209 res->usage = RADEON_USAGE_READ;
210 res->coher_bo_size = ctx->cs_shader->bc.ndw*4;
211
212 r600_inval_shader_cache(ctx);
213
214 }
215
216 /* The kernel parameters are stored in a vtx buffer (ID=0). Besides the
217 * explicit kernel parameters, there are implicit parameters that need to be
218 * stored in the vertex buffer as well. Here is how these parameters are
219 * organized in the buffer:
220 *
221 * DWORDS 0-2: Number of work groups in each dimension (x,y,z)
222 * DWORDS 3-5: Number of global work items in each dimension (x,y,z)
223 * DWORDS 6-8: Number of work items within each work group in each dimension
224 * (x,y,z)
225 * DWORDS 9+ : Kernel parameters
226 */
227 void evergreen_compute_upload_input(
228 struct pipe_context *ctx_,
229 const uint *block_layout,
230 const uint *grid_layout,
231 const void *input)
232 {
233 struct r600_context *ctx = (struct r600_context *)ctx_;
234 int i;
235 unsigned kernel_parameters_offset_bytes = 36;
236 uint32_t * num_work_groups_start;
237 uint32_t * global_size_start;
238 uint32_t * local_size_start;
239 uint32_t * kernel_parameters_start;
240
241 if (ctx->cs_shader->input_size == 0) {
242 return;
243 }
244
245 if (!ctx->cs_shader->kernel_param) {
246 unsigned buffer_size = ctx->cs_shader->input_size;
247
248 /* Add space for the grid dimensions */
249 buffer_size += kernel_parameters_offset_bytes * sizeof(uint);
250 ctx->cs_shader->kernel_param =
251 r600_compute_buffer_alloc_vram(ctx->screen,
252 buffer_size);
253 }
254
255 num_work_groups_start = ctx->ws->buffer_map(
256 ctx->cs_shader->kernel_param->cs_buf,
257 ctx->cs, PIPE_TRANSFER_WRITE);
258 global_size_start = num_work_groups_start + (3 * (sizeof(uint) / 4));
259 local_size_start = global_size_start + (3 * (sizeof(uint) / 4));
260 kernel_parameters_start = local_size_start + (3 * (sizeof(uint) / 4));
261
262 /* Copy the work group size */
263 memcpy(num_work_groups_start, grid_layout, 3 * sizeof(uint));
264
265 /* Copy the global size */
266 for (i = 0; i < 3; i++) {
267 global_size_start[i] = grid_layout[i] * block_layout[i];
268 }
269
270 /* Copy the local dimensions */
271 memcpy(local_size_start, block_layout, 3 * sizeof(uint));
272
273 /* Copy the kernel inputs */
274 memcpy(kernel_parameters_start, input, ctx->cs_shader->input_size);
275
276 for (i = 0; i < (kernel_parameters_offset_bytes / 4) +
277 (ctx->cs_shader->input_size / 4); i++) {
278 COMPUTE_DBG("input %i : %i\n", i,
279 ((unsigned*)num_work_groups_start)[i]);
280 }
281
282 ctx->ws->buffer_unmap(ctx->cs_shader->kernel_param->cs_buf);
283
284 ///ID=0 is reserved for the parameters
285 evergreen_cs_set_vertex_buffer(ctx, 0, 0,
286 (struct pipe_resource*)ctx->cs_shader->kernel_param);
287 ///ID=0 is reserved for parameters
288 evergreen_set_const_cache(ctx->cs_shader, 0,
289 ctx->cs_shader->kernel_param, ctx->cs_shader->input_size, 0);
290 }
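/* For illustration only: the implicit prefix written by
 * evergreen_compute_upload_input() above, viewed as a struct. This type is
 * not used by the driver; it merely restates the dword layout documented in
 * the comment before the function.
 */
struct compute_input_prefix {
uint32_t num_work_groups[3]; /* DWORDS 0-2: work groups per dimension */
uint32_t global_size[3];     /* DWORDS 3-5: global work items per dimension */
uint32_t local_size[3];      /* DWORDS 6-8: work items per group, per dimension */
/* kernel parameters follow at DWORD 9 (byte offset 36) */
};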
291
292 void evergreen_direct_dispatch(
293 struct pipe_context *ctx_,
294 const uint *block_layout, const uint *grid_layout)
295 {
296 /* This struct r600_context* must be called rctx, because the
297 * r600_pipe_state_add_reg macro assumes there is a local variable
298 * of type struct r600_context* called rctx.
299 */
300 struct r600_context *rctx = (struct r600_context *)ctx_;
301
302 int i;
303
304 struct evergreen_compute_resource* res = get_empty_res(rctx->cs_shader,
305 COMPUTE_RESOURCE_DISPATCH, 0);
306
307 /* Set CB_TARGET_MASK */
308 evergreen_reg_set(res, R_028238_CB_TARGET_MASK, rctx->compute_cb_target_mask);
309
310 evergreen_reg_set(res, R_008958_VGT_PRIMITIVE_TYPE, V_008958_DI_PT_POINTLIST);
311
312 evergreen_reg_set(res, R_00899C_VGT_COMPUTE_START_X, 0);
313 evergreen_reg_set(res, R_0089A0_VGT_COMPUTE_START_Y, 0);
314 evergreen_reg_set(res, R_0089A4_VGT_COMPUTE_START_Z, 0);
315
316 evergreen_reg_set(res, R_0286EC_SPI_COMPUTE_NUM_THREAD_X, block_layout[0]);
317 evergreen_reg_set(res, R_0286F0_SPI_COMPUTE_NUM_THREAD_Y, block_layout[1]);
318 evergreen_reg_set(res, R_0286F4_SPI_COMPUTE_NUM_THREAD_Z, block_layout[2]);
319
320 int group_size = 1;
321
322 int grid_size = 1;
323
324 for (i = 0; i < 3; i++) {
325 group_size *= block_layout[i];
326 }
327
328 for (i = 0; i < 3; i++) {
329 grid_size *= grid_layout[i];
330 }
331
332 evergreen_reg_set(res, R_008970_VGT_NUM_INDICES, group_size);
333 evergreen_reg_set(res, R_0089AC_VGT_COMPUTE_THREAD_GROUP_SIZE, group_size);
334
335 evergreen_emit_raw_value(res, PKT3C(PKT3_DISPATCH_DIRECT, 3, 0));
336 evergreen_emit_raw_value(res, grid_layout[0]);
337 evergreen_emit_raw_value(res, grid_layout[1]);
338 evergreen_emit_raw_value(res, grid_layout[2]);
339 ///VGT_DISPATCH_INITIATOR = COMPUTE_SHADER_EN
340 evergreen_emit_raw_value(res, 1);
341 }
342
343 static void compute_emit_cs(struct r600_context *ctx)
344 {
345 struct radeon_winsys_cs *cs = ctx->cs;
346 int i;
347
348 struct r600_resource *onebo = NULL;
349 struct r600_pipe_state *cb_state;
350
351 /* Initialize all the registers common to both 3D and compute. Some
352 * 3D-only registers will be initialized by this atom as well, but
353 * this is OK for now.
354 *
355 * See evergreen_init_atom_start_cs() or cayman_init_atom_start_cs() in
356 * evergreen_state.c for the list of registers that are initialized by
357 * the start_cs_cmd atom.
358 */
359 r600_emit_atom(ctx, &ctx->start_cs_cmd.atom);
360
361 /* Initialize all the compute specific registers.
362 *
363 * See evergreen_init_atom_start_compute_cs() in this file for the list
364 * of registers initialized by the start_compute_cs_cmd atom.
365 */
366 r600_emit_atom(ctx, &ctx->start_compute_cs_cmd.atom);
367
368 /* Emit cb_state */
369 cb_state = ctx->states[R600_PIPE_STATE_FRAMEBUFFER];
370 r600_context_pipe_state_emit(ctx, cb_state, RADEON_CP_PACKET3_COMPUTE_MODE);
371
372 /* Emit vertex buffer state */
373 ctx->cs_vertex_buffer_state.atom.num_dw = 12 * util_bitcount(ctx->cs_vertex_buffer_state.dirty_mask);
374 r600_emit_atom(ctx, &ctx->cs_vertex_buffer_state.atom);
375
376 for (i = 0; i < get_compute_resource_num(); i++) {
377 if (ctx->cs_shader->resources[i].enabled) {
378 int j;
379 COMPUTE_DBG("resnum: %i, cdw: %i\n", i, cs->cdw);
380
381 for (j = 0; j < ctx->cs_shader->resources[i].cs_end; j++) {
382 if (ctx->cs_shader->resources[i].do_reloc[j]) {
383 assert(ctx->cs_shader->resources[i].bo);
384 evergreen_emit_ctx_reloc(ctx,
385 ctx->cs_shader->resources[i].bo,
386 ctx->cs_shader->resources[i].usage);
387 }
388
389 cs->buf[cs->cdw++] = ctx->cs_shader->resources[i].cs[j];
390 }
391
392 if (ctx->cs_shader->resources[i].bo) {
393 onebo = ctx->cs_shader->resources[i].bo;
394 evergreen_emit_ctx_reloc(ctx,
395 ctx->cs_shader->resources[i].bo,
396 ctx->cs_shader->resources[i].usage);
397
398 ///special case for textures
399 if (ctx->cs_shader->resources[i].do_reloc
400 [ctx->cs_shader->resources[i].cs_end] == 2) {
401 evergreen_emit_ctx_reloc(ctx,
402 ctx->cs_shader->resources[i].bo,
403 ctx->cs_shader->resources[i].usage);
404 }
405 }
406 }
407 }
408
409 /* r600_flush_framebuffer() updates the cb_flush_flags and then
410 * calls r600_emit_atom() on the ctx->surface_sync_cmd.atom, which emits
411 * a SURFACE_SYNC packet via r600_emit_surface_sync().
412 *
413 * XXX r600_emit_surface_sync() hardcodes the CP_COHER_SIZE to
414 * 0xffffffff, so we will need to add a field to struct
415 * r600_surface_sync_cmd if we want to manually set this value.
416 */
417 r600_flush_framebuffer(ctx, true /* Flush now */);
418
419 #if 0
420 COMPUTE_DBG("cdw: %i\n", cs->cdw);
421 for (i = 0; i < cs->cdw; i++) {
422 COMPUTE_DBG("%4i : 0x%08X\n", i, ctx->cs->buf[i]);
423 }
424 #endif
425
426 ctx->ws->cs_flush(ctx->cs, RADEON_FLUSH_ASYNC | RADEON_FLUSH_COMPUTE);
427
428 ctx->pm4_dirty_cdwords = 0;
429 ctx->flags = 0;
430
431 COMPUTE_DBG("shader started\n");
432
433 ctx->ws->buffer_wait(onebo->buf, 0);
434
435 COMPUTE_DBG("...\n");
436
437 ctx->streamout_start = TRUE;
438 ctx->streamout_append_bitmask = ~0;
439
440 }
441
442 static void evergreen_launch_grid(
443 struct pipe_context *ctx_,
444 const uint *block_layout, const uint *grid_layout,
445 uint32_t pc, const void *input)
446 {
447 COMPUTE_DBG("PC: %i\n", pc);
448
449 struct r600_context *ctx = (struct r600_context *)ctx_;
450 unsigned num_waves;
451 unsigned num_pipes = ctx->screen->info.r600_max_pipes;
452 unsigned wave_divisor = (16 * num_pipes);
453
454 /* num_waves = ceil((tg_size.x * tg_size.y * tg_size.z) / (16 * num_pipes)) */
455 num_waves = (block_layout[0] * block_layout[1] * block_layout[2] +
456 wave_divisor - 1) / wave_divisor;
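/* For example (illustrative numbers only): a 16x16x1 thread block on a
 * part with 4 pipes gives num_waves = ceil(256 / 64) = 4 wavefronts per
 * thread block.
 */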
457
458 COMPUTE_DBG("Using %u pipes, there are %u wavefronts per thread block\n",
459 num_pipes, num_waves);
460
461 evergreen_set_lds(ctx->cs_shader, 0, 0, num_waves);
462 evergreen_compute_upload_input(ctx_, block_layout, grid_layout, input);
463 evergreen_direct_dispatch(ctx_, block_layout, grid_layout);
464 compute_emit_cs(ctx);
465 }
466
467 static void evergreen_set_compute_resources(struct pipe_context * ctx_,
468 unsigned start, unsigned count,
469 struct pipe_surface ** surfaces)
470 {
471 struct r600_context *ctx = (struct r600_context *)ctx_;
472 struct r600_surface **resources = (struct r600_surface **)surfaces;
473
474 COMPUTE_DBG("*** evergreen_set_compute_resources: start = %u count = %u\n",
475 start, count);
476
477 for (int i = 0; i < count; i++) {
478 /* The first two vertex buffers are reserved for parameters and
479 * global buffers. */
480 unsigned vtx_id = 2 + i;
481 if (resources[i]) {
482 struct r600_resource_global *buffer =
483 (struct r600_resource_global*)
484 resources[i]->base.texture;
485 if (resources[i]->base.writable) {
486 assert(i+1 < 12);
487
488 evergreen_set_rat(ctx->cs_shader, i+1,
489 (struct r600_resource *)resources[i]->base.texture,
490 buffer->chunk->start_in_dw*4,
491 resources[i]->base.texture->width0);
492 }
493
494 evergreen_cs_set_vertex_buffer(ctx, vtx_id,
495 buffer->chunk->start_in_dw * 4,
496 resources[i]->base.texture);
497 }
498 }
499 }
500
501 static void evergreen_set_cs_sampler_view(struct pipe_context *ctx_,
502 unsigned start_slot, unsigned count,
503 struct pipe_sampler_view **views)
504 {
505 struct r600_context *ctx = (struct r600_context *)ctx_;
506 struct r600_pipe_sampler_view **resource =
507 (struct r600_pipe_sampler_view **)views;
508
509 for (int i = 0; i < count; i++) {
510 if (resource[i]) {
511 assert(i+1 < 12);
512 ///FETCH0 = VTX0 (param buffer),
513 ///FETCH1 = VTX1 (global buffer pool), FETCH2... = TEX
514 evergreen_set_tex_resource(ctx->cs_shader, resource[i], i+2);
515 }
516 }
517 }
518
519 static void evergreen_bind_compute_sampler_states(
520 struct pipe_context *ctx_,
521 unsigned start_slot,
522 unsigned num_samplers,
523 void **samplers_)
524 {
525 struct r600_context *ctx = (struct r600_context *)ctx_;
526 struct compute_sampler_state ** samplers =
527 (struct compute_sampler_state **)samplers_;
528
529 for (int i = 0; i < num_samplers; i++) {
530 if (samplers[i]) {
531 evergreen_set_sampler_resource(ctx->cs_shader, samplers[i], i);
532 }
533 }
534 }
535
536 static void evergreen_set_global_binding(
537 struct pipe_context *ctx_, unsigned first, unsigned n,
538 struct pipe_resource **resources,
539 uint32_t **handles)
540 {
541 struct r600_context *ctx = (struct r600_context *)ctx_;
542 struct compute_memory_pool *pool = ctx->screen->global_pool;
543 struct r600_resource_global **buffers =
544 (struct r600_resource_global **)resources;
545
546 COMPUTE_DBG("*** evergreen_set_global_binding first = %u n = %u\n",
547 first, n);
548
549 if (!resources) {
550 /* XXX: Unset */
551 return;
552 }
553
554 compute_memory_finalize_pending(pool, ctx_);
555
556 for (int i = 0; i < n; i++)
557 {
558 assert(resources[i]->target == PIPE_BUFFER);
559 assert(resources[i]->bind & PIPE_BIND_GLOBAL);
560
561 *(handles[i]) = buffers[i]->chunk->start_in_dw * 4;
562 }
563
564 evergreen_set_rat(ctx->cs_shader, 0, pool->bo, 0, pool->size_in_dw * 4);
565 evergreen_cs_set_vertex_buffer(ctx, 1, 0,
566 (struct pipe_resource*)pool->bo);
567 }
568
569 /**
570 * This function initializes all the compute specific registers that need to
571 * be initialized for each compute command stream. Registers that are common
572 * to both compute and 3D will be initialized at the beginning of each compute
573 * command stream by the start_cs_cmd atom. However, since the SET_CONTEXT_REG
574 * packet requires that the shader type bit be set, we must initialize all
575 * context registers needed for compute in this function. The registers
576 * intialized by the start_cs_cmd atom can be found in evereen_state.c in the
577 * functions evergreen_init_atom_start_cs or cayman_init_atom_start_cs depending
578 * on the GPU family.
579 */
580 void evergreen_init_atom_start_compute_cs(struct r600_context *ctx)
581 {
582 struct r600_command_buffer *cb = &ctx->start_compute_cs_cmd;
583 int num_threads;
584 int num_stack_entries;
585
586 /* We aren't passing the EMIT_EARLY flag as the third argument
587 * because we will be emitting this atom manually in order to
588 * ensure it gets emitted after the start_cs_cmd atom.
589 */
590 r600_init_command_buffer(cb, 256, 0);
591 cb->pkt_flags = RADEON_CP_PACKET3_COMPUTE_MODE;
592
593 switch (ctx->family) {
594 case CHIP_CEDAR:
595 default:
596 num_threads = 128;
597 num_stack_entries = 256;
598 break;
599 case CHIP_REDWOOD:
600 num_threads = 128;
601 num_stack_entries = 256;
602 break;
603 case CHIP_JUNIPER:
604 num_threads = 128;
605 num_stack_entries = 512;
606 break;
607 case CHIP_CYPRESS:
608 case CHIP_HEMLOCK:
609 num_threads = 128;
610 num_stack_entries = 512;
611 break;
612 case CHIP_PALM:
613 num_threads = 128;
614 num_stack_entries = 256;
615 break;
616 case CHIP_SUMO:
617 num_threads = 128;
618 num_stack_entries = 256;
619 break;
620 case CHIP_SUMO2:
621 num_threads = 128;
622 num_stack_entries = 512;
623 break;
624 case CHIP_BARTS:
625 num_threads = 128;
626 num_stack_entries = 512;
627 break;
628 case CHIP_TURKS:
629 num_threads = 128;
630 num_stack_entries = 256;
631 break;
632 case CHIP_CAICOS:
633 num_threads = 128;
634 num_stack_entries = 256;
635 break;
636 }
637
638 /* Config Registers */
639 if (ctx->chip_class < CAYMAN) {
640
641 /* These registers control which simds can be used by each stage.
642 * The default for these registers is 0xffffffff, which means
643 * all simds are available for each stage. It's possible we may
644 * want to play around with these in the future, but for now
645 * the default value is fine.
646 *
647 * R_008E20_SQ_STATIC_THREAD_MGMT1
648 * R_008E24_SQ_STATIC_THREAD_MGMT2
649 * R_008E28_SQ_STATIC_THREAD_MGMT3
650 */
651
652 /* XXX: We may need to adjust the thread and stack resource
653 * values for 3D/compute interop */
654
655 r600_store_config_reg_seq(cb, R_008C18_SQ_THREAD_RESOURCE_MGMT_1, 5);
656
657 /* R_008C18_SQ_THREAD_RESOURCE_MGMT_1
658 * Set the number of threads used by the PS/VS/GS/ES stage to
659 * 0.
660 */
661 r600_store_value(cb, 0);
662
663 /* R_008C1C_SQ_THREAD_RESOURCE_MGMT_2
664 * Set the number of threads used by the CS (aka LS) stage to
665 * the maximum number of threads and set the number of threads
666 * for the HS stage to 0. */
667 r600_store_value(cb, S_008C1C_NUM_LS_THREADS(num_threads));
668
669 /* R_008C20_SQ_STACK_RESOURCE_MGMT_1
670 * Set the Control Flow stack entries to 0 for PS/VS stages */
671 r600_store_value(cb, 0);
672
673 /* R_008C24_SQ_STACK_RESOURCE_MGMT_2
674 * Set the Control Flow stack entries to 0 for GS/ES stages */
675 r600_store_value(cb, 0);
676
677 /* R_008C28_SQ_STACK_RESOURCE_MGMT_3
678 * Set the Control Flow stack entries to 0 for the HS stage, and
679 * set it to the maximum value for the CS (aka LS) stage. */
680 r600_store_value(cb,
681 S_008C28_NUM_LS_STACK_ENTRIES(num_stack_entries));
682 }
683
684 /* Context Registers */
685
686 if (ctx->chip_class < CAYMAN) {
687 /* workaround for hw issues with dyn gpr - must set all limits
688 * to 240 instead of 0, 0x1e == 240 / 8
689 */
690 r600_store_context_reg(cb, R_028838_SQ_DYN_GPR_RESOURCE_LIMIT_1,
691 S_028838_PS_GPRS(0x1e) |
692 S_028838_VS_GPRS(0x1e) |
693 S_028838_GS_GPRS(0x1e) |
694 S_028838_ES_GPRS(0x1e) |
695 S_028838_HS_GPRS(0x1e) |
696 S_028838_LS_GPRS(0x1e));
697 }
698
699 /* XXX: Investigate setting bit 15, which is FAST_COMPUTE_MODE */
700 r600_store_context_reg(cb, R_028A40_VGT_GS_MODE,
701 S_028A40_COMPUTE_MODE(1) | S_028A40_PARTIAL_THD_AT_EOI(1));
702
703 r600_store_context_reg(cb, R_028B54_VGT_SHADER_STAGES_EN, 2/*CS_ON*/);
704
705 r600_store_context_reg(cb, R_0286E8_SPI_COMPUTE_INPUT_CNTL,
706 S_0286E8_TID_IN_GROUP_ENA
707 | S_0286E8_TGID_ENA
708 | S_0286E8_DISABLE_INDEX_PACK);
709
710
711 /* The LOOP_CONST registers are an optimization for loops that allows
712 * you to store the initial counter, increment value, and maximum
713 * counter value in a register so that hardware can calculate the
714 * correct number of iterations for the loop, so that you don't need
715 * to have the loop counter in your shader code. We don't currently use
716 * this optimization, so we must keep track of the counter in the
717 * shader and use a break instruction to exit loops. However, the
718 * hardware still uses this register to determine when to exit a
719 * loop, so we need to initialize the counter to 0, set the increment
720 * value to 1, and the maximum counter value to 4095 (0xfff), which
721 * is the maximum value allowed. This gives us a maximum of 4096
722 * iterations for our loops, but hopefully our break instruction will
723 * execute some time before the 4096th iteration.
724 */
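/* A note on the 0x1000FFF value below (assuming the usual SQ_LOOP_CONST
 * field packing): count = 0xFFF (4095) in the low 12 bits, init = 0x000
 * in the middle bits, and increment = 0x01 in the top byte.
 */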
725 eg_store_loop_const(cb, R_03A200_SQ_LOOP_CONST_0 + (160 * 4), 0x1000FFF);
726 }
727
728 void evergreen_init_compute_state_functions(struct r600_context *ctx)
729 {
730 ctx->context.create_compute_state = evergreen_create_compute_state;
731 ctx->context.delete_compute_state = evergreen_delete_compute_state;
732 ctx->context.bind_compute_state = evergreen_bind_compute_state;
733 // ctx->context.create_sampler_view = evergreen_compute_create_sampler_view;
734 ctx->context.set_compute_resources = evergreen_set_compute_resources;
735 ctx->context.set_compute_sampler_views = evergreen_set_cs_sampler_view;
736 ctx->context.bind_compute_sampler_states = evergreen_bind_compute_sampler_states;
737 ctx->context.set_global_binding = evergreen_set_global_binding;
738 ctx->context.launch_grid = evergreen_launch_grid;
739
740 /* We always use at least two vertex buffers for compute, one for
741 * parameters and one for global memory */
742 ctx->cs_vertex_buffer_state.enabled_mask =
743 ctx->cs_vertex_buffer_state.dirty_mask = 1 | 2;
744 }
745
746
747 struct pipe_resource *r600_compute_global_buffer_create(
748 struct pipe_screen *screen,
749 const struct pipe_resource *templ)
750 {
751 assert(templ->target == PIPE_BUFFER);
752 assert(templ->bind & PIPE_BIND_GLOBAL);
753 assert(templ->array_size == 1 || templ->array_size == 0);
754 assert(templ->depth0 == 1 || templ->depth0 == 0);
755 assert(templ->height0 == 1 || templ->height0 == 0);
756
757 struct r600_resource_global* result = (struct r600_resource_global*)
758 CALLOC(sizeof(struct r600_resource_global), 1);
759 struct r600_screen* rscreen = (struct r600_screen*)screen;
760
761 COMPUTE_DBG("*** r600_compute_global_buffer_create\n");
762 COMPUTE_DBG("width = %u array_size = %u\n", templ->width0,
763 templ->array_size);
764
765 result->base.b.vtbl = &r600_global_buffer_vtbl;
766 result->base.b.b.screen = screen;
767 result->base.b.b = *templ;
768 pipe_reference_init(&result->base.b.b.reference, 1);
769
770 int size_in_dw = (templ->width0+3) / 4;
771
772 result->chunk = compute_memory_alloc(rscreen->global_pool, size_in_dw);
773
774 if (result->chunk == NULL)
775 {
776 free(result);
777 return NULL;
778 }
779
780 return &result->base.b.b;
781 }
782
783 void r600_compute_global_buffer_destroy(
784 struct pipe_screen *screen,
785 struct pipe_resource *res)
786 {
787 assert(res->target == PIPE_BUFFER);
788 assert(res->bind & PIPE_BIND_GLOBAL);
789
790 struct r600_resource_global* buffer = (struct r600_resource_global*)res;
791 struct r600_screen* rscreen = (struct r600_screen*)screen;
792
793 compute_memory_free(rscreen->global_pool, buffer->chunk->id);
794
795 buffer->chunk = NULL;
796 free(res);
797 }
798
799 void* r600_compute_global_transfer_map(
800 struct pipe_context *ctx_,
801 struct pipe_transfer* transfer)
802 {
803 assert(transfer->resource->target == PIPE_BUFFER);
804 assert(transfer->resource->bind & PIPE_BIND_GLOBAL);
805 assert(transfer->box.x >= 0);
806 assert(transfer->box.y == 0);
807 assert(transfer->box.z == 0);
808
809 struct r600_context *ctx = (struct r600_context *)ctx_;
810 struct r600_resource_global* buffer =
811 (struct r600_resource_global*)transfer->resource;
812
813 uint32_t* map;
814 ///TODO: do it better, mapping is not possible if the pool is too big
815
816 if (!(map = ctx->ws->buffer_map(buffer->chunk->pool->bo->cs_buf,
817 ctx->cs, transfer->usage))) {
818 return NULL;
819 }
820
821 COMPUTE_DBG("buffer start: %lli\n", buffer->chunk->start_in_dw);
822 return ((char*)(map + buffer->chunk->start_in_dw)) + transfer->box.x;
823 }
824
825 void r600_compute_global_transfer_unmap(
826 struct pipe_context *ctx_,
827 struct pipe_transfer* transfer)
828 {
829 assert(transfer->resource->target == PIPE_BUFFER);
830 assert(transfer->resource->bind & PIPE_BIND_GLOBAL);
831
832 struct r600_context *ctx = (struct r600_context *)ctx_;
833 struct r600_resource_global* buffer =
834 (struct r600_resource_global*)transfer->resource;
835
836 ctx->ws->buffer_unmap(buffer->chunk->pool->bo->cs_buf);
837 }
838
839 struct pipe_transfer * r600_compute_global_get_transfer(
840 struct pipe_context *ctx_,
841 struct pipe_resource *resource,
842 unsigned level,
843 unsigned usage,
844 const struct pipe_box *box)
845 {
846 struct r600_context *ctx = (struct r600_context *)ctx_;
847 struct compute_memory_pool *pool = ctx->screen->global_pool;
848
849 compute_memory_finalize_pending(pool, ctx_);
850
851 assert(resource->target == PIPE_BUFFER);
852 struct r600_context *rctx = (struct r600_context*)ctx_;
853 struct pipe_transfer *transfer = util_slab_alloc(&rctx->pool_transfers);
854
855 transfer->resource = resource;
856 transfer->level = level;
857 transfer->usage = usage;
858 transfer->box = *box;
859 transfer->stride = 0;
860 transfer->layer_stride = 0;
861 transfer->data = NULL;
862
863 /* Note that strides are zero; this is OK for buffers, but not for
864 * 2D textures and higher, at least.
865 */
866 return transfer;
867 }
868
869 void r600_compute_global_transfer_destroy(
870 struct pipe_context *ctx_,
871 struct pipe_transfer *transfer)
872 {
873 struct r600_context *rctx = (struct r600_context*)ctx_;
874 util_slab_free(&rctx->pool_transfers, transfer);
875 }
876
877 void r600_compute_global_transfer_flush_region(
878 struct pipe_context *ctx_,
879 struct pipe_transfer *transfer,
880 const struct pipe_box *box)
881 {
882 assert(0 && "TODO");
883 }
884
885 void r600_compute_global_transfer_inline_write(
886 struct pipe_context *pipe,
887 struct pipe_resource *resource,
888 unsigned level,
889 unsigned usage,
890 const struct pipe_box *box,
891 const void *data,
892 unsigned stride,
893 unsigned layer_stride)
894 {
895 assert(0 && "TODO");
896 }