r600g: implement timestamp query and get_timestamp hook
[mesa.git] / src / gallium / drivers / r600 / evergreen_compute.c
1 /*
2 * Copyright 2011 Adam Rak <adam.rak@streamnovation.com>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Adam Rak <adam.rak@streamnovation.com>
25 */
26
27 #include <stdio.h>
28 #include <errno.h>
29 #include "pipe/p_defines.h"
30 #include "pipe/p_state.h"
31 #include "pipe/p_context.h"
32 #include "util/u_blitter.h"
33 #include "util/u_double_list.h"
34 #include "util/u_transfer.h"
35 #include "util/u_surface.h"
36 #include "util/u_pack_color.h"
37 #include "util/u_memory.h"
38 #include "util/u_inlines.h"
39 #include "util/u_framebuffer.h"
40 #include "pipebuffer/pb_buffer.h"
41 #include "r600.h"
42 #include "evergreend.h"
43 #include "r600_resource.h"
44 #include "r600_shader.h"
45 #include "r600_pipe.h"
46 #include "r600_formats.h"
47 #include "evergreen_compute.h"
48 #include "r600_hw_context_priv.h"
49 #include "evergreen_compute_internal.h"
50 #include "compute_memory_pool.h"
51 #ifdef HAVE_OPENCL
52 #include "llvm_wrapper.h"
53 #endif
54
55 /**
56 RAT0 is for global binding writes
57 VTX1 is for global binding reads
58 
59 for writing images RAT1...
60 for reading images TEX2...
61 TEX2-RAT1 is paired
62 
63 TEX2... consumes the same fetch resources that VTX2... would consume
64 
65 CONST0 and VTX0 are for parameters
66 CONST0 binds the smaller input parameter buffer and is used for constant indexing;
67 it is also constant cached
68 VTX0 is for indirect/non-constant indexing, or if the input is bigger than
69 the constant cache can handle
70 
71 RATs are limited to 12, so we can bind at most 11 textures for writing
72 because we reserve RAT0 for global bindings. With byte addressing enabled,
73 we should reserve another one too, so at most 10 images can be bound for writing.
74
75 from Nvidia OpenCL:
76 CL_DEVICE_MAX_READ_IMAGE_ARGS: 128
77 CL_DEVICE_MAX_WRITE_IMAGE_ARGS: 8
78
79 so 10 for writing is enough; 176 is the max for reading according to the docs
80 
81 writable images should be listed first (< 10), so their id corresponds to RAT(id+1)
82 writable images will consume TEX slots, and VTX slots too because of linear indexing
83
84 */
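/* Illustrative slot mapping implied by the scheme above (a sketch, based on the
 * helpers defined later in this file):
 *   kernel parameters  -> CONST0 (cached) or VTX0 (FETCH0)
 *   global buffer pool -> RAT0 for writes, VTX1 (FETCH1) for reads
 *   image/surface i    -> RAT(i+1) for writes, TEX/FETCH(i+2) for reads
 */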
85
86 static void evergreen_cs_set_vertex_buffer(
87 struct r600_context * rctx,
88 unsigned vb_index,
89 unsigned offset,
90 struct pipe_resource * buffer)
91 {
92 struct r600_vertexbuf_state *state = &rctx->cs_vertex_buffer_state;
93 struct pipe_vertex_buffer *vb = &state->vb[vb_index];
94 vb->stride = 1;
95 vb->buffer_offset = offset;
96 vb->buffer = buffer;
97 vb->user_buffer = NULL;
98
99 r600_inval_vertex_cache(rctx);
100 state->enabled_mask |= 1 << vb_index;
101 state->dirty_mask |= 1 << vb_index;
102 r600_atom_dirty(rctx, &state->atom);
103 }
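/* Callers of the helper above use fixed vertex buffer slots: slot 0 carries the
 * kernel parameter buffer (evergreen_compute_upload_input), slot 1 the global
 * memory pool (evergreen_set_global_binding), and slots 2+ the compute
 * resources (evergreen_set_compute_resources). */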
104
105 const struct u_resource_vtbl r600_global_buffer_vtbl =
106 {
107 u_default_resource_get_handle, /* get_handle */
108 r600_compute_global_buffer_destroy, /* resource_destroy */
109 r600_compute_global_get_transfer, /* get_transfer */
110 r600_compute_global_transfer_destroy, /* transfer_destroy */
111 r600_compute_global_transfer_map, /* transfer_map */
112 r600_compute_global_transfer_flush_region,/* transfer_flush_region */
113 r600_compute_global_transfer_unmap, /* transfer_unmap */
114 r600_compute_global_transfer_inline_write /* transfer_inline_write */
115 };
116
117
118 void *evergreen_create_compute_state(
119 struct pipe_context *ctx_,
120 const struct pipe_compute_state *cso)
121 {
122 struct r600_context *ctx = (struct r600_context *)ctx_;
123 struct r600_pipe_compute *shader = CALLOC_STRUCT(r600_pipe_compute);
124 void *p;
125
126 #ifdef HAVE_OPENCL
127 const struct pipe_llvm_program_header * header;
128 const unsigned char * code;
129
130 COMPUTE_DBG("*** evergreen_create_compute_state\n");
131
132 header = cso->prog;
133 code = cso->prog + sizeof(struct pipe_llvm_program_header);
134 #endif
135
136 shader->ctx = (struct r600_context*)ctx;
137 shader->resources = (struct evergreen_compute_resource*)
138 CALLOC(sizeof(struct evergreen_compute_resource),
139 get_compute_resource_num());
140 shader->local_size = cso->req_local_mem; ///TODO: assert it
141 shader->private_size = cso->req_private_mem;
142 shader->input_size = cso->req_input_mem;
143
144 #ifdef HAVE_OPENCL
145 shader->mod = llvm_parse_bitcode(code, header->num_bytes);
146
147 r600_compute_shader_create(ctx_, shader->mod, &shader->bc);
148 #endif
149 shader->shader_code_bo = r600_compute_buffer_alloc_vram(ctx->screen,
150 shader->bc.ndw * 4);
151
152 p = ctx->ws->buffer_map(shader->shader_code_bo->cs_buf, ctx->cs,
153 PIPE_TRANSFER_WRITE);
154
155 memcpy(p, shader->bc.bytecode, shader->bc.ndw * 4);
156 ctx->ws->buffer_unmap(shader->shader_code_bo->cs_buf);
157 return shader;
158 }
159
160 void evergreen_delete_compute_state(struct pipe_context *ctx, void* state)
161 {
162 struct r600_pipe_compute *shader = (struct r600_pipe_compute *)state;
163
164 free(shader->resources);
165 free(shader);
166 }
167
168 static void evergreen_bind_compute_state(struct pipe_context *ctx_, void *state)
169 {
170 struct r600_context *ctx = (struct r600_context *)ctx_;
171
172 COMPUTE_DBG("*** evergreen_bind_compute_state\n");
173
174 ctx->cs_shader_state.shader = (struct r600_pipe_compute *)state;
175 }
176
177 /* The kernel parameters are stored in a vtx buffer (ID=0). Besides the explicit
178 * kernel parameters, there are implicit parameters that need to be stored
179 * in the vertex buffer as well. Here is how these parameters are organized in
180 * the buffer:
181 *
182 * DWORDS 0-2: Number of work groups in each dimension (x,y,z)
183 * DWORDS 3-5: Number of global work items in each dimension (x,y,z)
184 * DWORDS 6-8: Number of work items within each work group in each dimension
185 * (x,y,z)
186 * DWORDS 9+ : Kernel parameters
187 */
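/* Illustrative example (values assumed): a grid of 2x2x1 work groups, each of
 * 64x1x1 items, yields DWORDs 0-8 = {2,2,1, 128,2,1, 64,1,1}, followed by the
 * kernel arguments from DWORD 9 onward. */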
188 void evergreen_compute_upload_input(
189 struct pipe_context *ctx_,
190 const uint *block_layout,
191 const uint *grid_layout,
192 const void *input)
193 {
194 struct r600_context *ctx = (struct r600_context *)ctx_;
195 struct r600_pipe_compute *shader = ctx->cs_shader_state.shader;
196 int i;
197 unsigned kernel_parameters_offset_bytes = 36;
198 uint32_t * num_work_groups_start;
199 uint32_t * global_size_start;
200 uint32_t * local_size_start;
201 uint32_t * kernel_parameters_start;
202
203 if (shader->input_size == 0) {
204 return;
205 }
206
207 if (!shader->kernel_param) {
208 unsigned buffer_size = shader->input_size;
209
210 /* Add space for the grid dimensions */
211 buffer_size += kernel_parameters_offset_bytes * sizeof(uint);
212 shader->kernel_param = r600_compute_buffer_alloc_vram(
213 ctx->screen, buffer_size);
214 }
215
216 num_work_groups_start = ctx->ws->buffer_map(
217 shader->kernel_param->cs_buf, ctx->cs, PIPE_TRANSFER_WRITE);
218 global_size_start = num_work_groups_start + (3 * (sizeof(uint) /4));
219 local_size_start = global_size_start + (3 * (sizeof(uint)) / 4);
220 kernel_parameters_start = local_size_start + (3 * (sizeof(uint)) / 4);
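/* Each of the three sections above is 3 dwords (x, y, z); the pointer
 * arithmetic is in uint32_t units and assumes sizeof(uint) == 4. */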
221
222 /* Copy the work group size */
223 memcpy(num_work_groups_start, grid_layout, 3 * sizeof(uint));
224
225 /* Copy the global size */
226 for (i = 0; i < 3; i++) {
227 global_size_start[i] = grid_layout[i] * block_layout[i];
228 }
229
230 /* Copy the local dimensions */
231 memcpy(local_size_start, block_layout, 3 * sizeof(uint));
232
233 /* Copy the kernel inputs */
234 memcpy(kernel_parameters_start, input, shader->input_size);
235
236 for (i = 0; i < (kernel_parameters_offset_bytes / 4) +
237 (shader->input_size / 4); i++) {
238 COMPUTE_DBG("input %i : %i\n", i,
239 ((unsigned*)num_work_groups_start)[i]);
240 }
241
242 ctx->ws->buffer_unmap(shader->kernel_param->cs_buf);
243
244 ///ID=0 is reserved for the parameters
245 evergreen_cs_set_vertex_buffer(ctx, 0, 0,
246 (struct pipe_resource*)shader->kernel_param);
247 ///ID=0 is reserved for parameters
248 evergreen_set_const_cache(shader, 0, shader->kernel_param,
249 shader->input_size, 0);
250 }
251
252 static void evergreen_emit_direct_dispatch(
253 struct r600_context *rctx,
254 const uint *block_layout, const uint *grid_layout)
255 {
256 int i;
257 struct radeon_winsys_cs *cs = rctx->cs;
258 unsigned num_waves;
259 unsigned num_pipes = rctx->screen->info.r600_max_pipes;
260 unsigned wave_divisor = (16 * num_pipes);
261 int group_size = 1;
262 int grid_size = 1;
263 /* XXX: Enable lds and get size from cs_shader_state */
264 unsigned lds_size = 0;
265
266 /* Calculate group_size/grid_size */
267 for (i = 0; i < 3; i++) {
268 group_size *= block_layout[i];
269 }
270
271 for (i = 0; i < 3; i++) {
272 grid_size *= grid_layout[i];
273 }
274
275 /* num_waves = ceil((tg_size.x * tg_size.y * tg_size.z) / (16 * num_pipes)) */
276 num_waves = (block_layout[0] * block_layout[1] * block_layout[2] +
277 wave_divisor - 1) / wave_divisor;
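/* Worked example (numbers assumed): a 16x16x1 block on a GPU with 2 pipes
 * gives wave_divisor = 32, so num_waves = (256 + 31) / 32 = 8. */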
278
279 COMPUTE_DBG("Using %u pipes, there are %u wavefronts per thread block\n",
280 num_pipes, num_waves);
281
282 /* XXX: Partition the LDS between PS/CS. By default half (4096 dwords
283 * on Evergreen) goes to Pixel Shaders and half goes to Compute Shaders.
284 * We may need to allocate the entire LDS space for Compute Shaders.
285 *
286 * EG: R_008E2C_SQ_LDS_RESOURCE_MGMT := S_008E2C_NUM_LS_LDS(lds_dwords)
287 * CM: CM_R_0286FC_SPI_LDS_MGMT := S_0286FC_NUM_LS_LDS(lds_dwords)
288 */
289
290 r600_write_config_reg(cs, R_008970_VGT_NUM_INDICES, group_size);
291
292 r600_write_config_reg_seq(cs, R_00899C_VGT_COMPUTE_START_X, 3);
293 r600_write_value(cs, 0); /* R_00899C_VGT_COMPUTE_START_X */
294 r600_write_value(cs, 0); /* R_0089A0_VGT_COMPUTE_START_Y */
295 r600_write_value(cs, 0); /* R_0089A4_VGT_COMPUTE_START_Z */
296
297 r600_write_config_reg(cs, R_0089AC_VGT_COMPUTE_THREAD_GROUP_SIZE,
298 group_size);
299
300 r600_write_compute_context_reg_seq(cs, R_0286EC_SPI_COMPUTE_NUM_THREAD_X, 3);
301 r600_write_value(cs, block_layout[0]); /* R_0286EC_SPI_COMPUTE_NUM_THREAD_X */
302 r600_write_value(cs, block_layout[1]); /* R_0286F0_SPI_COMPUTE_NUM_THREAD_Y */
303 r600_write_value(cs, block_layout[2]); /* R_0286F4_SPI_COMPUTE_NUM_THREAD_Z */
304
305 r600_write_compute_context_reg(cs, CM_R_0288E8_SQ_LDS_ALLOC,
306 lds_size | (num_waves << 14));
307
308 /* Dispatch packet */
309 r600_write_value(cs, PKT3C(PKT3_DISPATCH_DIRECT, 3, 0));
310 r600_write_value(cs, grid_layout[0]);
311 r600_write_value(cs, grid_layout[1]);
312 r600_write_value(cs, grid_layout[2]);
313 /* VGT_DISPATCH_INITIATOR = COMPUTE_SHADER_EN */
314 r600_write_value(cs, 1);
315 }
316
317 static void compute_emit_cs(struct r600_context *ctx, const uint *block_layout,
318 const uint *grid_layout)
319 {
320 struct radeon_winsys_cs *cs = ctx->cs;
321 int i;
322
323 struct r600_resource *onebo = NULL;
324 struct r600_pipe_state *cb_state;
325 struct evergreen_compute_resource *resources =
326 ctx->cs_shader_state.shader->resources;
327
328 /* Initialize all the registers common to both 3D and compute. Some
329 * 3D-only registers will be initialized by this atom as well, but
330 * this is OK for now.
331 *
332 * See evergreen_init_atom_start_cs() or cayman_init_atom_start_cs() in
333 * evergreen_state.c for the list of registers that are initialized by
334 * the start_cs_cmd atom.
335 */
336 r600_emit_atom(ctx, &ctx->start_cs_cmd.atom);
337
338 /* Initialize all the compute specific registers.
339 *
340 * See evergreen_init_atom_start_compute_cs() in this file for the list
341 * of registers initialized by the start_compute_cs_cmd atom.
342 */
343 r600_emit_atom(ctx, &ctx->start_compute_cs_cmd.atom);
344
345 /* Emit cb_state */
346 cb_state = ctx->states[R600_PIPE_STATE_FRAMEBUFFER];
347 r600_context_pipe_state_emit(ctx, cb_state, RADEON_CP_PACKET3_COMPUTE_MODE);
348
349 /* Set CB_TARGET_MASK XXX: Use cb_misc_state */
350 r600_write_compute_context_reg(cs, R_028238_CB_TARGET_MASK,
351 ctx->compute_cb_target_mask);
352
353
354 /* Emit vertex buffer state */
355 ctx->cs_vertex_buffer_state.atom.num_dw = 12 * util_bitcount(ctx->cs_vertex_buffer_state.dirty_mask);
356 r600_emit_atom(ctx, &ctx->cs_vertex_buffer_state.atom);
357
358 /* Emit compute shader state */
359 r600_emit_atom(ctx, &ctx->cs_shader_state.atom);
360
361 for (i = 0; i < get_compute_resource_num(); i++) {
362 if (resources[i].enabled) {
363 int j;
364 COMPUTE_DBG("resnum: %i, cdw: %i\n", i, cs->cdw);
365
366 for (j = 0; j < resources[i].cs_end; j++) {
367 if (resources[i].do_reloc[j]) {
368 assert(resources[i].bo);
369 evergreen_emit_ctx_reloc(ctx,
370 resources[i].bo,
371 resources[i].usage);
372 }
373
374 cs->buf[cs->cdw++] = resources[i].cs[j];
375 }
376
377 if (resources[i].bo) {
378 onebo = resources[i].bo;
379 evergreen_emit_ctx_reloc(ctx,
380 resources[i].bo,
381 resources[i].usage);
382
383 ///special case for textures
384 if (resources[i].do_reloc
385 [resources[i].cs_end] == 2) {
386 evergreen_emit_ctx_reloc(ctx,
387 resources[i].bo,
388 resources[i].usage);
389 }
390 }
391 }
392 }
393
394 /* Emit dispatch state and dispatch packet */
395 evergreen_emit_direct_dispatch(ctx, block_layout, grid_layout);
396
397 /* r600_flush_framebuffer() updates the cb_flush_flags and then
398 * calls r600_emit_atom() on the ctx->surface_sync_cmd.atom, which emits
399 * a SURFACE_SYNC packet via r600_emit_surface_sync().
400 *
401 * XXX r600_emit_surface_sync() hardcodes the CP_COHER_SIZE to
402 * 0xffffffff, so we will need to add a field to struct
403 * r600_surface_sync_cmd if we want to manually set this value.
404 */
405 r600_flush_framebuffer(ctx, true /* Flush now */);
406
407 #if 0
408 COMPUTE_DBG("cdw: %i\n", cs->cdw);
409 for (i = 0; i < cs->cdw; i++) {
410 COMPUTE_DBG("%4i : 0x%08X\n", i, ctx->cs->buf[i]);
411 }
412 #endif
413
414 ctx->ws->cs_flush(ctx->cs, RADEON_FLUSH_ASYNC | RADEON_FLUSH_COMPUTE);
415
416 ctx->pm4_dirty_cdwords = 0;
417 ctx->flags = 0;
418
419 COMPUTE_DBG("shader started\n");
420
421 ctx->ws->buffer_wait(onebo->buf, 0);
422
423 COMPUTE_DBG("...\n");
424
425 ctx->streamout_start = TRUE;
426 ctx->streamout_append_bitmask = ~0;
427
428 }
429
430
431 /**
432 * Emit function for r600_cs_shader_state atom
433 */
434 void evergreen_emit_cs_shader(
435 struct r600_context *rctx,
436 struct r600_atom *atom)
437 {
438 struct r600_cs_shader_state *state =
439 (struct r600_cs_shader_state*)atom;
440 struct r600_pipe_compute *shader = state->shader;
441 struct radeon_winsys_cs *cs = rctx->cs;
442 uint64_t va;
443
444 va = r600_resource_va(&rctx->screen->screen, &shader->shader_code_bo->b.b);
445
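/* SQ_PGM_START_LS takes the shader address in 256-byte units, which is why
 * the virtual address is shifted right by 8 below. */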
446 r600_write_compute_context_reg_seq(cs, R_0288D0_SQ_PGM_START_LS, 3);
447 r600_write_value(cs, va >> 8); /* R_0288D0_SQ_PGM_START_LS */
448 r600_write_value(cs, /* R_0288D4_SQ_PGM_RESOURCES_LS */
449 S_0288D4_NUM_GPRS(shader->bc.ngpr)
450 | S_0288D4_STACK_SIZE(shader->bc.nstack));
451 r600_write_value(cs, 0); /* R_0288D8_SQ_PGM_RESOURCES_LS_2 */
452
453 r600_write_value(cs, PKT3C(PKT3_NOP, 0, 0));
454 r600_write_value(cs, r600_context_bo_reloc(rctx, shader->shader_code_bo,
455 RADEON_USAGE_READ));
456
457 r600_inval_shader_cache(rctx);
458 }
459
460 static void evergreen_launch_grid(
461 struct pipe_context *ctx_,
462 const uint *block_layout, const uint *grid_layout,
463 uint32_t pc, const void *input)
464 {
465 struct r600_context *ctx = (struct r600_context *)ctx_;
466
467 COMPUTE_DBG("PC: %i\n", pc);
468
469 evergreen_compute_upload_input(ctx_, block_layout, grid_layout, input);
470 compute_emit_cs(ctx, block_layout, grid_layout);
471 }
472
473 static void evergreen_set_compute_resources(struct pipe_context * ctx_,
474 unsigned start, unsigned count,
475 struct pipe_surface ** surfaces)
476 {
477 struct r600_context *ctx = (struct r600_context *)ctx_;
478 struct r600_surface **resources = (struct r600_surface **)surfaces;
479
480 COMPUTE_DBG("*** evergreen_set_compute_resources: start = %u count = %u\n",
481 start, count);
482
483 for (int i = 0; i < count; i++) {
484 /* The first two vertex buffers are reserved for parameters and
485 * global buffers. */
486 unsigned vtx_id = 2 + i;
487 if (resources[i]) {
488 struct r600_resource_global *buffer =
489 (struct r600_resource_global*)
490 resources[i]->base.texture;
491 if (resources[i]->base.writable) {
492 assert(i+1 < 12);
493
494 evergreen_set_rat(ctx->cs_shader_state.shader, i+1,
495 (struct r600_resource *)resources[i]->base.texture,
496 buffer->chunk->start_in_dw*4,
497 resources[i]->base.texture->width0);
498 }
499
500 evergreen_cs_set_vertex_buffer(ctx, vtx_id,
501 buffer->chunk->start_in_dw * 4,
502 resources[i]->base.texture);
503 }
504 }
505 }
506
507 static void evergreen_set_cs_sampler_view(struct pipe_context *ctx_,
508 unsigned start_slot, unsigned count,
509 struct pipe_sampler_view **views)
510 {
511 struct r600_context *ctx = (struct r600_context *)ctx_;
512 struct r600_pipe_sampler_view **resource =
513 (struct r600_pipe_sampler_view **)views;
514
515 for (int i = 0; i < count; i++) {
516 if (resource[i]) {
517 assert(i+1 < 12);
518 ///FETCH0 = VTX0 (param buffer),
519 //FETCH1 = VTX1 (global buffer pool), FETCH2... = TEX
520 evergreen_set_tex_resource(ctx->cs_shader_state.shader, resource[i], i+2);
521 }
522 }
523 }
524
525 static void evergreen_bind_compute_sampler_states(
526 struct pipe_context *ctx_,
527 unsigned start_slot,
528 unsigned num_samplers,
529 void **samplers_)
530 {
531 struct r600_context *ctx = (struct r600_context *)ctx_;
532 struct compute_sampler_state ** samplers =
533 (struct compute_sampler_state **)samplers_;
534
535 for (int i = 0; i < num_samplers; i++) {
536 if (samplers[i]) {
537 evergreen_set_sampler_resource(
538 ctx->cs_shader_state.shader, samplers[i], i);
539 }
540 }
541 }
542
543 static void evergreen_set_global_binding(
544 struct pipe_context *ctx_, unsigned first, unsigned n,
545 struct pipe_resource **resources,
546 uint32_t **handles)
547 {
548 struct r600_context *ctx = (struct r600_context *)ctx_;
549 struct compute_memory_pool *pool = ctx->screen->global_pool;
550 struct r600_resource_global **buffers =
551 (struct r600_resource_global **)resources;
552
553 COMPUTE_DBG("*** evergreen_set_global_binding first = %u n = %u\n",
554 first, n);
555
556 if (!resources) {
557 /* XXX: Unset */
558 return;
559 }
560
561 compute_memory_finalize_pending(pool, ctx_);
562
563 for (int i = 0; i < n; i++)
564 {
565 assert(resources[i]->target == PIPE_BUFFER);
566 assert(resources[i]->bind & PIPE_BIND_GLOBAL);
567
568 *(handles[i]) = buffers[i]->chunk->start_in_dw * 4;
569 }
570
571 evergreen_set_rat(ctx->cs_shader_state.shader, 0, pool->bo, 0, pool->size_in_dw * 4);
572 evergreen_cs_set_vertex_buffer(ctx, 1, 0,
573 (struct pipe_resource*)pool->bo);
574 }
575
576 /**
577 * This function initializes all the compute specific registers that need to
578 * be initialized for each compute command stream. Registers that are common
579 * to both compute and 3D will be initialized at the beginning of each compute
580 * command stream by the start_cs_cmd atom. However, since the SET_CONTEXT_REG
581 * packet requires that the shader type bit be set, we must initialize all
582 * context registers needed for compute in this function. The registers
583 * initialized by the start_cs_cmd atom can be found in evergreen_state.c in the
584 * functions evergreen_init_atom_start_cs or cayman_init_atom_start_cs depending
585 * on the GPU family.
586 */
587 void evergreen_init_atom_start_compute_cs(struct r600_context *ctx)
588 {
589 struct r600_command_buffer *cb = &ctx->start_compute_cs_cmd;
590 int num_threads;
591 int num_stack_entries;
592
593 /* We aren't passing the EMIT_EARLY flag as the third argument
594 * because we will be emitting this atom manually in order to
595 * ensure it gets emitted after the start_cs_cmd atom.
596 */
597 r600_init_command_buffer(cb, 256, 0);
598 cb->pkt_flags = RADEON_CP_PACKET3_COMPUTE_MODE;
599
600 switch (ctx->family) {
601 case CHIP_CEDAR:
602 default:
603 num_threads = 128;
604 num_stack_entries = 256;
605 break;
606 case CHIP_REDWOOD:
607 num_threads = 128;
608 num_stack_entries = 256;
609 break;
610 case CHIP_JUNIPER:
611 num_threads = 128;
612 num_stack_entries = 512;
613 break;
614 case CHIP_CYPRESS:
615 case CHIP_HEMLOCK:
616 num_threads = 128;
617 num_stack_entries = 512;
618 break;
619 case CHIP_PALM:
620 num_threads = 128;
621 num_stack_entries = 256;
622 break;
623 case CHIP_SUMO:
624 num_threads = 128;
625 num_stack_entries = 256;
626 break;
627 case CHIP_SUMO2:
628 num_threads = 128;
629 num_stack_entries = 512;
630 break;
631 case CHIP_BARTS:
632 num_threads = 128;
633 num_stack_entries = 512;
634 break;
635 case CHIP_TURKS:
636 num_threads = 128;
637 num_stack_entries = 256;
638 break;
639 case CHIP_CAICOS:
640 num_threads = 128;
641 num_stack_entries = 256;
642 break;
643 }
644
645 /* Config Registers */
646
647 /* The primitive type always needs to be POINTLIST for compute. */
648 r600_store_config_reg(cb, R_008958_VGT_PRIMITIVE_TYPE,
649 V_008958_DI_PT_POINTLIST);
650
651 if (ctx->chip_class < CAYMAN) {
652
653 /* These registers control which simds can be used by each stage.
654 * The default for these registers is 0xffffffff, which means
655 * all simds are available for each stage. It's possible we may
656 * want to play around with these in the future, but for now
657 * the default value is fine.
658 *
659 * R_008E20_SQ_STATIC_THREAD_MGMT1
660 * R_008E24_SQ_STATIC_THREAD_MGMT2
661 * R_008E28_SQ_STATIC_THREAD_MGMT3
662 */
663
664 /* XXX: We may need to adjust the thread and stack resource
665 * values for 3D/compute interop */
666
667 r600_store_config_reg_seq(cb, R_008C18_SQ_THREAD_RESOURCE_MGMT_1, 5);
668
669 /* R_008C18_SQ_THREAD_RESOURCE_MGMT_1
670 * Set the number of threads used by the PS/VS/GS/ES stage to
671 * 0.
672 */
673 r600_store_value(cb, 0);
674
675 /* R_008C1C_SQ_THREAD_RESOURCE_MGMT_2
676 * Set the number of threads used by the CS (aka LS) stage to
677 * the maximum number of threads and set the number of threads
678 * for the HS stage to 0. */
679 r600_store_value(cb, S_008C1C_NUM_LS_THREADS(num_threads));
680
681 /* R_008C20_SQ_STACK_RESOURCE_MGMT_1
682 * Set the Control Flow stack entries to 0 for PS/VS stages */
683 r600_store_value(cb, 0);
684
685 /* R_008C24_SQ_STACK_RESOURCE_MGMT_2
686 * Set the Control Flow stack entries to 0 for GS/ES stages */
687 r600_store_value(cb, 0);
688
689 /* R_008C28_SQ_STACK_RESOURCE_MGMT_3
690 * Set the Control Flow stack entries to 0 for the HS stage, and
691 * set it to the maximum value for the CS (aka LS) stage. */
692 r600_store_value(cb,
693 S_008C28_NUM_LS_STACK_ENTRIES(num_stack_entries));
694 }
695
696 /* Context Registers */
697
698 if (ctx->chip_class < CAYMAN) {
699 /* workaround for hw issues with dyn gpr - must set all limits
700 * to 240 instead of 0, 0x1e == 240 / 8
701 */
702 r600_store_context_reg(cb, R_028838_SQ_DYN_GPR_RESOURCE_LIMIT_1,
703 S_028838_PS_GPRS(0x1e) |
704 S_028838_VS_GPRS(0x1e) |
705 S_028838_GS_GPRS(0x1e) |
706 S_028838_ES_GPRS(0x1e) |
707 S_028838_HS_GPRS(0x1e) |
708 S_028838_LS_GPRS(0x1e));
709 }
710
711 /* XXX: Investigate setting bit 15, which is FAST_COMPUTE_MODE */
712 r600_store_context_reg(cb, R_028A40_VGT_GS_MODE,
713 S_028A40_COMPUTE_MODE(1) | S_028A40_PARTIAL_THD_AT_EOI(1));
714
715 r600_store_context_reg(cb, R_028B54_VGT_SHADER_STAGES_EN, 2/*CS_ON*/);
716
717 r600_store_context_reg(cb, R_0286E8_SPI_COMPUTE_INPUT_CNTL,
718 S_0286E8_TID_IN_GROUP_ENA
719 | S_0286E8_TGID_ENA
720 | S_0286E8_DISABLE_INDEX_PACK)
721 ;
722
723 /* The LOOP_CONST registers are an optimization for loops that allows
724 * you to store the initial counter, increment value, and maximum
725 * counter value in a register so that hardware can calculate the
726 * correct number of iterations for the loop, so that you don't need
727 * to have the loop counter in your shader code. We don't currently use
728 * this optimization, so we must keep track of the counter in the
729 * shader and use a break instruction to exit loops. However, the
730 * hardware will still use this register to determine when to exit a
731 * loop, so we need to initialize the counter to 0, set the increment
732 * value to 1 and the maximum counter value to 4095 (0xfff), which
733 * is the maximum value allowed. This gives us a maximum of 4096
734 * iterations for our loops, but hopefully our break instruction will
735 * execute some time before the 4096th iteration.
736 */
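/* A sketch of the encoding, assuming the usual SQ_LOOP_CONST field layout:
 * 0x1000FFF packs max count = 0xFFF, initial value = 0 and increment = 1,
 * matching the description above. */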
737 eg_store_loop_const(cb, R_03A200_SQ_LOOP_CONST_0 + (160 * 4), 0x1000FFF);
738 }
739
740 void evergreen_init_compute_state_functions(struct r600_context *ctx)
741 {
742 ctx->context.create_compute_state = evergreen_create_compute_state;
743 ctx->context.delete_compute_state = evergreen_delete_compute_state;
744 ctx->context.bind_compute_state = evergreen_bind_compute_state;
745 // ctx->context.create_sampler_view = evergreen_compute_create_sampler_view;
746 ctx->context.set_compute_resources = evergreen_set_compute_resources;
747 ctx->context.set_compute_sampler_views = evergreen_set_cs_sampler_view;
748 ctx->context.bind_compute_sampler_states = evergreen_bind_compute_sampler_states;
749 ctx->context.set_global_binding = evergreen_set_global_binding;
750 ctx->context.launch_grid = evergreen_launch_grid;
751
752 /* We always use at least two vertex buffers for compute, one for
753 * parameters and one for global memory */
754 ctx->cs_vertex_buffer_state.enabled_mask =
755 ctx->cs_vertex_buffer_state.dirty_mask = 1 | 2;
756 }
757
758
759 struct pipe_resource *r600_compute_global_buffer_create(
760 struct pipe_screen *screen,
761 const struct pipe_resource *templ)
762 {
763 assert(templ->target == PIPE_BUFFER);
764 assert(templ->bind & PIPE_BIND_GLOBAL);
765 assert(templ->array_size == 1 || templ->array_size == 0);
766 assert(templ->depth0 == 1 || templ->depth0 == 0);
767 assert(templ->height0 == 1 || templ->height0 == 0);
768
769 struct r600_resource_global* result = (struct r600_resource_global*)
770 CALLOC(sizeof(struct r600_resource_global), 1);
771 struct r600_screen* rscreen = (struct r600_screen*)screen;
772
773 COMPUTE_DBG("*** r600_compute_global_buffer_create\n");
774 COMPUTE_DBG("width = %u array_size = %u\n", templ->width0,
775 templ->array_size);
776
777 result->base.b.vtbl = &r600_global_buffer_vtbl;
778 result->base.b.b.screen = screen;
779 result->base.b.b = *templ;
780 pipe_reference_init(&result->base.b.b.reference, 1);
781
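/* Round the requested size in bytes up to a whole number of dwords. */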
782 int size_in_dw = (templ->width0+3) / 4;
783
784 result->chunk = compute_memory_alloc(rscreen->global_pool, size_in_dw);
785
786 if (result->chunk == NULL)
787 {
788 free(result);
789 return NULL;
790 }
791
792 return &result->base.b.b;
793 }
794
795 void r600_compute_global_buffer_destroy(
796 struct pipe_screen *screen,
797 struct pipe_resource *res)
798 {
799 assert(res->target == PIPE_BUFFER);
800 assert(res->bind & PIPE_BIND_GLOBAL);
801
802 struct r600_resource_global* buffer = (struct r600_resource_global*)res;
803 struct r600_screen* rscreen = (struct r600_screen*)screen;
804
805 compute_memory_free(rscreen->global_pool, buffer->chunk->id);
806
807 buffer->chunk = NULL;
808 free(res);
809 }
810
811 void* r600_compute_global_transfer_map(
812 struct pipe_context *ctx_,
813 struct pipe_transfer* transfer)
814 {
815 assert(transfer->resource->target == PIPE_BUFFER);
816 assert(transfer->resource->bind & PIPE_BIND_GLOBAL);
817 assert(transfer->box.x >= 0);
818 assert(transfer->box.y == 0);
819 assert(transfer->box.z == 0);
820
821 struct r600_context *ctx = (struct r600_context *)ctx_;
822 struct r600_resource_global* buffer =
823 (struct r600_resource_global*)transfer->resource;
824
825 uint32_t* map;
826 ///TODO: do it better, mapping is not possible if the pool is too big
827
828 if (!(map = ctx->ws->buffer_map(buffer->chunk->pool->bo->cs_buf,
829 ctx->cs, transfer->usage))) {
830 return NULL;
831 }
832
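/* map points at the start of the pool in dwords; advance by the chunk's
 * dword offset, then by box.x bytes within the buffer (see the return below). */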
833 COMPUTE_DBG("buffer start: %lli\n", buffer->chunk->start_in_dw);
834 return ((char*)(map + buffer->chunk->start_in_dw)) + transfer->box.x;
835 }
836
837 void r600_compute_global_transfer_unmap(
838 struct pipe_context *ctx_,
839 struct pipe_transfer* transfer)
840 {
841 assert(transfer->resource->target == PIPE_BUFFER);
842 assert(transfer->resource->bind & PIPE_BIND_GLOBAL);
843
844 struct r600_context *ctx = (struct r600_context *)ctx_;
845 struct r600_resource_global* buffer =
846 (struct r600_resource_global*)transfer->resource;
847
848 ctx->ws->buffer_unmap(buffer->chunk->pool->bo->cs_buf);
849 }
850
851 struct pipe_transfer * r600_compute_global_get_transfer(
852 struct pipe_context *ctx_,
853 struct pipe_resource *resource,
854 unsigned level,
855 unsigned usage,
856 const struct pipe_box *box)
857 {
858 struct r600_context *ctx = (struct r600_context *)ctx_;
859 struct compute_memory_pool *pool = ctx->screen->global_pool;
860
861 compute_memory_finalize_pending(pool, ctx_);
862
863 assert(resource->target == PIPE_BUFFER);
864 struct r600_context *rctx = (struct r600_context*)ctx_;
865 struct pipe_transfer *transfer = util_slab_alloc(&rctx->pool_transfers);
866
867 transfer->resource = resource;
868 transfer->level = level;
869 transfer->usage = usage;
870 transfer->box = *box;
871 transfer->stride = 0;
872 transfer->layer_stride = 0;
873 transfer->data = NULL;
874
875 /* Note strides are zero; this is OK for buffers, but not for
876 * textures 2d & higher, at least.
877 */
878 return transfer;
879 }
880
881 void r600_compute_global_transfer_destroy(
882 struct pipe_context *ctx_,
883 struct pipe_transfer *transfer)
884 {
885 struct r600_context *rctx = (struct r600_context*)ctx_;
886 util_slab_free(&rctx->pool_transfers, transfer);
887 }
888
889 void r600_compute_global_transfer_flush_region(
890 struct pipe_context *ctx_,
891 struct pipe_transfer *transfer,
892 const struct pipe_box *box)
893 {
894 assert(0 && "TODO");
895 }
896
897 void r600_compute_global_transfer_inline_write(
898 struct pipe_context *pipe,
899 struct pipe_resource *resource,
900 unsigned level,
901 unsigned usage,
902 const struct pipe_box *box,
903 const void *data,
904 unsigned stride,
905 unsigned layer_stride)
906 {
907 assert(0 && "TODO");
908 }