r600g: Add support for RATs in evergreen_cb()
[mesa.git] / src / gallium / drivers / r600 / evergreen_compute.c
/*
 * Copyright 2011 Adam Rak <adam.rak@streamnovation.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Adam Rak <adam.rak@streamnovation.com>
 */

#include <stdio.h>
#include <errno.h>
#include "pipe/p_defines.h"
#include "pipe/p_state.h"
#include "pipe/p_context.h"
#include "util/u_blitter.h"
#include "util/u_double_list.h"
#include "util/u_transfer.h"
#include "util/u_surface.h"
#include "util/u_pack_color.h"
#include "util/u_memory.h"
#include "util/u_inlines.h"
#include "util/u_framebuffer.h"
#include "pipebuffer/pb_buffer.h"
#include "r600.h"
#include "evergreend.h"
#include "r600_resource.h"
#include "r600_shader.h"
#include "r600_pipe.h"
#include "r600_formats.h"
#include "evergreen_compute.h"
#include "r600_hw_context_priv.h"
#include "evergreen_compute_internal.h"
#include "compute_memory_pool.h"
#ifdef HAVE_OPENCL
#include "llvm_wrapper.h"
#endif

/**
RAT0 is for global binding write
VTX1 is for global binding read

for writing images RAT1...
for reading images TEX2...
  TEX2-RAT1 is paired

TEX2... consumes the same fetch resources that VTX2... would consume

CONST0 and VTX0 are for parameters
  CONST0 binds the smaller input parameter buffer, and is used for constant
  indexing; it is also constant cached
  VTX0 is for indirect/non-constant indexing, or if the input is bigger than
  the constant cache can handle

RATs are limited to 12, so we can bind at most 11 textures for writing
because we reserve RAT0 for global bindings. With byte addressing enabled,
we should reserve another one too, which leaves at most 10 image bindings
for writing.

from Nvidia OpenCL:
  CL_DEVICE_MAX_READ_IMAGE_ARGS:        128
  CL_DEVICE_MAX_WRITE_IMAGE_ARGS:       8

so 10 for writing is enough. 176 is the max for reading according to the docs.

writable images should be listed first < 10, so their id corresponds to RAT(id+1)
writable images will consume TEX slots, and VTX slots too because of linear indexing

*/

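/* Resource vtable for PIPE_BIND_GLOBAL buffers: these buffers are
 * sub-allocated from the screen's compute memory pool, so their
 * create/destroy/transfer hooks are routed to the compute-specific helpers
 * defined at the end of this file.
 */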
const struct u_resource_vtbl r600_global_buffer_vtbl =
{
	u_default_resource_get_handle, /* get_handle */
	r600_compute_global_buffer_destroy, /* resource_destroy */
	r600_compute_global_get_transfer, /* get_transfer */
	r600_compute_global_transfer_destroy, /* transfer_destroy */
	r600_compute_global_transfer_map, /* transfer_map */
	r600_compute_global_transfer_flush_region,/* transfer_flush_region */
	r600_compute_global_transfer_unmap, /* transfer_unmap */
	r600_compute_global_transfer_inline_write /* transfer_inline_write */
};


void *evergreen_create_compute_state(
	struct pipe_context *ctx_,
	const struct pipe_compute_state *cso)
{
	struct r600_context *ctx = (struct r600_context *)ctx_;

#ifdef HAVE_OPENCL
	const struct pipe_llvm_program_header * header;
	const unsigned char * code;

	header = cso->prog;
	code = cso->prog + sizeof(struct pipe_llvm_program_header);
#endif

	if (!ctx->screen->screen.get_param(&ctx->screen->screen,
							PIPE_CAP_COMPUTE)) {
		fprintf(stderr, "Compute is not supported\n");
		return NULL;
	}
	struct r600_pipe_compute *shader = CALLOC_STRUCT(r600_pipe_compute);

	shader->ctx = (struct r600_context*)ctx;
	shader->resources = (struct evergreen_compute_resource*)
			CALLOC(sizeof(struct evergreen_compute_resource),
			get_compute_resource_num());
	shader->local_size = cso->req_local_mem; ///TODO: assert it
	shader->private_size = cso->req_private_mem;
	shader->input_size = cso->req_input_mem;

#ifdef HAVE_OPENCL
	shader->mod = llvm_parse_bitcode(code, header->num_bytes);

	r600_compute_shader_create(ctx_, shader->mod, &shader->bc);
#endif
	return shader;
}

void evergreen_delete_compute_state(struct pipe_context *ctx, void* state)
{
	struct r600_pipe_compute *shader = (struct r600_pipe_compute *)state;

	free(shader->resources);
	free(shader);
}

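/* On the first bind the compiled bytecode is copied into a freshly allocated
 * VRAM buffer; every bind then programs SQ_PGM_START_LS (via a relocation on
 * that code buffer) and the LS GPR/stack limits from the bytecode statistics.
 */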
static void evergreen_bind_compute_state(struct pipe_context *ctx_, void *state)
{
	struct r600_context *ctx = (struct r600_context *)ctx_;

	ctx->cs_shader = (struct r600_pipe_compute *)state;

	if (!ctx->cs_shader->shader_code_bo) {

		ctx->cs_shader->shader_code_bo =
			r600_compute_buffer_alloc_vram(ctx->screen,
					ctx->cs_shader->bc.ndw * 4);

		void *p = ctx->ws->buffer_map(
				ctx->cs_shader->shader_code_bo->cs_buf,
				ctx->cs, PIPE_TRANSFER_WRITE);

		memcpy(p, ctx->cs_shader->bc.bytecode, ctx->cs_shader->bc.ndw * 4);

		ctx->ws->buffer_unmap(ctx->cs_shader->shader_code_bo->cs_buf);

	}

	struct evergreen_compute_resource* res = get_empty_res(ctx->cs_shader,
						COMPUTE_RESOURCE_SHADER, 0);

	if (ctx->chip_class < CAYMAN) {
		evergreen_reg_set(res, R_008C0C_SQ_GPR_RESOURCE_MGMT_3,
			S_008C0C_NUM_LS_GPRS(ctx->cs_shader->bc.ngpr));
	}

	///maybe we can use it later
	evergreen_reg_set(res, R_0286C8_SPI_THREAD_GROUPING, 0);
	///maybe we can use it later
	evergreen_reg_set(res, R_008C14_SQ_GLOBAL_GPR_RESOURCE_MGMT_2, 0);

	evergreen_reg_set(res, R_0288D4_SQ_PGM_RESOURCES_LS,
		S_0288D4_NUM_GPRS(ctx->cs_shader->bc.ngpr)
		| S_0288D4_STACK_SIZE(ctx->cs_shader->bc.nstack));
	evergreen_reg_set(res, R_0288D8_SQ_PGM_RESOURCES_LS_2, 0);

	evergreen_reg_set(res, R_0288D0_SQ_PGM_START_LS, 0);
	res->bo = ctx->cs_shader->shader_code_bo;
	res->usage = RADEON_USAGE_READ;
	res->coher_bo_size = ctx->cs_shader->bc.ndw*4;

	r600_inval_shader_cache(ctx);

}

/* The kernel parameters are stored in a vtx buffer (ID=0). Besides the
 * explicit kernel parameters, there are implicit parameters that need to be
 * stored in the vertex buffer as well. Here is how these parameters are
 * organized in the buffer:
 *
 * DWORDS 0-2: Number of work groups in each dimension (x,y,z)
 * DWORDS 3-5: Number of global work items in each dimension (x,y,z)
 * DWORDS 6-8: Number of work items within each work group in each dimension
 *             (x,y,z)
 * DWORDS 9+ : Kernel parameters
 */
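/* For example, a 2x2x1 grid of 64x1x1 blocks with 8 bytes of kernel arguments
 * is laid out as dw[0..2] = {2,2,1}, dw[3..5] = {128,2,1}, dw[6..8] = {64,1,1},
 * dw[9..10] = the arguments, i.e. 9*4 + 8 = 44 bytes in total.
 */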
void evergreen_compute_upload_input(
	struct pipe_context *ctx_,
	const uint *block_layout,
	const uint *grid_layout,
	const void *input)
{
	struct r600_context *ctx = (struct r600_context *)ctx_;
	int i;
	unsigned kernel_parameters_offset_bytes = 36;
	uint32_t * num_work_groups_start;
	uint32_t * global_size_start;
	uint32_t * local_size_start;
	uint32_t * kernel_parameters_start;

	if (ctx->cs_shader->input_size == 0) {
		return;
	}

	if (!ctx->cs_shader->kernel_param) {
		unsigned buffer_size = ctx->cs_shader->input_size;

		/* Add space for the grid dimensions */
		buffer_size += kernel_parameters_offset_bytes * sizeof(uint);
		ctx->cs_shader->kernel_param =
				r600_compute_buffer_alloc_vram(ctx->screen,
						buffer_size);
	}

	num_work_groups_start = ctx->ws->buffer_map(
			ctx->cs_shader->kernel_param->cs_buf,
			ctx->cs, PIPE_TRANSFER_WRITE);
	global_size_start = num_work_groups_start + (3 * (sizeof(uint) / 4));
	local_size_start = global_size_start + (3 * (sizeof(uint) / 4));
	kernel_parameters_start = local_size_start + (3 * (sizeof(uint) / 4));

	/* Copy the number of work groups */
	memcpy(num_work_groups_start, grid_layout, 3 * sizeof(uint));

	/* Copy the global size */
	for (i = 0; i < 3; i++) {
		global_size_start[i] = grid_layout[i] * block_layout[i];
	}

	/* Copy the local dimensions */
	memcpy(local_size_start, block_layout, 3 * sizeof(uint));

	/* Copy the kernel inputs */
	memcpy(kernel_parameters_start, input, ctx->cs_shader->input_size);

	for (i = 0; i < (kernel_parameters_offset_bytes / 4) +
			(ctx->cs_shader->input_size / 4); i++) {
		COMPUTE_DBG("input %i : %i\n", i,
			((unsigned*)num_work_groups_start)[i]);
	}

	ctx->ws->buffer_unmap(ctx->cs_shader->kernel_param->cs_buf);

	///ID=0 is reserved for the parameters
	evergreen_set_vtx_resource(ctx->cs_shader,
		ctx->cs_shader->kernel_param, 0, 0, 0);
	///ID=0 is reserved for the parameters
	evergreen_set_const_cache(ctx->cs_shader, 0,
		ctx->cs_shader->kernel_param, ctx->cs_shader->input_size, 0);
}

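/* Programs the per-dispatch VGT/SPI state (thread group size per dimension,
 * compute start coordinates) and emits a DISPATCH_DIRECT packet whose three
 * operands are the number of thread groups in x, y and z.
 */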
void evergreen_direct_dispatch(
		struct pipe_context *ctx_,
		const uint *block_layout, const uint *grid_layout)
{
	struct r600_context *ctx = (struct r600_context *)ctx_;

	int i;

	struct evergreen_compute_resource* res = get_empty_res(ctx->cs_shader,
		COMPUTE_RESOURCE_DISPATCH, 0);

	evergreen_reg_set(res, R_008958_VGT_PRIMITIVE_TYPE, V_008958_DI_PT_POINTLIST);

	evergreen_reg_set(res, R_00899C_VGT_COMPUTE_START_X, 0);
	evergreen_reg_set(res, R_0089A0_VGT_COMPUTE_START_Y, 0);
	evergreen_reg_set(res, R_0089A4_VGT_COMPUTE_START_Z, 0);

	evergreen_reg_set(res, R_0286EC_SPI_COMPUTE_NUM_THREAD_X, block_layout[0]);
	evergreen_reg_set(res, R_0286F0_SPI_COMPUTE_NUM_THREAD_Y, block_layout[1]);
	evergreen_reg_set(res, R_0286F4_SPI_COMPUTE_NUM_THREAD_Z, block_layout[2]);

	int group_size = 1;

	int grid_size = 1;

	for (i = 0; i < 3; i++) {
		group_size *= block_layout[i];
	}

	for (i = 0; i < 3; i++) {
		grid_size *= grid_layout[i];
	}

	evergreen_reg_set(res, R_008970_VGT_NUM_INDICES, group_size);
	evergreen_reg_set(res, R_0089AC_VGT_COMPUTE_THREAD_GROUP_SIZE, group_size);

	evergreen_emit_raw_value(res, PKT3C(PKT3_DISPATCH_DIRECT, 3, 0));
	evergreen_emit_raw_value(res, grid_layout[0]);
	evergreen_emit_raw_value(res, grid_layout[1]);
	evergreen_emit_raw_value(res, grid_layout[2]);
	///VGT_DISPATCH_INITIATOR = COMPUTE_SHADER_EN
	evergreen_emit_raw_value(res, 1);
}

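/* Builds the compute command stream: emits the common and compute-specific
 * start atoms, writes out the dwords (and relocations) recorded for every
 * enabled compute resource, flushes the framebuffer caches and finally
 * submits the stream asynchronously to the kernel.
 */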
static void compute_emit_cs(struct r600_context *ctx)
{
	struct radeon_winsys_cs *cs = ctx->cs;
	int i;

	struct r600_resource *onebo = NULL;

	/* Initialize all the registers common to both 3D and compute. Some
	 * 3D only registers will be initialized by this atom as well, but
	 * this is OK for now.
	 *
	 * See evergreen_init_atom_start_cs() or cayman_init_atom_start_cs() in
	 * evergreen_state.c for the list of registers that are initialized by
	 * the start_cs_cmd atom.
	 */
	r600_emit_atom(ctx, &ctx->start_cs_cmd.atom);

	/* Initialize all the compute specific registers.
	 *
	 * See evergreen_init_atom_start_compute_cs() in this file for the list
	 * of registers initialized by the start_compute_cs_cmd atom.
	 */
	r600_emit_atom(ctx, &ctx->start_compute_cs_cmd.atom);

	for (i = 0; i < get_compute_resource_num(); i++) {
		if (ctx->cs_shader->resources[i].enabled) {
			int j;
			COMPUTE_DBG("resnum: %i, cdw: %i\n", i, cs->cdw);

			for (j = 0; j < ctx->cs_shader->resources[i].cs_end; j++) {
				if (ctx->cs_shader->resources[i].do_reloc[j]) {
					assert(ctx->cs_shader->resources[i].bo);
					evergreen_emit_ctx_reloc(ctx,
						ctx->cs_shader->resources[i].bo,
						ctx->cs_shader->resources[i].usage);
				}

				cs->buf[cs->cdw++] = ctx->cs_shader->resources[i].cs[j];
			}

			if (ctx->cs_shader->resources[i].bo) {
				onebo = ctx->cs_shader->resources[i].bo;
				evergreen_emit_ctx_reloc(ctx,
					ctx->cs_shader->resources[i].bo,
					ctx->cs_shader->resources[i].usage);

				///special case for textures
				if (ctx->cs_shader->resources[i].do_reloc
					[ctx->cs_shader->resources[i].cs_end] == 2) {
					evergreen_emit_ctx_reloc(ctx,
						ctx->cs_shader->resources[i].bo,
						ctx->cs_shader->resources[i].usage);
				}
			}
		}
	}

	/* r600_flush_framebuffer() updates the cb_flush_flags and then
	 * calls r600_emit_atom() on the ctx->surface_sync_cmd.atom, which emits
	 * a SURFACE_SYNC packet via r600_emit_surface_sync().
	 *
	 * XXX r600_emit_surface_sync() hardcodes the CP_COHER_SIZE to
	 * 0xffffffff, so we will need to add a field to struct
	 * r600_surface_sync_cmd if we want to manually set this value.
	 */
	r600_flush_framebuffer(ctx, true /* Flush now */);

#if 0
	COMPUTE_DBG("cdw: %i\n", cs->cdw);
	for (i = 0; i < cs->cdw; i++) {
		COMPUTE_DBG("%4i : 0x%08X\n", i, ctx->cs->buf[i]);
	}
#endif

	ctx->ws->cs_flush(ctx->cs, RADEON_FLUSH_ASYNC | RADEON_FLUSH_COMPUTE);

	ctx->pm4_dirty_cdwords = 0;
	ctx->flags = 0;

	COMPUTE_DBG("shader started\n");

	ctx->ws->buffer_wait(onebo->buf, 0);

	COMPUTE_DBG("...\n");

	ctx->streamout_start = TRUE;
	ctx->streamout_append_bitmask = ~0;

}

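/* Entry point for pipe_context::launch_grid: computes the number of
 * wavefronts per thread group for the LDS setup, uploads the kernel inputs,
 * records the dispatch packets and then flushes the command stream.
 */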
static void evergreen_launch_grid(
		struct pipe_context *ctx_,
		const uint *block_layout, const uint *grid_layout,
		uint32_t pc, const void *input)
{
	COMPUTE_DBG("PC: %i\n", pc);

	struct r600_context *ctx = (struct r600_context *)ctx_;
	unsigned num_waves;
	unsigned num_pipes = ctx->screen->info.r600_max_pipes;
	unsigned wave_divisor = (16 * num_pipes);

	/* num_waves = ceil((tg_size.x * tg_size.y * tg_size.z) / (16 * num_pipes)) */
	num_waves = (block_layout[0] * block_layout[1] * block_layout[2] +
			wave_divisor - 1) / wave_divisor;

	COMPUTE_DBG("Using %u pipes, there are %u wavefronts per thread block\n",
							num_pipes, num_waves);

	evergreen_set_lds(ctx->cs_shader, 0, 0, num_waves);
	evergreen_compute_upload_input(ctx_, block_layout, grid_layout, input);
	evergreen_direct_dispatch(ctx_, block_layout, grid_layout);
	compute_emit_cs(ctx);
}

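/* Binds the compute "resources" (pipe_surfaces): surface i is made readable
 * through fetch slot i+2 (VTX0/VTX1 are reserved for the parameters and the
 * global pool), and, if writable, is additionally bound as RAT(i+1) since
 * RAT0 is reserved for the global binding.
 */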
static void evergreen_set_compute_resources(struct pipe_context * ctx_,
		unsigned start, unsigned count,
		struct pipe_surface ** surfaces)
{
	struct r600_context *ctx = (struct r600_context *)ctx_;
	struct r600_surface **resources = (struct r600_surface **)surfaces;
	for (int i = 0; i < count; i++) {
		if (resources[i]) {
			struct r600_resource_global *buffer =
				(struct r600_resource_global*)resources[i]->base.texture;
			if (resources[i]->base.writable) {
				assert(i+1 < 12);

				evergreen_set_rat(ctx->cs_shader, i+1,
					(struct r600_resource *)resources[i]->base.texture,
					buffer->chunk->start_in_dw*4,
					resources[i]->base.texture->width0);
			}

			evergreen_set_vtx_resource(ctx->cs_shader,
				(struct r600_resource *)resources[i]->base.texture, i+2,
				buffer->chunk->start_in_dw*4, resources[i]->base.writable);
		}
	}

}

static void evergreen_set_cs_sampler_view(struct pipe_context *ctx_,
		unsigned start_slot, unsigned count,
		struct pipe_sampler_view **views)
{
	struct r600_context *ctx = (struct r600_context *)ctx_;
	struct r600_pipe_sampler_view **resource =
		(struct r600_pipe_sampler_view **)views;

	for (int i = 0; i < count; i++) {
		if (resource[i]) {
			assert(i+1 < 12);
			///FETCH0 = VTX0 (param buffer),
			///FETCH1 = VTX1 (global buffer pool), FETCH2... = TEX
			evergreen_set_tex_resource(ctx->cs_shader, resource[i], i+2);
		}
	}
}

static void evergreen_bind_compute_sampler_states(
	struct pipe_context *ctx_,
	unsigned start_slot,
	unsigned num_samplers,
	void **samplers_)
{
	struct r600_context *ctx = (struct r600_context *)ctx_;
	struct compute_sampler_state ** samplers =
		(struct compute_sampler_state **)samplers_;

	for (int i = 0; i < num_samplers; i++) {
		if (samplers[i]) {
			evergreen_set_sampler_resource(ctx->cs_shader, samplers[i], i);
		}
	}
}

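/* Binds PIPE_BIND_GLOBAL buffers. Each buffer already lives in the shared
 * compute memory pool, so the "handle" written back to the state tracker is
 * simply the byte offset of the buffer's chunk inside that pool; the pool
 * itself is then bound as RAT0 (writes) and fetch slot VTX1 (reads).
 */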
static void evergreen_set_global_binding(
	struct pipe_context *ctx_, unsigned first, unsigned n,
	struct pipe_resource **resources,
	uint32_t **handles)
{
	struct r600_context *ctx = (struct r600_context *)ctx_;
	struct compute_memory_pool *pool = ctx->screen->global_pool;
	struct r600_resource_global **buffers =
		(struct r600_resource_global **)resources;

	if (!resources) {
		/* XXX: Unset */
		return;
	}

	compute_memory_finalize_pending(pool, ctx_);

	for (int i = 0; i < n; i++)
	{
		assert(resources[i]->target == PIPE_BUFFER);
		assert(resources[i]->bind & PIPE_BIND_GLOBAL);

		*(handles[i]) = buffers[i]->chunk->start_in_dw * 4;
	}

	evergreen_set_rat(ctx->cs_shader, 0, pool->bo, 0, pool->size_in_dw * 4);
	evergreen_set_vtx_resource(ctx->cs_shader, pool->bo, 1, 0, 1);
}

/**
 * This function initializes all the compute specific registers that need to
 * be initialized for each compute command stream. Registers that are common
 * to both compute and 3D will be initialized at the beginning of each compute
 * command stream by the start_cs_cmd atom. However, since the SET_CONTEXT_REG
 * packet requires that the shader type bit be set, we must initialize all
 * context registers needed for compute in this function. The registers
 * initialized by the start_cs_cmd atom can be found in evergreen_state.c in the
 * functions evergreen_init_atom_start_cs or cayman_init_atom_start_cs depending
 * on the GPU family.
 */
void evergreen_init_atom_start_compute_cs(struct r600_context *ctx)
{
	struct r600_command_buffer *cb = &ctx->start_compute_cs_cmd;
	int num_threads;
	int num_stack_entries;

	/* We aren't passing the EMIT_EARLY flag as the third argument
	 * because we will be emitting this atom manually in order to
	 * ensure it gets emitted after the start_cs_cmd atom.
	 */
	r600_init_command_buffer(cb, 256, 0);
	cb->pkt_flags = RADEON_CP_PACKET3_COMPUTE_MODE;

	switch (ctx->family) {
	case CHIP_CEDAR:
	default:
		num_threads = 128;
		num_stack_entries = 256;
		break;
	case CHIP_REDWOOD:
		num_threads = 128;
		num_stack_entries = 256;
		break;
	case CHIP_JUNIPER:
		num_threads = 128;
		num_stack_entries = 512;
		break;
	case CHIP_CYPRESS:
	case CHIP_HEMLOCK:
		num_threads = 128;
		num_stack_entries = 512;
		break;
	case CHIP_PALM:
		num_threads = 128;
		num_stack_entries = 256;
		break;
	case CHIP_SUMO:
		num_threads = 128;
		num_stack_entries = 256;
		break;
	case CHIP_SUMO2:
		num_threads = 128;
		num_stack_entries = 512;
		break;
	case CHIP_BARTS:
		num_threads = 128;
		num_stack_entries = 512;
		break;
	case CHIP_TURKS:
		num_threads = 128;
		num_stack_entries = 256;
		break;
	case CHIP_CAICOS:
		num_threads = 128;
		num_stack_entries = 256;
		break;
	}

	/* Config Registers */
	if (ctx->chip_class < CAYMAN) {

		/* These registers control which simds can be used by each stage.
		 * The default for these registers is 0xffffffff, which means
		 * all simds are available for each stage. It's possible we may
		 * want to play around with these in the future, but for now
		 * the default value is fine.
		 *
		 * R_008E20_SQ_STATIC_THREAD_MGMT1
		 * R_008E24_SQ_STATIC_THREAD_MGMT2
		 * R_008E28_SQ_STATIC_THREAD_MGMT3
		 */

		/* XXX: We may need to adjust the thread and stack resource
		 * values for 3D/compute interop */

		r600_store_config_reg_seq(cb, R_008C18_SQ_THREAD_RESOURCE_MGMT_1, 5);

		/* R_008C18_SQ_THREAD_RESOURCE_MGMT_1
		 * Set the number of threads used by the PS/VS/GS/ES stage to
		 * 0.
		 */
		r600_store_value(cb, 0);

		/* R_008C1C_SQ_THREAD_RESOURCE_MGMT_2
		 * Set the number of threads used by the CS (aka LS) stage to
		 * the maximum number of threads and set the number of threads
		 * for the HS stage to 0. */
		r600_store_value(cb, S_008C1C_NUM_LS_THREADS(num_threads));

		/* R_008C20_SQ_STACK_RESOURCE_MGMT_1
		 * Set the Control Flow stack entries to 0 for PS/VS stages */
		r600_store_value(cb, 0);

		/* R_008C24_SQ_STACK_RESOURCE_MGMT_2
		 * Set the Control Flow stack entries to 0 for GS/ES stages */
		r600_store_value(cb, 0);

		/* R_008C28_SQ_STACK_RESOURCE_MGMT_3
		 * Set the Control Flow stack entries to 0 for the HS stage, and
		 * set it to the maximum value for the CS (aka LS) stage. */
		r600_store_value(cb,
			S_008C28_NUM_LS_STACK_ENTRIES(num_stack_entries));
	}

	/* Context Registers */

	if (ctx->chip_class < CAYMAN) {
		/* workaround for hw issues with dyn gpr - must set all limits
		 * to 240 instead of 0, 0x1e == 240 / 8
		 */
		r600_store_context_reg(cb, R_028838_SQ_DYN_GPR_RESOURCE_LIMIT_1,
				S_028838_PS_GPRS(0x1e) |
				S_028838_VS_GPRS(0x1e) |
				S_028838_GS_GPRS(0x1e) |
				S_028838_ES_GPRS(0x1e) |
				S_028838_HS_GPRS(0x1e) |
				S_028838_LS_GPRS(0x1e));
	}

	/* XXX: Investigate setting bit 15, which is FAST_COMPUTE_MODE */
	r600_store_context_reg(cb, R_028A40_VGT_GS_MODE,
		S_028A40_COMPUTE_MODE(1) | S_028A40_PARTIAL_THD_AT_EOI(1));

	r600_store_context_reg(cb, R_028B54_VGT_SHADER_STAGES_EN, 2/*CS_ON*/);

	r600_store_context_reg(cb, R_0286E8_SPI_COMPUTE_INPUT_CNTL,
						S_0286E8_TID_IN_GROUP_ENA
						| S_0286E8_TGID_ENA
						| S_0286E8_DISABLE_INDEX_PACK);

	/* The LOOP_CONST registers are an optimization for loops that allows
	 * you to store the initial counter, increment value, and maximum
	 * counter value in a register so that hardware can calculate the
	 * correct number of iterations for the loop, so that you don't need
	 * to have the loop counter in your shader code. We don't currently use
	 * this optimization, so we must keep track of the counter in the
	 * shader and use a break instruction to exit loops. However, the
	 * hardware will still use this register to determine when to exit a
	 * loop, so we need to initialize the counter to 0, set the increment
	 * value to 1 and the maximum counter value to 4095 (0xfff), which
	 * is the maximum value allowed. This gives us a maximum of 4096
	 * iterations for our loops, but hopefully our break instruction will
	 * execute some time before the 4096th iteration.
	 */
	eg_store_loop_const(cb, R_03A200_SQ_LOOP_CONST_0 + (160 * 4), 0x1000FFF);
}

void evergreen_init_compute_state_functions(struct r600_context *ctx)
{
	ctx->context.create_compute_state = evergreen_create_compute_state;
	ctx->context.delete_compute_state = evergreen_delete_compute_state;
	ctx->context.bind_compute_state = evergreen_bind_compute_state;
//	ctx->context.create_sampler_view = evergreen_compute_create_sampler_view;
	ctx->context.set_compute_resources = evergreen_set_compute_resources;
	ctx->context.set_compute_sampler_views = evergreen_set_cs_sampler_view;
	ctx->context.bind_compute_sampler_states = evergreen_bind_compute_sampler_states;
	ctx->context.set_global_binding = evergreen_set_global_binding;
	ctx->context.launch_grid = evergreen_launch_grid;
}


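/* PIPE_BIND_GLOBAL buffers are not standalone buffer objects; each one is a
 * chunk sub-allocated (in dwords) from the screen-wide compute memory pool,
 * which is what lets the kernel address all of them through the single
 * RAT0/VTX1 binding set up in evergreen_set_global_binding().
 */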
struct pipe_resource *r600_compute_global_buffer_create(
	struct pipe_screen *screen,
	const struct pipe_resource *templ)
{
	assert(templ->target == PIPE_BUFFER);
	assert(templ->bind & PIPE_BIND_GLOBAL);
	assert(templ->array_size == 1 || templ->array_size == 0);
	assert(templ->depth0 == 1 || templ->depth0 == 0);
	assert(templ->height0 == 1 || templ->height0 == 0);

	struct r600_resource_global* result = (struct r600_resource_global*)
		CALLOC(sizeof(struct r600_resource_global), 1);
	struct r600_screen* rscreen = (struct r600_screen*)screen;

	result->base.b.vtbl = &r600_global_buffer_vtbl;
	result->base.b.b.screen = screen;
	result->base.b.b = *templ;
	pipe_reference_init(&result->base.b.b.reference, 1);

	int size_in_dw = (templ->width0+3) / 4;

	result->chunk = compute_memory_alloc(rscreen->global_pool, size_in_dw);

	if (result->chunk == NULL)
	{
		free(result);
		return NULL;
	}

	return &result->base.b.b;
}

void r600_compute_global_buffer_destroy(
	struct pipe_screen *screen,
	struct pipe_resource *res)
{
	assert(res->target == PIPE_BUFFER);
	assert(res->bind & PIPE_BIND_GLOBAL);

	struct r600_resource_global* buffer = (struct r600_resource_global*)res;
	struct r600_screen* rscreen = (struct r600_screen*)screen;

	compute_memory_free(rscreen->global_pool, buffer->chunk->id);

	buffer->chunk = NULL;
	free(res);
}

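/* Mapping a global buffer maps the whole pool buffer object and returns a
 * pointer offset by the chunk's start (plus transfer->box.x), so the caller
 * only ever sees its own slice of the pool.
 */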
void* r600_compute_global_transfer_map(
	struct pipe_context *ctx_,
	struct pipe_transfer* transfer)
{
	assert(transfer->resource->target == PIPE_BUFFER);
	assert(transfer->resource->bind & PIPE_BIND_GLOBAL);
	assert(transfer->box.x >= 0);
	assert(transfer->box.y == 0);
	assert(transfer->box.z == 0);

	struct r600_context *ctx = (struct r600_context *)ctx_;
	struct r600_resource_global* buffer =
		(struct r600_resource_global*)transfer->resource;

	uint32_t* map;
	///TODO: do it better, mapping is not possible if the pool is too big

	if (!(map = ctx->ws->buffer_map(buffer->chunk->pool->bo->cs_buf,
						ctx->cs, transfer->usage))) {
		return NULL;
	}

	COMPUTE_DBG("buffer start: %lli\n", buffer->chunk->start_in_dw);
	return ((char*)(map + buffer->chunk->start_in_dw)) + transfer->box.x;
}

void r600_compute_global_transfer_unmap(
	struct pipe_context *ctx_,
	struct pipe_transfer* transfer)
{
	assert(transfer->resource->target == PIPE_BUFFER);
	assert(transfer->resource->bind & PIPE_BIND_GLOBAL);

	struct r600_context *ctx = (struct r600_context *)ctx_;
	struct r600_resource_global* buffer =
		(struct r600_resource_global*)transfer->resource;

	ctx->ws->buffer_unmap(buffer->chunk->pool->bo->cs_buf);
}

struct pipe_transfer * r600_compute_global_get_transfer(
	struct pipe_context *ctx_,
	struct pipe_resource *resource,
	unsigned level,
	unsigned usage,
	const struct pipe_box *box)
{
	struct r600_context *ctx = (struct r600_context *)ctx_;
	struct compute_memory_pool *pool = ctx->screen->global_pool;

	compute_memory_finalize_pending(pool, ctx_);

	assert(resource->target == PIPE_BUFFER);
	struct r600_context *rctx = (struct r600_context*)ctx_;
	struct pipe_transfer *transfer = util_slab_alloc(&rctx->pool_transfers);

	transfer->resource = resource;
	transfer->level = level;
	transfer->usage = usage;
	transfer->box = *box;
	transfer->stride = 0;
	transfer->layer_stride = 0;
	transfer->data = NULL;

	/* Note strides are zero, this is ok for buffers, but not for
	 * textures 2d & higher at least.
	 */
	return transfer;
}

void r600_compute_global_transfer_destroy(
	struct pipe_context *ctx_,
	struct pipe_transfer *transfer)
{
	struct r600_context *rctx = (struct r600_context*)ctx_;
	util_slab_free(&rctx->pool_transfers, transfer);
}

void r600_compute_global_transfer_flush_region(
	struct pipe_context *ctx_,
	struct pipe_transfer *transfer,
	const struct pipe_box *box)
{
	assert(0 && "TODO");
}

void r600_compute_global_transfer_inline_write(
	struct pipe_context *pipe,
	struct pipe_resource *resource,
	unsigned level,
	unsigned usage,
	const struct pipe_box *box,
	const void *data,
	unsigned stride,
	unsigned layer_stride)
{
	assert(0 && "TODO");
}