r600g: improve flushed depth texture handling v2
[mesa.git] src/gallium/drivers/r600/evergreen_compute.c
/*
 * Copyright 2011 Adam Rak <adam.rak@streamnovation.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Adam Rak <adam.rak@streamnovation.com>
 */

#include <stdio.h>
#include <errno.h>
#include "pipe/p_defines.h"
#include "pipe/p_state.h"
#include "pipe/p_context.h"
#include "util/u_blitter.h"
#include "util/u_double_list.h"
#include "util/u_transfer.h"
#include "util/u_surface.h"
#include "util/u_pack_color.h"
#include "util/u_memory.h"
#include "util/u_inlines.h"
#include "util/u_framebuffer.h"
#include "pipebuffer/pb_buffer.h"
#include "r600.h"
#include "evergreend.h"
#include "r600_resource.h"
#include "r600_shader.h"
#include "r600_pipe.h"
#include "r600_formats.h"
#include "evergreen_compute.h"
#include "r600_hw_context_priv.h"
#include "evergreen_compute_internal.h"
#include "compute_memory_pool.h"
#ifdef HAVE_OPENCL
#include "llvm_wrapper.h"
#endif
/**
RAT0 is for global binding writes
VTX1 is for global binding reads

for writing images, RAT1...
for reading images, TEX2...
  TEX2 and RAT1 are paired

TEX2... consumes the same fetch resources that VTX2... would consume

CONST0 and VTX0 are for parameters
  CONST0 binds the smaller input parameter buffer, is used for constant
  indexing, and is constant cached
  VTX0 is for indirect/non-constant indexing, or if the input is bigger than
  the constant cache can handle

RATs are limited to 12, so we can only bind at most 11 textures for writing
because we reserve RAT0 for global bindings. With byte addressing enabled,
we should reserve another one too => 10 image bindings for writing max.

from Nvidia OpenCL:
  CL_DEVICE_MAX_READ_IMAGE_ARGS: 128
  CL_DEVICE_MAX_WRITE_IMAGE_ARGS: 8

so 10 for writing is enough. 176 is the max for reading according to the docs

writable images should be listed first (< 10), so their id corresponds to RAT(id+1)
writable images will consume TEX slots, and VTX slots too, because of linear indexing

*/
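
/* To make the mapping above concrete, here is a summary of the slot
 * assignments used by the set_* calls further down in this file (derived
 * from that code, not an additional hardware requirement):
 *
 *   kernel parameter buffer -> CONST0 and VTX0 (fetch resource 0)
 *   global memory pool      -> RAT0 for writes, VTX1 (fetch resource 1) for reads
 *   compute resource/view i -> RAT(i+1) if writable, fetch resource i+2 for reads
 */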

const struct u_resource_vtbl r600_global_buffer_vtbl =
{
	u_default_resource_get_handle, /* get_handle */
	r600_compute_global_buffer_destroy, /* resource_destroy */
	r600_compute_global_get_transfer, /* get_transfer */
	r600_compute_global_transfer_destroy, /* transfer_destroy */
	r600_compute_global_transfer_map, /* transfer_map */
	r600_compute_global_transfer_flush_region,/* transfer_flush_region */
	r600_compute_global_transfer_unmap, /* transfer_unmap */
	r600_compute_global_transfer_inline_write /* transfer_inline_write */
};


void *evergreen_create_compute_state(
	struct pipe_context *ctx_,
	const struct pipe_compute_state *cso)
{
	struct r600_context *ctx = (struct r600_context *)ctx_;

#ifdef HAVE_OPENCL
	const struct pipe_llvm_program_header * header;
	const unsigned char * code;

	header = cso->prog;
	code = cso->prog + sizeof(struct pipe_llvm_program_header);
#endif

	if (!ctx->screen->screen.get_param(&ctx->screen->screen,
							PIPE_CAP_COMPUTE)) {
		fprintf(stderr, "Compute is not supported\n");
		return NULL;
	}
	struct r600_pipe_compute *shader = CALLOC_STRUCT(r600_pipe_compute);

	shader->ctx = (struct r600_context*)ctx;
	shader->resources = (struct evergreen_compute_resource*)
			CALLOC(sizeof(struct evergreen_compute_resource),
			get_compute_resource_num());
	shader->local_size = cso->req_local_mem; ///TODO: assert it
	shader->private_size = cso->req_private_mem;
	shader->input_size = cso->req_input_mem;

#ifdef HAVE_OPENCL
	shader->mod = llvm_parse_bitcode(code, header->num_bytes);

	r600_compute_shader_create(ctx_, shader->mod, &shader->bc);
#endif
	return shader;
}

void evergreen_delete_compute_state(struct pipe_context *ctx, void* state)
{
	struct r600_pipe_compute *shader = (struct r600_pipe_compute *)state;

	free(shader->resources);
	free(shader);
}

static void evergreen_bind_compute_state(struct pipe_context *ctx_, void *state)
{
	struct r600_context *ctx = (struct r600_context *)ctx_;

	ctx->cs_shader = (struct r600_pipe_compute *)state;

	if (!ctx->cs_shader->shader_code_bo) {

		ctx->cs_shader->shader_code_bo =
			r600_compute_buffer_alloc_vram(ctx->screen,
					ctx->cs_shader->bc.ndw * 4);

		void *p = ctx->ws->buffer_map(
				ctx->cs_shader->shader_code_bo->cs_buf,
				ctx->cs, PIPE_TRANSFER_WRITE);

		memcpy(p, ctx->cs_shader->bc.bytecode, ctx->cs_shader->bc.ndw * 4);

		ctx->ws->buffer_unmap(ctx->cs_shader->shader_code_bo->cs_buf);

	}

	struct evergreen_compute_resource* res = get_empty_res(ctx->cs_shader,
					COMPUTE_RESOURCE_SHADER, 0);

	if (ctx->chip_class < CAYMAN) {
		evergreen_reg_set(res, R_008C0C_SQ_GPR_RESOURCE_MGMT_3,
			S_008C0C_NUM_LS_GPRS(ctx->cs_shader->bc.ngpr));
	}

	///maybe we can use it later
	evergreen_reg_set(res, R_0286C8_SPI_THREAD_GROUPING, 0);
	///maybe we can use it later
	evergreen_reg_set(res, R_008C14_SQ_GLOBAL_GPR_RESOURCE_MGMT_2, 0);

	evergreen_reg_set(res, R_0288D4_SQ_PGM_RESOURCES_LS,
		S_0288D4_NUM_GPRS(ctx->cs_shader->bc.ngpr)
		| S_0288D4_STACK_SIZE(ctx->cs_shader->bc.nstack));
	evergreen_reg_set(res, R_0288D8_SQ_PGM_RESOURCES_LS_2, 0);

	evergreen_reg_set(res, R_0288D0_SQ_PGM_START_LS, 0);
	res->bo = ctx->cs_shader->shader_code_bo;
	res->usage = RADEON_USAGE_READ;
	res->coher_bo_size = ctx->cs_shader->bc.ndw*4;

	r600_inval_shader_cache(ctx);

}

/* The kernel parameters are stored in a vtx buffer (ID=0). Besides the
 * explicit kernel parameters, there are implicit parameters that need to be
 * stored in the vertex buffer as well. Here is how these parameters are
 * organized in the buffer:
 *
 * DWORDS 0-2: Number of work groups in each dimension (x,y,z)
 * DWORDS 3-5: Number of global work items in each dimension (x,y,z)
 * DWORDS 6-8: Number of work items within each work group in each dimension
 *             (x,y,z)
 * DWORDS 9+ : Kernel parameters
 */
void evergreen_compute_upload_input(
	struct pipe_context *ctx_,
	const uint *block_layout,
	const uint *grid_layout,
	const void *input)
{
	struct r600_context *ctx = (struct r600_context *)ctx_;
	int i;
	unsigned kernel_parameters_offset_bytes = 36;
	uint32_t * num_work_groups_start;
	uint32_t * global_size_start;
	uint32_t * local_size_start;
	uint32_t * kernel_parameters_start;

	if (ctx->cs_shader->input_size == 0) {
		return;
	}

	if (!ctx->cs_shader->kernel_param) {
		unsigned buffer_size = ctx->cs_shader->input_size;

		/* Add space for the grid dimensions */
		buffer_size += kernel_parameters_offset_bytes * sizeof(uint);
		ctx->cs_shader->kernel_param =
				r600_compute_buffer_alloc_vram(ctx->screen,
						buffer_size);
	}

	num_work_groups_start = ctx->ws->buffer_map(
			ctx->cs_shader->kernel_param->cs_buf,
			ctx->cs, PIPE_TRANSFER_WRITE);
	global_size_start = num_work_groups_start + (3 * (sizeof(uint) / 4));
	local_size_start = global_size_start + (3 * (sizeof(uint) / 4));
	kernel_parameters_start = local_size_start + (3 * (sizeof(uint) / 4));

	/* Copy the number of work groups in each dimension */
	memcpy(num_work_groups_start, grid_layout, 3 * sizeof(uint));

	/* Copy the global size */
	for (i = 0; i < 3; i++) {
		global_size_start[i] = grid_layout[i] * block_layout[i];
	}

	/* Copy the local dimensions */
	memcpy(local_size_start, block_layout, 3 * sizeof(uint));

	/* Copy the kernel inputs */
	memcpy(kernel_parameters_start, input, ctx->cs_shader->input_size);

	for (i = 0; i < (kernel_parameters_offset_bytes / 4) +
			(ctx->cs_shader->input_size / 4); i++) {
		COMPUTE_DBG("input %i : %i\n", i,
			((unsigned*)num_work_groups_start)[i]);
	}

	ctx->ws->buffer_unmap(ctx->cs_shader->kernel_param->cs_buf);

	///ID=0 is reserved for the parameters
	evergreen_set_vtx_resource(ctx->cs_shader,
		ctx->cs_shader->kernel_param, 0, 0, 0);
	///ID=0 is reserved for parameters
	evergreen_set_const_cache(ctx->cs_shader, 0,
		ctx->cs_shader->kernel_param, ctx->cs_shader->input_size, 0);
}
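
/* A worked example of the layout written above (hypothetical values, for
 * illustration only): with grid_layout = {2, 2, 1}, block_layout = {4, 4, 1}
 * and a kernel taking a single 32-bit argument 0xCAFE, the vtx buffer would
 * begin with:
 *
 *   DWORDS 0-2: 2, 2, 1    (number of work groups)
 *   DWORDS 3-5: 8, 8, 1    (global work size = grid * block)
 *   DWORDS 6-8: 4, 4, 1    (local work size)
 *   DWORD  9  : 0xCAFE     (first kernel parameter, offset 36 bytes)
 */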

void evergreen_direct_dispatch(
		struct pipe_context *ctx_,
		const uint *block_layout, const uint *grid_layout)
{
	/* This struct r600_context* must be called rctx, because the
	 * r600_pipe_state_add_reg macro assumes there is a local variable
	 * of type struct r600_context* called rctx.
	 */
	struct r600_context *rctx = (struct r600_context *)ctx_;

	int i;

	struct evergreen_compute_resource* res = get_empty_res(rctx->cs_shader,
		COMPUTE_RESOURCE_DISPATCH, 0);
	struct r600_pipe_state * cb_state = rctx->states[R600_PIPE_STATE_FRAMEBUFFER];

	/* Set CB_TARGET_MASK */
	r600_pipe_state_add_reg(cb_state, R_028238_CB_TARGET_MASK, rctx->cb_target_mask);

	evergreen_reg_set(res, R_008958_VGT_PRIMITIVE_TYPE, V_008958_DI_PT_POINTLIST);

	evergreen_reg_set(res, R_00899C_VGT_COMPUTE_START_X, 0);
	evergreen_reg_set(res, R_0089A0_VGT_COMPUTE_START_Y, 0);
	evergreen_reg_set(res, R_0089A4_VGT_COMPUTE_START_Z, 0);

	evergreen_reg_set(res, R_0286EC_SPI_COMPUTE_NUM_THREAD_X, block_layout[0]);
	evergreen_reg_set(res, R_0286F0_SPI_COMPUTE_NUM_THREAD_Y, block_layout[1]);
	evergreen_reg_set(res, R_0286F4_SPI_COMPUTE_NUM_THREAD_Z, block_layout[2]);

	int group_size = 1;

	int grid_size = 1;

	for (i = 0; i < 3; i++) {
		group_size *= block_layout[i];
	}

	for (i = 0; i < 3; i++) {
		grid_size *= grid_layout[i];
	}

	evergreen_reg_set(res, R_008970_VGT_NUM_INDICES, group_size);
	evergreen_reg_set(res, R_0089AC_VGT_COMPUTE_THREAD_GROUP_SIZE, group_size);

	evergreen_emit_raw_value(res, PKT3C(PKT3_DISPATCH_DIRECT, 3, 0));
	evergreen_emit_raw_value(res, grid_layout[0]);
	evergreen_emit_raw_value(res, grid_layout[1]);
	evergreen_emit_raw_value(res, grid_layout[2]);
	///VGT_DISPATCH_INITIATOR = COMPUTE_SHADER_EN
	evergreen_emit_raw_value(res, 1);
}

static void compute_emit_cs(struct r600_context *ctx)
{
	struct radeon_winsys_cs *cs = ctx->cs;
	int i;

	struct r600_resource *onebo = NULL;
	struct r600_pipe_state *cb_state;

	/* Initialize all the registers common to both 3D and compute. Some
	 * 3D-only registers will be initialized by this atom as well, but
	 * this is OK for now.
	 *
	 * See evergreen_init_atom_start_cs() or cayman_init_atom_start_cs() in
	 * evergreen_state.c for the list of registers that are initialized by
	 * the start_cs_cmd atom.
	 */
	r600_emit_atom(ctx, &ctx->start_cs_cmd.atom);

	/* Initialize all the compute specific registers.
	 *
	 * See evergreen_init_atom_start_compute_cs() in this file for the list
	 * of registers initialized by the start_compute_cs_cmd atom.
	 */
	r600_emit_atom(ctx, &ctx->start_compute_cs_cmd.atom);

	/* Emit cb_state */
	cb_state = ctx->states[R600_PIPE_STATE_FRAMEBUFFER];
	r600_context_pipe_state_emit(ctx, cb_state, RADEON_CP_PACKET3_COMPUTE_MODE);

	for (i = 0; i < get_compute_resource_num(); i++) {
		if (ctx->cs_shader->resources[i].enabled) {
			int j;
			COMPUTE_DBG("resnum: %i, cdw: %i\n", i, cs->cdw);

			for (j = 0; j < ctx->cs_shader->resources[i].cs_end; j++) {
				if (ctx->cs_shader->resources[i].do_reloc[j]) {
					assert(ctx->cs_shader->resources[i].bo);
					evergreen_emit_ctx_reloc(ctx,
						ctx->cs_shader->resources[i].bo,
						ctx->cs_shader->resources[i].usage);
				}

				cs->buf[cs->cdw++] = ctx->cs_shader->resources[i].cs[j];
			}

			if (ctx->cs_shader->resources[i].bo) {
				onebo = ctx->cs_shader->resources[i].bo;
				evergreen_emit_ctx_reloc(ctx,
					ctx->cs_shader->resources[i].bo,
					ctx->cs_shader->resources[i].usage);

				///special case for textures
				if (ctx->cs_shader->resources[i].do_reloc
					[ctx->cs_shader->resources[i].cs_end] == 2) {
					evergreen_emit_ctx_reloc(ctx,
						ctx->cs_shader->resources[i].bo,
						ctx->cs_shader->resources[i].usage);
				}
			}
		}
	}

	/* r600_flush_framebuffer() updates the cb_flush_flags and then
	 * calls r600_emit_atom() on the ctx->surface_sync_cmd.atom, which emits
	 * a SURFACE_SYNC packet via r600_emit_surface_sync().
	 *
	 * XXX r600_emit_surface_sync() hardcodes the CP_COHER_SIZE to
	 * 0xffffffff, so we will need to add a field to struct
	 * r600_surface_sync_cmd if we want to manually set this value.
	 */
	r600_flush_framebuffer(ctx, true /* Flush now */);

#if 0
	COMPUTE_DBG("cdw: %i\n", cs->cdw);
	for (i = 0; i < cs->cdw; i++) {
		COMPUTE_DBG("%4i : 0x%08X\n", i, ctx->cs->buf[i]);
	}
#endif

	ctx->ws->cs_flush(ctx->cs, RADEON_FLUSH_ASYNC | RADEON_FLUSH_COMPUTE);

	ctx->pm4_dirty_cdwords = 0;
	ctx->flags = 0;

	COMPUTE_DBG("shader started\n");

	ctx->ws->buffer_wait(onebo->buf, 0);

	COMPUTE_DBG("...\n");

	ctx->streamout_start = TRUE;
	ctx->streamout_append_bitmask = ~0;

}

static void evergreen_launch_grid(
		struct pipe_context *ctx_,
		const uint *block_layout, const uint *grid_layout,
		uint32_t pc, const void *input)
{
	COMPUTE_DBG("PC: %i\n", pc);

	struct r600_context *ctx = (struct r600_context *)ctx_;
	unsigned num_waves;
	unsigned num_pipes = ctx->screen->info.r600_max_pipes;
	unsigned wave_divisor = (16 * num_pipes);

	/* num_waves = ceil((tg_size.x * tg_size.y * tg_size.z) / (16 * num_pipes)) */
	num_waves = (block_layout[0] * block_layout[1] * block_layout[2] +
			wave_divisor - 1) / wave_divisor;
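	/* For example (illustrative numbers only): a 16x16x1 thread block on a
	 * GPU reporting 8 pipes gives 256 work items / (16 * 8) = 2 wavefronts
	 * per thread block. */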

	COMPUTE_DBG("Using %u pipes, there are %u wavefronts per thread block\n",
							num_pipes, num_waves);

	evergreen_set_lds(ctx->cs_shader, 0, 0, num_waves);
	evergreen_compute_upload_input(ctx_, block_layout, grid_layout, input);
	evergreen_direct_dispatch(ctx_, block_layout, grid_layout);
	compute_emit_cs(ctx);
}

static void evergreen_set_compute_resources(struct pipe_context * ctx_,
		unsigned start, unsigned count,
		struct pipe_surface ** surfaces)
{
	struct r600_context *ctx = (struct r600_context *)ctx_;
	struct r600_surface **resources = (struct r600_surface **)surfaces;
	for (int i = 0; i < count; i++) {
		if (resources[i]) {
			struct r600_resource_global *buffer =
				(struct r600_resource_global*)resources[i]->base.texture;
			if (resources[i]->base.writable) {
				assert(i+1 < 12);

				evergreen_set_rat(ctx->cs_shader, i+1,
				(struct r600_resource *)resources[i]->base.texture,
				buffer->chunk->start_in_dw*4,
				resources[i]->base.texture->width0);
			}

			evergreen_set_vtx_resource(ctx->cs_shader,
				(struct r600_resource *)resources[i]->base.texture, i+2,
				buffer->chunk->start_in_dw*4, resources[i]->base.writable);
		}
	}

}

static void evergreen_set_cs_sampler_view(struct pipe_context *ctx_,
		unsigned start_slot, unsigned count,
		struct pipe_sampler_view **views)
{
	struct r600_context *ctx = (struct r600_context *)ctx_;
	struct r600_pipe_sampler_view **resource =
		(struct r600_pipe_sampler_view **)views;

	for (int i = 0; i < count; i++) {
		if (resource[i]) {
			assert(i+1 < 12);
			///FETCH0 = VTX0 (param buffer),
			///FETCH1 = VTX1 (global buffer pool), FETCH2... = TEX
			evergreen_set_tex_resource(ctx->cs_shader, resource[i], i+2);
		}
	}
}

static void evergreen_bind_compute_sampler_states(
	struct pipe_context *ctx_,
	unsigned start_slot,
	unsigned num_samplers,
	void **samplers_)
{
	struct r600_context *ctx = (struct r600_context *)ctx_;
	struct compute_sampler_state ** samplers =
		(struct compute_sampler_state **)samplers_;

	for (int i = 0; i < num_samplers; i++) {
		if (samplers[i]) {
			evergreen_set_sampler_resource(ctx->cs_shader, samplers[i], i);
		}
	}
}

static void evergreen_set_global_binding(
	struct pipe_context *ctx_, unsigned first, unsigned n,
	struct pipe_resource **resources,
	uint32_t **handles)
{
	struct r600_context *ctx = (struct r600_context *)ctx_;
	struct compute_memory_pool *pool = ctx->screen->global_pool;
	struct r600_resource_global **buffers =
		(struct r600_resource_global **)resources;

	if (!resources) {
		/* XXX: Unset */
		return;
	}

	compute_memory_finalize_pending(pool, ctx_);

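	/* Summary of the loop below: each handle is filled with the buffer's
	 * byte offset within the global memory pool (start_in_dw is a dword
	 * offset, hence the * 4).  The whole pool is then bound at RAT0 for
	 * writes and at VTX1 (fetch resource 1) for reads. */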
	for (int i = 0; i < n; i++)
	{
		assert(resources[i]->target == PIPE_BUFFER);
		assert(resources[i]->bind & PIPE_BIND_GLOBAL);

		*(handles[i]) = buffers[i]->chunk->start_in_dw * 4;
	}

	evergreen_set_rat(ctx->cs_shader, 0, pool->bo, 0, pool->size_in_dw * 4);
	evergreen_set_vtx_resource(ctx->cs_shader, pool->bo, 1, 0, 1);
}

/**
 * This function initializes all the compute specific registers that need to
 * be initialized for each compute command stream.  Registers that are common
 * to both compute and 3D will be initialized at the beginning of each compute
 * command stream by the start_cs_cmd atom.  However, since the SET_CONTEXT_REG
 * packet requires that the shader type bit be set, we must initialize all
 * context registers needed for compute in this function.  The registers
 * initialized by the start_cs_cmd atom can be found in evergreen_state.c in the
 * functions evergreen_init_atom_start_cs or cayman_init_atom_start_cs depending
 * on the GPU family.
 */
void evergreen_init_atom_start_compute_cs(struct r600_context *ctx)
{
	struct r600_command_buffer *cb = &ctx->start_compute_cs_cmd;
	int num_threads;
	int num_stack_entries;

	/* We aren't passing the EMIT_EARLY flag as the third argument
	 * because we will be emitting this atom manually in order to
	 * ensure it gets emitted after the start_cs_cmd atom.
	 */
	r600_init_command_buffer(cb, 256, 0);
	cb->pkt_flags = RADEON_CP_PACKET3_COMPUTE_MODE;

	switch (ctx->family) {
	case CHIP_CEDAR:
	default:
		num_threads = 128;
		num_stack_entries = 256;
		break;
	case CHIP_REDWOOD:
		num_threads = 128;
		num_stack_entries = 256;
		break;
	case CHIP_JUNIPER:
		num_threads = 128;
		num_stack_entries = 512;
		break;
	case CHIP_CYPRESS:
	case CHIP_HEMLOCK:
		num_threads = 128;
		num_stack_entries = 512;
		break;
	case CHIP_PALM:
		num_threads = 128;
		num_stack_entries = 256;
		break;
	case CHIP_SUMO:
		num_threads = 128;
		num_stack_entries = 256;
		break;
	case CHIP_SUMO2:
		num_threads = 128;
		num_stack_entries = 512;
		break;
	case CHIP_BARTS:
		num_threads = 128;
		num_stack_entries = 512;
		break;
	case CHIP_TURKS:
		num_threads = 128;
		num_stack_entries = 256;
		break;
	case CHIP_CAICOS:
		num_threads = 128;
		num_stack_entries = 256;
		break;
	}

	/* Config Registers */
	if (ctx->chip_class < CAYMAN) {

		/* These registers control which simds can be used by each stage.
		 * The default for these registers is 0xffffffff, which means
		 * all simds are available for each stage.  It's possible we may
		 * want to play around with these in the future, but for now
		 * the default value is fine.
		 *
		 * R_008E20_SQ_STATIC_THREAD_MGMT1
		 * R_008E24_SQ_STATIC_THREAD_MGMT2
		 * R_008E28_SQ_STATIC_THREAD_MGMT3
		 */

		/* XXX: We may need to adjust the thread and stack resource
		 * values for 3D/compute interop */

		r600_store_config_reg_seq(cb, R_008C18_SQ_THREAD_RESOURCE_MGMT_1, 5);

		/* R_008C18_SQ_THREAD_RESOURCE_MGMT_1
		 * Set the number of threads used by the PS/VS/GS/ES stage to
		 * 0.
		 */
		r600_store_value(cb, 0);

		/* R_008C1C_SQ_THREAD_RESOURCE_MGMT_2
		 * Set the number of threads used by the CS (aka LS) stage to
		 * the maximum number of threads and set the number of threads
		 * for the HS stage to 0. */
		r600_store_value(cb, S_008C1C_NUM_LS_THREADS(num_threads));

		/* R_008C20_SQ_STACK_RESOURCE_MGMT_1
		 * Set the Control Flow stack entries to 0 for PS/VS stages */
		r600_store_value(cb, 0);

		/* R_008C24_SQ_STACK_RESOURCE_MGMT_2
		 * Set the Control Flow stack entries to 0 for GS/ES stages */
		r600_store_value(cb, 0);

		/* R_008C28_SQ_STACK_RESOURCE_MGMT_3
		 * Set the Control Flow stack entries to 0 for the HS stage, and
		 * set it to the maximum value for the CS (aka LS) stage. */
		r600_store_value(cb,
			S_008C28_NUM_LS_STACK_ENTRIES(num_stack_entries));
	}

	/* Context Registers */

	if (ctx->chip_class < CAYMAN) {
		/* workaround for hw issues with dyn gpr - must set all limits
		 * to 240 instead of 0, 0x1e == 240 / 8
		 */
		r600_store_context_reg(cb, R_028838_SQ_DYN_GPR_RESOURCE_LIMIT_1,
				S_028838_PS_GPRS(0x1e) |
				S_028838_VS_GPRS(0x1e) |
				S_028838_GS_GPRS(0x1e) |
				S_028838_ES_GPRS(0x1e) |
				S_028838_HS_GPRS(0x1e) |
				S_028838_LS_GPRS(0x1e));
	}

	/* XXX: Investigate setting bit 15, which is FAST_COMPUTE_MODE */
	r600_store_context_reg(cb, R_028A40_VGT_GS_MODE,
		S_028A40_COMPUTE_MODE(1) | S_028A40_PARTIAL_THD_AT_EOI(1));

	r600_store_context_reg(cb, R_028B54_VGT_SHADER_STAGES_EN, 2/*CS_ON*/);

	r600_store_context_reg(cb, R_0286E8_SPI_COMPUTE_INPUT_CNTL,
		S_0286E8_TID_IN_GROUP_ENA
		| S_0286E8_TGID_ENA
		| S_0286E8_DISABLE_INDEX_PACK);

	/* The LOOP_CONST registers are an optimization for loops that allows
	 * you to store the initial counter, increment value, and maximum
	 * counter value in a register so that hardware can calculate the
	 * correct number of iterations for the loop, so that you don't need
	 * to have the loop counter in your shader code.  We don't currently use
	 * this optimization, so we must keep track of the counter in the
	 * shader and use a break instruction to exit loops.  However, the
	 * hardware will still use this register to determine when to exit a
	 * loop, so we need to initialize the counter to 0, set the increment
	 * value to 1 and the maximum counter value to 4095 (0xfff), which
	 * is the maximum value allowed.  This gives us a maximum of 4096
	 * iterations for our loops, but hopefully our break instruction will
	 * execute some time before the 4096th iteration.
	 */
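	/* Decoding the 0x1000FFF value below under that description (the exact
	 * bitfield layout is an assumption about the SQ_LOOP_CONST format, not
	 * something established elsewhere in this file): bits [11:0] = 0xFFF is
	 * the maximum iteration count, bits [23:12] = 0x000 is the initial
	 * counter value, and bits [31:24] = 0x01 is the increment. */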
	eg_store_loop_const(cb, R_03A200_SQ_LOOP_CONST_0 + (160 * 4), 0x1000FFF);
}

void evergreen_init_compute_state_functions(struct r600_context *ctx)
{
	ctx->context.create_compute_state = evergreen_create_compute_state;
	ctx->context.delete_compute_state = evergreen_delete_compute_state;
	ctx->context.bind_compute_state = evergreen_bind_compute_state;
//	 ctx->context.create_sampler_view = evergreen_compute_create_sampler_view;
	ctx->context.set_compute_resources = evergreen_set_compute_resources;
	ctx->context.set_compute_sampler_views = evergreen_set_cs_sampler_view;
	ctx->context.bind_compute_sampler_states = evergreen_bind_compute_sampler_states;
	ctx->context.set_global_binding = evergreen_set_global_binding;
	ctx->context.launch_grid = evergreen_launch_grid;
}


struct pipe_resource *r600_compute_global_buffer_create(
	struct pipe_screen *screen,
	const struct pipe_resource *templ)
{
	assert(templ->target == PIPE_BUFFER);
	assert(templ->bind & PIPE_BIND_GLOBAL);
	assert(templ->array_size == 1 || templ->array_size == 0);
	assert(templ->depth0 == 1 || templ->depth0 == 0);
	assert(templ->height0 == 1 || templ->height0 == 0);

	struct r600_resource_global* result = (struct r600_resource_global*)
		CALLOC(sizeof(struct r600_resource_global), 1);
	struct r600_screen* rscreen = (struct r600_screen*)screen;

	/* Copy the template first, then set vtbl/screen, so the template copy
	 * doesn't overwrite them. */
	result->base.b.b = *templ;
	result->base.b.vtbl = &r600_global_buffer_vtbl;
	result->base.b.b.screen = screen;
	pipe_reference_init(&result->base.b.b.reference, 1);

	int size_in_dw = (templ->width0+3) / 4;

	result->chunk = compute_memory_alloc(rscreen->global_pool, size_in_dw);

	if (result->chunk == NULL)
	{
		free(result);
		return NULL;
	}

	return &result->base.b.b;
}

void r600_compute_global_buffer_destroy(
	struct pipe_screen *screen,
	struct pipe_resource *res)
{
	assert(res->target == PIPE_BUFFER);
	assert(res->bind & PIPE_BIND_GLOBAL);

	struct r600_resource_global* buffer = (struct r600_resource_global*)res;
	struct r600_screen* rscreen = (struct r600_screen*)screen;

	compute_memory_free(rscreen->global_pool, buffer->chunk->id);

	buffer->chunk = NULL;
	free(res);
}

void* r600_compute_global_transfer_map(
	struct pipe_context *ctx_,
	struct pipe_transfer* transfer)
{
	assert(transfer->resource->target == PIPE_BUFFER);
	assert(transfer->resource->bind & PIPE_BIND_GLOBAL);
	assert(transfer->box.x >= 0);
	assert(transfer->box.y == 0);
	assert(transfer->box.z == 0);

	struct r600_context *ctx = (struct r600_context *)ctx_;
	struct r600_resource_global* buffer =
		(struct r600_resource_global*)transfer->resource;

	uint32_t* map;
	///TODO: do it better, mapping is not possible if the pool is too big

	if (!(map = ctx->ws->buffer_map(buffer->chunk->pool->bo->cs_buf,
						ctx->cs, transfer->usage))) {
		return NULL;
	}

	COMPUTE_DBG("buffer start: %lli\n", buffer->chunk->start_in_dw);
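	/* map is a uint32_t*, so adding start_in_dw steps in dwords and lands
	 * at the start of this buffer's chunk within the mapped pool; the cast
	 * to char* then lets transfer->box.x be applied as a byte offset. */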
	return ((char*)(map + buffer->chunk->start_in_dw)) + transfer->box.x;
}

void r600_compute_global_transfer_unmap(
	struct pipe_context *ctx_,
	struct pipe_transfer* transfer)
{
	assert(transfer->resource->target == PIPE_BUFFER);
	assert(transfer->resource->bind & PIPE_BIND_GLOBAL);

	struct r600_context *ctx = (struct r600_context *)ctx_;
	struct r600_resource_global* buffer =
		(struct r600_resource_global*)transfer->resource;

	ctx->ws->buffer_unmap(buffer->chunk->pool->bo->cs_buf);
}

struct pipe_transfer * r600_compute_global_get_transfer(
	struct pipe_context *ctx_,
	struct pipe_resource *resource,
	unsigned level,
	unsigned usage,
	const struct pipe_box *box)
{
	struct r600_context *ctx = (struct r600_context *)ctx_;
	struct compute_memory_pool *pool = ctx->screen->global_pool;

	compute_memory_finalize_pending(pool, ctx_);

	assert(resource->target == PIPE_BUFFER);
	struct r600_context *rctx = (struct r600_context*)ctx_;
	struct pipe_transfer *transfer = util_slab_alloc(&rctx->pool_transfers);

	transfer->resource = resource;
	transfer->level = level;
	transfer->usage = usage;
	transfer->box = *box;
	transfer->stride = 0;
	transfer->layer_stride = 0;
	transfer->data = NULL;

	/* Note: the strides are zero.  This is OK for buffers, but not for
	 * 2D textures and higher.
	 */
	return transfer;
}

void r600_compute_global_transfer_destroy(
	struct pipe_context *ctx_,
	struct pipe_transfer *transfer)
{
	struct r600_context *rctx = (struct r600_context*)ctx_;
	util_slab_free(&rctx->pool_transfers, transfer);
}

void r600_compute_global_transfer_flush_region(
	struct pipe_context *ctx_,
	struct pipe_transfer *transfer,
	const struct pipe_box *box)
{
	assert(0 && "TODO");
}

void r600_compute_global_transfer_inline_write(
	struct pipe_context *pipe,
	struct pipe_resource *resource,
	unsigned level,
	unsigned usage,
	const struct pipe_box *box,
	const void *data,
	unsigned stride,
	unsigned layer_stride)
{
	assert(0 && "TODO");
}