/*
 * Copyright 2011 Adam Rak <adam.rak@streamnovation.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Adam Rak <adam.rak@streamnovation.com>
 */
#include "pipe/p_defines.h"
#include "pipe/p_state.h"
#include "pipe/p_context.h"
#include "util/u_blitter.h"
#include "util/u_double_list.h"
#include "util/u_transfer.h"
#include "util/u_surface.h"
#include "util/u_pack_color.h"
#include "util/u_memory.h"
#include "util/u_inlines.h"
#include "util/u_framebuffer.h"
#include "pipebuffer/pb_buffer.h"
#include "evergreend.h"
#include "r600_shader.h"
#include "r600_pipe.h"
#include "r600_formats.h"
#include "evergreen_compute.h"
#include "evergreen_compute_internal.h"
#include "compute_memory_pool.h"
#include "sb/sb_public.h"
#include "radeon_llvm_util.h"
/*
 * RAT0 is for global binding write
 * VTX1 is for global binding read
 *
 * for writing images RAT1...
 * for reading images TEX2...
 *   TEX2-RAT1 is paired
 *
 * TEX2... consumes the same fetch resources that VTX2... would consume
 *
 * CONST0 and VTX0 are for parameters
 *   CONST0 binds the smaller input parameter buffer, for constant indexing;
 *   it is also constant cached
 *   VTX0 is for indirect/non-constant indexing, or if the input is bigger
 *   than the constant cache can handle
 *
 * RATs are limited to 12, so we can only bind at most 11 textures for
 * writing, because we reserve RAT0 for global bindings. With byte
 * addressing enabled, we should reserve another one too => 10 image
 * bindings for writing max.
 *
 * from Nvidia OpenCL:
 *   CL_DEVICE_MAX_READ_IMAGE_ARGS:  128
 *   CL_DEVICE_MAX_WRITE_IMAGE_ARGS: 8
 *
 * so 10 for writing is enough. 176 is the max for reading according to
 * the docs
 *
 * writable images should be listed first < 10, so their id corresponds
 * to RAT(id+1)
 * writable images will consume TEX slots, VTX slots too, because of
 * linear indexing
 */
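/* A worked example of the scheme above (illustrative, not taken from the
 * code): a kernel with two writable images and three read-only images would
 * bind the writable ones to RAT1 and RAT2 (RAT0 stays reserved for the
 * global buffer), all five images would also occupy TEX2..TEX6 fetch slots
 * (writable images consume TEX/VTX slots too, for linear indexing), and the
 * kernel parameters would come in through CONST0/VTX0 as described. */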
struct r600_resource *r600_compute_buffer_alloc_vram(
	struct r600_screen *screen,
	unsigned size)
{
	struct pipe_resource *buffer = NULL;
	assert(size);

	buffer = pipe_buffer_create(
		(struct pipe_screen*) screen,
		PIPE_BIND_CUSTOM,
		PIPE_USAGE_IMMUTABLE,
		size);

	return (struct r600_resource*)buffer;
}
static void evergreen_set_rat(
	struct r600_pipe_compute *pipe,
	unsigned id,
	struct r600_resource *bo,
	int start,
	int size)
{
	struct pipe_surface rat_templ;
	struct r600_surface *surf = NULL;
	struct r600_context *rctx = NULL;

	assert(id < 12);
	assert((size & 3) == 0);
	assert((start & 0xFF) == 0);

	rctx = pipe->ctx;

	COMPUTE_DBG(rctx->screen, "bind rat: %i \n", id);

	/* Create the RAT surface */
	memset(&rat_templ, 0, sizeof(rat_templ));
	rat_templ.format = PIPE_FORMAT_R32_UINT;
	rat_templ.u.tex.level = 0;
	rat_templ.u.tex.first_layer = 0;
	rat_templ.u.tex.last_layer = 0;

	/* Add the RAT to the list of color buffers */
	pipe->ctx->framebuffer.state.cbufs[id] = pipe->ctx->b.b.create_surface(
		(struct pipe_context *)pipe->ctx,
		(struct pipe_resource *)bo, &rat_templ);

	/* Update the number of color buffers */
	pipe->ctx->framebuffer.state.nr_cbufs =
		MAX2(id + 1, pipe->ctx->framebuffer.state.nr_cbufs);

	/* Update the cb_target_mask
	 * XXX: I think this is a potential spot for bugs once we start doing
	 * GL interop. cb_target_mask may be modified in the 3D sections
	 * of this driver. */
	pipe->ctx->compute_cb_target_mask |= (0xf << (id * 4));

	surf = (struct r600_surface*)pipe->ctx->framebuffer.state.cbufs[id];
	evergreen_init_color_surface_rat(rctx, surf);
}
static void evergreen_cs_set_vertex_buffer(
	struct r600_context *rctx,
	unsigned vb_index,
	unsigned offset,
	struct pipe_resource *buffer)
{
	struct r600_vertexbuf_state *state = &rctx->cs_vertex_buffer_state;
	struct pipe_vertex_buffer *vb = &state->vb[vb_index];
	vb->stride = 1;
	vb->buffer_offset = offset;
	vb->buffer = buffer;
	vb->user_buffer = NULL;

	/* The vertex instructions in the compute shaders use the texture cache,
	 * so we need to invalidate it. */
	rctx->b.flags |= R600_CONTEXT_INV_VERTEX_CACHE;
	state->enabled_mask |= 1 << vb_index;
	state->dirty_mask |= 1 << vb_index;
	state->atom.dirty = true;
}
static void evergreen_cs_set_constant_buffer(
	struct r600_context *rctx,
	unsigned cb_index,
	unsigned offset,
	unsigned size,
	struct pipe_resource *buffer)
{
	struct pipe_constant_buffer cb;
	cb.buffer_size = size;
	cb.buffer_offset = offset;
	cb.buffer = buffer;
	cb.user_buffer = NULL;

	rctx->b.b.set_constant_buffer(&rctx->b.b, PIPE_SHADER_COMPUTE, cb_index, &cb);
}
static const struct u_resource_vtbl r600_global_buffer_vtbl =
{
	u_default_resource_get_handle, /* get_handle */
	r600_compute_global_buffer_destroy, /* resource_destroy */
	r600_compute_global_transfer_map, /* transfer_map */
	r600_compute_global_transfer_flush_region,/* transfer_flush_region */
	r600_compute_global_transfer_unmap, /* transfer_unmap */
	r600_compute_global_transfer_inline_write /* transfer_inline_write */
};
void *evergreen_create_compute_state(
	struct pipe_context *ctx_,
	const struct pipe_compute_state *cso)
{
	struct r600_context *ctx = (struct r600_context *)ctx_;
	struct r600_pipe_compute *shader = CALLOC_STRUCT(r600_pipe_compute);
	const struct pipe_llvm_program_header *header;
	const unsigned char *code;
	unsigned i;

	shader->llvm_ctx = LLVMContextCreate();

	COMPUTE_DBG(ctx->screen, "*** evergreen_create_compute_state\n");

	header = cso->prog;
	code = cso->prog + sizeof(struct pipe_llvm_program_header);

	shader->ctx = (struct r600_context *)ctx;
	shader->local_size = cso->req_local_mem;
	shader->private_size = cso->req_private_mem;
	shader->input_size = cso->req_input_mem;

	shader->num_kernels = radeon_llvm_get_num_kernels(shader->llvm_ctx, code,
							header->num_bytes);
	shader->kernels = CALLOC(sizeof(struct r600_kernel), shader->num_kernels);

	for (i = 0; i < shader->num_kernels; i++) {
		struct r600_kernel *kernel = &shader->kernels[i];
		kernel->llvm_module = radeon_llvm_get_kernel_module(shader->llvm_ctx, i,
								code, header->num_bytes);
	}

	return shader;
}
void evergreen_delete_compute_state(struct pipe_context *ctx, void *state)
{
	struct r600_pipe_compute *shader = (struct r600_pipe_compute *)state;

	if (!shader)
		return;

	FREE(shader->kernels);

	if (shader->llvm_ctx) {
		LLVMContextDispose(shader->llvm_ctx);
	}

	FREE(shader);
}
static void evergreen_bind_compute_state(struct pipe_context *ctx_, void *state)
{
	struct r600_context *ctx = (struct r600_context *)ctx_;

	COMPUTE_DBG(ctx->screen, "*** evergreen_bind_compute_state\n");

	ctx->cs_shader_state.shader = (struct r600_pipe_compute *)state;
}
/* The kernel parameters are stored in a vtx buffer (ID=0). Besides the
 * explicit kernel parameters, there are implicit parameters that need to
 * be stored in the vertex buffer as well. Here is how these parameters are
 * organized in the buffer:
 *
 * DWORDS 0-2: Number of work groups in each dimension (x,y,z)
 * DWORDS 3-5: Number of global work items in each dimension (x,y,z)
 * DWORDS 6-8: Number of work items within each work group in each dimension
 *             (x,y,z)
 * DWORDS 9+ : Kernel parameters
 */
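/* Worked example of the layout above (illustrative values): for a dispatch
 * with block_layout = {16, 16, 1} and grid_layout = {64, 64, 1}, DWORDS 0-2
 * hold 64, 64, 1 (work groups), DWORDS 3-5 hold 1024, 1024, 1 (global work
 * items, i.e. grid * block per dimension), DWORDS 6-8 hold 16, 16, 1, and
 * the explicit kernel arguments start at DWORD 9. */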
void evergreen_compute_upload_input(
	struct pipe_context *ctx_,
	const uint *block_layout,
	const uint *grid_layout,
	const void *input)
{
	struct r600_context *ctx = (struct r600_context *)ctx_;
	struct r600_pipe_compute *shader = ctx->cs_shader_state.shader;
	unsigned i;
	/* We need to reserve 9 dwords (36 bytes) for implicit kernel
	 * parameters. */
	unsigned input_size = shader->input_size + 36;
	uint32_t *num_work_groups_start;
	uint32_t *global_size_start;
	uint32_t *local_size_start;
	uint32_t *kernel_parameters_start;
	struct pipe_box box;
	struct pipe_transfer *transfer = NULL;

	if (shader->input_size == 0) {
		return;
	}

	if (!shader->kernel_param) {
		/* Add space for the grid dimensions */
		shader->kernel_param = (struct r600_resource *)
			pipe_buffer_create(ctx_->screen, PIPE_BIND_CUSTOM,
					PIPE_USAGE_IMMUTABLE, input_size);
	}

	u_box_1d(0, input_size, &box);
	num_work_groups_start = ctx_->transfer_map(ctx_,
			(struct pipe_resource *)shader->kernel_param,
			0, PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD_RANGE,
			&box, &transfer);
	global_size_start = num_work_groups_start + (3 * (sizeof(uint) / 4));
	local_size_start = global_size_start + (3 * (sizeof(uint) / 4));
	kernel_parameters_start = local_size_start + (3 * (sizeof(uint) / 4));

	/* Copy the number of work groups (the grid dimensions) */
	memcpy(num_work_groups_start, grid_layout, 3 * sizeof(uint));

	/* Copy the global size */
	for (i = 0; i < 3; i++) {
		global_size_start[i] = grid_layout[i] * block_layout[i];
	}

	/* Copy the local dimensions */
	memcpy(local_size_start, block_layout, 3 * sizeof(uint));

	/* Copy the kernel inputs */
	memcpy(kernel_parameters_start, input, shader->input_size);

	for (i = 0; i < (input_size / 4); i++) {
		COMPUTE_DBG(ctx->screen, "input %i : %u\n", i,
			((unsigned *)num_work_groups_start)[i]);
	}

	ctx_->transfer_unmap(ctx_, transfer);

	/* ID=0 is reserved for the parameters */
	evergreen_cs_set_constant_buffer(ctx, 0, 0, input_size,
			(struct pipe_resource *)shader->kernel_param);
}
static void evergreen_emit_direct_dispatch(
		struct r600_context *rctx,
		const uint *block_layout, const uint *grid_layout)
{
	int i;
	struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
	struct r600_pipe_compute *shader = rctx->cs_shader_state.shader;
	unsigned num_waves;
	unsigned num_pipes = rctx->screen->b.info.r600_max_pipes;
	unsigned wave_divisor = (16 * num_pipes);
	int group_size = 1;
	int grid_size = 1;
	unsigned lds_size = shader->local_size / 4 +
			shader->active_kernel->bc.nlds_dw;

	/* Calculate group_size/grid_size */
	for (i = 0; i < 3; i++) {
		group_size *= block_layout[i];
	}

	for (i = 0; i < 3; i++) {
		grid_size *= grid_layout[i];
	}

	/* num_waves = ceil((tg_size.x * tg_size.y * tg_size.z) / (16 * num_pipes)) */
	num_waves = (block_layout[0] * block_layout[1] * block_layout[2] +
			wave_divisor - 1) / wave_divisor;
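	/* Worked example for the formula above (illustrative numbers): with
	 * block_layout = {16, 16, 1} on a part where r600_max_pipes = 4,
	 * wave_divisor = 16 * 4 = 64, so
	 * num_waves = (16 * 16 * 1 + 63) / 64 = 4 wavefronts per thread block.
	 */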
	COMPUTE_DBG(rctx->screen, "Using %u pipes, "
				"%u wavefronts per thread block, "
				"allocating %u dwords lds.\n",
				num_pipes, num_waves, lds_size);

	r600_write_config_reg(cs, R_008970_VGT_NUM_INDICES, group_size);

	r600_write_config_reg_seq(cs, R_00899C_VGT_COMPUTE_START_X, 3);
	radeon_emit(cs, 0); /* R_00899C_VGT_COMPUTE_START_X */
	radeon_emit(cs, 0); /* R_0089A0_VGT_COMPUTE_START_Y */
	radeon_emit(cs, 0); /* R_0089A4_VGT_COMPUTE_START_Z */

	r600_write_config_reg(cs, R_0089AC_VGT_COMPUTE_THREAD_GROUP_SIZE,
				group_size);

	r600_write_compute_context_reg_seq(cs, R_0286EC_SPI_COMPUTE_NUM_THREAD_X, 3);
	radeon_emit(cs, block_layout[0]); /* R_0286EC_SPI_COMPUTE_NUM_THREAD_X */
	radeon_emit(cs, block_layout[1]); /* R_0286F0_SPI_COMPUTE_NUM_THREAD_Y */
	radeon_emit(cs, block_layout[2]); /* R_0286F4_SPI_COMPUTE_NUM_THREAD_Z */

	if (rctx->b.chip_class < CAYMAN) {
		assert(lds_size <= 8192);
	} else {
		/* Cayman appears to have a slightly smaller limit, see the
		 * value of CM_R_0286FC_SPI_LDS_MGMT.NUM_LS_LDS */
		assert(lds_size <= 8160);
	}

	r600_write_compute_context_reg(cs, CM_R_0288E8_SQ_LDS_ALLOC,
					lds_size | (num_waves << 14));

	/* Dispatch packet */
	radeon_emit(cs, PKT3C(PKT3_DISPATCH_DIRECT, 3, 0));
	radeon_emit(cs, grid_layout[0]);
	radeon_emit(cs, grid_layout[1]);
	radeon_emit(cs, grid_layout[2]);
	/* VGT_DISPATCH_INITIATOR = COMPUTE_SHADER_EN */
	radeon_emit(cs, 1);
}
static void compute_emit_cs(struct r600_context *ctx, const uint *block_layout,
		const uint *grid_layout)
{
	struct radeon_winsys_cs *cs = ctx->b.rings.gfx.cs;
	unsigned i;

	/* make sure that the gfx ring is the only one active */
	if (ctx->b.rings.dma.cs && ctx->b.rings.dma.cs->cdw) {
		ctx->b.rings.dma.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
	}

	/* Initialize all the compute-related registers.
	 *
	 * See evergreen_init_atom_start_compute_cs() in this file for the list
	 * of registers initialized by the start_compute_cs_cmd atom.
	 */
	r600_emit_command_buffer(cs, &ctx->start_compute_cs_cmd);

	ctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE | R600_CONTEXT_FLUSH_AND_INV;
	r600_flush_emit(ctx);

	/* Emit colorbuffers. */
	/* XXX support more than 8 colorbuffers (the offsets are not a multiple of 0x3C for CB8-11) */
	for (i = 0; i < 8 && i < ctx->framebuffer.state.nr_cbufs; i++) {
		struct r600_surface *cb = (struct r600_surface *)ctx->framebuffer.state.cbufs[i];
		unsigned reloc = r600_context_bo_reloc(&ctx->b, &ctx->b.rings.gfx,
						       (struct r600_resource *)cb->base.texture,
						       RADEON_USAGE_READWRITE,
						       RADEON_PRIO_SHADER_RESOURCE_RW);

		r600_write_compute_context_reg_seq(cs, R_028C60_CB_COLOR0_BASE + i * 0x3C, 7);
		radeon_emit(cs, cb->cb_color_base);	/* R_028C60_CB_COLOR0_BASE */
		radeon_emit(cs, cb->cb_color_pitch);	/* R_028C64_CB_COLOR0_PITCH */
		radeon_emit(cs, cb->cb_color_slice);	/* R_028C68_CB_COLOR0_SLICE */
		radeon_emit(cs, cb->cb_color_view);	/* R_028C6C_CB_COLOR0_VIEW */
		radeon_emit(cs, cb->cb_color_info);	/* R_028C70_CB_COLOR0_INFO */
		radeon_emit(cs, cb->cb_color_attrib);	/* R_028C74_CB_COLOR0_ATTRIB */
		radeon_emit(cs, cb->cb_color_dim);	/* R_028C78_CB_COLOR0_DIM */

		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C60_CB_COLOR0_BASE */
		radeon_emit(cs, reloc);

		if (!ctx->keep_tiling_flags) {
			radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C70_CB_COLOR0_INFO */
			radeon_emit(cs, reloc);
		}

		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C74_CB_COLOR0_ATTRIB */
		radeon_emit(cs, reloc);
	}
	if (ctx->keep_tiling_flags) {
		for (; i < 8; i++) {
			r600_write_compute_context_reg(cs, R_028C70_CB_COLOR0_INFO + i * 0x3C,
						       S_028C70_FORMAT(V_028C70_COLOR_INVALID));
		}
		for (; i < 12; i++) {
			r600_write_compute_context_reg(cs, R_028E50_CB_COLOR8_INFO + (i - 8) * 0x1C,
						       S_028C70_FORMAT(V_028C70_COLOR_INVALID));
		}
	}

	/* Set CB_TARGET_MASK  XXX: Use cb_misc_state */
	r600_write_compute_context_reg(cs, R_028238_CB_TARGET_MASK,
					ctx->compute_cb_target_mask);

	/* Emit vertex buffer state */
	ctx->cs_vertex_buffer_state.atom.num_dw = 12 * util_bitcount(ctx->cs_vertex_buffer_state.dirty_mask);
	r600_emit_atom(ctx, &ctx->cs_vertex_buffer_state.atom);

	/* Emit constant buffer state */
	r600_emit_atom(ctx, &ctx->constbuf_state[PIPE_SHADER_COMPUTE].atom);

	/* Emit compute shader state */
	r600_emit_atom(ctx, &ctx->cs_shader_state.atom);

	/* Emit dispatch state and dispatch packet */
	evergreen_emit_direct_dispatch(ctx, block_layout, grid_layout);

	/* XXX evergreen_flush_emit() hardcodes the CP_COHER_SIZE to 0xffffffff
	 */
	ctx->b.flags |= R600_CONTEXT_INV_CONST_CACHE |
		      R600_CONTEXT_INV_VERTEX_CACHE |
		      R600_CONTEXT_INV_TEX_CACHE;
	r600_flush_emit(ctx);

	if (ctx->b.chip_class >= CAYMAN) {
		cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
		cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_CS_PARTIAL_FLUSH) | EVENT_INDEX(4);
		/* DEALLOC_STATE prevents the GPU from hanging when a
		 * SURFACE_SYNC packet is emitted some time after a DISPATCH_DIRECT
		 * with any of the CB*_DEST_BASE_ENA or DB_DEST_BASE_ENA bits set.
		 */
		cs->buf[cs->cdw++] = PKT3C(PKT3_DEALLOC_STATE, 0, 0);
		cs->buf[cs->cdw++] = 0;
	}

	COMPUTE_DBG(ctx->screen, "cdw: %i\n", cs->cdw);
	for (i = 0; i < cs->cdw; i++) {
		COMPUTE_DBG(ctx->screen, "%4i : 0x%08X\n", i, cs->buf[i]);
	}
}
/**
 * Emit function for r600_cs_shader_state atom
 */
void evergreen_emit_cs_shader(
		struct r600_context *rctx,
		struct r600_atom *atom)
{
	struct r600_cs_shader_state *state =
		(struct r600_cs_shader_state *)atom;
	struct r600_pipe_compute *shader = state->shader;
	struct r600_kernel *kernel = &shader->kernels[state->kernel_index];
	struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;

	r600_write_compute_context_reg_seq(cs, R_0288D0_SQ_PGM_START_LS, 3);
	radeon_emit(cs, kernel->code_bo->gpu_address >> 8); /* R_0288D0_SQ_PGM_START_LS */
	radeon_emit(cs,		/* R_0288D4_SQ_PGM_RESOURCES_LS */
			S_0288D4_NUM_GPRS(kernel->bc.ngpr)
			| S_0288D4_STACK_SIZE(kernel->bc.nstack));
	radeon_emit(cs, 0);	/* R_0288D8_SQ_PGM_RESOURCES_LS_2 */

	radeon_emit(cs, PKT3C(PKT3_NOP, 0, 0));
	radeon_emit(cs, r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx,
					      kernel->code_bo, RADEON_USAGE_READ,
					      RADEON_PRIO_SHADER_DATA));
}
static void evergreen_launch_grid(
		struct pipe_context *ctx_,
		const uint *block_layout, const uint *grid_layout,
		uint32_t pc, const void *input)
{
	struct r600_context *ctx = (struct r600_context *)ctx_;
	struct r600_pipe_compute *shader = ctx->cs_shader_state.shader;
	struct r600_kernel *kernel = &shader->kernels[pc];

	COMPUTE_DBG(ctx->screen, "*** evergreen_launch_grid: pc = %u\n", pc);

	if (!kernel->code_bo) {
		void *p;
		struct r600_bytecode *bc = &kernel->bc;
		LLVMModuleRef mod = kernel->llvm_module;
		boolean use_kill = false;
		bool dump = (ctx->screen->b.debug_flags & DBG_CS) != 0;
		unsigned use_sb = ctx->screen->b.debug_flags & DBG_SB_CS;
		unsigned sb_disasm = use_sb ||
			(ctx->screen->b.debug_flags & DBG_SB_DISASM);

		r600_bytecode_init(bc, ctx->b.chip_class, ctx->b.family,
				ctx->screen->has_compressed_msaa_texturing);
		bc->type = TGSI_PROCESSOR_COMPUTE;

		r600_llvm_compile(mod, ctx->b.family, bc, &use_kill, dump);

		if (dump && !sb_disasm) {
			r600_bytecode_disasm(bc);
		} else if ((dump && sb_disasm) || use_sb) {
			if (r600_sb_bytecode_process(ctx, bc, NULL, dump, use_sb))
				R600_ERR("r600_sb_bytecode_process failed!\n");
		}

		kernel->code_bo = r600_compute_buffer_alloc_vram(ctx->screen,
							kernel->bc.ndw * 4);
		p = r600_buffer_map_sync_with_rings(&ctx->b, kernel->code_bo, PIPE_TRANSFER_WRITE);
		memcpy(p, kernel->bc.bytecode, kernel->bc.ndw * 4);
		ctx->b.ws->buffer_unmap(kernel->code_bo->cs_buf);
	}

	shader->active_kernel = kernel;
	ctx->cs_shader_state.kernel_index = pc;
	evergreen_compute_upload_input(ctx_, block_layout, grid_layout, input);
	compute_emit_cs(ctx, block_layout, grid_layout);
}
static void evergreen_set_compute_resources(struct pipe_context *ctx_,
		unsigned start, unsigned count,
		struct pipe_surface **surfaces)
{
	struct r600_context *ctx = (struct r600_context *)ctx_;
	struct r600_surface **resources = (struct r600_surface **)surfaces;

	COMPUTE_DBG(ctx->screen, "*** evergreen_set_compute_resources: start = %u count = %u\n",
			start, count);

	for (unsigned i = 0; i < count; i++) {
		/* The first two vertex buffers are reserved for parameters and
		 * global buffers. */
		unsigned vtx_id = 2 + i;
		if (resources[i]) {
			struct r600_resource_global *buffer =
				(struct r600_resource_global *)
				resources[i]->base.texture;
			if (resources[i]->base.writable) {
				assert(i + 1 < 12);

				evergreen_set_rat(ctx->cs_shader_state.shader, i + 1,
				(struct r600_resource *)resources[i]->base.texture,
				buffer->chunk->start_in_dw * 4,
				resources[i]->base.texture->width0);
			}

			evergreen_cs_set_vertex_buffer(ctx, vtx_id,
					buffer->chunk->start_in_dw * 4,
					resources[i]->base.texture);
		}
	}
}
void evergreen_set_cs_sampler_view(struct pipe_context *ctx_,
		unsigned start_slot, unsigned count,
		struct pipe_sampler_view **views)
{
	struct r600_pipe_sampler_view **resource =
		(struct r600_pipe_sampler_view **)views;

	for (unsigned i = 0; i < count; i++) {
		if (resource[i]) {
			assert(i < 32);
			/* XXX: Implement */
			assert(!"Compute samplers not implemented.");
			///FETCH0 = VTX0 (param buffer),
			//FETCH1 = VTX1 (global buffer pool), FETCH2... = TEX
		}
	}
}
static void evergreen_set_global_binding(
	struct pipe_context *ctx_, unsigned first, unsigned n,
	struct pipe_resource **resources,
	uint32_t **handles)
{
	struct r600_context *ctx = (struct r600_context *)ctx_;
	struct compute_memory_pool *pool = ctx->screen->global_pool;
	struct r600_resource_global **buffers =
		(struct r600_resource_global **)resources;

	COMPUTE_DBG(ctx->screen, "*** evergreen_set_global_binding first = %u n = %u\n",
			first, n);

	if (!resources) {
		/* XXX: Unset */
		return;
	}

	/* We mark these items for promotion to the pool if they
	 * aren't already there */
	for (unsigned i = 0; i < n; i++) {
		struct compute_memory_item *item = buffers[i]->chunk;

		if (!is_item_in_pool(item))
			buffers[i]->chunk->status |= ITEM_FOR_PROMOTING;
	}

	if (compute_memory_finalize_pending(pool, ctx_) == -1) {
		/* XXX: Unset */
		return;
	}

	for (unsigned i = 0; i < n; i++)
	{
		uint32_t buffer_offset;
		uint32_t handle;
		assert(resources[i]->target == PIPE_BUFFER);
		assert(resources[i]->bind & PIPE_BIND_GLOBAL);

		buffer_offset = util_le32_to_cpu(*(handles[i]));
		handle = buffer_offset + buffers[i]->chunk->start_in_dw * 4;

		*(handles[i]) = util_cpu_to_le32(handle);
	}
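	/* Worked example of the handle patching above (illustrative numbers):
	 * if a buffer's chunk starts at start_in_dw = 256 and the application
	 * stored a byte offset of 16 in the handle, the patched handle becomes
	 * 16 + 256 * 4 = 1040, a byte offset into the pool buffer that is
	 * bound as RAT0/VTX1 below. */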
	evergreen_set_rat(ctx->cs_shader_state.shader, 0, pool->bo, 0, pool->size_in_dw * 4);
	evergreen_cs_set_vertex_buffer(ctx, 1, 0,
				(struct pipe_resource *)pool->bo);
}
/**
 * This function initializes all the compute specific registers that need to
 * be initialized for each compute command stream. Registers that are common
 * to both compute and 3D will be initialized at the beginning of each compute
 * command stream by the start_cs_cmd atom. However, since the SET_CONTEXT_REG
 * packet requires that the shader type bit be set, we must initialize all
 * context registers needed for compute in this function. The registers
 * initialized by the start_cs_cmd atom can be found in evergreen_state.c in
 * the functions evergreen_init_atom_start_cs or cayman_init_atom_start_cs,
 * depending on the chip family.
 */
void evergreen_init_atom_start_compute_cs(struct r600_context *ctx)
{
	struct r600_command_buffer *cb = &ctx->start_compute_cs_cmd;
	int num_threads;
	int num_stack_entries;

	/* since all required registers are initialised in the
	 * start_compute_cs_cmd atom, we can EMIT_EARLY here.
	 */
	r600_init_command_buffer(cb, 256);
	cb->pkt_flags = RADEON_CP_PACKET3_COMPUTE_MODE;

	/* This must be first. */
	r600_store_value(cb, PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
	r600_store_value(cb, 0x80000000);
	r600_store_value(cb, 0x80000000);

	/* We're setting config registers here. */
	r600_store_value(cb, PKT3(PKT3_EVENT_WRITE, 0, 0));
	r600_store_value(cb, EVENT_TYPE(EVENT_TYPE_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
	switch (ctx->b.family) {
	case CHIP_CEDAR:
	default:
		num_threads = 128;
		num_stack_entries = 256;
		break;
	case CHIP_REDWOOD:
		num_threads = 128;
		num_stack_entries = 256;
		break;
	case CHIP_JUNIPER:
		num_threads = 128;
		num_stack_entries = 512;
		break;
	case CHIP_CYPRESS:
	case CHIP_HEMLOCK:
		num_threads = 128;
		num_stack_entries = 512;
		break;
	case CHIP_PALM:
		num_threads = 128;
		num_stack_entries = 256;
		break;
	case CHIP_SUMO:
		num_threads = 128;
		num_stack_entries = 256;
		break;
	case CHIP_SUMO2:
		num_threads = 128;
		num_stack_entries = 512;
		break;
	case CHIP_BARTS:
		num_threads = 128;
		num_stack_entries = 512;
		break;
	case CHIP_TURKS:
		num_threads = 128;
		num_stack_entries = 256;
		break;
	case CHIP_CAICOS:
		num_threads = 128;
		num_stack_entries = 256;
		break;
	}
	/* Config Registers */
	if (ctx->b.chip_class < CAYMAN)
		evergreen_init_common_regs(cb, ctx->b.chip_class, ctx->b.family,
					   ctx->screen->b.info.drm_minor);
	else
		cayman_init_common_regs(cb, ctx->b.chip_class, ctx->b.family,
					ctx->screen->b.info.drm_minor);

	/* The primitive type always needs to be POINTLIST for compute. */
	r600_store_config_reg(cb, R_008958_VGT_PRIMITIVE_TYPE,
						V_008958_DI_PT_POINTLIST);
	if (ctx->b.chip_class < CAYMAN) {

		/* These registers control which simds can be used by each stage.
		 * The default for these registers is 0xffffffff, which means
		 * all simds are available for each stage. It's possible we may
		 * want to play around with these in the future, but for now
		 * the default value is fine.
		 *
		 * R_008E20_SQ_STATIC_THREAD_MGMT1
		 * R_008E24_SQ_STATIC_THREAD_MGMT2
		 * R_008E28_SQ_STATIC_THREAD_MGMT3
		 */

		/* XXX: We may need to adjust the thread and stack resource
		 * values for 3D/compute interop */

		r600_store_config_reg_seq(cb, R_008C18_SQ_THREAD_RESOURCE_MGMT_1, 5);

		/* R_008C18_SQ_THREAD_RESOURCE_MGMT_1
		 * Set the number of threads used by the PS/VS/GS/ES stage to
		 * 0. */
		r600_store_value(cb, 0);

		/* R_008C1C_SQ_THREAD_RESOURCE_MGMT_2
		 * Set the number of threads used by the CS (aka LS) stage to
		 * the maximum number of threads and set the number of threads
		 * for the HS stage to 0. */
		r600_store_value(cb, S_008C1C_NUM_LS_THREADS(num_threads));

		/* R_008C20_SQ_STACK_RESOURCE_MGMT_1
		 * Set the Control Flow stack entries to 0 for PS/VS stages */
		r600_store_value(cb, 0);

		/* R_008C24_SQ_STACK_RESOURCE_MGMT_2
		 * Set the Control Flow stack entries to 0 for GS/ES stages */
		r600_store_value(cb, 0);

		/* R_008C28_SQ_STACK_RESOURCE_MGMT_3
		 * Set the Control Flow stack entries to 0 for the HS stage, and
		 * set it to the maximum value for the CS (aka LS) stage. */
		r600_store_value(cb,
			S_008C28_NUM_LS_STACK_ENTRIES(num_stack_entries));
	}
	/* Give the compute shader all the available LDS space.
	 * NOTE: This only sets the maximum number of dwords that a compute
	 * shader can allocate. When a shader is executed, we still need to
	 * allocate the appropriate amount of LDS dwords using the
	 * CM_R_0288E8_SQ_LDS_ALLOC register.
	 */
	if (ctx->b.chip_class < CAYMAN) {
		r600_store_config_reg(cb, R_008E2C_SQ_LDS_RESOURCE_MGMT,
			S_008E2C_NUM_PS_LDS(0x0000) | S_008E2C_NUM_LS_LDS(8192));
	} else {
		r600_store_context_reg(cb, CM_R_0286FC_SPI_LDS_MGMT,
			S_0286FC_NUM_PS_LDS(0) |
			S_0286FC_NUM_LS_LDS(255)); /* 255 * 32 = 8160 dwords */
	}
	/* Context Registers */

	if (ctx->b.chip_class < CAYMAN) {
		/* workaround for hw issues with dyn gpr - must set all limits
		 * to 240 instead of 0, 0x1e == 240 / 8
		 */
		r600_store_context_reg(cb, R_028838_SQ_DYN_GPR_RESOURCE_LIMIT_1,
				S_028838_PS_GPRS(0x1e) |
				S_028838_VS_GPRS(0x1e) |
				S_028838_GS_GPRS(0x1e) |
				S_028838_ES_GPRS(0x1e) |
				S_028838_HS_GPRS(0x1e) |
				S_028838_LS_GPRS(0x1e));
	}

	/* XXX: Investigate setting bit 15, which is FAST_COMPUTE_MODE */
	r600_store_context_reg(cb, R_028A40_VGT_GS_MODE,
		S_028A40_COMPUTE_MODE(1) | S_028A40_PARTIAL_THD_AT_EOI(1));

	r600_store_context_reg(cb, R_028B54_VGT_SHADER_STAGES_EN, 2/*CS_ON*/);

	r600_store_context_reg(cb, R_0286E8_SPI_COMPUTE_INPUT_CNTL,
						S_0286E8_TID_IN_GROUP_ENA
						| S_0286E8_TGID_ENA
						| S_0286E8_DISABLE_INDEX_PACK);
	/* The LOOP_CONST registers are an optimization for loops that allows
	 * you to store the initial counter, increment value, and maximum
	 * counter value in a register so that hardware can calculate the
	 * correct number of iterations for the loop, so that you don't need
	 * to have the loop counter in your shader code. We don't currently use
	 * this optimization, so we must keep track of the counter in the
	 * shader and use a break instruction to exit loops. However, the
	 * hardware will still use this register to determine when to exit a
	 * loop, so we need to initialize the counter to 0, set the increment
	 * value to 1 and the maximum counter value to 4095 (0xfff), which
	 * is the maximum value allowed. This gives us a maximum of 4096
	 * iterations for our loops, but hopefully our break instruction will
	 * execute some time before the 4096th iteration.
	 */
	eg_store_loop_const(cb, R_03A200_SQ_LOOP_CONST_0 + (160 * 4), 0x1000FFF);
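	/* Sketch of the 0x1000FFF value above, assuming the usual SQ_LOOP_CONST
	 * field layout (count in bits 0-11, initial value in bits 12-23,
	 * increment in bits 24-31): count = 0xFFF (4095), init = 0,
	 * increment = 1, which matches the comment's description. */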
}

void evergreen_init_compute_state_functions(struct r600_context *ctx)
{
	ctx->b.b.create_compute_state = evergreen_create_compute_state;
	ctx->b.b.delete_compute_state = evergreen_delete_compute_state;
	ctx->b.b.bind_compute_state = evergreen_bind_compute_state;
//	ctx->context.create_sampler_view = evergreen_compute_create_sampler_view;
	ctx->b.b.set_compute_resources = evergreen_set_compute_resources;
	ctx->b.b.set_global_binding = evergreen_set_global_binding;
	ctx->b.b.launch_grid = evergreen_launch_grid;

	/* We always use at least one vertex buffer for parameters (id = 1) */
	ctx->cs_vertex_buffer_state.enabled_mask =
	ctx->cs_vertex_buffer_state.dirty_mask = 0x2;
}
struct pipe_resource *r600_compute_global_buffer_create(
	struct pipe_screen *screen,
	const struct pipe_resource *templ)
{
	struct r600_resource_global *result = NULL;
	struct r600_screen *rscreen = NULL;
	int size_in_dw = 0;

	assert(templ->target == PIPE_BUFFER);
	assert(templ->bind & PIPE_BIND_GLOBAL);
	assert(templ->array_size == 1 || templ->array_size == 0);
	assert(templ->depth0 == 1 || templ->depth0 == 0);
	assert(templ->height0 == 1 || templ->height0 == 0);

	result = (struct r600_resource_global *)
		CALLOC(sizeof(struct r600_resource_global), 1);
	rscreen = (struct r600_screen *)screen;

	COMPUTE_DBG(rscreen, "*** r600_compute_global_buffer_create\n");
	COMPUTE_DBG(rscreen, "width = %u array_size = %u\n", templ->width0,
			templ->array_size);

	result->base.b.vtbl = &r600_global_buffer_vtbl;
	result->base.b.b.screen = screen;
	result->base.b.b = *templ;
	pipe_reference_init(&result->base.b.b.reference, 1);

	size_in_dw = (templ->width0 + 3) / 4;
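	/* Example of the rounding above (illustrative): width0 = 10 bytes
	 * gives size_in_dw = (10 + 3) / 4 = 3, so byte sizes are rounded up
	 * to whole dwords before allocating from the compute memory pool. */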
	result->chunk = compute_memory_alloc(rscreen->global_pool, size_in_dw);

	if (result->chunk == NULL)
	{
		free(result);
		return NULL;
	}

	return &result->base.b.b;
}
void r600_compute_global_buffer_destroy(
	struct pipe_screen *screen,
	struct pipe_resource *res)
{
	struct r600_resource_global *buffer = NULL;
	struct r600_screen *rscreen = NULL;

	assert(res->target == PIPE_BUFFER);
	assert(res->bind & PIPE_BIND_GLOBAL);

	buffer = (struct r600_resource_global *)res;
	rscreen = (struct r600_screen *)screen;

	compute_memory_free(rscreen->global_pool, buffer->chunk->id);

	buffer->chunk = NULL;
	free(res);
}
void *r600_compute_global_transfer_map(
	struct pipe_context *ctx_,
	struct pipe_resource *resource,
	unsigned level,
	unsigned usage,
	const struct pipe_box *box,
	struct pipe_transfer **ptransfer)
{
	struct r600_context *rctx = (struct r600_context *)ctx_;
	struct compute_memory_pool *pool = rctx->screen->global_pool;
	struct r600_resource_global *buffer =
		(struct r600_resource_global *)resource;

	struct compute_memory_item *item = buffer->chunk;
	struct pipe_resource *dst = NULL;
	unsigned offset = box->x;

	if (is_item_in_pool(item)) {
		compute_memory_demote_item(pool, item, ctx_);
	}
	else {
		if (item->real_buffer == NULL) {
			item->real_buffer = (struct r600_resource *)
				r600_compute_buffer_alloc_vram(pool->screen, item->size_in_dw * 4);
		}
	}

	dst = (struct pipe_resource *)item->real_buffer;

	if (usage & PIPE_TRANSFER_READ)
		buffer->chunk->status |= ITEM_MAPPED_FOR_READING;

	COMPUTE_DBG(rctx->screen, "* r600_compute_global_transfer_map()\n"
			"level = %u, usage = %u, box(x = %u, y = %u, z = %u "
			"width = %u, height = %u, depth = %u)\n", level, usage,
			box->x, box->y, box->z, box->width, box->height,
			box->depth);
	COMPUTE_DBG(rctx->screen, "Buffer id = %u offset = "
		"%u (box.x)\n", item->id, box->x);

	assert(resource->target == PIPE_BUFFER);
	assert(resource->bind & PIPE_BIND_GLOBAL);
	assert(box->x >= 0);
	assert(box->y == 0);
	assert(box->z == 0);

	///TODO: do it better, mapping is not possible if the pool is too big
	return pipe_buffer_map_range(ctx_, dst,
			offset, box->width, usage, ptransfer);
}
void r600_compute_global_transfer_unmap(
	struct pipe_context *ctx_,
	struct pipe_transfer *transfer)
{
	/* struct r600_resource_global are not real resources, they just map
	 * to an offset within the compute memory pool. The function
	 * r600_compute_global_transfer_map() maps the memory pool
	 * resource rather than the struct r600_resource_global passed to
	 * it as an argument and then initializes ptransfer->resource with
	 * the memory pool resource (via pipe_buffer_map_range).
	 * When transfer_unmap is called it uses the memory pool's
	 * vtable, which calls r600_buffer_transfer_unmap() rather than
	 * this function.
	 */
	assert (!"This function should not be called");
}
void r600_compute_global_transfer_flush_region(
	struct pipe_context *ctx_,
	struct pipe_transfer *transfer,
	const struct pipe_box *box)
{
	assert(0 && "TODO");
}
void r600_compute_global_transfer_inline_write(
	struct pipe_context *pipe,
	struct pipe_resource *resource,
	unsigned level,
	unsigned usage,
	const struct pipe_box *box,
	const void *data,
	unsigned stride,
	unsigned layer_stride)
{
	assert(0 && "TODO");
}