/*
 * Copyright 2011 Adam Rak <adam.rak@streamnovation.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Adam Rak <adam.rak@streamnovation.com>
 */
29 #include "pipe/p_defines.h"
30 #include "pipe/p_state.h"
31 #include "pipe/p_context.h"
32 #include "util/u_blitter.h"
33 #include "util/u_double_list.h"
34 #include "util/u_transfer.h"
35 #include "util/u_surface.h"
36 #include "util/u_pack_color.h"
37 #include "util/u_memory.h"
38 #include "util/u_inlines.h"
39 #include "util/u_framebuffer.h"
40 #include "pipebuffer/pb_buffer.h"
41 #include "evergreend.h"
42 #include "r600_resource.h"
43 #include "r600_shader.h"
44 #include "r600_pipe.h"
45 #include "r600_formats.h"
46 #include "evergreen_compute.h"
47 #include "evergreen_compute_internal.h"
48 #include "compute_memory_pool.h"
49 #include "sb/sb_public.h"
51 #include "radeon_llvm_util.h"
/*
 * RAT0 is for global binding write
 * VTX1 is for global binding read
 *
 * for writing images RAT1...
 * for reading images TEX2...
 *
 * TEX2... consumes the same fetch resources that VTX2... would consume
 *
 * CONST0 and VTX0 are for parameters
 *   CONST0 binds the smaller input parameter buffer, and is for constant
 *   indexing, also constant cached
 *   VTX0 is for indirect/non-constant indexing, or if the input is bigger
 *   than the constant cache can handle
 *
 * RATs are limited to 12, so we can bind at most 11 textures for writing
 * because we reserve RAT0 for global bindings. With byte addressing enabled,
 * we should reserve another one too. => at most 10 image bindings for
 * writing.
 *
 * from NVIDIA OpenCL:
 *   CL_DEVICE_MAX_READ_IMAGE_ARGS:  128
 *   CL_DEVICE_MAX_WRITE_IMAGE_ARGS: 8
 *
 * so 10 for writing is enough. 176 is the max for reading according to the
 * docs.
 *
 * writable images should be listed first (< 10), so their id corresponds to
 * RAT(id+1)
 * writable images will consume TEX slots, and VTX slots too, because of
 * linear indexing
 */
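/* A minimal sketch (not part of the driver API) of the slot assignment the
 * comment above describes; writable_image_to_rat() is a hypothetical helper
 * added here purely for illustration:
 *
 *   RAT0 <- global buffer pool (writes)    VTX0 <- kernel parameters
 *   RAT1 <- writable image 0               VTX1 <- global buffer pool (reads)
 *   RAT2 <- writable image 1               TEX2 <- readable image 0
 */
static inline unsigned writable_image_to_rat(unsigned image_id)
{
        /* RAT0 is reserved for the global buffer, so image N lands in RAT(N+1). */
        return image_id + 1;
}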
struct r600_resource *r600_compute_buffer_alloc_vram(
        struct r600_screen *screen,
        unsigned size)
{
        struct pipe_resource *buffer = NULL;
        assert(size);

        buffer = pipe_buffer_create(
                (struct pipe_screen *)screen,
                PIPE_BIND_CUSTOM,
                PIPE_USAGE_IMMUTABLE,
                size);

        return (struct r600_resource *)buffer;
}
static void evergreen_set_rat(
        struct r600_pipe_compute *pipe,
        int id,
        struct r600_resource *bo,
        int start,
        int size)
{
        struct pipe_surface rat_templ;
        struct r600_surface *surf = NULL;
        struct r600_context *rctx = NULL;

        assert(id < 12);
        assert((size & 3) == 0);
        assert((start & 0xFF) == 0);

        rctx = pipe->ctx;

        COMPUTE_DBG(rctx->screen, "bind rat: %i\n", id);

        /* Create the RAT surface */
        memset(&rat_templ, 0, sizeof(rat_templ));
        rat_templ.format = PIPE_FORMAT_R32_UINT;
        rat_templ.u.tex.level = 0;
        rat_templ.u.tex.first_layer = 0;
        rat_templ.u.tex.last_layer = 0;

        /* Add the RAT to the list of color buffers */
        pipe->ctx->framebuffer.state.cbufs[id] = pipe->ctx->b.b.create_surface(
                (struct pipe_context *)pipe->ctx,
                (struct pipe_resource *)bo, &rat_templ);

        /* Update the number of color buffers */
        pipe->ctx->framebuffer.state.nr_cbufs =
                MAX2(id + 1, pipe->ctx->framebuffer.state.nr_cbufs);

        /* Update the cb_target_mask
         * XXX: I think this is a potential spot for bugs once we start doing
         * GL interop. cb_target_mask may be modified in the 3D sections of
         * this driver. */
        pipe->ctx->compute_cb_target_mask |= (0xf << (id * 4));
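        /* For example (illustrative): binding RAT id 2 sets bits 8-11, i.e.
         * compute_cb_target_mask |= 0xf00, enabling all four channels of
         * color buffer 2. */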
        surf = (struct r600_surface *)pipe->ctx->framebuffer.state.cbufs[id];
        evergreen_init_color_surface_rat(rctx, surf);
}
static void evergreen_cs_set_vertex_buffer(
        struct r600_context *rctx,
        unsigned vb_index,
        unsigned offset,
        struct pipe_resource *buffer)
{
        struct r600_vertexbuf_state *state = &rctx->cs_vertex_buffer_state;
        struct pipe_vertex_buffer *vb = &state->vb[vb_index];
        vb->stride = 1;
        vb->buffer_offset = offset;
        vb->buffer = buffer;
        vb->user_buffer = NULL;

        /* The vertex instructions in the compute shaders use the texture
         * cache, so we need to invalidate it. */
        rctx->b.flags |= R600_CONTEXT_INV_VERTEX_CACHE;
        state->enabled_mask |= 1 << vb_index;
        state->dirty_mask |= 1 << vb_index;
        state->atom.dirty = true;
}
static void evergreen_cs_set_constant_buffer(
        struct r600_context *rctx,
        unsigned cb_index,
        unsigned offset,
        unsigned size,
        struct pipe_resource *buffer)
{
        struct pipe_constant_buffer cb;
        cb.buffer_size = size;
        cb.buffer_offset = offset;
        cb.buffer = buffer;
        cb.user_buffer = NULL;

        rctx->b.b.set_constant_buffer(&rctx->b.b, PIPE_SHADER_COMPUTE, cb_index, &cb);
}
static const struct u_resource_vtbl r600_global_buffer_vtbl =
{
        u_default_resource_get_handle,             /* get_handle */
        r600_compute_global_buffer_destroy,        /* resource_destroy */
        r600_compute_global_transfer_map,          /* transfer_map */
        r600_compute_global_transfer_flush_region, /* transfer_flush_region */
        r600_compute_global_transfer_unmap,        /* transfer_unmap */
        r600_compute_global_transfer_inline_write  /* transfer_inline_write */
};
void *evergreen_create_compute_state(
        struct pipe_context *ctx_,
        const struct pipe_compute_state *cso)
{
        struct r600_context *ctx = (struct r600_context *)ctx_;
        struct r600_pipe_compute *shader = CALLOC_STRUCT(r600_pipe_compute);

#ifdef HAVE_OPENCL
        const struct pipe_llvm_program_header *header;
        const unsigned char *code;
        unsigned i;

        COMPUTE_DBG(ctx->screen, "*** evergreen_create_compute_state\n");

        header = cso->prog;
        code = cso->prog + sizeof(struct pipe_llvm_program_header);
#endif

        shader->ctx = (struct r600_context *)ctx;
        shader->local_size = cso->req_local_mem;
        shader->private_size = cso->req_private_mem;
        shader->input_size = cso->req_input_mem;

#ifdef HAVE_OPENCL
        shader->num_kernels = radeon_llvm_get_num_kernels(code, header->num_bytes);
        shader->kernels = CALLOC(sizeof(struct r600_kernel), shader->num_kernels);

        for (i = 0; i < shader->num_kernels; i++) {
                struct r600_kernel *kernel = &shader->kernels[i];
                kernel->llvm_module = radeon_llvm_get_kernel_module(i, code,
                                                        header->num_bytes);
        }
#endif
        return shader;
}
void evergreen_delete_compute_state(struct pipe_context *ctx, void *state)
{
        struct r600_pipe_compute *shader = (struct r600_pipe_compute *)state;

        free(shader);
}
static void evergreen_bind_compute_state(struct pipe_context *ctx_, void *state)
{
        struct r600_context *ctx = (struct r600_context *)ctx_;

        COMPUTE_DBG(ctx->screen, "*** evergreen_bind_compute_state\n");

        ctx->cs_shader_state.shader = (struct r600_pipe_compute *)state;
}
/* The kernel parameters are stored in a vtx buffer (ID=0). Besides the
 * explicit kernel parameters, there are implicit parameters that need to be
 * stored in the vertex buffer as well. Here is how these parameters are
 * organized in the buffer:
 *
 * DWORDS 0-2: Number of work groups in each dimension (x,y,z)
 * DWORDS 3-5: Number of global work items in each dimension (x,y,z)
 * DWORDS 6-8: Number of work items within each work group in each dimension
 *             (x,y,z)
 * DWORDS 9+ : Kernel parameters
 */
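/* For example (illustrative, not from the original source): launching a
 * 4x2x1 grid of 64x1x1 work groups would fill the buffer with
 *   DWORDS 0-2: 4, 2, 1     (work groups per dimension)
 *   DWORDS 3-5: 256, 2, 1   (global work items: grid * block per dimension)
 *   DWORDS 6-8: 64, 1, 1    (work items per work group)
 *   DWORDS 9+ : the kernel's explicit arguments
 */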
void evergreen_compute_upload_input(
        struct pipe_context *ctx_,
        const uint *block_layout,
        const uint *grid_layout,
        const void *input)
{
        struct r600_context *ctx = (struct r600_context *)ctx_;
        struct r600_pipe_compute *shader = ctx->cs_shader_state.shader;
        unsigned i;
        /* We need to reserve 9 dwords (36 bytes) for the implicit kernel
         * parameters.
         */
        unsigned input_size = shader->input_size + 36;
        uint32_t *num_work_groups_start;
        uint32_t *global_size_start;
        uint32_t *local_size_start;
        uint32_t *kernel_parameters_start;
        struct pipe_box box;
        struct pipe_transfer *transfer = NULL;

        if (shader->input_size == 0) {
                return;
        }

        if (!shader->kernel_param) {
                /* Add space for the grid dimensions */
                shader->kernel_param = (struct r600_resource *)
                        pipe_buffer_create(ctx_->screen, PIPE_BIND_CUSTOM,
                                        PIPE_USAGE_IMMUTABLE, input_size);
        }

        u_box_1d(0, input_size, &box);
        num_work_groups_start = ctx_->transfer_map(ctx_,
                        (struct pipe_resource *)shader->kernel_param,
                        0, PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD_RANGE,
                        &box, &transfer);
        global_size_start = num_work_groups_start + (3 * (sizeof(uint) / 4));
        local_size_start = global_size_start + (3 * (sizeof(uint) / 4));
        kernel_parameters_start = local_size_start + (3 * (sizeof(uint) / 4));

        /* Copy the number of work groups */
        memcpy(num_work_groups_start, grid_layout, 3 * sizeof(uint));

        /* Copy the global size */
        for (i = 0; i < 3; i++) {
                global_size_start[i] = grid_layout[i] * block_layout[i];
        }

        /* Copy the local dimensions */
        memcpy(local_size_start, block_layout, 3 * sizeof(uint));

        /* Copy the kernel inputs */
        memcpy(kernel_parameters_start, input, shader->input_size);

        for (i = 0; i < (input_size / 4); i++) {
                COMPUTE_DBG(ctx->screen, "input %i : %i\n", i,
                        ((unsigned *)num_work_groups_start)[i]);
        }

        ctx_->transfer_unmap(ctx_, transfer);

        /* ID=0 is reserved for the parameters */
        evergreen_cs_set_constant_buffer(ctx, 0, 0, input_size,
                        (struct pipe_resource *)shader->kernel_param);
}
static void evergreen_emit_direct_dispatch(
                struct r600_context *rctx,
                const uint *block_layout, const uint *grid_layout)
{
        int i;
        struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
        struct r600_pipe_compute *shader = rctx->cs_shader_state.shader;
        unsigned num_waves;
        unsigned num_pipes = rctx->screen->b.info.r600_max_pipes;
        unsigned wave_divisor = (16 * num_pipes);
        int group_size = 1;
        int grid_size = 1;
        unsigned lds_size = shader->local_size / 4 +
                shader->active_kernel->bc.nlds_dw;

        /* Calculate group_size/grid_size */
        for (i = 0; i < 3; i++) {
                group_size *= block_layout[i];
        }

        for (i = 0; i < 3; i++) {
                grid_size *= grid_layout[i];
        }

        /* num_waves = ceil((tg_size.x * tg_size.y * tg_size.z) / (16 * num_pipes)) */
        num_waves = (block_layout[0] * block_layout[1] * block_layout[2] +
                        wave_divisor - 1) / wave_divisor;
        COMPUTE_DBG(rctx->screen, "Using %u pipes, "
                                "%u wavefronts per thread block, "
                                "allocating %u dwords lds.\n",
                                num_pipes, num_waves, lds_size);

        r600_write_config_reg(cs, R_008970_VGT_NUM_INDICES, group_size);

        r600_write_config_reg_seq(cs, R_00899C_VGT_COMPUTE_START_X, 3);
        radeon_emit(cs, 0); /* R_00899C_VGT_COMPUTE_START_X */
        radeon_emit(cs, 0); /* R_0089A0_VGT_COMPUTE_START_Y */
        radeon_emit(cs, 0); /* R_0089A4_VGT_COMPUTE_START_Z */

        r600_write_config_reg(cs, R_0089AC_VGT_COMPUTE_THREAD_GROUP_SIZE,
                                                                group_size);

        r600_write_compute_context_reg_seq(cs, R_0286EC_SPI_COMPUTE_NUM_THREAD_X, 3);
        radeon_emit(cs, block_layout[0]); /* R_0286EC_SPI_COMPUTE_NUM_THREAD_X */
        radeon_emit(cs, block_layout[1]); /* R_0286F0_SPI_COMPUTE_NUM_THREAD_Y */
        radeon_emit(cs, block_layout[2]); /* R_0286F4_SPI_COMPUTE_NUM_THREAD_Z */

        if (rctx->b.chip_class < CAYMAN) {
                assert(lds_size <= 8192);
        } else {
                /* Cayman appears to have a slightly smaller limit, see the
                 * value of CM_R_0286FC_SPI_LDS_MGMT.NUM_LS_LDS */
                assert(lds_size <= 8160);
        }

        r600_write_compute_context_reg(cs, CM_R_0288E8_SQ_LDS_ALLOC,
                                        lds_size | (num_waves << 14));

        /* Dispatch packet */
        radeon_emit(cs, PKT3C(PKT3_DISPATCH_DIRECT, 3, 0));
        radeon_emit(cs, grid_layout[0]);
        radeon_emit(cs, grid_layout[1]);
        radeon_emit(cs, grid_layout[2]);
        /* VGT_DISPATCH_INITIATOR = COMPUTE_SHADER_EN */
        radeon_emit(cs, 1);
}
static void compute_emit_cs(struct r600_context *ctx, const uint *block_layout,
                const uint *grid_layout)
{
        struct radeon_winsys_cs *cs = ctx->b.rings.gfx.cs;
        int i;

        /* make sure the gfx ring is the only active one */
        if (ctx->b.rings.dma.cs) {
                ctx->b.rings.dma.flush(ctx, RADEON_FLUSH_ASYNC);
        }

        /* Initialize all the compute-related registers.
         *
         * See evergreen_init_atom_start_compute_cs() in this file for the list
         * of registers initialized by the start_compute_cs_cmd atom.
         */
        r600_emit_command_buffer(cs, &ctx->start_compute_cs_cmd);

        ctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE | R600_CONTEXT_FLUSH_AND_INV;
        r600_flush_emit(ctx);

        /* Emit colorbuffers. */
        /* XXX support more than 8 colorbuffers (the offsets are not a multiple of 0x3C for CB8-11) */
        for (i = 0; i < 8 && i < ctx->framebuffer.state.nr_cbufs; i++) {
                struct r600_surface *cb = (struct r600_surface *)ctx->framebuffer.state.cbufs[i];
                unsigned reloc = r600_context_bo_reloc(&ctx->b, &ctx->b.rings.gfx,
                                                       (struct r600_resource *)cb->base.texture,
                                                       RADEON_USAGE_READWRITE);

                r600_write_compute_context_reg_seq(cs, R_028C60_CB_COLOR0_BASE + i * 0x3C, 7);
                radeon_emit(cs, cb->cb_color_base);   /* R_028C60_CB_COLOR0_BASE */
                radeon_emit(cs, cb->cb_color_pitch);  /* R_028C64_CB_COLOR0_PITCH */
                radeon_emit(cs, cb->cb_color_slice);  /* R_028C68_CB_COLOR0_SLICE */
                radeon_emit(cs, cb->cb_color_view);   /* R_028C6C_CB_COLOR0_VIEW */
                radeon_emit(cs, cb->cb_color_info);   /* R_028C70_CB_COLOR0_INFO */
                radeon_emit(cs, cb->cb_color_attrib); /* R_028C74_CB_COLOR0_ATTRIB */
                radeon_emit(cs, cb->cb_color_dim);    /* R_028C78_CB_COLOR0_DIM */

                radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C60_CB_COLOR0_BASE */
                radeon_emit(cs, reloc);

                if (!ctx->keep_tiling_flags) {
                        radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C70_CB_COLOR0_INFO */
                        radeon_emit(cs, reloc);
                }

                radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C74_CB_COLOR0_ATTRIB */
                radeon_emit(cs, reloc);
        }
        if (ctx->keep_tiling_flags) {
                for (; i < 8; i++) {
                        r600_write_compute_context_reg(cs, R_028C70_CB_COLOR0_INFO + i * 0x3C,
                                                       S_028C70_FORMAT(V_028C70_COLOR_INVALID));
                }
                for (; i < 12; i++) {
                        r600_write_compute_context_reg(cs, R_028E50_CB_COLOR8_INFO + (i - 8) * 0x1C,
                                                       S_028C70_FORMAT(V_028C70_COLOR_INVALID));
                }
        }

        /* Set CB_TARGET_MASK  XXX: Use cb_misc_state */
        r600_write_compute_context_reg(cs, R_028238_CB_TARGET_MASK,
                                        ctx->compute_cb_target_mask);

        /* Emit vertex buffer state */
        ctx->cs_vertex_buffer_state.atom.num_dw = 12 * util_bitcount(ctx->cs_vertex_buffer_state.dirty_mask);
        r600_emit_atom(ctx, &ctx->cs_vertex_buffer_state.atom);

        /* Emit constant buffer state */
        r600_emit_atom(ctx, &ctx->constbuf_state[PIPE_SHADER_COMPUTE].atom);

        /* Emit compute shader state */
        r600_emit_atom(ctx, &ctx->cs_shader_state.atom);

        /* Emit dispatch state and dispatch packet */
        evergreen_emit_direct_dispatch(ctx, block_layout, grid_layout);

        /* XXX evergreen_flush_emit() hardcodes the CP_COHER_SIZE to
         * 0xffffffff
         */
        ctx->b.flags |= R600_CONTEXT_INV_CONST_CACHE |
                        R600_CONTEXT_INV_VERTEX_CACHE |
                        R600_CONTEXT_INV_TEX_CACHE;
        r600_flush_emit(ctx);
        ctx->b.flags = 0;

        COMPUTE_DBG(ctx->screen, "cdw: %i\n", cs->cdw);
        for (i = 0; i < cs->cdw; i++) {
                COMPUTE_DBG(ctx->screen, "%4i : 0x%08X\n", i, cs->buf[i]);
        }
}
/**
 * Emit function for the r600_cs_shader_state atom
 */
void evergreen_emit_cs_shader(
                struct r600_context *rctx,
                struct r600_atom *atom)
{
        struct r600_cs_shader_state *state =
                (struct r600_cs_shader_state *)atom;
        struct r600_pipe_compute *shader = state->shader;
        struct r600_kernel *kernel = &shader->kernels[state->kernel_index];
        struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
        uint64_t va;

        va = r600_resource_va(&rctx->screen->b.b, &kernel->code_bo->b.b);

        r600_write_compute_context_reg_seq(cs, R_0288D0_SQ_PGM_START_LS, 3);
        radeon_emit(cs, va >> 8); /* R_0288D0_SQ_PGM_START_LS */
        radeon_emit(cs,           /* R_0288D4_SQ_PGM_RESOURCES_LS */
                        S_0288D4_NUM_GPRS(kernel->bc.ngpr)
                        | S_0288D4_STACK_SIZE(kernel->bc.nstack));
        radeon_emit(cs, 0);       /* R_0288D8_SQ_PGM_RESOURCES_LS_2 */

        radeon_emit(cs, PKT3C(PKT3_NOP, 0, 0));
        radeon_emit(cs, r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx,
                        kernel->code_bo, RADEON_USAGE_READ));
}
static void evergreen_launch_grid(
                struct pipe_context *ctx_,
                const uint *block_layout, const uint *grid_layout,
                uint32_t pc, const void *input)
{
        struct r600_context *ctx = (struct r600_context *)ctx_;
        struct r600_pipe_compute *shader = ctx->cs_shader_state.shader;
        struct r600_kernel *kernel = &shader->kernels[pc];

        COMPUTE_DBG(ctx->screen, "*** evergreen_launch_grid: pc = %u\n", pc);

#ifdef HAVE_OPENCL
        if (!kernel->code_bo) {
                void *p;
                struct r600_bytecode *bc = &kernel->bc;
                LLVMModuleRef mod = kernel->llvm_module;
                boolean use_kill = false;
                bool dump = (ctx->screen->b.debug_flags & DBG_CS) != 0;
                unsigned use_sb = ctx->screen->b.debug_flags & DBG_SB_CS;
                unsigned sb_disasm = use_sb ||
                        (ctx->screen->b.debug_flags & DBG_SB_DISASM);

                r600_bytecode_init(bc, ctx->b.chip_class, ctx->b.family,
                                ctx->screen->has_compressed_msaa_texturing);
                bc->type = TGSI_PROCESSOR_COMPUTE;
                bc->isa = ctx->isa;
                r600_llvm_compile(mod, ctx->b.family, bc, &use_kill, dump);

                if (dump && !sb_disasm) {
                        r600_bytecode_disasm(bc);
                } else if ((dump && sb_disasm) || use_sb) {
                        if (r600_sb_bytecode_process(ctx, bc, NULL, dump, use_sb))
                                R600_ERR("r600_sb_bytecode_process failed!\n");
                }

                kernel->code_bo = r600_compute_buffer_alloc_vram(ctx->screen,
                                                        kernel->bc.ndw * 4);
                p = r600_buffer_map_sync_with_rings(&ctx->b, kernel->code_bo,
                                                        PIPE_TRANSFER_WRITE);
                memcpy(p, kernel->bc.bytecode, kernel->bc.ndw * 4);
                ctx->b.ws->buffer_unmap(kernel->code_bo->cs_buf);
        }
#endif

        shader->active_kernel = kernel;
        ctx->cs_shader_state.kernel_index = pc;
        evergreen_compute_upload_input(ctx_, block_layout, grid_layout, input);
        compute_emit_cs(ctx, block_layout, grid_layout);
}
static void evergreen_set_compute_resources(struct pipe_context *ctx_,
                unsigned start, unsigned count,
                struct pipe_surface **surfaces)
{
        struct r600_context *ctx = (struct r600_context *)ctx_;
        struct r600_surface **resources = (struct r600_surface **)surfaces;

        COMPUTE_DBG(ctx->screen, "*** evergreen_set_compute_resources: start = %u count = %u\n",
                        start, count);

        for (int i = 0; i < count; i++) {
                /* The first two vertex buffers are reserved for parameters
                 * and global buffers. */
                unsigned vtx_id = 2 + i;
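                /* E.g. (illustrative): resources[0] lands in VTX2, since VTX0
                 * holds the kernel parameters and VTX1 the global buffer
                 * pool. */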
                if (resources[i]) {
                        struct r600_resource_global *buffer =
                                (struct r600_resource_global *)
                                resources[i]->base.texture;
                        if (resources[i]->base.writable) {
                                assert(i + 1 < 12);

                                evergreen_set_rat(ctx->cs_shader_state.shader, i + 1,
                                        (struct r600_resource *)resources[i]->base.texture,
                                        buffer->chunk->start_in_dw * 4,
                                        resources[i]->base.texture->width0);
                        }

                        evergreen_cs_set_vertex_buffer(ctx, vtx_id,
                                        buffer->chunk->start_in_dw * 4,
                                        resources[i]->base.texture);
                }
        }
}
void evergreen_set_cs_sampler_view(struct pipe_context *ctx_,
                unsigned start_slot, unsigned count,
                struct pipe_sampler_view **views)
{
        struct r600_pipe_sampler_view **resource =
                (struct r600_pipe_sampler_view **)views;

        for (int i = 0; i < count; i++) {
                if (resource[i]) {
                        assert(i + 1 < 12);
                        /* XXX: Implement */
                        assert(!"Compute samplers not implemented.");
                        /* FETCH0 = VTX0 (param buffer),
                         * FETCH1 = VTX1 (global buffer pool),
                         * FETCH2... = TEX */
                }
        }
}
static void evergreen_set_global_binding(
        struct pipe_context *ctx_, unsigned first, unsigned n,
        struct pipe_resource **resources,
        uint32_t **handles)
{
        struct r600_context *ctx = (struct r600_context *)ctx_;
        struct compute_memory_pool *pool = ctx->screen->global_pool;
        struct r600_resource_global **buffers =
                (struct r600_resource_global **)resources;

        COMPUTE_DBG(ctx->screen, "*** evergreen_set_global_binding first = %u n = %u\n",
                        first, n);

        if (!resources) {
                /* XXX: Unset */
                return;
        }

        compute_memory_finalize_pending(pool, ctx_);

        for (int i = 0; i < n; i++) {
                assert(resources[i]->target == PIPE_BUFFER);
                assert(resources[i]->bind & PIPE_BIND_GLOBAL);

                *(handles[i]) = buffers[i]->chunk->start_in_dw * 4;
        }

        evergreen_set_rat(ctx->cs_shader_state.shader, 0, pool->bo, 0,
                        pool->size_in_dw * 4);
        evergreen_cs_set_vertex_buffer(ctx, 1, 0,
                        (struct pipe_resource *)pool->bo);
}
/**
 * This function initializes all the compute specific registers that need to
 * be initialized for each compute command stream. Registers that are common
 * to both compute and 3D will be initialized at the beginning of each compute
 * command stream by the start_cs_cmd atom. However, since the SET_CONTEXT_REG
 * packet requires that the shader type bit be set, we must initialize all
 * context registers needed for compute in this function. The registers
 * initialized by the start_cs_cmd atom can be found in evergreen_state.c in
 * the functions evergreen_init_atom_start_cs or cayman_init_atom_start_cs,
 * depending on the GPU family.
 */
void evergreen_init_atom_start_compute_cs(struct r600_context *ctx)
{
        struct r600_command_buffer *cb = &ctx->start_compute_cs_cmd;
        int num_threads;
        int num_stack_entries;
        /* since all required registers are initialised in the
         * start_compute_cs_cmd atom, we can EMIT_EARLY here.
         */
        r600_init_command_buffer(cb, 256);
        cb->pkt_flags = RADEON_CP_PACKET3_COMPUTE_MODE;

        /* This must be first. */
        r600_store_value(cb, PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
        r600_store_value(cb, 0x80000000);
        r600_store_value(cb, 0x80000000);

        /* We're setting config registers here. */
        r600_store_value(cb, PKT3(PKT3_EVENT_WRITE, 0, 0));
        r600_store_value(cb, EVENT_TYPE(EVENT_TYPE_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));

        switch (ctx->b.family) {
        case CHIP_CEDAR:
        default:
                num_threads = 128;
                num_stack_entries = 256;
                break;
        case CHIP_REDWOOD:
                num_threads = 128;
                num_stack_entries = 256;
                break;
        case CHIP_JUNIPER:
                num_threads = 128;
                num_stack_entries = 512;
                break;
        case CHIP_CYPRESS:
        case CHIP_HEMLOCK:
                num_threads = 128;
                num_stack_entries = 512;
                break;
        case CHIP_PALM:
                num_threads = 128;
                num_stack_entries = 256;
                break;
        case CHIP_SUMO:
                num_threads = 128;
                num_stack_entries = 256;
                break;
        case CHIP_SUMO2:
                num_threads = 128;
                num_stack_entries = 512;
                break;
        case CHIP_BARTS:
                num_threads = 128;
                num_stack_entries = 512;
                break;
        case CHIP_TURKS:
                num_threads = 128;
                num_stack_entries = 256;
                break;
        case CHIP_CAICOS:
                num_threads = 128;
                num_stack_entries = 256;
                break;
        }
        /* Config Registers */
        if (ctx->b.chip_class < CAYMAN)
                evergreen_init_common_regs(cb, ctx->b.chip_class, ctx->b.family,
                                           ctx->screen->b.info.drm_minor);
        else
                cayman_init_common_regs(cb, ctx->b.chip_class, ctx->b.family,
                                        ctx->screen->b.info.drm_minor);
        /* The primitive type always needs to be POINTLIST for compute. */
        r600_store_config_reg(cb, R_008958_VGT_PRIMITIVE_TYPE,
                                                V_008958_DI_PT_POINTLIST);

        if (ctx->b.chip_class < CAYMAN) {

                /* These registers control which simds can be used by each
                 * stage. The default for these registers is 0xffffffff, which
                 * means all simds are available for each stage. It's possible
                 * we may want to play around with these in the future, but
                 * for now the default value is fine.
                 *
                 * R_008E20_SQ_STATIC_THREAD_MGMT1
                 * R_008E24_SQ_STATIC_THREAD_MGMT2
                 * R_008E28_SQ_STATIC_THREAD_MGMT3
                 */

                /* XXX: We may need to adjust the thread and stack resource
                 * values for 3D/compute interop */

                r600_store_config_reg_seq(cb, R_008C18_SQ_THREAD_RESOURCE_MGMT_1, 5);

                /* R_008C18_SQ_THREAD_RESOURCE_MGMT_1
                 * Set the number of threads used by the PS/VS/GS/ES stage to
                 * 0. */
                r600_store_value(cb, 0);

                /* R_008C1C_SQ_THREAD_RESOURCE_MGMT_2
                 * Set the number of threads used by the CS (aka LS) stage to
                 * the maximum number of threads and set the number of threads
                 * for the HS stage to 0. */
                r600_store_value(cb, S_008C1C_NUM_LS_THREADS(num_threads));

                /* R_008C20_SQ_STACK_RESOURCE_MGMT_1
                 * Set the Control Flow stack entries to 0 for PS/VS stages */
                r600_store_value(cb, 0);

                /* R_008C24_SQ_STACK_RESOURCE_MGMT_2
                 * Set the Control Flow stack entries to 0 for GS/ES stages */
                r600_store_value(cb, 0);

                /* R_008C28_SQ_STACK_RESOURCE_MGMT_3
                 * Set the Control Flow stack entries to 0 for the HS stage,
                 * and set it to the maximum value for the CS (aka LS) stage. */
                r600_store_value(cb,
                        S_008C28_NUM_LS_STACK_ENTRIES(num_stack_entries));
        }
        /* Give the compute shader all the available LDS space.
         * NOTE: This only sets the maximum number of dwords that a compute
         * shader can allocate. When a shader is executed, we still need to
         * allocate the appropriate amount of LDS dwords using the
         * CM_R_0288E8_SQ_LDS_ALLOC register.
         */
        if (ctx->b.chip_class < CAYMAN) {
                r600_store_config_reg(cb, R_008E2C_SQ_LDS_RESOURCE_MGMT,
                        S_008E2C_NUM_PS_LDS(0x0000) | S_008E2C_NUM_LS_LDS(8192));
        } else {
                r600_store_context_reg(cb, CM_R_0286FC_SPI_LDS_MGMT,
                        S_0286FC_NUM_PS_LDS(0) |
                        S_0286FC_NUM_LS_LDS(255)); /* 255 * 32 = 8160 dwords */
        }
        /* Context Registers */

        if (ctx->b.chip_class < CAYMAN) {
                /* workaround for hw issues with dyn gpr - must set all limits
                 * to 240 instead of 0, 0x1e == 240 / 8
                 */
                r600_store_context_reg(cb, R_028838_SQ_DYN_GPR_RESOURCE_LIMIT_1,
                                S_028838_PS_GPRS(0x1e) |
                                S_028838_VS_GPRS(0x1e) |
                                S_028838_GS_GPRS(0x1e) |
                                S_028838_ES_GPRS(0x1e) |
                                S_028838_HS_GPRS(0x1e) |
                                S_028838_LS_GPRS(0x1e));
        }

        /* XXX: Investigate setting bit 15, which is FAST_COMPUTE_MODE */
        r600_store_context_reg(cb, R_028A40_VGT_GS_MODE,
                S_028A40_COMPUTE_MODE(1) | S_028A40_PARTIAL_THD_AT_EOI(1));

        r600_store_context_reg(cb, R_028B54_VGT_SHADER_STAGES_EN, 2 /* CS_ON */);

        r600_store_context_reg(cb, R_0286E8_SPI_COMPUTE_INPUT_CNTL,
                        S_0286E8_TID_IN_GROUP_ENA
                        | S_0286E8_WAVE_ID_IN_GROUP_ENA
                        | S_0286E8_DISABLE_INDEX_PACK);
        /* The LOOP_CONST registers are an optimization for loops that allows
         * you to store the initial counter, increment value, and maximum
         * counter value in a register so that the hardware can calculate the
         * correct number of iterations for the loop, so that you don't need
         * to have the loop counter in your shader code. We don't currently
         * use this optimization, so we must keep track of the counter in the
         * shader and use a break instruction to exit loops. However, the
         * hardware will still use this register to determine when to exit a
         * loop, so we need to initialize the counter to 0, set the increment
         * value to 1, and set the maximum counter value to 4095 (0xfff),
         * which is the maximum value allowed. This gives us a maximum of 4096
         * iterations for our loops, but hopefully our break instruction will
         * execute some time before the 4096th iteration.
         */
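        /* Decoding the value stored below (assuming the usual SQ_LOOP_CONST
         * packing: count in bits 0-11, init in bits 12-23, increment in bits
         * 24-31): 0x1000FFF = increment 1, initial counter 0, max count
         * 0xFFF. */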
        eg_store_loop_const(cb, R_03A200_SQ_LOOP_CONST_0 + (160 * 4), 0x1000FFF);
}
void evergreen_init_compute_state_functions(struct r600_context *ctx)
{
        ctx->b.b.create_compute_state = evergreen_create_compute_state;
        ctx->b.b.delete_compute_state = evergreen_delete_compute_state;
        ctx->b.b.bind_compute_state = evergreen_bind_compute_state;
//      ctx->context.create_sampler_view = evergreen_compute_create_sampler_view;
        ctx->b.b.set_compute_resources = evergreen_set_compute_resources;
        ctx->b.b.set_global_binding = evergreen_set_global_binding;
        ctx->b.b.launch_grid = evergreen_launch_grid;

        /* We always use at least one vertex buffer for parameters (id = 1) */
        ctx->cs_vertex_buffer_state.enabled_mask =
                ctx->cs_vertex_buffer_state.dirty_mask = 0x2;
}
struct pipe_resource *r600_compute_global_buffer_create(
        struct pipe_screen *screen,
        const struct pipe_resource *templ)
{
        struct r600_resource_global *result = NULL;
        struct r600_screen *rscreen = NULL;
        int size_in_dw = 0;

        assert(templ->target == PIPE_BUFFER);
        assert(templ->bind & PIPE_BIND_GLOBAL);
        assert(templ->array_size == 1 || templ->array_size == 0);
        assert(templ->depth0 == 1 || templ->depth0 == 0);
        assert(templ->height0 == 1 || templ->height0 == 0);

        result = (struct r600_resource_global *)
                CALLOC(sizeof(struct r600_resource_global), 1);
        rscreen = (struct r600_screen *)screen;

        COMPUTE_DBG(rscreen, "*** r600_compute_global_buffer_create\n");
        COMPUTE_DBG(rscreen, "width = %u array_size = %u\n", templ->width0,
                        templ->array_size);

        result->base.b.vtbl = &r600_global_buffer_vtbl;
        result->base.b.b = *templ;
        result->base.b.b.screen = screen;
        pipe_reference_init(&result->base.b.b.reference, 1);

        /* Round the size in bytes up to a whole number of dwords. */
        size_in_dw = (templ->width0 + 3) / 4;
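        /* E.g. (illustrative): width0 = 10 bytes yields (10 + 3) / 4 = 3 dwords. */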
        result->chunk = compute_memory_alloc(rscreen->global_pool, size_in_dw);

        if (result->chunk == NULL) {
                free(result);
                return NULL;
        }

        return &result->base.b.b;
}
void r600_compute_global_buffer_destroy(
        struct pipe_screen *screen,
        struct pipe_resource *res)
{
        struct r600_resource_global *buffer = NULL;
        struct r600_screen *rscreen = NULL;

        assert(res->target == PIPE_BUFFER);
        assert(res->bind & PIPE_BIND_GLOBAL);

        buffer = (struct r600_resource_global *)res;
        rscreen = (struct r600_screen *)screen;

        compute_memory_free(rscreen->global_pool, buffer->chunk->id);

        buffer->chunk = NULL;
        free(res);
}
void *r600_compute_global_transfer_map(
        struct pipe_context *ctx_,
        struct pipe_resource *resource,
        unsigned level,
        unsigned usage,
        const struct pipe_box *box,
        struct pipe_transfer **ptransfer)
{
        struct r600_context *rctx = (struct r600_context *)ctx_;
        struct compute_memory_pool *pool = rctx->screen->global_pool;
        struct r600_resource_global *buffer =
                (struct r600_resource_global *)resource;

        COMPUTE_DBG(rctx->screen, "* r600_compute_global_transfer_map()\n"
                        "level = %u, usage = %u, box(x = %u, y = %u, z = %u "
                        "width = %u, height = %u, depth = %u)\n", level, usage,
                        box->x, box->y, box->z, box->width, box->height,
                        box->depth);
        COMPUTE_DBG(rctx->screen, "Buffer id = %u offset = "
                        "%u (box.x)\n", buffer->chunk->id, box->x);

        compute_memory_finalize_pending(pool, ctx_);

        assert(resource->target == PIPE_BUFFER);
        assert(resource->bind & PIPE_BIND_GLOBAL);
        /* TODO: do it better; mapping is not possible if the pool is too big */
        return pipe_buffer_map_range(ctx_,
                        (struct pipe_resource *)buffer->chunk->pool->bo,
                        box->x + (buffer->chunk->start_in_dw * 4),
                        box->width, usage, ptransfer);
}
void r600_compute_global_transfer_unmap(
        struct pipe_context *ctx_,
        struct pipe_transfer *transfer)
{
        /* struct r600_resource_global are not real resources, they just map
         * to an offset within the compute memory pool. The function
         * r600_compute_global_transfer_map() maps the memory pool
         * resource rather than the struct r600_resource_global passed to
         * it as an argument, and then initializes ptransfer->resource with
         * the memory pool resource (via pipe_buffer_map_range).
         * When transfer_unmap is called, it uses the memory pool's
         * vtable, which calls r600_buffer_transfer_unmap() rather than
         * this function.
         */
        assert(!"This function should not be called");
}
void r600_compute_global_transfer_flush_region(
        struct pipe_context *ctx_,
        struct pipe_transfer *transfer,
        const struct pipe_box *box)
{
        assert(0 && "TODO");
}
void r600_compute_global_transfer_inline_write(
        struct pipe_context *pipe,
        struct pipe_resource *resource,
        unsigned level,
        unsigned usage,
        const struct pipe_box *box,
        const void *data,
        unsigned stride,
        unsigned layer_stride)
{
        assert(0 && "TODO");
}