/*
 * Copyright 2011 Adam Rak <adam.rak@streamnovation.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Adam Rak <adam.rak@streamnovation.com>
 */
#include "pipe/p_defines.h"
#include "pipe/p_state.h"
#include "pipe/p_context.h"
#include "util/u_blitter.h"
#include "util/u_double_list.h"
#include "util/u_transfer.h"
#include "util/u_surface.h"
#include "util/u_pack_color.h"
#include "util/u_memory.h"
#include "util/u_inlines.h"
#include "util/u_framebuffer.h"
#include "pipebuffer/pb_buffer.h"

#include "evergreend.h"
#include "r600_resource.h"
#include "r600_shader.h"
#include "r600_pipe.h"
#include "r600_formats.h"
#include "evergreen_compute.h"
#include "r600_hw_context_priv.h"
#include "evergreen_compute_internal.h"
#include "compute_memory_pool.h"
#ifdef HAVE_OPENCL
#include "llvm_wrapper.h"
#endif
/*
RAT0 is for global binding write
VTX1 is for global binding read

for writing images RAT1...
for reading images TEX2...
  TEX2... consumes the same fetch resources that VTX2... would consume

CONST0 and VTX0 are for parameters
  CONST0 is binding smaller input parameter buffer, and for constant indexing,
  also constant cached
  VTX0 is for indirect/non-constant indexing, or if the input is bigger than
  the constant cache can handle

RATs are limited to 12, so we can only bind at most 11 textures for writing
because we reserve RAT0 for global bindings. With byte addressing enabled,
we should reserve another one too => 10 image bindings for writing max.

from Nvidia OpenCL:
  CL_DEVICE_MAX_READ_IMAGE_ARGS:        128
  CL_DEVICE_MAX_WRITE_IMAGE_ARGS:       8

so 10 for writing is enough. 176 is the max for reading according to the docs

writable images should be listed first < 10, so their id corresponds to RAT(id+1)
writable images will consume TEX slots, VTX slots too because of linear indexing
*/
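
/* Illustrative sketch of the slot arithmetic described above. These helper
 * names are hypothetical (the driver does the same math inline in
 * evergreen_set_compute_resources() and evergreen_set_cs_sampler_view()). */
static inline unsigned rat_slot_for_writable_image(unsigned image_id)
{
	/* RAT0 is reserved for the global buffer pool, so image N uses RAT(N+1). */
	return image_id + 1;
}

static inline unsigned vtx_slot_for_user_buffer(unsigned buffer_id)
{
	/* VTX0 holds the kernel parameters and VTX1 the global pool, so user
	 * buffers start at fetch slot 2. */
	return buffer_id + 2;
}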
static void evergreen_cs_set_vertex_buffer(
	struct r600_context * rctx,
	unsigned vb_index,
	unsigned offset,
	struct pipe_resource * buffer)
{
	struct r600_vertexbuf_state *state = &rctx->cs_vertex_buffer_state;
	struct pipe_vertex_buffer *vb = &state->vb[vb_index];
	vb->stride = 1;
	vb->buffer_offset = offset;
	vb->buffer = buffer;
	vb->user_buffer = NULL;

	/* The vertex instructions in the compute shaders use the texture cache,
	 * so we need to invalidate it. */
	rctx->flags |= R600_CONTEXT_TEX_FLUSH;
	state->enabled_mask |= 1 << vb_index;
	state->dirty_mask |= 1 << vb_index;
	state->atom.dirty = true;
}
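
/* Call sketch for the helper above (IDs taken from the call sites later in
 * this file): slot 0 carries the kernel parameter buffer and slot 1 the
 * global memory pool, e.g.
 *	evergreen_cs_set_vertex_buffer(rctx, 0, 0, kernel_param_buf);
 *	evergreen_cs_set_vertex_buffer(rctx, 1, 0, pool_buf);
 * where kernel_param_buf/pool_buf are hypothetical names; user buffers then
 * start at slot 2. */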
static const struct u_resource_vtbl r600_global_buffer_vtbl =
{
	u_default_resource_get_handle, /* get_handle */
	r600_compute_global_buffer_destroy, /* resource_destroy */
	r600_compute_global_transfer_map, /* transfer_map */
	r600_compute_global_transfer_flush_region,/* transfer_flush_region */
	r600_compute_global_transfer_unmap, /* transfer_unmap */
	r600_compute_global_transfer_inline_write /* transfer_inline_write */
};
void *evergreen_create_compute_state(
	struct pipe_context *ctx_,
	const struct pipe_compute_state *cso)
{
	struct r600_context *ctx = (struct r600_context *)ctx_;
	struct r600_pipe_compute *shader = CALLOC_STRUCT(r600_pipe_compute);
#ifdef HAVE_OPENCL
	const struct pipe_llvm_program_header * header;
	const unsigned char * code;
	unsigned i;
#endif

	COMPUTE_DBG("*** evergreen_create_compute_state\n");

#ifdef HAVE_OPENCL
	header = cso->prog;
	code = cso->prog + sizeof(struct pipe_llvm_program_header);
#endif

	shader->ctx = (struct r600_context*)ctx;
	shader->resources = (struct evergreen_compute_resource*)
			CALLOC(sizeof(struct evergreen_compute_resource),
			get_compute_resource_num());
	shader->local_size = cso->req_local_mem; ///TODO: assert it
	shader->private_size = cso->req_private_mem;
	shader->input_size = cso->req_input_mem;

#ifdef HAVE_OPENCL
	shader->num_kernels = llvm_get_num_kernels(code, header->num_bytes);
	shader->kernels = CALLOC(sizeof(struct r600_kernel), shader->num_kernels);

	for (i = 0; i < shader->num_kernels; i++) {
		struct r600_kernel *kernel = &shader->kernels[i];
		kernel->llvm_module = llvm_get_kernel_module(i, code,
							header->num_bytes);
	}
#endif
	return shader;
}
void evergreen_delete_compute_state(struct pipe_context *ctx, void* state)
{
	struct r600_pipe_compute *shader = (struct r600_pipe_compute *)state;

	free(shader->resources);
	free(shader);
}
static void evergreen_bind_compute_state(struct pipe_context *ctx_, void *state)
{
	struct r600_context *ctx = (struct r600_context *)ctx_;

	COMPUTE_DBG("*** evergreen_bind_compute_state\n");

	ctx->cs_shader_state.shader = (struct r600_pipe_compute *)state;
}
/* The kernel parameters are stored in a vtx buffer (ID=0). Besides the
 * explicit kernel parameters, there are implicit parameters that need to
 * be stored in the vertex buffer as well. Here is how these parameters are
 * organized in the buffer:
 *
 * DWORDS 0-2: Number of work groups in each dimension (x,y,z)
 * DWORDS 3-5: Number of global work items in each dimension (x,y,z)
 * DWORDS 6-8: Number of work items within each work group in each dimension
 *             (x,y,z)
 * DWORDS 9+ : Kernel parameters
 */
void evergreen_compute_upload_input(
	struct pipe_context *ctx_,
	const uint *block_layout,
	const uint *grid_layout,
	const void *input)
{
	struct r600_context *ctx = (struct r600_context *)ctx_;
	struct r600_pipe_compute *shader = ctx->cs_shader_state.shader;
	int i;
	unsigned kernel_parameters_offset_bytes = 36;
	uint32_t * num_work_groups_start;
	uint32_t * global_size_start;
	uint32_t * local_size_start;
	uint32_t * kernel_parameters_start;

	if (shader->input_size == 0) {
		return;
	}

	if (!shader->kernel_param) {
		unsigned buffer_size = shader->input_size;

		/* Add space for the grid dimensions */
		buffer_size += kernel_parameters_offset_bytes * sizeof(uint);
		shader->kernel_param = r600_compute_buffer_alloc_vram(
						ctx->screen, buffer_size);
	}

	num_work_groups_start = ctx->ws->buffer_map(
		shader->kernel_param->cs_buf, ctx->cs, PIPE_TRANSFER_WRITE);
	global_size_start = num_work_groups_start + (3 * (sizeof(uint) / 4));
	local_size_start = global_size_start + (3 * (sizeof(uint)) / 4);
	kernel_parameters_start = local_size_start + (3 * (sizeof(uint)) / 4);

	/* Copy the number of work groups in each dimension */
	memcpy(num_work_groups_start, grid_layout, 3 * sizeof(uint));

	/* Copy the global size */
	for (i = 0; i < 3; i++) {
		global_size_start[i] = grid_layout[i] * block_layout[i];
	}

	/* Copy the local dimensions */
	memcpy(local_size_start, block_layout, 3 * sizeof(uint));

	/* Copy the kernel inputs */
	memcpy(kernel_parameters_start, input, shader->input_size);

	for (i = 0; i < (kernel_parameters_offset_bytes / 4) +
			(shader->input_size / 4); i++) {
		COMPUTE_DBG("input %i : %i\n", i,
			((unsigned*)num_work_groups_start)[i]);
	}

	ctx->ws->buffer_unmap(shader->kernel_param->cs_buf);

	///ID=0 is reserved for the parameters
	evergreen_cs_set_vertex_buffer(ctx, 0, 0,
			(struct pipe_resource*)shader->kernel_param);
	///ID=0 is reserved for parameters
	evergreen_set_const_cache(shader, 0, shader->kernel_param,
						shader->input_size, 0);
}
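
/* Worked example of the layout built above (hypothetical launch values):
 * a grid_layout of {4, 2, 1} with a block_layout of {8, 8, 1} and 8 bytes
 * of kernel arguments produces
 *	DWORDS 0-2 = 4, 2, 1     (number of work groups)
 *	DWORDS 3-5 = 32, 16, 1   (global work items = grid * block)
 *	DWORDS 6-8 = 8, 8, 1     (local size)
 *	DWORDS 9+  = the 8 bytes of kernel arguments
 */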
static void evergreen_emit_direct_dispatch(
		struct r600_context *rctx,
		const uint *block_layout, const uint *grid_layout)
{
	int i;
	struct radeon_winsys_cs *cs = rctx->cs;
	unsigned num_waves;
	unsigned num_pipes = rctx->screen->info.r600_max_pipes;
	unsigned wave_divisor = (16 * num_pipes);
	int group_size = 1;
	int grid_size = 1;
	/* XXX: Enable lds and get size from cs_shader_state */
	unsigned lds_size = 0;

	/* Calculate group_size/grid_size */
	for (i = 0; i < 3; i++) {
		group_size *= block_layout[i];
	}

	for (i = 0; i < 3; i++) {
		grid_size *= grid_layout[i];
	}

	/* num_waves = ceil((tg_size.x * tg_size.y * tg_size.z) / (16 * num_pipes)) */
	num_waves = (block_layout[0] * block_layout[1] * block_layout[2] +
			wave_divisor - 1) / wave_divisor;

	COMPUTE_DBG("Using %u pipes, there are %u wavefronts per thread block\n",
							num_pipes, num_waves);

	/* XXX: Partition the LDS between PS/CS. By default half (4096 dwords
	 * on Evergreen) goes to Pixel Shaders and half goes to Compute Shaders.
	 * We may need to allocate the entire LDS space for Compute Shaders.
	 *
	 * EG: R_008E2C_SQ_LDS_RESOURCE_MGMT := S_008E2C_NUM_LS_LDS(lds_dwords)
	 * CM: CM_R_0286FC_SPI_LDS_MGMT := S_0286FC_NUM_LS_LDS(lds_dwords)
	 */

	r600_write_config_reg(cs, R_008970_VGT_NUM_INDICES, group_size);

	r600_write_config_reg_seq(cs, R_00899C_VGT_COMPUTE_START_X, 3);
	r600_write_value(cs, 0); /* R_00899C_VGT_COMPUTE_START_X */
	r600_write_value(cs, 0); /* R_0089A0_VGT_COMPUTE_START_Y */
	r600_write_value(cs, 0); /* R_0089A4_VGT_COMPUTE_START_Z */

	r600_write_config_reg(cs, R_0089AC_VGT_COMPUTE_THREAD_GROUP_SIZE,
								group_size);

	r600_write_compute_context_reg_seq(cs, R_0286EC_SPI_COMPUTE_NUM_THREAD_X, 3);
	r600_write_value(cs, block_layout[0]); /* R_0286EC_SPI_COMPUTE_NUM_THREAD_X */
	r600_write_value(cs, block_layout[1]); /* R_0286F0_SPI_COMPUTE_NUM_THREAD_Y */
	r600_write_value(cs, block_layout[2]); /* R_0286F4_SPI_COMPUTE_NUM_THREAD_Z */

	r600_write_compute_context_reg(cs, CM_R_0288E8_SQ_LDS_ALLOC,
					lds_size | (num_waves << 14));

	/* Dispatch packet */
	r600_write_value(cs, PKT3C(PKT3_DISPATCH_DIRECT, 3, 0));
	r600_write_value(cs, grid_layout[0]);
	r600_write_value(cs, grid_layout[1]);
	r600_write_value(cs, grid_layout[2]);
	/* VGT_DISPATCH_INITIATOR = COMPUTE_SHADER_EN */
	r600_write_value(cs, 1);
}
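
/* Worked example for the num_waves formula above (hypothetical values):
 * block_layout = {16, 16, 1} gives 256 threads per group; with num_pipes = 8
 * the wave_divisor is 128, so num_waves = (256 + 127) / 128 = 2 wavefronts
 * per thread group. */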
static void compute_emit_cs(struct r600_context *ctx, const uint *block_layout,
		const uint *grid_layout)
{
	struct radeon_winsys_cs *cs = ctx->cs;
	unsigned flush_flags = 0;
	int i;

	struct r600_resource *onebo = NULL;
	struct evergreen_compute_resource *resources =
					ctx->cs_shader_state.shader->resources;

	/* Initialize all the compute-related registers.
	 *
	 * See evergreen_init_atom_start_compute_cs() in this file for the list
	 * of registers initialized by the start_compute_cs_cmd atom.
	 */
	r600_emit_command_buffer(ctx->cs, &ctx->start_compute_cs_cmd);

	ctx->flags |= R600_CONTEXT_CB_FLUSH;
	r600_flush_emit(ctx);

	/* Emit colorbuffers. */
	for (i = 0; i < ctx->framebuffer.state.nr_cbufs; i++) {
		struct r600_surface *cb = (struct r600_surface*)ctx->framebuffer.state.cbufs[i];
		unsigned reloc = r600_context_bo_reloc(ctx, (struct r600_resource*)cb->base.texture,
						       RADEON_USAGE_READWRITE);

		r600_write_compute_context_reg_seq(cs, R_028C60_CB_COLOR0_BASE + i * 0x3C, 7);
		r600_write_value(cs, cb->cb_color_base);	/* R_028C60_CB_COLOR0_BASE */
		r600_write_value(cs, cb->cb_color_pitch);	/* R_028C64_CB_COLOR0_PITCH */
		r600_write_value(cs, cb->cb_color_slice);	/* R_028C68_CB_COLOR0_SLICE */
		r600_write_value(cs, cb->cb_color_view);	/* R_028C6C_CB_COLOR0_VIEW */
		r600_write_value(cs, cb->cb_color_info);	/* R_028C70_CB_COLOR0_INFO */
		r600_write_value(cs, cb->cb_color_attrib);	/* R_028C74_CB_COLOR0_ATTRIB */
		r600_write_value(cs, cb->cb_color_dim);		/* R_028C78_CB_COLOR0_DIM */

		r600_write_value(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C60_CB_COLOR0_BASE */
		r600_write_value(cs, reloc);

		if (!ctx->keep_tiling_flags) {
			r600_write_value(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C70_CB_COLOR0_INFO */
			r600_write_value(cs, reloc);
		}

		r600_write_value(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C74_CB_COLOR0_ATTRIB */
		r600_write_value(cs, reloc);
	}

	/* Set CB_TARGET_MASK  XXX: Use cb_misc_state */
	r600_write_compute_context_reg(cs, R_028238_CB_TARGET_MASK,
					ctx->compute_cb_target_mask);

	/* Emit vertex buffer state */
	ctx->cs_vertex_buffer_state.atom.num_dw = 12 * util_bitcount(ctx->cs_vertex_buffer_state.dirty_mask);
	r600_emit_atom(ctx, &ctx->cs_vertex_buffer_state.atom);

	/* Emit compute shader state */
	r600_emit_atom(ctx, &ctx->cs_shader_state.atom);

	for (i = 0; i < get_compute_resource_num(); i++) {
		if (resources[i].enabled) {
			int j;
			COMPUTE_DBG("resnum: %i, cdw: %i\n", i, cs->cdw);

			for (j = 0; j < resources[i].cs_end; j++) {
				if (resources[i].do_reloc[j]) {
					assert(resources[i].bo);
					evergreen_emit_ctx_reloc(ctx,
						resources[i].bo,
						resources[i].usage);
				}

				cs->buf[cs->cdw++] = resources[i].cs[j];
			}

			if (resources[i].bo) {
				onebo = resources[i].bo;
				evergreen_emit_ctx_reloc(ctx,
					resources[i].bo,
					resources[i].usage);

				///special case for textures
				if (resources[i].do_reloc
					[resources[i].cs_end] == 2) {
					evergreen_emit_ctx_reloc(ctx,
						resources[i].bo,
						resources[i].usage);
				}
			}
		}
	}

	/* Emit dispatch state and dispatch packet */
	evergreen_emit_direct_dispatch(ctx, block_layout, grid_layout);

	/* XXX evergreen_flush_emit() hardcodes the CP_COHER_SIZE to 0xffffffff
	 */
	ctx->flags |= R600_CONTEXT_CB_FLUSH;
	r600_flush_emit(ctx);

#if 0
	COMPUTE_DBG("cdw: %i\n", cs->cdw);
	for (i = 0; i < cs->cdw; i++) {
		COMPUTE_DBG("%4i : 0x%08X\n", i, ctx->cs->buf[i]);
	}
#endif

	flush_flags = RADEON_FLUSH_ASYNC | RADEON_FLUSH_COMPUTE;
	if (ctx->keep_tiling_flags) {
		flush_flags |= RADEON_FLUSH_KEEP_TILING_FLAGS;
	}

	ctx->ws->cs_flush(ctx->cs, flush_flags);

	ctx->pm4_dirty_cdwords = 0;
	ctx->flags = 0;

	COMPUTE_DBG("shader started\n");

	ctx->ws->buffer_wait(onebo->buf, 0);

	COMPUTE_DBG("...\n");

	ctx->streamout_start = TRUE;
	ctx->streamout_append_bitmask = ~0;
}
/**
 * Emit function for r600_cs_shader_state atom
 */
void evergreen_emit_cs_shader(
		struct r600_context *rctx,
		struct r600_atom *atom)
{
	struct r600_cs_shader_state *state =
					(struct r600_cs_shader_state*)atom;
	struct r600_pipe_compute *shader = state->shader;
	struct r600_kernel *kernel = &shader->kernels[state->kernel_index];
	struct radeon_winsys_cs *cs = rctx->cs;
	uint64_t va;

	va = r600_resource_va(&rctx->screen->screen, &kernel->code_bo->b.b);

	r600_write_compute_context_reg_seq(cs, R_0288D0_SQ_PGM_START_LS, 3);
	r600_write_value(cs, va >> 8); /* R_0288D0_SQ_PGM_START_LS */
	r600_write_value(cs,           /* R_0288D4_SQ_PGM_RESOURCES_LS */
			S_0288D4_NUM_GPRS(kernel->bc.ngpr)
			| S_0288D4_STACK_SIZE(kernel->bc.nstack));
	r600_write_value(cs, 0);	/* R_0288D8_SQ_PGM_RESOURCES_LS_2 */

	r600_write_value(cs, PKT3C(PKT3_NOP, 0, 0));
	r600_write_value(cs, r600_context_bo_reloc(rctx, kernel->code_bo,
							RADEON_USAGE_READ));

	rctx->flags |= R600_CONTEXT_SHADERCONST_FLUSH;
}
static void evergreen_launch_grid(
		struct pipe_context *ctx_,
		const uint *block_layout, const uint *grid_layout,
		uint32_t pc, const void *input)
{
	struct r600_context *ctx = (struct r600_context *)ctx_;

#ifdef HAVE_OPENCL
	COMPUTE_DBG("*** evergreen_launch_grid: pc = %u\n", pc);

	struct r600_pipe_compute *shader = ctx->cs_shader_state.shader;
	if (!shader->kernels[pc].code_bo) {
		void *p;
		struct r600_kernel *kernel = &shader->kernels[pc];
		r600_compute_shader_create(ctx_, kernel->llvm_module, &kernel->bc);
		kernel->code_bo = r600_compute_buffer_alloc_vram(ctx->screen,
							kernel->bc.ndw * 4);
		p = ctx->ws->buffer_map(kernel->code_bo->cs_buf, ctx->cs,
							PIPE_TRANSFER_WRITE);
		memcpy(p, kernel->bc.bytecode, kernel->bc.ndw * 4);
		ctx->ws->buffer_unmap(kernel->code_bo->cs_buf);
	}
#endif

	ctx->cs_shader_state.kernel_index = pc;
	evergreen_compute_upload_input(ctx_, block_layout, grid_layout, input);
	compute_emit_cs(ctx, block_layout, grid_layout);
}
static void evergreen_set_compute_resources(struct pipe_context * ctx_,
		unsigned start, unsigned count,
		struct pipe_surface ** surfaces)
{
	struct r600_context *ctx = (struct r600_context *)ctx_;
	struct r600_surface **resources = (struct r600_surface **)surfaces;

	COMPUTE_DBG("*** evergreen_set_compute_resources: start = %u count = %u\n",
			start, count);

	for (int i = 0; i < count; i++) {
		/* The first two vertex buffers are reserved for parameters and
		 * global buffers. */
		unsigned vtx_id = 2 + i;
		if (resources[i]) {
			struct r600_resource_global *buffer =
				(struct r600_resource_global*)
				resources[i]->base.texture;
			if (resources[i]->base.writable) {
				assert(i+1 < 12);

				evergreen_set_rat(ctx->cs_shader_state.shader, i+1,
				(struct r600_resource *)resources[i]->base.texture,
				buffer->chunk->start_in_dw*4,
				resources[i]->base.texture->width0);
			}

			evergreen_cs_set_vertex_buffer(ctx, vtx_id,
					buffer->chunk->start_in_dw * 4,
					resources[i]->base.texture);
		}
	}
}
static void evergreen_set_cs_sampler_view(struct pipe_context *ctx_,
		unsigned start_slot, unsigned count,
		struct pipe_sampler_view **views)
{
	struct r600_context *ctx = (struct r600_context *)ctx_;
	struct r600_pipe_sampler_view **resource =
		(struct r600_pipe_sampler_view **)views;

	for (int i = 0; i < count; i++) {
		if (resource[i]) {
			assert(i < 16);
			///FETCH0 = VTX0 (param buffer),
			///FETCH1 = VTX1 (global buffer pool), FETCH2... = TEX
			evergreen_set_tex_resource(ctx->cs_shader_state.shader, resource[i], i+2);
		}
	}
}
static void evergreen_bind_compute_sampler_states(
	struct pipe_context *ctx_,
	unsigned start_slot,
	unsigned num_samplers,
	void **samplers_)
{
	struct r600_context *ctx = (struct r600_context *)ctx_;
	struct compute_sampler_state ** samplers =
		(struct compute_sampler_state **)samplers_;

	for (int i = 0; i < num_samplers; i++) {
		if (samplers[i])
			evergreen_set_sampler_resource(
				ctx->cs_shader_state.shader, samplers[i], i);
	}
}
static void evergreen_set_global_binding(
	struct pipe_context *ctx_, unsigned first, unsigned n,
	struct pipe_resource **resources,
	uint32_t **handles)
{
	struct r600_context *ctx = (struct r600_context *)ctx_;
	struct compute_memory_pool *pool = ctx->screen->global_pool;
	struct r600_resource_global **buffers =
		(struct r600_resource_global **)resources;

	COMPUTE_DBG("*** evergreen_set_global_binding first = %u n = %u\n",
			first, n);

	if (!resources) {
		/* XXX: Unset */
		return;
	}

	compute_memory_finalize_pending(pool, ctx_);

	for (int i = 0; i < n; i++) {
		assert(resources[i]->target == PIPE_BUFFER);
		assert(resources[i]->bind & PIPE_BIND_GLOBAL);

		*(handles[i]) = buffers[i]->chunk->start_in_dw * 4;
	}

	evergreen_set_rat(ctx->cs_shader_state.shader, 0, pool->bo, 0, pool->size_in_dw * 4);
	evergreen_cs_set_vertex_buffer(ctx, 1, 0,
				(struct pipe_resource*)pool->bo);
}
/**
 * This function initializes all the compute specific registers that need to
 * be initialized for each compute command stream. Registers that are common
 * to both compute and 3D will be initialized at the beginning of each compute
 * command stream by the start_cs_cmd atom. However, since the SET_CONTEXT_REG
 * packet requires that the shader type bit be set, we must initialize all
 * context registers needed for compute in this function. The registers
 * initialized by the start_cs_cmd atom can be found in evergreen_state.c in
 * the functions evergreen_init_atom_start_cs or cayman_init_atom_start_cs,
 * depending on the GPU family.
 */
void evergreen_init_atom_start_compute_cs(struct r600_context *ctx)
{
	struct r600_command_buffer *cb = &ctx->start_compute_cs_cmd;
	int num_threads;
	int num_stack_entries;

	/* since all required registers are initialized in the
	 * start_compute_cs_cmd atom, we can EMIT_EARLY here.
	 */
	r600_init_command_buffer(cb, 256);
	cb->pkt_flags = RADEON_CP_PACKET3_COMPUTE_MODE;

	/* This must be first. */
	r600_store_value(cb, PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
	r600_store_value(cb, 0x80000000);
	r600_store_value(cb, 0x80000000);

	/* We're setting config registers here. */
	r600_store_value(cb, PKT3(PKT3_EVENT_WRITE, 0, 0));
	r600_store_value(cb, EVENT_TYPE(EVENT_TYPE_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));

	switch (ctx->family) {
	case CHIP_CEDAR:
	default:
		num_threads = 128;
		num_stack_entries = 256;
		break;
	case CHIP_REDWOOD:
		num_threads = 128;
		num_stack_entries = 256;
		break;
	case CHIP_JUNIPER:
		num_threads = 128;
		num_stack_entries = 512;
		break;
	case CHIP_CYPRESS:
	case CHIP_HEMLOCK:
		num_threads = 128;
		num_stack_entries = 512;
		break;
	case CHIP_PALM:
		num_threads = 128;
		num_stack_entries = 256;
		break;
	case CHIP_SUMO:
		num_threads = 128;
		num_stack_entries = 256;
		break;
	case CHIP_SUMO2:
		num_threads = 128;
		num_stack_entries = 512;
		break;
	case CHIP_BARTS:
		num_threads = 128;
		num_stack_entries = 512;
		break;
	case CHIP_TURKS:
		num_threads = 128;
		num_stack_entries = 256;
		break;
	case CHIP_CAICOS:
		num_threads = 128;
		num_stack_entries = 256;
		break;
	}

	/* Config Registers */
	if (ctx->chip_class < CAYMAN)
		evergreen_init_common_regs(cb, ctx->chip_class, ctx->family,
					   ctx->screen->info.drm_minor);
	else
		cayman_init_common_regs(cb, ctx->chip_class, ctx->family,
					ctx->screen->info.drm_minor);

	/* The primitive type always needs to be POINTLIST for compute. */
	r600_store_config_reg(cb, R_008958_VGT_PRIMITIVE_TYPE,
						V_008958_DI_PT_POINTLIST);

	if (ctx->chip_class < CAYMAN) {

		/* These registers control which simds can be used by each stage.
		 * The default for these registers is 0xffffffff, which means
		 * all simds are available for each stage. It's possible we may
		 * want to play around with these in the future, but for now
		 * the default value is fine.
		 *
		 * R_008E20_SQ_STATIC_THREAD_MGMT1
		 * R_008E24_SQ_STATIC_THREAD_MGMT2
		 * R_008E28_SQ_STATIC_THREAD_MGMT3
		 */

		/* XXX: We may need to adjust the thread and stack resource
		 * values for 3D/compute interop */

		r600_store_config_reg_seq(cb, R_008C18_SQ_THREAD_RESOURCE_MGMT_1, 5);

		/* R_008C18_SQ_THREAD_RESOURCE_MGMT_1
		 * Set the number of threads used by the PS/VS/GS/ES stage to
		 * 0. */
		r600_store_value(cb, 0);

		/* R_008C1C_SQ_THREAD_RESOURCE_MGMT_2
		 * Set the number of threads used by the CS (aka LS) stage to
		 * the maximum number of threads and set the number of threads
		 * for the HS stage to 0. */
		r600_store_value(cb, S_008C1C_NUM_LS_THREADS(num_threads));

		/* R_008C20_SQ_STACK_RESOURCE_MGMT_1
		 * Set the Control Flow stack entries to 0 for PS/VS stages */
		r600_store_value(cb, 0);

		/* R_008C24_SQ_STACK_RESOURCE_MGMT_2
		 * Set the Control Flow stack entries to 0 for GS/ES stages */
		r600_store_value(cb, 0);

		/* R_008C28_SQ_STACK_RESOURCE_MGMT_3
		 * Set the Control Flow stack entries to 0 for the HS stage, and
		 * set it to the maximum value for the CS (aka LS) stage. */
		r600_store_value(cb,
			S_008C28_NUM_LS_STACK_ENTRIES(num_stack_entries));
	}

	/* Context Registers */

	if (ctx->chip_class < CAYMAN) {
		/* workaround for hw issues with dyn gpr - must set all limits
		 * to 240 instead of 0, 0x1e == 240 / 8
		 */
		r600_store_context_reg(cb, R_028838_SQ_DYN_GPR_RESOURCE_LIMIT_1,
				S_028838_PS_GPRS(0x1e) |
				S_028838_VS_GPRS(0x1e) |
				S_028838_GS_GPRS(0x1e) |
				S_028838_ES_GPRS(0x1e) |
				S_028838_HS_GPRS(0x1e) |
				S_028838_LS_GPRS(0x1e));
	}

	/* XXX: Investigate setting bit 15, which is FAST_COMPUTE_MODE */
	r600_store_context_reg(cb, R_028A40_VGT_GS_MODE,
		S_028A40_COMPUTE_MODE(1) | S_028A40_PARTIAL_THD_AT_EOI(1));

	r600_store_context_reg(cb, R_028B54_VGT_SHADER_STAGES_EN, 2/*CS_ON*/);

	r600_store_context_reg(cb, R_0286E8_SPI_COMPUTE_INPUT_CNTL,
						S_0286E8_TID_IN_GROUP_ENA
						| S_0286E8_TGID_ENA
						| S_0286E8_DISABLE_INDEX_PACK);

	/* The LOOP_CONST registers are an optimization for loops that allows
	 * you to store the initial counter, increment value, and maximum
	 * counter value in a register so that hardware can calculate the
	 * correct number of iterations for the loop, so that you don't need
	 * to have the loop counter in your shader code. We don't currently use
	 * this optimization, so we must keep track of the counter in the
	 * shader and use a break instruction to exit loops. However, the
	 * hardware will still use this register to determine when to exit a
	 * loop, so we need to initialize the counter to 0, set the increment
	 * value to 1 and the maximum counter value to 4095 (0xfff), which
	 * is the maximum value allowed. This gives us a maximum of 4096
	 * iterations for our loops, but hopefully our break instruction will
	 * execute before the 4096th iteration.
	 */
	eg_store_loop_const(cb, R_03A200_SQ_LOOP_CONST_0 + (160 * 4), 0x1000FFF);
}
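
/* Decoding the 0x1000FFF written above, assuming the usual SQ_LOOP_CONST
 * field layout of count[11:0], init[23:12], increment[31:24]:
 * count = 0xFFF (4095), init = 0x000, increment = 0x01 -- which matches the
 * comment: start at 0, step by 1, up to 4095 iterations. */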
void evergreen_init_compute_state_functions(struct r600_context *ctx)
{
	ctx->context.create_compute_state = evergreen_create_compute_state;
	ctx->context.delete_compute_state = evergreen_delete_compute_state;
	ctx->context.bind_compute_state = evergreen_bind_compute_state;
//	ctx->context.create_sampler_view = evergreen_compute_create_sampler_view;
	ctx->context.set_compute_resources = evergreen_set_compute_resources;
	ctx->context.set_compute_sampler_views = evergreen_set_cs_sampler_view;
	ctx->context.bind_compute_sampler_states = evergreen_bind_compute_sampler_states;
	ctx->context.set_global_binding = evergreen_set_global_binding;
	ctx->context.launch_grid = evergreen_launch_grid;

	/* We always use at least two vertex buffers for compute, one for
	 * parameters and one for global memory */
	ctx->cs_vertex_buffer_state.enabled_mask =
	ctx->cs_vertex_buffer_state.dirty_mask = 1 | 2;
}
struct pipe_resource *r600_compute_global_buffer_create(
	struct pipe_screen *screen,
	const struct pipe_resource *templ)
{
	assert(templ->target == PIPE_BUFFER);
	assert(templ->bind & PIPE_BIND_GLOBAL);
	assert(templ->array_size == 1 || templ->array_size == 0);
	assert(templ->depth0 == 1 || templ->depth0 == 0);
	assert(templ->height0 == 1 || templ->height0 == 0);

	struct r600_resource_global* result = (struct r600_resource_global*)
		CALLOC(sizeof(struct r600_resource_global), 1);
	struct r600_screen* rscreen = (struct r600_screen*)screen;

	COMPUTE_DBG("*** r600_compute_global_buffer_create\n");
	COMPUTE_DBG("width = %u array_size = %u\n", templ->width0,
			templ->array_size);

	result->base.b.b = *templ;
	result->base.b.vtbl = &r600_global_buffer_vtbl;
	result->base.b.b.screen = screen;
	pipe_reference_init(&result->base.b.b.reference, 1);

	int size_in_dw = (templ->width0+3) / 4;

	result->chunk = compute_memory_alloc(rscreen->global_pool, size_in_dw);

	if (result->chunk == NULL)
	{
		free(result);
		return NULL;
	}

	return &result->base.b.b;
}
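
/* Sizing example for the (width0+3)/4 rounding above: a template with
 * width0 = 10 bytes yields size_in_dw = (10+3)/4 = 3 dwords (12 bytes),
 * i.e. byte sizes are rounded up to whole dwords in the global pool. */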
void r600_compute_global_buffer_destroy(
	struct pipe_screen *screen,
	struct pipe_resource *res)
{
	assert(res->target == PIPE_BUFFER);
	assert(res->bind & PIPE_BIND_GLOBAL);

	struct r600_resource_global* buffer = (struct r600_resource_global*)res;
	struct r600_screen* rscreen = (struct r600_screen*)screen;

	compute_memory_free(rscreen->global_pool, buffer->chunk->id);

	buffer->chunk = NULL;
	free(res);
}
void *r600_compute_global_transfer_map(
	struct pipe_context *ctx_,
	struct pipe_resource *resource,
	unsigned level,
	unsigned usage,
	const struct pipe_box *box,
	struct pipe_transfer **ptransfer)
{
	struct r600_context *rctx = (struct r600_context*)ctx_;
	struct compute_memory_pool *pool = rctx->screen->global_pool;
	struct pipe_transfer *transfer = util_slab_alloc(&rctx->pool_transfers);
	struct r600_resource_global* buffer =
		(struct r600_resource_global*)resource;
	uint32_t* map;

	compute_memory_finalize_pending(pool, ctx_);

	assert(resource->target == PIPE_BUFFER);

	COMPUTE_DBG("* r600_compute_global_get_transfer()\n"
			"level = %u, usage = %u, box(x = %u, y = %u, z = %u "
			"width = %u, height = %u, depth = %u)\n", level, usage,
			box->x, box->y, box->z, box->width, box->height,
			box->depth);

	transfer->resource = resource;
	transfer->level = level;
	transfer->usage = usage;
	transfer->box = *box;
	transfer->stride = 0;
	transfer->layer_stride = 0;

	assert(transfer->resource->target == PIPE_BUFFER);
	assert(transfer->resource->bind & PIPE_BIND_GLOBAL);
	assert(transfer->box.x >= 0);
	assert(transfer->box.y == 0);
	assert(transfer->box.z == 0);

	///TODO: do it better, mapping is not possible if the pool is too big

	COMPUTE_DBG("* r600_compute_global_transfer_map()\n");

	if (!(map = rctx->ws->buffer_map(buffer->chunk->pool->bo->cs_buf,
				rctx->cs, transfer->usage))) {
		util_slab_free(&rctx->pool_transfers, transfer);
		return NULL;
	}

	*ptransfer = transfer;

	COMPUTE_DBG("Buffer: %p + %u (buffer offset in global memory) "
		"+ %u (box.x)\n", map, buffer->chunk->start_in_dw, transfer->box.x);
	return ((char*)(map + buffer->chunk->start_in_dw)) + transfer->box.x;
}
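
/* Pointer arithmetic note for the return value above: map is a uint32_t*,
 * so map + buffer->chunk->start_in_dw advances in whole dwords to the
 * chunk's offset in the pool, and the char* cast then applies box.x in
 * bytes; e.g. start_in_dw = 8 and box.x = 3 point 8*4 + 3 = 35 bytes in. */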
void r600_compute_global_transfer_unmap(
	struct pipe_context *ctx_,
	struct pipe_transfer* transfer)
{
	assert(transfer->resource->target == PIPE_BUFFER);
	assert(transfer->resource->bind & PIPE_BIND_GLOBAL);

	struct r600_context *ctx = (struct r600_context *)ctx_;
	struct r600_resource_global* buffer =
		(struct r600_resource_global*)transfer->resource;

	COMPUTE_DBG("* r600_compute_global_transfer_unmap()\n");

	ctx->ws->buffer_unmap(buffer->chunk->pool->bo->cs_buf);
	util_slab_free(&ctx->pool_transfers, transfer);
}
void r600_compute_global_transfer_flush_region(
	struct pipe_context *ctx_,
	struct pipe_transfer *transfer,
	const struct pipe_box *box)
{
	assert(0 && "TODO");
}

void r600_compute_global_transfer_inline_write(
	struct pipe_context *pipe,
	struct pipe_resource *resource,
	unsigned level,
	unsigned usage,
	const struct pipe_box *box,
	const void *data,
	unsigned stride,
	unsigned layer_stride)
{
	assert(0 && "TODO");
}