/*
 * Copyright 2011 Adam Rak <adam.rak@streamnovation.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Adam Rak <adam.rak@streamnovation.com>
 */
#include "pipe/p_defines.h"
#include "pipe/p_state.h"
#include "pipe/p_context.h"
#include "util/u_blitter.h"
#include "util/u_double_list.h"
#include "util/u_transfer.h"
#include "util/u_surface.h"
#include "util/u_pack_color.h"
#include "util/u_memory.h"
#include "util/u_inlines.h"
#include "util/u_framebuffer.h"
#include "pipebuffer/pb_buffer.h"

#include "evergreend.h"
#include "r600_resource.h"
#include "r600_shader.h"
#include "r600_pipe.h"
#include "r600_formats.h"
#include "evergreen_compute.h"
#include "r600_hw_context_priv.h"
#include "evergreen_compute_internal.h"
#include "compute_memory_pool.h"
#ifdef HAVE_OPENCL
#include "llvm_wrapper.h"
#endif
/*
RAT0 is for global binding write
VTX1 is for global binding read

for writing images RAT1...
for reading images TEX2...

TEX2... consumes the same fetch resources that VTX2... would consume

CONST0 and VTX0 are for parameters
  CONST0 binds the smaller input parameter buffer, and is for constant
  indexing; it is typically constant cached
  VTX0 is for indirect/non-constant indexing, or if the input is bigger
  than the constant cache can handle

RATs are limited to 12, so we can only bind at most 11 textures for writing
because we reserve RAT0 for global bindings. With byte addressing enabled,
we should reserve another one too => 10 image bindings for writing max.

from Nvidia OpenCL:
  CL_DEVICE_MAX_READ_IMAGE_ARGS:        128
  CL_DEVICE_MAX_WRITE_IMAGE_ARGS:       8

so 10 for writing is enough. 176 is the max for reading according to the docs

writable images should be listed first < 10, so their id corresponds to RAT(id+1)
writable images will consume TEX slots, VTX slots too because of linear indexing
*/
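
/* A minimal sketch of the mapping described above (illustrative only, not
 * driver code; the helper names are invented for the example):
 *
 *   unsigned rat_for_writable_image(unsigned id)  { return id + 1; }
 *   unsigned fetch_slot_for_resource(unsigned id) { return id + 2; }
 *
 * e.g. writable image 0 binds to RAT1, and resource 0 is read through
 * FETCH/TEX slot 2, because RAT0, FETCH0 (param buffer) and FETCH1 (global
 * buffer pool) are reserved as described above. */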
static void evergreen_cs_set_vertex_buffer(
	struct r600_context * rctx,
	unsigned vb_index,
	unsigned offset,
	struct pipe_resource * buffer)
{
	struct r600_vertexbuf_state *state = &rctx->cs_vertex_buffer_state;
	struct pipe_vertex_buffer *vb = &state->vb[vb_index];
	vb->stride = 1;
	vb->buffer_offset = offset;
	vb->buffer = buffer;
	vb->user_buffer = NULL;

	/* The vertex instructions in the compute shaders use the texture cache,
	 * so we need to invalidate it. */
	rctx->flags |= R600_CONTEXT_INVAL_READ_CACHES;
	state->enabled_mask |= 1 << vb_index;
	state->dirty_mask |= 1 << vb_index;
	state->atom.dirty = true;
}
static const struct u_resource_vtbl r600_global_buffer_vtbl =
{
	u_default_resource_get_handle, /* get_handle */
	r600_compute_global_buffer_destroy, /* resource_destroy */
	r600_compute_global_transfer_map, /* transfer_map */
	r600_compute_global_transfer_flush_region,/* transfer_flush_region */
	r600_compute_global_transfer_unmap, /* transfer_unmap */
	r600_compute_global_transfer_inline_write /* transfer_inline_write */
};
void *evergreen_create_compute_state(
	struct pipe_context *ctx_,
	const struct pipe_compute_state *cso)
{
	struct r600_context *ctx = (struct r600_context *)ctx_;
	struct r600_pipe_compute *shader = CALLOC_STRUCT(r600_pipe_compute);

#ifdef HAVE_OPENCL
	const struct pipe_llvm_program_header * header;
	const unsigned char * code;
	unsigned i;

	COMPUTE_DBG(ctx->screen, "*** evergreen_create_compute_state\n");

	header = cso->prog;
	code = cso->prog + sizeof(struct pipe_llvm_program_header);
#endif

	shader->ctx = (struct r600_context*)ctx;
	shader->resources = (struct evergreen_compute_resource*)
			CALLOC(sizeof(struct evergreen_compute_resource),
			get_compute_resource_num());
	shader->local_size = cso->req_local_mem; ///TODO: assert it
	shader->private_size = cso->req_private_mem;
	shader->input_size = cso->req_input_mem;

#ifdef HAVE_OPENCL
	shader->num_kernels = llvm_get_num_kernels(code, header->num_bytes);
	shader->kernels = CALLOC(sizeof(struct r600_kernel), shader->num_kernels);

	for (i = 0; i < shader->num_kernels; i++) {
		struct r600_kernel *kernel = &shader->kernels[i];
		kernel->llvm_module = llvm_get_kernel_module(i, code,
							header->num_bytes);
	}
#endif

	return shader;
}
void evergreen_delete_compute_state(struct pipe_context *ctx, void* state)
{
	struct r600_pipe_compute *shader = (struct r600_pipe_compute *)state;

	free(shader->resources);
	free(shader);
}
static void evergreen_bind_compute_state(struct pipe_context *ctx_, void *state)
{
	struct r600_context *ctx = (struct r600_context *)ctx_;

	COMPUTE_DBG(ctx->screen, "*** evergreen_bind_compute_state\n");

	ctx->cs_shader_state.shader = (struct r600_pipe_compute *)state;
}
/* The kernel parameters are stored in a vtx buffer (ID=0). Besides the
 * explicit kernel parameters, there are implicit parameters that need to be
 * stored in the vertex buffer as well. Here is how these parameters are
 * organized in the buffer:
 *
 * DWORDS 0-2: Number of work groups in each dimension (x,y,z)
 * DWORDS 3-5: Number of global work items in each dimension (x,y,z)
 * DWORDS 6-8: Number of work items within each work group in each dimension
 *             (x,y,z)
 * DWORDS 9+ : Kernel parameters
 */
void evergreen_compute_upload_input(
	struct pipe_context *ctx_,
	const uint *block_layout,
	const uint *grid_layout,
	const void *input)
{
	struct r600_context *ctx = (struct r600_context *)ctx_;
	struct r600_pipe_compute *shader = ctx->cs_shader_state.shader;
	int i;
	unsigned kernel_parameters_offset_bytes = 36;
	uint32_t * num_work_groups_start;
	uint32_t * global_size_start;
	uint32_t * local_size_start;
	uint32_t * kernel_parameters_start;

	if (shader->input_size == 0) {
		return;
	}

	if (!shader->kernel_param) {
		unsigned buffer_size = shader->input_size;

		/* Add space for the grid dimensions */
		buffer_size += kernel_parameters_offset_bytes * sizeof(uint);

		shader->kernel_param = r600_compute_buffer_alloc_vram(
						ctx->screen, buffer_size);
	}

	num_work_groups_start = r600_buffer_mmap_sync_with_rings(ctx, shader->kernel_param, PIPE_TRANSFER_WRITE);
	global_size_start = num_work_groups_start + (3 * (sizeof(uint) / 4));
	local_size_start = global_size_start + (3 * (sizeof(uint) / 4));
	kernel_parameters_start = local_size_start + (3 * (sizeof(uint) / 4));

	/* Copy the number of work groups */
	memcpy(num_work_groups_start, grid_layout, 3 * sizeof(uint));

	/* Copy the global size */
	for (i = 0; i < 3; i++) {
		global_size_start[i] = grid_layout[i] * block_layout[i];
	}

	/* Copy the local dimensions */
	memcpy(local_size_start, block_layout, 3 * sizeof(uint));

	/* Copy the kernel inputs */
	memcpy(kernel_parameters_start, input, shader->input_size);

	for (i = 0; i < (kernel_parameters_offset_bytes / 4) +
			(shader->input_size / 4); i++) {
		COMPUTE_DBG(ctx->screen, "input %i : %i\n", i,
			((unsigned*)num_work_groups_start)[i]);
	}

	ctx->ws->buffer_unmap(shader->kernel_param->cs_buf);

	///ID=0 is reserved for the parameters
	evergreen_cs_set_vertex_buffer(ctx, 0, 0,
			(struct pipe_resource*)shader->kernel_param);
	///ID=0 is reserved for parameters
	evergreen_set_const_cache(shader, 0, shader->kernel_param,
						shader->input_size, 0);
}
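
/* Worked example (illustrative only): for a hypothetical launch with
 * block_layout = {16, 16, 1} and grid_layout = {4, 2, 1}, the buffer
 * written above starts with these nine implicit DWORDs:
 *
 *   DWORDs 0-2 (num work groups):  4,  2, 1
 *   DWORDs 3-5 (global size):     64, 32, 1   (grid * block per axis)
 *   DWORDs 6-8 (local size):      16, 16, 1
 *
 * followed at byte offset 36 (kernel_parameters_offset_bytes) by the
 * caller's input_size bytes of kernel arguments. */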
static void evergreen_emit_direct_dispatch(
		struct r600_context *rctx,
		const uint *block_layout, const uint *grid_layout)
{
	int i;
	struct radeon_winsys_cs *cs = rctx->rings.gfx.cs;
	unsigned num_waves;
	unsigned num_pipes = rctx->screen->info.r600_max_pipes;
	unsigned wave_divisor = (16 * num_pipes);
	int group_size = 1;
	int grid_size = 1;
	/* XXX: Enable lds and get size from cs_shader_state */
	unsigned lds_size = 0;

	/* Calculate group_size/grid_size */
	for (i = 0; i < 3; i++) {
		group_size *= block_layout[i];
	}

	for (i = 0; i < 3; i++) {
		grid_size *= grid_layout[i];
	}

	/* num_waves = ceil((tg_size.x * tg_size.y * tg_size.z) / (16 * num_pipes)) */
	num_waves = (block_layout[0] * block_layout[1] * block_layout[2] +
			wave_divisor - 1) / wave_divisor;

	COMPUTE_DBG(rctx->screen, "Using %u pipes, there are %u wavefronts per thread block\n",
							num_pipes, num_waves);

	/* XXX: Partition the LDS between PS/CS. By default half (4096 dwords
	 * on Evergreen) goes to Pixel Shaders and half goes to Compute Shaders.
	 * We may need to allocate the entire LDS space for Compute Shaders.
	 *
	 * EG: R_008E2C_SQ_LDS_RESOURCE_MGMT := S_008E2C_NUM_LS_LDS(lds_dwords)
	 * CM: CM_R_0286FC_SPI_LDS_MGMT := S_0286FC_NUM_LS_LDS(lds_dwords)
	 */

	r600_write_config_reg(cs, R_008970_VGT_NUM_INDICES, group_size);

	r600_write_config_reg_seq(cs, R_00899C_VGT_COMPUTE_START_X, 3);
	r600_write_value(cs, 0); /* R_00899C_VGT_COMPUTE_START_X */
	r600_write_value(cs, 0); /* R_0089A0_VGT_COMPUTE_START_Y */
	r600_write_value(cs, 0); /* R_0089A4_VGT_COMPUTE_START_Z */

	r600_write_config_reg(cs, R_0089AC_VGT_COMPUTE_THREAD_GROUP_SIZE,
								group_size);

	r600_write_compute_context_reg_seq(cs, R_0286EC_SPI_COMPUTE_NUM_THREAD_X, 3);
	r600_write_value(cs, block_layout[0]); /* R_0286EC_SPI_COMPUTE_NUM_THREAD_X */
	r600_write_value(cs, block_layout[1]); /* R_0286F0_SPI_COMPUTE_NUM_THREAD_Y */
	r600_write_value(cs, block_layout[2]); /* R_0286F4_SPI_COMPUTE_NUM_THREAD_Z */

	r600_write_compute_context_reg(cs, CM_R_0288E8_SQ_LDS_ALLOC,
					lds_size | (num_waves << 14));

	/* Dispatch packet */
	r600_write_value(cs, PKT3C(PKT3_DISPATCH_DIRECT, 3, 0));
	r600_write_value(cs, grid_layout[0]);
	r600_write_value(cs, grid_layout[1]);
	r600_write_value(cs, grid_layout[2]);
	/* VGT_DISPATCH_INITIATOR = COMPUTE_SHADER_EN */
	r600_write_value(cs, 1);
}
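
/* Worked example for the num_waves computation above (illustrative only):
 * with num_pipes = 8, wave_divisor is 128, so a 16x16x1 thread block
 * (256 threads) gives num_waves = (256 + 127) / 128 = 2, i.e. two
 * wavefronts per thread group are reserved in SQ_LDS_ALLOC. */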
static void compute_emit_cs(struct r600_context *ctx, const uint *block_layout,
		const uint *grid_layout)
{
	struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;
	unsigned flush_flags = 0;
	int i;
	struct r600_resource *onebo = NULL;
	struct evergreen_compute_resource *resources =
			ctx->cs_shader_state.shader->resources;

	/* make sure that the gfx ring is the only one active */
	if (ctx->rings.dma.cs) {
		ctx->rings.dma.flush(ctx, RADEON_FLUSH_ASYNC);
	}

	/* Initialize all the compute-related registers.
	 *
	 * See evergreen_init_atom_start_compute_cs() in this file for the list
	 * of registers initialized by the start_compute_cs_cmd atom.
	 */
	r600_emit_command_buffer(cs, &ctx->start_compute_cs_cmd);

	ctx->flags |= R600_CONTEXT_WAIT_3D_IDLE | R600_CONTEXT_FLUSH_AND_INV;
	r600_flush_emit(ctx);

	/* Emit colorbuffers. */
	for (i = 0; i < ctx->framebuffer.state.nr_cbufs; i++) {
		struct r600_surface *cb = (struct r600_surface*)ctx->framebuffer.state.cbufs[i];
		unsigned reloc = r600_context_bo_reloc(ctx, &ctx->rings.gfx,
						       (struct r600_resource*)cb->base.texture,
						       RADEON_USAGE_READWRITE);

		r600_write_compute_context_reg_seq(cs, R_028C60_CB_COLOR0_BASE + i * 0x3C, 7);
		r600_write_value(cs, cb->cb_color_base);	/* R_028C60_CB_COLOR0_BASE */
		r600_write_value(cs, cb->cb_color_pitch);	/* R_028C64_CB_COLOR0_PITCH */
		r600_write_value(cs, cb->cb_color_slice);	/* R_028C68_CB_COLOR0_SLICE */
		r600_write_value(cs, cb->cb_color_view);	/* R_028C6C_CB_COLOR0_VIEW */
		r600_write_value(cs, cb->cb_color_info);	/* R_028C70_CB_COLOR0_INFO */
		r600_write_value(cs, cb->cb_color_attrib);	/* R_028C74_CB_COLOR0_ATTRIB */
		r600_write_value(cs, cb->cb_color_dim);		/* R_028C78_CB_COLOR0_DIM */

		r600_write_value(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C60_CB_COLOR0_BASE */
		r600_write_value(cs, reloc);

		if (!ctx->keep_tiling_flags) {
			r600_write_value(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C70_CB_COLOR0_INFO */
			r600_write_value(cs, reloc);
		}

		r600_write_value(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C74_CB_COLOR0_ATTRIB */
		r600_write_value(cs, reloc);
	}

	/* Set CB_TARGET_MASK  XXX: Use cb_misc_state */
	r600_write_compute_context_reg(cs, R_028238_CB_TARGET_MASK,
					ctx->compute_cb_target_mask);

	/* Emit vertex buffer state */
	ctx->cs_vertex_buffer_state.atom.num_dw = 12 * util_bitcount(ctx->cs_vertex_buffer_state.dirty_mask);
	r600_emit_atom(ctx, &ctx->cs_vertex_buffer_state.atom);

	/* Emit compute shader state */
	r600_emit_atom(ctx, &ctx->cs_shader_state.atom);

	for (i = 0; i < get_compute_resource_num(); i++) {
		if (resources[i].enabled) {
			int j;
			COMPUTE_DBG(ctx->screen, "resnum: %i, cdw: %i\n", i, cs->cdw);

			for (j = 0; j < resources[i].cs_end; j++) {
				if (resources[i].do_reloc[j]) {
					assert(resources[i].bo);
					evergreen_emit_ctx_reloc(ctx,
						resources[i].bo,
						resources[i].usage);
				}

				cs->buf[cs->cdw++] = resources[i].cs[j];
			}

			if (resources[i].bo) {
				onebo = resources[i].bo;
				evergreen_emit_ctx_reloc(ctx,
					resources[i].bo,
					resources[i].usage);

				///special case for textures
				if (resources[i].do_reloc
					[resources[i].cs_end] == 2) {
					evergreen_emit_ctx_reloc(ctx,
						resources[i].bo,
						resources[i].usage);
				}
			}
		}
	}

	/* Emit dispatch state and dispatch packet */
	evergreen_emit_direct_dispatch(ctx, block_layout, grid_layout);

	/* XXX evergreen_flush_emit() hardcodes the CP_COHER_SIZE to 0xffffffff
	 */
	ctx->flags |= R600_CONTEXT_INVAL_READ_CACHES;
	r600_flush_emit(ctx);

#if 0
	COMPUTE_DBG(ctx->screen, "cdw: %i\n", cs->cdw);
	for (i = 0; i < cs->cdw; i++) {
		COMPUTE_DBG(ctx->screen, "%4i : 0x%08X\n", i, cs->buf[i]);
	}
#endif

	flush_flags = RADEON_FLUSH_ASYNC | RADEON_FLUSH_COMPUTE;
	if (ctx->keep_tiling_flags) {
		flush_flags |= RADEON_FLUSH_KEEP_TILING_FLAGS;
	}

	ctx->ws->cs_flush(ctx->rings.gfx.cs, flush_flags);

	ctx->pm4_dirty_cdwords = 0;
	ctx->flags = 0;

	COMPUTE_DBG(ctx->screen, "shader started\n");

	ctx->ws->buffer_wait(onebo->buf, 0);

	COMPUTE_DBG(ctx->screen, "...\n");
}
/**
 * Emit function for r600_cs_shader_state atom
 */
void evergreen_emit_cs_shader(
		struct r600_context *rctx,
		struct r600_atom *atom)
{
	struct r600_cs_shader_state *state =
					(struct r600_cs_shader_state*)atom;
	struct r600_pipe_compute *shader = state->shader;
	struct r600_kernel *kernel = &shader->kernels[state->kernel_index];
	struct radeon_winsys_cs *cs = rctx->rings.gfx.cs;
	uint64_t va;

	va = r600_resource_va(&rctx->screen->screen, &kernel->code_bo->b.b);

	r600_write_compute_context_reg_seq(cs, R_0288D0_SQ_PGM_START_LS, 3);
	r600_write_value(cs, va >> 8); /* R_0288D0_SQ_PGM_START_LS */
	r600_write_value(cs,           /* R_0288D4_SQ_PGM_RESOURCES_LS */
			S_0288D4_NUM_GPRS(kernel->bc.ngpr)
			| S_0288D4_STACK_SIZE(kernel->bc.nstack));
	r600_write_value(cs, 0);	/* R_0288D8_SQ_PGM_RESOURCES_LS_2 */

	r600_write_value(cs, PKT3C(PKT3_NOP, 0, 0));
	r600_write_value(cs, r600_context_bo_reloc(rctx, &rctx->rings.gfx,
						kernel->code_bo, RADEON_USAGE_READ));

	rctx->flags |= R600_CONTEXT_INVAL_READ_CACHES;
}
static void evergreen_launch_grid(
		struct pipe_context *ctx_,
		const uint *block_layout, const uint *grid_layout,
		uint32_t pc, const void *input)
{
	struct r600_context *ctx = (struct r600_context *)ctx_;

	COMPUTE_DBG(ctx->screen, "*** evergreen_launch_grid: pc = %u\n", pc);

#ifdef HAVE_OPENCL
	struct r600_pipe_compute *shader = ctx->cs_shader_state.shader;
	if (!shader->kernels[pc].code_bo) {
		void *p;
		struct r600_kernel *kernel = &shader->kernels[pc];
		r600_compute_shader_create(ctx_, kernel->llvm_module, &kernel->bc);
		kernel->code_bo = r600_compute_buffer_alloc_vram(ctx->screen,
							kernel->bc.ndw * 4);
		p = r600_buffer_mmap_sync_with_rings(ctx, kernel->code_bo, PIPE_TRANSFER_WRITE);
		memcpy(p, kernel->bc.bytecode, kernel->bc.ndw * 4);
		ctx->ws->buffer_unmap(kernel->code_bo->cs_buf);
	}
#endif

	ctx->cs_shader_state.kernel_index = pc;
	evergreen_compute_upload_input(ctx_, block_layout, grid_layout, input);
	compute_emit_cs(ctx, block_layout, grid_layout);
}
static void evergreen_set_compute_resources(struct pipe_context * ctx_,
		unsigned start, unsigned count,
		struct pipe_surface ** surfaces)
{
	struct r600_context *ctx = (struct r600_context *)ctx_;
	struct r600_surface **resources = (struct r600_surface **)surfaces;

	COMPUTE_DBG(ctx->screen, "*** evergreen_set_compute_resources: start = %u count = %u\n",
			start, count);

	for (int i = 0; i < count; i++) {
		/* The first two vertex buffers are reserved for parameters and
		 * global buffers. */
		unsigned vtx_id = 2 + i;

		struct r600_resource_global *buffer =
			(struct r600_resource_global*)
			resources[i]->base.texture;
		if (resources[i]->base.writable) {
			assert(i+1 < 12);

			evergreen_set_rat(ctx->cs_shader_state.shader, i+1,
			(struct r600_resource *)resources[i]->base.texture,
			buffer->chunk->start_in_dw*4,
			resources[i]->base.texture->width0);
		}

		evergreen_cs_set_vertex_buffer(ctx, vtx_id,
				buffer->chunk->start_in_dw * 4,
				resources[i]->base.texture);
	}
}
static void evergreen_set_cs_sampler_view(struct pipe_context *ctx_,
		unsigned start_slot, unsigned count,
		struct pipe_sampler_view **views)
{
	struct r600_context *ctx = (struct r600_context *)ctx_;
	struct r600_pipe_sampler_view **resource =
		(struct r600_pipe_sampler_view **)views;

	for (int i = 0; i < count; i++) {
		if (resource[i]) {
			assert(i+1 < 12);
			///FETCH0 = VTX0 (param buffer),
			///FETCH1 = VTX1 (global buffer pool), FETCH2... = TEX
			evergreen_set_tex_resource(ctx->cs_shader_state.shader, resource[i], i+2);
		}
	}
}
static void evergreen_bind_compute_sampler_states(
	struct pipe_context *ctx_,
	unsigned start_slot,
	unsigned num_samplers,
	void **samplers_)
{
	struct r600_context *ctx = (struct r600_context *)ctx_;
	struct compute_sampler_state ** samplers =
		(struct compute_sampler_state **)samplers_;

	for (int i = 0; i < num_samplers; i++) {
		if (samplers[i])
			evergreen_set_sampler_resource(
				ctx->cs_shader_state.shader, samplers[i], i);
	}
}
static void evergreen_set_global_binding(
	struct pipe_context *ctx_, unsigned first, unsigned n,
	struct pipe_resource **resources,
	uint32_t **handles)
{
	struct r600_context *ctx = (struct r600_context *)ctx_;
	struct compute_memory_pool *pool = ctx->screen->global_pool;
	struct r600_resource_global **buffers =
		(struct r600_resource_global **)resources;

	COMPUTE_DBG(ctx->screen, "*** evergreen_set_global_binding first = %u n = %u\n",
			first, n);

	if (!resources) {
		/* XXX: Unset */
		return;
	}

	compute_memory_finalize_pending(pool, ctx_);

	for (int i = 0; i < n; i++)
	{
		assert(resources[i]->target == PIPE_BUFFER);
		assert(resources[i]->bind & PIPE_BIND_GLOBAL);

		*(handles[i]) = buffers[i]->chunk->start_in_dw * 4;
	}

	evergreen_set_rat(ctx->cs_shader_state.shader, 0, pool->bo, 0, pool->size_in_dw * 4);
	evergreen_cs_set_vertex_buffer(ctx, 1, 0,
				(struct pipe_resource*)pool->bo);
}
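
/* Note on the handles written above (assuming the caller is the OpenCL
 * state tracker): each handle is the byte offset of the buffer's chunk
 * inside the global pool, e.g. a chunk starting at dword 256 yields the
 * handle 1024. The kernel then addresses the buffer through RAT0 (writes)
 * and VTX1 (reads) using that byte offset. */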
/**
 * This function initializes all the compute specific registers that need to
 * be initialized for each compute command stream. Registers that are common
 * to both compute and 3D will be initialized at the beginning of each compute
 * command stream by the start_cs_cmd atom. However, since the SET_CONTEXT_REG
 * packet requires that the shader type bit be set, we must initialize all
 * context registers needed for compute in this function. The registers
 * initialized by the start_cs_cmd atom can be found in evergreen_state.c in
 * the functions evergreen_init_atom_start_cs or cayman_init_atom_start_cs
 * depending on the chip class.
 */
void evergreen_init_atom_start_compute_cs(struct r600_context *ctx)
{
	struct r600_command_buffer *cb = &ctx->start_compute_cs_cmd;
	int num_threads;
	int num_stack_entries;

	/* since all required registers are initialized in the
	 * start_compute_cs_cmd atom, we can EMIT_EARLY here. */
	r600_init_command_buffer(cb, 256);
	cb->pkt_flags = RADEON_CP_PACKET3_COMPUTE_MODE;

	/* This must be first. */
	r600_store_value(cb, PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
	r600_store_value(cb, 0x80000000);
	r600_store_value(cb, 0x80000000);

	/* We're setting config registers here. */
	r600_store_value(cb, PKT3(PKT3_EVENT_WRITE, 0, 0));
	r600_store_value(cb, EVENT_TYPE(EVENT_TYPE_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));

	switch (ctx->family) {
	case CHIP_CEDAR:
	default:
		num_threads = 128;
		num_stack_entries = 256;
		break;
	case CHIP_REDWOOD:
		num_threads = 128;
		num_stack_entries = 256;
		break;
	case CHIP_JUNIPER:
		num_threads = 128;
		num_stack_entries = 512;
		break;
	case CHIP_CYPRESS:
	case CHIP_HEMLOCK:
		num_threads = 128;
		num_stack_entries = 512;
		break;
	case CHIP_PALM:
		num_threads = 128;
		num_stack_entries = 256;
		break;
	case CHIP_SUMO:
		num_threads = 128;
		num_stack_entries = 256;
		break;
	case CHIP_SUMO2:
		num_threads = 128;
		num_stack_entries = 512;
		break;
	case CHIP_BARTS:
		num_threads = 128;
		num_stack_entries = 512;
		break;
	case CHIP_TURKS:
		num_threads = 128;
		num_stack_entries = 256;
		break;
	case CHIP_CAICOS:
		num_threads = 128;
		num_stack_entries = 256;
		break;
	}

	/* Config Registers */
	if (ctx->chip_class < CAYMAN)
		evergreen_init_common_regs(cb, ctx->chip_class, ctx->family,
					   ctx->screen->info.drm_minor);
	else
		cayman_init_common_regs(cb, ctx->chip_class, ctx->family,
					ctx->screen->info.drm_minor);

	/* The primitive type always needs to be POINTLIST for compute. */
	r600_store_config_reg(cb, R_008958_VGT_PRIMITIVE_TYPE,
						V_008958_DI_PT_POINTLIST);

	if (ctx->chip_class < CAYMAN) {

		/* These registers control which simds can be used by each stage.
		 * The default for these registers is 0xffffffff, which means
		 * all simds are available for each stage. It's possible we may
		 * want to play around with these in the future, but for now
		 * the default value is fine.
		 *
		 * R_008E20_SQ_STATIC_THREAD_MGMT1
		 * R_008E24_SQ_STATIC_THREAD_MGMT2
		 * R_008E28_SQ_STATIC_THREAD_MGMT3
		 */

		/* XXX: We may need to adjust the thread and stack resource
		 * values for 3D/compute interop */

		r600_store_config_reg_seq(cb, R_008C18_SQ_THREAD_RESOURCE_MGMT_1, 5);

		/* R_008C18_SQ_THREAD_RESOURCE_MGMT_1
		 * Set the number of threads used by the PS/VS/GS/ES stage to
		 * 0. */
		r600_store_value(cb, 0);

		/* R_008C1C_SQ_THREAD_RESOURCE_MGMT_2
		 * Set the number of threads used by the CS (aka LS) stage to
		 * the maximum number of threads and set the number of threads
		 * for the HS stage to 0. */
		r600_store_value(cb, S_008C1C_NUM_LS_THREADS(num_threads));

		/* R_008C20_SQ_STACK_RESOURCE_MGMT_1
		 * Set the Control Flow stack entries to 0 for PS/VS stages */
		r600_store_value(cb, 0);

		/* R_008C24_SQ_STACK_RESOURCE_MGMT_2
		 * Set the Control Flow stack entries to 0 for GS/ES stages */
		r600_store_value(cb, 0);

		/* R_008C28_SQ_STACK_RESOURCE_MGMT_3
		 * Set the Control Flow stack entries to 0 for the HS stage, and
		 * set it to the maximum value for the CS (aka LS) stage. */
		r600_store_value(cb,
			S_008C28_NUM_LS_STACK_ENTRIES(num_stack_entries));
	}

	/* Context Registers */

	if (ctx->chip_class < CAYMAN) {
		/* workaround for hw issues with dyn gpr - must set all limits
		 * to 240 instead of 0, 0x1e == 240 / 8 */
		r600_store_context_reg(cb, R_028838_SQ_DYN_GPR_RESOURCE_LIMIT_1,
				S_028838_PS_GPRS(0x1e) |
				S_028838_VS_GPRS(0x1e) |
				S_028838_GS_GPRS(0x1e) |
				S_028838_ES_GPRS(0x1e) |
				S_028838_HS_GPRS(0x1e) |
				S_028838_LS_GPRS(0x1e));
	}

	/* XXX: Investigate setting bit 15, which is FAST_COMPUTE_MODE */
	r600_store_context_reg(cb, R_028A40_VGT_GS_MODE,
		S_028A40_COMPUTE_MODE(1) | S_028A40_PARTIAL_THD_AT_EOI(1));

	r600_store_context_reg(cb, R_028B54_VGT_SHADER_STAGES_EN, 2/*CS_ON*/);

	r600_store_context_reg(cb, R_0286E8_SPI_COMPUTE_INPUT_CNTL,
			       S_0286E8_TID_IN_GROUP_ENA
			       | S_0286E8_TGID_ENA
			       | S_0286E8_DISABLE_INDEX_PACK);

	/* The LOOP_CONST registers are an optimization for loops that allows
	 * you to store the initial counter, increment value, and maximum
	 * counter value in a register so that hardware can calculate the
	 * correct number of iterations for the loop, so that you don't need
	 * to have the loop counter in your shader code. We don't currently use
	 * this optimization, so we must keep track of the counter in the
	 * shader and use a break instruction to exit loops. However, the
	 * hardware will still use this register to determine when to exit a
	 * loop, so we need to initialize the counter to 0, set the increment
	 * value to 1 and the maximum counter value to 4095 (0xfff), which
	 * is the maximum value allowed. This gives us a maximum of 4096
	 * iterations for our loops, but hopefully our break instruction will
	 * execute some time before the 4096th iteration. */
	eg_store_loop_const(cb, R_03A200_SQ_LOOP_CONST_0 + (160 * 4), 0x1000FFF);
}
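
/* Decoding the 0x1000FFF value above (assuming the usual SQ_LOOP_CONST
 * field layout of count in bits 11:0, init in bits 23:12 and increment
 * in bits 31:24): increment = 0x01, init = 0x000, count = 0xFFF (4095),
 * i.e. start at 0 and step by 1 up to the maximum counter value, matching
 * the comment above. */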
void evergreen_init_compute_state_functions(struct r600_context *ctx)
{
	ctx->context.create_compute_state = evergreen_create_compute_state;
	ctx->context.delete_compute_state = evergreen_delete_compute_state;
	ctx->context.bind_compute_state = evergreen_bind_compute_state;
//	 ctx->context.create_sampler_view = evergreen_compute_create_sampler_view;
	ctx->context.set_compute_resources = evergreen_set_compute_resources;
	ctx->context.set_compute_sampler_views = evergreen_set_cs_sampler_view;
	ctx->context.bind_compute_sampler_states = evergreen_bind_compute_sampler_states;
	ctx->context.set_global_binding = evergreen_set_global_binding;
	ctx->context.launch_grid = evergreen_launch_grid;

	/* We always use at least two vertex buffers for compute, one for
	 * parameters and one for global memory */
	ctx->cs_vertex_buffer_state.enabled_mask =
	ctx->cs_vertex_buffer_state.dirty_mask = 1 | 2; /* bits for vb slots 0 and 1 */
}
struct pipe_resource *r600_compute_global_buffer_create(
	struct pipe_screen *screen,
	const struct pipe_resource *templ)
{
	struct r600_resource_global* result = NULL;
	struct r600_screen* rscreen = NULL;
	int size_in_dw = 0;

	assert(templ->target == PIPE_BUFFER);
	assert(templ->bind & PIPE_BIND_GLOBAL);
	assert(templ->array_size == 1 || templ->array_size == 0);
	assert(templ->depth0 == 1 || templ->depth0 == 0);
	assert(templ->height0 == 1 || templ->height0 == 0);

	result = (struct r600_resource_global*)
		CALLOC(sizeof(struct r600_resource_global), 1);
	rscreen = (struct r600_screen*)screen;

	COMPUTE_DBG(rscreen, "*** r600_compute_global_buffer_create\n");
	COMPUTE_DBG(rscreen, "width = %u array_size = %u\n", templ->width0,
			templ->array_size);

	/* Copy the template first, then set the vtbl and screen pointers so
	 * the copy cannot clobber them. */
	result->base.b.b = *templ;
	result->base.b.vtbl = &r600_global_buffer_vtbl;
	result->base.b.b.screen = screen;
	pipe_reference_init(&result->base.b.b.reference, 1);

	/* Global buffers are suballocated from the pool in dwords, rounding
	 * the size up. */
	size_in_dw = (templ->width0+3) / 4;

	result->chunk = compute_memory_alloc(rscreen->global_pool, size_in_dw);

	if (result->chunk == NULL)
	{
		free(result);
		return NULL;
	}

	return &result->base.b.b;
}
void r600_compute_global_buffer_destroy(
	struct pipe_screen *screen,
	struct pipe_resource *res)
{
	struct r600_resource_global* buffer = NULL;
	struct r600_screen* rscreen = NULL;

	assert(res->target == PIPE_BUFFER);
	assert(res->bind & PIPE_BIND_GLOBAL);

	buffer = (struct r600_resource_global*)res;
	rscreen = (struct r600_screen*)screen;

	compute_memory_free(rscreen->global_pool, buffer->chunk->id);

	buffer->chunk = NULL;
	free(res);
}
void *r600_compute_global_transfer_map(
	struct pipe_context *ctx_,
	struct pipe_resource *resource,
	unsigned level,
	unsigned usage,
	const struct pipe_box *box,
	struct pipe_transfer **ptransfer)
{
	struct r600_context *rctx = (struct r600_context*)ctx_;
	struct compute_memory_pool *pool = rctx->screen->global_pool;
	struct pipe_transfer *transfer = util_slab_alloc(&rctx->pool_transfers);
	struct r600_resource_global* buffer =
		(struct r600_resource_global*)resource;
	uint32_t* map;

	compute_memory_finalize_pending(pool, ctx_);

	assert(resource->target == PIPE_BUFFER);

	COMPUTE_DBG(rctx->screen, "* r600_compute_global_get_transfer()\n"
			"level = %u, usage = %u, box(x = %u, y = %u, z = %u "
			"width = %u, height = %u, depth = %u)\n", level, usage,
			box->x, box->y, box->z, box->width, box->height,
			box->depth);

	transfer->resource = resource;
	transfer->level = level;
	transfer->usage = usage;
	transfer->box = *box;
	transfer->stride = 0;
	transfer->layer_stride = 0;

	assert(transfer->resource->target == PIPE_BUFFER);
	assert(transfer->resource->bind & PIPE_BIND_GLOBAL);
	assert(transfer->box.x >= 0);
	assert(transfer->box.y == 0);
	assert(transfer->box.z == 0);

	///TODO: do it better, mapping is not possible if the pool is too big

	COMPUTE_DBG(rctx->screen, "* r600_compute_global_transfer_map()\n");

	if (!(map = r600_buffer_mmap_sync_with_rings(rctx, buffer->chunk->pool->bo, transfer->usage))) {
		util_slab_free(&rctx->pool_transfers, transfer);
		return NULL;
	}

	*ptransfer = transfer;

	COMPUTE_DBG(rctx->screen, "Buffer: %p + %u (buffer offset in global memory) "
		"+ %u (box.x)\n", map, buffer->chunk->start_in_dw, transfer->box.x);
	return ((char*)(map + buffer->chunk->start_in_dw)) + transfer->box.x;
}
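
/* Note on the return value above: map is a uint32_t pointer, so adding
 * chunk->start_in_dw advances it by whole dwords before the cast, and
 * box.x is then applied as a byte offset. For example, a chunk starting
 * at dword 256 with box.x = 8 maps to byte 1024 + 8 = 1032 of the pool. */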
void r600_compute_global_transfer_unmap(
	struct pipe_context *ctx_,
	struct pipe_transfer* transfer)
{
	struct r600_context *ctx = NULL;
	struct r600_resource_global* buffer = NULL;

	assert(transfer->resource->target == PIPE_BUFFER);
	assert(transfer->resource->bind & PIPE_BIND_GLOBAL);

	ctx = (struct r600_context *)ctx_;
	buffer = (struct r600_resource_global*)transfer->resource;

	COMPUTE_DBG(ctx->screen, "* r600_compute_global_transfer_unmap()\n");

	ctx->ws->buffer_unmap(buffer->chunk->pool->bo->cs_buf);
	util_slab_free(&ctx->pool_transfers, transfer);
}
void r600_compute_global_transfer_flush_region(
	struct pipe_context *ctx_,
	struct pipe_transfer *transfer,
	const struct pipe_box *box)
{
	assert(0 && "TODO");
}

void r600_compute_global_transfer_inline_write(
	struct pipe_context *pipe,
	struct pipe_resource *resource,
	unsigned level,
	unsigned usage,
	const struct pipe_box *box,
	const void *data,
	unsigned stride,
	unsigned layer_stride)
{
	assert(0 && "TODO");
}