/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "si_pipe.h"
#include "si_shader_internal.h"

#include "si_build_pm4.h"
#include "ac_llvm_cull.h"

#include "util/u_prim.h"
#include "util/u_suballoc.h"
#include "util/u_upload_mgr.h"
#include "util/fast_idiv_by_const.h"

/* Based on:
 * https://frostbite-wp-prd.s3.amazonaws.com/wp-content/uploads/2016/03/29204330/GDC_2016_Compute.pdf
 */
/* This file implements primitive culling using asynchronous compute.
 * It's written to be GL conformant.
 *
 * It takes a monolithic VS in LLVM IR returning gl_Position and invokes it
 * in a compute shader. The shader processes 1 primitive/thread by invoking
 * the VS for each vertex to get the positions, decomposes strips and fans
 * into triangles (if needed), eliminates primitive restart (if needed),
 * does (W<0) culling, face culling, view XY culling, zero-area and
 * small-primitive culling, and generates a new index buffer that doesn't
 * contain culled primitives.
 *
 * The index buffer is generated using the Ordered Count feature of GDS,
 * which is an atomic counter that is incremented in the wavefront launch
 * order, so that the original primitive order is preserved.
 *
 * Another GDS ordered counter is used to eliminate primitive restart indices.
 * If a restart index lands on an even thread ID, the compute shader has to flip
 * the primitive orientation of the whole following triangle strip. The primitive
 * orientation has to be correct after strip and fan decomposition for two-sided
 * shading to behave correctly. The decomposition also needs to be aware of
 * which vertex is the provoking vertex for flat shading to behave correctly.
 *
 * IB = a GPU command buffer
 *
 * Both the compute and gfx IBs run in parallel sort of like CE and DE.
 * The gfx IB has a CP barrier (REWIND packet) before a draw packet. REWIND
 * doesn't continue if its word isn't 0x80000000. Once compute shaders are
 * finished culling, the last wave will write the final primitive count from
 * GDS directly into the count word of the draw packet in the gfx IB, and
 * a CS_DONE event will signal the REWIND packet to continue. It's really
 * a direct draw with command buffer patching from the compute queue.
 *
 * The compute IB doesn't have to start when its corresponding gfx IB starts,
 * but can start sooner. The compute IB is signaled to start after the last
 * execution barrier in the *previous* gfx IB. This is handled as follows.
 * The kernel GPU scheduler starts the compute IB after the previous gfx IB has
 * started. The compute IB then waits (WAIT_REG_MEM) for a mid-IB fence that
 * represents the barrier in the previous gfx IB.
 *
 * Features:
 * - Triangle strips and fans are decomposed into an indexed triangle list.
 *   The decomposition differs based on the provoking vertex state.
 * - Instanced draws are converted into non-instanced draws for 16-bit indices.
 *   (InstanceID is stored in the high bits of VertexID and unpacked by VS)
 * - Primitive restart is fully supported with triangle strips, including
 *   correct primitive orientation across multiple waves. (restart indices
 *   reset primitive orientation)
 * - W<0 culling (W<0 is behind the viewer, sort of like near Z culling).
 * - Back face culling, incl. culling zero-area / degenerate primitives.
 * - View XY culling.
 * - View Z culling (disabled due to limited impact with perspective projection).
 * - Small primitive culling for all MSAA modes and all quant modes.
 *
 * The following are not implemented:
 * - ClipVertex/ClipDistance/CullDistance-based culling.
 * - Scissor culling.
 * - HiZ culling.
 *
 * Limitations (and unimplemented features that may be possible to implement):
 * - Only triangles, triangle strips, and triangle fans are supported.
 * - Primitive restart is only supported with triangle strips.
 * - Instancing and primitive restart can't be used together.
 * - Instancing is only supported with 16-bit indices and instance count <= 2^16.
 * - The instance divisor buffer is unavailable, so all divisors must be
 *   either 0 or 1.
 * - Multidraws where the vertex shader reads gl_DrawID are unsupported.
 * - No support for tessellation and geometry shaders.
 *   (patch elimination where tess factors are 0 would be possible to implement)
 * - The vertex shader must not contain memory stores.
 * - All VS resources must not have a write usage in the command buffer.
 * - Bindless textures and images must not occur in the vertex shader.
 *
 * User data SGPR layout:
 *   INDEX_BUFFERS: pointer to constants
 *     0..3: input index buffer - typed buffer view
 *     4..7: output index buffer - typed buffer view
 *     8..11: viewport state - scale.xy, translate.xy
 *   VERTEX_COUNTER: counter address or first primitive ID
 *     - If unordered memory counter: address of "count" in the draw packet
 *       and is incremented atomically by the shader.
 *     - If unordered GDS counter: address of "count" in GDS starting from 0,
 *       must be initialized to 0 before the dispatch.
 *     - If ordered GDS counter: the primitive ID that should reset the vertex
 *       counter to 0 in GDS
 *   LAST_WAVE_PRIM_ID: the primitive ID that should write the final vertex
 *       count to memory if using GDS ordered append
 *   VERTEX_COUNT_ADDR: where the last wave should write the vertex count if
 *       using GDS ordered append
 *   VS.VERTEX_BUFFERS:           same value as VS
 *   VS.CONST_AND_SHADER_BUFFERS: same value as VS
 *   VS.SAMPLERS_AND_IMAGES:      same value as VS
 *   VS.BASE_VERTEX:              same value as VS
 *   VS.START_INSTANCE:           same value as VS
 *   NUM_PRIMS_UDIV_MULTIPLIER: For fast 31-bit division by the number of primitives
 *       per instance for instancing.
 *   NUM_PRIMS_UDIV_TERMS:
 *     - Bits [0:4]: "post_shift" for fast 31-bit division for instancing.
 *     - Bits [5:31]: The number of primitives per instance for computing the remainder.
 *   PRIMITIVE_RESTART_INDEX
 *   SMALL_PRIM_CULLING_PRECISION: Scale the primitive bounding box by this number.
 *
 * The code contains 3 codepaths:
 * - Unordered memory counter (for debugging, random primitive order, no primitive restart)
 * - Unordered GDS counter (for debugging, random primitive order, no primitive restart)
 * - Ordered GDS counter (it preserves the primitive order)
 *
 * How to test primitive restart (the most complicated part because it needs
 * to get the primitive orientation right):
 *   Set THREADGROUP_SIZE to 2 to exercise both intra-wave and inter-wave
 *   primitive orientation flips with small draw calls, which is what most tests use.
 *   You can also enable draw call splitting into draw calls with just 2 primitives.
 */
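
/* Worked example of the NUM_PRIMS_UDIV_TERMS packing described above
 * (illustrative, not part of the original comment): for 100 primitives per
 * instance, the CPU side packs
 *
 *    terms = post_shift | (100 << 5);
 *
 * and the shader recovers the two terms as
 *
 *    post_shift         = terms & 0x1f;
 *    prims_per_instance = terms >> 5;
 *
 * matching the unpacking code in si_build_prim_discard_compute_shader below.
 */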
/* At least 256 is needed for the fastest wave launch rate from compute queues
 * due to hw constraints. Nothing in the code needs more than 1 wave/threadgroup. */
#define THREADGROUP_SIZE		256 /* high numbers limit available VGPRs */
#define THREADGROUPS_PER_CU		1 /* TGs to launch on 1 CU before going onto the next, max 8 */
#define MAX_WAVES_PER_SH		0 /* no limit */
#define INDEX_STORES_USE_SLC		1 /* don't cache indices if L2 is full */
/* Don't cull Z. We already do (W < 0) culling for primitives behind the viewer. */
#define CULL_Z				0
/* 0 = unordered memory counter, 1 = unordered GDS counter, 2 = ordered GDS counter */
#define VERTEX_COUNTER_GDS_MODE		2
#define GDS_SIZE_UNORDERED		(4 * 1024) /* only for the unordered GDS counter */

/* Grouping compute dispatches for small draw calls: How many primitives from multiple
 * draw calls to process by compute before signaling the gfx IB. This reduces the number
 * of EOP events + REWIND packets, because they decrease performance. */
#define PRIMS_PER_BATCH			(512 * 1024)
/* Draw call splitting at the packet level. This allows signaling the gfx IB
 * for big draw calls sooner, but doesn't allow context flushes between packets.
 * Primitive restart is supported. Only implemented for ordered append. */
#define SPLIT_PRIMS_PACKET_LEVEL_VALUE	PRIMS_PER_BATCH
/* If there is not enough ring buffer space for the current IB, split draw calls into
 * this number of primitives, so that we can flush the context and get free ring space. */
#define SPLIT_PRIMS_DRAW_LEVEL		PRIMS_PER_BATCH

/* Derived values. */
#define WAVES_PER_TG			DIV_ROUND_UP(THREADGROUP_SIZE, 64)
#define SPLIT_PRIMS_PACKET_LEVEL	(VERTEX_COUNTER_GDS_MODE == 2 ? \
					 SPLIT_PRIMS_PACKET_LEVEL_VALUE : \
					 UINT_MAX & ~(THREADGROUP_SIZE - 1))

#define REWIND_SIGNAL_BIT		0x80000000
/* For emulating the rewind packet on CI. */
#define FORCE_REWIND_EMULATION		0
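
/* Sanity check on the derived values (illustrative): with THREADGROUP_SIZE = 256
 * and 64-thread waves, WAVES_PER_TG = DIV_ROUND_UP(256, 64) = 4. With any mode
 * other than the ordered GDS counter, SPLIT_PRIMS_PACKET_LEVEL degenerates to
 * UINT_MAX & ~255, i.e. packet-level splitting is effectively disabled. */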
void si_initialize_prim_discard_tunables(struct si_screen *sscreen,
					 bool is_aux_context,
					 unsigned *prim_discard_vertex_count_threshold,
					 unsigned *index_ring_size_per_ib)
{
	*prim_discard_vertex_count_threshold = UINT_MAX; /* disable */

	if (sscreen->info.chip_class == GFX6 || /* SI support is not implemented */
	    !sscreen->info.has_gds_ordered_append ||
	    sscreen->debug_flags & DBG(NO_PD) ||
	    is_aux_context)
		return;

	/* TODO: enable this after the GDS kernel memory management is fixed */
	bool enable_on_pro_graphics_by_default = false;

	if (sscreen->debug_flags & DBG(ALWAYS_PD) ||
	    sscreen->debug_flags & DBG(PD) ||
	    (enable_on_pro_graphics_by_default &&
	     sscreen->info.is_pro_graphics &&
	     (sscreen->info.family == CHIP_BONAIRE ||
	      sscreen->info.family == CHIP_HAWAII ||
	      sscreen->info.family == CHIP_TONGA ||
	      sscreen->info.family == CHIP_FIJI ||
	      sscreen->info.family == CHIP_POLARIS10 ||
	      sscreen->info.family == CHIP_POLARIS11 ||
	      sscreen->info.family == CHIP_VEGA10 ||
	      sscreen->info.family == CHIP_VEGA20))) {
		*prim_discard_vertex_count_threshold = 6000 * 3; /* 6K triangles */

		if (sscreen->debug_flags & DBG(ALWAYS_PD))
			*prim_discard_vertex_count_threshold = 0; /* always enable */

		const uint32_t MB = 1024 * 1024;
		const uint64_t GB = 1024 * 1024 * 1024;

		/* The total size is double this per context.
		 * Greater numbers allow bigger gfx IBs.
		 */
		if (sscreen->info.vram_size <= 2 * GB)
			*index_ring_size_per_ib = 64 * MB;
		else if (sscreen->info.vram_size <= 4 * GB)
			*index_ring_size_per_ib = 128 * MB;
		else
			*index_ring_size_per_ib = 256 * MB;
	}
}
/* Opcode can be "add" or "swap". */
static LLVMValueRef
si_build_ds_ordered_op(struct si_shader_context *ctx, const char *opcode,
		       LLVMValueRef m0, LLVMValueRef value, unsigned ordered_count_index,
		       bool release, bool done)
{
	LLVMValueRef args[] = {
		LLVMBuildIntToPtr(ctx->ac.builder, m0,
				  LLVMPointerType(ctx->ac.i32, AC_ADDR_SPACE_GDS), ""),
		value,
		LLVMConstInt(ctx->ac.i32, LLVMAtomicOrderingMonotonic, 0), /* ordering */
		ctx->ac.i32_0, /* scope */
		ctx->ac.i1false, /* volatile */
		LLVMConstInt(ctx->ac.i32, ordered_count_index, 0),
		LLVMConstInt(ctx->ac.i1, release, 0),
		LLVMConstInt(ctx->ac.i1, done, 0),
	};

	char intrinsic[64];
	snprintf(intrinsic, sizeof(intrinsic), "llvm.amdgcn.ds.ordered.%s", opcode);
	return ac_build_intrinsic(&ctx->ac, intrinsic, ctx->ac.i32, args, ARRAY_SIZE(args), 0);
}
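
/* Expand a 32-bit GPU address to a 64-bit global pointer by attaching the
 * upper 32 bits of the driver's 32-bit address space (address32_hi). */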
static LLVMValueRef si_expand_32bit_pointer(struct si_shader_context *ctx, LLVMValueRef ptr)
{
	uint64_t hi = (uint64_t)ctx->screen->info.address32_hi << 32;
	ptr = LLVMBuildZExt(ctx->ac.builder, ptr, ctx->ac.i64, "");
	ptr = LLVMBuildOr(ctx->ac.builder, ptr, LLVMConstInt(ctx->ac.i64, hi, 0), "");
	return LLVMBuildIntToPtr(ctx->ac.builder, ptr,
				 LLVMPointerType(ctx->ac.i32, AC_ADDR_SPACE_GLOBAL), "");
}
struct si_thread0_section {
	struct si_shader_context *ctx;
	LLVMValueRef vgpr_result; /* a VGPR for the value on thread 0. */
	LLVMValueRef saved_exec;
};

/* Enter a section that only executes on thread 0. */
static void si_enter_thread0_section(struct si_shader_context *ctx,
				     struct si_thread0_section *section,
				     LLVMValueRef thread_id)
{
	section->ctx = ctx;
	section->vgpr_result = ac_build_alloca_undef(&ctx->ac, ctx->ac.i32, "result0");

	/* This IF has 4 instructions:
	 *   v_and_b32_e32 v, 63, v         ; get the thread ID
	 *   v_cmp_eq_u32_e32 vcc, 0, v     ; thread ID == 0
	 *   s_and_saveexec_b64 s, vcc
	 *   s_cbranch_execz BB0_4
	 *
	 * It could just be s_and_saveexec_b64 s, 1.
	 */
	ac_build_ifcc(&ctx->ac,
		      LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ, thread_id,
				    ctx->ac.i32_0, ""), 12601);
}

/* Exit a section that only executes on thread 0 and broadcast the result
 * to all threads. */
static void si_exit_thread0_section(struct si_thread0_section *section,
				    LLVMValueRef *result)
{
	struct si_shader_context *ctx = section->ctx;

	LLVMBuildStore(ctx->ac.builder, *result, section->vgpr_result);

	ac_build_endif(&ctx->ac, 12601);

	/* Broadcast the result from thread 0 to all threads. */
	*result = ac_build_readlane(&ctx->ac,
				    LLVMBuildLoad(ctx->ac.builder, section->vgpr_result, ""), NULL);
}
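
/* Build the primitive discard compute shader. The monolithic VS (ctx->main_fn)
 * is inlined and called once per vertex to get positions; the primitive is then
 * culled, and the indices of surviving primitives are appended to the output
 * index buffer using one of the 3 counter codepaths described at the top of
 * this file. */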
void si_build_prim_discard_compute_shader(struct si_shader_context *ctx)
{
	struct si_shader_key *key = &ctx->shader->key;
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef vs = ctx->main_fn;

	/* Always inline the VS function. */
	ac_add_function_attr(ctx->ac.context, vs, -1, AC_FUNC_ATTR_ALWAYSINLINE);
	LLVMSetLinkage(vs, LLVMPrivateLinkage);

	enum ac_arg_type const_desc_type;
	if (ctx->shader->selector->info.const_buffers_declared == 1 &&
	    ctx->shader->selector->info.shader_buffers_declared == 0)
		const_desc_type = AC_ARG_CONST_FLOAT_PTR;
	else
		const_desc_type = AC_ARG_CONST_DESC_PTR;

	memset(&ctx->args, 0, sizeof(ctx->args));

	struct ac_arg param_index_buffers_and_constants, param_vertex_counter;
	struct ac_arg param_vb_desc, param_const_desc;
	struct ac_arg param_base_vertex, param_start_instance;
	struct ac_arg param_block_id, param_local_id, param_ordered_wave_id;
	struct ac_arg param_restart_index, param_smallprim_precision;
	struct ac_arg param_num_prims_udiv_multiplier, param_num_prims_udiv_terms;
	struct ac_arg param_sampler_desc, param_last_wave_prim_id, param_vertex_count_addr;

	ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_CONST_DESC_PTR,
		   &param_index_buffers_and_constants);
	ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &param_vertex_counter);
	ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &param_last_wave_prim_id);
	ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &param_vertex_count_addr);
	ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_CONST_DESC_PTR,
		   &param_vb_desc);
	ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, const_desc_type,
		   &param_const_desc);
	ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_CONST_IMAGE_PTR,
		   &param_sampler_desc);
	ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &param_base_vertex);
	ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &param_start_instance);
	ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &param_num_prims_udiv_multiplier);
	ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &param_num_prims_udiv_terms);
	ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &param_restart_index);
	ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, &param_smallprim_precision);

	/* Block ID and thread ID inputs. */
	ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &param_block_id);
	if (VERTEX_COUNTER_GDS_MODE == 2)
		ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &param_ordered_wave_id);
	ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &param_local_id);

	/* Create the compute shader function. */
	unsigned old_type = ctx->type;
	ctx->type = PIPE_SHADER_COMPUTE;
	si_llvm_create_func(ctx, "prim_discard_cs", NULL, 0, THREADGROUP_SIZE);
	ctx->type = old_type;

	if (VERTEX_COUNTER_GDS_MODE == 2) {
		ac_llvm_add_target_dep_function_attr(ctx->main_fn,
						     "amdgpu-gds-size", 256);
	} else if (VERTEX_COUNTER_GDS_MODE == 1) {
		ac_llvm_add_target_dep_function_attr(ctx->main_fn, "amdgpu-gds-size",
						     GDS_SIZE_UNORDERED);
	}

	/* Assemble parameters for VS. */
	LLVMValueRef vs_params[16];
	unsigned num_vs_params = 0;
	unsigned param_vertex_id, param_instance_id;

	vs_params[num_vs_params++] = LLVMGetUndef(LLVMTypeOf(LLVMGetParam(vs, 0))); /* RW_BUFFERS */
	vs_params[num_vs_params++] = LLVMGetUndef(LLVMTypeOf(LLVMGetParam(vs, 1))); /* BINDLESS */
	vs_params[num_vs_params++] = ac_get_arg(&ctx->ac, param_const_desc);
	vs_params[num_vs_params++] = ac_get_arg(&ctx->ac, param_sampler_desc);
	vs_params[num_vs_params++] = LLVMConstInt(ctx->ac.i32,
						  S_VS_STATE_INDEXED(key->opt.cs_indexed), 0);
	vs_params[num_vs_params++] = ac_get_arg(&ctx->ac, param_base_vertex);
	vs_params[num_vs_params++] = ac_get_arg(&ctx->ac, param_start_instance);
	vs_params[num_vs_params++] = ctx->ac.i32_0; /* DrawID */
	vs_params[num_vs_params++] = ac_get_arg(&ctx->ac, param_vb_desc);

	vs_params[(param_vertex_id = num_vs_params++)] = NULL; /* VertexID */
	vs_params[(param_instance_id = num_vs_params++)] = NULL; /* InstanceID */
	vs_params[num_vs_params++] = ctx->ac.i32_0; /* unused (PrimID) */
	vs_params[num_vs_params++] = ctx->ac.i32_0; /* unused */

	assert(num_vs_params <= ARRAY_SIZE(vs_params));
	assert(num_vs_params == LLVMCountParamTypes(LLVMGetElementType(LLVMTypeOf(vs))));

	/* Load descriptors. (load 8 dwords at once) */
	LLVMValueRef input_indexbuf, output_indexbuf, tmp, desc[8];

	LLVMValueRef index_buffers_and_constants =
		ac_get_arg(&ctx->ac, param_index_buffers_and_constants);
	tmp = LLVMBuildPointerCast(builder, index_buffers_and_constants,
				   ac_array_in_const32_addr_space(ctx->ac.v8i32), "");
	tmp = ac_build_load_to_sgpr(&ctx->ac, tmp, ctx->ac.i32_0);

	for (unsigned i = 0; i < 8; i++)
		desc[i] = ac_llvm_extract_elem(&ctx->ac, tmp, i);

	input_indexbuf = ac_build_gather_values(&ctx->ac, desc, 4);
	output_indexbuf = ac_build_gather_values(&ctx->ac, desc + 4, 4);

	/* Compute PrimID and InstanceID. */
	LLVMValueRef global_thread_id =
		ac_build_imad(&ctx->ac, ac_get_arg(&ctx->ac, param_block_id),
			      LLVMConstInt(ctx->ac.i32, THREADGROUP_SIZE, 0),
			      ac_get_arg(&ctx->ac, param_local_id));
	LLVMValueRef prim_id = global_thread_id; /* PrimID within an instance */
	LLVMValueRef instance_id = ctx->ac.i32_0;

	if (key->opt.cs_instancing) {
		LLVMValueRef num_prims_udiv_terms =
			ac_get_arg(&ctx->ac, param_num_prims_udiv_terms);
		LLVMValueRef num_prims_udiv_multiplier =
			ac_get_arg(&ctx->ac, param_num_prims_udiv_multiplier);
		/* Unpack num_prims_udiv_terms. */
		LLVMValueRef post_shift = LLVMBuildAnd(builder, num_prims_udiv_terms,
						       LLVMConstInt(ctx->ac.i32, 0x1f, 0), "");
		LLVMValueRef prims_per_instance = LLVMBuildLShr(builder, num_prims_udiv_terms,
								LLVMConstInt(ctx->ac.i32, 5, 0), "");
		/* Divide the total prim_id by the number of prims per instance. */
		instance_id = ac_build_fast_udiv_u31_d_not_one(&ctx->ac, prim_id,
							       num_prims_udiv_multiplier,
							       post_shift);
		/* Compute the remainder. */
		prim_id = LLVMBuildSub(builder, prim_id,
				       LLVMBuildMul(builder, instance_id,
						    prims_per_instance, ""), "");
	}

	/* Generate indices (like a non-indexed draw call). */
	LLVMValueRef index[4] = {NULL, NULL, NULL, LLVMGetUndef(ctx->ac.i32)};
	unsigned vertices_per_prim = 3;

	switch (key->opt.cs_prim_type) {
	case PIPE_PRIM_TRIANGLES:
		for (unsigned i = 0; i < 3; i++) {
			index[i] = ac_build_imad(&ctx->ac, prim_id,
						 LLVMConstInt(ctx->ac.i32, 3, 0),
						 LLVMConstInt(ctx->ac.i32, i, 0));
		}
		break;
	case PIPE_PRIM_TRIANGLE_STRIP:
		for (unsigned i = 0; i < 3; i++) {
			index[i] = LLVMBuildAdd(builder, prim_id,
						LLVMConstInt(ctx->ac.i32, i, 0), "");
		}
		break;
	case PIPE_PRIM_TRIANGLE_FAN:
		/* Vertex 1 is first and vertex 2 is last. This will go to the hw clipper
		 * and rasterizer as a normal triangle, so we need to put the provoking
		 * vertex into the correct index variable and preserve orientation at the same time.
		 * gl_VertexID is preserved, because it's equal to the index.
		 */
		if (key->opt.cs_provoking_vertex_first) {
			index[0] = LLVMBuildAdd(builder, prim_id, LLVMConstInt(ctx->ac.i32, 1, 0), "");
			index[1] = LLVMBuildAdd(builder, prim_id, LLVMConstInt(ctx->ac.i32, 2, 0), "");
			index[2] = ctx->ac.i32_0;
		} else {
			index[0] = ctx->ac.i32_0;
			index[1] = LLVMBuildAdd(builder, prim_id, LLVMConstInt(ctx->ac.i32, 1, 0), "");
			index[2] = LLVMBuildAdd(builder, prim_id, LLVMConstInt(ctx->ac.i32, 2, 0), "");
		}
		break;
	default:
		unreachable("unexpected primitive type");
	}

	/* Fetch indices. */
	if (key->opt.cs_indexed) {
		for (unsigned i = 0; i < 3; i++) {
			index[i] = ac_build_buffer_load_format(&ctx->ac, input_indexbuf,
							       index[i], ctx->ac.i32_0, 1,
							       0, true);
			index[i] = ac_to_integer(&ctx->ac, index[i]);
		}
	}

	LLVMValueRef ordered_wave_id = NULL;

	/* Extract the ordered wave ID. */
	if (VERTEX_COUNTER_GDS_MODE == 2) {
		ordered_wave_id = ac_get_arg(&ctx->ac, param_ordered_wave_id);
		ordered_wave_id = LLVMBuildLShr(builder, ordered_wave_id,
						LLVMConstInt(ctx->ac.i32, 6, 0), "");
		ordered_wave_id = LLVMBuildAnd(builder, ordered_wave_id,
					       LLVMConstInt(ctx->ac.i32, 0xfff, 0), "");
	}
	LLVMValueRef thread_id =
		LLVMBuildAnd(builder, ac_get_arg(&ctx->ac, param_local_id),
			     LLVMConstInt(ctx->ac.i32, 63, 0), "");

	/* Every other triangle in a strip has a reversed vertex order, so we
	 * need to swap vertices of odd primitives to get the correct primitive
	 * orientation when converting triangle strips to triangles. Primitive
	 * restart complicates it, because a strip can start anywhere.
	 */
	LLVMValueRef prim_restart_accepted = ctx->ac.i1true;
	LLVMValueRef vertex_counter = ac_get_arg(&ctx->ac, param_vertex_counter);

	if (key->opt.cs_prim_type == PIPE_PRIM_TRIANGLE_STRIP) {
		/* Without primitive restart, odd primitives have reversed orientation.
		 * Only primitive restart can flip it with respect to the first vertex
		 * of the draw call.
		 */
		LLVMValueRef first_is_odd = ctx->ac.i1false;

		/* Handle primitive restart. */
		if (key->opt.cs_primitive_restart) {
			/* Get the GDS primitive restart continue flag and clear
			 * the flag in vertex_counter. This flag is used when the draw
			 * call was split and we need to load the primitive orientation
			 * flag from GDS for the first wave too.
			 */
			LLVMValueRef gds_prim_restart_continue =
				LLVMBuildLShr(builder, vertex_counter,
					      LLVMConstInt(ctx->ac.i32, 31, 0), "");
			gds_prim_restart_continue =
				LLVMBuildTrunc(builder, gds_prim_restart_continue, ctx->ac.i1, "");
			vertex_counter = LLVMBuildAnd(builder, vertex_counter,
						      LLVMConstInt(ctx->ac.i32, 0x7fffffff, 0), "");

			LLVMValueRef index0_is_reset;

			for (unsigned i = 0; i < 3; i++) {
				LLVMValueRef not_reset = LLVMBuildICmp(builder, LLVMIntNE, index[i],
								       ac_get_arg(&ctx->ac, param_restart_index),
								       "");
				if (i == 0)
					index0_is_reset = LLVMBuildNot(builder, not_reset, "");
				prim_restart_accepted = LLVMBuildAnd(builder, prim_restart_accepted,
								     not_reset, "");
			}

			/* If the previous waves flip the primitive orientation
			 * of the current triangle strip, it will be stored in GDS.
			 *
			 * Sometimes the correct orientation is not needed, in which case
			 * we don't need to execute this.
			 */
			if (key->opt.cs_need_correct_orientation && VERTEX_COUNTER_GDS_MODE == 2) {
				/* If there are reset indices in this wave, get the thread index
				 * where the most recent strip starts relative to each thread.
				 */
				LLVMValueRef preceding_threads_mask =
					LLVMBuildSub(builder,
						     LLVMBuildShl(builder, ctx->ac.i64_1,
								  LLVMBuildZExt(builder, thread_id, ctx->ac.i64, ""), ""),
						     ctx->ac.i64_1, "");

				LLVMValueRef reset_threadmask = ac_get_i1_sgpr_mask(&ctx->ac, index0_is_reset);
				LLVMValueRef preceding_reset_threadmask =
					LLVMBuildAnd(builder, reset_threadmask, preceding_threads_mask, "");
				LLVMValueRef strip_start =
					ac_build_umsb(&ctx->ac, preceding_reset_threadmask, NULL);
				strip_start = LLVMBuildAdd(builder, strip_start, ctx->ac.i32_1, "");

				/* This flips the orientation based on reset indices within this wave only. */
				first_is_odd = LLVMBuildTrunc(builder, strip_start, ctx->ac.i1, "");

				LLVMValueRef last_strip_start, prev_wave_state, ret, tmp;
				LLVMValueRef is_first_wave, current_wave_resets_index;

				/* Get the thread index where the last strip starts in this wave.
				 *
				 * If the last strip doesn't start in this wave, the thread index
				 * will be 0.
				 *
				 * If the last strip starts in the next wave, the thread index will
				 * be 64.
				 */
				last_strip_start = ac_build_umsb(&ctx->ac, reset_threadmask, NULL);
				last_strip_start = LLVMBuildAdd(builder, last_strip_start, ctx->ac.i32_1, "");

				struct si_thread0_section section;
				si_enter_thread0_section(ctx, &section, thread_id);

				/* This must be done in the thread 0 section, because
				 * we expect PrimID to be 0 for the whole first wave
				 * in this expression.
				 *
				 * NOTE: This will need to be different if we wanna support
				 * instancing with primitive restart.
				 */
				is_first_wave = LLVMBuildICmp(builder, LLVMIntEQ, prim_id, ctx->ac.i32_0, "");
				is_first_wave = LLVMBuildAnd(builder, is_first_wave,
							     LLVMBuildNot(builder,
									  gds_prim_restart_continue, ""), "");
				current_wave_resets_index = LLVMBuildICmp(builder, LLVMIntNE,
									  last_strip_start, ctx->ac.i32_0, "");

				ret = ac_build_alloca_undef(&ctx->ac, ctx->ac.i32, "prev_state");

				/* Save the last strip start primitive index in GDS and read
				 * the value that previous waves stored.
				 *
				 * if (is_first_wave || current_wave_resets_strip)
				 *	// Read the value that previous waves stored and store a new one.
				 *	first_is_odd = ds.ordered.swap(last_strip_start);
				 * else
				 *	// Just read the value that previous waves stored.
				 *	first_is_odd = ds.ordered.add(0);
				 */
				ac_build_ifcc(&ctx->ac,
					      LLVMBuildOr(builder, is_first_wave,
							  current_wave_resets_index, ""), 12602);
				{
					/* The GDS address is always 0 with ordered append. */
					tmp = si_build_ds_ordered_op(ctx, "swap",
								     ordered_wave_id, last_strip_start,
								     1, true, false);
					LLVMBuildStore(builder, tmp, ret);
				}
				ac_build_else(&ctx->ac, 12603);
				{
					/* Just read the value from GDS. */
					tmp = si_build_ds_ordered_op(ctx, "add",
								     ordered_wave_id, ctx->ac.i32_0,
								     1, true, false);
					LLVMBuildStore(builder, tmp, ret);
				}
				ac_build_endif(&ctx->ac, 12602);

				prev_wave_state = LLVMBuildLoad(builder, ret, "");
				/* Ignore the return value if this is the first wave. */
				prev_wave_state = LLVMBuildSelect(builder, is_first_wave,
								  ctx->ac.i32_0, prev_wave_state, "");
				si_exit_thread0_section(&section, &prev_wave_state);
				prev_wave_state = LLVMBuildTrunc(builder, prev_wave_state, ctx->ac.i1, "");

				/* If the strip start appears to be on thread 0 for the current primitive
				 * (meaning the reset index is not present in this wave and might have
				 * appeared in previous waves), use the value from GDS to determine
				 * primitive orientation.
				 *
				 * If the strip start is in this wave for the current primitive, use
				 * the value from the current wave to determine primitive orientation.
				 */
				LLVMValueRef strip_start_is0 = LLVMBuildICmp(builder, LLVMIntEQ,
									     strip_start, ctx->ac.i32_0, "");
				first_is_odd = LLVMBuildSelect(builder, strip_start_is0, prev_wave_state,
							       first_is_odd, "");
			}
		}
		/* prim_is_odd = (first_is_odd + current_is_odd) % 2. */
		LLVMValueRef prim_is_odd =
			LLVMBuildXor(builder, first_is_odd,
				     LLVMBuildTrunc(builder, thread_id, ctx->ac.i1, ""), "");

		/* Convert triangle strip indices to triangle indices. */
		ac_build_triangle_strip_indices_to_triangle(&ctx->ac, prim_is_odd,
							    LLVMConstInt(ctx->ac.i1, key->opt.cs_provoking_vertex_first, 0),
							    index);
	}

	/* Execute the vertex shader for each vertex to get vertex positions. */
	LLVMValueRef pos[3][4];
	for (unsigned i = 0; i < vertices_per_prim; i++) {
		vs_params[param_vertex_id] = index[i];
		vs_params[param_instance_id] = instance_id;

		LLVMValueRef ret = ac_build_call(&ctx->ac, vs, vs_params, num_vs_params);
		for (unsigned chan = 0; chan < 4; chan++)
			pos[i][chan] = LLVMBuildExtractValue(builder, ret, chan, "");
	}

	/* Divide XYZ by W. */
	for (unsigned i = 0; i < vertices_per_prim; i++) {
		for (unsigned chan = 0; chan < 3; chan++)
			pos[i][chan] = ac_build_fdiv(&ctx->ac, pos[i][chan], pos[i][3]);
	}

	/* Load the viewport state. */
	LLVMValueRef vp = ac_build_load_invariant(&ctx->ac, index_buffers_and_constants,
						  LLVMConstInt(ctx->ac.i32, 2, 0));
	vp = LLVMBuildBitCast(builder, vp, ctx->ac.v4f32, "");
	LLVMValueRef vp_scale[2], vp_translate[2];
	vp_scale[0] = ac_llvm_extract_elem(&ctx->ac, vp, 0);
	vp_scale[1] = ac_llvm_extract_elem(&ctx->ac, vp, 1);
	vp_translate[0] = ac_llvm_extract_elem(&ctx->ac, vp, 2);
	vp_translate[1] = ac_llvm_extract_elem(&ctx->ac, vp, 3);

	struct ac_cull_options options = {};
	options.cull_front = key->opt.cs_cull_front;
	options.cull_back = key->opt.cs_cull_back;
	options.cull_view_xy = true;
	options.cull_view_near_z = CULL_Z && key->opt.cs_cull_z;
	options.cull_view_far_z = CULL_Z && key->opt.cs_cull_z;
	options.cull_small_prims = true;
	options.cull_zero_area = true;
	options.cull_w = true;
	options.use_halfz_clip_space = key->opt.cs_halfz_clip_space;

	LLVMValueRef accepted =
		ac_cull_triangle(&ctx->ac, pos, prim_restart_accepted,
				 vp_scale, vp_translate,
				 ac_get_arg(&ctx->ac, param_smallprim_precision),
				 &options);

	ac_build_optimization_barrier(&ctx->ac, &accepted);
	LLVMValueRef accepted_threadmask = ac_get_i1_sgpr_mask(&ctx->ac, accepted);

	/* Count the number of active threads by doing bitcount(accepted). */
	LLVMValueRef num_prims_accepted =
		ac_build_intrinsic(&ctx->ac, "llvm.ctpop.i64", ctx->ac.i64,
				   &accepted_threadmask, 1, AC_FUNC_ATTR_READNONE);
	num_prims_accepted = LLVMBuildTrunc(builder, num_prims_accepted, ctx->ac.i32, "");

	LLVMValueRef start;

	/* Execute atomic_add on the vertex count. */
	struct si_thread0_section section;
	si_enter_thread0_section(ctx, &section, thread_id);

	if (VERTEX_COUNTER_GDS_MODE == 0) {
		LLVMValueRef num_indices = LLVMBuildMul(builder, num_prims_accepted,
							LLVMConstInt(ctx->ac.i32, vertices_per_prim, 0), "");
		vertex_counter = si_expand_32bit_pointer(ctx, vertex_counter);
		start = LLVMBuildAtomicRMW(builder, LLVMAtomicRMWBinOpAdd,
					   vertex_counter, num_indices,
					   LLVMAtomicOrderingMonotonic, false);
	} else if (VERTEX_COUNTER_GDS_MODE == 1) {
		LLVMValueRef num_indices = LLVMBuildMul(builder, num_prims_accepted,
							LLVMConstInt(ctx->ac.i32, vertices_per_prim, 0), "");
		vertex_counter = LLVMBuildIntToPtr(builder, vertex_counter,
						   LLVMPointerType(ctx->ac.i32, AC_ADDR_SPACE_GDS), "");
		start = LLVMBuildAtomicRMW(builder, LLVMAtomicRMWBinOpAdd,
					   vertex_counter, num_indices,
					   LLVMAtomicOrderingMonotonic, false);
	} else if (VERTEX_COUNTER_GDS_MODE == 2) {
		LLVMValueRef tmp_store = ac_build_alloca_undef(&ctx->ac, ctx->ac.i32, "");

		/* If the draw call was split into multiple subdraws, each using
		 * a separate draw packet, we need to start counting from 0 for
		 * the first compute wave of the subdraw.
		 *
		 * vertex_counter contains the primitive ID of the first thread
		 * in the first wave.
		 *
		 * This is only correct with VERTEX_COUNTER_GDS_MODE == 2:
		 */
		LLVMValueRef is_first_wave =
			LLVMBuildICmp(builder, LLVMIntEQ, global_thread_id,
				      vertex_counter, "");

		/* Store the primitive count for ordered append, not vertex count.
		 * The idea is to avoid GDS initialization via CP DMA. The shader
		 * effectively stores the first count using "swap".
		 *
		 * if (first_wave) {
		 *	ds.ordered.swap(num_prims_accepted); // store the first primitive count
		 *	previous = 0;
		 * } else {
		 *	previous = ds.ordered.add(num_prims_accepted); // add the primitive count
		 * }
		 */
		ac_build_ifcc(&ctx->ac, is_first_wave, 12604);
		{
			/* The GDS address is always 0 with ordered append. */
			si_build_ds_ordered_op(ctx, "swap", ordered_wave_id,
					       num_prims_accepted, 0, true, true);
			LLVMBuildStore(builder, ctx->ac.i32_0, tmp_store);
		}
		ac_build_else(&ctx->ac, 12605);
		{
			LLVMBuildStore(builder,
				       si_build_ds_ordered_op(ctx, "add", ordered_wave_id,
							      num_prims_accepted, 0,
							      true, true), tmp_store);
		}
		ac_build_endif(&ctx->ac, 12604);

		start = LLVMBuildLoad(builder, tmp_store, "");
	}

	si_exit_thread0_section(&section, &start);

	/* Write the final vertex count to memory. An EOS/EOP event could do this,
	 * but those events are super slow and should be avoided if performance
	 * is a concern. Thanks to GDS ordered append, we can emulate a CS_DONE
	 * event like this.
	 */
	if (VERTEX_COUNTER_GDS_MODE == 2) {
		ac_build_ifcc(&ctx->ac,
			      LLVMBuildICmp(builder, LLVMIntEQ, global_thread_id,
					    ac_get_arg(&ctx->ac, param_last_wave_prim_id), ""),
			      12606);
		LLVMValueRef count = LLVMBuildAdd(builder, start, num_prims_accepted, "");
		count = LLVMBuildMul(builder, count,
				     LLVMConstInt(ctx->ac.i32, vertices_per_prim, 0), "");

		/* GFX8 needs to disable caching, so that the CP can see the stored value.
		 * MTYPE=3 bypasses TC L2.
		 */
		if (ctx->screen->info.chip_class <= GFX8) {
			LLVMValueRef desc[] = {
				ac_get_arg(&ctx->ac, param_vertex_count_addr),
				LLVMConstInt(ctx->ac.i32,
					     S_008F04_BASE_ADDRESS_HI(ctx->screen->info.address32_hi), 0),
				LLVMConstInt(ctx->ac.i32, 4, 0),
				LLVMConstInt(ctx->ac.i32, S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
					     S_008F0C_MTYPE(3 /* uncached */), 0),
			};
			LLVMValueRef rsrc = ac_build_gather_values(&ctx->ac, desc, 4);
			ac_build_buffer_store_dword(&ctx->ac, rsrc, count, 1, ctx->ac.i32_0,
						    ctx->ac.i32_0, 0, ac_glc | ac_slc);
		} else {
			LLVMBuildStore(builder, count,
				       si_expand_32bit_pointer(ctx,
							       ac_get_arg(&ctx->ac,
									  param_vertex_count_addr)));
		}
		ac_build_endif(&ctx->ac, 12606);
	} else {
		/* For unordered modes that increment a vertex count instead of
		 * primitive count, convert it into the primitive index.
		 */
		start = LLVMBuildUDiv(builder, start,
				      LLVMConstInt(ctx->ac.i32, vertices_per_prim, 0), "");
	}

	/* Now we need to store the indices of accepted primitives into
	 * the output index buffer.
	 */
	ac_build_ifcc(&ctx->ac, accepted, 16607);
	{
		/* Get the number of bits set before the index of this thread. */
		LLVMValueRef prim_index = ac_build_mbcnt(&ctx->ac, accepted_threadmask);

		/* We have lowered instancing. Pack the instance ID into vertex ID. */
		if (key->opt.cs_instancing) {
			instance_id = LLVMBuildShl(builder, instance_id,
						   LLVMConstInt(ctx->ac.i32, 16, 0), "");

			for (unsigned i = 0; i < vertices_per_prim; i++)
				index[i] = LLVMBuildOr(builder, index[i], instance_id, "");
		}

		if (VERTEX_COUNTER_GDS_MODE == 2) {
			/* vertex_counter contains the first primitive ID
			 * for this dispatch. If the draw call was split into
			 * multiple subdraws, the first primitive ID is > 0
			 * for subsequent subdraws. Each subdraw uses a different
			 * portion of the output index buffer. Offset the store
			 * vindex by the first primitive ID to get the correct
			 * store address for the subdraw.
			 */
			start = LLVMBuildAdd(builder, start, vertex_counter, "");
		}

		/* Write indices for accepted primitives. */
		LLVMValueRef vindex = LLVMBuildAdd(builder, start, prim_index, "");
		LLVMValueRef vdata = ac_build_gather_values(&ctx->ac, index, 3);

		if (!ac_has_vec3_support(ctx->ac.chip_class, true))
			vdata = ac_build_expand_to_vec4(&ctx->ac, vdata, 3);

		ac_build_buffer_store_format(&ctx->ac, output_indexbuf, vdata,
					     vindex, ctx->ac.i32_0, 3,
					     ac_glc | (INDEX_STORES_USE_SLC ? ac_slc : 0));
	}
	ac_build_endif(&ctx->ac, 16607);

	LLVMBuildRetVoid(builder);
}
/* Return false if the shader isn't ready. */
static bool si_shader_select_prim_discard_cs(struct si_context *sctx,
					     const struct pipe_draw_info *info,
					     bool primitive_restart)
{
	struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
	struct si_shader_key key;

	/* Primitive restart needs ordered counters. */
	assert(!primitive_restart || VERTEX_COUNTER_GDS_MODE == 2);
	assert(!primitive_restart || info->instance_count == 1);

	memset(&key, 0, sizeof(key));
	si_shader_selector_key_vs(sctx, sctx->vs_shader.cso, &key, &key.part.vs.prolog);
	assert(!key.part.vs.prolog.instance_divisor_is_fetched);

	key.part.vs.prolog.unpack_instance_id_from_vertex_id = 0;
	key.opt.vs_as_prim_discard_cs = 1;
	key.opt.cs_prim_type = info->mode;
	key.opt.cs_indexed = info->index_size != 0;
	key.opt.cs_instancing = info->instance_count > 1;
	key.opt.cs_primitive_restart = primitive_restart;
	key.opt.cs_provoking_vertex_first = rs->provoking_vertex_first;

	/* Primitive restart with triangle strips needs to preserve primitive
	 * orientation for cases where front and back primitive orientation matters.
	 */
	if (primitive_restart) {
		struct si_shader_selector *ps = sctx->ps_shader.cso;

		key.opt.cs_need_correct_orientation =
			rs->cull_front != rs->cull_back ||
			ps->info.uses_frontface ||
			(rs->two_side && ps->info.colors_read);
	}

	if (rs->rasterizer_discard) {
		/* Just for performance testing and analysis of trivial bottlenecks.
		 * This should result in a very short compute shader. */
		key.opt.cs_cull_front = 1;
		key.opt.cs_cull_back = 1;
	} else {
		key.opt.cs_cull_front =
			sctx->viewports.y_inverted ? rs->cull_back : rs->cull_front;
		key.opt.cs_cull_back =
			sctx->viewports.y_inverted ? rs->cull_front : rs->cull_back;
	}

	if (!rs->depth_clamp_any && CULL_Z) {
		key.opt.cs_cull_z = 1;
		key.opt.cs_halfz_clip_space = rs->clip_halfz;
	}

	sctx->cs_prim_discard_state.cso = sctx->vs_shader.cso;
	sctx->cs_prim_discard_state.current = NULL;

	if (!sctx->compiler.passes)
		si_init_compiler(sctx->screen, &sctx->compiler);

	struct si_compiler_ctx_state compiler_state;
	compiler_state.compiler = &sctx->compiler;
	compiler_state.debug = sctx->debug;
	compiler_state.is_debug_context = sctx->is_debug;

	return si_shader_select_with_key(sctx->screen, &sctx->cs_prim_discard_state,
					 &compiler_state, &key, -1, true) == 0 &&
	       /* Disallow compute shaders using the scratch buffer. */
	       sctx->cs_prim_discard_state.current->config.scratch_bytes_per_wave == 0;
}
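
/* Create the GDS resources, the parallel compute IB, and the index ring
 * on first use. Returns false on allocation failure. */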
static bool si_initialize_prim_discard_cmdbuf(struct si_context *sctx)
{
	if (sctx->index_ring)
		return true;

	if (!sctx->prim_discard_compute_cs) {
		struct radeon_winsys *ws = sctx->ws;
		unsigned gds_size = VERTEX_COUNTER_GDS_MODE == 1 ? GDS_SIZE_UNORDERED :
				    VERTEX_COUNTER_GDS_MODE == 2 ? 8 : 0;
		unsigned num_oa_counters = VERTEX_COUNTER_GDS_MODE == 2 ? 2 : 0;

		if (gds_size) {
			sctx->gds = ws->buffer_create(ws, gds_size, 4,
						      RADEON_DOMAIN_GDS, 0);
			if (!sctx->gds)
				return false;

			ws->cs_add_buffer(sctx->gfx_cs, sctx->gds,
					  RADEON_USAGE_READWRITE, 0, 0);
		}
		if (num_oa_counters) {
			assert(gds_size);
			sctx->gds_oa = ws->buffer_create(ws, num_oa_counters,
							 1, RADEON_DOMAIN_OA, 0);
			if (!sctx->gds_oa)
				return false;

			ws->cs_add_buffer(sctx->gfx_cs, sctx->gds_oa,
					  RADEON_USAGE_READWRITE, 0, 0);
		}

		sctx->prim_discard_compute_cs =
			ws->cs_add_parallel_compute_ib(sctx->gfx_cs,
						       num_oa_counters > 0);
		if (!sctx->prim_discard_compute_cs)
			return false;
	}

	if (!sctx->index_ring) {
		sctx->index_ring =
			si_aligned_buffer_create(sctx->b.screen,
						 SI_RESOURCE_FLAG_UNMAPPABLE,
						 PIPE_USAGE_DEFAULT,
						 sctx->index_ring_size_per_ib * 2,
						 sctx->screen->info.pte_fragment_size);
		if (!sctx->index_ring)
			return false;
	}
	return true;
}
static bool si_check_ring_space(struct si_context *sctx, unsigned out_indexbuf_size)
{
	return sctx->index_ring_offset +
	       align(out_indexbuf_size, sctx->screen->info.tcc_cache_line_size) <=
	       sctx->index_ring_size_per_ib;
}
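
/* Decide how to handle a draw call: enable primitive discard for it, split it
 * into smaller draws first (SI_PRIM_DISCARD_DRAW_SPLIT), or fall back to the
 * normal draw path (SI_PRIM_DISCARD_DISABLED). */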
enum si_prim_discard_outcome
si_prepare_prim_discard_or_split_draw(struct si_context *sctx,
				      const struct pipe_draw_info *info,
				      bool primitive_restart)
{
	/* If the compute shader compilation isn't finished, this returns false. */
	if (!si_shader_select_prim_discard_cs(sctx, info, primitive_restart))
		return SI_PRIM_DISCARD_DISABLED;

	if (!si_initialize_prim_discard_cmdbuf(sctx))
		return SI_PRIM_DISCARD_DISABLED;

	struct radeon_cmdbuf *gfx_cs = sctx->gfx_cs;
	unsigned prim = info->mode;
	unsigned count = info->count;
	unsigned instance_count = info->instance_count;
	unsigned num_prims_per_instance = u_decomposed_prims_for_vertices(prim, count);
	unsigned num_prims = num_prims_per_instance * instance_count;
	unsigned out_indexbuf_size = num_prims * 12;
	bool ring_full = !si_check_ring_space(sctx, out_indexbuf_size);
	const unsigned split_prims_draw_level = SPLIT_PRIMS_DRAW_LEVEL;

	/* Split draws at the draw call level if the ring is full. This makes
	 * better use of the ring space.
	 */
	if (ring_full &&
	    num_prims > split_prims_draw_level &&
	    instance_count == 1 && /* TODO: support splitting instanced draws */
	    (1 << prim) & ((1 << PIPE_PRIM_TRIANGLES) |
			   (1 << PIPE_PRIM_TRIANGLE_STRIP))) {
		struct pipe_draw_info split_draw = *info;
		split_draw.primitive_restart = primitive_restart;

		unsigned base_start = split_draw.start;

		if (prim == PIPE_PRIM_TRIANGLES) {
			unsigned vert_count_per_subdraw = split_prims_draw_level * 3;
			assert(vert_count_per_subdraw < count);

			for (unsigned start = 0; start < count; start += vert_count_per_subdraw) {
				split_draw.start = base_start + start;
				split_draw.count = MIN2(count - start, vert_count_per_subdraw);

				sctx->b.draw_vbo(&sctx->b, &split_draw);
			}
		} else if (prim == PIPE_PRIM_TRIANGLE_STRIP) {
			/* No primitive pair can be split, because strips reverse orientation
			 * for odd primitives. */
			STATIC_ASSERT(split_prims_draw_level % 2 == 0);

			unsigned vert_count_per_subdraw = split_prims_draw_level;

			for (unsigned start = 0; start < count - 2; start += vert_count_per_subdraw) {
				split_draw.start = base_start + start;
				split_draw.count = MIN2(count - start, vert_count_per_subdraw + 2);

				sctx->b.draw_vbo(&sctx->b, &split_draw);

				if (start == 0 &&
				    primitive_restart &&
				    sctx->cs_prim_discard_state.current->key.opt.cs_need_correct_orientation)
					sctx->preserve_prim_restart_gds_at_flush = true;
			}
			sctx->preserve_prim_restart_gds_at_flush = false;
		}

		return SI_PRIM_DISCARD_DRAW_SPLIT;
	}

	/* Just quit if the draw call doesn't fit into the ring and can't be split. */
	if (out_indexbuf_size > sctx->index_ring_size_per_ib) {
		if (SI_PRIM_DISCARD_DEBUG)
			puts("PD failed: draw call too big, can't be split");
		return SI_PRIM_DISCARD_DISABLED;
	}

	unsigned num_subdraws = DIV_ROUND_UP(num_prims, SPLIT_PRIMS_PACKET_LEVEL);
	unsigned need_compute_dw = 11 /* shader */ + 34 /* first draw */ +
				   24 * (num_subdraws - 1) + /* subdraws */
				   20; /* leave some space at the end */
	unsigned need_gfx_dw = si_get_minimum_num_gfx_cs_dwords(sctx);

	if (sctx->chip_class <= GFX7 || FORCE_REWIND_EMULATION)
		need_gfx_dw += 9; /* NOP(2) + WAIT_REG_MEM(7), then chain */
	else
		need_gfx_dw += num_subdraws * 8; /* use REWIND(2) + DRAW(6) */

	if (ring_full ||
	    (VERTEX_COUNTER_GDS_MODE == 1 && sctx->compute_gds_offset + 8 > GDS_SIZE_UNORDERED) ||
	    !sctx->ws->cs_check_space(gfx_cs, need_gfx_dw, false)) {
		/* If the current IB is empty but the size is too small, add a NOP
		 * packet to force a flush and get a bigger IB.
		 */
		if (!radeon_emitted(gfx_cs, sctx->initial_gfx_cs_size) &&
		    gfx_cs->current.cdw + need_gfx_dw > gfx_cs->current.max_dw) {
			radeon_emit(gfx_cs, PKT3(PKT3_NOP, 0, 0));
			radeon_emit(gfx_cs, 0);
		}

		si_flush_gfx_cs(sctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
	}

	/* The compute IB is always chained, but we need to call cs_check_space to add more space. */
	struct radeon_cmdbuf *cs = sctx->prim_discard_compute_cs;
	ASSERTED bool compute_has_space = sctx->ws->cs_check_space(cs, need_compute_dw, false);
	assert(compute_has_space);
	assert(si_check_ring_space(sctx, out_indexbuf_size));
	return SI_PRIM_DISCARD_ENABLED;
}
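
/* Signal the gfx IB that the current batch of compute dispatches is queued:
 * release a CS_DONE event that writes REWIND_SIGNAL_BIT into the word the
 * REWIND packet is spinning on, which unblocks the patched draw. */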
void si_compute_signal_gfx(struct si_context *sctx)
{
	struct radeon_cmdbuf *cs = sctx->prim_discard_compute_cs;
	unsigned writeback_L2_flags = 0;

	/* The writeback L2 flags vary with each chip generation. */
	/* CI needs to flush vertex indices to memory. */
	if (sctx->chip_class <= GFX7)
		writeback_L2_flags = EVENT_TC_WB_ACTION_ENA;
	else if (sctx->chip_class == GFX8 && VERTEX_COUNTER_GDS_MODE == 0)
		writeback_L2_flags = EVENT_TC_WB_ACTION_ENA | EVENT_TC_NC_ACTION_ENA;

	if (!sctx->compute_num_prims_in_batch)
		return;

	assert(sctx->compute_rewind_va);

	/* After the queued dispatches are done and vertex counts are written to
	 * the gfx IB, signal the gfx IB to continue. CP doesn't wait for
	 * the dispatches to finish, it only adds the CS_DONE event into the event
	 * queue.
	 */
	si_cp_release_mem(sctx, cs, V_028A90_CS_DONE, writeback_L2_flags,
			  sctx->chip_class <= GFX8 ? EOP_DST_SEL_MEM : EOP_DST_SEL_TC_L2,
			  writeback_L2_flags ? EOP_INT_SEL_SEND_DATA_AFTER_WR_CONFIRM :
					       EOP_INT_SEL_NONE,
			  EOP_DATA_SEL_VALUE_32BIT,
			  NULL,
			  sctx->compute_rewind_va |
			  ((uint64_t)sctx->screen->info.address32_hi << 32),
			  REWIND_SIGNAL_BIT, /* signaling value for the REWIND packet */
			  SI_NOT_QUERY);

	sctx->compute_rewind_va = 0;
	sctx->compute_num_prims_in_batch = 0;
}
/* Dispatch a primitive discard compute shader. */
void si_dispatch_prim_discard_cs_and_draw(struct si_context *sctx,
					  const struct pipe_draw_info *info,
					  unsigned index_size,
					  unsigned base_vertex,
					  uint64_t input_indexbuf_va,
					  unsigned input_indexbuf_num_elements)
{
	struct radeon_cmdbuf *gfx_cs = sctx->gfx_cs;
	struct radeon_cmdbuf *cs = sctx->prim_discard_compute_cs;
	unsigned num_prims_per_instance = u_decomposed_prims_for_vertices(info->mode, info->count);
	if (!num_prims_per_instance)
		return;

	unsigned num_prims = num_prims_per_instance * info->instance_count;
	unsigned vertices_per_prim, output_indexbuf_format;

	switch (info->mode) {
	case PIPE_PRIM_TRIANGLES:
	case PIPE_PRIM_TRIANGLE_STRIP:
	case PIPE_PRIM_TRIANGLE_FAN:
		vertices_per_prim = 3;
		output_indexbuf_format = V_008F0C_BUF_DATA_FORMAT_32_32_32;
		break;
	default:
		unreachable("unsupported primitive type");
		return;
	}

	unsigned out_indexbuf_offset;
	uint64_t output_indexbuf_size = num_prims * vertices_per_prim * 4;
	bool first_dispatch = !sctx->prim_discard_compute_ib_initialized;

	/* Initialize the compute IB if it's empty. */
	if (!sctx->prim_discard_compute_ib_initialized) {
		/* 1) State initialization. */
		sctx->compute_gds_offset = 0;
		sctx->compute_ib_last_shader = NULL;

		if (sctx->last_ib_barrier_fence) {
			assert(!sctx->last_ib_barrier_buf);
			sctx->ws->cs_add_fence_dependency(gfx_cs,
							  sctx->last_ib_barrier_fence,
							  RADEON_DEPENDENCY_PARALLEL_COMPUTE_ONLY);
		}

		/* 2) IB initialization. */

		/* This needs to be done at the beginning of IBs due to possible
		 * TTM buffer moves in the kernel.
		 *
		 * TODO: update for GFX10
		 */
		si_emit_surface_sync(sctx, cs,
				     S_0085F0_TC_ACTION_ENA(1) |
				     S_0085F0_TCL1_ACTION_ENA(1) |
				     S_0301F0_TC_WB_ACTION_ENA(sctx->chip_class >= GFX8) |
				     S_0085F0_SH_ICACHE_ACTION_ENA(1) |
				     S_0085F0_SH_KCACHE_ACTION_ENA(1));

		/* Restore the GDS prim restart counter if needed. */
		if (sctx->preserve_prim_restart_gds_at_flush) {
			si_cp_copy_data(sctx, cs,
					COPY_DATA_GDS, NULL, 4,
					COPY_DATA_SRC_MEM, sctx->wait_mem_scratch, 4);
		}

		si_emit_initial_compute_regs(sctx, cs);

		radeon_set_sh_reg(cs, R_00B860_COMPUTE_TMPRING_SIZE,
				  S_00B860_WAVES(sctx->scratch_waves) |
				  S_00B860_WAVESIZE(0)); /* no scratch */

		/* Only 1D grids are launched. */
		radeon_set_sh_reg_seq(cs, R_00B820_COMPUTE_NUM_THREAD_Y, 2);
		radeon_emit(cs, S_00B820_NUM_THREAD_FULL(1) |
				S_00B820_NUM_THREAD_PARTIAL(1));
		radeon_emit(cs, S_00B824_NUM_THREAD_FULL(1) |
				S_00B824_NUM_THREAD_PARTIAL(1));

		radeon_set_sh_reg_seq(cs, R_00B814_COMPUTE_START_Y, 2);
		radeon_emit(cs, 0);
		radeon_emit(cs, 0);

		/* Disable ordered alloc for OA resources. */
		for (unsigned i = 0; i < 2; i++) {
			radeon_set_uconfig_reg_seq(cs, R_031074_GDS_OA_CNTL, 3);
			radeon_emit(cs, S_031074_INDEX(i));
			radeon_emit(cs, 0);
			radeon_emit(cs, S_03107C_ENABLE(0));
		}

		if (sctx->last_ib_barrier_buf) {
			assert(!sctx->last_ib_barrier_fence);
			radeon_add_to_buffer_list(sctx, gfx_cs, sctx->last_ib_barrier_buf,
						  RADEON_USAGE_READ, RADEON_PRIO_FENCE);
			si_cp_wait_mem(sctx, cs,
				       sctx->last_ib_barrier_buf->gpu_address +
				       sctx->last_ib_barrier_buf_offset, 1, 1,
				       WAIT_REG_MEM_EQUAL);
		}

		sctx->prim_discard_compute_ib_initialized = true;
	}

	/* Allocate the output index buffer. */
	output_indexbuf_size = align(output_indexbuf_size,
				     sctx->screen->info.tcc_cache_line_size);
	assert(sctx->index_ring_offset + output_indexbuf_size <= sctx->index_ring_size_per_ib);
	out_indexbuf_offset = sctx->index_ring_base + sctx->index_ring_offset;
	sctx->index_ring_offset += output_indexbuf_size;

	radeon_add_to_buffer_list(sctx, gfx_cs, sctx->index_ring, RADEON_USAGE_READWRITE,
				  RADEON_PRIO_SHADER_RW_BUFFER);
	uint64_t out_indexbuf_va = sctx->index_ring->gpu_address + out_indexbuf_offset;

	/* Prepare index buffer descriptors. */
	struct si_resource *indexbuf_desc = NULL;
	unsigned indexbuf_desc_offset;
	unsigned desc_size = 12 * 4;
	uint32_t *desc;

	u_upload_alloc(sctx->b.const_uploader, 0, desc_size,
		       si_optimal_tcc_alignment(sctx, desc_size),
		       &indexbuf_desc_offset, (struct pipe_resource**)&indexbuf_desc,
		       (void**)&desc);
	radeon_add_to_buffer_list(sctx, gfx_cs, indexbuf_desc, RADEON_USAGE_READ,
				  RADEON_PRIO_DESCRIPTORS);

	/* Input index buffer. */
	desc[0] = input_indexbuf_va;
	desc[1] = S_008F04_BASE_ADDRESS_HI(input_indexbuf_va >> 32) |
		  S_008F04_STRIDE(index_size);
	desc[2] = input_indexbuf_num_elements * (sctx->chip_class == GFX8 ? index_size : 1);
	desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
		  S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_UINT) |
		  S_008F0C_DATA_FORMAT(index_size == 1 ? V_008F0C_BUF_DATA_FORMAT_8 :
				       index_size == 2 ? V_008F0C_BUF_DATA_FORMAT_16 :
							 V_008F0C_BUF_DATA_FORMAT_32);

	/* Output index buffer. */
	desc[4] = out_indexbuf_va;
	desc[5] = S_008F04_BASE_ADDRESS_HI(out_indexbuf_va >> 32) |
		  S_008F04_STRIDE(vertices_per_prim * 4);
	desc[6] = num_prims * (sctx->chip_class == GFX8 ? vertices_per_prim * 4 : 1);
	desc[7] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
		  S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
		  S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
		  S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_0) |
		  S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_UINT) |
		  S_008F0C_DATA_FORMAT(output_indexbuf_format);

	/* Viewport state. */
	struct si_small_prim_cull_info cull_info;
	si_get_small_prim_cull_info(sctx, &cull_info);

	desc[8] = fui(cull_info.scale[0]);
	desc[9] = fui(cull_info.scale[1]);
	desc[10] = fui(cull_info.translate[0]);
	desc[11] = fui(cull_info.translate[1]);

	/* Better subpixel precision increases the efficiency of small
	 * primitive culling. */
	unsigned num_samples = sctx->framebuffer.nr_samples;
	unsigned quant_mode = sctx->viewports.as_scissor[0].quant_mode;
	float small_prim_cull_precision;

	if (quant_mode == SI_QUANT_MODE_12_12_FIXED_POINT_1_4096TH)
		small_prim_cull_precision = num_samples / 4096.0;
	else if (quant_mode == SI_QUANT_MODE_14_10_FIXED_POINT_1_1024TH)
		small_prim_cull_precision = num_samples / 1024.0;
	else
		small_prim_cull_precision = num_samples / 256.0;
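
	/* Example (illustrative): with 4 samples and the default 1/256th quant
	 * mode, small_prim_cull_precision = 4 / 256.0 = 1/64th of a pixel. */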
	/* Set user data SGPRs. */
	/* This can't be greater than 14 if we want the fastest launch rate. */
	unsigned user_sgprs = 13;

	uint64_t index_buffers_va = indexbuf_desc->gpu_address + indexbuf_desc_offset;
	unsigned vs_const_desc = si_const_and_shader_buffer_descriptors_idx(PIPE_SHADER_VERTEX);
	unsigned vs_sampler_desc = si_sampler_and_image_descriptors_idx(PIPE_SHADER_VERTEX);
	uint64_t vs_const_desc_va = sctx->descriptors[vs_const_desc].gpu_address;
	uint64_t vs_sampler_desc_va = sctx->descriptors[vs_sampler_desc].gpu_address;
	uint64_t vb_desc_va = sctx->vb_descriptors_buffer ?
			      sctx->vb_descriptors_buffer->gpu_address +
			      sctx->vb_descriptors_offset : 0;
	unsigned gds_offset, gds_size;
	struct si_fast_udiv_info32 num_prims_udiv = {};

	if (info->instance_count > 1)
		num_prims_udiv = si_compute_fast_udiv_info32(num_prims_per_instance, 31);

	/* Limitations on how these two are packed in the user SGPR. */
	assert(num_prims_udiv.post_shift < 32);
	assert(num_prims_per_instance < 1 << 27);

	si_resource_reference(&indexbuf_desc, NULL);

	bool primitive_restart = sctx->cs_prim_discard_state.current->key.opt.cs_primitive_restart;

	if (VERTEX_COUNTER_GDS_MODE == 1) {
		gds_offset = sctx->compute_gds_offset;
		gds_size = primitive_restart ? 8 : 4;
		sctx->compute_gds_offset += gds_size;

		/* Reset the counters in GDS for the first dispatch using WRITE_DATA.
		 * The remainder of the GDS will be cleared after the dispatch packet
		 * in parallel with compute shaders.
		 */
		if (first_dispatch) {
			radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 2 + gds_size/4, 0));
			radeon_emit(cs, S_370_DST_SEL(V_370_GDS) | S_370_WR_CONFIRM(1));
			radeon_emit(cs, gds_offset);
			radeon_emit(cs, 0);
			radeon_emit(cs, 0); /* value to write */
			if (gds_size == 8)
				radeon_emit(cs, 0);
		}
	}

	/* Set shader registers. */
	struct si_shader *shader = sctx->cs_prim_discard_state.current;

	if (shader != sctx->compute_ib_last_shader) {
		radeon_add_to_buffer_list(sctx, gfx_cs, shader->bo, RADEON_USAGE_READ,
					  RADEON_PRIO_SHADER_BINARY);
		uint64_t shader_va = shader->bo->gpu_address;

		assert(shader->config.scratch_bytes_per_wave == 0);
		assert(shader->config.num_vgprs * WAVES_PER_TG <= 256 * 4);

		radeon_set_sh_reg_seq(cs, R_00B830_COMPUTE_PGM_LO, 2);
		radeon_emit(cs, shader_va >> 8);
		radeon_emit(cs, S_00B834_DATA(shader_va >> 40));

		radeon_set_sh_reg_seq(cs, R_00B848_COMPUTE_PGM_RSRC1, 2);
		radeon_emit(cs, S_00B848_VGPRS((shader->config.num_vgprs - 1) / 4) |
				S_00B848_SGPRS((shader->config.num_sgprs - 1) / 8) |
				S_00B848_FLOAT_MODE(shader->config.float_mode) |
				S_00B848_DX10_CLAMP(1));
		radeon_emit(cs, S_00B84C_SCRATCH_EN(0 /* no scratch */) |
				S_00B84C_USER_SGPR(user_sgprs) |
				S_00B84C_TGID_X_EN(1 /* only blockID.x is used */) |
				S_00B84C_TG_SIZE_EN(VERTEX_COUNTER_GDS_MODE == 2 /* need the wave ID */) |
				S_00B84C_TIDIG_COMP_CNT(0 /* only threadID.x is used */) |
				S_00B84C_LDS_SIZE(shader->config.lds_size));

		radeon_set_sh_reg(cs, R_00B854_COMPUTE_RESOURCE_LIMITS,
				  ac_get_compute_resource_limits(&sctx->screen->info,
								 WAVES_PER_TG,
								 MAX_WAVES_PER_SH,
								 THREADGROUPS_PER_CU));
		sctx->compute_ib_last_shader = shader;
	}

	STATIC_ASSERT(SPLIT_PRIMS_PACKET_LEVEL % THREADGROUP_SIZE == 0);

	/* Big draw calls are split into smaller dispatches and draw packets. */
	for (unsigned start_prim = 0; start_prim < num_prims; start_prim += SPLIT_PRIMS_PACKET_LEVEL) {
		unsigned num_subdraw_prims;

		if (start_prim + SPLIT_PRIMS_PACKET_LEVEL < num_prims)
			num_subdraw_prims = SPLIT_PRIMS_PACKET_LEVEL;
		else
			num_subdraw_prims = num_prims - start_prim;

		/* Small dispatches are executed back to back until a specific primitive
		 * count is reached. Then, a CS_DONE is inserted to signal the gfx IB
		 * to start drawing the batch. This batching adds latency to the gfx IB,
		 * but CS_DONE and REWIND are too slow.
		 */
		if (sctx->compute_num_prims_in_batch + num_subdraw_prims > PRIMS_PER_BATCH)
			si_compute_signal_gfx(sctx);

		if (sctx->compute_num_prims_in_batch == 0) {
			assert((gfx_cs->gpu_address >> 32) == sctx->screen->info.address32_hi);
			sctx->compute_rewind_va = gfx_cs->gpu_address + (gfx_cs->current.cdw + 1) * 4;

			if (sctx->chip_class <= GFX7 || FORCE_REWIND_EMULATION) {
				radeon_emit(gfx_cs, PKT3(PKT3_NOP, 0, 0));
				radeon_emit(gfx_cs, 0);

				si_cp_wait_mem(sctx, gfx_cs,
					       sctx->compute_rewind_va |
					       (uint64_t)sctx->screen->info.address32_hi << 32,
					       REWIND_SIGNAL_BIT, REWIND_SIGNAL_BIT,
					       WAIT_REG_MEM_EQUAL | WAIT_REG_MEM_PFP);

				/* Use INDIRECT_BUFFER to chain to a different buffer
				 * to discard the CP prefetch cache.
				 */
				sctx->ws->cs_check_space(gfx_cs, 0, true);
			} else {
				radeon_emit(gfx_cs, PKT3(PKT3_REWIND, 0, 0));
				radeon_emit(gfx_cs, 0);
			}
		}

		sctx->compute_num_prims_in_batch += num_subdraw_prims;

		uint32_t count_va = gfx_cs->gpu_address + (gfx_cs->current.cdw + 4) * 4;
		uint64_t index_va = out_indexbuf_va + start_prim * 12;

		/* Emit the draw packet into the gfx IB. */
		radeon_emit(gfx_cs, PKT3(PKT3_DRAW_INDEX_2, 4, 0));
		radeon_emit(gfx_cs, num_prims * vertices_per_prim);
		radeon_emit(gfx_cs, index_va);
		radeon_emit(gfx_cs, index_va >> 32);
		radeon_emit(gfx_cs, 0);
		radeon_emit(gfx_cs, V_0287F0_DI_SRC_SEL_DMA);

		/* Continue with the compute IB. */
		if (start_prim == 0) {
			uint32_t gds_prim_restart_continue_bit = 0;

			if (sctx->preserve_prim_restart_gds_at_flush) {
				assert(primitive_restart &&
				       info->mode == PIPE_PRIM_TRIANGLE_STRIP);
				assert(start_prim < 1 << 31);
				gds_prim_restart_continue_bit = 1 << 31;
			}

			radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0, user_sgprs);
			radeon_emit(cs, index_buffers_va);
			radeon_emit(cs,
				    VERTEX_COUNTER_GDS_MODE == 0 ? count_va :
				    VERTEX_COUNTER_GDS_MODE == 1 ? gds_offset :
								   start_prim |
								   gds_prim_restart_continue_bit);
			radeon_emit(cs, start_prim + num_subdraw_prims - 1);
			radeon_emit(cs, count_va);
			radeon_emit(cs, vb_desc_va);
			radeon_emit(cs, vs_const_desc_va);
			radeon_emit(cs, vs_sampler_desc_va);
			radeon_emit(cs, base_vertex);
			radeon_emit(cs, info->start_instance);
			radeon_emit(cs, num_prims_udiv.multiplier);
			radeon_emit(cs, num_prims_udiv.post_shift |
					(num_prims_per_instance << 5));
			radeon_emit(cs, info->restart_index);
			/* small-prim culling precision (same as rasterizer precision = QUANT_MODE) */
			radeon_emit(cs, fui(small_prim_cull_precision));
		} else {
			assert(VERTEX_COUNTER_GDS_MODE == 2);
			/* Only update the SGPRs that changed. */
			radeon_set_sh_reg_seq(cs, R_00B904_COMPUTE_USER_DATA_1, 3);
			radeon_emit(cs, start_prim);
			radeon_emit(cs, start_prim + num_subdraw_prims - 1);
			radeon_emit(cs, count_va);
		}

		/* Set grid dimensions. */
		unsigned start_block = start_prim / THREADGROUP_SIZE;
		unsigned num_full_blocks = num_subdraw_prims / THREADGROUP_SIZE;
		unsigned partial_block_size = num_subdraw_prims % THREADGROUP_SIZE;

		radeon_set_sh_reg(cs, R_00B810_COMPUTE_START_X, start_block);
		radeon_set_sh_reg(cs, R_00B81C_COMPUTE_NUM_THREAD_X,
				  S_00B81C_NUM_THREAD_FULL(THREADGROUP_SIZE) |
				  S_00B81C_NUM_THREAD_PARTIAL(partial_block_size));

		radeon_emit(cs, PKT3(PKT3_DISPATCH_DIRECT, 3, 0) |
				PKT3_SHADER_TYPE_S(1));
		radeon_emit(cs, start_block + num_full_blocks + !!partial_block_size);
		radeon_emit(cs, 1);
		radeon_emit(cs, 1);
		radeon_emit(cs, S_00B800_COMPUTE_SHADER_EN(1) |
				S_00B800_PARTIAL_TG_EN(!!partial_block_size) |
				S_00B800_ORDERED_APPEND_ENBL(VERTEX_COUNTER_GDS_MODE == 2) |
				S_00B800_ORDER_MODE(0 /* launch in order */));

		/* This is only for unordered append. Ordered append writes this from
		 * the shader.
		 *
		 * Note that EOP and EOS events are super slow, so emulating the event
		 * in a shader is an important optimization.
		 */
		if (VERTEX_COUNTER_GDS_MODE == 1) {
			si_cp_release_mem(sctx, cs, V_028A90_CS_DONE, 0,
					  sctx->chip_class <= GFX8 ? EOP_DST_SEL_MEM : EOP_DST_SEL_TC_L2,
					  EOP_INT_SEL_NONE,
					  EOP_DATA_SEL_GDS,
					  NULL,
					  count_va | ((uint64_t)sctx->screen->info.address32_hi << 32),
					  EOP_DATA_GDS(gds_offset / 4, 1),
					  SI_NOT_QUERY);
		}

		/* Now that compute shaders are running, clear the remainder of GDS. */
		if (first_dispatch) {
			unsigned offset = gds_offset + gds_size;
			si_cp_dma_clear_buffer(sctx, cs, NULL, offset,
					       GDS_SIZE_UNORDERED - offset,
					       0,
					       SI_CPDMA_SKIP_CHECK_CS_SPACE |
					       SI_CPDMA_SKIP_GFX_SYNC |
					       SI_CPDMA_SKIP_SYNC_BEFORE,
					       SI_COHERENCY_NONE, L2_BYPASS);
		}

		first_dispatch = false;

		assert(cs->current.cdw <= cs->current.max_dw);
		assert(gfx_cs->current.cdw <= gfx_cs->current.max_dw);
	}
}