/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "ac_llvm_cull.h"
#include "si_build_pm4.h"
#include "si_pipe.h"
#include "si_shader_internal.h"
#include "sid.h"
#include "util/fast_idiv_by_const.h"
#include "util/u_prim.h"
#include "util/u_suballoc.h"
#include "util/u_upload_mgr.h"
/* Based on:
 * https://frostbite-wp-prd.s3.amazonaws.com/wp-content/uploads/2016/03/29204330/GDC_2016_Compute.pdf
 */

/* This file implements primitive culling using asynchronous compute.
 * It's written to be GL conformant.
 *
 * It takes a monolithic VS in LLVM IR returning gl_Position and invokes it
 * in a compute shader. The shader processes 1 primitive/thread by invoking
 * the VS for each vertex to get the positions, decomposes strips and fans
 * into triangles (if needed), eliminates primitive restart (if needed),
 * does (W<0) culling, face culling, view XY culling, zero-area and
 * small-primitive culling, and generates a new index buffer that doesn't
 * contain culled primitives.
 *
 * The index buffer is generated using the Ordered Count feature of GDS,
 * which is an atomic counter that is incremented in the wavefront launch
 * order, so that the original primitive order is preserved.
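 *
 * Illustrative sketch (same pseudocode style as used further below, not
 * real syntax): each wave effectively does
 *
 *    start = ds.ordered.add(num_accepted_prims);
 *
 * and the adds complete in wave launch order, so even if a later wave
 * finishes culling first, its "start" already accounts for all primitives
 * of the earlier waves, and the output index buffer stays in draw order.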
 *
 * Another GDS ordered counter is used to eliminate primitive restart indices.
 * If a restart index lands on an even thread ID, the compute shader has to flip
 * the primitive orientation of the whole following triangle strip. The primitive
 * orientation has to be correct after strip and fan decomposition for two-sided
 * shading to behave correctly. The decomposition also needs to be aware of
 * which vertex is the provoking vertex for flat shading to behave correctly.
 *
 * IB = a GPU command buffer
 *
 * Both the compute and gfx IBs run in parallel sort of like CE and DE.
 * The gfx IB has a CP barrier (REWIND packet) before a draw packet. REWIND
 * doesn't continue if its word isn't 0x80000000. Once compute shaders are
 * finished culling, the last wave will write the final primitive count from
 * GDS directly into the count word of the draw packet in the gfx IB, and
 * a CS_DONE event will signal the REWIND packet to continue. It's really
 * a direct draw with command buffer patching from the compute queue.
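 *
 * Roughly, the handshake looks like this (illustrative):
 *
 *    gfx IB:     ... REWIND (spins while its word != 0x80000000), DRAW ...
 *    compute IB: ... dispatches ... the last wave stores the final count
 *                into the draw packet, then CS_DONE writes 0x80000000 into
 *                the REWIND word to release it.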
 *
 * The compute IB doesn't have to start when its corresponding gfx IB starts,
 * but can start sooner. The compute IB is signaled to start after the last
 * execution barrier in the *previous* gfx IB. This is handled as follows.
 * The kernel GPU scheduler starts the compute IB after the previous gfx IB has
 * started. The compute IB then waits (WAIT_REG_MEM) for a mid-IB fence that
 * represents the barrier in the previous gfx IB.
 *
 * Features:
 * - Triangle strips and fans are decomposed into an indexed triangle list.
 *   The decomposition differs based on the provoking vertex state.
 * - Instanced draws are converted into non-instanced draws for 16-bit indices.
 *   (InstanceID is stored in the high bits of VertexID and unpacked by VS)
 * - Primitive restart is fully supported with triangle strips, including
 *   correct primitive orientation across multiple waves. (restart indices
 *   reset primitive orientation)
 * - W<0 culling (W<0 is behind the viewer, sort of like near Z culling).
 * - Back face culling, incl. culling zero-area / degenerate primitives.
 * - View XY culling.
 * - View Z culling (disabled due to limited impact with perspective projection).
 * - Small primitive culling for all MSAA modes and all quant modes.
 *
 * The following are not implemented:
 * - ClipVertex/ClipDistance/CullDistance-based culling.
 * - Scissor culling.
 * - HiZ culling.
 *
 * Limitations (and unimplemented features that may be possible to implement):
 * - Only triangles, triangle strips, and triangle fans are supported.
 * - Primitive restart is only supported with triangle strips.
 * - Instancing and primitive restart can't be used together.
 * - Instancing is only supported with 16-bit indices and instance count <= 2^16.
 * - The instance divisor buffer is unavailable, so all divisors must be
 *   either 0 or 1.
 * - Multidraws where the vertex shader reads gl_DrawID are unsupported.
 * - No support for tessellation and geometry shaders.
 *   (patch elimination where tess factors are 0 would be possible to implement)
 * - The vertex shader must not contain memory stores.
 * - All VS resources must not have a write usage in the command buffer.
 * - Bindless textures and images must not occur in the vertex shader.
 *
 * User data SGPR layout:
 *   INDEX_BUFFERS: pointer to constants
 *     0..3: input index buffer - typed buffer view
 *     4..7: output index buffer - typed buffer view
 *     8..11: viewport state - scale.xy, translate.xy
 *   VERTEX_COUNTER: counter address or first primitive ID
 *     - If unordered memory counter: address of "count" in the draw packet
 *       and is incremented atomically by the shader.
 *     - If unordered GDS counter: address of "count" in GDS starting from 0,
 *       must be initialized to 0 before the dispatch.
 *     - If ordered GDS counter: the primitive ID that should reset the vertex
 *       counter to 0 in GDS
 *   LAST_WAVE_PRIM_ID: the primitive ID that should write the final vertex
 *       count to memory if using GDS ordered append
 *   VERTEX_COUNT_ADDR: where the last wave should write the vertex count if
 *       using GDS ordered append
 *   VS.VERTEX_BUFFERS: same value as VS
 *   VS.CONST_AND_SHADER_BUFFERS: same value as VS
 *   VS.SAMPLERS_AND_IMAGES: same value as VS
 *   VS.BASE_VERTEX: same value as VS
 *   VS.START_INSTANCE: same value as VS
 *   NUM_PRIMS_UDIV_MULTIPLIER: For fast 31-bit division by the number of primitives
 *       per instance for instancing.
 *   NUM_PRIMS_UDIV_TERMS: (see the example below)
 *     - Bits [0:4]: "post_shift" for fast 31-bit division for instancing.
 *     - Bits [5:31]: The number of primitives per instance for computing the remainder.
 *   PRIMITIVE_RESTART_INDEX
 *   SMALL_PRIM_CULLING_PRECISION: Scale the primitive bounding box by this number.
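 *
 * Example of the fast division terms (illustrative): with 100 primitives
 * per instance, NUM_PRIMS_UDIV_TERMS packs "post_shift" into bits [0:4]
 * and 100 into bits [5:31], and the shader computes roughly:
 *
 *    post_shift = terms & 0x1f;
 *    prims_per_instance = terms >> 5;
 *    instance_id = ((uint64)prim_id * multiplier >> 32) >> post_shift;
 *    prim_id = prim_id - instance_id * prims_per_instance;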
 *
 *
 * The code contains 3 codepaths:
 * - Unordered memory counter (for debugging, random primitive order, no primitive restart)
 * - Unordered GDS counter (for debugging, random primitive order, no primitive restart)
 * - Ordered GDS counter (it preserves the primitive order)
 *
 * How to test primitive restart (the most complicated part because it needs
 * to get the primitive orientation right):
 *   Set THREADGROUP_SIZE to 2 to exercise both intra-wave and inter-wave
 *   primitive orientation flips with small draw calls, which is what most tests use.
 *   You can also enable draw call splitting into draw calls with just 2 primitives.
 */

/* At least 256 is needed for the fastest wave launch rate from compute queues
 * due to hw constraints. Nothing in the code needs more than 1 wave/threadgroup. */
#define THREADGROUP_SIZE     256 /* high numbers limit available VGPRs */
#define THREADGROUPS_PER_CU  1   /* TGs to launch on 1 CU before going onto the next, max 8 */
#define MAX_WAVES_PER_SH     0   /* no limit */
#define INDEX_STORES_USE_SLC 1   /* don't cache indices if L2 is full */
/* Don't cull Z. We already do (W < 0) culling for primitives behind the viewer. */
#define CULL_Z 0
/* 0 = unordered memory counter, 1 = unordered GDS counter, 2 = ordered GDS counter */
#define VERTEX_COUNTER_GDS_MODE 2
#define GDS_SIZE_UNORDERED      (4 * 1024) /* only for the unordered GDS counter */

/* Grouping compute dispatches for small draw calls: How many primitives from multiple
 * draw calls to process by compute before signaling the gfx IB. This reduces the number
 * of EOP events + REWIND packets, because they decrease performance. */
#define PRIMS_PER_BATCH (512 * 1024)
/* Draw call splitting at the packet level. This allows signaling the gfx IB
 * for big draw calls sooner, but doesn't allow context flushes between packets.
 * Primitive restart is supported. Only implemented for ordered append. */
#define SPLIT_PRIMS_PACKET_LEVEL_VALUE PRIMS_PER_BATCH
/* If there is not enough ring buffer space for the current IB, split draw calls into
 * this number of primitives, so that we can flush the context and get free ring space. */
#define SPLIT_PRIMS_DRAW_LEVEL PRIMS_PER_BATCH

/* Derived values. */
#define WAVES_PER_TG DIV_ROUND_UP(THREADGROUP_SIZE, 64)
#define SPLIT_PRIMS_PACKET_LEVEL                                                                   \
   (VERTEX_COUNTER_GDS_MODE == 2 ? SPLIT_PRIMS_PACKET_LEVEL_VALUE                                  \
                                 : UINT_MAX & ~(THREADGROUP_SIZE - 1))

#define REWIND_SIGNAL_BIT 0x80000000
/* For emulating the rewind packet on CI. */
#define FORCE_REWIND_EMULATION 0
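
/* For example, with the settings above: THREADGROUP_SIZE = 256 and Wave64
 * give WAVES_PER_TG = 4, and VERTEX_COUNTER_GDS_MODE == 2 makes
 * SPLIT_PRIMS_PACKET_LEVEL equal to PRIMS_PER_BATCH (512K primitives). */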

void si_initialize_prim_discard_tunables(struct si_screen *sscreen, bool is_aux_context,
                                         unsigned *prim_discard_vertex_count_threshold,
                                         unsigned *index_ring_size_per_ib)
{
   *prim_discard_vertex_count_threshold = UINT_MAX; /* disable */

   if (sscreen->info.chip_class == GFX6 || /* SI support is not implemented */
       !sscreen->info.has_gds_ordered_append || sscreen->debug_flags & DBG(NO_PD) ||
       is_aux_context)
      return;

   /* TODO: enable this after the GDS kernel memory management is fixed */
   bool enable_on_pro_graphics_by_default = false;

   if (sscreen->debug_flags & DBG(ALWAYS_PD) || sscreen->debug_flags & DBG(PD) ||
       (enable_on_pro_graphics_by_default && sscreen->info.is_pro_graphics &&
        (sscreen->info.family == CHIP_BONAIRE || sscreen->info.family == CHIP_HAWAII ||
         sscreen->info.family == CHIP_TONGA || sscreen->info.family == CHIP_FIJI ||
         sscreen->info.family == CHIP_POLARIS10 || sscreen->info.family == CHIP_POLARIS11 ||
         sscreen->info.family == CHIP_VEGA10 || sscreen->info.family == CHIP_VEGA20))) {
      *prim_discard_vertex_count_threshold = 6000 * 3; /* 6K triangles */

      if (sscreen->debug_flags & DBG(ALWAYS_PD))
         *prim_discard_vertex_count_threshold = 0; /* always enable */

      const uint32_t MB = 1024 * 1024;
      const uint64_t GB = 1024 * 1024 * 1024;

      /* The total size is double this per context.
       * Greater numbers allow bigger gfx IBs.
       */
      if (sscreen->info.vram_size <= 2 * GB)
         *index_ring_size_per_ib = 64 * MB;
      else if (sscreen->info.vram_size <= 4 * GB)
         *index_ring_size_per_ib = 128 * MB;
      else
         *index_ring_size_per_ib = 256 * MB;
   }
}

/* Opcode can be "add" or "swap". */
static LLVMValueRef si_build_ds_ordered_op(struct si_shader_context *ctx, const char *opcode,
                                           LLVMValueRef m0, LLVMValueRef value,
                                           unsigned ordered_count_index, bool release, bool done)
{
   if (ctx->screen->info.chip_class >= GFX10)
      ordered_count_index |= 1 << 24; /* number of dwords == 1 */

   LLVMValueRef args[] = {
      LLVMBuildIntToPtr(ctx->ac.builder, m0, LLVMPointerType(ctx->ac.i32, AC_ADDR_SPACE_GDS), ""),
      value,
      LLVMConstInt(ctx->ac.i32, LLVMAtomicOrderingMonotonic, 0), /* ordering */
      ctx->ac.i32_0,                                             /* scope */
      ctx->ac.i1false,                                           /* volatile */
      LLVMConstInt(ctx->ac.i32, ordered_count_index, 0),
      LLVMConstInt(ctx->ac.i1, release, 0),
      LLVMConstInt(ctx->ac.i1, done, 0),
   };

   char intrinsic[64];
   snprintf(intrinsic, sizeof(intrinsic), "llvm.amdgcn.ds.ordered.%s", opcode);
   return ac_build_intrinsic(&ctx->ac, intrinsic, ctx->ac.i32, args, ARRAY_SIZE(args), 0);
}

static LLVMValueRef si_expand_32bit_pointer(struct si_shader_context *ctx, LLVMValueRef ptr)
{
   uint64_t hi = (uint64_t)ctx->screen->info.address32_hi << 32;
   ptr = LLVMBuildZExt(ctx->ac.builder, ptr, ctx->ac.i64, "");
   ptr = LLVMBuildOr(ctx->ac.builder, ptr, LLVMConstInt(ctx->ac.i64, hi, 0), "");
   return LLVMBuildIntToPtr(ctx->ac.builder, ptr,
                            LLVMPointerType(ctx->ac.i32, AC_ADDR_SPACE_GLOBAL), "");
}

struct si_thread0_section {
   struct si_shader_context *ctx;
   LLVMValueRef vgpr_result; /* a VGPR for the value on thread 0. */
   LLVMValueRef saved_exec;
};

/* Enter a section that only executes on thread 0. */
static void si_enter_thread0_section(struct si_shader_context *ctx,
                                     struct si_thread0_section *section, LLVMValueRef thread_id)
{
   section->ctx = ctx;
   section->vgpr_result = ac_build_alloca_undef(&ctx->ac, ctx->ac.i32, "result0");

   /* This IF has 4 instructions:
    *   v_and_b32_e32 v, 63, v         ; get the thread ID
    *   v_cmp_eq_u32_e32 vcc, 0, v     ; thread ID == 0
    *   s_and_saveexec_b64 s, vcc
    *   s_cbranch_execz BB0_4
    *
    * It could just be s_and_saveexec_b64 s, 1.
    */
   ac_build_ifcc(&ctx->ac, LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ, thread_id, ctx->ac.i32_0, ""),
                 12601);
}

/* Exit a section that only executes on thread 0 and broadcast the result
 * to all threads. */
static void si_exit_thread0_section(struct si_thread0_section *section, LLVMValueRef *result)
{
   struct si_shader_context *ctx = section->ctx;

   LLVMBuildStore(ctx->ac.builder, *result, section->vgpr_result);

   ac_build_endif(&ctx->ac, 12601);

   /* Broadcast the result from thread 0 to all threads. */
   *result =
      ac_build_readlane(&ctx->ac, LLVMBuildLoad(ctx->ac.builder, section->vgpr_result, ""), NULL);
}
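
/* Usage sketch (this mirrors the two uses later in this file):
 *
 *    struct si_thread0_section section;
 *    si_enter_thread0_section(ctx, &section, thread_id);
 *    value = ...; // computed by thread 0 only
 *    si_exit_thread0_section(&section, &value); // broadcasts to all threads
 */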

void si_build_prim_discard_compute_shader(struct si_shader_context *ctx)
{
   struct si_shader_key *key = &ctx->shader->key;
   LLVMBuilderRef builder = ctx->ac.builder;
   LLVMValueRef vs = ctx->main_fn;

   /* Always inline the VS function. */
   ac_add_function_attr(ctx->ac.context, vs, -1, AC_FUNC_ATTR_ALWAYSINLINE);
   LLVMSetLinkage(vs, LLVMPrivateLinkage);

   enum ac_arg_type const_desc_type;
   if (ctx->shader->selector->info.const_buffers_declared == 1 &&
       ctx->shader->selector->info.shader_buffers_declared == 0)
      const_desc_type = AC_ARG_CONST_FLOAT_PTR;
   else
      const_desc_type = AC_ARG_CONST_DESC_PTR;

   memset(&ctx->args, 0, sizeof(ctx->args));

   struct ac_arg param_index_buffers_and_constants, param_vertex_counter;
   struct ac_arg param_vb_desc, param_const_desc;
   struct ac_arg param_base_vertex, param_start_instance;
   struct ac_arg param_block_id, param_local_id, param_ordered_wave_id;
   struct ac_arg param_restart_index, param_smallprim_precision;
   struct ac_arg param_num_prims_udiv_multiplier, param_num_prims_udiv_terms;
   struct ac_arg param_sampler_desc, param_last_wave_prim_id, param_vertex_count_addr;

   ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_CONST_DESC_PTR,
              &param_index_buffers_and_constants);
   ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &param_vertex_counter);
   ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &param_last_wave_prim_id);
   ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &param_vertex_count_addr);
   ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_CONST_DESC_PTR, &param_vb_desc);
   ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, const_desc_type, &param_const_desc);
   ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_CONST_IMAGE_PTR, &param_sampler_desc);
   ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &param_base_vertex);
   ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &param_start_instance);
   ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &param_num_prims_udiv_multiplier);
   ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &param_num_prims_udiv_terms);
   ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &param_restart_index);
   ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, &param_smallprim_precision);

   /* Block ID and thread ID inputs. */
   ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &param_block_id);
   if (VERTEX_COUNTER_GDS_MODE == 2)
      ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &param_ordered_wave_id);
   ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &param_local_id);

   /* Create the compute shader function. */
   unsigned old_type = ctx->type;
   ctx->type = PIPE_SHADER_COMPUTE;
   si_llvm_create_func(ctx, "prim_discard_cs", NULL, 0, THREADGROUP_SIZE);
   ctx->type = old_type;

   if (VERTEX_COUNTER_GDS_MODE == 2) {
      ac_llvm_add_target_dep_function_attr(ctx->main_fn, "amdgpu-gds-size", 256);
   } else if (VERTEX_COUNTER_GDS_MODE == 1) {
      ac_llvm_add_target_dep_function_attr(ctx->main_fn, "amdgpu-gds-size", GDS_SIZE_UNORDERED);
   }

   /* Assemble parameters for VS. */
   LLVMValueRef vs_params[16];
   unsigned num_vs_params = 0;
   unsigned param_vertex_id, param_instance_id;

   vs_params[num_vs_params++] = LLVMGetUndef(LLVMTypeOf(LLVMGetParam(vs, 0))); /* RW_BUFFERS */
   vs_params[num_vs_params++] = LLVMGetUndef(LLVMTypeOf(LLVMGetParam(vs, 1))); /* BINDLESS */
   vs_params[num_vs_params++] = ac_get_arg(&ctx->ac, param_const_desc);
   vs_params[num_vs_params++] = ac_get_arg(&ctx->ac, param_sampler_desc);
   vs_params[num_vs_params++] =
      LLVMConstInt(ctx->ac.i32, S_VS_STATE_INDEXED(key->opt.cs_indexed), 0);
   vs_params[num_vs_params++] = ac_get_arg(&ctx->ac, param_base_vertex);
   vs_params[num_vs_params++] = ac_get_arg(&ctx->ac, param_start_instance);
   vs_params[num_vs_params++] = ctx->ac.i32_0; /* DrawID */
   vs_params[num_vs_params++] = ac_get_arg(&ctx->ac, param_vb_desc);

   vs_params[(param_vertex_id = num_vs_params++)] = NULL;   /* VertexID */
   vs_params[(param_instance_id = num_vs_params++)] = NULL; /* InstanceID */
   vs_params[num_vs_params++] = ctx->ac.i32_0; /* unused (PrimID) */
   vs_params[num_vs_params++] = ctx->ac.i32_0; /* unused */

   assert(num_vs_params <= ARRAY_SIZE(vs_params));
   assert(num_vs_params == LLVMCountParamTypes(LLVMGetElementType(LLVMTypeOf(vs))));

   /* Load descriptors. (load 8 dwords at once) */
   LLVMValueRef input_indexbuf, output_indexbuf, tmp, desc[8];

   LLVMValueRef index_buffers_and_constants =
      ac_get_arg(&ctx->ac, param_index_buffers_and_constants);
   tmp = LLVMBuildPointerCast(builder, index_buffers_and_constants,
                              ac_array_in_const32_addr_space(ctx->ac.v8i32), "");
   tmp = ac_build_load_to_sgpr(&ctx->ac, tmp, ctx->ac.i32_0);

   for (unsigned i = 0; i < 8; i++)
      desc[i] = ac_llvm_extract_elem(&ctx->ac, tmp, i);

   input_indexbuf = ac_build_gather_values(&ctx->ac, desc, 4);
   output_indexbuf = ac_build_gather_values(&ctx->ac, desc + 4, 4);

   /* Compute PrimID and InstanceID. */
   LLVMValueRef global_thread_id = ac_build_imad(&ctx->ac, ac_get_arg(&ctx->ac, param_block_id),
                                                 LLVMConstInt(ctx->ac.i32, THREADGROUP_SIZE, 0),
                                                 ac_get_arg(&ctx->ac, param_local_id));
   LLVMValueRef prim_id = global_thread_id; /* PrimID within an instance */
   LLVMValueRef instance_id = ctx->ac.i32_0;

   if (key->opt.cs_instancing) {
      LLVMValueRef num_prims_udiv_terms = ac_get_arg(&ctx->ac, param_num_prims_udiv_terms);
      LLVMValueRef num_prims_udiv_multiplier =
         ac_get_arg(&ctx->ac, param_num_prims_udiv_multiplier);
      /* Unpack num_prims_udiv_terms. */
      LLVMValueRef post_shift =
         LLVMBuildAnd(builder, num_prims_udiv_terms, LLVMConstInt(ctx->ac.i32, 0x1f, 0), "");
      LLVMValueRef prims_per_instance =
         LLVMBuildLShr(builder, num_prims_udiv_terms, LLVMConstInt(ctx->ac.i32, 5, 0), "");
      /* Divide the total prim_id by the number of prims per instance. */
      instance_id =
         ac_build_fast_udiv_u31_d_not_one(&ctx->ac, prim_id, num_prims_udiv_multiplier, post_shift);
      /* Compute the remainder. */
      prim_id = LLVMBuildSub(builder, prim_id,
                             LLVMBuildMul(builder, instance_id, prims_per_instance, ""), "");
   }

   /* Generate indices (like a non-indexed draw call). */
   LLVMValueRef index[4] = {NULL, NULL, NULL, LLVMGetUndef(ctx->ac.i32)};
   unsigned vertices_per_prim = 3;

   switch (key->opt.cs_prim_type) {
   case PIPE_PRIM_TRIANGLES:
      for (unsigned i = 0; i < 3; i++) {
         index[i] = ac_build_imad(&ctx->ac, prim_id, LLVMConstInt(ctx->ac.i32, 3, 0),
                                  LLVMConstInt(ctx->ac.i32, i, 0));
      }
      break;
   case PIPE_PRIM_TRIANGLE_STRIP:
      for (unsigned i = 0; i < 3; i++) {
         index[i] = LLVMBuildAdd(builder, prim_id, LLVMConstInt(ctx->ac.i32, i, 0), "");
      }
      break;
   case PIPE_PRIM_TRIANGLE_FAN:
      /* Vertex 1 is first and vertex 2 is last. This will go to the hw clipper
       * and rasterizer as a normal triangle, so we need to put the provoking
       * vertex into the correct index variable and preserve orientation at the same time.
       * gl_VertexID is preserved, because it's equal to the index.
       */
      if (key->opt.cs_provoking_vertex_first) {
         index[0] = LLVMBuildAdd(builder, prim_id, LLVMConstInt(ctx->ac.i32, 1, 0), "");
         index[1] = LLVMBuildAdd(builder, prim_id, LLVMConstInt(ctx->ac.i32, 2, 0), "");
         index[2] = ctx->ac.i32_0;
      } else {
         index[0] = ctx->ac.i32_0;
         index[1] = LLVMBuildAdd(builder, prim_id, LLVMConstInt(ctx->ac.i32, 1, 0), "");
         index[2] = LLVMBuildAdd(builder, prim_id, LLVMConstInt(ctx->ac.i32, 2, 0), "");
      }
      break;
   default:
      unreachable("unexpected primitive type");
   }

   if (key->opt.cs_indexed) {
      for (unsigned i = 0; i < 3; i++) {
         index[i] = ac_build_buffer_load_format(&ctx->ac, input_indexbuf, index[i], ctx->ac.i32_0,
                                                1, 0, true);
         index[i] = ac_to_integer(&ctx->ac, index[i]);
      }
   }

   LLVMValueRef ordered_wave_id = NULL;

   /* Extract the ordered wave ID. */
   if (VERTEX_COUNTER_GDS_MODE == 2) {
      ordered_wave_id = ac_get_arg(&ctx->ac, param_ordered_wave_id);
      ordered_wave_id =
         LLVMBuildLShr(builder, ordered_wave_id, LLVMConstInt(ctx->ac.i32, 6, 0), "");
      ordered_wave_id =
         LLVMBuildAnd(builder, ordered_wave_id, LLVMConstInt(ctx->ac.i32, 0xfff, 0), "");
   }
   LLVMValueRef thread_id = LLVMBuildAnd(builder, ac_get_arg(&ctx->ac, param_local_id),
                                         LLVMConstInt(ctx->ac.i32, 63, 0), "");

   /* Every other triangle in a strip has a reversed vertex order, so we
    * need to swap vertices of odd primitives to get the correct primitive
    * orientation when converting triangle strips to triangles. Primitive
    * restart complicates it, because a strip can start anywhere.
    */
   LLVMValueRef prim_restart_accepted = ctx->ac.i1true;
   LLVMValueRef vertex_counter = ac_get_arg(&ctx->ac, param_vertex_counter);

   if (key->opt.cs_prim_type == PIPE_PRIM_TRIANGLE_STRIP) {
      /* Without primitive restart, odd primitives have reversed orientation.
       * Only primitive restart can flip it with respect to the first vertex
       * of the draw call.
       */
      LLVMValueRef first_is_odd = ctx->ac.i1false;

      /* Handle primitive restart. */
      if (key->opt.cs_primitive_restart) {
         /* Get the GDS primitive restart continue flag and clear
          * the flag in vertex_counter. This flag is used when the draw
          * call was split and we need to load the primitive orientation
          * flag from GDS for the first wave too.
          */
         LLVMValueRef gds_prim_restart_continue =
            LLVMBuildLShr(builder, vertex_counter, LLVMConstInt(ctx->ac.i32, 31, 0), "");
         gds_prim_restart_continue =
            LLVMBuildTrunc(builder, gds_prim_restart_continue, ctx->ac.i1, "");
         vertex_counter =
            LLVMBuildAnd(builder, vertex_counter, LLVMConstInt(ctx->ac.i32, 0x7fffffff, 0), "");

         LLVMValueRef index0_is_reset;

         for (unsigned i = 0; i < 3; i++) {
            LLVMValueRef not_reset = LLVMBuildICmp(builder, LLVMIntNE, index[i],
                                                   ac_get_arg(&ctx->ac, param_restart_index), "");
            if (i == 0)
               index0_is_reset = LLVMBuildNot(builder, not_reset, "");
            prim_restart_accepted = LLVMBuildAnd(builder, prim_restart_accepted, not_reset, "");
         }

         /* If the previous waves flip the primitive orientation
          * of the current triangle strip, it will be stored in GDS.
          *
          * Sometimes the correct orientation is not needed, in which case
          * we don't need to execute this.
          */
         if (key->opt.cs_need_correct_orientation && VERTEX_COUNTER_GDS_MODE == 2) {
            /* If there are reset indices in this wave, get the thread index
             * where the most recent strip starts relative to each thread.
             */
            LLVMValueRef preceding_threads_mask =
               LLVMBuildSub(builder,
                            LLVMBuildShl(builder, ctx->ac.i64_1,
                                         LLVMBuildZExt(builder, thread_id, ctx->ac.i64, ""), ""),
                            ctx->ac.i64_1, "");

            LLVMValueRef reset_threadmask = ac_get_i1_sgpr_mask(&ctx->ac, index0_is_reset);
            LLVMValueRef preceding_reset_threadmask =
               LLVMBuildAnd(builder, reset_threadmask, preceding_threads_mask, "");
            LLVMValueRef strip_start = ac_build_umsb(&ctx->ac, preceding_reset_threadmask, NULL);
            strip_start = LLVMBuildAdd(builder, strip_start, ctx->ac.i32_1, "");

            /* This flips the orientation based on reset indices within this wave only. */
            first_is_odd = LLVMBuildTrunc(builder, strip_start, ctx->ac.i1, "");

            LLVMValueRef last_strip_start, prev_wave_state, ret, tmp;
            LLVMValueRef is_first_wave, current_wave_resets_index;

            /* Get the thread index where the last strip starts in this wave.
             *
             * If the last strip doesn't start in this wave, the thread index
             * will be 0.
             *
             * If the last strip starts in the next wave, the thread index will
             * be 64.
             */
            last_strip_start = ac_build_umsb(&ctx->ac, reset_threadmask, NULL);
            last_strip_start = LLVMBuildAdd(builder, last_strip_start, ctx->ac.i32_1, "");

            struct si_thread0_section section;
            si_enter_thread0_section(ctx, &section, thread_id);

            /* This must be done in the thread 0 section, because
             * we expect PrimID to be 0 for the whole first wave
             * in this expression.
             *
             * NOTE: This will need to be different if we want to support
             * instancing with primitive restart.
             */
            is_first_wave = LLVMBuildICmp(builder, LLVMIntEQ, prim_id, ctx->ac.i32_0, "");
            is_first_wave = LLVMBuildAnd(builder, is_first_wave,
                                         LLVMBuildNot(builder, gds_prim_restart_continue, ""), "");
            current_wave_resets_index =
               LLVMBuildICmp(builder, LLVMIntNE, last_strip_start, ctx->ac.i32_0, "");

            ret = ac_build_alloca_undef(&ctx->ac, ctx->ac.i32, "prev_state");

            /* Save the last strip start primitive index in GDS and read
             * the value that previous waves stored.
             *
             * if (is_first_wave || current_wave_resets_strip)
             *    // Read the value that previous waves stored and store a new one.
             *    first_is_odd = ds.ordered.swap(last_strip_start);
             * else
             *    // Just read the value that previous waves stored.
             *    first_is_odd = ds.ordered.add(0);
             */
            ac_build_ifcc(
               &ctx->ac, LLVMBuildOr(builder, is_first_wave, current_wave_resets_index, ""), 12602);
            {
               /* The GDS address is always 0 with ordered append. */
               tmp = si_build_ds_ordered_op(ctx, "swap", ordered_wave_id, last_strip_start, 1, true,
                                            false);
               LLVMBuildStore(builder, tmp, ret);
            }
            ac_build_else(&ctx->ac, 12603);
            {
               /* Just read the value from GDS. */
               tmp = si_build_ds_ordered_op(ctx, "add", ordered_wave_id, ctx->ac.i32_0, 1, true,
                                            false);
               LLVMBuildStore(builder, tmp, ret);
            }
            ac_build_endif(&ctx->ac, 12602);

            prev_wave_state = LLVMBuildLoad(builder, ret, "");
            /* Ignore the return value if this is the first wave. */
            prev_wave_state =
               LLVMBuildSelect(builder, is_first_wave, ctx->ac.i32_0, prev_wave_state, "");
            si_exit_thread0_section(&section, &prev_wave_state);
            prev_wave_state = LLVMBuildTrunc(builder, prev_wave_state, ctx->ac.i1, "");

            /* If the strip start appears to be on thread 0 for the current primitive
             * (meaning the reset index is not present in this wave and might have
             * appeared in previous waves), use the value from GDS to determine
             * primitive orientation.
             *
             * If the strip start is in this wave for the current primitive, use
             * the value from the current wave to determine primitive orientation.
             */
            LLVMValueRef strip_start_is0 =
               LLVMBuildICmp(builder, LLVMIntEQ, strip_start, ctx->ac.i32_0, "");
            first_is_odd =
               LLVMBuildSelect(builder, strip_start_is0, prev_wave_state, first_is_odd, "");
         }
      }
      /* prim_is_odd = (first_is_odd + current_is_odd) % 2. */
      LLVMValueRef prim_is_odd = LLVMBuildXor(
         builder, first_is_odd, LLVMBuildTrunc(builder, thread_id, ctx->ac.i1, ""), "");

      /* Convert triangle strip indices to triangle indices. */
      ac_build_triangle_strip_indices_to_triangle(
         &ctx->ac, prim_is_odd, LLVMConstInt(ctx->ac.i1, key->opt.cs_provoking_vertex_first, 0),
         index);
   }

   /* Execute the vertex shader for each vertex to get vertex positions. */
   LLVMValueRef pos[3][4];
   for (unsigned i = 0; i < vertices_per_prim; i++) {
      vs_params[param_vertex_id] = index[i];
      vs_params[param_instance_id] = instance_id;

      LLVMValueRef ret = ac_build_call(&ctx->ac, vs, vs_params, num_vs_params);
      for (unsigned chan = 0; chan < 4; chan++)
         pos[i][chan] = LLVMBuildExtractValue(builder, ret, chan, "");
   }

   /* Divide XYZ by W. */
   for (unsigned i = 0; i < vertices_per_prim; i++) {
      for (unsigned chan = 0; chan < 3; chan++)
         pos[i][chan] = ac_build_fdiv(&ctx->ac, pos[i][chan], pos[i][3]);
   }

   /* Load the viewport state. */
   LLVMValueRef vp = ac_build_load_invariant(&ctx->ac, index_buffers_and_constants,
                                             LLVMConstInt(ctx->ac.i32, 2, 0));
   vp = LLVMBuildBitCast(builder, vp, ctx->ac.v4f32, "");
   LLVMValueRef vp_scale[2], vp_translate[2];
   vp_scale[0] = ac_llvm_extract_elem(&ctx->ac, vp, 0);
   vp_scale[1] = ac_llvm_extract_elem(&ctx->ac, vp, 1);
   vp_translate[0] = ac_llvm_extract_elem(&ctx->ac, vp, 2);
   vp_translate[1] = ac_llvm_extract_elem(&ctx->ac, vp, 3);

   /* Do culling. */
   struct ac_cull_options options = {};
   options.cull_front = key->opt.cs_cull_front;
   options.cull_back = key->opt.cs_cull_back;
   options.cull_view_xy = true;
   options.cull_view_near_z = CULL_Z && key->opt.cs_cull_z;
   options.cull_view_far_z = CULL_Z && key->opt.cs_cull_z;
   options.cull_small_prims = true;
   options.cull_zero_area = true;
   options.cull_w = true;
   options.use_halfz_clip_space = key->opt.cs_halfz_clip_space;

   LLVMValueRef accepted =
      ac_cull_triangle(&ctx->ac, pos, prim_restart_accepted, vp_scale, vp_translate,
                       ac_get_arg(&ctx->ac, param_smallprim_precision), &options);

   ac_build_optimization_barrier(&ctx->ac, &accepted);
   LLVMValueRef accepted_threadmask = ac_get_i1_sgpr_mask(&ctx->ac, accepted);

   /* Count the number of active threads by doing bitcount(accepted). */
   LLVMValueRef num_prims_accepted = ac_build_intrinsic(
      &ctx->ac, "llvm.ctpop.i64", ctx->ac.i64, &accepted_threadmask, 1, AC_FUNC_ATTR_READNONE);
   num_prims_accepted = LLVMBuildTrunc(builder, num_prims_accepted, ctx->ac.i32, "");

   LLVMValueRef start;

   /* Execute atomic_add on the vertex count. */
   struct si_thread0_section section;
   si_enter_thread0_section(ctx, &section, thread_id);
   {
      if (VERTEX_COUNTER_GDS_MODE == 0) {
         LLVMValueRef num_indices = LLVMBuildMul(
            builder, num_prims_accepted, LLVMConstInt(ctx->ac.i32, vertices_per_prim, 0), "");
         vertex_counter = si_expand_32bit_pointer(ctx, vertex_counter);
         start = LLVMBuildAtomicRMW(builder, LLVMAtomicRMWBinOpAdd, vertex_counter, num_indices,
                                    LLVMAtomicOrderingMonotonic, false);
      } else if (VERTEX_COUNTER_GDS_MODE == 1) {
         LLVMValueRef num_indices = LLVMBuildMul(
            builder, num_prims_accepted, LLVMConstInt(ctx->ac.i32, vertices_per_prim, 0), "");
         vertex_counter = LLVMBuildIntToPtr(builder, vertex_counter,
                                            LLVMPointerType(ctx->ac.i32, AC_ADDR_SPACE_GDS), "");
         start = LLVMBuildAtomicRMW(builder, LLVMAtomicRMWBinOpAdd, vertex_counter, num_indices,
                                    LLVMAtomicOrderingMonotonic, false);
      } else if (VERTEX_COUNTER_GDS_MODE == 2) {
         LLVMValueRef tmp_store = ac_build_alloca_undef(&ctx->ac, ctx->ac.i32, "");

         /* If the draw call was split into multiple subdraws, each using
          * a separate draw packet, we need to start counting from 0 for
          * the first compute wave of the subdraw.
          *
          * vertex_counter contains the primitive ID of the first thread
          * in the first wave.
          *
          * This is only correct with VERTEX_COUNTER_GDS_MODE == 2:
          */
         LLVMValueRef is_first_wave =
            LLVMBuildICmp(builder, LLVMIntEQ, global_thread_id, vertex_counter, "");

         /* Store the primitive count for ordered append, not vertex count.
          * The idea is to avoid GDS initialization via CP DMA. The shader
          * effectively stores the first count using "swap".
          *
          * if (first_wave) {
          *    ds.ordered.swap(num_prims_accepted); // store the first primitive count
          *    previous = 0;
          * } else {
          *    previous = ds.ordered.add(num_prims_accepted); // add the primitive count
          * }
          */
         ac_build_ifcc(&ctx->ac, is_first_wave, 12604);
         {
            /* The GDS address is always 0 with ordered append. */
            si_build_ds_ordered_op(ctx, "swap", ordered_wave_id, num_prims_accepted, 0, true, true);
            LLVMBuildStore(builder, ctx->ac.i32_0, tmp_store);
         }
         ac_build_else(&ctx->ac, 12605);
         {
            LLVMBuildStore(builder,
                           si_build_ds_ordered_op(ctx, "add", ordered_wave_id, num_prims_accepted,
                                                  0, true, true),
                           tmp_store);
         }
         ac_build_endif(&ctx->ac, 12604);

         start = LLVMBuildLoad(builder, tmp_store, "");
      }
   }
   si_exit_thread0_section(&section, &start);

   /* Write the final vertex count to memory. An EOS/EOP event could do this,
    * but those events are super slow and should be avoided if performance
    * is a concern. Thanks to GDS ordered append, we can emulate a CS_DONE
    * event like this.
    */
   if (VERTEX_COUNTER_GDS_MODE == 2) {
      ac_build_ifcc(&ctx->ac,
                    LLVMBuildICmp(builder, LLVMIntEQ, global_thread_id,
                                  ac_get_arg(&ctx->ac, param_last_wave_prim_id), ""),
                    12606);
      LLVMValueRef count = LLVMBuildAdd(builder, start, num_prims_accepted, "");
      count = LLVMBuildMul(builder, count, LLVMConstInt(ctx->ac.i32, vertices_per_prim, 0), "");

      /* GFX8 needs to disable caching, so that the CP can see the stored value.
       * MTYPE=3 bypasses TC L2.
       */
      if (ctx->screen->info.chip_class <= GFX8) {
         LLVMValueRef desc[] = {
            ac_get_arg(&ctx->ac, param_vertex_count_addr),
            LLVMConstInt(ctx->ac.i32, S_008F04_BASE_ADDRESS_HI(ctx->screen->info.address32_hi), 0),
            LLVMConstInt(ctx->ac.i32, 4, 0),
            LLVMConstInt(ctx->ac.i32,
                         S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
                            S_008F0C_MTYPE(3 /* uncached */),
                         0),
         };

         LLVMValueRef rsrc = ac_build_gather_values(&ctx->ac, desc, 4);
         ac_build_buffer_store_dword(&ctx->ac, rsrc, count, 1, ctx->ac.i32_0, ctx->ac.i32_0, 0,
                                     ac_glc | ac_slc);
      } else {
         LLVMBuildStore(builder, count,
                        si_expand_32bit_pointer(ctx, ac_get_arg(&ctx->ac, param_vertex_count_addr)));
      }
      ac_build_endif(&ctx->ac, 12606);
   } else {
      /* For unordered modes that increment a vertex count instead of
       * primitive count, convert it into the primitive index.
       */
      start = LLVMBuildUDiv(builder, start, LLVMConstInt(ctx->ac.i32, vertices_per_prim, 0), "");
   }

   /* Now we need to store the indices of accepted primitives into
    * the output index buffer.
    */
   ac_build_ifcc(&ctx->ac, accepted, 16607);
   {
      /* Get the number of bits set before the index of this thread. */
      LLVMValueRef prim_index = ac_build_mbcnt(&ctx->ac, accepted_threadmask);

      /* We have lowered instancing. Pack the instance ID into vertex ID. */
      if (key->opt.cs_instancing) {
         instance_id = LLVMBuildShl(builder, instance_id, LLVMConstInt(ctx->ac.i32, 16, 0), "");

         for (unsigned i = 0; i < vertices_per_prim; i++)
            index[i] = LLVMBuildOr(builder, index[i], instance_id, "");
      }

      if (VERTEX_COUNTER_GDS_MODE == 2) {
         /* vertex_counter contains the first primitive ID
          * for this dispatch. If the draw call was split into
          * multiple subdraws, the first primitive ID is > 0
          * for subsequent subdraws. Each subdraw uses a different
          * portion of the output index buffer. Offset the store
          * vindex by the first primitive ID to get the correct
          * store address for the subdraw.
          */
         start = LLVMBuildAdd(builder, start, vertex_counter, "");
      }

      /* Write indices for accepted primitives. */
      LLVMValueRef vindex = LLVMBuildAdd(builder, start, prim_index, "");
      LLVMValueRef vdata = ac_build_gather_values(&ctx->ac, index, 3);

      if (!ac_has_vec3_support(ctx->ac.chip_class, true))
         vdata = ac_build_expand_to_vec4(&ctx->ac, vdata, 3);

      ac_build_buffer_store_format(&ctx->ac, output_indexbuf, vdata, vindex, ctx->ac.i32_0,
                                   ac_glc | (INDEX_STORES_USE_SLC ? ac_slc : 0));
   }
   ac_build_endif(&ctx->ac, 16607);

   LLVMBuildRetVoid(builder);
}

/* Return false if the shader isn't ready. */
static bool si_shader_select_prim_discard_cs(struct si_context *sctx,
                                             const struct pipe_draw_info *info,
                                             bool primitive_restart)
{
   struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
   struct si_shader_key key;

   /* Primitive restart needs ordered counters. */
   assert(!primitive_restart || VERTEX_COUNTER_GDS_MODE == 2);
   assert(!primitive_restart || info->instance_count == 1);

   memset(&key, 0, sizeof(key));
   si_shader_selector_key_vs(sctx, sctx->vs_shader.cso, &key, &key.part.vs.prolog);
   assert(!key.part.vs.prolog.instance_divisor_is_fetched);

   key.part.vs.prolog.unpack_instance_id_from_vertex_id = 0;
   key.opt.vs_as_prim_discard_cs = 1;
   key.opt.cs_prim_type = info->mode;
   key.opt.cs_indexed = info->index_size != 0;
   key.opt.cs_instancing = info->instance_count > 1;
   key.opt.cs_primitive_restart = primitive_restart;
   key.opt.cs_provoking_vertex_first = rs->provoking_vertex_first;

   /* Primitive restart with triangle strips needs to preserve primitive
    * orientation for cases where front and back primitive orientation matters.
    */
   if (primitive_restart) {
      struct si_shader_selector *ps = sctx->ps_shader.cso;

      key.opt.cs_need_correct_orientation = rs->cull_front != rs->cull_back ||
                                            ps->info.uses_frontface ||
                                            (rs->two_side && ps->info.colors_read);
   }

   if (rs->rasterizer_discard) {
      /* Just for performance testing and analysis of trivial bottlenecks.
       * This should result in a very short compute shader. */
      key.opt.cs_cull_front = 1;
      key.opt.cs_cull_back = 1;
   } else {
      key.opt.cs_cull_front = sctx->viewports.y_inverted ? rs->cull_back : rs->cull_front;
      key.opt.cs_cull_back = sctx->viewports.y_inverted ? rs->cull_front : rs->cull_back;
   }

   if (!rs->depth_clamp_any && CULL_Z) {
      key.opt.cs_cull_z = 1;
      key.opt.cs_halfz_clip_space = rs->clip_halfz;
   }

   sctx->cs_prim_discard_state.cso = sctx->vs_shader.cso;
   sctx->cs_prim_discard_state.current = NULL;

   if (!sctx->compiler.passes)
      si_init_compiler(sctx->screen, &sctx->compiler);

   struct si_compiler_ctx_state compiler_state;
   compiler_state.compiler = &sctx->compiler;
   compiler_state.debug = sctx->debug;
   compiler_state.is_debug_context = sctx->is_debug;

   return si_shader_select_with_key(sctx->screen, &sctx->cs_prim_discard_state, &compiler_state,
                                    &key, -1, true) == 0 &&
          /* Disallow compute shaders using the scratch buffer. */
          sctx->cs_prim_discard_state.current->config.scratch_bytes_per_wave == 0;
}

static bool si_initialize_prim_discard_cmdbuf(struct si_context *sctx)
{
   if (sctx->index_ring)
      return true;

   if (!sctx->prim_discard_compute_cs) {
      struct radeon_winsys *ws = sctx->ws;
      unsigned gds_size =
         VERTEX_COUNTER_GDS_MODE == 1 ? GDS_SIZE_UNORDERED : VERTEX_COUNTER_GDS_MODE == 2 ? 8 : 0;
      unsigned num_oa_counters = VERTEX_COUNTER_GDS_MODE == 2 ? 2 : 0;

      if (gds_size) {
         sctx->gds = ws->buffer_create(ws, gds_size, 4, RADEON_DOMAIN_GDS, 0);
         if (!sctx->gds)
            return false;

         ws->cs_add_buffer(sctx->gfx_cs, sctx->gds, RADEON_USAGE_READWRITE, 0, 0);
      }
      if (num_oa_counters) {
         assert(gds_size);
         sctx->gds_oa = ws->buffer_create(ws, num_oa_counters, 1, RADEON_DOMAIN_OA, 0);
         if (!sctx->gds_oa)
            return false;

         ws->cs_add_buffer(sctx->gfx_cs, sctx->gds_oa, RADEON_USAGE_READWRITE, 0, 0);
      }

      sctx->prim_discard_compute_cs =
         ws->cs_add_parallel_compute_ib(sctx->gfx_cs, num_oa_counters > 0);
      if (!sctx->prim_discard_compute_cs)
         return false;
   }

   if (!sctx->index_ring) {
      sctx->index_ring = si_aligned_buffer_create(
         sctx->b.screen, SI_RESOURCE_FLAG_UNMAPPABLE, PIPE_USAGE_DEFAULT,
         sctx->index_ring_size_per_ib * 2, sctx->screen->info.pte_fragment_size);
      if (!sctx->index_ring)
         return false;
   }
   return true;
}

static bool si_check_ring_space(struct si_context *sctx, unsigned out_indexbuf_size)
{
   return sctx->index_ring_offset +
             align(out_indexbuf_size, sctx->screen->info.tcc_cache_line_size) <=
          sctx->index_ring_size_per_ib;
}

enum si_prim_discard_outcome
si_prepare_prim_discard_or_split_draw(struct si_context *sctx, const struct pipe_draw_info *info,
                                      bool primitive_restart)
{
   /* If the compute shader compilation isn't finished, this returns false. */
   if (!si_shader_select_prim_discard_cs(sctx, info, primitive_restart))
      return SI_PRIM_DISCARD_DISABLED;

   if (!si_initialize_prim_discard_cmdbuf(sctx))
      return SI_PRIM_DISCARD_DISABLED;

   struct radeon_cmdbuf *gfx_cs = sctx->gfx_cs;
   unsigned prim = info->mode;
   unsigned count = info->count;
   unsigned instance_count = info->instance_count;
   unsigned num_prims_per_instance = u_decomposed_prims_for_vertices(prim, count);
   unsigned num_prims = num_prims_per_instance * instance_count;
   unsigned out_indexbuf_size = num_prims * 12;
   bool ring_full = !si_check_ring_space(sctx, out_indexbuf_size);
   const unsigned split_prims_draw_level = SPLIT_PRIMS_DRAW_LEVEL;

   /* Split draws at the draw call level if the ring is full. This makes
    * better use of the ring space.
    */
   if (ring_full && num_prims > split_prims_draw_level &&
       instance_count == 1 && /* TODO: support splitting instanced draws */
       (1 << prim) & ((1 << PIPE_PRIM_TRIANGLES) | (1 << PIPE_PRIM_TRIANGLE_STRIP))) {
      /* Split draws. */
      struct pipe_draw_info split_draw = *info;
      split_draw.primitive_restart = primitive_restart;

      unsigned base_start = split_draw.start;

      if (prim == PIPE_PRIM_TRIANGLES) {
         unsigned vert_count_per_subdraw = split_prims_draw_level * 3;
         assert(vert_count_per_subdraw < count);

         for (unsigned start = 0; start < count; start += vert_count_per_subdraw) {
            split_draw.start = base_start + start;
            split_draw.count = MIN2(count - start, vert_count_per_subdraw);

            sctx->b.draw_vbo(&sctx->b, &split_draw);
         }
      } else if (prim == PIPE_PRIM_TRIANGLE_STRIP) {
         /* No primitive pair can be split, because strips reverse orientation
          * for odd primitives. */
         STATIC_ASSERT(split_prims_draw_level % 2 == 0);

         unsigned vert_count_per_subdraw = split_prims_draw_level;

         for (unsigned start = 0; start < count - 2; start += vert_count_per_subdraw) {
            split_draw.start = base_start + start;
            split_draw.count = MIN2(count - start, vert_count_per_subdraw + 2);

            sctx->b.draw_vbo(&sctx->b, &split_draw);

            if (start == 0 && primitive_restart &&
                sctx->cs_prim_discard_state.current->key.opt.cs_need_correct_orientation)
               sctx->preserve_prim_restart_gds_at_flush = true;
         }
         sctx->preserve_prim_restart_gds_at_flush = false;
      } else {
         unreachable("split_draw");
      }

      return SI_PRIM_DISCARD_DRAW_SPLIT;
   }

   /* Just quit if the draw call doesn't fit into the ring and can't be split. */
   if (out_indexbuf_size > sctx->index_ring_size_per_ib) {
      if (SI_PRIM_DISCARD_DEBUG)
         puts("PD failed: draw call too big, can't be split");
      return SI_PRIM_DISCARD_DISABLED;
   }

   unsigned num_subdraws = DIV_ROUND_UP(num_prims, SPLIT_PRIMS_PACKET_LEVEL);
   unsigned need_compute_dw = 11 /* shader */ + 34 /* first draw */ +
                              24 * (num_subdraws - 1) + /* subdraws */
                              30; /* leave some space at the end */
   unsigned need_gfx_dw = si_get_minimum_num_gfx_cs_dwords(sctx);

   if (sctx->chip_class <= GFX7 || FORCE_REWIND_EMULATION)
      need_gfx_dw += 9; /* NOP(2) + WAIT_REG_MEM(7), then chain */
   else
      need_gfx_dw += num_subdraws * 8; /* use REWIND(2) + DRAW(6) */

   if (ring_full ||
       (VERTEX_COUNTER_GDS_MODE == 1 && sctx->compute_gds_offset + 8 > GDS_SIZE_UNORDERED) ||
       !sctx->ws->cs_check_space(gfx_cs, need_gfx_dw, false)) {
      /* If the current IB is empty but the size is too small, add a NOP
       * packet to force a flush and get a bigger IB.
       */
      if (!radeon_emitted(gfx_cs, sctx->initial_gfx_cs_size) &&
          gfx_cs->current.cdw + need_gfx_dw > gfx_cs->current.max_dw) {
         radeon_emit(gfx_cs, PKT3(PKT3_NOP, 0, 0));
         radeon_emit(gfx_cs, 0);
      }

      si_flush_gfx_cs(sctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
   }

   /* The compute IB is always chained, but we need to call cs_check_space to add more space. */
   struct radeon_cmdbuf *cs = sctx->prim_discard_compute_cs;
   ASSERTED bool compute_has_space = sctx->ws->cs_check_space(cs, need_compute_dw, false);
   assert(compute_has_space);
   assert(si_check_ring_space(sctx, out_indexbuf_size));
   return SI_PRIM_DISCARD_ENABLED;
}

void si_compute_signal_gfx(struct si_context *sctx)
{
   struct radeon_cmdbuf *cs = sctx->prim_discard_compute_cs;
   unsigned writeback_L2_flags = 0;

   /* The writeback L2 flags vary with each chip generation. */
   /* CI needs to flush vertex indices to memory. */
   if (sctx->chip_class <= GFX7)
      writeback_L2_flags = EVENT_TC_WB_ACTION_ENA;
   else if (sctx->chip_class == GFX8 && VERTEX_COUNTER_GDS_MODE == 0)
      writeback_L2_flags = EVENT_TC_WB_ACTION_ENA | EVENT_TC_NC_ACTION_ENA;

   if (!sctx->compute_num_prims_in_batch)
      return;

   assert(sctx->compute_rewind_va);

   /* After the queued dispatches are done and vertex counts are written to
    * the gfx IB, signal the gfx IB to continue. CP doesn't wait for
    * the dispatches to finish, it only adds the CS_DONE event into the event
    * queue.
    */
   si_cp_release_mem(sctx, cs, V_028A90_CS_DONE, writeback_L2_flags,
                     sctx->chip_class <= GFX8 ? EOP_DST_SEL_MEM : EOP_DST_SEL_TC_L2,
                     writeback_L2_flags ? EOP_INT_SEL_SEND_DATA_AFTER_WR_CONFIRM : EOP_INT_SEL_NONE,
                     EOP_DATA_SEL_VALUE_32BIT, NULL,
                     sctx->compute_rewind_va | ((uint64_t)sctx->screen->info.address32_hi << 32),
                     REWIND_SIGNAL_BIT, /* signaling value for the REWIND packet */
                     SI_NOT_QUERY);

   sctx->compute_rewind_va = 0;
   sctx->compute_num_prims_in_batch = 0;
}

/* Dispatch a primitive discard compute shader. */
void si_dispatch_prim_discard_cs_and_draw(struct si_context *sctx,
                                          const struct pipe_draw_info *info, unsigned index_size,
                                          unsigned base_vertex, uint64_t input_indexbuf_va,
                                          unsigned input_indexbuf_num_elements)
{
   struct radeon_cmdbuf *gfx_cs = sctx->gfx_cs;
   struct radeon_cmdbuf *cs = sctx->prim_discard_compute_cs;
   unsigned num_prims_per_instance = u_decomposed_prims_for_vertices(info->mode, info->count);
   if (!num_prims_per_instance)
      return;

   unsigned num_prims = num_prims_per_instance * info->instance_count;
   unsigned vertices_per_prim, output_indexbuf_format, gfx10_output_indexbuf_format;

   switch (info->mode) {
   case PIPE_PRIM_TRIANGLES:
   case PIPE_PRIM_TRIANGLE_STRIP:
   case PIPE_PRIM_TRIANGLE_FAN:
      vertices_per_prim = 3;
      output_indexbuf_format = V_008F0C_BUF_DATA_FORMAT_32_32_32;
      gfx10_output_indexbuf_format = V_008F0C_IMG_FORMAT_32_32_32_UINT;
      break;
   default:
      unreachable("unsupported primitive type");
      return;
   }

   unsigned out_indexbuf_offset;
   uint64_t output_indexbuf_size = num_prims * vertices_per_prim * 4;
   bool first_dispatch = !sctx->prim_discard_compute_ib_initialized;

   /* Initialize the compute IB if it's empty. */
   if (!sctx->prim_discard_compute_ib_initialized) {
      /* 1) State initialization. */
      sctx->compute_gds_offset = 0;
      sctx->compute_ib_last_shader = NULL;

      if (sctx->last_ib_barrier_fence) {
         assert(!sctx->last_ib_barrier_buf);
         sctx->ws->cs_add_fence_dependency(gfx_cs, sctx->last_ib_barrier_fence,
                                           RADEON_DEPENDENCY_PARALLEL_COMPUTE_ONLY);
      }

      /* 2) IB initialization. */

      /* This needs to be done at the beginning of IBs due to possible
       * TTM buffer moves in the kernel.
       */
      if (sctx->chip_class >= GFX10) {
         radeon_emit(cs, PKT3(PKT3_ACQUIRE_MEM, 6, 0));
         radeon_emit(cs, 0);          /* CP_COHER_CNTL */
         radeon_emit(cs, 0xffffffff); /* CP_COHER_SIZE */
         radeon_emit(cs, 0xffffff);   /* CP_COHER_SIZE_HI */
         radeon_emit(cs, 0);          /* CP_COHER_BASE */
         radeon_emit(cs, 0);          /* CP_COHER_BASE_HI */
         radeon_emit(cs, 0x0000000A); /* POLL_INTERVAL */
         radeon_emit(cs, /* GCR_CNTL */
                     S_586_GLI_INV(V_586_GLI_ALL) | S_586_GLK_INV(1) | S_586_GLV_INV(1) |
                        S_586_GL1_INV(1) | S_586_GL2_INV(1) | S_586_GL2_WB(1) | S_586_GLM_INV(1) |
                        S_586_GLM_WB(1) | S_586_SEQ(V_586_SEQ_FORWARD));
      } else {
         si_emit_surface_sync(sctx, cs,
                              S_0085F0_TC_ACTION_ENA(1) | S_0085F0_TCL1_ACTION_ENA(1) |
                                 S_0301F0_TC_WB_ACTION_ENA(sctx->chip_class >= GFX8) |
                                 S_0085F0_SH_ICACHE_ACTION_ENA(1) |
                                 S_0085F0_SH_KCACHE_ACTION_ENA(1));
      }

      /* Restore the GDS prim restart counter if needed. */
      if (sctx->preserve_prim_restart_gds_at_flush) {
         si_cp_copy_data(sctx, cs, COPY_DATA_GDS, NULL, 4, COPY_DATA_SRC_MEM,
                         sctx->wait_mem_scratch, 4);
      }

      si_emit_initial_compute_regs(sctx, cs);

      radeon_set_sh_reg(
         cs, R_00B860_COMPUTE_TMPRING_SIZE,
         S_00B860_WAVES(sctx->scratch_waves) | S_00B860_WAVESIZE(0)); /* no scratch */

      /* Only 1D grids are launched. */
      radeon_set_sh_reg_seq(cs, R_00B820_COMPUTE_NUM_THREAD_Y, 2);
      radeon_emit(cs, S_00B820_NUM_THREAD_FULL(1) | S_00B820_NUM_THREAD_PARTIAL(1));
      radeon_emit(cs, S_00B824_NUM_THREAD_FULL(1) | S_00B824_NUM_THREAD_PARTIAL(1));

      radeon_set_sh_reg_seq(cs, R_00B814_COMPUTE_START_Y, 2);
      radeon_emit(cs, 0);
      radeon_emit(cs, 0);

      /* Disable ordered alloc for OA resources. */
      for (unsigned i = 0; i < 2; i++) {
         radeon_set_uconfig_reg_seq(cs, R_031074_GDS_OA_CNTL, 3);
         radeon_emit(cs, S_031074_INDEX(i));
         radeon_emit(cs, 0);
         radeon_emit(cs, S_03107C_ENABLE(0));
      }

      if (sctx->last_ib_barrier_buf) {
         assert(!sctx->last_ib_barrier_fence);
         radeon_add_to_buffer_list(sctx, gfx_cs, sctx->last_ib_barrier_buf, RADEON_USAGE_READ,
                                   RADEON_PRIO_FENCE);
         si_cp_wait_mem(sctx, cs,
                        sctx->last_ib_barrier_buf->gpu_address + sctx->last_ib_barrier_buf_offset,
                        1, 1, WAIT_REG_MEM_EQUAL);
      }

      sctx->prim_discard_compute_ib_initialized = true;
   }

   /* Allocate the output index buffer. */
   output_indexbuf_size = align(output_indexbuf_size, sctx->screen->info.tcc_cache_line_size);
   assert(sctx->index_ring_offset + output_indexbuf_size <= sctx->index_ring_size_per_ib);
   out_indexbuf_offset = sctx->index_ring_base + sctx->index_ring_offset;
   sctx->index_ring_offset += output_indexbuf_size;

   radeon_add_to_buffer_list(sctx, gfx_cs, sctx->index_ring, RADEON_USAGE_READWRITE,
                             RADEON_PRIO_SHADER_RW_BUFFER);
   uint64_t out_indexbuf_va = sctx->index_ring->gpu_address + out_indexbuf_offset;

   /* Prepare index buffer descriptors. */
   struct si_resource *indexbuf_desc = NULL;
   unsigned indexbuf_desc_offset;
   unsigned desc_size = 12 * 4;
   uint32_t *desc;

   u_upload_alloc(sctx->b.const_uploader, 0, desc_size, si_optimal_tcc_alignment(sctx, desc_size),
                  &indexbuf_desc_offset, (struct pipe_resource **)&indexbuf_desc, (void **)&desc);
   radeon_add_to_buffer_list(sctx, gfx_cs, indexbuf_desc, RADEON_USAGE_READ,
                             RADEON_PRIO_DESCRIPTORS);

   /* Input index buffer. */
   desc[0] = input_indexbuf_va;
   desc[1] = S_008F04_BASE_ADDRESS_HI(input_indexbuf_va >> 32) | S_008F04_STRIDE(index_size);
   desc[2] = input_indexbuf_num_elements * (sctx->chip_class == GFX8 ? index_size : 1);

   if (sctx->chip_class >= GFX10) {
      desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
                S_008F0C_FORMAT(index_size == 1 ? V_008F0C_IMG_FORMAT_8_UINT
                                : index_size == 2 ? V_008F0C_IMG_FORMAT_16_UINT
                                                  : V_008F0C_IMG_FORMAT_32_UINT) |
                S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_STRUCTURED_WITH_OFFSET) |
                S_008F0C_RESOURCE_LEVEL(1);
   } else {
      desc[3] =
         S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) | S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_UINT) |
         S_008F0C_DATA_FORMAT(index_size == 1 ? V_008F0C_BUF_DATA_FORMAT_8
                              : index_size == 2 ? V_008F0C_BUF_DATA_FORMAT_16
                                                : V_008F0C_BUF_DATA_FORMAT_32);
   }

   /* Output index buffer. */
   desc[4] = out_indexbuf_va;
   desc[5] =
      S_008F04_BASE_ADDRESS_HI(out_indexbuf_va >> 32) | S_008F04_STRIDE(vertices_per_prim * 4);
   desc[6] = num_prims * (sctx->chip_class == GFX8 ? vertices_per_prim * 4 : 1);

   if (sctx->chip_class >= GFX10) {
      desc[7] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) | S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
                S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) | S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_0) |
                S_008F0C_FORMAT(gfx10_output_indexbuf_format) |
                S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_STRUCTURED_WITH_OFFSET) |
                S_008F0C_RESOURCE_LEVEL(1);
   } else {
      desc[7] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) | S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
                S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) | S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_0) |
                S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_UINT) |
                S_008F0C_DATA_FORMAT(output_indexbuf_format);
   }

   /* Viewport state. */
   struct si_small_prim_cull_info cull_info;
   si_get_small_prim_cull_info(sctx, &cull_info);

   desc[8] = fui(cull_info.scale[0]);
   desc[9] = fui(cull_info.scale[1]);
   desc[10] = fui(cull_info.translate[0]);
   desc[11] = fui(cull_info.translate[1]);

   /* Better subpixel precision increases the efficiency of small
    * primitive culling. */
   unsigned num_samples = sctx->framebuffer.nr_samples;
   unsigned quant_mode = sctx->viewports.as_scissor[0].quant_mode;
   float small_prim_cull_precision;

   if (quant_mode == SI_QUANT_MODE_12_12_FIXED_POINT_1_4096TH)
      small_prim_cull_precision = num_samples / 4096.0;
   else if (quant_mode == SI_QUANT_MODE_14_10_FIXED_POINT_1_1024TH)
      small_prim_cull_precision = num_samples / 1024.0;
   else
      small_prim_cull_precision = num_samples / 256.0;
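
   /* For example (illustrative): 4 samples with the default 1/256th quant
    * mode give small_prim_cull_precision = 4/256, i.e. 1/64th of a pixel. */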

   /* Set user data SGPRs. */
   /* This can't be greater than 14 if we want the fastest launch rate. */
   unsigned user_sgprs = 13;

   uint64_t index_buffers_va = indexbuf_desc->gpu_address + indexbuf_desc_offset;
   unsigned vs_const_desc = si_const_and_shader_buffer_descriptors_idx(PIPE_SHADER_VERTEX);
   unsigned vs_sampler_desc = si_sampler_and_image_descriptors_idx(PIPE_SHADER_VERTEX);
   uint64_t vs_const_desc_va = sctx->descriptors[vs_const_desc].gpu_address;
   uint64_t vs_sampler_desc_va = sctx->descriptors[vs_sampler_desc].gpu_address;
   uint64_t vb_desc_va = sctx->vb_descriptors_buffer
                            ? sctx->vb_descriptors_buffer->gpu_address + sctx->vb_descriptors_offset
                            : 0;
   unsigned gds_offset, gds_size;
   struct si_fast_udiv_info32 num_prims_udiv = {};

   if (info->instance_count > 1)
      num_prims_udiv = si_compute_fast_udiv_info32(num_prims_per_instance, 31);

   /* Limitations on how these two are packed in the user SGPR. */
   assert(num_prims_udiv.post_shift < 32);
   assert(num_prims_per_instance < 1 << 27);

   si_resource_reference(&indexbuf_desc, NULL);

   bool primitive_restart = sctx->cs_prim_discard_state.current->key.opt.cs_primitive_restart;

   if (VERTEX_COUNTER_GDS_MODE == 1) {
      gds_offset = sctx->compute_gds_offset;
      gds_size = primitive_restart ? 8 : 4;
      sctx->compute_gds_offset += gds_size;

      /* Reset the counters in GDS for the first dispatch using WRITE_DATA.
       * The remainder of the GDS will be cleared after the dispatch packet
       * in parallel with compute shaders.
       */
      if (first_dispatch) {
         radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 2 + gds_size / 4, 0));
         radeon_emit(cs, S_370_DST_SEL(V_370_GDS) | S_370_WR_CONFIRM(1));
         radeon_emit(cs, gds_offset);
         radeon_emit(cs, 0);
         radeon_emit(cs, 0); /* value to write */
         if (gds_size == 8)
            radeon_emit(cs, 0);
      }
   }

   /* Set shader registers. */
   struct si_shader *shader = sctx->cs_prim_discard_state.current;

   if (shader != sctx->compute_ib_last_shader) {
      radeon_add_to_buffer_list(sctx, gfx_cs, shader->bo, RADEON_USAGE_READ,
                                RADEON_PRIO_SHADER_BINARY);
      uint64_t shader_va = shader->bo->gpu_address;

      assert(shader->config.scratch_bytes_per_wave == 0);
      assert(shader->config.num_vgprs * WAVES_PER_TG <= 256 * 4);

      radeon_set_sh_reg_seq(cs, R_00B830_COMPUTE_PGM_LO, 2);
      radeon_emit(cs, shader_va >> 8);
      radeon_emit(cs, S_00B834_DATA(shader_va >> 40));

      radeon_set_sh_reg_seq(cs, R_00B848_COMPUTE_PGM_RSRC1, 2);
      radeon_emit(
         cs, S_00B848_VGPRS((shader->config.num_vgprs - 1) / 4) |
                S_00B848_SGPRS(sctx->chip_class <= GFX9 ? (shader->config.num_sgprs - 1) / 8 : 0) |
                S_00B848_FLOAT_MODE(shader->config.float_mode) | S_00B848_DX10_CLAMP(1) |
                S_00B848_MEM_ORDERED(sctx->chip_class >= GFX10) |
                S_00B848_WGP_MODE(sctx->chip_class >= GFX10));
      radeon_emit(cs, S_00B84C_SCRATCH_EN(0 /* no scratch */) | S_00B84C_USER_SGPR(user_sgprs) |
                         S_00B84C_TGID_X_EN(1 /* only blockID.x is used */) |
                         S_00B84C_TG_SIZE_EN(VERTEX_COUNTER_GDS_MODE == 2 /* need the wave ID */) |
                         S_00B84C_TIDIG_COMP_CNT(0 /* only threadID.x is used */) |
                         S_00B84C_LDS_SIZE(shader->config.lds_size));

      radeon_set_sh_reg(cs, R_00B854_COMPUTE_RESOURCE_LIMITS,
                        ac_get_compute_resource_limits(&sctx->screen->info, WAVES_PER_TG,
                                                       MAX_WAVES_PER_SH, THREADGROUPS_PER_CU));
      sctx->compute_ib_last_shader = shader;
   }

   STATIC_ASSERT(SPLIT_PRIMS_PACKET_LEVEL % THREADGROUP_SIZE == 0);

   /* Big draw calls are split into smaller dispatches and draw packets. */
   for (unsigned start_prim = 0; start_prim < num_prims; start_prim += SPLIT_PRIMS_PACKET_LEVEL) {
      unsigned num_subdraw_prims;

      if (start_prim + SPLIT_PRIMS_PACKET_LEVEL < num_prims)
         num_subdraw_prims = SPLIT_PRIMS_PACKET_LEVEL;
      else
         num_subdraw_prims = num_prims - start_prim;

      /* Small dispatches are executed back to back until a specific primitive
       * count is reached. Then, a CS_DONE is inserted to signal the gfx IB
       * to start drawing the batch. This batching adds latency to the gfx IB,
       * but CS_DONE and REWIND are too slow.
       */
      if (sctx->compute_num_prims_in_batch + num_subdraw_prims > PRIMS_PER_BATCH)
         si_compute_signal_gfx(sctx);

      if (sctx->compute_num_prims_in_batch == 0) {
         assert((gfx_cs->gpu_address >> 32) == sctx->screen->info.address32_hi);
         sctx->compute_rewind_va = gfx_cs->gpu_address + (gfx_cs->current.cdw + 1) * 4;

         if (sctx->chip_class <= GFX7 || FORCE_REWIND_EMULATION) {
            radeon_emit(gfx_cs, PKT3(PKT3_NOP, 0, 0));
            radeon_emit(gfx_cs, 0);

            si_cp_wait_mem(sctx, gfx_cs,
                           sctx->compute_rewind_va |
                              (uint64_t)sctx->screen->info.address32_hi << 32,
                           REWIND_SIGNAL_BIT, REWIND_SIGNAL_BIT,
                           WAIT_REG_MEM_EQUAL | WAIT_REG_MEM_PFP);

            /* Use INDIRECT_BUFFER to chain to a different buffer
             * to discard the CP prefetch cache.
             */
            sctx->ws->cs_check_space(gfx_cs, 0, true);
         } else {
            radeon_emit(gfx_cs, PKT3(PKT3_REWIND, 0, 0));
            radeon_emit(gfx_cs, 0);
         }
      }

      sctx->compute_num_prims_in_batch += num_subdraw_prims;

      uint32_t count_va = gfx_cs->gpu_address + (gfx_cs->current.cdw + 4) * 4;
      uint64_t index_va = out_indexbuf_va + start_prim * 12;

      /* Emit the draw packet into the gfx IB. */
      radeon_emit(gfx_cs, PKT3(PKT3_DRAW_INDEX_2, 4, 0));
      radeon_emit(gfx_cs, num_prims * vertices_per_prim);
      radeon_emit(gfx_cs, index_va);
      radeon_emit(gfx_cs, index_va >> 32);
      radeon_emit(gfx_cs, 0);
      radeon_emit(gfx_cs, V_0287F0_DI_SRC_SEL_DMA);

      /* Continue with the compute IB. */
      if (start_prim == 0) {
         uint32_t gds_prim_restart_continue_bit = 0;

         if (sctx->preserve_prim_restart_gds_at_flush) {
            assert(primitive_restart && info->mode == PIPE_PRIM_TRIANGLE_STRIP);
            assert(start_prim < 1 << 31);
            gds_prim_restart_continue_bit = 1 << 31;
         }

         radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0, user_sgprs);
         radeon_emit(cs, index_buffers_va);
         radeon_emit(cs, VERTEX_COUNTER_GDS_MODE == 0
                            ? count_va
                            : VERTEX_COUNTER_GDS_MODE == 1
                               ? gds_offset
                               : start_prim | gds_prim_restart_continue_bit);
         radeon_emit(cs, start_prim + num_subdraw_prims - 1);
         radeon_emit(cs, count_va);
         radeon_emit(cs, vb_desc_va);
         radeon_emit(cs, vs_const_desc_va);
         radeon_emit(cs, vs_sampler_desc_va);
         radeon_emit(cs, base_vertex);
         radeon_emit(cs, info->start_instance);
         radeon_emit(cs, num_prims_udiv.multiplier);
         radeon_emit(cs, num_prims_udiv.post_shift | (num_prims_per_instance << 5));
         radeon_emit(cs, info->restart_index);
         /* small-prim culling precision (same as rasterizer precision = QUANT_MODE) */
         radeon_emit(cs, fui(small_prim_cull_precision));
      } else {
         assert(VERTEX_COUNTER_GDS_MODE == 2);
         /* Only update the SGPRs that changed. */
         radeon_set_sh_reg_seq(cs, R_00B904_COMPUTE_USER_DATA_1, 3);
         radeon_emit(cs, start_prim);
         radeon_emit(cs, start_prim + num_subdraw_prims - 1);
         radeon_emit(cs, count_va);
      }

      /* Set grid dimensions. */
      unsigned start_block = start_prim / THREADGROUP_SIZE;
      unsigned num_full_blocks = num_subdraw_prims / THREADGROUP_SIZE;
      unsigned partial_block_size = num_subdraw_prims % THREADGROUP_SIZE;

      radeon_set_sh_reg(cs, R_00B810_COMPUTE_START_X, start_block);
      radeon_set_sh_reg(cs, R_00B81C_COMPUTE_NUM_THREAD_X,
                        S_00B81C_NUM_THREAD_FULL(THREADGROUP_SIZE) |
                           S_00B81C_NUM_THREAD_PARTIAL(partial_block_size));

      radeon_emit(cs, PKT3(PKT3_DISPATCH_DIRECT, 3, 0) | PKT3_SHADER_TYPE_S(1));
      radeon_emit(cs, start_block + num_full_blocks + !!partial_block_size);
      radeon_emit(cs, 1);
      radeon_emit(cs, 1);
      radeon_emit(cs, S_00B800_COMPUTE_SHADER_EN(1) | S_00B800_PARTIAL_TG_EN(!!partial_block_size) |
                         S_00B800_ORDERED_APPEND_ENBL(VERTEX_COUNTER_GDS_MODE == 2) |
                         S_00B800_ORDER_MODE(0 /* launch in order */));

      /* This is only for unordered append. Ordered append writes this from
       * the shader.
       *
       * Note that EOP and EOS events are super slow, so emulating the event
       * in a shader is an important optimization.
       */
      if (VERTEX_COUNTER_GDS_MODE == 1) {
         si_cp_release_mem(sctx, cs, V_028A90_CS_DONE, 0,
                           sctx->chip_class <= GFX8 ? EOP_DST_SEL_MEM : EOP_DST_SEL_TC_L2,
                           EOP_INT_SEL_NONE, EOP_DATA_SEL_GDS, NULL,
                           count_va | ((uint64_t)sctx->screen->info.address32_hi << 32),
                           EOP_DATA_GDS(gds_offset / 4, 1), SI_NOT_QUERY);

         /* Now that compute shaders are running, clear the remainder of GDS. */
         if (first_dispatch) {
            unsigned offset = gds_offset + gds_size;
            si_cp_dma_clear_buffer(
               sctx, cs, NULL, offset, GDS_SIZE_UNORDERED - offset, 0,
               SI_CPDMA_SKIP_CHECK_CS_SPACE | SI_CPDMA_SKIP_GFX_SYNC | SI_CPDMA_SKIP_SYNC_BEFORE,
               SI_COHERENCY_NONE, L2_BYPASS);
         }
      }
      first_dispatch = false;

      assert(cs->current.cdw <= cs->current.max_dw);
      assert(gfx_cs->current.cdw <= gfx_cs->current.max_dw);
   }
}