/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "si_pipe.h"
#include "si_shader_internal.h"

#include "si_build_pm4.h"
#include "ac_llvm_cull.h"

#include "util/u_prim.h"
#include "util/u_suballoc.h"
#include "util/u_upload_mgr.h"
#include "util/fast_idiv_by_const.h"
/* Based on:
 * https://frostbite-wp-prd.s3.amazonaws.com/wp-content/uploads/2016/03/29204330/GDC_2016_Compute.pdf
 */
/* This file implements primitive culling using asynchronous compute.
 * It's written to be GL conformant.
 *
 * It takes a monolithic VS in LLVM IR returning gl_Position and invokes it
 * in a compute shader. The shader processes 1 primitive/thread by invoking
 * the VS for each vertex to get the positions, decomposes strips and fans
 * into triangles (if needed), eliminates primitive restart (if needed),
 * does (W<0) culling, face culling, view XY culling, zero-area and
 * small-primitive culling, and generates a new index buffer that doesn't
 * contain culled primitives.
 *
 * The index buffer is generated using the Ordered Count feature of GDS,
 * which is an atomic counter that is incremented in the wavefront launch
 * order, so that the original primitive order is preserved.
 *
 * Another GDS ordered counter is used to eliminate primitive restart indices.
 * If a restart index lands on an even thread ID, the compute shader has to flip
 * the primitive orientation of the whole following triangle strip. The primitive
 * orientation has to be correct after strip and fan decomposition for two-sided
 * shading to behave correctly. The decomposition also needs to be aware of
 * which vertex is the provoking vertex for flat shading to behave correctly.
 *
 * IB = a GPU command buffer
 *
 * Both the compute and gfx IBs run in parallel sort of like CE and DE.
 * The gfx IB has a CP barrier (REWIND packet) before a draw packet. REWIND
 * doesn't continue if its word isn't 0x80000000. Once compute shaders are
 * finished culling, the last wave will write the final primitive count from
 * GDS directly into the count word of the draw packet in the gfx IB, and
 * a CS_DONE event will signal the REWIND packet to continue. It's really
 * a direct draw with command buffer patching from the compute queue.
 *
 * The compute IB doesn't have to start when its corresponding gfx IB starts,
 * but can start sooner. The compute IB is signaled to start after the last
 * execution barrier in the *previous* gfx IB. This is handled as follows.
 * The kernel GPU scheduler starts the compute IB after the previous gfx IB has
 * started. The compute IB then waits (WAIT_REG_MEM) for a mid-IB fence that
 * represents the barrier in the previous gfx IB.
 *
 * Features:
 * - Triangle strips and fans are decomposed into an indexed triangle list.
 *   The decomposition differs based on the provoking vertex state.
 * - Instanced draws are converted into non-instanced draws for 16-bit indices.
 *   (InstanceID is stored in the high bits of VertexID and unpacked by VS)
 * - Primitive restart is fully supported with triangle strips, including
 *   correct primitive orientation across multiple waves. (restart indices
 *   reset primitive orientation)
 * - W<0 culling (W<0 is behind the viewer, sort of like near Z culling).
 * - Back face culling, incl. culling zero-area / degenerate primitives.
 * - View XY culling.
 * - View Z culling (disabled due to limited impact with perspective projection).
 * - Small primitive culling for all MSAA modes and all quant modes.
 *
 * The following are not implemented:
 * - ClipVertex/ClipDistance/CullDistance-based culling.
 * - Scissor culling.
 * - HiZ culling.
 *
 * Limitations (and unimplemented features that may be possible to implement):
 * - Only triangles, triangle strips, and triangle fans are supported.
 * - Primitive restart is only supported with triangle strips.
 * - Instancing and primitive restart can't be used together.
 * - Instancing is only supported with 16-bit indices and instance count <= 2^16.
 * - The instance divisor buffer is unavailable, so all divisors must be
 *   either 0 or 1.
 * - Multidraws where the vertex shader reads gl_DrawID are unsupported.
 * - No support for tessellation and geometry shaders.
 *   (patch elimination where tess factors are 0 would be possible to implement)
 * - The vertex shader must not contain memory stores.
 * - All VS resources must not have a write usage in the command buffer.
 *   (TODO: all shader buffers currently set the write usage)
 * - Bindless textures and images must not occur in the vertex shader.
 *
 * User data SGPR layout:
 *   INDEX_BUFFERS: pointer to constants
 *     0..3: input index buffer - typed buffer view
 *     4..7: output index buffer - typed buffer view
 *     8..11: viewport state - scale.xy, translate.xy
 *   VERTEX_COUNTER: counter address or first primitive ID
 *     - If unordered memory counter: address of "count" in the draw packet
 *       and is incremented atomically by the shader.
 *     - If unordered GDS counter: address of "count" in GDS starting from 0,
 *       must be initialized to 0 before the dispatch.
 *     - If ordered GDS counter: the primitive ID that should reset the vertex
 *       counter to 0 in GDS
 *   LAST_WAVE_PRIM_ID: the primitive ID that should write the final vertex
 *       count to memory if using GDS ordered append
 *   VERTEX_COUNT_ADDR: where the last wave should write the vertex count if
 *       using GDS ordered append
 *   VS.VERTEX_BUFFERS: same value as VS
 *   VS.CONST_AND_SHADER_BUFFERS: same value as VS
 *   VS.SAMPLERS_AND_IMAGES: same value as VS
 *   VS.BASE_VERTEX: same value as VS
 *   VS.START_INSTANCE: same value as VS
 *   NUM_PRIMS_UDIV_MULTIPLIER: For fast 31-bit division by the number of primitives
 *       per instance for instancing.
 *   NUM_PRIMS_UDIV_TERMS:
 *     - Bits [0:4]: "post_shift" for fast 31-bit division for instancing.
 *     - Bits [5:31]: The number of primitives per instance for computing the remainder.
 *   PRIMITIVE_RESTART_INDEX
 *   SMALL_PRIM_CULLING_PRECISION: Scale the primitive bounding box by this number.
 *
 * The code contains 3 codepaths:
 * - Unordered memory counter (for debugging, random primitive order, no primitive restart)
 * - Unordered GDS counter (for debugging, random primitive order, no primitive restart)
 * - Ordered GDS counter (it preserves the primitive order)
 *
 * How to test primitive restart (the most complicated part because it needs
 * to get the primitive orientation right):
 *   Set THREADGROUP_SIZE to 2 to exercise both intra-wave and inter-wave
 *   primitive orientation flips with small draw calls, which is what most tests use.
 *   You can also enable draw call splitting into draw calls with just 2 primitives.
 */
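
/* Illustrative sketch (not part of the driver): the fast 31-bit division
 * packed into NUM_PRIMS_UDIV_MULTIPLIER / NUM_PRIMS_UDIV_TERMS corresponds
 * roughly to this plain C:
 *
 *    unsigned post_shift = terms & 0x1f;
 *    unsigned prims_per_instance = terms >> 5;
 *    unsigned instance_id =
 *       (unsigned)(((uint64_t)prim_id * multiplier) >> 32) >> post_shift;
 *    unsigned prim_id_within_instance =
 *       prim_id - instance_id * prims_per_instance;
 */
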
/* At least 256 is needed for the fastest wave launch rate from compute queues
 * due to hw constraints. Nothing in the code needs more than 1 wave/threadgroup. */
#define THREADGROUP_SIZE		256 /* high numbers limit available VGPRs */
#define THREADGROUPS_PER_CU		1 /* TGs to launch on 1 CU before going onto the next, max 8 */
#define MAX_WAVES_PER_SH		0 /* no limit */
#define INDEX_STORES_USE_SLC		1 /* don't cache indices if L2 is full */
/* Don't cull Z. We already do (W < 0) culling for primitives behind the viewer. */
#define CULL_Z				0
/* 0 = unordered memory counter, 1 = unordered GDS counter, 2 = ordered GDS counter */
#define VERTEX_COUNTER_GDS_MODE		2
#define GDS_SIZE_UNORDERED		(4 * 1024) /* only for the unordered GDS counter */

/* Grouping compute dispatches for small draw calls: How many primitives from multiple
 * draw calls to process by compute before signaling the gfx IB. This reduces the number
 * of EOP events + REWIND packets, because they decrease performance. */
#define PRIMS_PER_BATCH			(512 * 1024)
/* Draw call splitting at the packet level. This allows signaling the gfx IB
 * for big draw calls sooner, but doesn't allow context flushes between packets.
 * Primitive restart is supported. Only implemented for ordered append. */
#define SPLIT_PRIMS_PACKET_LEVEL_VALUE	PRIMS_PER_BATCH
/* If there is not enough ring buffer space for the current IB, split draw calls into
 * this number of primitives, so that we can flush the context and get free ring space. */
#define SPLIT_PRIMS_DRAW_LEVEL		PRIMS_PER_BATCH

/* Derived values. */
#define WAVES_PER_TG			DIV_ROUND_UP(THREADGROUP_SIZE, 64)
#define SPLIT_PRIMS_PACKET_LEVEL	(VERTEX_COUNTER_GDS_MODE == 2 ? \
					 SPLIT_PRIMS_PACKET_LEVEL_VALUE : \
					 UINT_MAX & ~(THREADGROUP_SIZE - 1))

#define REWIND_SIGNAL_BIT		0x80000000
/* For emulating the rewind packet on CI. */
#define FORCE_REWIND_EMULATION		0
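
/* Illustrative sketch (not part of the driver): how a draw is released with
 * the ordered GDS counter. The last wave stores the final index count
 * straight into the draw packet, then a CS_DONE release_mem writes the
 * REWIND word:
 *
 *    *count_word  = final_num_indices;  // written by the last wave (VERTEX_COUNT_ADDR)
 *    *rewind_word = REWIND_SIGNAL_BIT;  // written by the CS_DONE release_mem
 *
 * The gfx CP blocks on the REWIND packet until its word equals 0x80000000,
 * then executes the already-patched draw packet.
 */
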
void si_initialize_prim_discard_tunables(struct si_context *sctx)
{
	sctx->prim_discard_vertex_count_threshold = UINT_MAX; /* disable */

	if (sctx->chip_class == GFX6 || /* SI support is not implemented */
	    !sctx->screen->info.has_gds_ordered_append ||
	    sctx->screen->debug_flags & DBG(NO_PD) ||
	    /* If aux_context == NULL, we are initializing aux_context right now. */
	    !sctx->screen->aux_context)
		return;

	/* TODO: enable this after the GDS kernel memory management is fixed */
	bool enable_on_pro_graphics_by_default = false;

	if (sctx->screen->debug_flags & DBG(ALWAYS_PD) ||
	    sctx->screen->debug_flags & DBG(PD) ||
	    (enable_on_pro_graphics_by_default &&
	     sctx->screen->info.is_pro_graphics &&
	     (sctx->family == CHIP_BONAIRE ||
	      sctx->family == CHIP_HAWAII ||
	      sctx->family == CHIP_TONGA ||
	      sctx->family == CHIP_FIJI ||
	      sctx->family == CHIP_POLARIS10 ||
	      sctx->family == CHIP_POLARIS11 ||
	      sctx->family == CHIP_VEGA10 ||
	      sctx->family == CHIP_VEGA20))) {
		sctx->prim_discard_vertex_count_threshold = 6000 * 3; /* 6K triangles */

		if (sctx->screen->debug_flags & DBG(ALWAYS_PD))
			sctx->prim_discard_vertex_count_threshold = 0; /* always enable */

		const uint32_t MB = 1024 * 1024;
		const uint64_t GB = 1024 * 1024 * 1024;

		/* The total size is double this per context.
		 * Greater numbers allow bigger gfx IBs.
		 */
		if (sctx->screen->info.vram_size <= 2 * GB)
			sctx->index_ring_size_per_ib = 64 * MB;
		else if (sctx->screen->info.vram_size <= 4 * GB)
			sctx->index_ring_size_per_ib = 128 * MB;
		else
			sctx->index_ring_size_per_ib = 256 * MB;
	}
}
/* Opcode can be "add" or "swap". */
static LLVMValueRef
si_build_ds_ordered_op(struct si_shader_context *ctx, const char *opcode,
		       LLVMValueRef m0, LLVMValueRef value, unsigned ordered_count_index,
		       bool release, bool done)
{
	LLVMValueRef args[] = {
		LLVMBuildIntToPtr(ctx->ac.builder, m0,
				  LLVMPointerType(ctx->i32, AC_ADDR_SPACE_GDS), ""),
		value,
		LLVMConstInt(ctx->i32, LLVMAtomicOrderingMonotonic, 0), /* ordering */
		ctx->i32_0, /* scope */
		ctx->i1false, /* volatile */
		LLVMConstInt(ctx->i32, ordered_count_index, 0),
		LLVMConstInt(ctx->i1, release, 0),
		LLVMConstInt(ctx->i1, done, 0),
	};

	char intrinsic[64];
	snprintf(intrinsic, sizeof(intrinsic), "llvm.amdgcn.ds.ordered.%s", opcode);
	return ac_build_intrinsic(&ctx->ac, intrinsic, ctx->i32, args, ARRAY_SIZE(args), 0);
}
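
/* Usage sketch (assumption, for illustration): the callers below use one
 * ordered-count index for the running primitive count and another for the
 * strip-orientation state. "swap" stores `value` and returns the previous
 * value; "add" returns the pre-add value. Both complete in wavefront launch
 * order, which is what makes the output index buffer order-preserving.
 */
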
static LLVMValueRef
si_expand_32bit_pointer(struct si_shader_context *ctx, LLVMValueRef ptr)
{
	uint64_t hi = (uint64_t)ctx->screen->info.address32_hi << 32;
	ptr = LLVMBuildZExt(ctx->ac.builder, ptr, ctx->i64, "");
	ptr = LLVMBuildOr(ctx->ac.builder, ptr, LLVMConstInt(ctx->i64, hi, 0), "");
	return LLVMBuildIntToPtr(ctx->ac.builder, ptr,
				 LLVMPointerType(ctx->i32, AC_ADDR_SPACE_GLOBAL), "");
}
struct si_thread0_section {
	struct si_shader_context *ctx;
	struct lp_build_if_state if_thread0;
	LLVMValueRef vgpr_result; /* a VGPR for the value on thread 0. */
	LLVMValueRef saved_exec;
};

/* Enter a section that only executes on thread 0. */
static void si_enter_thread0_section(struct si_shader_context *ctx,
				     struct si_thread0_section *section,
				     LLVMValueRef thread_id)
{
	section->ctx = ctx;
	section->vgpr_result = ac_build_alloca_undef(&ctx->ac, ctx->i32, "result0");

	/* This IF has 4 instructions:
	 *   v_and_b32_e32 v, 63, v         ; get the thread ID
	 *   v_cmp_eq_u32_e32 vcc, 0, v     ; thread ID == 0
	 *   s_and_saveexec_b64 s, vcc
	 *   s_cbranch_execz BB0_4
	 *
	 * It could just be s_and_saveexec_b64 s, 1.
	 */
	lp_build_if(&section->if_thread0, &ctx->gallivm,
		    LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ, thread_id,
				  ctx->i32_0, ""));
}

/* Exit a section that only executes on thread 0 and broadcast the result
 * to all threads. */
static void si_exit_thread0_section(struct si_thread0_section *section,
				    LLVMValueRef *result)
{
	struct si_shader_context *ctx = section->ctx;

	LLVMBuildStore(ctx->ac.builder, *result, section->vgpr_result);

	lp_build_endif(&section->if_thread0);

	/* Broadcast the result from thread 0 to all threads. */
	*result = ac_build_readlane(&ctx->ac,
				    LLVMBuildLoad(ctx->ac.builder, section->vgpr_result, ""), NULL);
}
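
/* Typical usage of the thread-0 helpers (sketch):
 *
 *    struct si_thread0_section section;
 *    si_enter_thread0_section(ctx, &section, thread_id);
 *    ...                                        // executed by thread 0 only
 *    si_exit_thread0_section(&section, &value); // broadcast value to all threads
 */
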
void si_build_prim_discard_compute_shader(struct si_shader_context *ctx)
{
	struct si_shader_key *key = &ctx->shader->key;
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef vs = ctx->main_fn;

	/* Always inline the VS function. */
	ac_add_function_attr(ctx->ac.context, vs, -1, AC_FUNC_ATTR_ALWAYSINLINE);
	LLVMSetLinkage(vs, LLVMPrivateLinkage);

	LLVMTypeRef const_desc_type;
	if (ctx->shader->selector->info.const_buffers_declared == 1 &&
	    ctx->shader->selector->info.shader_buffers_declared == 0)
		const_desc_type = ctx->f32;
	else
		const_desc_type = ctx->v4i32;

	struct si_function_info fninfo;
	si_init_function_info(&fninfo);

	LLVMValueRef index_buffers_and_constants, vertex_counter, vb_desc, const_desc;
	LLVMValueRef base_vertex, start_instance, block_id, local_id, ordered_wave_id;
	LLVMValueRef restart_index, vp_scale[2], vp_translate[2], smallprim_precision;
	LLVMValueRef num_prims_udiv_multiplier, num_prims_udiv_terms, sampler_desc;
	LLVMValueRef last_wave_prim_id, vertex_count_addr;

	add_arg_assign(&fninfo, ARG_SGPR, ac_array_in_const32_addr_space(ctx->v4i32),
		       &index_buffers_and_constants);
	add_arg_assign(&fninfo, ARG_SGPR, ctx->i32, &vertex_counter);
	add_arg_assign(&fninfo, ARG_SGPR, ctx->i32, &last_wave_prim_id);
	add_arg_assign(&fninfo, ARG_SGPR, ctx->i32, &vertex_count_addr);
	add_arg_assign(&fninfo, ARG_SGPR, ac_array_in_const32_addr_space(ctx->v4i32),
		       &vb_desc);
	add_arg_assign(&fninfo, ARG_SGPR, ac_array_in_const32_addr_space(const_desc_type),
		       &const_desc);
	add_arg_assign(&fninfo, ARG_SGPR, ac_array_in_const32_addr_space(ctx->v8i32),
		       &sampler_desc);
	add_arg_assign(&fninfo, ARG_SGPR, ctx->i32, &base_vertex);
	add_arg_assign(&fninfo, ARG_SGPR, ctx->i32, &start_instance);
	add_arg_assign(&fninfo, ARG_SGPR, ctx->i32, &num_prims_udiv_multiplier);
	add_arg_assign(&fninfo, ARG_SGPR, ctx->i32, &num_prims_udiv_terms);
	add_arg_assign(&fninfo, ARG_SGPR, ctx->i32, &restart_index);
	add_arg_assign(&fninfo, ARG_SGPR, ctx->f32, &smallprim_precision);

	/* Block ID and thread ID inputs. */
	add_arg_assign(&fninfo, ARG_SGPR, ctx->i32, &block_id);
	if (VERTEX_COUNTER_GDS_MODE == 2)
		add_arg_assign(&fninfo, ARG_SGPR, ctx->i32, &ordered_wave_id);
	add_arg_assign(&fninfo, ARG_VGPR, ctx->i32, &local_id);

	/* Create the compute shader function. */
	unsigned old_type = ctx->type;
	ctx->type = PIPE_SHADER_COMPUTE;
	si_create_function(ctx, "prim_discard_cs", NULL, 0, &fninfo, THREADGROUP_SIZE);
	ctx->type = old_type;

	if (VERTEX_COUNTER_GDS_MODE == 1) {
		ac_llvm_add_target_dep_function_attr(ctx->main_fn, "amdgpu-gds-size",
						     GDS_SIZE_UNORDERED);
	}

	/* Assemble parameters for VS. */
	LLVMValueRef vs_params[16];
	unsigned num_vs_params = 0;
	unsigned param_vertex_id, param_instance_id;

	vs_params[num_vs_params++] = LLVMGetUndef(LLVMTypeOf(LLVMGetParam(vs, 0))); /* RW_BUFFERS */
	vs_params[num_vs_params++] = LLVMGetUndef(LLVMTypeOf(LLVMGetParam(vs, 1))); /* BINDLESS */
	vs_params[num_vs_params++] = const_desc;
	vs_params[num_vs_params++] = sampler_desc;
	vs_params[num_vs_params++] = LLVMConstInt(ctx->i32,
						  S_VS_STATE_INDEXED(key->opt.cs_indexed), 0);
	vs_params[num_vs_params++] = base_vertex;
	vs_params[num_vs_params++] = start_instance;
	vs_params[num_vs_params++] = ctx->i32_0; /* DrawID */
	vs_params[num_vs_params++] = vb_desc;

	vs_params[(param_vertex_id = num_vs_params++)] = NULL; /* VertexID */
	vs_params[(param_instance_id = num_vs_params++)] = NULL; /* InstanceID */
	vs_params[num_vs_params++] = ctx->i32_0; /* unused (PrimID) */
	vs_params[num_vs_params++] = ctx->i32_0; /* unused */

	assert(num_vs_params <= ARRAY_SIZE(vs_params));
	assert(num_vs_params == LLVMCountParamTypes(LLVMGetElementType(LLVMTypeOf(vs))));

	/* Load descriptors. (load 8 dwords at once) */
	LLVMValueRef input_indexbuf, output_indexbuf, tmp, desc[8];

	tmp = LLVMBuildPointerCast(builder, index_buffers_and_constants,
				   ac_array_in_const32_addr_space(ctx->v8i32), "");
	tmp = ac_build_load_to_sgpr(&ctx->ac, tmp, ctx->i32_0);

	for (unsigned i = 0; i < 8; i++)
		desc[i] = ac_llvm_extract_elem(&ctx->ac, tmp, i);

	input_indexbuf = ac_build_gather_values(&ctx->ac, desc, 4);
	output_indexbuf = ac_build_gather_values(&ctx->ac, desc + 4, 4);

	/* Compute PrimID and InstanceID. */
	LLVMValueRef global_thread_id =
		ac_build_imad(&ctx->ac, block_id,
			      LLVMConstInt(ctx->i32, THREADGROUP_SIZE, 0), local_id);
	LLVMValueRef prim_id = global_thread_id; /* PrimID within an instance */
	LLVMValueRef instance_id = ctx->i32_0;

	if (key->opt.cs_instancing) {
		/* Unpack num_prims_udiv_terms. */
		LLVMValueRef post_shift = LLVMBuildAnd(builder, num_prims_udiv_terms,
						       LLVMConstInt(ctx->i32, 0x1f, 0), "");
		LLVMValueRef prims_per_instance = LLVMBuildLShr(builder, num_prims_udiv_terms,
								LLVMConstInt(ctx->i32, 5, 0), "");
		/* Divide the total prim_id by the number of prims per instance. */
		instance_id = ac_build_fast_udiv_u31_d_not_one(&ctx->ac, prim_id,
							       num_prims_udiv_multiplier,
							       post_shift);
		/* Compute the remainder. */
		prim_id = LLVMBuildSub(builder, prim_id,
				       LLVMBuildMul(builder, instance_id,
						    prims_per_instance, ""), "");
	}

	/* Generate indices (like a non-indexed draw call). */
	LLVMValueRef index[4] = {NULL, NULL, NULL, LLVMGetUndef(ctx->i32)};
	unsigned vertices_per_prim = 3;

	switch (key->opt.cs_prim_type) {
	case PIPE_PRIM_TRIANGLES:
		for (unsigned i = 0; i < 3; i++) {
			index[i] = ac_build_imad(&ctx->ac, prim_id,
						 LLVMConstInt(ctx->i32, 3, 0),
						 LLVMConstInt(ctx->i32, i, 0));
		}
		break;
	case PIPE_PRIM_TRIANGLE_STRIP:
		for (unsigned i = 0; i < 3; i++) {
			index[i] = LLVMBuildAdd(builder, prim_id,
						LLVMConstInt(ctx->i32, i, 0), "");
		}
		break;
	case PIPE_PRIM_TRIANGLE_FAN:
		/* Vertex 1 is first and vertex 2 is last. This will go to the hw clipper
		 * and rasterizer as a normal triangle, so we need to put the provoking
		 * vertex into the correct index variable and preserve orientation at the same time.
		 * gl_VertexID is preserved, because it's equal to the index.
		 */
		if (key->opt.cs_provoking_vertex_first) {
			index[0] = LLVMBuildAdd(builder, prim_id, LLVMConstInt(ctx->i32, 1, 0), "");
			index[1] = LLVMBuildAdd(builder, prim_id, LLVMConstInt(ctx->i32, 2, 0), "");
			index[2] = ctx->i32_0;
		} else {
			index[0] = ctx->i32_0;
			index[1] = LLVMBuildAdd(builder, prim_id, LLVMConstInt(ctx->i32, 1, 0), "");
			index[2] = LLVMBuildAdd(builder, prim_id, LLVMConstInt(ctx->i32, 2, 0), "");
		}
		break;
	default:
		unreachable("unexpected primitive type");
	}

	/* Fetch the indices from the input index buffer. */
	if (key->opt.cs_indexed) {
		for (unsigned i = 0; i < 3; i++) {
			index[i] = ac_build_buffer_load_format(&ctx->ac, input_indexbuf,
							       index[i], ctx->i32_0, 1,
							       false, true);
			index[i] = ac_to_integer(&ctx->ac, index[i]);
		}
	}

	/* Extract the ordered wave ID. */
	if (VERTEX_COUNTER_GDS_MODE == 2) {
		ordered_wave_id = LLVMBuildLShr(builder, ordered_wave_id,
						LLVMConstInt(ctx->i32, 6, 0), "");
		ordered_wave_id = LLVMBuildAnd(builder, ordered_wave_id,
					       LLVMConstInt(ctx->i32, 0xfff, 0), "");
	}
	LLVMValueRef thread_id =
		LLVMBuildAnd(builder, local_id, LLVMConstInt(ctx->i32, 63, 0), "");

	/* Every other triangle in a strip has a reversed vertex order, so we
	 * need to swap vertices of odd primitives to get the correct primitive
	 * orientation when converting triangle strips to triangles. Primitive
	 * restart complicates it, because a strip can start anywhere.
	 */
	LLVMValueRef prim_restart_accepted = ctx->i1true;

	if (key->opt.cs_prim_type == PIPE_PRIM_TRIANGLE_STRIP) {
		/* Without primitive restart, odd primitives have reversed orientation.
		 * Only primitive restart can flip it with respect to the first vertex
		 * of the draw call.
		 */
		LLVMValueRef first_is_odd = ctx->i1false;

		/* Handle primitive restart. */
		if (key->opt.cs_primitive_restart) {
			/* Get the GDS primitive restart continue flag and clear
			 * the flag in vertex_counter. This flag is used when the draw
			 * call was split and we need to load the primitive orientation
			 * flag from GDS for the first wave too.
			 */
			LLVMValueRef gds_prim_restart_continue =
				LLVMBuildLShr(builder, vertex_counter,
					      LLVMConstInt(ctx->i32, 31, 0), "");
			gds_prim_restart_continue =
				LLVMBuildTrunc(builder, gds_prim_restart_continue, ctx->i1, "");
			vertex_counter = LLVMBuildAnd(builder, vertex_counter,
						      LLVMConstInt(ctx->i32, 0x7fffffff, 0), "");

			LLVMValueRef index0_is_reset;

			for (unsigned i = 0; i < 3; i++) {
				LLVMValueRef not_reset = LLVMBuildICmp(builder, LLVMIntNE, index[i],
								       restart_index, "");
				if (i == 0)
					index0_is_reset = LLVMBuildNot(builder, not_reset, "");
				prim_restart_accepted = LLVMBuildAnd(builder, prim_restart_accepted,
								     not_reset, "");
			}

			/* If the previous waves flip the primitive orientation
			 * of the current triangle strip, it will be stored in GDS.
			 *
			 * Sometimes the correct orientation is not needed, in which case
			 * we don't need to execute this.
			 */
			if (key->opt.cs_need_correct_orientation && VERTEX_COUNTER_GDS_MODE == 2) {
				/* If there are reset indices in this wave, get the thread index
				 * where the most recent strip starts relative to each thread.
				 */
				LLVMValueRef preceding_threads_mask =
					LLVMBuildSub(builder,
						     LLVMBuildShl(builder, ctx->ac.i64_1,
								  LLVMBuildZExt(builder, thread_id, ctx->i64, ""), ""),
						     ctx->ac.i64_1, "");

				LLVMValueRef reset_threadmask = ac_get_i1_sgpr_mask(&ctx->ac, index0_is_reset);
				LLVMValueRef preceding_reset_threadmask =
					LLVMBuildAnd(builder, reset_threadmask, preceding_threads_mask, "");
				LLVMValueRef strip_start =
					ac_build_umsb(&ctx->ac, preceding_reset_threadmask, NULL);
				strip_start = LLVMBuildAdd(builder, strip_start, ctx->i32_1, "");

				/* This flips the orientation based on reset indices within this wave only. */
				first_is_odd = LLVMBuildTrunc(builder, strip_start, ctx->i1, "");

				LLVMValueRef last_strip_start, prev_wave_state, ret, tmp;
				LLVMValueRef is_first_wave, current_wave_resets_index;

				/* Get the thread index where the last strip starts in this wave.
				 *
				 * If the last strip doesn't start in this wave, the thread index
				 * will be 0.
				 *
				 * If the last strip starts in the next wave, the thread index will
				 * be 64.
				 */
				last_strip_start = ac_build_umsb(&ctx->ac, reset_threadmask, NULL);
				last_strip_start = LLVMBuildAdd(builder, last_strip_start, ctx->i32_1, "");

				struct si_thread0_section section;
				si_enter_thread0_section(ctx, &section, thread_id);

				/* This must be done in the thread 0 section, because
				 * we expect PrimID to be 0 for the whole first wave
				 * in this expression.
				 *
				 * NOTE: This will need to be different if we wanna support
				 * instancing with primitive restart.
				 */
				is_first_wave = LLVMBuildICmp(builder, LLVMIntEQ, prim_id, ctx->i32_0, "");
				is_first_wave = LLVMBuildAnd(builder, is_first_wave,
							     LLVMBuildNot(builder,
									  gds_prim_restart_continue, ""), "");
				current_wave_resets_index = LLVMBuildICmp(builder, LLVMIntNE,
									  last_strip_start, ctx->i32_0, "");

				ret = ac_build_alloca_undef(&ctx->ac, ctx->i32, "prev_state");

				/* Save the last strip start primitive index in GDS and read
				 * the value that previous waves stored.
				 *
				 * if (is_first_wave || current_wave_resets_strip)
				 *	// Read the value that previous waves stored and store a new one.
				 *	first_is_odd = ds.ordered.swap(last_strip_start);
				 * else
				 *	// Just read the value that previous waves stored.
				 *	first_is_odd = ds.ordered.add(0);
				 */
				struct lp_build_if_state if_overwrite_counter;
				lp_build_if(&if_overwrite_counter, &ctx->gallivm,
					    LLVMBuildOr(builder, is_first_wave,
							current_wave_resets_index, ""));

				/* The GDS address is always 0 with ordered append. */
				tmp = si_build_ds_ordered_op(ctx, "swap",
							     ordered_wave_id, last_strip_start,
							     1, true, false);
				LLVMBuildStore(builder, tmp, ret);

				lp_build_else(&if_overwrite_counter);

				/* Just read the value from GDS. */
				tmp = si_build_ds_ordered_op(ctx, "add",
							     ordered_wave_id, ctx->i32_0,
							     1, true, false);
				LLVMBuildStore(builder, tmp, ret);

				lp_build_endif(&if_overwrite_counter);

				prev_wave_state = LLVMBuildLoad(builder, ret, "");
				/* Ignore the return value if this is the first wave. */
				prev_wave_state = LLVMBuildSelect(builder, is_first_wave,
								  ctx->i32_0, prev_wave_state, "");
				si_exit_thread0_section(&section, &prev_wave_state);
				prev_wave_state = LLVMBuildTrunc(builder, prev_wave_state, ctx->i1, "");

				/* If the strip start appears to be on thread 0 for the current primitive
				 * (meaning the reset index is not present in this wave and might have
				 * appeared in previous waves), use the value from GDS to determine
				 * primitive orientation.
				 *
				 * If the strip start is in this wave for the current primitive, use
				 * the value from the current wave to determine primitive orientation.
				 */
				LLVMValueRef strip_start_is0 = LLVMBuildICmp(builder, LLVMIntEQ,
									     strip_start, ctx->i32_0, "");
				first_is_odd = LLVMBuildSelect(builder, strip_start_is0, prev_wave_state,
							       first_is_odd, "");
			}
		}
		/* prim_is_odd = (first_is_odd + current_is_odd) % 2. */
		LLVMValueRef prim_is_odd =
			LLVMBuildXor(builder, first_is_odd,
				     LLVMBuildTrunc(builder, thread_id, ctx->i1, ""), "");

		/* Determine the primitive orientation.
		 * Only swap the vertices that are not the provoking vertex. We need to keep
		 * the provoking vertex in place.
		 */
		if (key->opt.cs_provoking_vertex_first) {
			LLVMValueRef index1 = index[1];
			LLVMValueRef index2 = index[2];
			index[1] = LLVMBuildSelect(builder, prim_is_odd, index2, index1, "");
			index[2] = LLVMBuildSelect(builder, prim_is_odd, index1, index2, "");
		} else {
			LLVMValueRef index0 = index[0];
			LLVMValueRef index1 = index[1];
			index[0] = LLVMBuildSelect(builder, prim_is_odd, index1, index0, "");
			index[1] = LLVMBuildSelect(builder, prim_is_odd, index0, index1, "");
		}
	}

	/* Execute the vertex shader for each vertex to get vertex positions. */
	LLVMValueRef pos[3][4];
	for (unsigned i = 0; i < vertices_per_prim; i++) {
		vs_params[param_vertex_id] = index[i];
		vs_params[param_instance_id] = instance_id;

		LLVMValueRef ret = LLVMBuildCall(builder, vs, vs_params, num_vs_params, "");
		for (unsigned chan = 0; chan < 4; chan++)
			pos[i][chan] = LLVMBuildExtractValue(builder, ret, chan, "");
	}

	/* Divide XYZ by W. */
	for (unsigned i = 0; i < vertices_per_prim; i++) {
		for (unsigned chan = 0; chan < 3; chan++)
			pos[i][chan] = ac_build_fdiv(&ctx->ac, pos[i][chan], pos[i][3]);
	}

	/* Load the viewport state. */
	LLVMValueRef vp = ac_build_load_invariant(&ctx->ac, index_buffers_and_constants,
						  LLVMConstInt(ctx->i32, 2, 0));
	vp = LLVMBuildBitCast(builder, vp, ctx->v4f32, "");
	vp_scale[0] = ac_llvm_extract_elem(&ctx->ac, vp, 0);
	vp_scale[1] = ac_llvm_extract_elem(&ctx->ac, vp, 1);
	vp_translate[0] = ac_llvm_extract_elem(&ctx->ac, vp, 2);
	vp_translate[1] = ac_llvm_extract_elem(&ctx->ac, vp, 3);

	/* Do culling. */
	struct ac_cull_options options = {};
	options.cull_front = key->opt.cs_cull_front;
	options.cull_back = key->opt.cs_cull_back;
	options.cull_view_xy = true;
	options.cull_view_near_z = CULL_Z && key->opt.cs_cull_z;
	options.cull_view_far_z = CULL_Z && key->opt.cs_cull_z;
	options.cull_small_prims = true;
	options.cull_zero_area = true;
	options.cull_w = true;
	options.use_halfz_clip_space = key->opt.cs_halfz_clip_space;

	LLVMValueRef accepted =
		ac_cull_triangle(&ctx->ac, pos, prim_restart_accepted,
				 vp_scale, vp_translate, smallprim_precision,
				 &options);

	LLVMValueRef accepted_threadmask = ac_get_i1_sgpr_mask(&ctx->ac, accepted);

	/* Count the number of active threads by doing bitcount(accepted). */
	LLVMValueRef num_prims_accepted =
		ac_build_intrinsic(&ctx->ac, "llvm.ctpop.i64", ctx->i64,
				   &accepted_threadmask, 1, AC_FUNC_ATTR_READNONE);
	num_prims_accepted = LLVMBuildTrunc(builder, num_prims_accepted, ctx->i32, "");

	LLVMValueRef start;

	/* Execute atomic_add on the vertex count. */
	struct si_thread0_section section;
	si_enter_thread0_section(ctx, &section, thread_id);

	if (VERTEX_COUNTER_GDS_MODE == 0) {
		LLVMValueRef num_indices = LLVMBuildMul(builder, num_prims_accepted,
							LLVMConstInt(ctx->i32, vertices_per_prim, 0), "");
		vertex_counter = si_expand_32bit_pointer(ctx, vertex_counter);
		start = LLVMBuildAtomicRMW(builder, LLVMAtomicRMWBinOpAdd,
					   vertex_counter, num_indices,
					   LLVMAtomicOrderingMonotonic, false);
	} else if (VERTEX_COUNTER_GDS_MODE == 1) {
		LLVMValueRef num_indices = LLVMBuildMul(builder, num_prims_accepted,
							LLVMConstInt(ctx->i32, vertices_per_prim, 0), "");
		vertex_counter = LLVMBuildIntToPtr(builder, vertex_counter,
						   LLVMPointerType(ctx->i32, AC_ADDR_SPACE_GDS), "");
		start = LLVMBuildAtomicRMW(builder, LLVMAtomicRMWBinOpAdd,
					   vertex_counter, num_indices,
					   LLVMAtomicOrderingMonotonic, false);
	} else if (VERTEX_COUNTER_GDS_MODE == 2) {
		LLVMValueRef tmp_store = ac_build_alloca_undef(&ctx->ac, ctx->i32, "");

		/* If the draw call was split into multiple subdraws, each using
		 * a separate draw packet, we need to start counting from 0 for
		 * the first compute wave of the subdraw.
		 *
		 * vertex_counter contains the primitive ID of the first thread
		 * in the first wave.
		 *
		 * This is only correct with VERTEX_COUNTER_GDS_MODE == 2:
		 */
		LLVMValueRef is_first_wave =
			LLVMBuildICmp(builder, LLVMIntEQ, global_thread_id,
				      vertex_counter, "");

		/* Store the primitive count for ordered append, not vertex count.
		 * The idea is to avoid GDS initialization via CP DMA. The shader
		 * effectively stores the first count using "swap".
		 *
		 * if (first_wave) {
		 *	ds.ordered.swap(num_prims_accepted); // store the first primitive count
		 *	previous = 0;
		 * } else {
		 *	previous = ds.ordered.add(num_prims_accepted) // add the primitive count
		 * }
		 */
		struct lp_build_if_state if_first_wave;
		lp_build_if(&if_first_wave, &ctx->gallivm, is_first_wave);

		/* The GDS address is always 0 with ordered append. */
		si_build_ds_ordered_op(ctx, "swap", ordered_wave_id,
				       num_prims_accepted, 0, true, true);
		LLVMBuildStore(builder, ctx->i32_0, tmp_store);

		lp_build_else(&if_first_wave);

		LLVMBuildStore(builder,
			       si_build_ds_ordered_op(ctx, "add", ordered_wave_id,
						      num_prims_accepted, 0,
						      true, true),
			       tmp_store);

		lp_build_endif(&if_first_wave);

		start = LLVMBuildLoad(builder, tmp_store, "");
	}

	si_exit_thread0_section(&section, &start);

	/* Write the final vertex count to memory. An EOS/EOP event could do this,
	 * but those events are super slow and should be avoided if performance
	 * is a concern. Thanks to GDS ordered append, we can emulate a CS_DONE
	 * event here.
	 */
	if (VERTEX_COUNTER_GDS_MODE == 2) {
		struct lp_build_if_state if_last_wave;
		lp_build_if(&if_last_wave, &ctx->gallivm,
			    LLVMBuildICmp(builder, LLVMIntEQ, global_thread_id,
					  last_wave_prim_id, ""));
		LLVMValueRef count = LLVMBuildAdd(builder, start, num_prims_accepted, "");
		count = LLVMBuildMul(builder, count,
				     LLVMConstInt(ctx->i32, vertices_per_prim, 0), "");

		/* VI needs to disable caching, so that the CP can see the stored value.
		 * MTYPE=3 bypasses TC L2.
		 */
		if (ctx->screen->info.chip_class <= GFX8) {
			LLVMValueRef desc[] = {
				vertex_count_addr,
				LLVMConstInt(ctx->i32,
					     S_008F04_BASE_ADDRESS_HI(ctx->screen->info.address32_hi), 0),
				LLVMConstInt(ctx->i32, 4, 0),
				LLVMConstInt(ctx->i32, S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
						       S_008F0C_MTYPE(3 /* uncached */), 0),
			};
			LLVMValueRef rsrc = ac_build_gather_values(&ctx->ac, desc, 4);
			ac_build_buffer_store_dword(&ctx->ac, rsrc, count, 1, ctx->i32_0,
						    ctx->i32_0, 0, true, true, true, false);
		} else {
			LLVMBuildStore(builder, count,
				       si_expand_32bit_pointer(ctx, vertex_count_addr));
		}
		lp_build_endif(&if_last_wave);
	} else {
		/* For unordered modes that increment a vertex count instead of
		 * primitive count, convert it into the primitive index.
		 */
		start = LLVMBuildUDiv(builder, start,
				      LLVMConstInt(ctx->i32, vertices_per_prim, 0), "");
	}

	/* Now we need to store the indices of accepted primitives into
	 * the output index buffer.
	 */
	struct lp_build_if_state if_accepted;
	lp_build_if(&if_accepted, &ctx->gallivm, accepted);

	/* Get the number of bits set before the index of this thread. */
	LLVMValueRef prim_index = ac_build_mbcnt(&ctx->ac, accepted_threadmask);

	/* We have lowered instancing. Pack the instance ID into vertex ID. */
	if (key->opt.cs_instancing) {
		instance_id = LLVMBuildShl(builder, instance_id,
					   LLVMConstInt(ctx->i32, 16, 0), "");

		for (unsigned i = 0; i < vertices_per_prim; i++)
			index[i] = LLVMBuildOr(builder, index[i], instance_id, "");
	}

	if (VERTEX_COUNTER_GDS_MODE == 2) {
		/* vertex_counter contains the first primitive ID
		 * for this dispatch. If the draw call was split into
		 * multiple subdraws, the first primitive ID is > 0
		 * for subsequent subdraws. Each subdraw uses a different
		 * portion of the output index buffer. Offset the store
		 * vindex by the first primitive ID to get the correct
		 * store address for the subdraw.
		 */
		start = LLVMBuildAdd(builder, start, vertex_counter, "");
	}

	/* Write indices for accepted primitives. */
	LLVMValueRef buf_args[] = {
		ac_to_float(&ctx->ac, ac_build_expand_to_vec4(&ctx->ac,
							      ac_build_gather_values(&ctx->ac, index, 3), 3)),
		output_indexbuf,
		LLVMBuildAdd(builder, start, prim_index, ""),
		ctx->i32_0, /* voffset */
		ctx->i1true, /* glc */
		LLVMConstInt(ctx->i1, INDEX_STORES_USE_SLC, 0),
	};

	ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.buffer.store.format.v4f32",
			   ctx->voidt, buf_args, 6,
			   ac_get_store_intr_attribs(true));

	lp_build_endif(&if_accepted);

	LLVMBuildRetVoid(builder);
}
/* Return false if the shader isn't ready. */
static bool si_shader_select_prim_discard_cs(struct si_context *sctx,
					     const struct pipe_draw_info *info,
					     bool primitive_restart)
{
	struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
	struct si_shader_key key;

	/* Primitive restart needs ordered counters. */
	assert(!primitive_restart || VERTEX_COUNTER_GDS_MODE == 2);
	assert(!primitive_restart || info->instance_count == 1);

	memset(&key, 0, sizeof(key));
	si_shader_selector_key_vs(sctx, sctx->vs_shader.cso, &key, &key.part.vs.prolog);
	assert(!key.part.vs.prolog.instance_divisor_is_fetched);

	key.part.vs.prolog.unpack_instance_id_from_vertex_id = 0;
	key.opt.vs_as_prim_discard_cs = 1;
	key.opt.cs_prim_type = info->mode;
	key.opt.cs_indexed = info->index_size != 0;
	key.opt.cs_instancing = info->instance_count > 1;
	key.opt.cs_primitive_restart = primitive_restart;
	key.opt.cs_provoking_vertex_first = rs->provoking_vertex_first;

	/* Primitive restart with triangle strips needs to preserve primitive
	 * orientation for cases where front and back primitive orientation matters.
	 */
	if (primitive_restart) {
		struct si_shader_selector *ps = sctx->ps_shader.cso;

		key.opt.cs_need_correct_orientation =
			rs->cull_front != rs->cull_back ||
			ps->info.uses_frontface ||
			(rs->two_side && ps->info.colors_read);
	}

	if (rs->rasterizer_discard) {
		/* Just for performance testing and analysis of trivial bottlenecks.
		 * This should result in a very short compute shader. */
		key.opt.cs_cull_front = 1;
		key.opt.cs_cull_back = 1;
	} else {
		key.opt.cs_cull_front =
			sctx->viewports.y_inverted ? rs->cull_back : rs->cull_front;
		key.opt.cs_cull_back =
			sctx->viewports.y_inverted ? rs->cull_front : rs->cull_back;
	}

	if (!rs->depth_clamp_any && CULL_Z) {
		key.opt.cs_cull_z = 1;
		key.opt.cs_halfz_clip_space = rs->clip_halfz;
	}

	sctx->cs_prim_discard_state.cso = sctx->vs_shader.cso;
	sctx->cs_prim_discard_state.current = NULL;

	struct si_compiler_ctx_state compiler_state;
	compiler_state.compiler = &sctx->compiler;
	compiler_state.debug = sctx->debug;
	compiler_state.is_debug_context = sctx->is_debug;

	return si_shader_select_with_key(sctx->screen, &sctx->cs_prim_discard_state,
					 &compiler_state, &key, -1, true) == 0 &&
	       /* Disallow compute shaders using the scratch buffer. */
	       sctx->cs_prim_discard_state.current->config.scratch_bytes_per_wave == 0;
}
static bool si_initialize_prim_discard_cmdbuf(struct si_context *sctx)
{
	if (sctx->index_ring)
		return true;

	if (!sctx->prim_discard_compute_cs) {
		struct radeon_winsys *ws = sctx->ws;
		unsigned gds_size = VERTEX_COUNTER_GDS_MODE == 1 ? GDS_SIZE_UNORDERED :
				    VERTEX_COUNTER_GDS_MODE == 2 ? 8 : 0;
		unsigned num_oa_counters = VERTEX_COUNTER_GDS_MODE == 2 ? 2 : 0;

		if (gds_size) {
			sctx->gds = ws->buffer_create(ws, gds_size, 4,
						      RADEON_DOMAIN_GDS, 0);
			if (!sctx->gds)
				return false;

			ws->cs_add_buffer(sctx->gfx_cs, sctx->gds,
					  RADEON_USAGE_READWRITE, 0, 0);
		}
		if (num_oa_counters) {
			assert(gds_size);
			sctx->gds_oa = ws->buffer_create(ws, num_oa_counters,
							 1, RADEON_DOMAIN_OA, 0);
			if (!sctx->gds_oa)
				return false;

			ws->cs_add_buffer(sctx->gfx_cs, sctx->gds_oa,
					  RADEON_USAGE_READWRITE, 0, 0);
		}

		sctx->prim_discard_compute_cs =
			ws->cs_add_parallel_compute_ib(sctx->gfx_cs,
						       num_oa_counters > 0);
		if (!sctx->prim_discard_compute_cs)
			return false;
	}

	if (!sctx->index_ring) {
		sctx->index_ring =
			si_aligned_buffer_create(sctx->b.screen,
						 SI_RESOURCE_FLAG_UNMAPPABLE,
						 PIPE_USAGE_DEFAULT,
						 sctx->index_ring_size_per_ib * 2,
						 2 * 1024 * 1024);
		if (!sctx->index_ring)
			return false;
	}
	return true;
}

static bool si_check_ring_space(struct si_context *sctx, unsigned out_indexbuf_size)
{
	return sctx->index_ring_offset +
	       align(out_indexbuf_size, sctx->screen->info.tcc_cache_line_size) <=
	       sctx->index_ring_size_per_ib;
}
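
/* Sizing sketch: the output index buffer needs num_prims * 12 bytes
 * (3 dword indices per triangle), so e.g. a 100000-triangle draw consumes
 * 1.2 MB of the per-IB ring (64-256 MB depending on VRAM size), rounded up
 * to the TCC cache line size by si_check_ring_space().
 */
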
enum si_prim_discard_outcome
si_prepare_prim_discard_or_split_draw(struct si_context *sctx,
				      const struct pipe_draw_info *info,
				      bool primitive_restart)
{
	/* If the compute shader compilation isn't finished, this returns false. */
	if (!si_shader_select_prim_discard_cs(sctx, info, primitive_restart))
		return SI_PRIM_DISCARD_DISABLED;

	if (!si_initialize_prim_discard_cmdbuf(sctx))
		return SI_PRIM_DISCARD_DISABLED;

	struct radeon_cmdbuf *gfx_cs = sctx->gfx_cs;
	unsigned prim = info->mode;
	unsigned count = info->count;
	unsigned instance_count = info->instance_count;
	unsigned num_prims_per_instance = u_decomposed_prims_for_vertices(prim, count);
	unsigned num_prims = num_prims_per_instance * instance_count;
	unsigned out_indexbuf_size = num_prims * 12;
	bool ring_full = !si_check_ring_space(sctx, out_indexbuf_size);
	const unsigned split_prims_draw_level = SPLIT_PRIMS_DRAW_LEVEL;

	/* Split draws at the draw call level if the ring is full. This makes
	 * better use of the ring space.
	 */
	if (ring_full &&
	    num_prims > split_prims_draw_level &&
	    instance_count == 1 && /* TODO: support splitting instanced draws */
	    (1 << prim) & ((1 << PIPE_PRIM_TRIANGLES) |
			   (1 << PIPE_PRIM_TRIANGLE_STRIP))) {
		/* Split draws. */
		struct pipe_draw_info split_draw = *info;
		split_draw.primitive_restart = primitive_restart;

		unsigned base_start = split_draw.start;

		if (prim == PIPE_PRIM_TRIANGLES) {
			unsigned vert_count_per_subdraw = split_prims_draw_level * 3;
			assert(vert_count_per_subdraw < count);

			for (unsigned start = 0; start < count; start += vert_count_per_subdraw) {
				split_draw.start = base_start + start;
				split_draw.count = MIN2(count - start, vert_count_per_subdraw);

				sctx->b.draw_vbo(&sctx->b, &split_draw);
			}
		} else if (prim == PIPE_PRIM_TRIANGLE_STRIP) {
			/* No primitive pair can be split, because strips reverse orientation
			 * for odd primitives. */
			STATIC_ASSERT(split_prims_draw_level % 2 == 0);

			unsigned vert_count_per_subdraw = split_prims_draw_level;

			for (unsigned start = 0; start < count - 2; start += vert_count_per_subdraw) {
				split_draw.start = base_start + start;
				split_draw.count = MIN2(count - start, vert_count_per_subdraw + 2);

				sctx->b.draw_vbo(&sctx->b, &split_draw);

				if (start == 0 &&
				    primitive_restart &&
				    sctx->cs_prim_discard_state.current->key.opt.cs_need_correct_orientation)
					sctx->preserve_prim_restart_gds_at_flush = true;
			}
			sctx->preserve_prim_restart_gds_at_flush = false;
		}

		return SI_PRIM_DISCARD_DRAW_SPLIT;
	}

	/* Just quit if the draw call doesn't fit into the ring and can't be split. */
	if (out_indexbuf_size > sctx->index_ring_size_per_ib) {
		if (SI_PRIM_DISCARD_DEBUG)
			puts("PD failed: draw call too big, can't be split");
		return SI_PRIM_DISCARD_DISABLED;
	}

	unsigned num_subdraws = DIV_ROUND_UP(num_prims, SPLIT_PRIMS_PACKET_LEVEL);
	unsigned need_compute_dw = 11 /* shader */ + 34 /* first draw */ +
				   24 * (num_subdraws - 1) + /* subdraws */
				   20; /* leave some space at the end */
	unsigned need_gfx_dw = si_get_minimum_num_gfx_cs_dwords(sctx);

	if (sctx->chip_class <= GFX7 || FORCE_REWIND_EMULATION)
		need_gfx_dw += 9; /* NOP(2) + WAIT_REG_MEM(7), then chain */
	else
		need_gfx_dw += num_subdraws * 8; /* use REWIND(2) + DRAW(6) */

	if (ring_full ||
	    (VERTEX_COUNTER_GDS_MODE == 1 && sctx->compute_gds_offset + 8 > GDS_SIZE_UNORDERED) ||
	    !sctx->ws->cs_check_space(gfx_cs, need_gfx_dw, false)) {
		/* If the current IB is empty but the size is too small, add a NOP
		 * packet to force a flush and get a bigger IB.
		 */
		if (!radeon_emitted(gfx_cs, sctx->initial_gfx_cs_size) &&
		    gfx_cs->current.cdw + need_gfx_dw > gfx_cs->current.max_dw) {
			radeon_emit(gfx_cs, PKT3(PKT3_NOP, 0, 0));
			radeon_emit(gfx_cs, 0);
		}

		si_flush_gfx_cs(sctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
	}

	/* The compute IB is always chained, but we need to call cs_check_space to add more space. */
	struct radeon_cmdbuf *cs = sctx->prim_discard_compute_cs;
	bool compute_has_space = sctx->ws->cs_check_space(cs, need_compute_dw, false);
	assert(compute_has_space);
	assert(si_check_ring_space(sctx, out_indexbuf_size));
	return SI_PRIM_DISCARD_ENABLED;
}
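
/* Example (sketch): a 2M-primitive triangle draw with SPLIT_PRIMS_PACKET_LEVEL
 * == 512K becomes DIV_ROUND_UP(2M, 512K) = 4 subdraws; each subdraw gets its
 * own REWIND + DRAW_INDEX_2 pair in the gfx IB (8 dwords) and its own
 * dispatch in the compute IB.
 */
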
void si_compute_signal_gfx(struct si_context *sctx)
{
	struct radeon_cmdbuf *cs = sctx->prim_discard_compute_cs;
	unsigned writeback_L2_flags = 0;

	/* The writeback L2 flags vary with each chip generation. */
	/* CI needs to flush vertex indices to memory. */
	if (sctx->chip_class <= GFX7)
		writeback_L2_flags = EVENT_TC_WB_ACTION_ENA;
	else if (sctx->chip_class == GFX8 && VERTEX_COUNTER_GDS_MODE == 0)
		writeback_L2_flags = EVENT_TC_WB_ACTION_ENA | EVENT_TC_NC_ACTION_ENA;

	if (!sctx->compute_num_prims_in_batch)
		return;

	assert(sctx->compute_rewind_va);

	/* After the queued dispatches are done and vertex counts are written to
	 * the gfx IB, signal the gfx IB to continue. CP doesn't wait for
	 * the dispatches to finish, it only adds the CS_DONE event into the event
	 * queue.
	 */
	si_cp_release_mem(sctx, cs, V_028A90_CS_DONE, writeback_L2_flags,
			  sctx->chip_class <= GFX8 ? EOP_DST_SEL_MEM : EOP_DST_SEL_TC_L2,
			  writeback_L2_flags ? EOP_INT_SEL_SEND_DATA_AFTER_WR_CONFIRM :
					       EOP_INT_SEL_NONE,
			  EOP_DATA_SEL_VALUE_32BIT,
			  NULL,
			  sctx->compute_rewind_va |
			  ((uint64_t)sctx->screen->info.address32_hi << 32),
			  REWIND_SIGNAL_BIT, /* signaling value for the REWIND packet */
			  SI_NOT_QUERY);

	sctx->compute_rewind_va = 0;
	sctx->compute_num_prims_in_batch = 0;
}
/* Dispatch a primitive discard compute shader. */
void si_dispatch_prim_discard_cs_and_draw(struct si_context *sctx,
					  const struct pipe_draw_info *info,
					  unsigned index_size,
					  unsigned base_vertex,
					  uint64_t input_indexbuf_va,
					  unsigned input_indexbuf_num_elements)
{
	struct radeon_cmdbuf *gfx_cs = sctx->gfx_cs;
	struct radeon_cmdbuf *cs = sctx->prim_discard_compute_cs;
	unsigned num_prims_per_instance = u_decomposed_prims_for_vertices(info->mode, info->count);
	if (!num_prims_per_instance)
		return;

	unsigned num_prims = num_prims_per_instance * info->instance_count;
	unsigned vertices_per_prim, output_indexbuf_format;

	switch (info->mode) {
	case PIPE_PRIM_TRIANGLES:
	case PIPE_PRIM_TRIANGLE_STRIP:
	case PIPE_PRIM_TRIANGLE_FAN:
		vertices_per_prim = 3;
		output_indexbuf_format = V_008F0C_BUF_DATA_FORMAT_32_32_32;
		break;
	default:
		unreachable("unsupported primitive type");
		return;
	}

	unsigned out_indexbuf_offset;
	uint64_t output_indexbuf_size = num_prims * vertices_per_prim * 4;
	bool first_dispatch = !sctx->prim_discard_compute_ib_initialized;

	/* Initialize the compute IB if it's empty. */
	if (!sctx->prim_discard_compute_ib_initialized) {
		/* 1) State initialization. */
		sctx->compute_gds_offset = 0;
		sctx->compute_ib_last_shader = NULL;

		if (sctx->last_ib_barrier_fence) {
			assert(!sctx->last_ib_barrier_buf);
			sctx->ws->cs_add_fence_dependency(gfx_cs,
							  sctx->last_ib_barrier_fence,
							  RADEON_DEPENDENCY_PARALLEL_COMPUTE_ONLY);
		}

		/* 2) IB initialization. */
		/* Restore the GDS prim restart counter if needed. */
		if (sctx->preserve_prim_restart_gds_at_flush) {
			si_cp_copy_data(sctx, cs,
					COPY_DATA_GDS, NULL, 4,
					COPY_DATA_SRC_MEM, sctx->wait_mem_scratch, 4);
		}

		si_emit_initial_compute_regs(sctx, cs);

		radeon_set_sh_reg(cs, R_00B860_COMPUTE_TMPRING_SIZE,
				  S_00B860_WAVES(sctx->scratch_waves) |
				  S_00B860_WAVESIZE(0)); /* no scratch */

		/* Only 1D grids are launched. */
		radeon_set_sh_reg_seq(cs, R_00B820_COMPUTE_NUM_THREAD_Y, 2);
		radeon_emit(cs, S_00B820_NUM_THREAD_FULL(1) |
				S_00B820_NUM_THREAD_PARTIAL(1));
		radeon_emit(cs, S_00B824_NUM_THREAD_FULL(1) |
				S_00B824_NUM_THREAD_PARTIAL(1));

		radeon_set_sh_reg_seq(cs, R_00B814_COMPUTE_START_Y, 2);
		radeon_emit(cs, 0);
		radeon_emit(cs, 0);

		/* Disable ordered alloc for OA resources. */
		for (unsigned i = 0; i < 2; i++) {
			radeon_set_uconfig_reg_seq(cs, R_031074_GDS_OA_CNTL, 3);
			radeon_emit(cs, S_031074_INDEX(i));
			radeon_emit(cs, 0);
			radeon_emit(cs, S_03107C_ENABLE(0));
		}

		if (sctx->last_ib_barrier_buf) {
			assert(!sctx->last_ib_barrier_fence);
			radeon_add_to_buffer_list(sctx, gfx_cs, sctx->last_ib_barrier_buf,
						  RADEON_USAGE_READ, RADEON_PRIO_FENCE);
			si_cp_wait_mem(sctx, cs,
				       sctx->last_ib_barrier_buf->gpu_address +
				       sctx->last_ib_barrier_buf_offset, 1, 1,
				       WAIT_REG_MEM_EQUAL);
		}

		sctx->prim_discard_compute_ib_initialized = true;
	}

	/* Allocate the output index buffer. */
	output_indexbuf_size = align(output_indexbuf_size,
				     sctx->screen->info.tcc_cache_line_size);
	assert(sctx->index_ring_offset + output_indexbuf_size <= sctx->index_ring_size_per_ib);
	out_indexbuf_offset = sctx->index_ring_base + sctx->index_ring_offset;
	sctx->index_ring_offset += output_indexbuf_size;

	radeon_add_to_buffer_list(sctx, gfx_cs, sctx->index_ring, RADEON_USAGE_READWRITE,
				  RADEON_PRIO_SHADER_RW_BUFFER);
	uint64_t out_indexbuf_va = sctx->index_ring->gpu_address + out_indexbuf_offset;

	/* Prepare index buffer descriptors. */
	struct si_resource *indexbuf_desc = NULL;
	unsigned indexbuf_desc_offset;
	unsigned desc_size = 12 * 4;
	uint32_t *desc;

	u_upload_alloc(sctx->b.const_uploader, 0, desc_size,
		       si_optimal_tcc_alignment(sctx, desc_size),
		       &indexbuf_desc_offset, (struct pipe_resource**)&indexbuf_desc,
		       (void**)&desc);
	radeon_add_to_buffer_list(sctx, gfx_cs, indexbuf_desc, RADEON_USAGE_READ,
				  RADEON_PRIO_DESCRIPTORS);

	/* Input index buffer. */
	desc[0] = input_indexbuf_va;
	desc[1] = S_008F04_BASE_ADDRESS_HI(input_indexbuf_va >> 32) |
		  S_008F04_STRIDE(index_size);
	desc[2] = input_indexbuf_num_elements * (sctx->chip_class == GFX8 ? index_size : 1);
	desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
		  S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_UINT) |
		  S_008F0C_DATA_FORMAT(index_size == 1 ? V_008F0C_BUF_DATA_FORMAT_8 :
				       index_size == 2 ? V_008F0C_BUF_DATA_FORMAT_16 :
							 V_008F0C_BUF_DATA_FORMAT_32);

	/* Output index buffer. */
	desc[4] = out_indexbuf_va;
	desc[5] = S_008F04_BASE_ADDRESS_HI(out_indexbuf_va >> 32) |
		  S_008F04_STRIDE(vertices_per_prim * 4);
	desc[6] = num_prims * (sctx->chip_class == GFX8 ? vertices_per_prim * 4 : 1);
	desc[7] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
		  S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
		  S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
		  S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_0) |
		  S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_UINT) |
		  S_008F0C_DATA_FORMAT(output_indexbuf_format);

	/* Viewport state.
	 * This is needed by the small primitive culling, because it's done
	 * in screen space.
	 */
	float scale[2], translate[2];

	scale[0] = sctx->viewports.states[0].scale[0];
	scale[1] = sctx->viewports.states[0].scale[1];
	translate[0] = sctx->viewports.states[0].translate[0];
	translate[1] = sctx->viewports.states[0].translate[1];

	/* The viewport shouldn't flip the X axis for the small prim culling to work. */
	assert(-scale[0] + translate[0] <= scale[0] + translate[0]);

	/* If the Y axis is inverted (OpenGL default framebuffer), reverse it.
	 * This is because the viewport transformation inverts the clip space
	 * bounding box, so min becomes max, which breaks small primitive
	 * culling.
	 */
	if (sctx->viewports.y_inverted) {
		scale[1] = -scale[1];
		translate[1] = -translate[1];
	}

	/* Scale the framebuffer up, so that samples become pixels and small
	 * primitive culling is the same for all sample counts.
	 * This only works with the standard DX sample positions, because
	 * the samples are evenly spaced on both X and Y axes.
	 */
	unsigned num_samples = sctx->framebuffer.nr_samples;
	assert(num_samples >= 1);

	for (unsigned i = 0; i < 2; i++) {
		scale[i] *= num_samples;
		translate[i] *= num_samples;
	}

	desc[8] = fui(scale[0]);
	desc[9] = fui(scale[1]);
	desc[10] = fui(translate[0]);
	desc[11] = fui(translate[1]);

	/* Better subpixel precision increases the efficiency of small
	 * primitive culling. */
	unsigned quant_mode = sctx->viewports.as_scissor[0].quant_mode;
	float small_prim_cull_precision;

	if (quant_mode == SI_QUANT_MODE_12_12_FIXED_POINT_1_4096TH)
		small_prim_cull_precision = num_samples / 4096.0;
	else if (quant_mode == SI_QUANT_MODE_14_10_FIXED_POINT_1_1024TH)
		small_prim_cull_precision = num_samples / 1024.0;
	else
		small_prim_cull_precision = num_samples / 256.0;
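
	/* Example (sketch): at 4xMSAA with the default 1/256th quant mode, the
	 * shader receives 4 / 256.0 = 0.015625, i.e. the primitive bounding box
	 * is tested at sample granularity rather than pixel granularity.
	 */
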
	/* Set user data SGPRs. */
	/* This can't be greater than 14 if we want the fastest launch rate. */
	unsigned user_sgprs = 13;

	uint64_t index_buffers_va = indexbuf_desc->gpu_address + indexbuf_desc_offset;
	unsigned vs_const_desc = si_const_and_shader_buffer_descriptors_idx(PIPE_SHADER_VERTEX);
	unsigned vs_sampler_desc = si_sampler_and_image_descriptors_idx(PIPE_SHADER_VERTEX);
	uint64_t vs_const_desc_va = sctx->descriptors[vs_const_desc].gpu_address;
	uint64_t vs_sampler_desc_va = sctx->descriptors[vs_sampler_desc].gpu_address;
	uint64_t vb_desc_va = sctx->vb_descriptors_buffer ?
			      sctx->vb_descriptors_buffer->gpu_address +
			      sctx->vb_descriptors_offset : 0;
	unsigned gds_offset, gds_size;
	struct si_fast_udiv_info32 num_prims_udiv = {};

	if (info->instance_count > 1)
		num_prims_udiv = si_compute_fast_udiv_info32(num_prims_per_instance, 31);

	/* Limitations on how these two are packed in the user SGPR. */
	assert(num_prims_udiv.post_shift < 32);
	assert(num_prims_per_instance < 1 << 27);

	si_resource_reference(&indexbuf_desc, NULL);

	bool primitive_restart = sctx->cs_prim_discard_state.current->key.opt.cs_primitive_restart;

	if (VERTEX_COUNTER_GDS_MODE == 1) {
		gds_offset = sctx->compute_gds_offset;
		gds_size = primitive_restart ? 8 : 4;
		sctx->compute_gds_offset += gds_size;

		/* Reset the counters in GDS for the first dispatch using WRITE_DATA.
		 * The remainder of the GDS will be cleared after the dispatch packet
		 * in parallel with compute shaders.
		 */
		if (first_dispatch) {
			radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 2 + gds_size/4, 0));
			radeon_emit(cs, S_370_DST_SEL(V_370_GDS) | S_370_WR_CONFIRM(1));
			radeon_emit(cs, gds_offset);
			radeon_emit(cs, 0);
			radeon_emit(cs, 0); /* value to write */
			if (gds_size == 8)
				radeon_emit(cs, 0);
		}
	}

	/* Set shader registers. */
	struct si_shader *shader = sctx->cs_prim_discard_state.current;

	if (shader != sctx->compute_ib_last_shader) {
		radeon_add_to_buffer_list(sctx, gfx_cs, shader->bo, RADEON_USAGE_READ,
					  RADEON_PRIO_SHADER_BINARY);
		uint64_t shader_va = shader->bo->gpu_address;

		assert(shader->config.scratch_bytes_per_wave == 0);
		assert(shader->config.num_vgprs * WAVES_PER_TG <= 256 * 4);

		radeon_set_sh_reg_seq(cs, R_00B830_COMPUTE_PGM_LO, 2);
		radeon_emit(cs, shader_va >> 8);
		radeon_emit(cs, S_00B834_DATA(shader_va >> 40));

		radeon_set_sh_reg_seq(cs, R_00B848_COMPUTE_PGM_RSRC1, 2);
		radeon_emit(cs, S_00B848_VGPRS((shader->config.num_vgprs - 1) / 4) |
				S_00B848_SGPRS((shader->config.num_sgprs - 1) / 8) |
				S_00B848_FLOAT_MODE(shader->config.float_mode) |
				S_00B848_DX10_CLAMP(1));
		radeon_emit(cs, S_00B84C_SCRATCH_EN(0 /* no scratch */) |
				S_00B84C_USER_SGPR(user_sgprs) |
				S_00B84C_TGID_X_EN(1 /* only blockID.x is used */) |
				S_00B84C_TG_SIZE_EN(VERTEX_COUNTER_GDS_MODE == 2 /* need the wave ID */) |
				S_00B84C_TIDIG_COMP_CNT(0 /* only threadID.x is used */) |
				S_00B84C_LDS_SIZE(shader->config.lds_size));

		radeon_set_sh_reg(cs, R_00B854_COMPUTE_RESOURCE_LIMITS,
				  si_get_compute_resource_limits(sctx->screen, WAVES_PER_TG,
								 MAX_WAVES_PER_SH, THREADGROUPS_PER_CU));
		sctx->compute_ib_last_shader = shader;
	}

	STATIC_ASSERT(SPLIT_PRIMS_PACKET_LEVEL % THREADGROUP_SIZE == 0);

	/* Big draw calls are split into smaller dispatches and draw packets. */
	for (unsigned start_prim = 0; start_prim < num_prims; start_prim += SPLIT_PRIMS_PACKET_LEVEL) {
		unsigned num_subdraw_prims;

		if (start_prim + SPLIT_PRIMS_PACKET_LEVEL < num_prims)
			num_subdraw_prims = SPLIT_PRIMS_PACKET_LEVEL;
		else
			num_subdraw_prims = num_prims - start_prim;

		/* Small dispatches are executed back to back until a specific primitive
		 * count is reached. Then, a CS_DONE is inserted to signal the gfx IB
		 * to start drawing the batch. This batching adds latency to the gfx IB,
		 * but CS_DONE and REWIND are too slow.
		 */
		if (sctx->compute_num_prims_in_batch + num_subdraw_prims > PRIMS_PER_BATCH)
			si_compute_signal_gfx(sctx);

		if (sctx->compute_num_prims_in_batch == 0) {
			assert((gfx_cs->gpu_address >> 32) == sctx->screen->info.address32_hi);
			sctx->compute_rewind_va = gfx_cs->gpu_address + (gfx_cs->current.cdw + 1) * 4;

			if (sctx->chip_class <= GFX7 || FORCE_REWIND_EMULATION) {
				radeon_emit(gfx_cs, PKT3(PKT3_NOP, 0, 0));
				radeon_emit(gfx_cs, 0);

				si_cp_wait_mem(sctx, gfx_cs,
					       sctx->compute_rewind_va |
					       (uint64_t)sctx->screen->info.address32_hi << 32,
					       REWIND_SIGNAL_BIT, REWIND_SIGNAL_BIT,
					       WAIT_REG_MEM_EQUAL | WAIT_REG_MEM_PFP);

				/* Use INDIRECT_BUFFER to chain to a different buffer
				 * to discard the CP prefetch cache.
				 */
				sctx->ws->cs_check_space(gfx_cs, 0, true);
			} else {
				radeon_emit(gfx_cs, PKT3(PKT3_REWIND, 0, 0));
				radeon_emit(gfx_cs, 0);
			}
		}

		sctx->compute_num_prims_in_batch += num_subdraw_prims;

		uint32_t count_va = gfx_cs->gpu_address + (gfx_cs->current.cdw + 4) * 4;
		uint64_t index_va = out_indexbuf_va + start_prim * 12;

		/* Emit the draw packet into the gfx IB. */
		radeon_emit(gfx_cs, PKT3(PKT3_DRAW_INDEX_2, 4, 0));
		radeon_emit(gfx_cs, num_prims * vertices_per_prim);
		radeon_emit(gfx_cs, index_va);
		radeon_emit(gfx_cs, index_va >> 32);
		radeon_emit(gfx_cs, 0);
		radeon_emit(gfx_cs, V_0287F0_DI_SRC_SEL_DMA);

		/* Continue with the compute IB. */
		if (start_prim == 0) {
			uint32_t gds_prim_restart_continue_bit = 0;

			if (sctx->preserve_prim_restart_gds_at_flush) {
				assert(primitive_restart &&
				       info->mode == PIPE_PRIM_TRIANGLE_STRIP);
				assert(start_prim < 1 << 31);
				gds_prim_restart_continue_bit = 1 << 31;
			}

			radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0, user_sgprs);
			radeon_emit(cs, index_buffers_va);
			radeon_emit(cs,
				    VERTEX_COUNTER_GDS_MODE == 0 ? count_va :
				    VERTEX_COUNTER_GDS_MODE == 1 ? gds_offset :
								   start_prim |
								   gds_prim_restart_continue_bit);
			radeon_emit(cs, start_prim + num_subdraw_prims - 1);
			radeon_emit(cs, count_va);
			radeon_emit(cs, vb_desc_va);
			radeon_emit(cs, vs_const_desc_va);
			radeon_emit(cs, vs_sampler_desc_va);
			radeon_emit(cs, base_vertex);
			radeon_emit(cs, info->start_instance);
			radeon_emit(cs, num_prims_udiv.multiplier);
			radeon_emit(cs, num_prims_udiv.post_shift |
					(num_prims_per_instance << 5));
			radeon_emit(cs, info->restart_index);
			/* small-prim culling precision (same as rasterizer precision = QUANT_MODE) */
			radeon_emit(cs, fui(small_prim_cull_precision));
		} else {
			assert(VERTEX_COUNTER_GDS_MODE == 2);
			/* Only update the SGPRs that changed. */
			radeon_set_sh_reg_seq(cs, R_00B904_COMPUTE_USER_DATA_1, 3);
			radeon_emit(cs, start_prim);
			radeon_emit(cs, start_prim + num_subdraw_prims - 1);
			radeon_emit(cs, count_va);
		}

		/* Set grid dimensions. */
		unsigned start_block = start_prim / THREADGROUP_SIZE;
		unsigned num_full_blocks = num_subdraw_prims / THREADGROUP_SIZE;
		unsigned partial_block_size = num_subdraw_prims % THREADGROUP_SIZE;

		radeon_set_sh_reg(cs, R_00B810_COMPUTE_START_X, start_block);
		radeon_set_sh_reg(cs, R_00B81C_COMPUTE_NUM_THREAD_X,
				  S_00B81C_NUM_THREAD_FULL(THREADGROUP_SIZE) |
				  S_00B81C_NUM_THREAD_PARTIAL(partial_block_size));

		radeon_emit(cs, PKT3(PKT3_DISPATCH_DIRECT, 3, 0) |
				PKT3_SHADER_TYPE_S(1));
		radeon_emit(cs, start_block + num_full_blocks + !!partial_block_size);
		radeon_emit(cs, 1);
		radeon_emit(cs, 1);
		radeon_emit(cs, S_00B800_COMPUTE_SHADER_EN(1) |
				S_00B800_PARTIAL_TG_EN(!!partial_block_size) |
				S_00B800_ORDERED_APPEND_ENBL(VERTEX_COUNTER_GDS_MODE == 2) |
				S_00B800_ORDER_MODE(0 /* launch in order */));

		/* This is only for unordered append. Ordered append writes this from
		 * the shader.
		 *
		 * Note that EOP and EOS events are super slow, so emulating the event
		 * in a shader is an important optimization.
		 */
		if (VERTEX_COUNTER_GDS_MODE == 1) {
			si_cp_release_mem(sctx, cs, V_028A90_CS_DONE, 0,
					  sctx->chip_class <= GFX8 ? EOP_DST_SEL_MEM : EOP_DST_SEL_TC_L2,
					  EOP_INT_SEL_NONE,
					  EOP_DATA_SEL_GDS,
					  NULL,
					  count_va | ((uint64_t)sctx->screen->info.address32_hi << 32),
					  EOP_DATA_GDS(gds_offset / 4, 1),
					  SI_NOT_QUERY);

			/* Now that compute shaders are running, clear the remainder of GDS. */
			if (first_dispatch) {
				unsigned offset = gds_offset + gds_size;
				si_cp_dma_clear_buffer(sctx, cs, NULL, offset,
						       GDS_SIZE_UNORDERED - offset,
						       0,
						       SI_CPDMA_SKIP_CHECK_CS_SPACE |
						       SI_CPDMA_SKIP_GFX_SYNC |
						       SI_CPDMA_SKIP_SYNC_BEFORE,
						       SI_COHERENCY_NONE, L2_BYPASS);
			}
		}
		first_dispatch = false;

		assert(cs->current.cdw <= cs->current.max_dw);
		assert(gfx_cs->current.cdw <= gfx_cs->current.max_dw);
	}
}