/* For emulating the rewind packet on CI. */
#define FORCE_REWIND_EMULATION 0
-void si_initialize_prim_discard_tunables(struct si_context *sctx)
+/* Select primitive-discard compute-shader tunables.
+ *
+ * Refactored from a per-context (si_context) routine into a per-screen one so
+ * that it can run before any context exists; results are returned through the
+ * two out-parameters instead of being written into sctx fields.
+ *
+ * \param sscreen        screen whose chip class, GDS support, and debug flags
+ *                       gate the feature
+ * \param is_aux_context true when initializing the auxiliary context, which
+ *                       must not use prim discard (replaces the old
+ *                       "aux_context == NULL" check on the context path)
+ * \param prim_discard_vertex_count_threshold  out: UINT_MAX disables the
+ *                       feature, 0 always enables it, otherwise the minimum
+ *                       draw vertex count that triggers the compute path
+ * \param index_ring_size_per_ib  out: per-IB index ring size, scaled with
+ *                       VRAM size (only written when the feature is enabled)
+ */
+void si_initialize_prim_discard_tunables(struct si_screen *sscreen,
+					 bool is_aux_context,
+					 unsigned *prim_discard_vertex_count_threshold,
+					 unsigned *index_ring_size_per_ib)
{
-	sctx->prim_discard_vertex_count_threshold = UINT_MAX; /* disable */
+	*prim_discard_vertex_count_threshold = UINT_MAX; /* disable */
-	if (sctx->chip_class == GFX6 || /* SI support is not implemented */
-	    !sctx->screen->info.has_gds_ordered_append ||
-	    sctx->screen->debug_flags & DBG(NO_PD) ||
-	    /* If aux_context == NULL, we are initializing aux_context right now. */
-	    !sctx->screen->aux_context)
+	if (sscreen->info.chip_class == GFX6 || /* SI support is not implemented */
+	    !sscreen->info.has_gds_ordered_append ||
+	    sscreen->debug_flags & DBG(NO_PD) ||
+	    is_aux_context)
		return;
	/* TODO: enable this after the GDS kernel memory management is fixed */
	bool enable_on_pro_graphics_by_default = false;
-	if (sctx->screen->debug_flags & DBG(ALWAYS_PD) ||
-	    sctx->screen->debug_flags & DBG(PD) ||
+	if (sscreen->debug_flags & DBG(ALWAYS_PD) ||
+	    sscreen->debug_flags & DBG(PD) ||
	    (enable_on_pro_graphics_by_default &&
-	     sctx->screen->info.is_pro_graphics &&
-	     (sctx->family == CHIP_BONAIRE ||
-	      sctx->family == CHIP_HAWAII ||
-	      sctx->family == CHIP_TONGA ||
-	      sctx->family == CHIP_FIJI ||
-	      sctx->family == CHIP_POLARIS10 ||
-	      sctx->family == CHIP_POLARIS11 ||
-	      sctx->family == CHIP_VEGA10 ||
-	      sctx->family == CHIP_VEGA20))) {
-		sctx->prim_discard_vertex_count_threshold = 6000 * 3; /* 6K triangles */
-
-		if (sctx->screen->debug_flags & DBG(ALWAYS_PD))
-			sctx->prim_discard_vertex_count_threshold = 0; /* always enable */
+	     sscreen->info.is_pro_graphics &&
+	     (sscreen->info.family == CHIP_BONAIRE ||
+	      sscreen->info.family == CHIP_HAWAII ||
+	      sscreen->info.family == CHIP_TONGA ||
+	      sscreen->info.family == CHIP_FIJI ||
+	      sscreen->info.family == CHIP_POLARIS10 ||
+	      sscreen->info.family == CHIP_POLARIS11 ||
+	      sscreen->info.family == CHIP_VEGA10 ||
+	      sscreen->info.family == CHIP_VEGA20))) {
+		*prim_discard_vertex_count_threshold = 6000 * 3; /* 6K triangles */
+
+		if (sscreen->debug_flags & DBG(ALWAYS_PD))
+			*prim_discard_vertex_count_threshold = 0; /* always enable */
		const uint32_t MB = 1024 * 1024;
		const uint64_t GB = 1024 * 1024 * 1024;
		/* The total size is double this per context.
		 * Greater numbers allow bigger gfx IBs.
		 */
-		if (sctx->screen->info.vram_size <= 2 * GB)
-			sctx->index_ring_size_per_ib = 64 * MB;
-		else if (sctx->screen->info.vram_size <= 4 * GB)
-			sctx->index_ring_size_per_ib = 128 * MB;
+		if (sscreen->info.vram_size <= 2 * GB)
+			*index_ring_size_per_ib = 64 * MB;
+		else if (sscreen->info.vram_size <= 4 * GB)
+			*index_ring_size_per_ib = 128 * MB;
		else
-			sctx->index_ring_size_per_ib = 256 * MB;
+			*index_ring_size_per_ib = 256 * MB;
	}
}
ac_add_function_attr(ctx->ac.context, vs, -1, AC_FUNC_ATTR_ALWAYSINLINE);
LLVMSetLinkage(vs, LLVMPrivateLinkage);
- LLVMTypeRef const_desc_type;
+ enum ac_arg_type const_desc_type;
if (ctx->shader->selector->info.const_buffers_declared == 1 &&
ctx->shader->selector->info.shader_buffers_declared == 0)
- const_desc_type = ctx->f32;
+ const_desc_type = AC_ARG_CONST_FLOAT_PTR;
else
- const_desc_type = ctx->v4i32;
-
- struct si_function_info fninfo;
- si_init_function_info(&fninfo);
-
- LLVMValueRef index_buffers_and_constants, vertex_counter, vb_desc, const_desc;
- LLVMValueRef base_vertex, start_instance, block_id, local_id, ordered_wave_id;
- LLVMValueRef restart_index, vp_scale[2], vp_translate[2], smallprim_precision;
- LLVMValueRef num_prims_udiv_multiplier, num_prims_udiv_terms, sampler_desc;
- LLVMValueRef last_wave_prim_id, vertex_count_addr;
-
- add_arg_assign(&fninfo, ARG_SGPR, ac_array_in_const32_addr_space(ctx->v4i32),
- &index_buffers_and_constants);
- add_arg_assign(&fninfo, ARG_SGPR, ctx->i32, &vertex_counter);
- add_arg_assign(&fninfo, ARG_SGPR, ctx->i32, &last_wave_prim_id);
- add_arg_assign(&fninfo, ARG_SGPR, ctx->i32, &vertex_count_addr);
- add_arg_assign(&fninfo, ARG_SGPR, ac_array_in_const32_addr_space(ctx->v4i32),
- &vb_desc);
- add_arg_assign(&fninfo, ARG_SGPR, ac_array_in_const32_addr_space(const_desc_type),
- &const_desc);
- add_arg_assign(&fninfo, ARG_SGPR, ac_array_in_const32_addr_space(ctx->v8i32),
- &sampler_desc);
- add_arg_assign(&fninfo, ARG_SGPR, ctx->i32, &base_vertex);
- add_arg_assign(&fninfo, ARG_SGPR, ctx->i32, &start_instance);
- add_arg_assign(&fninfo, ARG_SGPR, ctx->i32, &num_prims_udiv_multiplier);
- add_arg_assign(&fninfo, ARG_SGPR, ctx->i32, &num_prims_udiv_terms);
- add_arg_assign(&fninfo, ARG_SGPR, ctx->i32, &restart_index);
- add_arg_assign(&fninfo, ARG_SGPR, ctx->f32, &smallprim_precision);
+ const_desc_type = AC_ARG_CONST_DESC_PTR;
+
+ memset(&ctx->args, 0, sizeof(ctx->args));
+
+ struct ac_arg param_index_buffers_and_constants, param_vertex_counter;
+ struct ac_arg param_vb_desc, param_const_desc;
+ struct ac_arg param_base_vertex, param_start_instance;
+ struct ac_arg param_block_id, param_local_id, param_ordered_wave_id;
+ struct ac_arg param_restart_index, param_smallprim_precision;
+ struct ac_arg param_num_prims_udiv_multiplier, param_num_prims_udiv_terms;
+ struct ac_arg param_sampler_desc, param_last_wave_prim_id, param_vertex_count_addr;
+
+ ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_CONST_DESC_PTR,
+ ¶m_index_buffers_and_constants);
+ ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, ¶m_vertex_counter);
+ ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, ¶m_last_wave_prim_id);
+ ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, ¶m_vertex_count_addr);
+ ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_CONST_DESC_PTR,
+ ¶m_vb_desc);
+ ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, const_desc_type,
+ ¶m_const_desc);
+ ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_CONST_IMAGE_PTR,
+ ¶m_sampler_desc);
+ ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, ¶m_base_vertex);
+ ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, ¶m_start_instance);
+ ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, ¶m_num_prims_udiv_multiplier);
+ ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, ¶m_num_prims_udiv_terms);
+ ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, ¶m_restart_index);
+ ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, ¶m_smallprim_precision);
/* Block ID and thread ID inputs. */
- add_arg_assign(&fninfo, ARG_SGPR, ctx->i32, &block_id);
+ ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, ¶m_block_id);
if (VERTEX_COUNTER_GDS_MODE == 2)
- add_arg_assign(&fninfo, ARG_SGPR, ctx->i32, &ordered_wave_id);
- add_arg_assign(&fninfo, ARG_VGPR, ctx->i32, &local_id);
+ ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, ¶m_ordered_wave_id);
+ ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, ¶m_local_id);
/* Create the compute shader function. */
unsigned old_type = ctx->type;
ctx->type = PIPE_SHADER_COMPUTE;
- si_create_function(ctx, "prim_discard_cs", NULL, 0, &fninfo, THREADGROUP_SIZE);
+ si_llvm_create_func(ctx, "prim_discard_cs", NULL, 0, THREADGROUP_SIZE);
ctx->type = old_type;
if (VERTEX_COUNTER_GDS_MODE == 1) {
vs_params[num_vs_params++] = LLVMGetUndef(LLVMTypeOf(LLVMGetParam(vs, 0))); /* RW_BUFFERS */
vs_params[num_vs_params++] = LLVMGetUndef(LLVMTypeOf(LLVMGetParam(vs, 1))); /* BINDLESS */
- vs_params[num_vs_params++] = const_desc;
- vs_params[num_vs_params++] = sampler_desc;
+ vs_params[num_vs_params++] = ac_get_arg(&ctx->ac, param_const_desc);
+ vs_params[num_vs_params++] = ac_get_arg(&ctx->ac, param_sampler_desc);
vs_params[num_vs_params++] = LLVMConstInt(ctx->i32,
S_VS_STATE_INDEXED(key->opt.cs_indexed), 0);
- vs_params[num_vs_params++] = base_vertex;
- vs_params[num_vs_params++] = start_instance;
+ vs_params[num_vs_params++] = ac_get_arg(&ctx->ac, param_base_vertex);
+ vs_params[num_vs_params++] = ac_get_arg(&ctx->ac, param_start_instance);
vs_params[num_vs_params++] = ctx->i32_0; /* DrawID */
- vs_params[num_vs_params++] = vb_desc;
+ vs_params[num_vs_params++] = ac_get_arg(&ctx->ac, param_vb_desc);
vs_params[(param_vertex_id = num_vs_params++)] = NULL; /* VertexID */
vs_params[(param_instance_id = num_vs_params++)] = NULL; /* InstanceID */
/* Load descriptors. (load 8 dwords at once) */
LLVMValueRef input_indexbuf, output_indexbuf, tmp, desc[8];
+ LLVMValueRef index_buffers_and_constants = ac_get_arg(&ctx->ac, param_index_buffers_and_constants);
tmp = LLVMBuildPointerCast(builder, index_buffers_and_constants,
ac_array_in_const32_addr_space(ctx->v8i32), "");
tmp = ac_build_load_to_sgpr(&ctx->ac, tmp, ctx->i32_0);
/* Compute PrimID and InstanceID. */
LLVMValueRef global_thread_id =
- ac_build_imad(&ctx->ac, block_id,
- LLVMConstInt(ctx->i32, THREADGROUP_SIZE, 0), local_id);
+ ac_build_imad(&ctx->ac, ac_get_arg(&ctx->ac, param_block_id),
+ LLVMConstInt(ctx->i32, THREADGROUP_SIZE, 0),
+ ac_get_arg(&ctx->ac, param_local_id));
LLVMValueRef prim_id = global_thread_id; /* PrimID within an instance */
LLVMValueRef instance_id = ctx->i32_0;
if (key->opt.cs_instancing) {
+ LLVMValueRef num_prims_udiv_terms =
+ ac_get_arg(&ctx->ac, param_num_prims_udiv_terms);
+ LLVMValueRef num_prims_udiv_multiplier =
+ ac_get_arg(&ctx->ac, param_num_prims_udiv_multiplier);
/* Unpack num_prims_udiv_terms. */
LLVMValueRef post_shift = LLVMBuildAnd(builder, num_prims_udiv_terms,
LLVMConstInt(ctx->i32, 0x1f, 0), "");
}
}
+ LLVMValueRef ordered_wave_id = ac_get_arg(&ctx->ac, param_ordered_wave_id);
+
/* Extract the ordered wave ID. */
if (VERTEX_COUNTER_GDS_MODE == 2) {
ordered_wave_id = LLVMBuildLShr(builder, ordered_wave_id,
LLVMConstInt(ctx->i32, 0xfff, 0), "");
}
LLVMValueRef thread_id =
- LLVMBuildAnd(builder, local_id, LLVMConstInt(ctx->i32, 63, 0), "");
+ LLVMBuildAnd(builder, ac_get_arg(&ctx->ac, param_local_id),
+ LLVMConstInt(ctx->i32, 63, 0), "");
/* Every other triangle in a strip has a reversed vertex order, so we
* need to swap vertices of odd primitives to get the correct primitive
* restart complicates it, because a strip can start anywhere.
*/
LLVMValueRef prim_restart_accepted = ctx->i1true;
+ LLVMValueRef vertex_counter = ac_get_arg(&ctx->ac, param_vertex_counter);
if (key->opt.cs_prim_type == PIPE_PRIM_TRIANGLE_STRIP) {
/* Without primitive restart, odd primitives have reversed orientation.
for (unsigned i = 0; i < 3; i++) {
LLVMValueRef not_reset = LLVMBuildICmp(builder, LLVMIntNE, index[i],
- restart_index, "");
+ ac_get_arg(&ctx->ac, param_restart_index),
+ "");
if (i == 0)
index0_is_reset = LLVMBuildNot(builder, not_reset, "");
prim_restart_accepted = LLVMBuildAnd(builder, prim_restart_accepted,
LLVMValueRef vp = ac_build_load_invariant(&ctx->ac, index_buffers_and_constants,
LLVMConstInt(ctx->i32, 2, 0));
vp = LLVMBuildBitCast(builder, vp, ctx->v4f32, "");
+ LLVMValueRef vp_scale[2], vp_translate[2];
vp_scale[0] = ac_llvm_extract_elem(&ctx->ac, vp, 0);
vp_scale[1] = ac_llvm_extract_elem(&ctx->ac, vp, 1);
vp_translate[0] = ac_llvm_extract_elem(&ctx->ac, vp, 2);
LLVMValueRef accepted =
ac_cull_triangle(&ctx->ac, pos, prim_restart_accepted,
- vp_scale, vp_translate, smallprim_precision,
+ vp_scale, vp_translate,
+ ac_get_arg(&ctx->ac, param_smallprim_precision),
&options);
LLVMValueRef accepted_threadmask = ac_get_i1_sgpr_mask(&ctx->ac, accepted);
if (VERTEX_COUNTER_GDS_MODE == 2) {
ac_build_ifcc(&ctx->ac,
LLVMBuildICmp(builder, LLVMIntEQ, global_thread_id,
- last_wave_prim_id, ""), 12606);
+ ac_get_arg(&ctx->ac, param_last_wave_prim_id), ""),
+ 12606);
LLVMValueRef count = LLVMBuildAdd(builder, start, num_prims_accepted, "");
count = LLVMBuildMul(builder, count,
LLVMConstInt(ctx->i32, vertices_per_prim, 0), "");
*/
if (ctx->screen->info.chip_class <= GFX8) {
LLVMValueRef desc[] = {
- vertex_count_addr,
+ ac_get_arg(&ctx->ac, param_vertex_count_addr),
LLVMConstInt(ctx->i32,
S_008F04_BASE_ADDRESS_HI(ctx->screen->info.address32_hi), 0),
LLVMConstInt(ctx->i32, 4, 0),
};
LLVMValueRef rsrc = ac_build_gather_values(&ctx->ac, desc, 4);
ac_build_buffer_store_dword(&ctx->ac, rsrc, count, 1, ctx->i32_0,
- ctx->i32_0, 0, ac_glc | ac_slc, false);
+ ctx->i32_0, 0, ac_glc | ac_slc);
} else {
LLVMBuildStore(builder, count,
- si_expand_32bit_pointer(ctx, vertex_count_addr));
+ si_expand_32bit_pointer(ctx,
+ ac_get_arg(&ctx->ac,
+ param_vertex_count_addr)));
}
ac_build_endif(&ctx->ac, 12606);
} else {
sctx->cs_prim_discard_state.cso = sctx->vs_shader.cso;
sctx->cs_prim_discard_state.current = NULL;
+ if (!sctx->compiler.passes)
+ si_init_compiler(sctx->screen, &sctx->compiler);
+
struct si_compiler_ctx_state compiler_state;
compiler_state.compiler = &sctx->compiler;
compiler_state.debug = sctx->debug;
SI_RESOURCE_FLAG_UNMAPPABLE,
PIPE_USAGE_DEFAULT,
sctx->index_ring_size_per_ib * 2,
- 2 * 1024 * 1024);
+ sctx->screen->info.pte_fragment_size);
if (!sctx->index_ring)
return false;
}
/* The compute IB is always chained, but we need to call cs_check_space to add more space. */
struct radeon_cmdbuf *cs = sctx->prim_discard_compute_cs;
- MAYBE_UNUSED bool compute_has_space = sctx->ws->cs_check_space(cs, need_compute_dw, false);
+ ASSERTED bool compute_has_space = sctx->ws->cs_check_space(cs, need_compute_dw, false);
assert(compute_has_space);
assert(si_check_ring_space(sctx, out_indexbuf_size));
return SI_PRIM_DISCARD_ENABLED;
S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_UINT) |
S_008F0C_DATA_FORMAT(output_indexbuf_format);
- /* Viewport state.
- * This is needed by the small primitive culling, because it's done
- * in screen space.
- */
- float scale[2], translate[2];
-
- scale[0] = sctx->viewports.states[0].scale[0];
- scale[1] = sctx->viewports.states[0].scale[1];
- translate[0] = sctx->viewports.states[0].translate[0];
- translate[1] = sctx->viewports.states[0].translate[1];
-
- /* The viewport shouldn't flip the X axis for the small prim culling to work. */
- assert(-scale[0] + translate[0] <= scale[0] + translate[0]);
-
- /* If the Y axis is inverted (OpenGL default framebuffer), reverse it.
- * This is because the viewport transformation inverts the clip space
- * bounding box, so min becomes max, which breaks small primitive
- * culling.
- */
- if (sctx->viewports.y_inverted) {
- scale[1] = -scale[1];
- translate[1] = -translate[1];
- }
-
- /* Scale the framebuffer up, so that samples become pixels and small
- * primitive culling is the same for all sample counts.
- * This only works with the standard DX sample positions, because
- * the samples are evenly spaced on both X and Y axes.
- */
- unsigned num_samples = sctx->framebuffer.nr_samples;
- assert(num_samples >= 1);
+ /* Viewport state. */
+ struct si_small_prim_cull_info cull_info;
+ si_get_small_prim_cull_info(sctx, &cull_info);
- for (unsigned i = 0; i < 2; i++) {
- scale[i] *= num_samples;
- translate[i] *= num_samples;
- }
-
- desc[8] = fui(scale[0]);
- desc[9] = fui(scale[1]);
- desc[10] = fui(translate[0]);
- desc[11] = fui(translate[1]);
+ desc[8] = fui(cull_info.scale[0]);
+ desc[9] = fui(cull_info.scale[1]);
+ desc[10] = fui(cull_info.translate[0]);
+ desc[11] = fui(cull_info.translate[1]);
/* Better subpixel precision increases the efficiency of small
* primitive culling. */
+ unsigned num_samples = sctx->framebuffer.nr_samples;
unsigned quant_mode = sctx->viewports.as_scissor[0].quant_mode;
float small_prim_cull_precision;