From: Marek Olšák
Date: Sat, 29 Jun 2019 00:53:15 +0000 (-0400)
Subject: ac: replace glc,slc with cache_policy for loads
X-Git-Url: https://git.libre-soc.org/?a=commitdiff_plain;h=8a71f60194de3447070138316d0e8b869fe669e2;p=mesa.git

ac: replace glc,slc with cache_policy for loads

cosmetic change

Reviewed-by: Pierre-Eric Pelloux-Prayer
---

diff --git a/src/amd/common/ac_llvm_build.c b/src/amd/common/ac_llvm_build.c
index eb143e3fa10..d009c3b0946 100644
--- a/src/amd/common/ac_llvm_build.c
+++ b/src/amd/common/ac_llvm_build.c
@@ -1107,13 +1107,11 @@ LLVMValueRef ac_build_load_to_sgpr_uint_wraparound(struct ac_llvm_context *ctx,
 	return ac_build_load_custom(ctx, base_ptr, index, true, true, false);
 }
 
-static LLVMValueRef get_cache_policy(struct ac_llvm_context *ctx,
-				     bool load, bool glc, bool slc)
+static unsigned get_load_cache_policy(struct ac_llvm_context *ctx,
+				      unsigned cache_policy)
 {
-	return LLVMConstInt(ctx->i32,
-			    (glc ? ac_glc : 0) +
-			    (slc ? ac_slc : 0) +
-			    (ctx->chip_class >= GFX10 && glc && load ? ac_dlc : 0), 0);
+	return cache_policy |
+	       (ctx->chip_class >= GFX10 && cache_policy & ac_glc ? ac_dlc : 0);
 }
 
 static void
@@ -1302,8 +1300,7 @@ ac_build_llvm7_buffer_load_common(struct ac_llvm_context *ctx,
 				  LLVMValueRef vindex,
 				  LLVMValueRef voffset,
 				  unsigned num_channels,
-				  bool glc,
-				  bool slc,
+				  unsigned cache_policy,
 				  bool can_speculate,
 				  bool use_format)
 {
@@ -1311,8 +1308,8 @@ ac_build_llvm7_buffer_load_common(struct ac_llvm_context *ctx,
 		LLVMBuildBitCast(ctx->builder, rsrc, ctx->v4i32, ""),
 		vindex ? vindex : ctx->i32_0,
 		voffset,
-		LLVMConstInt(ctx->i1, glc, 0),
-		LLVMConstInt(ctx->i1, slc, 0)
+		LLVMConstInt(ctx->i1, !!(cache_policy & ac_glc), 0),
+		LLVMConstInt(ctx->i1, !!(cache_policy & ac_slc), 0)
 	};
 	unsigned func = CLAMP(num_channels, 1, 3) - 1;
 
@@ -1341,8 +1338,7 @@ ac_build_llvm8_buffer_load_common(struct ac_llvm_context *ctx,
 				  LLVMValueRef soffset,
 				  unsigned num_channels,
 				  LLVMTypeRef channel_type,
-				  bool glc,
-				  bool slc,
+				  unsigned cache_policy,
 				  bool can_speculate,
 				  bool use_format,
 				  bool structurized)
@@ -1354,7 +1350,7 @@ ac_build_llvm8_buffer_load_common(struct ac_llvm_context *ctx,
 	args[idx++] = vindex ? vindex : ctx->i32_0;
 	args[idx++] = voffset ? voffset : ctx->i32_0;
 	args[idx++] = soffset ? soffset : ctx->i32_0;
-	args[idx++] = get_cache_policy(ctx, true, glc, slc);
+	args[idx++] = LLVMConstInt(ctx->i32, get_load_cache_policy(ctx, cache_policy), 0);
 	unsigned func = !ac_has_vec3_support(ctx->chip_class, use_format) && num_channels == 3 ? 4 : num_channels;
 	const char *indexing_kind = structurized ? "struct" : "raw";
 	char name[256], type_name[8];
@@ -1382,8 +1378,7 @@ ac_build_buffer_load(struct ac_llvm_context *ctx,
 		     LLVMValueRef voffset,
 		     LLVMValueRef soffset,
 		     unsigned inst_offset,
-		     unsigned glc,
-		     unsigned slc,
+		     unsigned cache_policy,
 		     bool can_speculate,
 		     bool allow_smem)
 {
@@ -1393,8 +1388,8 @@ ac_build_buffer_load(struct ac_llvm_context *ctx,
 	if (soffset)
 		offset = LLVMBuildAdd(ctx->builder, offset, soffset, "");
 
-	if (allow_smem && !slc &&
-	    (!glc || (HAVE_LLVM >= 0x0800 && ctx->chip_class >= GFX8))) {
+	if (allow_smem && !(cache_policy & ac_slc) &&
+	    (!(cache_policy & ac_glc) || (HAVE_LLVM >= 0x0800 && ctx->chip_class >= GFX8))) {
 		assert(vindex == NULL);
 
 		LLVMValueRef result[8];
@@ -1411,7 +1406,7 @@ ac_build_buffer_load(struct ac_llvm_context *ctx,
 			LLVMValueRef args[3] = {
 				rsrc,
 				offset,
-				get_cache_policy(ctx, true, glc, false),
+				LLVMConstInt(ctx->i32, get_load_cache_policy(ctx, cache_policy), 0),
 			};
 			result[i] = ac_build_intrinsic(ctx, intrname,
 						       ctx->f32, args, num_args,
@@ -1430,13 +1425,13 @@ ac_build_buffer_load(struct ac_llvm_context *ctx,
 		return ac_build_llvm8_buffer_load_common(ctx, rsrc, vindex,
 							 offset, ctx->i32_0,
 							 num_channels, ctx->f32,
-							 glc, slc,
+							 cache_policy,
 							 can_speculate, false,
 							 false);
 	}
 
 	return ac_build_llvm7_buffer_load_common(ctx, rsrc, vindex, offset,
-						 num_channels, glc, slc,
+						 num_channels, cache_policy,
 						 can_speculate, false);
 }
 
@@ -1445,17 +1440,16 @@ LLVMValueRef ac_build_buffer_load_format(struct ac_llvm_context *ctx,
 					 LLVMValueRef vindex,
 					 LLVMValueRef voffset,
 					 unsigned num_channels,
-					 bool glc,
+					 unsigned cache_policy,
 					 bool can_speculate)
 {
 	if (HAVE_LLVM >= 0x800) {
 		return ac_build_llvm8_buffer_load_common(ctx, rsrc, vindex, voffset,
 							 ctx->i32_0, num_channels, ctx->f32,
-							 glc, false,
-							 can_speculate, true, true);
+							 cache_policy, can_speculate, true, true);
 	}
 	return ac_build_llvm7_buffer_load_common(ctx, rsrc, vindex, voffset,
-						 num_channels, glc, false,
+						 num_channels, cache_policy,
 						 can_speculate, true);
 }
 
@@ -1464,14 +1458,13 @@ LLVMValueRef ac_build_buffer_load_format_gfx9_safe(struct ac_llvm_context *ctx,
 						   LLVMValueRef vindex,
 						   LLVMValueRef voffset,
 						   unsigned num_channels,
-						   bool glc,
+						   unsigned cache_policy,
 						   bool can_speculate)
 {
 	if (HAVE_LLVM >= 0x800) {
 		return ac_build_llvm8_buffer_load_common(ctx, rsrc, vindex, voffset,
 							 ctx->i32_0, num_channels, ctx->f32,
-							 glc, false,
-							 can_speculate, true, true);
+							 cache_policy, can_speculate, true, true);
 	}
 	LLVMValueRef elem_count = LLVMBuildExtractElement(ctx->builder, rsrc,
 							  LLVMConstInt(ctx->i32, 2, 0), "");
@@ -1486,7 +1479,7 @@ LLVMValueRef ac_build_buffer_load_format_gfx9_safe(struct ac_llvm_context *ctx,
 							  LLVMConstInt(ctx->i32, 2, 0), "");
 
 	return ac_build_llvm7_buffer_load_common(ctx, new_rsrc, vindex, voffset,
-						 num_channels, glc, false,
+						 num_channels, cache_policy,
 						 can_speculate, true);
 }
 
@@ -1542,8 +1535,7 @@ ac_build_llvm8_tbuffer_load(struct ac_llvm_context *ctx,
 			    unsigned num_channels,
 			    unsigned dfmt,
 			    unsigned nfmt,
-			    bool glc,
-			    bool slc,
+			    unsigned cache_policy,
 			    bool can_speculate,
 			    bool structurized)
 {
@@ -1555,7 +1547,7 @@ ac_build_llvm8_tbuffer_load(struct ac_llvm_context *ctx,
 	args[idx++] = voffset ? voffset : ctx->i32_0;
 	args[idx++] = soffset ? soffset : ctx->i32_0;
 	args[idx++] = LLVMConstInt(ctx->i32, ac_get_tbuffer_format(ctx, dfmt, nfmt), 0);
-	args[idx++] = get_cache_policy(ctx, true, glc, slc);
+	args[idx++] = LLVMConstInt(ctx->i32, get_load_cache_policy(ctx, cache_policy), 0);
 	unsigned func = !ac_has_vec3_support(ctx->chip_class, true) && num_channels == 3 ? 4 : num_channels;
 	const char *indexing_kind = structurized ? "struct" : "raw";
 	char name[256], type_name[8];
@@ -1580,8 +1572,7 @@ ac_build_tbuffer_load(struct ac_llvm_context *ctx,
 		      unsigned num_channels,
 		      unsigned dfmt,
 		      unsigned nfmt,
-		      bool glc,
-		      bool slc,
+		      unsigned cache_policy,
 		      bool can_speculate,
 		      bool structurized) /* only matters for LLVM 8+ */
 {
@@ -1590,7 +1581,7 @@ ac_build_tbuffer_load(struct ac_llvm_context *ctx,
 
 		return ac_build_llvm8_tbuffer_load(ctx, rsrc, vindex, voffset,
 						   soffset, num_channels,
-						   dfmt, nfmt, glc, slc,
+						   dfmt, nfmt, cache_policy,
 						   can_speculate, structurized);
 	}
 
@@ -1602,8 +1593,8 @@ ac_build_tbuffer_load(struct ac_llvm_context *ctx,
 		immoffset,
 		LLVMConstInt(ctx->i32, dfmt, false),
 		LLVMConstInt(ctx->i32, nfmt, false),
-		LLVMConstInt(ctx->i1, glc, false),
-		LLVMConstInt(ctx->i1, slc, false),
+		LLVMConstInt(ctx->i1, !!(cache_policy & ac_glc), false),
+		LLVMConstInt(ctx->i1, !!(cache_policy & ac_slc), false),
 	};
 	unsigned func = CLAMP(num_channels, 1, 3) - 1;
 	LLVMTypeRef types[] = {ctx->i32, ctx->v2i32, ctx->v4i32};
@@ -1627,13 +1618,12 @@ ac_build_struct_tbuffer_load(struct ac_llvm_context *ctx,
 			     unsigned num_channels,
 			     unsigned dfmt,
 			     unsigned nfmt,
-			     bool glc,
-			     bool slc,
+			     unsigned cache_policy,
 			     bool can_speculate)
 {
 	return ac_build_tbuffer_load(ctx, rsrc, vindex, voffset, soffset,
-				     immoffset, num_channels, dfmt, nfmt, glc,
-				     slc, can_speculate, true);
+				     immoffset, num_channels, dfmt, nfmt,
+				     cache_policy, can_speculate, true);
 }
 
 LLVMValueRef
@@ -1645,13 +1635,12 @@ ac_build_raw_tbuffer_load(struct ac_llvm_context *ctx,
 			  unsigned num_channels,
 			  unsigned dfmt,
 			  unsigned nfmt,
-			  bool glc,
-			  bool slc,
+			  unsigned cache_policy,
 			  bool can_speculate)
 {
 	return ac_build_tbuffer_load(ctx, rsrc, NULL, voffset, soffset,
-				     immoffset, num_channels, dfmt, nfmt, glc,
-				     slc, can_speculate, false);
+				     immoffset, num_channels, dfmt, nfmt,
+				     cache_policy, can_speculate, false);
 }
 
 LLVMValueRef
@@ -1660,7 +1649,7 @@ ac_build_tbuffer_load_short(struct ac_llvm_context *ctx,
 			    LLVMValueRef voffset,
 			    LLVMValueRef soffset,
 			    LLVMValueRef immoffset,
-			    bool glc)
+			    unsigned cache_policy)
 {
 	LLVMValueRef res;
 
@@ -1670,14 +1659,14 @@ ac_build_tbuffer_load_short(struct ac_llvm_context *ctx,
 		/* LLVM 9+ supports i8/i16 with struct/raw intrinsics. */
 		res = ac_build_llvm8_buffer_load_common(ctx, rsrc, NULL,
 							voffset, soffset,
-							1, ctx->i16, glc, false,
+							1, ctx->i16, cache_policy,
 							false, false, false);
 	} else {
 		unsigned dfmt = V_008F0C_BUF_DATA_FORMAT_16;
 		unsigned nfmt = V_008F0C_BUF_NUM_FORMAT_UINT;
 
 		res = ac_build_raw_tbuffer_load(ctx, rsrc, voffset, soffset,
-						immoffset, 1, dfmt, nfmt, glc, false,
+						immoffset, 1, dfmt, nfmt, cache_policy,
 						false);
 
 		res = LLVMBuildTrunc(ctx->builder, res, ctx->i16, "");
@@ -1692,7 +1681,7 @@ ac_build_tbuffer_load_byte(struct ac_llvm_context *ctx,
 			   LLVMValueRef voffset,
 			   LLVMValueRef soffset,
 			   LLVMValueRef immoffset,
-			   bool glc)
+			   unsigned cache_policy)
 {
 	LLVMValueRef res;
 
@@ -1702,14 +1691,14 @@ ac_build_tbuffer_load_byte(struct ac_llvm_context *ctx,
 		/* LLVM 9+ supports i8/i16 with struct/raw intrinsics. */
 		res = ac_build_llvm8_buffer_load_common(ctx, rsrc, NULL,
 							voffset, soffset,
-							1, ctx->i8, glc, false,
+							1, ctx->i8, cache_policy,
 							false, false, false);
 	} else {
 		unsigned dfmt = V_008F0C_BUF_DATA_FORMAT_8;
 		unsigned nfmt = V_008F0C_BUF_NUM_FORMAT_UINT;
 
 		res = ac_build_raw_tbuffer_load(ctx, rsrc, voffset, soffset,
-						immoffset, 1, dfmt, nfmt, glc, false,
+						immoffset, 1, dfmt, nfmt, cache_policy,
 						false);
 
 		res = LLVMBuildTrunc(ctx->builder, res, ctx->i8, "");
@@ -1811,8 +1800,7 @@ ac_build_opencoded_load_format(struct ac_llvm_context *ctx,
 			       LLVMValueRef vindex,
 			       LLVMValueRef voffset,
 			       LLVMValueRef soffset,
-			       bool glc,
-			       bool slc,
+			       unsigned cache_policy,
 			       bool can_speculate)
 {
 	LLVMValueRef tmp;
@@ -1851,13 +1839,13 @@ ac_build_opencoded_load_format(struct ac_llvm_context *ctx,
 			unsigned num_channels = 1 << (MAX2(load_log_size, 2) - 2);
 			loads[i] = ac_build_llvm8_buffer_load_common(
 					ctx, rsrc, vindex, voffset, tmp,
-					num_channels, channel_type, glc, slc,
+					num_channels, channel_type, cache_policy,
 					can_speculate, false, true);
 		} else {
 			tmp = LLVMBuildAdd(ctx->builder, voffset, tmp, "");
 			loads[i] = ac_build_llvm7_buffer_load_common(
 					ctx, rsrc, vindex, tmp,
-					1 << (load_log_size - 2), glc, slc, can_speculate, false);
+					1 << (load_log_size - 2), cache_policy, can_speculate, false);
 		}
 		if (load_log_size >= 2)
 			loads[i] = ac_to_integer(ctx, loads[i]);
diff --git a/src/amd/common/ac_llvm_build.h b/src/amd/common/ac_llvm_build.h
index edb950a9f72..18c9a27b068 100644
--- a/src/amd/common/ac_llvm_build.h
+++ b/src/amd/common/ac_llvm_build.h
@@ -288,8 +288,7 @@ ac_build_buffer_load(struct ac_llvm_context *ctx,
 		     LLVMValueRef voffset,
 		     LLVMValueRef soffset,
 		     unsigned inst_offset,
-		     unsigned glc,
-		     unsigned slc,
+		     unsigned cache_policy,
 		     bool can_speculate,
 		     bool allow_smem);
 
@@ -298,7 +297,7 @@ LLVMValueRef ac_build_buffer_load_format(struct ac_llvm_context *ctx,
 					 LLVMValueRef vindex,
 					 LLVMValueRef voffset,
 					 unsigned num_channels,
-					 bool glc,
+					 unsigned cache_policy,
 					 bool can_speculate);
 
 /* load_format that handles the stride & element count better if idxen is
@@ -308,7 +307,7 @@ LLVMValueRef ac_build_buffer_load_format_gfx9_safe(struct ac_llvm_context *ctx,
 						   LLVMValueRef vindex,
 						   LLVMValueRef voffset,
 						   unsigned num_channels,
-						   bool glc,
+						   unsigned cache_policy,
 						   bool can_speculate);
 
 LLVMValueRef
@@ -317,7 +316,7 @@ ac_build_tbuffer_load_short(struct ac_llvm_context *ctx,
 			    LLVMValueRef voffset,
 			    LLVMValueRef soffset,
 			    LLVMValueRef immoffset,
-			    bool glc);
+			    unsigned cache_policy);
 
 LLVMValueRef
 ac_build_tbuffer_load_byte(struct ac_llvm_context *ctx,
@@ -325,7 +324,7 @@ ac_build_tbuffer_load_byte(struct ac_llvm_context *ctx,
 			   LLVMValueRef voffset,
 			   LLVMValueRef soffset,
 			   LLVMValueRef immoffset,
-			   bool glc);
+			   unsigned cache_policy);
 
 LLVMValueRef
 ac_build_struct_tbuffer_load(struct ac_llvm_context *ctx,
@@ -337,8 +336,7 @@ ac_build_struct_tbuffer_load(struct ac_llvm_context *ctx,
 			     unsigned num_channels,
 			     unsigned dfmt,
 			     unsigned nfmt,
-			     bool glc,
-			     bool slc,
+			     unsigned cache_policy,
 			     bool can_speculate);
 
 LLVMValueRef
@@ -350,8 +348,7 @@ ac_build_raw_tbuffer_load(struct ac_llvm_context *ctx,
 			  unsigned num_channels,
 			  unsigned dfmt,
 			  unsigned nfmt,
-			  bool glc,
-			  bool slc,
+			  unsigned cache_policy,
 			  bool can_speculate);
 
 /* For ac_build_fetch_format.
@@ -380,8 +377,7 @@ ac_build_opencoded_load_format(struct ac_llvm_context *ctx,
 			       LLVMValueRef vindex,
 			       LLVMValueRef voffset,
 			       LLVMValueRef soffset,
-			       bool glc,
-			       bool slc,
+			       unsigned cache_policy,
 			       bool can_speculate);
 
 void
diff --git a/src/amd/common/ac_nir_to_llvm.c b/src/amd/common/ac_nir_to_llvm.c
index 69204e2395f..a92c076497d 100644
--- a/src/amd/common/ac_nir_to_llvm.c
+++ b/src/amd/common/ac_nir_to_llvm.c
@@ -1303,14 +1303,14 @@ static LLVMValueRef build_tex_intrinsic(struct ac_nir_context *ctx,
 							args->coords[0],
 							ctx->ac.i32_0,
 							util_last_bit(mask),
-							false, true);
+							0, true);
 		} else {
 			return ac_build_buffer_load_format(&ctx->ac,
 							   args->resource,
 							   args->coords[0],
 							   ctx->ac.i32_0,
 							   util_last_bit(mask),
-							   false, true);
+							   0, true);
 		}
 	}
 
@@ -1743,22 +1743,21 @@ static LLVMValueRef visit_load_buffer(struct ac_nir_context *ctx,
 							 offset,
 							 ctx->ac.i32_0,
 							 immoffset,
-							 cache_policy & ac_glc);
+							 cache_policy);
 		} else if (load_bytes == 2) {
 			ret = ac_build_tbuffer_load_short(&ctx->ac,
 							  rsrc,
 							  offset,
 							  ctx->ac.i32_0,
 							  immoffset,
-							  cache_policy & ac_glc);
+							  cache_policy);
 		} else {
 			int num_channels = util_next_power_of_two(load_bytes) / 4;
 			bool can_speculate = access & ACCESS_CAN_REORDER;
 
 			ret = ac_build_buffer_load(&ctx->ac, rsrc, num_channels,
 						   vindex, offset, immoffset, 0,
-						   cache_policy & ac_glc, 0,
-						   can_speculate, false);
+						   cache_policy, can_speculate, false);
 		}
 
 		LLVMTypeRef byte_vec = LLVMVectorType(ctx->ac.i8, ac_get_type_size(LLVMTypeOf(ret)));
@@ -1804,7 +1803,7 @@ static LLVMValueRef visit_load_ubo_buffer(struct ac_nir_context *ctx,
 								offset,
 								ctx->ac.i32_0,
 								immoffset,
-								false);
+								0);
 			} else {
 				assert(load_bytes == 2);
 				results[i] = ac_build_tbuffer_load_short(&ctx->ac,
@@ -1812,13 +1811,13 @@ static LLVMValueRef visit_load_ubo_buffer(struct ac_nir_context *ctx,
 									 offset,
 									 ctx->ac.i32_0,
 									 immoffset,
-									 false);
+									 0);
 			}
 		}
 		ret = ac_build_gather_values(&ctx->ac, results, num_components);
 	} else {
 		ret = ac_build_buffer_load(&ctx->ac, rsrc, num_components, NULL, offset,
-					   NULL, 0, false, false, true, true);
+					   NULL, 0, 0, true, true);
 
 		ret = ac_trim_vector(&ctx->ac, ret, num_components);
 	}
@@ -2467,7 +2466,7 @@ static LLVMValueRef visit_image_load(struct ac_nir_context *ctx,
 		bool can_speculate = access & ACCESS_CAN_REORDER;
 		res = ac_build_buffer_load_format(&ctx->ac, rsrc, vindex,
 						  ctx->ac.i32_0, num_channels,
-						  !!(args.cache_policy & ac_glc),
+						  args.cache_policy,
 						  can_speculate);
 		res = ac_build_expand_to_vec4(&ctx->ac, res, num_channels);
 
@@ -3476,7 +3475,7 @@ static LLVMValueRef get_bindless_index_from_uniform(struct ac_nir_context *ctx,
 	LLVMValueRef ubo_index = ctx->abi->load_ubo(ctx->abi, ctx->ac.i32_0);
 
 	LLVMValueRef ret = ac_build_buffer_load(&ctx->ac, ubo_index, 1, NULL, offset,
-						NULL, 0, false, false, true, true);
+						NULL, 0, 0, true, true);
 	return LLVMBuildBitCast(ctx->ac.builder, ret, ctx->ac.i32, "");
 }
 
diff --git a/src/amd/vulkan/radv_nir_to_llvm.c b/src/amd/vulkan/radv_nir_to_llvm.c
index 5ae4860b3b7..e310c5c495e 100644
--- a/src/amd/vulkan/radv_nir_to_llvm.c
+++ b/src/amd/vulkan/radv_nir_to_llvm.c
@@ -1630,7 +1630,7 @@ load_tes_input(struct ac_shader_abi *abi,
 	buf_addr = LLVMBuildAdd(ctx->ac.builder, buf_addr, comp_offset, "");
 
 	result = ac_build_buffer_load(&ctx->ac, ctx->hs_ring_tess_offchip, num_components, NULL,
-				      buf_addr, ctx->oc_lds, is_compact ? (4 * const_index) : 0, 1, 0, true, false);
+				      buf_addr, ctx->oc_lds, is_compact ? (4 * const_index) : 0, ac_glc, true, false);
 	result = ac_trim_vector(&ctx->ac, result, num_components);
 	return result;
 }
@@ -1673,7 +1673,7 @@ load_gs_input(struct ac_shader_abi *abi,
 						ctx->esgs_ring, 1,
 						ctx->ac.i32_0, vtx_offset,
 						soffset,
-						0, 1, 0, true, false);
+						0, ac_glc, true, false);
 		}
 
 		if (ac_get_type_size(type) == 2) {
@@ -2236,8 +2236,7 @@ handle_vs_input_decl(struct radv_shader_context *ctx,
 						    LLVMConstInt(ctx->ac.i32, attrib_offset, false),
 						    ctx->ac.i32_0, ctx->ac.i32_0,
 						    num_channels,
-						    data_format, num_format,
-						    false, false, true);
+						    data_format, num_format, 0, true);
 
 		if (ctx->options->key.vs.post_shuffle & (1 << attrib_index)) {
 			LLVMValueRef c[4];
@@ -4041,7 +4040,7 @@ ac_gs_copy_shader_emit(struct radv_shader_context *ctx)
 						 ctx->gsvs_ring[0], 1,
 						 ctx->ac.i32_0, vtx_offset,
 						 soffset,
-						 0, 1, 1, true, false);
+						 0, ac_glc | ac_slc, true, false);
 
 			LLVMTypeRef type = LLVMGetAllocatedType(ctx->abi.outputs[ac_llvm_reg_index_soa(i, j)]);
 			if (ac_get_type_size(type) == 2) {
diff --git a/src/gallium/drivers/radeonsi/si_compute_prim_discard.c b/src/gallium/drivers/radeonsi/si_compute_prim_discard.c
index a7b0e7ad3a7..e16c0791a27 100644
--- a/src/gallium/drivers/radeonsi/si_compute_prim_discard.c
+++ b/src/gallium/drivers/radeonsi/si_compute_prim_discard.c
@@ -473,7 +473,7 @@ void si_build_prim_discard_compute_shader(struct si_shader_context *ctx)
 		for (unsigned i = 0; i < 3; i++) {
 			index[i] = ac_build_buffer_load_format(&ctx->ac, input_indexbuf, index[i],
 							       ctx->i32_0, 1,
-							       false, true);
+							       0, true);
 			index[i] = ac_to_integer(&ctx->ac, index[i]);
 		}
 	}
diff --git a/src/gallium/drivers/radeonsi/si_shader.c b/src/gallium/drivers/radeonsi/si_shader.c
index 4cf642f4a95..756199a3496 100644
--- a/src/gallium/drivers/radeonsi/si_shader.c
+++ b/src/gallium/drivers/radeonsi/si_shader.c
@@ -541,8 +541,7 @@ void si_llvm_load_input_vs(
 		tmp = ac_build_opencoded_load_format(
 				&ctx->ac, fix_fetch.u.log_size, fix_fetch.u.num_channels_m1 + 1,
 				fix_fetch.u.format, fix_fetch.u.reverse, !opencode,
-				t_list, vertex_index, ctx->ac.i32_0, ctx->ac.i32_0,
-				false, false, true);
+				t_list, vertex_index, ctx->ac.i32_0, ctx->ac.i32_0, 0, true);
 		for (unsigned i = 0; i < 4; ++i)
 			out[i] = LLVMBuildExtractElement(ctx->ac.builder, tmp, LLVMConstInt(ctx->i32, i, false), "");
 		return;
@@ -568,7 +567,7 @@ void si_llvm_load_input_vs(
 	for (unsigned i = 0; i < num_fetches; ++i) {
 		LLVMValueRef voffset = LLVMConstInt(ctx->i32, fetch_stride * i, 0);
 		fetches[i] = ac_build_buffer_load_format(&ctx->ac, t_list, vertex_index, voffset,
-							 channels_per_fetch, false, true);
+							 channels_per_fetch, 0, true);
 	}
 
 	if (num_fetches == 1 && channels_per_fetch > 1) {
@@ -967,14 +966,14 @@ static LLVMValueRef buffer_load(struct lp_build_tgsi_context *bld_base,
 	if (swizzle == ~0) {
 		value = ac_build_buffer_load(&ctx->ac, buffer, 4, NULL,
 					     base, offset,
-					     0, 1, 0, can_speculate, false);
+					     0, ac_glc, can_speculate, false);
 		return LLVMBuildBitCast(ctx->ac.builder, value, vec_type, "");
 	}
 
 	if (!llvm_type_is_64bit(ctx, type)) {
 		value = ac_build_buffer_load(&ctx->ac, buffer, 4, NULL,
 					     base, offset,
-					     0, 1, 0, can_speculate, false);
+					     0, ac_glc, can_speculate, false);
 
 		value = LLVMBuildBitCast(ctx->ac.builder, value, vec_type, "");
 		return LLVMBuildExtractElement(ctx->ac.builder, value,
@@ -982,10 +981,10 @@ static LLVMValueRef buffer_load(struct lp_build_tgsi_context *bld_base,
 	}
 
 	value = ac_build_buffer_load(&ctx->ac, buffer, 1, NULL,
 				     base, offset,
-				     swizzle * 4, 1, 0, can_speculate, false);
+				     swizzle * 4, ac_glc, can_speculate, false);
 	value2 = ac_build_buffer_load(&ctx->ac,
 				      buffer, 1, NULL, base, offset,
-				      swizzle * 4 + 4, 1, 0, can_speculate, false);
+				      swizzle * 4 + 4, ac_glc, can_speculate, false);
 	return si_llvm_emit_fetch_64bit(bld_base, type, value, value2);
 }
 
@@ -1589,14 +1588,14 @@ LLVMValueRef si_llvm_load_input_gs(struct ac_shader_abi *abi,
 		soffset = LLVMConstInt(ctx->i32, (param * 4 + swizzle) * 256, 0);
 
 		value = ac_build_buffer_load(&ctx->ac, ctx->esgs_ring, 1, ctx->i32_0,
-					     vtx_offset, soffset, 0, 1, 0, true, false);
+					     vtx_offset, soffset, 0, ac_glc, true, false);
 		if (llvm_type_is_64bit(ctx, type)) {
 			LLVMValueRef value2;
 
 			soffset = LLVMConstInt(ctx->i32, (param * 4 + swizzle + 1) * 256, 0);
 			value2 = ac_build_buffer_load(&ctx->ac, ctx->esgs_ring, 1,
 						      ctx->i32_0, vtx_offset, soffset,
-						      0, 1, 0, true, false);
+						      0, ac_glc, true, false);
 			return si_llvm_emit_fetch_64bit(bld_base, type, value, value2);
 		}
 		return LLVMBuildBitCast(ctx->ac.builder, value, type, "");
@@ -1908,7 +1907,7 @@ static LLVMValueRef buffer_load_const(struct si_shader_context *ctx,
 				      LLVMValueRef offset)
 {
 	return ac_build_buffer_load(&ctx->ac, resource, 1, NULL, offset, NULL,
-				    0, 0, 0, true, true);
+				    0, 0, true, true);
 }
 
 static LLVMValueRef load_sample_position(struct ac_shader_abi *abi, LLVMValueRef sample_id)
@@ -5787,7 +5786,7 @@ si_generate_gs_copy_shader(struct si_screen *sscreen,
 				ac_build_buffer_load(&ctx.ac,
 						     ctx.gsvs_ring[0], 1,
 						     ctx.i32_0, voffset,
-						     soffset, 0, 1, 1,
+						     soffset, 0, ac_glc | ac_slc,
 						     true, false);
 		}
 	}
diff --git a/src/gallium/drivers/radeonsi/si_shader_tgsi_mem.c b/src/gallium/drivers/radeonsi/si_shader_tgsi_mem.c
index 74081c64599..e937ad8f3aa 100644
--- a/src/gallium/drivers/radeonsi/si_shader_tgsi_mem.c
+++ b/src/gallium/drivers/radeonsi/si_shader_tgsi_mem.c
@@ -515,7 +515,7 @@ static void load_emit(
 		emit_data->output[emit_data->chan] =
 			ac_build_buffer_load(&ctx->ac, args.resource,
 					     util_last_bit(inst->Dst[0].Register.WriteMask),
-					     NULL, voffset, NULL, 0, 0, 0, true, true);
+					     NULL, voffset, NULL, 0, 0, true, true);
 		return;
 	}
 
@@ -552,9 +552,7 @@ static void load_emit(
 			ac_build_buffer_load(&ctx->ac, args.resource,
 					     util_last_bit(inst->Dst[0].Register.WriteMask),
 					     NULL, voffset, NULL, 0,
-					     !!(args.cache_policy & ac_glc),
-					     !!(args.cache_policy & ac_slc),
-					     can_speculate, false);
+					     args.cache_policy, can_speculate, false);
 		return;
 	}
 
@@ -566,7 +564,7 @@ static void load_emit(
 						       vindex,
 						       ctx->i32_0,
 						       num_channels,
-						       !!(args.cache_policy & ac_glc),
+						       args.cache_policy,
 						       can_speculate);
 		emit_data->output[emit_data->chan] = ac_build_expand_to_vec4(&ctx->ac, result, num_channels);
 		return;
@@ -1340,7 +1338,7 @@ static void build_tex_intrinsic(const struct lp_build_tgsi_action *action,
 							args.resource,
 							vindex,
 							ctx->i32_0,
-							num_channels, false, true);
+							num_channels, 0, true);
 		emit_data->output[emit_data->chan] =
 			ac_build_expand_to_vec4(&ctx->ac, result, num_channels);
 		return;
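
[Editor's note, not part of the patch.] The change folds the old glc/slc boolean pair into a single cache_policy bitmask and derives the GFX10 DLC bit inside the new get_load_cache_policy() helper. As a rough illustration of the flag arithmetic only, here is a small standalone C sketch: the enum values are assumed to mirror the ac_glc/ac_slc/ac_dlc flags referenced above (bits 0/1/2 of the LLVM buffer-intrinsic cache-policy operand), and the is_gfx10 boolean is a stand-in for the ctx->chip_class >= GFX10 check in the real code.

#include <stdio.h>

/* Flag values assumed to match ac_glc/ac_slc/ac_dlc in ac_llvm_build.h. */
enum { ac_glc = 1 << 0, ac_slc = 1 << 1, ac_dlc = 1 << 2 };

/* Same shape as the new helper: loads on GFX10 additionally need DLC
 * whenever GLC is requested. */
static unsigned get_load_cache_policy(int is_gfx10, unsigned cache_policy)
{
	return cache_policy |
	       (is_gfx10 && (cache_policy & ac_glc) ? ac_dlc : 0);
}

int main(void)
{
	unsigned coherent = ac_glc;            /* old call sites passed glc=true, slc=false */
	unsigned streaming = ac_glc | ac_slc;  /* old call sites passed glc=true, slc=true  */

	printf("pre-GFX10 coherent load:  0x%x\n", get_load_cache_policy(0, coherent));
	printf("GFX10 coherent load:      0x%x\n", get_load_cache_policy(1, coherent));
	printf("GFX10 streaming load:     0x%x\n", get_load_cache_policy(1, streaming));
	return 0;
}

Callers that previously passed two booleans (glc, slc) now pass one such bitmask, e.g. 0, ac_glc, or ac_glc | ac_slc, as the hunks above show.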