diff --git a/src/gallium/drivers/radeonsi/si_shader_tgsi_mem.c b/src/gallium/drivers/radeonsi/si_shader_tgsi_mem.c
index 8c44831bccb..f79ed2c57e1 100644
--- a/src/gallium/drivers/radeonsi/si_shader_tgsi_mem.c
+++ b/src/gallium/drivers/radeonsi/si_shader_tgsi_mem.c
@@ -48,8 +48,8 @@ static LLVMValueRef get_buffer_size(
 		LLVMBuildExtractElement(builder, descriptor,
 					LLVMConstInt(ctx->i32, 2, 0), "");
 
-	if (ctx->screen->info.chip_class == VI) {
-		/* On VI, the descriptor contains the size in bytes,
+	if (ctx->screen->info.chip_class == GFX8) {
+		/* On GFX8, the descriptor contains the size in bytes,
 		 * but TXQ must return the size in elements.
 		 * The stride is always non-zero for resources using TXQ.
 		 */
@@ -93,7 +93,7 @@ ac_texture_dim_from_tgsi_target(struct si_screen *screen, enum tgsi_texture_type
 	switch (target) {
 	case TGSI_TEXTURE_1D:
 	case TGSI_TEXTURE_SHADOW1D:
-		if (screen->info.chip_class >= GFX9)
+		if (screen->info.chip_class == GFX9)
 			return ac_image_2d;
 		return ac_image_1d;
 	case TGSI_TEXTURE_2D:
@@ -110,7 +110,7 @@ ac_texture_dim_from_tgsi_target(struct si_screen *screen, enum tgsi_texture_type
 		return ac_image_cube;
 	case TGSI_TEXTURE_1D_ARRAY:
 	case TGSI_TEXTURE_SHADOW1D_ARRAY:
-		if (screen->info.chip_class >= GFX9)
+		if (screen->info.chip_class == GFX9)
 			return ac_image_2darray;
 		return ac_image_1darray;
 	case TGSI_TEXTURE_2D_ARRAY:
@@ -132,9 +132,9 @@ ac_image_dim_from_tgsi_target(struct si_screen *screen, enum tgsi_texture_type t
 
 	/* Match the resource type set in the descriptor. */
 	if (dim == ac_image_cube ||
-	    (screen->info.chip_class <= VI && dim == ac_image_3d))
+	    (screen->info.chip_class <= GFX8 && dim == ac_image_3d))
 		dim = ac_image_2darray;
-	else if (target == TGSI_TEXTURE_2D && screen->info.chip_class >= GFX9) {
+	else if (target == TGSI_TEXTURE_2D && screen->info.chip_class == GFX9) {
 		/* When a single layer of a 3D texture is bound, the shader
 		 * will refer to a 2D target, but the descriptor has a 3D type.
 		 * Since the HW ignores BASE_ARRAY in this case, we need to
@@ -161,7 +161,7 @@ ac_image_dim_from_tgsi_target(struct si_screen *screen, enum tgsi_texture_type t
 
 static LLVMValueRef force_dcc_off(struct si_shader_context *ctx, LLVMValueRef rsrc)
 {
-	if (ctx->screen->info.chip_class <= CIK) {
+	if (ctx->screen->info.chip_class <= GFX7) {
 		return rsrc;
 	} else {
 		LLVMValueRef i32_6 = LLVMConstInt(ctx->i32, 6, 0);
@@ -176,8 +176,8 @@ static LLVMValueRef force_dcc_off(struct si_shader_context *ctx,
 
 LLVMValueRef si_load_image_desc(struct si_shader_context *ctx,
 				LLVMValueRef list, LLVMValueRef index,
-				enum ac_descriptor_type desc_type, bool dcc_off,
-				bool bindless)
+				enum ac_descriptor_type desc_type,
+				bool uses_store, bool bindless)
 {
 	LLVMBuilderRef builder = ctx->ac.builder;
 	LLVMValueRef rsrc;
@@ -196,7 +196,7 @@ LLVMValueRef si_load_image_desc(struct si_shader_context *ctx,
 	else
 		rsrc = ac_build_load_to_sgpr(&ctx->ac, list, index);
 
-	if (desc_type == AC_DESC_IMAGE && dcc_off)
+	if (desc_type == AC_DESC_IMAGE && uses_store)
 		rsrc = force_dcc_off(ctx, rsrc);
 	return rsrc;
 }
@@ -215,18 +215,10 @@ image_fetch_rsrc(
 	LLVMValueRef rsrc_ptr = LLVMGetParam(ctx->main_fn,
 					     ctx->param_samplers_and_images);
 	LLVMValueRef index;
-	bool dcc_off = is_store;
 
 	if (!image->Register.Indirect) {
-		const struct tgsi_shader_info *info = bld_base->info;
-		unsigned images_writemask = info->images_store |
-					    info->images_atomic;
-
 		index = LLVMConstInt(ctx->i32,
 				     si_get_image_slot(image->Register.Index), 0);
-
-		if (images_writemask & (1 << image->Register.Index))
-			dcc_off = true;
 	} else {
 		/* From the GL_ARB_shader_image_load_store extension spec:
 		 *
@@ -266,7 +258,7 @@ image_fetch_rsrc(
 
 	*rsrc = si_load_image_desc(ctx, rsrc_ptr, index,
 				   target == TGSI_TEXTURE_BUFFER ? AC_DESC_BUFFER : AC_DESC_IMAGE,
-				   dcc_off, bindless);
+				   is_store, bindless);
 }
 
 static void image_fetch_coords(
@@ -294,7 +286,7 @@ static void image_fetch_coords(
 		coords[chan] = tmp;
 	}
 
-	if (ctx->screen->info.chip_class >= GFX9) {
+	if (ctx->screen->info.chip_class == GFX9) {
 		/* 1D textures are allocated and used as 2D on GFX9. */
 		if (target == TGSI_TEXTURE_1D) {
 			coords[1] = ctx->i32_0;
@@ -327,17 +319,18 @@ static unsigned get_cache_policy(struct si_shader_context *ctx,
 	unsigned cache_policy = 0;
 
 	if (!atomic &&
-	    /* SI has a TC L1 bug causing corruption of 8bit/16bit stores.
+	    /* GFX6 has a TC L1 bug causing corruption of 8bit/16bit stores.
 	     * All store opcodes not aligned to a dword are affected.
 	     * The only way to get unaligned stores in radeonsi is through
 	     * shader images.
 	     */
-	    ((may_store_unaligned && ctx->screen->info.chip_class == SI) ||
+	    ((may_store_unaligned && ctx->screen->info.chip_class == GFX6) ||
 	     /* If this is write-only, don't keep data in L1 to prevent
 	      * evicting L1 cache lines that may be needed by other
 	      * instructions.
 	      */
 	     writeonly_memory ||
-	     inst->Memory.Qualifier & (TGSI_MEMORY_COHERENT | TGSI_MEMORY_VOLATILE)))
+	     inst->Memory.Qualifier & (TGSI_MEMORY_COHERENT | TGSI_MEMORY_VOLATILE))) {
 		cache_policy |= ac_glc;
+	}
 
 	if (inst->Memory.Qualifier & TGSI_MEMORY_STREAM_CACHE_POLICY)
 		cache_policy |= ac_slc;
@@ -396,20 +389,53 @@ static void load_emit_memory(
  *  For LOAD, set this to (store | atomic) slot usage in the shader.
  *  For STORE, set this to (load | atomic) slot usage in the shader.
  * \param images_reverse_access_mask  Same as above, but for images.
+ * \param bindless_buffer_reverse_access_mask  Same as above, but for bindless image buffers.
+ * \param bindless_image_reverse_access_mask   Same as above, but for bindless images.
  */
 static bool is_oneway_access_only(const struct tgsi_full_instruction *inst,
 				  const struct tgsi_shader_info *info,
 				  unsigned shader_buffers_reverse_access_mask,
-				  unsigned images_reverse_access_mask)
+				  unsigned images_reverse_access_mask,
+				  bool bindless_buffer_reverse_access_mask,
+				  bool bindless_image_reverse_access_mask)
 {
+	enum tgsi_file_type resource_file;
+	unsigned resource_index;
+	bool resource_indirect;
+
+	if (inst->Instruction.Opcode == TGSI_OPCODE_STORE) {
+		resource_file = inst->Dst[0].Register.File;
+		resource_index = inst->Dst[0].Register.Index;
+		resource_indirect = inst->Dst[0].Register.Indirect;
+	} else {
+		resource_file = inst->Src[0].Register.File;
+		resource_index = inst->Src[0].Register.Index;
+		resource_indirect = inst->Src[0].Register.Indirect;
+	}
+
+	assert(resource_file == TGSI_FILE_BUFFER ||
+	       resource_file == TGSI_FILE_IMAGE ||
+	       /* bindless image */
+	       resource_file == TGSI_FILE_INPUT ||
+	       resource_file == TGSI_FILE_OUTPUT ||
+	       resource_file == TGSI_FILE_CONSTANT ||
+	       resource_file == TGSI_FILE_TEMPORARY ||
+	       resource_file == TGSI_FILE_IMMEDIATE);
+
+	assert(resource_file != TGSI_FILE_BUFFER ||
+	       inst->Memory.Texture == TGSI_TEXTURE_BUFFER);
+
+	bool bindless = resource_file != TGSI_FILE_BUFFER &&
+			resource_file != TGSI_FILE_IMAGE;
+
 	/* RESTRICT means NOALIAS.
 	 * If there are no writes, we can assume the accessed memory is read-only.
 	 * If there are no reads, we can assume the accessed memory is write-only.
 	 */
-	if (inst->Memory.Qualifier & TGSI_MEMORY_RESTRICT) {
+	if (inst->Memory.Qualifier & TGSI_MEMORY_RESTRICT && !bindless) {
 		unsigned reverse_access_mask;
 
-		if (inst->Src[0].Register.File == TGSI_FILE_BUFFER) {
+		if (resource_file == TGSI_FILE_BUFFER) {
 			reverse_access_mask = shader_buffers_reverse_access_mask;
 		} else if (inst->Memory.Texture == TGSI_TEXTURE_BUFFER) {
 			reverse_access_mask = info->images_buffers &
@@ -419,12 +445,12 @@ static bool is_oneway_access_only(const struct tgsi_full_instruction *inst,
 					      images_reverse_access_mask;
 		}
 
-		if (inst->Src[0].Register.Indirect) {
+		if (resource_indirect) {
 			if (!reverse_access_mask)
 				return true;
 		} else {
 			if (!(reverse_access_mask &
-			      (1u << inst->Src[0].Register.Index)))
+			      (1u << resource_index)))
 				return true;
 		}
 	}
@@ -437,15 +463,15 @@ static bool is_oneway_access_only(const struct tgsi_full_instruction *inst,
 	 * Same for the case when there are no writes/reads for non-buffer
 	 * images.
 	 */
-	if (inst->Src[0].Register.File == TGSI_FILE_BUFFER ||
-	    (inst->Memory.Texture == TGSI_TEXTURE_BUFFER &&
-	     (inst->Src[0].Register.File == TGSI_FILE_IMAGE ||
-	      tgsi_is_bindless_image_file(inst->Src[0].Register.File)))) {
+	if (resource_file == TGSI_FILE_BUFFER ||
+	    inst->Memory.Texture == TGSI_TEXTURE_BUFFER) {
 		if (!shader_buffers_reverse_access_mask &&
-		    !(info->images_buffers & images_reverse_access_mask))
+		    !(info->images_buffers & images_reverse_access_mask) &&
+		    !bindless_buffer_reverse_access_mask)
 			return true;
 	} else {
-		if (!(~info->images_buffers & images_reverse_access_mask))
+		if (!(~info->images_buffers & images_reverse_access_mask) &&
+		    !bindless_image_reverse_access_mask)
 			return true;
 	}
 	return false;
@@ -474,8 +500,7 @@ static void load_emit(
 		bool ubo = inst->Src[0].Register.File == TGSI_FILE_CONSTBUF;
 		args.resource = shader_buffer_fetch_rsrc(ctx, &inst->Src[0], ubo);
 		voffset = ac_to_integer(&ctx->ac, lp_build_emit_fetch(bld_base, inst, 1, 0));
-	} else if (inst->Src[0].Register.File == TGSI_FILE_IMAGE ||
-		   tgsi_is_bindless_image_file(inst->Src[0].Register.File)) {
+	} else {
 		unsigned target = inst->Memory.Texture;
 
 		image_fetch_rsrc(bld_base, &inst->Src[0], false, target, &args.resource);
@@ -487,19 +512,23 @@ static void load_emit(
 		emit_data->output[emit_data->chan] =
 			ac_build_buffer_load(&ctx->ac, args.resource,
 					     util_last_bit(inst->Dst[0].Register.WriteMask),
-					     NULL, voffset, NULL, 0, 0, 0, true, true);
+					     NULL, voffset, NULL, 0, 0, true, true);
 		return;
 	}
 
 	if (inst->Memory.Qualifier & TGSI_MEMORY_VOLATILE)
-		ac_build_waitcnt(&ctx->ac, VM_CNT);
+		ac_build_waitcnt(&ctx->ac, AC_WAIT_VLOAD | AC_WAIT_VSTORE);
 
 	can_speculate = !(inst->Memory.Qualifier & TGSI_MEMORY_VOLATILE) &&
 			  is_oneway_access_only(inst, info,
 						info->shader_buffers_store |
 						info->shader_buffers_atomic,
 						info->images_store |
-						info->images_atomic);
+						info->images_atomic,
+						info->uses_bindless_buffer_store |
+						info->uses_bindless_buffer_atomic,
+						info->uses_bindless_image_store |
+						info->uses_bindless_image_atomic);
 	args.cache_policy = get_cache_policy(ctx, inst, false, false, false);
 
 	if (inst->Src[0].Register.File == TGSI_FILE_BUFFER) {
@@ -520,9 +549,7 @@ static void load_emit(
 			ac_build_buffer_load(&ctx->ac, args.resource,
 					     util_last_bit(inst->Dst[0].Register.WriteMask),
 					     NULL, voffset, NULL, 0,
-					     !!(args.cache_policy & ac_glc),
-					     !!(args.cache_policy & ac_slc),
-					     can_speculate, false);
+					     args.cache_policy, can_speculate, false);
 		return;
 	}
 
@@ -534,7 +561,7 @@ static void load_emit(
 							  vindex,
 							  ctx->i32_0,
 							  num_channels,
-							  !!(args.cache_policy & ac_glc),
+							  args.cache_policy,
 							  can_speculate);
 		emit_data->output[emit_data->chan] =
 			ac_build_expand_to_vec4(&ctx->ac, result, num_channels);
@@ -563,21 +590,22 @@ static void store_emit_buffer(struct si_shader_context *ctx,
 
 	while (writemask) {
 		int start, count;
-		const char *intrinsic_name;
 		LLVMValueRef data, voff;
 
 		u_bit_scan_consecutive_range(&writemask, &start, &count);
 
-		/* Due to an LLVM limitation, split 3-element writes
-		 * into a 2-element and a 1-element write.
-		 */
-		if (count == 3) {
-			writemask |= 1 << (start + 2);
-			count = 2;
-		}
-
-		if (count == 4) {
+		if (count == 3 && ac_has_vec3_support(ctx->ac.chip_class, false)) {
+			LLVMValueRef values[3] = {
+				LLVMBuildExtractElement(builder, base_data,
+							LLVMConstInt(ctx->i32, start, 0), ""),
+				LLVMBuildExtractElement(builder, base_data,
+							LLVMConstInt(ctx->i32, start + 1, 0), ""),
+				LLVMBuildExtractElement(builder, base_data,
+							LLVMConstInt(ctx->i32, start + 2, 0), ""),
+			};
+			data = ac_build_gather_values(&ctx->ac, values, 3);
+		} else if (count >= 3) {
 			data = base_data;
-			intrinsic_name = "llvm.amdgcn.buffer.store.v4f32";
 		} else if (count == 2) {
 			LLVMValueRef values[2] = {
 				LLVMBuildExtractElement(builder, base_data,
@@ -587,13 +615,11 @@ static void store_emit_buffer(struct si_shader_context *ctx,
 			};
 			data = ac_build_gather_values(&ctx->ac, values, 2);
-			intrinsic_name = "llvm.amdgcn.buffer.store.v2f32";
 		} else {
 			assert(count == 1);
 			data = LLVMBuildExtractElement(
 				builder, base_data,
 				LLVMConstInt(ctx->i32, start, 0), "");
-			intrinsic_name = "llvm.amdgcn.buffer.store.f32";
 		}
 
 		voff = base_offset;
 		if (start != 0) {
@@ -603,16 +629,9 @@ static void store_emit_buffer(struct si_shader_context *ctx,
 					LLVMConstInt(ctx->i32, start * 4, 0), "");
 		}
 
-		LLVMValueRef args[] = {
-			data,
-			resource,
-			ctx->i32_0, /* vindex */
-			voff,
-			LLVMConstInt(ctx->i1, !!(cache_policy & ac_glc), 0),
-			LLVMConstInt(ctx->i1, !!(cache_policy & ac_slc), 0),
-		};
-		ac_build_intrinsic(&ctx->ac, intrinsic_name, ctx->voidt, args, 6,
-				   ac_get_store_intr_attribs(writeonly_memory));
+		ac_build_buffer_store_dword(&ctx->ac, resource, data, count,
+					    voff, ctx->i32_0, 0, cache_policy,
+					    false);
 	}
 }
 
@@ -650,42 +669,42 @@ static void store_emit(
 	struct tgsi_full_src_register resource_reg =
 		tgsi_full_src_register_from_dst(&inst->Dst[0]);
 	unsigned target = inst->Memory.Texture;
+
+	if (inst->Dst[0].Register.File == TGSI_FILE_MEMORY) {
+		store_emit_memory(ctx, emit_data);
+		return;
+	}
+
 	bool writeonly_memory = is_oneway_access_only(inst, info,
 						      info->shader_buffers_load |
 						      info->shader_buffers_atomic,
 						      info->images_load |
-						      info->images_atomic);
-	bool is_image = inst->Dst[0].Register.File == TGSI_FILE_IMAGE ||
-			tgsi_is_bindless_image_file(inst->Dst[0].Register.File);
-	LLVMValueRef chans[4], value;
+						      info->images_atomic,
+						      info->uses_bindless_buffer_load |
+						      info->uses_bindless_buffer_atomic,
+						      info->uses_bindless_image_load |
+						      info->uses_bindless_image_atomic);
+	LLVMValueRef chans[4];
 	LLVMValueRef vindex = ctx->i32_0;
 	LLVMValueRef voffset = ctx->i32_0;
 	struct ac_image_args args = {};
 
-	if (inst->Dst[0].Register.File == TGSI_FILE_MEMORY) {
-		store_emit_memory(ctx, emit_data);
-		return;
-	}
-
 	for (unsigned chan = 0; chan < 4; ++chan)
 		chans[chan] = lp_build_emit_fetch(bld_base, inst, 1, chan);
 
-	value = ac_build_gather_values(&ctx->ac, chans, 4);
-
 	if (inst->Dst[0].Register.File == TGSI_FILE_BUFFER) {
 		args.resource = shader_buffer_fetch_rsrc(ctx, &resource_reg, false);
 		voffset = ac_to_integer(&ctx->ac, lp_build_emit_fetch(bld_base, inst, 0, 0));
-	} else if (is_image) {
+	} else {
 		image_fetch_rsrc(bld_base, &resource_reg, true, target, &args.resource);
 		image_fetch_coords(bld_base, inst, 0, args.resource, args.coords);
 		vindex = args.coords[0]; /* for buffers only */
-	} else {
-		unreachable("unexpected register file");
 	}
 
 	if (inst->Memory.Qualifier & TGSI_MEMORY_VOLATILE)
-		ac_build_waitcnt(&ctx->ac, VM_CNT);
+		ac_build_waitcnt(&ctx->ac, AC_WAIT_VLOAD | AC_WAIT_VSTORE);
 
+	bool is_image = inst->Dst[0].Register.File != TGSI_FILE_BUFFER;
 	args.cache_policy =
 		get_cache_policy(ctx, inst,
 				 false, /* atomic */
 				 is_image, /* may_store_unaligned */
@@ -693,29 +712,24 @@ static void store_emit(
 
 	if (inst->Dst[0].Register.File == TGSI_FILE_BUFFER) {
 		store_emit_buffer(ctx, args.resource, inst->Dst[0].Register.WriteMask,
-				  value, voffset, args.cache_policy, writeonly_memory);
+				  ac_build_gather_values(&ctx->ac, chans, 4),
+				  voffset, args.cache_policy, writeonly_memory);
 		return;
 	}
 
 	if (target == TGSI_TEXTURE_BUFFER) {
-		LLVMValueRef buf_args[] = {
-			value,
-			args.resource,
-			vindex,
-			ctx->i32_0, /* voffset */
-			LLVMConstInt(ctx->i1, !!(args.cache_policy & ac_glc), 0),
-			LLVMConstInt(ctx->i1, !!(args.cache_policy & ac_slc), 0),
-		};
-
-		emit_data->output[emit_data->chan] = ac_build_intrinsic(
-			&ctx->ac, "llvm.amdgcn.buffer.store.format.v4f32",
-			ctx->voidt, buf_args, 6,
-			ac_get_store_intr_attribs(writeonly_memory));
+		unsigned num_channels = util_last_bit(inst->Dst[0].Register.WriteMask);
+
+		ac_build_buffer_store_format(&ctx->ac, args.resource,
+					     ac_build_gather_values(&ctx->ac, chans, num_channels),
+					     vindex, ctx->i32_0 /* voffset */,
+					     num_channels,
+					     args.cache_policy);
 	} else {
 		args.opcode = ac_image_store;
-		args.data[0] = value;
+		args.data[0] = ac_build_gather_values(&ctx->ac, chans, 4);
 		args.dim = ac_image_dim_from_tgsi_target(ctx->screen, inst->Memory.Texture);
-		args.attributes = ac_get_store_intr_attribs(writeonly_memory);
+		args.attributes = AC_FUNC_ATTR_INACCESSIBLE_MEM_ONLY;
 		args.dmask = 0xf;
 
 		emit_data->output[emit_data->chan] =
@@ -728,6 +742,7 @@ static void atomic_emit_memory(struct si_shader_context *ctx,
 	LLVMBuilderRef builder = ctx->ac.builder;
 	const struct tgsi_full_instruction * inst = emit_data->inst;
 	LLVMValueRef ptr, result, arg;
+	const char *sync_scope = HAVE_LLVM >= 0x0900 ? "workgroup-one-as" : "workgroup";
 
 	ptr = get_memory_ptr(ctx, inst, ctx->i32, 1);
 
@@ -741,11 +756,8 @@ static void atomic_emit_memory(struct si_shader_context *ctx,
 
 		new_data = ac_to_integer(&ctx->ac, new_data);
 
-		result = LLVMBuildAtomicCmpXchg(builder, ptr, arg, new_data,
-				       LLVMAtomicOrderingSequentiallyConsistent,
-				       LLVMAtomicOrderingSequentiallyConsistent,
-				       false);
-
+		result = ac_build_atomic_cmp_xchg(&ctx->ac, ptr, arg, new_data,
+						  sync_scope);
 		result = LLVMBuildExtractValue(builder, result, 0, "");
 	} else {
 		LLVMAtomicRMWBinOp op;
@@ -782,9 +794,7 @@ static void atomic_emit_memory(struct si_shader_context *ctx,
 			unreachable("unknown atomic opcode");
 		}
 
-		result = LLVMBuildAtomicRMW(builder, op, ptr, arg,
-				       LLVMAtomicOrderingSequentiallyConsistent,
-				       false);
+		result = ac_build_atomic_rmw(&ctx->ac, op, ptr, arg, sync_scope);
 	}
 
 	emit_data->output[emit_data->chan] =
 		LLVMBuildBitCast(builder, result, ctx->f32, "");
@@ -817,24 +827,51 @@ static void atomic_emit(
 		args.data[num_data++] =
 			ac_to_integer(&ctx->ac, lp_build_emit_fetch(bld_base, inst, 2, 0));
 
+	args.cache_policy = get_cache_policy(ctx, inst, true, false, false);
+
 	if (inst->Src[0].Register.File == TGSI_FILE_BUFFER) {
 		args.resource = shader_buffer_fetch_rsrc(ctx, &inst->Src[0], false);
 		voffset = ac_to_integer(&ctx->ac, lp_build_emit_fetch(bld_base, inst, 1, 0));
-	} else if (inst->Src[0].Register.File == TGSI_FILE_IMAGE ||
-		   tgsi_is_bindless_image_file(inst->Src[0].Register.File)) {
+	} else {
 		image_fetch_rsrc(bld_base, &inst->Src[0], true, inst->Memory.Texture,
 				 &args.resource);
 		image_fetch_coords(bld_base, inst, 1, args.resource, args.coords);
 		vindex = args.coords[0]; /* for buffers only */
 	}
 
-	if (inst->Src[0].Register.File == TGSI_FILE_BUFFER ||
+	if (HAVE_LLVM >= 0x0800 &&
+	    inst->Src[0].Register.File != TGSI_FILE_BUFFER &&
 	    inst->Memory.Texture == TGSI_TEXTURE_BUFFER) {
 		LLVMValueRef buf_args[7];
 		unsigned num_args = 0;
 
+		buf_args[num_args++] = args.data[0];
+		if (inst->Instruction.Opcode == TGSI_OPCODE_ATOMCAS)
+			buf_args[num_args++] = args.data[1];
+
+		buf_args[num_args++] = args.resource;
+		buf_args[num_args++] = vindex;
+		buf_args[num_args++] = voffset;
+		buf_args[num_args++] = ctx->i32_0; /* soffset */
+		buf_args[num_args++] = LLVMConstInt(ctx->i32, args.cache_policy & ac_slc, 0);
+
+		char intrinsic_name[64];
+		snprintf(intrinsic_name, sizeof(intrinsic_name),
+			 "llvm.amdgcn.struct.buffer.atomic.%s", action->intr_name);
+		emit_data->output[emit_data->chan] =
+			ac_to_float(&ctx->ac,
+				    ac_build_intrinsic(&ctx->ac, intrinsic_name,
+						       ctx->i32, buf_args, num_args, 0));
+		return;
+	}
+
+	if (inst->Src[0].Register.File == TGSI_FILE_BUFFER ||
+	    (HAVE_LLVM < 0x0800 &&
+	     inst->Memory.Texture == TGSI_TEXTURE_BUFFER)) {
+		LLVMValueRef buf_args[7];
+		unsigned num_args = 0;
+
 		buf_args[num_args++] = args.data[0];
 		if (inst->Instruction.Opcode == TGSI_OPCODE_ATOMCAS)
 			buf_args[num_args++] = args.data[1];
@@ -866,6 +903,12 @@ static void atomic_emit(
 		case TGSI_OPCODE_ATOMUMAX: args.atomic = ac_atomic_umax; break;
 		case TGSI_OPCODE_ATOMIMIN: args.atomic = ac_atomic_smin; break;
 		case TGSI_OPCODE_ATOMIMAX: args.atomic = ac_atomic_smax; break;
+		case TGSI_OPCODE_ATOMINC_WRAP:
+			args.atomic = ac_atomic_inc_wrap;
+			break;
+		case TGSI_OPCODE_ATOMDEC_WRAP:
+			args.atomic = ac_atomic_dec_wrap;
+			break;
 		default: unreachable("unhandled image atomic");
 		}
 	}
@@ -882,7 +925,7 @@ static LLVMValueRef fix_resinfo(struct si_shader_context *ctx,
 	LLVMBuilderRef builder = ctx->ac.builder;
 
 	/* 1D textures are allocated and used as 2D on GFX9. */
-	if (ctx->screen->info.chip_class >= GFX9 &&
+	if (ctx->screen->info.chip_class == GFX9 &&
 	    (target == TGSI_TEXTURE_1D_ARRAY ||
 	     target == TGSI_TEXTURE_SHADOW1D_ARRAY)) {
 		LLVMValueRef layers =
@@ -961,6 +1004,7 @@ static void resq_emit(
 	args.opcode = ac_image_get_resinfo;
 	args.dim = ac_texture_dim_from_tgsi_target(ctx->screen, target);
 	args.dmask = 0xf;
+	args.attributes = AC_FUNC_ATTR_READNONE;
 
 	if (inst->Instruction.Opcode == TGSI_OPCODE_TXQ) {
 		tex_fetch_ptrs(bld_base, emit_data, &args.resource, NULL, NULL);
@@ -1007,6 +1051,13 @@ LLVMValueRef si_load_sampler_desc(struct si_shader_context *ctx,
 		list = LLVMBuildPointerCast(builder, list,
 					    ac_array_in_const32_addr_space(ctx->v4i32), "");
 		break;
+	case AC_DESC_PLANE_0:
+	case AC_DESC_PLANE_1:
+	case AC_DESC_PLANE_2:
+		/* Only used for the multiplane image support for Vulkan. Should
+		 * never be reached in radeonsi.
+		 */
+		unreachable("Plane descriptor requested in radeonsi.");
 	}
 
 	return ac_build_load_to_sgpr(&ctx->ac, list, index);
@@ -1014,13 +1065,13 @@ LLVMValueRef si_load_sampler_desc(struct si_shader_context *ctx,
 
 /* Disable anisotropic filtering if BASE_LEVEL == LAST_LEVEL.
  *
- * SI-CI:
+ * GFX6-GFX7:
  *   If BASE_LEVEL == LAST_LEVEL, the shader must disable anisotropic
  *   filtering manually. The driver sets img7 to a mask clearing
  *   MAX_ANISO_RATIO if BASE_LEVEL == LAST_LEVEL. The shader must do:
  *     s_and_b32 samp0, samp0, img7
  *
- * VI:
+ * GFX8:
  *   The ANISO_OVERRIDE sampler field enables this fix in TA.
  */
 static LLVMValueRef sici_fix_sampler_aniso(struct si_shader_context *ctx,
@@ -1028,7 +1079,7 @@ static LLVMValueRef sici_fix_sampler_aniso(struct si_shader_context *ctx,
 {
 	LLVMValueRef img7, samp0;
 
-	if (ctx->screen->info.chip_class >= VI)
+	if (ctx->screen->info.chip_class >= GFX8)
 		return samp;
 
 	img7 = LLVMBuildExtractElement(ctx->ac.builder, res,
@@ -1158,10 +1209,10 @@ si_lower_gather4_integer(struct si_shader_context *ctx,
 
 	uint32_t wa_num_format =
 		return_type == TGSI_RETURN_TYPE_UINT ?
-		S_008F14_NUM_FORMAT_GFX6(V_008F14_IMG_NUM_FORMAT_USCALED) :
-		S_008F14_NUM_FORMAT_GFX6(V_008F14_IMG_NUM_FORMAT_SSCALED);
+		S_008F14_NUM_FORMAT(V_008F14_IMG_NUM_FORMAT_USCALED) :
+		S_008F14_NUM_FORMAT(V_008F14_IMG_NUM_FORMAT_SSCALED);
 	wa_formats = LLVMBuildAnd(builder, formats,
-				  LLVMConstInt(ctx->i32, C_008F14_NUM_FORMAT_GFX6, false),
+				  LLVMConstInt(ctx->i32, C_008F14_NUM_FORMAT, false),
 				  "");
 	wa_formats = LLVMBuildOr(builder, wa_formats,
 				 LLVMConstInt(ctx->i32, wa_num_format, false), "");
@@ -1191,6 +1242,7 @@ si_lower_gather4_integer(struct si_shader_context *ctx,
 	resinfo.sampler = args->sampler;
 	resinfo.lod = ctx->ac.i32_0;
 	resinfo.dmask = 0xf;
+	resinfo.attributes = AC_FUNC_ATTR_READNONE;
 
 	LLVMValueRef texsize =
 		fix_resinfo(ctx, target,
@@ -1289,7 +1341,7 @@ static void build_tex_intrinsic(const struct lp_build_tgsi_action *action,
 							args.resource, vindex,
 							ctx->i32_0,
-							num_channels, false, true);
+							num_channels, 0, true);
 		emit_data->output[emit_data->chan] =
 			ac_build_expand_to_vec4(&ctx->ac, result, num_channels);
 		return;
@@ -1358,9 +1410,11 @@ static void build_tex_intrinsic(const struct lp_build_tgsi_action *action,
 		 *
 		 * TC-compatible HTILE promotes Z16 and Z24 to Z32_FLOAT,
 		 * so the depth comparison value isn't clamped for Z16 and
-		 * Z24 anymore. Do it manually here.
+		 * Z24 anymore. Do it manually here for GFX8-9; GFX10 has
+		 * an explicitly clamped 32-bit float format.
 		 */
-		if (ctx->screen->info.chip_class >= VI) {
+		if (ctx->screen->info.chip_class >= GFX8 &&
+		    ctx->screen->info.chip_class <= GFX9) {
 			LLVMValueRef upgraded;
 			LLVMValueRef clamped;
 			upgraded = LLVMBuildExtractElement(ctx->ac.builder, args.sampler,
@@ -1408,7 +1462,7 @@ static void build_tex_intrinsic(const struct lp_build_tgsi_action *action,
 			num_src_deriv_channels = 1;
 
 			/* 1D textures are allocated and used as 2D on GFX9. */
-			if (ctx->screen->info.chip_class >= GFX9) {
+			if (ctx->screen->info.chip_class == GFX9) {
 				num_dst_deriv_channels = 2;
 			} else {
 				num_dst_deriv_channels = 1;
@@ -1444,13 +1498,13 @@ static void build_tex_intrinsic(const struct lp_build_tgsi_action *action,
 	} else if (tgsi_is_array_sampler(target) &&
 		   opcode != TGSI_OPCODE_TXF &&
 		   opcode != TGSI_OPCODE_TXF_LZ &&
-		   ctx->screen->info.chip_class <= VI) {
+		   ctx->screen->info.chip_class <= GFX8) {
 		unsigned array_coord = target == TGSI_TEXTURE_1D_ARRAY ? 1 : 2;
 		args.coords[array_coord] = ac_build_round(&ctx->ac, args.coords[array_coord]);
 	}
 
 	/* 1D textures are allocated and used as 2D on GFX9. */
-	if (ctx->screen->info.chip_class >= GFX9) {
+	if (ctx->screen->info.chip_class == GFX9) {
 		LLVMValueRef filler;
 
 		/* Use 0.5, so that we don't sample the border color.
 		 */
@@ -1484,8 +1538,9 @@ static void build_tex_intrinsic(const struct lp_build_tgsi_action *action,
 		}
 	}
 
-	if (target == TGSI_TEXTURE_2D_MSAA ||
-	    target == TGSI_TEXTURE_2D_ARRAY_MSAA) {
+	if ((target == TGSI_TEXTURE_2D_MSAA ||
+	     target == TGSI_TEXTURE_2D_ARRAY_MSAA) &&
+	    !(ctx->screen->debug_flags & DBG(NO_FMASK))) {
 		ac_apply_fmask_to_sample(&ctx->ac, fmask_ptr, args.coords,
 					 target == TGSI_TEXTURE_2D_ARRAY_MSAA);
 	}
@@ -1601,7 +1656,7 @@ static void build_tex_intrinsic(const struct lp_build_tgsi_action *action,
 	/* The hardware needs special lowering for Gather4 with integer formats. */
 	LLVMValueRef gather4_int_result_workaround = NULL;
 
-	if (ctx->screen->info.chip_class <= VI &&
+	if (ctx->screen->info.chip_class <= GFX8 &&
 	    opcode == TGSI_OPCODE_TG4) {
 		assert(inst->Texture.ReturnType != TGSI_RETURN_TYPE_UNKNOWN);
 
@@ -1650,11 +1705,8 @@ static void si_llvm_emit_txqs(
 	emit_data->output[emit_data->chan] = samples;
 }
 
-static void si_llvm_emit_fbfetch(const struct lp_build_tgsi_action *action,
-				 struct lp_build_tgsi_context *bld_base,
-				 struct lp_build_emit_data *emit_data)
+static LLVMValueRef si_llvm_emit_fbfetch(struct si_shader_context *ctx)
 {
-	struct si_shader_context *ctx = si_shader_context(bld_base);
 	struct ac_image_args args = {};
 	LLVMValueRef ptr, image, fmask;
 
@@ -1684,7 +1736,8 @@ static void si_llvm_emit_fbfetch(const struct lp_build_tgsi_action *action,
 	if (ctx->shader->key.mono.u.ps.fbfetch_msaa)
 		args.coords[chan++] = si_get_sample_id(ctx);
 
-	if (ctx->shader->key.mono.u.ps.fbfetch_msaa) {
+	if (ctx->shader->key.mono.u.ps.fbfetch_msaa &&
+	    !(ctx->screen->debug_flags & DBG(NO_FMASK))) {
 		fmask = ac_build_load_to_sgpr(&ctx->ac, ptr,
 			LLVMConstInt(ctx->i32, SI_PS_IMAGE_COLORBUF0_FMASK / 2, 0));
 
@@ -1695,6 +1748,8 @@ static void si_llvm_emit_fbfetch(const struct lp_build_tgsi_action *action,
 	args.opcode = ac_image_load;
 	args.resource = image;
 	args.dmask = 0xf;
+	args.attributes = AC_FUNC_ATTR_READNONE;
+
 	if (ctx->shader->key.mono.u.ps.fbfetch_msaa)
 		args.dim = ctx->shader->key.mono.u.ps.fbfetch_layered ?
 			ac_image_2darraymsaa : ac_image_2dmsaa;
@@ -1705,8 +1760,23 @@ static void si_llvm_emit_fbfetch(const struct lp_build_tgsi_action *action,
 		args.dim = ctx->shader->key.mono.u.ps.fbfetch_layered ?
 			ac_image_2darray : ac_image_2d;
 
-	emit_data->output[emit_data->chan] =
-		ac_build_image_opcode(&ctx->ac, &args);
+	return ac_build_image_opcode(&ctx->ac, &args);
+}
+
+static void si_tgsi_emit_fbfetch(const struct lp_build_tgsi_action *action,
+				 struct lp_build_tgsi_context *bld_base,
+				 struct lp_build_emit_data *emit_data)
+{
+	struct si_shader_context *ctx = si_shader_context(bld_base);
+
+	emit_data->output[emit_data->chan] = si_llvm_emit_fbfetch(ctx);
+}
+
+LLVMValueRef si_nir_emit_fbfetch(struct ac_shader_abi *abi)
+{
+	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
+
+	return si_llvm_emit_fbfetch(ctx);
 }
 
 /**
@@ -1732,7 +1802,7 @@ void si_shader_context_init_mem(struct si_shader_context *ctx)
 	bld_base->op_actions[TGSI_OPCODE_LODQ].emit = build_tex_intrinsic;
 	bld_base->op_actions[TGSI_OPCODE_TXQS].emit = si_llvm_emit_txqs;
 
-	bld_base->op_actions[TGSI_OPCODE_FBFETCH].emit = si_llvm_emit_fbfetch;
+	bld_base->op_actions[TGSI_OPCODE_FBFETCH].emit = si_tgsi_emit_fbfetch;
 
 	bld_base->op_actions[TGSI_OPCODE_LOAD].emit = load_emit;
 	bld_base->op_actions[TGSI_OPCODE_STORE].emit = store_emit;
@@ -1758,4 +1828,8 @@ void si_shader_context_init_mem(struct si_shader_context *ctx)
 	bld_base->op_actions[TGSI_OPCODE_ATOMIMIN].intr_name = "smin";
 	bld_base->op_actions[TGSI_OPCODE_ATOMIMAX].emit = atomic_emit;
 	bld_base->op_actions[TGSI_OPCODE_ATOMIMAX].intr_name = "smax";
+	bld_base->op_actions[TGSI_OPCODE_ATOMINC_WRAP].emit = atomic_emit;
+	bld_base->op_actions[TGSI_OPCODE_ATOMINC_WRAP].intr_name = "inc";
+	bld_base->op_actions[TGSI_OPCODE_ATOMDEC_WRAP].emit = atomic_emit;
+	bld_base->op_actions[TGSI_OPCODE_ATOMDEC_WRAP].intr_name = "dec";
 }
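
The recurring mechanical change in this patch is the move from passing separate glc/slc booleans to threading one cache-policy bitmask through the ac_build_* helpers. The following is a minimal standalone sketch of that pattern, not code from the patch: the flag names, helper, and parameter set are hypothetical; only the glc/slc semantics mirror the get_cache_policy()/args.cache_policy logic visible in the hunks above.

/* Hypothetical stand-ins for the ac_glc/ac_slc bits used in the patch. */
#include <stdbool.h>
#include <stdio.h>

enum { GLC = 1u << 0, SLC = 1u << 1 };

static unsigned make_cache_policy(bool atomic, bool writeonly_memory,
                                  bool coherent_or_volatile, bool streaming)
{
	unsigned policy = 0;

	/* GLC: bypass L1, e.g. for write-only or coherent/volatile access
	 * (atomics already bypass it, so they don't need the flag). */
	if (!atomic && (writeonly_memory || coherent_or_volatile))
		policy |= GLC;

	/* SLC: streaming access that should not pollute L2. */
	if (streaming)
		policy |= SLC;

	return policy;
}

int main(void)
{
	unsigned policy = make_cache_policy(false, true, false, false);

	/* Callers now hand the whole mask to the load/store helper, as in the
	 * patched ac_build_buffer_load() calls, instead of unpacking it into
	 * !!(policy & GLC) and !!(policy & SLC) at every call site. */
	printf("glc=%u slc=%u\n", policy & GLC ? 1u : 0u, policy & SLC ? 1u : 0u);
	return 0;
}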