if (tex->src[i].src_type == nir_tex_src_texture_deref ||
tex->src[i].src_type == nir_tex_src_sampler_deref ||
tex->src[i].src_type == nir_tex_src_texture_offset ||
- tex->src[i].src_type == nir_tex_src_sampler_offset)
+ tex->src[i].src_type == nir_tex_src_sampler_offset ||
+ tex->src[i].src_type == nir_tex_src_texture_handle ||
+ tex->src[i].src_type == nir_tex_src_sampler_handle)
num_srcs++;
}
if (tex->src[i].src_type == nir_tex_src_texture_deref ||
tex->src[i].src_type == nir_tex_src_sampler_deref ||
tex->src[i].src_type == nir_tex_src_texture_offset ||
- tex->src[i].src_type == nir_tex_src_sampler_offset) {
+ tex->src[i].src_type == nir_tex_src_sampler_offset ||
+ tex->src[i].src_type == nir_tex_src_texture_handle ||
+ tex->src[i].src_type == nir_tex_src_sampler_handle) {
nir_src_copy(&txs->src[idx].src, &tex->src[i].src, txs);
txs->src[idx].src_type = tex->src[i].src_type;
idx++;
tex->src[i].src_type == nir_tex_src_texture_deref ||
tex->src[i].src_type == nir_tex_src_sampler_deref ||
tex->src[i].src_type == nir_tex_src_texture_offset ||
- tex->src[i].src_type == nir_tex_src_sampler_offset)
+ tex->src[i].src_type == nir_tex_src_sampler_offset ||
+ tex->src[i].src_type == nir_tex_src_texture_handle ||
+ tex->src[i].src_type == nir_tex_src_sampler_handle)
num_srcs++;
}
tex->src[i].src_type == nir_tex_src_texture_deref ||
tex->src[i].src_type == nir_tex_src_sampler_deref ||
tex->src[i].src_type == nir_tex_src_texture_offset ||
- tex->src[i].src_type == nir_tex_src_sampler_offset) {
+ tex->src[i].src_type == nir_tex_src_sampler_offset ||
+ tex->src[i].src_type == nir_tex_src_texture_handle ||
+ tex->src[i].src_type == nir_tex_src_sampler_handle) {
nir_src_copy(&tql->src[idx].src, &tex->src[i].src, tql);
tql->src[idx].src_type = tex->src[i].src_type;
idx++;
nir_ssa_def *y, nir_ssa_def *u, nir_ssa_def *v,
nir_ssa_def *a)
{
- nir_const_value m[3] = {
- { .f32 = { 1.0f, 0.0f, 1.59602678f, 0.0f } },
- { .f32 = { 1.0f, -0.39176229f, -0.81296764f, 0.0f } },
- { .f32 = { 1.0f, 2.01723214f, 0.0f, 0.0f } }
+ nir_const_value m[3][4] = {
+ { { .f32 = 1.16438356f }, { .f32 = 1.16438356f }, { .f32 = 1.16438356f }, { .f32 = 0.0f } },
+ { { .f32 = 0.0f }, { .f32 = -0.39176229f }, { .f32 = 2.01723214f }, { .f32 = 0.0f } },
+ { { .f32 = 1.59602678f }, { .f32 = -0.81296764f }, { .f32 = 0.0f }, { .f32 = 0.0f } },
};
- nir_ssa_def *yuv =
+ nir_ssa_def *offset =
nir_vec4(b,
- nir_fmul(b, nir_imm_float(b, 1.16438356f),
- nir_fadd(b, y, nir_imm_float(b, -16.0f / 255.0f))),
- nir_channel(b, nir_fadd(b, u, nir_imm_float(b, -128.0f / 255.0f)), 0),
- nir_channel(b, nir_fadd(b, v, nir_imm_float(b, -128.0f / 255.0f)), 0),
- nir_imm_float(b, 0.0));
+ nir_imm_float(b, -0.874202214f),
+ nir_imm_float(b, 0.531667820f),
+ nir_imm_float(b, -1.085630787f),
+ a);
- nir_ssa_def *red = nir_fdot4(b, yuv, nir_build_imm(b, 4, 32, m[0]));
- nir_ssa_def *green = nir_fdot4(b, yuv, nir_build_imm(b, 4, 32, m[1]));
- nir_ssa_def *blue = nir_fdot4(b, yuv, nir_build_imm(b, 4, 32, m[2]));
-
- nir_ssa_def *result = nir_vec4(b, red, green, blue, a);
+ nir_ssa_def *result =
+ nir_ffma(b, y, nir_build_imm(b, 4, 32, m[0]),
+ nir_ffma(b, u, nir_build_imm(b, 4, 32, m[1]),
+ nir_ffma(b, v, nir_build_imm(b, 4, 32, m[2]),
+ offset)));
nir_ssa_def_rewrite_uses(&tex->dest.ssa, nir_src_for_ssa(result));
}
Q = nir_bcsel(b, cond_z,
p,
nir_bcsel(b, cond_y,
- nir_swizzle(b, p, xzy, 3, false),
- nir_swizzle(b, p, yzx, 3, false)));
+ nir_swizzle(b, p, xzy, 3),
+ nir_swizzle(b, p, yzx, 3)));
dQdx = nir_bcsel(b, cond_z,
dPdx,
nir_bcsel(b, cond_y,
- nir_swizzle(b, dPdx, xzy, 3, false),
- nir_swizzle(b, dPdx, yzx, 3, false)));
+ nir_swizzle(b, dPdx, xzy, 3),
+ nir_swizzle(b, dPdx, yzx, 3)));
dQdy = nir_bcsel(b, cond_z,
dPdy,
nir_bcsel(b, cond_y,
- nir_swizzle(b, dPdy, xzy, 3, false),
- nir_swizzle(b, dPdy, yzx, 3, false)));
+ nir_swizzle(b, dPdy, xzy, 3),
+ nir_swizzle(b, dPdy, yzx, 3)));
/* 2. quotient rule */
static nir_ssa_def *
get_zero_or_one(nir_builder *b, nir_alu_type type, uint8_t swizzle_val)
{
- nir_const_value v;
+ nir_const_value v[4];
memset(&v, 0, sizeof(v));
if (swizzle_val == 4) {
- v.u32[0] = v.u32[1] = v.u32[2] = v.u32[3] = 0;
+ v[0].u32 = v[1].u32 = v[2].u32 = v[3].u32 = 0;
} else {
assert(swizzle_val == 5);
if (type == nir_type_float)
- v.f32[0] = v.f32[1] = v.f32[2] = v.f32[3] = 1.0;
+ v[0].f32 = v[1].f32 = v[2].f32 = v[3].f32 = 1.0;
else
- v.u32[0] = v.u32[1] = v.u32[2] = v.u32[3] = 1;
+ v[0].u32 = v[1].u32 = v[2].u32 = v[3].u32 = 1;
}
return nir_build_imm(b, 4, 32, v);
assert(nir_tex_instr_dest_size(tex) == 4);
unsigned swiz[4] = { 2, 3, 1, 0 };
- nir_ssa_def *swizzled = nir_swizzle(b, &tex->dest.ssa, swiz, 4, false);
+ nir_ssa_def *swizzled = nir_swizzle(b, &tex->dest.ssa, swiz, 4);
nir_ssa_def_rewrite_uses_after(&tex->dest.ssa, nir_src_for_ssa(swizzled),
swizzled->parent_instr);
swizzle[2] < 4 && swizzle[3] < 4) {
unsigned swiz[4] = { swizzle[0], swizzle[1], swizzle[2], swizzle[3] };
/* We have no 0s or 1s, just emit a swizzling MOV */
- swizzled = nir_swizzle(b, &tex->dest.ssa, swiz, 4, false);
+ swizzled = nir_swizzle(b, &tex->dest.ssa, swiz, 4);
} else {
nir_ssa_def *srcs[4];
for (unsigned i = 0; i < 4; i++) {
return sampler_index < max;
}
+/* Lower a tg4 (textureGatherOffsets-style gather) that carries explicit
+ * per-component tg4_offsets into four separate tg4 instructions, one per
+ * offset, each passing its offset as a regular nir_tex_src_offset source.
+ * The four per-copy results are recombined into a single vec4 that replaces
+ * the original instruction's destination.  Always returns true (progress).
+ *
+ * Preconditions (asserted): the instruction is a tg4 with explicit
+ * tg4_offsets and no pre-existing nir_tex_src_offset source.
+ */
+static bool
+lower_tg4_offsets(nir_builder *b, nir_tex_instr *tex)
+{
+ assert(tex->op == nir_texop_tg4);
+ assert(nir_tex_instr_has_explicit_tg4_offsets(tex));
+ assert(nir_tex_instr_src_index(tex, nir_tex_src_offset) == -1);
+
+ b->cursor = nir_after_instr(&tex->instr);
+
+ nir_ssa_def *dest[4];
+ for (unsigned i = 0; i < 4; ++i) {
+ /* One extra src slot for the nir_tex_src_offset added below. */
+ nir_tex_instr *tex_copy = nir_tex_instr_create(b->shader, tex->num_srcs + 1);
+ tex_copy->op = tex->op;
+ tex_copy->coord_components = tex->coord_components;
+ tex_copy->sampler_dim = tex->sampler_dim;
+ tex_copy->is_array = tex->is_array;
+ tex_copy->is_shadow = tex->is_shadow;
+ tex_copy->is_new_style_shadow = tex->is_new_style_shadow;
+ tex_copy->component = tex->component;
+ tex_copy->dest_type = tex->dest_type;
+
+ /* Duplicate every source of the original instruction. */
+ for (unsigned j = 0; j < tex->num_srcs; ++j) {
+ nir_src_copy(&tex_copy->src[j].src, &tex->src[j].src, tex_copy);
+ tex_copy->src[j].src_type = tex->src[j].src_type;
+ }
+
+ /* Append this iteration's tg4 offset as an ordinary offset source. */
+ nir_tex_src src;
+ src.src = nir_src_for_ssa(nir_imm_ivec2(b, tex->tg4_offsets[i][0],
+ tex->tg4_offsets[i][1]));
+ src.src_type = nir_tex_src_offset;
+ tex_copy->src[tex_copy->num_srcs - 1] = src;
+
+ /* NOTE(review): dest bit size is hard-coded to 32 rather than taken
+  * from tex->dest — confirm no 16-bit gather results reach this pass. */
+ nir_ssa_dest_init(&tex_copy->instr, &tex_copy->dest,
+ nir_tex_instr_dest_size(tex), 32, NULL);
+
+ nir_builder_instr_insert(b, &tex_copy->instr);
+
+ /* Take component 3 of each per-offset gather; presumably the texel
+  * selected by that offset per the GL gather component ordering —
+  * TODO confirm against the textureGatherOffsets spec. */
+ dest[i] = nir_channel(b, &tex_copy->dest.ssa, 3);
+ }
+
+ /* Recombine the four gathered texels and retire the original tg4. */
+ nir_ssa_def *res = nir_vec4(b, dest[0], dest[1], dest[2], dest[3]);
+ nir_ssa_def_rewrite_uses(&tex->dest.ssa, nir_src_for_ssa(res));
+ nir_instr_remove(&tex->instr);
+
+ return true;
+}
+
static bool
nir_lower_tex_block(nir_block *block, nir_builder *b,
const nir_lower_tex_options *options)
(options->lower_txd_shadow && tex->is_shadow) ||
(options->lower_txd_shadow_clamp && tex->is_shadow && has_min_lod) ||
(options->lower_txd_offset_clamp && has_offset && has_min_lod) ||
+ (options->lower_txd_clamp_bindless_sampler && has_min_lod &&
+ nir_tex_instr_src_index(tex, nir_tex_src_sampler_handle) != -1) ||
(options->lower_txd_clamp_if_sampler_index_not_lt_16 &&
has_min_lod && !sampler_index_lt(tex, 16)) ||
(options->lower_txd_cube_map &&
continue;
}
+ bool shader_supports_implicit_lod =
+ b->shader->info.stage == MESA_SHADER_FRAGMENT ||
+ (b->shader->info.stage == MESA_SHADER_COMPUTE &&
+ b->shader->info.cs.derivative_group != DERIVATIVE_GROUP_NONE);
+
/* TXF, TXS and TXL require a LOD but not everything we implement using those
* three opcodes provides one. Provide a default LOD of 0.
*/
if ((nir_tex_instr_src_index(tex, nir_tex_src_lod) == -1) &&
(tex->op == nir_texop_txf || tex->op == nir_texop_txs ||
tex->op == nir_texop_txl || tex->op == nir_texop_query_levels ||
- (tex->op == nir_texop_tex &&
- b->shader->info.stage != MESA_SHADER_FRAGMENT))) {
+ (tex->op == nir_texop_tex && !shader_supports_implicit_lod))) {
b->cursor = nir_before_instr(&tex->instr);
nir_tex_instr_add_src(tex, nir_tex_src_lod, nir_src_for_ssa(nir_imm_int(b, 0)));
+ if (tex->op == nir_texop_tex && options->lower_tex_without_implicit_lod)
+ tex->op = nir_texop_txl;
progress = true;
continue;
}
+
+ /* has to happen after all the other lowerings as the original tg4 gets
+ * replaced by 4 tg4 instructions.
+ */
+ if (tex->op == nir_texop_tg4 &&
+ nir_tex_instr_has_explicit_tg4_offsets(tex) &&
+ options->lower_tg4_offsets) {
+ progress |= lower_tg4_offsets(b, tex);
+ continue;
+ }
}
return progress;