.lower_txp = ~0,
.lower_tex_without_implicit_lod =
(quirks & MIDGARD_EXPLICIT_LOD),
+ .lower_tg4_broadcom_swizzle = true,
/* TODO: we have native gradient.. */
.lower_txd = true,
/* Once we have the NIR mask, we need to normalize to work in 32-bit space */
unsigned bytemask = pan_to_bytemask(dsize, nir_mask);
- mir_set_bytemask(ins, bytemask);
ins->dest_type = nir_type_uint | dsize;
+ mir_set_bytemask(ins, bytemask);
}
/* Uniforms and UBOs use a shared code path, as uniforms are just (slightly
return true;
}
+/* Select the hardware texture mode for a NIR texture instruction.
+ * textureGather (tg4) and shadow comparisons each have dedicated
+ * modes, and their combination has its own mode as well, so the
+ * tg4+shadow case must be tested before the individual cases. */
+static enum mali_texture_mode
+mdg_texture_mode(nir_tex_instr *instr)
+{
+        if (instr->op == nir_texop_tg4 && instr->is_shadow)
+                return TEXTURE_GATHER_SHADOW;
+        else if (instr->op == nir_texop_tg4)
+                /* Per-component gather: assumes TEXTURE_GATHER_X..W are
+                 * contiguous enum values — confirm against the
+                 * mali_texture_mode definition. */
+                return TEXTURE_GATHER_X + instr->component;
+        else if (instr->is_shadow)
+                return TEXTURE_SHADOW;
+        else
+                return TEXTURE_NORMAL;
+}
+
static void
emit_texop_native(compiler_context *ctx, nir_tex_instr *instr,
unsigned midgard_texop)
.format = midgard_tex_format(instr->sampler_dim),
.texture_handle = texture_index,
.sampler_handle = sampler_index,
- .shadow = instr->is_shadow,
+ .mode = mdg_texture_mode(instr)
}
};
- if (instr->is_shadow && !instr->is_new_style_shadow)
+ if (instr->is_shadow && !instr->is_new_style_shadow && instr->op != nir_texop_tg4)
for (int i = 0; i < 4; ++i)
ins.swizzle[0][i] = COMPONENT_X;
emit_texop_native(ctx, instr, TEXTURE_OP_NORMAL);
break;
case nir_texop_txl:
+ case nir_texop_tg4:
emit_texop_native(ctx, instr, TEXTURE_OP_LOD);
break;
case nir_texop_txf: