for (unsigned i = 0; i < dest_size; i++)
nir_dest[i] = offset(dst, bld, i);
- bool is_cube_array = instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE &&
- instr->is_array;
-
if (instr->op == nir_texop_query_levels) {
/* # levels is in .w */
nir_dest[0] = offset(dst, bld, 3);
- } else if (instr->op == nir_texop_txs && dest_size >= 3 &&
- (devinfo->gen < 7 || is_cube_array)) {
+ } else if (instr->op == nir_texop_txs &&
+ dest_size >= 3 && devinfo->gen < 7) {
+ /* Gen4-6 return 0 instead of 1 for single layer surfaces. */
fs_reg depth = offset(dst, bld, 2);
- fs_reg fixed_depth = vgrf(glsl_type::int_type);
-
- if (is_cube_array) {
- /* fixup #layers for cube map arrays */
- bld.emit(SHADER_OPCODE_INT_QUOTIENT, fixed_depth, depth, brw_imm_d(6));
- } else if (devinfo->gen < 7) {
- /* Gen4-6 return 0 instead of 1 for single layer surfaces. */
- bld.emit_minmax(fixed_depth, depth, brw_imm_d(1), BRW_CONDITIONAL_GE);
- }
-
- nir_dest[2] = fixed_depth;
+ nir_dest[2] = vgrf(glsl_type::int_type);
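+ /* BRW_CONDITIONAL_GE turns emit_minmax into a max(), clamping the reported depth to at least 1. */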
+ bld.emit_minmax(nir_dest[2], depth, brw_imm_d(1), BRW_CONDITIONAL_GE);
}
bld.LOAD_PAYLOAD(get_nir_dest(instr->dest), nir_dest, dest_size, 0);
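For illustration only, and not part of this patch: a minimal C sketch of what the retained Gen4-6 path computes for the layer component of a txs result. The helper name is made up for this note.

/* Gen4-6 report 0 instead of 1 for single-layer surfaces, so the
 * reported depth is clamped to a minimum of 1.
 */
static inline int
clamp_txs_depth_gen4_6(int hw_depth)
{
   return hw_depth >= 1 ? hw_depth : 1;
}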
uint32_t constant_offset,
src_reg offset_value,
src_reg mcs,
- bool is_cube_array,
uint32_t surface, src_reg surface_reg,
uint32_t sampler, src_reg sampler_reg);
ir_texture_opcode op = ir_texture_opcode_for_nir_texop(instr->op);
- bool is_cube_array =
- instr->op == nir_texop_txs &&
- instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE &&
- instr->is_array;
-
emit_texture(op, dest, dest_type, coordinate, instr->coord_components,
shadow_comparitor,
lod, lod2, sample_index,
- constant_offset, offset_value,
- mcs, is_cube_array,
+ constant_offset, offset_value, mcs,
texture, texture_reg, sampler, sampler_reg);
}
uint32_t constant_offset,
src_reg offset_value,
src_reg mcs,
- bool is_cube_array,
uint32_t surface,
src_reg surface_reg,
uint32_t sampler,
- /* fixup num layers (z) for cube arrays: hardware returns faces * layers;
- * spec requires layers.
- */
- if (op == ir_txs) {
- if (is_cube_array) {
- emit_math(SHADER_OPCODE_INT_QUOTIENT,
- writemask(inst->dst, WRITEMASK_Z),
- src_reg(inst->dst), brw_imm_d(6));
- } else if (devinfo->gen < 7) {
- /* Gen4-6 return 0 instead of 1 for single layer surfaces. */
- emit_minmax(BRW_CONDITIONAL_GE, writemask(inst->dst, WRITEMASK_Z),
- src_reg(inst->dst), brw_imm_d(1));
- }
+ if (op == ir_txs && devinfo->gen < 7) {
+ /* Gen4-6 return 0 instead of 1 for single layer surfaces. */
+ emit_minmax(BRW_CONDITIONAL_GE, writemask(inst->dst, WRITEMASK_Z),
+ src_reg(inst->dst), brw_imm_d(1));
}
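For context, a standalone sketch of the relationship that the removed SHADER_OPCODE_INT_QUOTIENT fixup implemented. Per the old comment, the hardware reported faces * layers for cube map arrays while the GL spec asks for layers, hence a division by 6. Illustrative name only, not backend code.

/* A cube map array with N layers is stored as 6*N two-dimensional
 * faces, so the raw query result had to be divided by 6.
 */
static inline int
cube_array_layers_from_faces(int faces_times_layers)
{
   return faces_times_layers / 6;
}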
if (devinfo->gen == 6 && op == ir_tg4) {
/* If this is a view with restricted NumLayers, then our effective depth
* is not just the miptree depth.
*/
- const unsigned mt_num_layers =
- mt->logical_depth0 * (_mesa_is_cube_map_texture(mt->target) ? 6 : 1);
const unsigned view_num_layers =
(obj->Immutable && obj->Target != GL_TEXTURE_3D) ? obj->NumLayers :
- mt_num_layers;
+ mt->logical_depth0;
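As a usage-level illustration of the restricted-NumLayers case the comment above refers to (assuming an existing immutable-format GL_TEXTURE_2D_ARRAY named array_tex; the names here are hypothetical):

/* Create a view exposing only layers 4..7 of a 12-layer array texture,
 * so the view's NumLayers is 4 rather than the miptree's full depth.
 */
GLuint view;
glGenTextures(1, &view);
glTextureView(view, GL_TEXTURE_2D_ARRAY, array_tex, GL_RGBA8,
              0, 1,  /* minlevel, numlevels */
              4, 4); /* minlayer, numlayers */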
/* Handling GL_ALPHA as a surface format override breaks 1.30+ style
* texturing functions that return a float, as our code generation always