glsl: Remove ir_binop_pack_half_2x16_split and ir_unop_unpack_half_2x16_split_x/y

i965/fs was the only consumer, and we're now doing the lowering in NIR.
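
The GLSL IR path lowered packHalf2x16/unpackHalf2x16 into these split
opcodes so that scalar backends never had to handle the vec2 <-> uint
component-count change in a single operation. As a rough sketch only
(hypothetical helper names, not the actual Mesa lowering pass), the same
transform at the NIR level can be written with nir_builder using the NIR
split opcodes, which stay around after this change:

    #include "nir_builder.h"

    /* packHalf2x16(v) -> packHalf2x16_split(v.x, v.y) */
    static nir_ssa_def *
    lower_pack_half_2x16_to_split(nir_builder *b, nir_ssa_def *vec2_src)
    {
       return nir_pack_half_2x16_split(b,
                                       nir_channel(b, vec2_src, 0),
                                       nir_channel(b, vec2_src, 1));
    }

    /* unpackHalf2x16(u) -> vec2(split_x(u), split_y(u)) */
    static nir_ssa_def *
    lower_unpack_half_2x16_to_split(nir_builder *b, nir_ssa_def *uint_src)
    {
       return nir_vec2(b,
                       nir_unpack_half_2x16_split_x(b, uint_src),
                       nir_unpack_half_2x16_split_y(b, uint_src));
    }
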
Reviewed-by: Iago Toral Quiroga <itoral@igalia.com>
break;
case ir_unop_noise:
- case ir_unop_unpack_half_2x16_split_x:
- case ir_unop_unpack_half_2x16_split_y:
this->type = glsl_type::float_type;
break;
this->type = op0->type->get_base_type();
break;
- case ir_binop_pack_half_2x16_split:
- this->type = glsl_type::uint_type;
- break;
-
case ir_binop_imul_high:
case ir_binop_carry:
case ir_binop_borrow:
"unpackUnorm2x16",
"unpackUnorm4x8",
"unpackHalf2x16",
- "unpackHalf2x16_split_x",
- "unpackHalf2x16_split_y",
"bitfield_reverse",
"bit_count",
"find_msb",
"min",
"max",
"pow",
- "packHalf2x16_split",
"ubo_load",
"ldexp",
"vector_extract",
ir_unop_unpack_half_2x16,
/*@}*/
- /**
- * \name Lowered floating point unpacking operations.
- *
- * \see lower_packing_builtins_visitor::split_unpack_half_2x16
- */
- /*@{*/
- ir_unop_unpack_half_2x16_split_x,
- ir_unop_unpack_half_2x16_split_y,
- /*@}*/
-
/**
* \name Bit operations, part of ARB_gpu_shader5.
*/
ir_binop_pow,
- /**
- * \name Lowered floating point packing operations.
- *
- * \see lower_packing_builtins_visitor::split_pack_half_2x16
- */
- /*@{*/
- ir_binop_pack_half_2x16_split,
- /*@}*/
-
/**
* Load a value the size of a given GLSL type from a uniform block.
*
LOWER_PACK_HALF_2x16 = 0x0010,
LOWER_UNPACK_HALF_2x16 = 0x0020,
- LOWER_PACK_HALF_2x16_TO_SPLIT = 0x0040,
- LOWER_UNPACK_HALF_2x16_TO_SPLIT = 0x0080,
+ LOWER_PACK_SNORM_4x8 = 0x0040,
+ LOWER_UNPACK_SNORM_4x8 = 0x0080,
- LOWER_PACK_SNORM_4x8 = 0x0100,
- LOWER_UNPACK_SNORM_4x8 = 0x0200,
+ LOWER_PACK_UNORM_4x8 = 0x0100,
+ LOWER_UNPACK_UNORM_4x8 = 0x0200,
- LOWER_PACK_UNORM_4x8 = 0x0400,
- LOWER_UNPACK_UNORM_4x8 = 0x0800,
-
- LOWER_PACK_USE_BFI = 0x1000,
- LOWER_PACK_USE_BFE = 0x2000,
+ LOWER_PACK_USE_BFI = 0x0400,
+ LOWER_PACK_USE_BFE = 0x0800,
};
bool do_common_optimization(exec_list *ir, bool linked,
assert(ir->operands[0]->type == glsl_type::uint_type);
break;
- case ir_unop_unpack_half_2x16_split_x:
- case ir_unop_unpack_half_2x16_split_y:
- assert(ir->type == glsl_type::float_type);
- assert(ir->operands[0]->type == glsl_type::uint_type);
- break;
-
case ir_unop_unpack_double_2x32:
assert(ir->type == glsl_type::uvec2_type);
assert(ir->operands[0]->type == glsl_type::double_type);
assert(ir->operands[0]->type == ir->operands[1]->type);
break;
- case ir_binop_pack_half_2x16_split:
- assert(ir->type == glsl_type::uint_type);
- assert(ir->operands[0]->type == glsl_type::float_type);
- assert(ir->operands[1]->type == glsl_type::float_type);
- break;
-
case ir_binop_ubo_load:
assert(ir->operands[0]->type == glsl_type::uint_type);
: op_mask(op_mask),
progress(false)
{
- /* Mutually exclusive options. */
- assert(!((op_mask & LOWER_PACK_HALF_2x16) &&
- (op_mask & LOWER_PACK_HALF_2x16_TO_SPLIT)));
-
- assert(!((op_mask & LOWER_UNPACK_HALF_2x16) &&
- (op_mask & LOWER_UNPACK_HALF_2x16_TO_SPLIT)));
-
factory.instructions = &factory_instructions;
}
case LOWER_PACK_HALF_2x16:
*rvalue = lower_pack_half_2x16(op0);
break;
- case LOWER_PACK_HALF_2x16_TO_SPLIT:
- *rvalue = split_pack_half_2x16(op0);
- break;
case LOWER_UNPACK_SNORM_2x16:
*rvalue = lower_unpack_snorm_2x16(op0);
break;
case LOWER_UNPACK_HALF_2x16:
*rvalue = lower_unpack_half_2x16(op0);
break;
- case LOWER_UNPACK_HALF_2x16_TO_SPLIT:
- *rvalue = split_unpack_half_2x16(op0);
- break;
case LOWER_PACK_UNPACK_NONE:
case LOWER_PACK_USE_BFI:
case LOWER_PACK_USE_BFE:
result = op_mask & LOWER_PACK_UNORM_4x8;
break;
case ir_unop_pack_half_2x16:
- result = op_mask & (LOWER_PACK_HALF_2x16 | LOWER_PACK_HALF_2x16_TO_SPLIT);
+ result = op_mask & LOWER_PACK_HALF_2x16;
break;
case ir_unop_unpack_snorm_2x16:
result = op_mask & LOWER_UNPACK_SNORM_2x16;
result = op_mask & LOWER_UNPACK_UNORM_4x8;
break;
case ir_unop_unpack_half_2x16:
- result = op_mask & (LOWER_UNPACK_HALF_2x16 | LOWER_UNPACK_HALF_2x16_TO_SPLIT);
+ result = op_mask & LOWER_UNPACK_HALF_2x16;
break;
default:
result = LOWER_PACK_UNPACK_NONE;
return result;
}
- /**
- * \brief Split packHalf2x16's vec2 operand into two floats.
- *
- * \param vec2_rval is packHalf2x16's input
- * \return a uint rvalue
- *
- * Some code generators, such as the i965 fragment shader, require that all
- * vector expressions be lowered to a sequence of scalar expressions.
- * However, packHalf2x16 cannot be scalarized by the same mechanism as
- * a true vector operation because its input and output have a differing
- * number of vector components.
- *
- * This method scalarizes packHalf2x16 by transforming it from an unary
- * operation having vector input to a binary operation having scalar input.
- * That is, it transforms
- *
- * packHalf2x16(VEC2_RVAL);
- *
- * into
- *
- * vec2 v = VEC2_RVAL;
- * return packHalf2x16_split(v.x, v.y);
- */
- ir_rvalue*
- split_pack_half_2x16(ir_rvalue *vec2_rval)
- {
- assert(vec2_rval->type == glsl_type::vec2_type);
-
- ir_variable *v = factory.make_temp(glsl_type::vec2_type,
- "tmp_split_pack_half_2x16_v");
- factory.emit(assign(v, vec2_rval));
-
- return expr(ir_binop_pack_half_2x16_split, swizzle_x(v), swizzle_y(v));
- }
-
/**
* \brief Lower the component-wise calculation of unpackHalf2x16.
*
assert(result->type == glsl_type::vec2_type);
return result;
}
-
- /**
- * \brief Split unpackHalf2x16 into two operations.
- *
- * \param uint_rval is unpackHalf2x16's input
- * \return a vec2 rvalue
- *
- * Some code generators, such as the i965 fragment shader, require that all
- * vector expressions be lowered to a sequence of scalar expressions.
- * However, unpackHalf2x16 cannot be scalarized by the same method as
- * a true vector operation because the number of components of its input
- * and output differ.
- *
- * This method scalarizes unpackHalf2x16 by transforming it from a single
- * operation having vec2 output to a pair of operations each having float
- * output. That is, it transforms
- *
- * unpackHalf2x16(UINT_RVAL)
- *
- * into
- *
- * uint u = UINT_RVAL;
- * vec2 v;
- *
- * v.x = unpackHalf2x16_split_x(u);
- * v.y = unpackHalf2x16_split_y(u);
- *
- * return v;
- */
- ir_rvalue*
- split_unpack_half_2x16(ir_rvalue *uint_rval)
- {
- assert(uint_rval->type == glsl_type::uint_type);
-
- /* uint u = uint_rval; */
- ir_variable *u = factory.make_temp(glsl_type::uint_type,
- "tmp_split_unpack_half_2x16_u");
- factory.emit(assign(u, uint_rval));
-
- /* vec2 v; */
- ir_variable *v = factory.make_temp(glsl_type::vec2_type,
- "tmp_split_unpack_half_2x16_v");
-
- /* v.x = unpack_half_2x16_split_x(u); */
- factory.emit(assign(v, expr(ir_unop_unpack_half_2x16_split_x, u),
- WRITEMASK_X));
-
- /* v.y = unpack_half_2x16_split_y(u); */
- factory.emit(assign(v, expr(ir_unop_unpack_half_2x16_split_y, u),
- WRITEMASK_Y));
-
- return deref(v).val;
- }
};
} // namespace anonymous
case ir_unop_unpack_half_2x16:
result = nir_unpack_half_2x16(&b, srcs[0]);
break;
- case ir_unop_unpack_half_2x16_split_x:
- result = nir_unpack_half_2x16_split_x(&b, srcs[0]);
- break;
- case ir_unop_unpack_half_2x16_split_y:
- result = nir_unpack_half_2x16_split_y(&b, srcs[0]);
- break;
case ir_unop_bitfield_reverse:
result = nir_bitfield_reverse(&b, srcs[0]);
break;
}
break;
- case ir_binop_pack_half_2x16_split:
- result = nir_pack_half_2x16_split(&b, srcs[0], srcs[1]);
- break;
case ir_binop_ldexp: result = nir_ldexp(&b, srcs[0], srcs[1]); break;
case ir_triop_fma:
result = nir_ffma(&b, srcs[0], srcs[1], srcs[2]);
case ir_unop_ssbo_unsized_array_length:
unreachable("should have been lowered");
- case ir_unop_unpack_half_2x16_split_x:
- case ir_unop_unpack_half_2x16_split_y:
- case ir_binop_pack_half_2x16_split:
case ir_unop_interpolate_at_centroid:
case ir_binop_interpolate_at_offset:
case ir_binop_interpolate_at_sample:
case ir_unop_unpack_unorm_2x16:
case ir_unop_unpack_unorm_4x8:
case ir_unop_unpack_half_2x16:
- case ir_unop_unpack_half_2x16_split_x:
- case ir_unop_unpack_half_2x16_split_y:
case ir_unop_unpack_double_2x32:
- case ir_binop_pack_half_2x16_split:
case ir_unop_bitfield_reverse:
case ir_unop_bit_count:
case ir_unop_find_msb:
case ir_unop_unpack_snorm_2x16:
case ir_unop_unpack_unorm_2x16:
- case ir_unop_unpack_half_2x16_split_x:
- case ir_unop_unpack_half_2x16_split_y:
case ir_unop_unpack_snorm_4x8:
case ir_unop_unpack_unorm_4x8:
- case ir_binop_pack_half_2x16_split:
case ir_quadop_vector:
case ir_binop_vector_extract:
case ir_triop_vector_insert: