static uint32_t
hash_alu(uint32_t hash, const nir_alu_instr *instr)
{
hash = HASH(hash, instr->op);
hash = HASH(hash, instr->dest.dest.ssa.num_components);
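+ /* bit_size is now part of an SSA def's identity, so fold it into the
+ * hash alongside num_components. */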
+ hash = HASH(hash, instr->dest.dest.ssa.bit_size);
/* We explicitly don't hash instr->exact */
if (nir_op_infos[instr->op].algebraic_properties & NIR_OP_IS_COMMUTATIVE) {
static uint32_t
hash_load_const(uint32_t hash, const nir_load_const_instr *instr)
{
hash = HASH(hash, instr->def.num_components);
- hash = _mesa_fnv32_1a_accumulate_block(hash, instr->value.f32,
- instr->def.num_components
- * sizeof(instr->value.f32[0]));
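+ /* instr->value is a union, so sizeof the f32 member under-counts
+ * 64-bit constants; derive the payload size from the actual bit_size. */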
+ unsigned size = instr->def.num_components * (instr->def.bit_size / 8);
+ hash = _mesa_fnv32_1a_accumulate_block(hash, instr->value.f32, size);
return hash;
}
static uint32_t
hash_intrinsic(uint32_t hash, const nir_intrinsic_instr *instr)
{
const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic];
hash = HASH(hash, instr->intrinsic);
- if (info->has_dest)
+ if (info->has_dest) {
hash = HASH(hash, instr->dest.ssa.num_components);
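+ /* Hash the dest bit_size too, mirroring the bit_size compare added
+ * to nir_instrs_equal() below. */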
+ hash = HASH(hash, instr->dest.ssa.bit_size);
+ }
assert(info->num_variables == 0);
case nir_instr_type_alu: {
nir_alu_instr *alu1 = nir_instr_as_alu(instr1);
nir_alu_instr *alu2 = nir_instr_as_alu(instr2);
if (alu1->op != alu2->op)
return false;
if (alu1->dest.dest.ssa.num_components != alu2->dest.dest.ssa.num_components)
return false;
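+ /* The same op at a different bit size (e.g. 32- vs 64-bit) produces
+ * a different value and must not be CSE'd. */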
+ if (alu1->dest.dest.ssa.bit_size != alu2->dest.dest.ssa.bit_size)
+ return false;
+
/* We explicitly don't compare instr->exact */
if (nir_op_infos[alu1->op].algebraic_properties & NIR_OP_IS_COMMUTATIVE) {
case nir_instr_type_load_const: {
nir_load_const_instr *load1 = nir_instr_as_load_const(instr1);
nir_load_const_instr *load2 = nir_instr_as_load_const(instr2);
if (load1->def.num_components != load2->def.num_components)
return false;
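+ /* Check bit_size before the memcmp below so the byte count is
+ * well-defined and identical raw bits at different sizes don't match. */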
+ if (load1->def.bit_size != load2->def.bit_size)
+ return false;
+
return memcmp(load1->value.f32, load2->value.f32,
- load1->def.num_components * sizeof(*load2->value.f32)) == 0;
+ load1->def.num_components * (load1->def.bit_size / 8)) == 0;
}
case nir_instr_type_phi: {
nir_phi_instr *phi1 = nir_instr_as_phi(instr1);
if (info->has_dest && intrinsic1->dest.ssa.num_components !=
intrinsic2->dest.ssa.num_components)
return false;
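+ /* As with ALU results, intrinsic dests that differ only in bit_size
+ * are distinct. */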
+ if (info->has_dest && intrinsic1->dest.ssa.bit_size !=
+ intrinsic2->dest.ssa.bit_size)
+ return false;
+
for (unsigned i = 0; i < info->num_srcs; i++) {
if (!nir_srcs_equal(intrinsic1->src[i], intrinsic2->src[i]))
return false;