nir/opt_constant_folding: fix folding of 8 and 16 bit ints
author: Karol Herbst <kherbst@redhat.com>
Sun, 22 Apr 2018 01:29:07 +0000 (03:29 +0200)
committer: Karol Herbst <kherbst@redhat.com>
Thu, 26 Apr 2018 09:16:15 +0000 (11:16 +0200)
Signed-off-by: Karol Herbst <kherbst@redhat.com>
Reviewed-by: Jose Maria Casanova Crespo <jmcasanova@igalia.com>
Reviewed-by: Jason Ekstrand <jason@jlekstrand.net>
src/compiler/nir/nir_opt_constant_folding.c

index d6be807b3dcbd58dcc890b7e64ece191256206d0..a848b145874113c90537b362b10b82194802b965 100644 (file)
@@ -76,10 +76,20 @@ constant_fold_alu_instr(nir_alu_instr *instr, void *mem_ctx)
 
       for (unsigned j = 0; j < nir_ssa_alu_instr_src_components(instr, i);
            j++) {
-         if (load_const->def.bit_size == 64)
+         switch(load_const->def.bit_size) {
+         case 64:
             src[i].u64[j] = load_const->value.u64[instr->src[i].swizzle[j]];
-         else
+            break;
+         case 32:
             src[i].u32[j] = load_const->value.u32[instr->src[i].swizzle[j]];
+            break;
+         case 16:
+            src[i].u16[j] = load_const->value.u16[instr->src[i].swizzle[j]];
+            break;
+         case 8:
+            src[i].u8[j] = load_const->value.u8[instr->src[i].swizzle[j]];
+            break;
+         }
       }
 
       /* We shouldn't have any source modifiers in the optimization loop. */