From: Eric Anholt Date: Tue, 8 May 2018 19:47:48 +0000 (-0700) Subject: nir: Add lowering for nir_op_bitfield_reverse. X-Git-Url: https://git.libre-soc.org/?a=commitdiff_plain;h=7afa26d4e39c73502ad75b95605197eb52c8d099;p=mesa.git nir: Add lowering for nir_op_bitfield_reverse. This is basically the same as the GLSL lowering path. Reviewed-by: Matt Turner Reviewed-by: Ian Romanick --- diff --git a/src/compiler/nir/nir.h b/src/compiler/nir/nir.h index 7d01eb23bc4..06154aa9900 100644 --- a/src/compiler/nir/nir.h +++ b/src/compiler/nir/nir.h @@ -1911,6 +1911,8 @@ typedef struct nir_shader_compiler_options { bool lower_bitfield_insert; /** Lowers bitfield_insert to bfm, compares, and shifts. */ bool lower_bitfield_insert_to_shifts; + /** Lowers bitfield_reverse to shifts. */ + bool lower_bitfield_reverse; /** Lowers bfm to shifts and subtracts. */ bool lower_bfm; /** Lowers ifind_msb to compare and ufind_msb */ diff --git a/src/compiler/nir/nir_lower_alu.c b/src/compiler/nir/nir_lower_alu.c index 28ecaf6badc..ff977f01696 100644 --- a/src/compiler/nir/nir_lower_alu.c +++ b/src/compiler/nir/nir_lower_alu.c @@ -50,6 +50,50 @@ lower_alu_instr(nir_alu_instr *instr, nir_builder *b) b->exact = instr->exact; switch (instr->op) { + case nir_op_bitfield_reverse: + if (b->shader->options->lower_bitfield_reverse) { + /* For more details, see: + * + * http://graphics.stanford.edu/~seander/bithacks.html#ReverseParallel + */ + nir_ssa_def *c1 = nir_imm_int(b, 1); + nir_ssa_def *c2 = nir_imm_int(b, 2); + nir_ssa_def *c4 = nir_imm_int(b, 4); + nir_ssa_def *c8 = nir_imm_int(b, 8); + nir_ssa_def *c16 = nir_imm_int(b, 16); + nir_ssa_def *c33333333 = nir_imm_int(b, 0x33333333); + nir_ssa_def *c55555555 = nir_imm_int(b, 0x55555555); + nir_ssa_def *c0f0f0f0f = nir_imm_int(b, 0x0f0f0f0f); + nir_ssa_def *c00ff00ff = nir_imm_int(b, 0x00ff00ff); + + lowered = nir_ssa_for_alu_src(b, instr, 0); + + /* Swap odd and even bits. */ + lowered = nir_ior(b, + nir_iand(b, nir_ushr(b, lowered, c1), c55555555), + nir_ishl(b, nir_iand(b, lowered, c55555555), c1)); + /* Swap consecutive pairs. */ + lowered = nir_ior(b, + nir_iand(b, nir_ushr(b, lowered, c2), c33333333), + nir_ishl(b, nir_iand(b, lowered, c33333333), c2)); + /* Swap nibbles. */ + lowered = nir_ior(b, + nir_iand(b, nir_ushr(b, lowered, c4), c0f0f0f0f), + nir_ishl(b, nir_iand(b, lowered, c0f0f0f0f), c4)); + /* Swap bytes. */ + lowered = nir_ior(b, + nir_iand(b, nir_ushr(b, lowered, c8), c00ff00ff), + nir_ishl(b, nir_iand(b, lowered, c00ff00ff), c8)); + lowered = nir_ior(b, + nir_ushr(b, lowered, c16), + nir_ishl(b, lowered, c16)); + } + break; + case nir_op_imul_high: case nir_op_umul_high: if (b->shader->options->lower_mul_high) { @@ -136,7 +180,8 @@ nir_lower_alu(nir_shader *shader) { bool progress = false; - if (!shader->options->lower_mul_high) + if (!shader->options->lower_bitfield_reverse && + !shader->options->lower_mul_high) return false; nir_foreach_function(function, shader) {