pan/midgard: Represent unused nodes by ~0
author Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
Wed, 21 Aug 2019 16:15:56 +0000 (09:15 -0700)
committer Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
Wed, 21 Aug 2019 17:38:31 +0000 (10:38 -0700)
This allows nodes to be unsigned and prevents a class of weird
signedness bugs identified by Coverity.

Signed-off-by: Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
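
A minimal sketch (not part of the commit) of the point above: once nodes are unsigned and "unused" is ~0, the old two-sided checks such as "(node < 0) || (node >= SSA_FIXED_MINIMUM)" collapse to a single bound test, which is the mixed-signedness pattern Coverity was flagging. The value of SSA_FIXED_MINIMUM here is assumed purely for illustration.

    /* Sketch only -- not from this commit; the bound value is illustrative. */
    #include <assert.h>

    #define SSA_UNUSED        (~0u)        /* unused-node marker, largest unsigned value */
    #define SSA_FIXED_MINIMUM (1u << 24)   /* assumed stand-in for the real fixed-register bound */

    /* With unsigned nodes, ~0 is >= any fixed-register bound, so one
     * comparison rejects both unused nodes and fixed registers. */
    static int is_ssa_node(unsigned node)
    {
            return node < SSA_FIXED_MINIMUM;
    }

    int main(void)
    {
            assert(is_ssa_node(42));
            assert(!is_ssa_node(SSA_UNUSED));
            assert(!is_ssa_node(SSA_FIXED_MINIMUM + 3));
            return 0;
    }
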
src/panfrost/midgard/compiler.h
src/panfrost/midgard/helpers.h
src/panfrost/midgard/midgard_compile.c
src/panfrost/midgard/midgard_derivatives.c
src/panfrost/midgard/midgard_opt_dce.c
src/panfrost/midgard/midgard_opt_invert.c
src/panfrost/midgard/midgard_opt_perspective.c
src/panfrost/midgard/midgard_print.c
src/panfrost/midgard/midgard_ra.c
src/panfrost/midgard/midgard_schedule.c
src/panfrost/midgard/mir.c

index edf0c105a195709e8c6083f5ab25c87182757fb6..75f9e4a93fc625143d385917d3a5b4f9c0dd3656 100644 (file)
@@ -68,11 +68,11 @@ typedef struct midgard_branch {
 } midgard_branch;
 
 /* Instruction arguments represented as block-local SSA indices, rather than
- * registers. Negative values mean unused. */
+ * registers. ~0 means unused. */
 
 typedef struct {
-        int src[3];
-        int dest;
+        unsigned src[3];
+        unsigned dest;
 
         bool inline_constant;
 } ssa_args;
@@ -534,7 +534,7 @@ v_mov(unsigned src, midgard_vector_alu_src mod, unsigned dest)
                 .type = TAG_ALU_4,
                 .mask = 0xF,
                 .ssa_args = {
-                        .src = { SSA_UNUSED_1, src, -1 },
+                        .src = { SSA_UNUSED, src, SSA_UNUSED },
                         .dest = dest,
                 },
                 .alu = {
index ffcf9d84de1c39a5d30a10a8f5a5d51d8aa250a1..03fcc01893df3ba578e92090c36b008cfc68e9ea 100644 (file)
@@ -175,12 +175,9 @@ quadword_size(int tag)
 #define REGISTER_TEXTURE_BASE 28
 #define REGISTER_SELECT 31
 
-/* SSA helper aliases to mimic the registers. UNUSED_0 encoded as an inline
- * constant. UNUSED_1 encoded as REGISTER_UNUSED */
-
-#define SSA_UNUSED_0 0
-#define SSA_UNUSED_1 -2
+/* SSA helper aliases to mimic the registers. */
 
+#define SSA_UNUSED ~0
 #define SSA_FIXED_SHIFT 24
 #define SSA_FIXED_REGISTER(reg) (((1 + (reg)) << SSA_FIXED_SHIFT) | 1)
 #define SSA_REG_FROM_FIXED(reg) ((((reg) & ~1) >> SSA_FIXED_SHIFT) - 1)
index c004c95eeb2991d8f95e16425f53c349b29e501e..557e68339471e7598f03fb3092cd98b31ac7e2f8 100644 (file)
@@ -105,8 +105,8 @@ midgard_block_add_successor(midgard_block *block, midgard_block *successor)
                        .type = TAG_LOAD_STORE_4, \
                         .mask = 0xF, \
                        .ssa_args = { \
-                               .dest = -1, \
-                               .src = { -1, -1, -1 }, \
+                               .dest = ~0, \
+                               .src = { ~0, ~0, ~0 }, \
                        }, \
                        .load_store = { \
                                .op = midgard_op_##name, \
@@ -213,8 +213,8 @@ v_alu_br_compact_cond(midgard_jmp_writeout_op op, unsigned tag, signed offset, u
                 .compact_branch = true,
                 .br_compact = compact,
                 .ssa_args = {
-                        .dest = -1,
-                        .src = { -1, -1, -1 },
+                        .dest = ~0,
+                        .src = { ~0, ~0, ~0 },
                 }
         };
 
@@ -236,8 +236,8 @@ v_branch(bool conditional, bool invert)
                         .invert_conditional = invert
                 },
                 .ssa_args = {
-                        .dest = -1,
-                        .src = { -1, -1, -1 },
+                        .dest = ~0,
+                        .src = { ~0, ~0, ~0 },
                 }
         };
 
@@ -338,7 +338,7 @@ midgard_nir_sysval_for_intrinsic(nir_intrinsic_instr *instr)
         case nir_intrinsic_store_ssbo: 
                 return midgard_sysval_for_ssbo(instr);
         default:
-                return -1;
+                return ~0;
         }
 }
 
@@ -622,7 +622,7 @@ emit_condition(compiler_context *ctx, nir_src *src, bool for_branch, unsigned co
                 .mask = 1 << COMPONENT_W,
 
                 .ssa_args = {
-                        .src = { condition, condition, -1 },
+                        .src = { condition, condition, ~0 },
                         .dest = SSA_FIXED_REGISTER(31),
                 },
 
@@ -661,7 +661,7 @@ emit_condition_mixed(compiler_context *ctx, nir_alu_src *src, unsigned nr_comp)
                 .precede_break = true,
                 .mask = mask_of(nr_comp),
                 .ssa_args = {
-                        .src = { condition, condition, -1 },
+                        .src = { condition, condition, ~0 },
                         .dest = SSA_FIXED_REGISTER(31),
                 },
                 .alu = {
@@ -1021,7 +1021,7 @@ emit_alu(compiler_context *ctx, nir_alu_instr *instr)
          * needs it, or else we may segfault. */
 
         unsigned src0 = nir_alu_src_index(ctx, &instr->src[0]);
-        unsigned src1 = nr_inputs == 2 ? nir_alu_src_index(ctx, &instr->src[1]) : SSA_UNUSED_0;
+        unsigned src1 = nr_inputs == 2 ? nir_alu_src_index(ctx, &instr->src[1]) : ~0;
 
         /* Rather than use the instruction generation helpers, we do it
          * ourselves here to avoid the mess */
@@ -1030,9 +1030,9 @@ emit_alu(compiler_context *ctx, nir_alu_instr *instr)
                 .type = TAG_ALU_4,
                 .ssa_args = {
                         .src = {
-                                quirk_flipped_r24 ? SSA_UNUSED_1 : src0,
-                                quirk_flipped_r24 ? src0         : src1,
-                                -1
+                                quirk_flipped_r24 ? ~0 : src0,
+                                quirk_flipped_r24 ? src0       : src1,
+                                ~0
                         },
                         .dest = dest,
                 }
@@ -1370,13 +1370,13 @@ emit_fragment_store(compiler_context *ctx, unsigned src, unsigned rt)
 
         midgard_instruction rt_move = {
                 .ssa_args = {
-                        .dest = -1
+                        .dest = ~0
                 }
         };
 
         if (rt != 0) {
                 /* We'll write to r1.z */
-                rt_move = v_mov(-1, blank_alu_src, SSA_FIXED_REGISTER(1));
+                rt_move = v_mov(~0, blank_alu_src, SSA_FIXED_REGISTER(1));
                 rt_move.mask = 1 << COMPONENT_Z;
                 rt_move.unit = UNIT_SADD;
 
@@ -1627,7 +1627,7 @@ emit_intrinsic(compiler_context *ctx, nir_intrinsic_instr *instr)
         case nir_intrinsic_load_viewport_scale:
         case nir_intrinsic_load_viewport_offset:
         case nir_intrinsic_load_num_work_groups:
-                emit_sysval_read(ctx, &instr->instr, -1, 3);
+                emit_sysval_read(ctx, &instr->instr, ~0, 3);
                 break;
 
         case nir_intrinsic_load_work_group_id:
@@ -1733,7 +1733,7 @@ emit_texop_native(compiler_context *ctx, nir_tex_instr *instr,
                 .mask = 0xF,
                 .ssa_args = {
                         .dest = nir_dest_index(ctx, &instr->dest),
-                        .src = { -1, -1, -1 },
+                        .src = { ~0, ~0, ~0 },
                 },
                 .texture = {
                         .op = midgard_texop,
@@ -1867,7 +1867,7 @@ emit_tex(compiler_context *ctx, nir_tex_instr *instr)
                 emit_texop_native(ctx, instr, TEXTURE_OP_TEXEL_FETCH);
                 break;
         case nir_texop_txs:
-                emit_sysval_read(ctx, &instr->instr, -1, 4);
+                emit_sysval_read(ctx, &instr->instr, ~0, 4);
                 break;
         default:
                 unreachable("Unhanlded texture op");
@@ -2158,7 +2158,7 @@ embedded_to_inline_constant(compiler_context *ctx)
 
                         /* Get rid of the embedded constant */
                         ins->has_constants = false;
-                        ins->ssa_args.src[1] = -1;
+                        ins->ssa_args.src[1] = ~0;
                         ins->ssa_args.inline_constant = true;
                         ins->inline_constant = scaled_constant;
                 }
@@ -2260,7 +2260,7 @@ static void
 emit_fragment_epilogue(compiler_context *ctx)
 {
         /* Just emit the last chunk with the branch */
-        EMIT(alu_br_compact_cond, midgard_jmp_writeout_op_writeout, TAG_ALU_4, -1, midgard_condition_always);
+        EMIT(alu_br_compact_cond, midgard_jmp_writeout_op_writeout, TAG_ALU_4, ~0, midgard_condition_always);
 }
 
 static midgard_block *
@@ -2291,8 +2291,8 @@ emit_block(compiler_context *ctx, nir_block *block)
         this_block->is_scheduled = false;
         ++ctx->block_count;
 
-        ctx->texture_index[0] = -1;
-        ctx->texture_index[1] = -1;
+        ctx->texture_index[0] = ~0;
+        ctx->texture_index[1] = ~0;
 
         /* Set up current block */
         list_inithead(&this_block->instructions);
index 0f15af3db4272787ade8119fd8cd6f5068646b56..9a1506372b8b02a4fdf3ec96e4bc8b4bf6734ba6 100644 (file)
@@ -96,7 +96,7 @@ midgard_emit_derivatives(compiler_context *ctx, nir_alu_instr *instr)
                 .mask = mask_of(nr_components),
                 .ssa_args = {
                         .dest = nir_dest_index(ctx, &instr->dest.dest),
-                        .src = { nir_alu_src_index(ctx, &instr->src[0]), -1, -1 },
+                        .src = { nir_alu_src_index(ctx, &instr->src[0]), ~0, ~0 },
                 },
                 .texture = {
                         .op = mir_derivative_op(instr->op),
index f2011831944ba2da1e6732cddfacfa346947460b..57768ed69c30378e59536b35a872f84a85a3620c 100644 (file)
@@ -103,7 +103,7 @@ midgard_opt_post_move_eliminate(compiler_context *ctx, midgard_block *block, str
                 unsigned iA = ins->ssa_args.dest;
                 unsigned iB = ins->ssa_args.src[1];
 
-                if ((iA < 0) || (iB < 0)) continue;
+                if ((iA == ~0) || (iB == ~0)) continue;
 
                 unsigned A = iA >= SSA_FIXED_MINIMUM ?
                         SSA_REG_FROM_FIXED(iA) : 
index b68c98c74dbb74a053ace057a234d9e6c240e21a..592a5d381b5ba3db659194a578e5ac02b61f958f 100644 (file)
@@ -41,7 +41,7 @@ midgard_lower_invert(compiler_context *ctx, midgard_block *block)
                         .type = TAG_ALU_4,
                         .mask = ins->mask,
                         .ssa_args = {
-                                .src = { temp, -1, -1 },
+                                .src = { temp, ~0, ~0 },
                                 .dest = ins->ssa_args.dest,
                                 .inline_constant = true
                         },
index f5e483e68d0e81131634839260e6e08add11672c..22b7736a379766531c60ce3d9c1de53db4c9d9e3 100644 (file)
@@ -116,7 +116,7 @@ midgard_opt_combine_projection(compiler_context *ctx, midgard_block *block)
                         .mask = ins->mask,
                         .ssa_args = {
                                 .dest = to,
-                                .src = { frcp_from, -1, -1 },
+                                .src = { frcp_from, ~0, ~0 },
                         },
                         .load_store = {
                                 .op = frcp_component == COMPONENT_W ?
index 1f8bd12680ec30910d49b67ab68c0c763a6d2b79..add40511d2f398de8e8441be83102b5a3e982bef 100644 (file)
@@ -34,7 +34,7 @@
 static void
 mir_print_index(int source)
 {
-        if (source < 0) {
+        if (source == ~0) {
                 printf("_");
                 return;
         }
index bdfb56b8efbe06c1ae63b334e081dd48fcba00b0..f900e847802a8e5d1f639a46ea126447e90209ad 100644 (file)
@@ -138,12 +138,14 @@ default_phys_reg(int reg)
  * register corresponds to */
 
 static struct phys_reg
-index_to_reg(compiler_context *ctx, struct ra_graph *g, int reg)
+index_to_reg(compiler_context *ctx, struct ra_graph *g, unsigned reg)
 {
         /* Check for special cases */
-        if (reg >= SSA_FIXED_MINIMUM)
+        if ((reg == ~0) && g)
+                return default_phys_reg(REGISTER_UNUSED);
+        else if (reg >= SSA_FIXED_MINIMUM)
                 return default_phys_reg(SSA_REG_FROM_FIXED(reg));
-        else if ((reg < 0) || !g)
+        else if (!g)
                 return default_phys_reg(REGISTER_UNUSED);
 
         /* Special cases aside, we pick the underlying register */
@@ -301,7 +303,7 @@ static void
 set_class(unsigned *classes, unsigned node, unsigned class)
 {
         /* Check that we're even a node */
-        if ((node < 0) || (node >= SSA_FIXED_MINIMUM))
+        if (node >= SSA_FIXED_MINIMUM)
                 return;
 
         /* First 4 are work, next 4 are load/store.. */
@@ -321,7 +323,7 @@ set_class(unsigned *classes, unsigned node, unsigned class)
 static void
 force_vec4(unsigned *classes, unsigned node)
 {
-        if ((node < 0) || (node >= SSA_FIXED_MINIMUM))
+        if (node >= SSA_FIXED_MINIMUM)
                 return;
 
         /* Force vec4 = 3 */
@@ -335,7 +337,7 @@ static bool
 check_read_class(unsigned *classes, unsigned tag, unsigned node)
 {
         /* Non-nodes are implicitly ok */
-        if ((node < 0) || (node >= SSA_FIXED_MINIMUM))
+        if (node >= SSA_FIXED_MINIMUM)
                 return true;
 
         unsigned current_class = classes[node] >> 2;
@@ -358,7 +360,7 @@ static bool
 check_write_class(unsigned *classes, unsigned tag, unsigned node)
 {
         /* Non-nodes are implicitly ok */
-        if ((node < 0) || (node >= SSA_FIXED_MINIMUM))
+        if (node >= SSA_FIXED_MINIMUM)
                 return true;
 
         unsigned current_class = classes[node] >> 2;
@@ -383,7 +385,7 @@ check_write_class(unsigned *classes, unsigned tag, unsigned node)
 static void
 mark_node_class (unsigned *bitfield, unsigned node)
 {
-        if ((node >= 0) && (node < SSA_FIXED_MINIMUM))
+        if (node < SSA_FIXED_MINIMUM)
                 BITSET_SET(bitfield, node);
 }
 
@@ -522,7 +524,7 @@ mir_lower_special_reads(compiler_context *ctx)
 static void
 liveness_gen(uint8_t *live, unsigned node, unsigned max, unsigned mask)
 {
-        if ((node < 0) || (node >= max))
+        if (node >= max)
                 return;
 
         live[node] |= mask;
@@ -531,7 +533,7 @@ liveness_gen(uint8_t *live, unsigned node, unsigned max, unsigned mask)
 static void
 liveness_kill(uint8_t *live, unsigned node, unsigned max, unsigned mask)
 {
-        if ((node < 0) || (node >= max))
+        if (node >= max)
                 return;
 
         live[node] &= ~mask;
@@ -659,7 +661,7 @@ mir_compute_liveness(
 
                         unsigned dest = ins->ssa_args.dest;
 
-                        if (dest >= 0 && dest < ctx->temp_count) {
+                        if (dest < ctx->temp_count) {
                                 for (unsigned i = 0; i < ctx->temp_count; ++i)
                                         if (live[i])
                                                 ra_add_node_interference(g, dest, i);
@@ -710,7 +712,6 @@ allocate_registers(compiler_context *ctx, bool *spilled)
         unsigned *found_class = calloc(sizeof(unsigned), ctx->temp_count);
 
         mir_foreach_instr_global(ctx, ins) {
-                if (ins->ssa_args.dest < 0) continue;
                 if (ins->ssa_args.dest >= SSA_FIXED_MINIMUM) continue;
 
                 /* 0 for x, 1 for xy, 2 for xyz, 3 for xyzw */
@@ -931,7 +932,7 @@ install_registers_instr(
                         compose_writemask(ins->mask, dest);
 
                 /* If there is a register LOD/bias, use it */
-                if (args.src[1] > -1) {
+                if (args.src[1] != ~0) {
                         midgard_tex_register_select sel = {
                                 .select = lod.reg,
                                 .full = 1,
index 66502813748d3d752fcb5456ab552bb9349768d6..399c259073ec1963a839fafec5e5867cf693387e 100644 (file)
@@ -159,15 +159,15 @@ can_writeout_fragment(compiler_context *ctx, midgard_instruction **bundle, unsig
                 unsigned src1 = ins->ssa_args.src[1];
 
                 if (!mir_is_written_before(ctx, bundle[0], src0))
-                        src0 = -1;
+                        src0 = ~0;
 
                 if (!mir_is_written_before(ctx, bundle[0], src1))
-                        src1 = -1;
+                        src1 = ~0;
 
-                if ((src0 > 0) && (src0 < node_count))
+                if (src0 < node_count)
                         BITSET_SET(dependencies, src0);
 
-                if ((src1 > 0) && (src1 < node_count))
+                if (src1 < node_count)
                         BITSET_SET(dependencies, src1);
 
                 /* Requirement 2 */
@@ -630,7 +630,7 @@ midgard_pair_load_store(compiler_context *ctx, midgard_block *block)
                                 bool deps = false;
 
                                 for (unsigned s = 0; s < ARRAY_SIZE(ins->ssa_args.src); ++s)
-                                        deps |= (c->ssa_args.src[s] != -1);
+                                        deps |= (c->ssa_args.src[s] != ~0);
 
                                 if (deps)
                                         continue;
@@ -652,7 +652,7 @@ midgard_pair_load_store(compiler_context *ctx, midgard_block *block)
 static unsigned
 find_or_allocate_temp(compiler_context *ctx, unsigned hash)
 {
-        if ((hash < 0) || (hash >= SSA_FIXED_MINIMUM))
+        if (hash >= SSA_FIXED_MINIMUM)
                 return hash;
 
         unsigned temp = (uintptr_t) _mesa_hash_table_u64_search(
@@ -703,8 +703,8 @@ v_load_store_scratch(
                 .type = TAG_LOAD_STORE_4,
                 .mask = mask,
                 .ssa_args = {
-                        .dest = -1,
-                        .src = { -1, -1, -1 },
+                        .dest = ~0,
+                        .src = { ~0, ~0, ~0 },
                 },
                 .load_store = {
                         .op = is_store ? midgard_op_st_int4 : midgard_op_ld_int4,
index 9e269629131fba0bf8f0ab7a853ec5ee70f77198..42b84b0f6a2c5c9e74b7db2f2a762110356cb94d 100644 (file)
@@ -327,7 +327,7 @@ mir_special_index(compiler_context *ctx, unsigned idx)
 bool
 mir_is_written_before(compiler_context *ctx, midgard_instruction *ins, unsigned node)
 {
-        if ((node < 0) || (node >= SSA_FIXED_MINIMUM))
+        if (node >= SSA_FIXED_MINIMUM)
                 return true;
 
         mir_foreach_instr_global(ctx, q) {