aco: implement subgroup shader_clock on GFX10.3
[mesa.git] / src / amd / compiler / aco_opt_value_numbering.cpp
index e19f125e29fa8b47857d3fc8183d287fb039f911..11aad20a0b09b808d5df31df20dbee558ab68945 100644
 namespace aco {
 namespace {
 
+inline
+uint32_t murmur_32_scramble(uint32_t h, uint32_t k) {
+   k *= 0xcc9e2d51;
+   k = (k << 15) | (k >> 17);
+   h ^= k * 0x1b873593;
+   h = (h << 13) | (h >> 19);
+   h = h * 5 + 0xe6546b64;
+   return h;
+}
+
+template<typename T>
+uint32_t hash_murmur_32(Instruction* instr)
+{
+   uint32_t hash = uint32_t(instr->format) << 16 | uint32_t(instr->opcode);
+
+   for (const Operand& op : instr->operands)
+      hash = murmur_32_scramble(hash, op.constantValue());
+
+   /* skip format, opcode and pass_flags */
+   for (unsigned i = 2; i < (sizeof(T) >> 2); i++) {
+      uint32_t u;
+      /* Accesses it through a byte array, so it doesn't violate the strict aliasing rule */
+      memcpy(&u, reinterpret_cast<uint8_t *>(instr) + i * 4, 4);
+      hash = murmur_32_scramble(hash, u);
+   }
+
+   /* Finalize. */
+   uint32_t len = instr->operands.size() + instr->definitions.size() + sizeof(T);
+   hash ^= len;
+   hash ^= hash >> 16;
+   hash *= 0x85ebca6b;
+   hash ^= hash >> 13;
+   hash *= 0xc2b2ae35;
+   hash ^= hash >> 16;
+   return hash;
+}
+
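For reference, the scramble and finalize steps above combine like this in a
self-contained Murmur3-32 over an array of words (a minimal sketch mirroring
the code above; the tail-byte and seed handling of the canonical
implementation are omitted):

   #include <cstdint>
   #include <cstdio>

   static uint32_t scramble(uint32_t h, uint32_t k)
   {
      k *= 0xcc9e2d51;              /* first Murmur3 multiply constant */
      k = (k << 15) | (k >> 17);    /* rotate the block left by 15 */
      h ^= k * 0x1b873593;          /* second constant, mixed into the state */
      h = (h << 13) | (h >> 19);    /* rotate the accumulator */
      return h * 5 + 0xe6546b64;
   }

   static uint32_t murmur3_32(const uint32_t* words, unsigned count, uint32_t seed)
   {
      uint32_t h = seed;
      for (unsigned i = 0; i < count; i++)
         h = scramble(h, words[i]);
      h ^= count * 4;               /* length in bytes */
      h ^= h >> 16;                 /* finalizer: avalanche the remaining bits */
      h *= 0x85ebca6b;
      h ^= h >> 13;
      h *= 0xc2b2ae35;
      h ^= h >> 16;
      return h;
   }

   int main()
   {
      const uint32_t data[] = {0x12345678u, 0xdeadbeefu};
      printf("%08x\n", murmur3_32(data, 2, 0));
      return 0;
   }
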
 struct InstrHash {
+   /* This hash function uses the Murmur3 algorithm written by Austin Appleby
+    * https://github.com/aappleby/smhasher/blob/master/src/MurmurHash3.cpp
+    *
+    * In order to calculate the expression set, only the right-hand side of an
+    * instruction is used for the hash, i.e. everything except the definitions.
+    */
    std::size_t operator()(Instruction* instr) const
    {
-      uint64_t hash = (uint64_t) instr->opcode + (uint64_t) instr->format;
-      for (unsigned i = 0; i < instr->operands.size(); i++) {
-         Operand op = instr->operands[i];
-         uint64_t val = op.isTemp() ? op.tempId() : op.isFixed() ? op.physReg() : op.constantValue();
-         hash |= val << (i+1) * 8;
-      }
-      if (instr->isVOP3()) {
-         VOP3A_instruction* vop3 = static_cast<VOP3A_instruction*>(instr);
-         for (unsigned i = 0; i < 3; i++) {
-            hash ^= vop3->abs[i] << (i*3 + 0);
-            hash ^= vop3->opsel[i] << (i*3 + 1);
-            hash ^= vop3->neg[i] << (i*3 + 2);
-         }
-         hash ^= (vop3->clamp << 28) * 13;
-         hash += vop3->omod << 19;
-      }
+      if (instr->isVOP3())
+         return hash_murmur_32<VOP3A_instruction>(instr);
+
+      if (instr->isDPP())
+         return hash_murmur_32<DPP_instruction>(instr);
+
+      if (instr->isSDWA())
+         return hash_murmur_32<SDWA_instruction>(instr);
+
       switch (instr->format) {
       case Format::SMEM:
-         break;
-      case Format::VINTRP: {
-         Interp_instruction* interp = static_cast<Interp_instruction*>(instr);
-         hash ^= interp->attribute << 13;
-         hash ^= interp->component << 27;
-         break;
-      }
+         return hash_murmur_32<SMEM_instruction>(instr);
+      case Format::VINTRP:
+         return hash_murmur_32<Interp_instruction>(instr);
       case Format::DS:
-         break;
+         return hash_murmur_32<DS_instruction>(instr);
+      case Format::SOPP:
+         return hash_murmur_32<SOPP_instruction>(instr);
+      case Format::SOPK:
+         return hash_murmur_32<SOPK_instruction>(instr);
+      case Format::EXP:
+         return hash_murmur_32<Export_instruction>(instr);
+      case Format::MUBUF:
+         return hash_murmur_32<MUBUF_instruction>(instr);
+      case Format::MIMG:
+         return hash_murmur_32<MIMG_instruction>(instr);
+      case Format::MTBUF:
+         return hash_murmur_32<MTBUF_instruction>(instr);
+      case Format::FLAT:
+         return hash_murmur_32<FLAT_instruction>(instr);
+      case Format::PSEUDO_BRANCH:
+         return hash_murmur_32<Pseudo_branch_instruction>(instr);
+      case Format::PSEUDO_REDUCTION:
+         return hash_murmur_32<Pseudo_reduction_instruction>(instr);
       default:
-         break;
+         return hash_murmur_32<Instruction>(instr);
       }
-
-      return hash;
    }
 };
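
The set keyed by these two functors is presumably declared along the lines of
the sketch below (illustrative only; the stand-in types and trivial bodies are
hypothetical). The point is that keys are hashed and compared by instruction
*contents* rather than pointer identity, so two structurally identical
instructions land in the same bucket and the later one can be renamed to
reuse the earlier one's definitions:

   #include <unordered_map>
   #include <cstdint>
   #include <cstddef>

   struct Instruction { /* stand-in for the real aco type */ };
   struct InstrHash { std::size_t operator()(Instruction*) const { return 0; } };
   struct InstrPred { bool operator()(Instruction*, Instruction*) const { return true; } };

   /* maps an instruction to the index of its defining block */
   using expr_set = std::unordered_map<Instruction*, uint32_t, InstrHash, InstrPred>;
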
 
@@ -81,17 +129,6 @@ struct InstrPred {
          return false;
       if (a->operands.size() != b->operands.size() || a->definitions.size() != b->definitions.size())
          return false; /* possible with pseudo-instructions */
-      /* We can't value number v_readlane_b32 across control flow or discards
-       * because of the possibility of live-range splits.
-       * We can't value number permutes for the same reason as
-       * v_readlane_b32 and because discards affect the result */
-      if (a->opcode == aco_opcode::v_readfirstlane_b32 || a->opcode == aco_opcode::v_readlane_b32 ||
-          a->opcode == aco_opcode::ds_bpermute_b32 || a->opcode == aco_opcode::ds_permute_b32 ||
-          a->opcode == aco_opcode::ds_swizzle_b32 || a->format == Format::PSEUDO_REDUCTION ||
-          a->opcode == aco_opcode::p_phi || a->opcode == aco_opcode::p_linear_phi) {
-         if (a->pass_flags != b->pass_flags)
-            return false;
-      }
       for (unsigned i = 0; i < a->operands.size(); i++) {
          if (a->operands[i].isConstant()) {
             if (!b->operands[i].isConstant())
@@ -108,11 +145,11 @@ struct InstrPred {
          else if (a->operands[i].isUndefined() ^ b->operands[i].isUndefined())
             return false;
          if (a->operands[i].isFixed()) {
-            if (a->operands[i].physReg() == exec)
-               return false;
             if (!b->operands[i].isFixed())
                return false;
-            if (!(a->operands[i].physReg() == b->operands[i].physReg()))
+            if (a->operands[i].physReg() != b->operands[i].physReg())
+               return false;
+            if (a->operands[i].physReg() == exec && a->pass_flags != b->pass_flags)
                return false;
          }
       }
@@ -126,28 +163,37 @@ struct InstrPred {
          if (a->definitions[i].isFixed()) {
             if (!b->definitions[i].isFixed())
                return false;
-            if (!(a->definitions[i].physReg() == b->definitions[i].physReg()))
+            if (a->definitions[i].physReg() != b->definitions[i].physReg())
+               return false;
+            if (a->definitions[i].physReg() == exec)
                return false;
          }
       }
-      if (a->format == Format::PSEUDO_BRANCH)
+
+      if (a->opcode == aco_opcode::v_readfirstlane_b32)
+         return a->pass_flags == b->pass_flags;
+
+      /* The results of VOPC depend on the exec mask if used for subgroup operations. */
+      if ((uint32_t) a->format & (uint32_t) Format::VOPC && a->pass_flags != b->pass_flags)
          return false;
+
       if (a->isVOP3()) {
          VOP3A_instruction* a3 = static_cast<VOP3A_instruction*>(a);
          VOP3A_instruction* b3 = static_cast<VOP3A_instruction*>(b);
          for (unsigned i = 0; i < 3; i++) {
             if (a3->abs[i] != b3->abs[i] ||
-                a3->opsel[i] != b3->opsel[i] ||
                 a3->neg[i] != b3->neg[i])
                return false;
          }
          return a3->clamp == b3->clamp &&
-                a3->omod == b3->omod;
+                a3->omod == b3->omod &&
+                a3->opsel == b3->opsel;
       }
       if (a->isDPP()) {
          DPP_instruction* aDPP = static_cast<DPP_instruction*>(a);
          DPP_instruction* bDPP = static_cast<DPP_instruction*>(b);
-         return aDPP->dpp_ctrl == bDPP->dpp_ctrl &&
+         return aDPP->pass_flags == bDPP->pass_flags &&
+                aDPP->dpp_ctrl == bDPP->dpp_ctrl &&
                 aDPP->bank_mask == bDPP->bank_mask &&
                 aDPP->row_mask == bDPP->row_mask &&
                 aDPP->bound_ctrl == bDPP->bound_ctrl &&
@@ -156,13 +202,25 @@ struct InstrPred {
                 aDPP->neg[0] == bDPP->neg[0] &&
                 aDPP->neg[1] == bDPP->neg[1];
       }
+      if (a->isSDWA()) {
+         SDWA_instruction* aSDWA = static_cast<SDWA_instruction*>(a);
+         SDWA_instruction* bSDWA = static_cast<SDWA_instruction*>(b);
+         return aSDWA->sel[0] == bSDWA->sel[0] &&
+                aSDWA->sel[1] == bSDWA->sel[1] &&
+                aSDWA->dst_sel == bSDWA->dst_sel &&
+                aSDWA->abs[0] == bSDWA->abs[0] &&
+                aSDWA->abs[1] == bSDWA->abs[1] &&
+                aSDWA->neg[0] == bSDWA->neg[0] &&
+                aSDWA->neg[1] == bSDWA->neg[1] &&
+                aSDWA->dst_preserve == bSDWA->dst_preserve &&
+                aSDWA->clamp == bSDWA->clamp &&
+                aSDWA->omod == bSDWA->omod;
+      }
+
       switch (a->format) {
-         case Format::VOPC: {
-            /* Since the results depend on the exec mask, these shouldn't
-             * be value numbered (this is especially useful for subgroupBallot()). */
-            return false;
-         }
          case Format::SOPK: {
+            if (a->opcode == aco_opcode::s_getreg_b32)
+               return false;
             SOPK_instruction* aK = static_cast<SOPK_instruction*>(a);
             SOPK_instruction* bK = static_cast<SOPK_instruction*>(b);
             return aK->imm == bK->imm;
@@ -170,8 +228,12 @@ struct InstrPred {
          case Format::SMEM: {
             SMEM_instruction* aS = static_cast<SMEM_instruction*>(a);
             SMEM_instruction* bS = static_cast<SMEM_instruction*>(b);
-            return aS->can_reorder && bS->can_reorder &&
-                   aS->glc == bS->glc && aS->nv == bS->nv;
+            /* isel shouldn't be creating situations where this assertion fails */
+            assert(aS->prevent_overflow == bS->prevent_overflow);
+            return aS->sync.can_reorder() && bS->sync.can_reorder() &&
+                   aS->sync == bS->sync && aS->glc == bS->glc && aS->dlc == bS->dlc &&
+                   aS->nv == bS->nv && aS->disable_wqm == bS->disable_wqm &&
+                   aS->prevent_overflow == bS->prevent_overflow;
          }
          case Format::VINTRP: {
             Interp_instruction* aI = static_cast<Interp_instruction*>(a);
@@ -185,42 +247,69 @@ struct InstrPred {
          case Format::PSEUDO_REDUCTION: {
             Pseudo_reduction_instruction *aR = static_cast<Pseudo_reduction_instruction*>(a);
             Pseudo_reduction_instruction *bR = static_cast<Pseudo_reduction_instruction*>(b);
-            return aR->reduce_op == bR->reduce_op && aR->cluster_size == bR->cluster_size;
+            return aR->pass_flags == bR->pass_flags &&
+                   aR->reduce_op == bR->reduce_op &&
+                   aR->cluster_size == bR->cluster_size;
          }
          case Format::MTBUF: {
-            /* this is fine since they are only used for vertex input fetches */
             MTBUF_instruction* aM = static_cast<MTBUF_instruction *>(a);
             MTBUF_instruction* bM = static_cast<MTBUF_instruction *>(b);
-            return aM->dfmt == bM->dfmt &&
+            return aM->sync.can_reorder() && bM->sync.can_reorder() &&
+                   aM->sync == bM->sync &&
+                   aM->dfmt == bM->dfmt &&
                    aM->nfmt == bM->nfmt &&
                    aM->offset == bM->offset &&
                    aM->offen == bM->offen &&
                    aM->idxen == bM->idxen &&
                    aM->glc == bM->glc &&
+                   aM->dlc == bM->dlc &&
+                   aM->slc == bM->slc &&
+                   aM->tfe == bM->tfe &&
+                   aM->disable_wqm == bM->disable_wqm;
+         }
+         case Format::MUBUF: {
+            MUBUF_instruction* aM = static_cast<MUBUF_instruction *>(a);
+            MUBUF_instruction* bM = static_cast<MUBUF_instruction *>(b);
+            return aM->sync.can_reorder() && bM->sync.can_reorder() &&
+                   aM->sync == bM->sync &&
+                   aM->offset == bM->offset &&
+                   aM->offen == bM->offen &&
+                   aM->idxen == bM->idxen &&
+                   aM->glc == bM->glc &&
+                   aM->dlc == bM->dlc &&
                    aM->slc == bM->slc &&
                    aM->tfe == bM->tfe &&
+                   aM->lds == bM->lds &&
                    aM->disable_wqm == bM->disable_wqm;
          }
         /* we want to optimize these in NIR and don't want to hassle with load-store dependencies */
-         case Format::MUBUF:
          case Format::FLAT:
          case Format::GLOBAL:
          case Format::SCRATCH:
+         case Format::EXP:
+         case Format::SOPP:
+         case Format::PSEUDO_BRANCH:
+         case Format::PSEUDO_BARRIER:
             return false;
          case Format::DS: {
-            /* we already handle potential issue with permute/swizzle above */
-            DS_instruction* aD = static_cast<DS_instruction *>(a);
-            DS_instruction* bD = static_cast<DS_instruction *>(b);
             if (a->opcode != aco_opcode::ds_bpermute_b32 &&
                 a->opcode != aco_opcode::ds_permute_b32 &&
                 a->opcode != aco_opcode::ds_swizzle_b32)
                return false;
-            return aD->gds == bD->gds && aD->offset0 == bD->offset0 && aD->offset1 == bD->offset1;
+            DS_instruction* aD = static_cast<DS_instruction *>(a);
+            DS_instruction* bD = static_cast<DS_instruction *>(b);
+            return aD->sync.can_reorder() && bD->sync.can_reorder() &&
+                   aD->sync == bD->sync &&
+                   aD->pass_flags == bD->pass_flags &&
+                   aD->gds == bD->gds &&
+                   aD->offset0 == bD->offset0 &&
+                   aD->offset1 == bD->offset1;
          }
          case Format::MIMG: {
             MIMG_instruction* aM = static_cast<MIMG_instruction*>(a);
             MIMG_instruction* bM = static_cast<MIMG_instruction*>(b);
-            return aM->can_reorder && bM->can_reorder &&
+            return aM->sync.can_reorder() && bM->sync.can_reorder() &&
+                   aM->sync == bM->sync &&
                    aM->dmask == bM->dmask &&
                    aM->unrm == bM->unrm &&
                    aM->glc == bM->glc &&
@@ -245,14 +334,31 @@ struct vn_ctx {
    Program* program;
    expr_set expr_values;
    std::map<uint32_t, Temp> renames;
-   uint32_t exec_id = 0;
 
-   vn_ctx(Program* program) : program(program) {}
+   /* The exec id should be the same on the same level of control flow depth.
+    * Together with the check for dominator relations, it is safe to assume
+    * that the same exec_id also means the same execution mask.
+    * Discards increment the exec_id, so that it won't return to the previous value.
+    */
+   uint32_t exec_id = 1;
+
+   vn_ctx(Program* program) : program(program) {
+      static_assert(sizeof(Temp) == 4, "Temp must fit in 32 bits");
+      unsigned size = 0;
+      for (Block& block : program->blocks)
+         size += block.instructions.size();
+      expr_values.reserve(size);
+   }
 };
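
Reserving one slot per instruction up front keeps the table from rehashing
while the blocks are processed. The standalone sketch below (hypothetical
element count) shows the guarantee that std::unordered_map::reserve provides:

   #include <unordered_map>
   #include <cstdint>

   int main()
   {
      std::unordered_map<uint32_t, uint32_t> m;
      m.reserve(1024);                      /* room for 1024 entries */
      std::size_t buckets = m.bucket_count();
      for (uint32_t i = 0; i < 1024; i++)
         m.emplace(i, i);                   /* inserts never trigger a rehash */
      return m.bucket_count() == buckets ? 0 : 1;   /* exits 0: unchanged */
   }
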
 
+
+/* dominates() returns true if the parent block dominates the child block and
+ * if the parent block is part of the same loop or has a smaller loop nest depth.
+ */
 bool dominates(vn_ctx& ctx, uint32_t parent, uint32_t child)
 {
-   while (parent < child)
+   unsigned parent_loop_nest_depth = ctx.program->blocks[parent].loop_nest_depth;
+   while (parent < child && parent_loop_nest_depth <= ctx.program->blocks[child].loop_nest_depth)
       child = ctx.program->blocks[child].logical_idom;
 
    return parent == child;
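
For intuition, the walk climbs the child's immediate-dominator chain until the
index can no longer be above the parent's; a toy standalone version
(hypothetical idom array, loop-depth handling omitted) behaves the same way:

   #include <cstdio>

   int main()
   {
      /* idom[i] = immediate dominator of block i in a hypothetical CFG */
      const unsigned idom[] = {0, 0, 1, 1, 3};
      unsigned parent = 1, child = 4;
      while (parent < child)
         child = idom[child];            /* 4 -> 3 -> 1 */
      printf("dominates: %s\n", parent == child ? "yes" : "no");   /* yes */
      return 0;
   }
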
@@ -273,7 +379,11 @@ void process_block(vn_ctx& ctx, Block& block)
             op.setTemp(it->second);
       }
 
-      if (instr->definitions.empty()) {
+      if (instr->opcode == aco_opcode::p_discard_if ||
+          instr->opcode == aco_opcode::p_demote_to_helper)
+         ctx.exec_id++;
+
+      if (instr->definitions.empty() || instr->opcode == aco_opcode::p_phi || instr->opcode == aco_opcode::p_linear_phi) {
          new_instructions.emplace_back(std::move(instr));
          continue;
       }
@@ -285,10 +395,6 @@ void process_block(vn_ctx& ctx, Block& block)
          ctx.renames[instr->definitions[0].tempId()] = instr->operands[0].getTemp();
       }
 
-      if (instr->opcode == aco_opcode::p_discard_if ||
-          instr->opcode == aco_opcode::p_demote_to_helper)
-         ctx.exec_id++;
-
       instr->pass_flags = ctx.exec_id;
       std::pair<expr_set::iterator, bool> res = ctx.expr_values.emplace(instr.get(), block.index);
 
@@ -297,10 +403,20 @@ void process_block(vn_ctx& ctx, Block& block)
          Instruction* orig_instr = res.first->first;
          assert(instr->definitions.size() == orig_instr->definitions.size());
          /* check if the original instruction dominates the current one */
-         if (dominates(ctx, res.first->second, block.index)) {
+         if (dominates(ctx, res.first->second, block.index) &&
+             ctx.program->blocks[res.first->second].fp_mode.canReplace(block.fp_mode)) {
             for (unsigned i = 0; i < instr->definitions.size(); i++) {
                assert(instr->definitions[i].regClass() == orig_instr->definitions[i].regClass());
+               assert(instr->definitions[i].isTemp());
                ctx.renames[instr->definitions[i].tempId()] = orig_instr->definitions[i].getTemp();
+               if (instr->definitions[i].isPrecise())
+                  orig_instr->definitions[i].setPrecise(true);
+               /* The SPIR-V spec says that wrapping around is undefined
+                * behaviour for an instruction marked NUW, so it is safe to
+                * set the flag on the original even where it feeds uses that
+                * did not carry it.
+                */
+               if (instr->definitions[i].isNUW())
+                  orig_instr->definitions[i].setNUW(true);
             }
          } else {
             ctx.expr_values.erase(res.first);
@@ -336,14 +452,35 @@ void rename_phi_operands(Block& block, std::map<uint32_t, Temp>& renames)
 void value_numbering(Program* program)
 {
    vn_ctx ctx(program);
+   std::vector<unsigned> loop_headers;
 
    for (Block& block : program->blocks) {
+      assert(ctx.exec_id > 0);
+      /* decrement exec_id when leaving nested control flow */
+      if (block.kind & block_kind_loop_header)
+         loop_headers.push_back(block.index);
+      if (block.kind & block_kind_merge) {
+         ctx.exec_id--;
+      } else if (block.kind & block_kind_loop_exit) {
+         ctx.exec_id -= program->blocks[loop_headers.back()].linear_preds.size();
+         ctx.exec_id -= block.linear_preds.size();
+         loop_headers.pop_back();
+      }
+
       if (block.logical_idom != -1)
          process_block(ctx, block);
       else
          rename_phi_operands(block, ctx.renames);
 
-      ctx.exec_id++;
+      /* increment exec_id when entering nested control flow */
+      if (block.kind & block_kind_branch ||
+          block.kind & block_kind_loop_preheader ||
+          block.kind & block_kind_break ||
+          block.kind & block_kind_continue ||
+          block.kind & block_kind_discard)
+         ctx.exec_id++;
+      else if (block.kind & block_kind_continue_or_break)
+         ctx.exec_id += 2;
    }
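
As a worked example (hypothetical block order; the invert block between the
two sides is left out for brevity), an if/else diamond leaves the counter
where it started, so code before the branch and after the merge can still be
value numbered together, while the two sides never dominate one another and
are kept apart by the dominance check:

   block 0  (top level)             exec_id = 1
   block 1  (block_kind_branch)     exec_id = 1, incremented on exit -> 2
   block 2  (then side)             exec_id = 2
   block 3  (else side)             exec_id = 2
   block 4  (block_kind_merge)      decremented on entry -> exec_id = 1
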
 
    /* rename loop header phi operands */