i965/fs: shuffle 32bits into 64bits for doubles
[mesa.git] / src / mesa / drivers / dri / i965 / brw_fs_nir.cpp
index 143d5f32e40beb7559e60c0d10d2d7e02885aa58..ad20dc8d92e61062413d1220743dbc91e517a60f 100644 (file)
@@ -103,10 +103,15 @@ fs_visitor::nir_setup_single_output_varying(fs_reg *reg,
       }
    } else {
       assert(type->is_scalar() || type->is_vector());
-      this->outputs[*location] = *reg;
-      this->output_components[*location] = type->vector_elements;
-      *reg = offset(*reg, bld, 4);
-      (*location)++;
+      unsigned num_elements = type->vector_elements;
+      if (type->is_double())
+         num_elements *= 2;
+      for (unsigned count = 0; count < num_elements; count += 4) {
+         this->outputs[*location] = *reg;
+         this->output_components[*location] = MIN2(4, num_elements - count);
+         *reg = offset(*reg, bld, 4);
+         (*location)++;
+      }
    }
 }
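
The loop above generalizes the old one-slot-per-varying assignment: a double
consumes two 32-bit components, so a dvec3 output needs six components and
spills into a second vec4 slot. A minimal standalone model of the slot math
(a hypothetical assign_slots(), with the visitor state replaced by locals):

    #include <algorithm>
    #include <cstdio>

    /* Each varying slot holds up to four 32-bit components. */
    static void assign_slots(unsigned vector_elements, bool is_double)
    {
       unsigned num_elements = vector_elements * (is_double ? 2 : 1);
       unsigned location = 0;
       for (unsigned count = 0; count < num_elements; count += 4) {
          printf("slot %u: %u components\n",
                 location, std::min(4u, num_elements - count));
          location++;
       }
    }

    /* assign_slots(3, true) -> "slot 0: 4 components"
     *                          "slot 1: 2 components"
     */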
 
@@ -2327,34 +2332,82 @@ fs_visitor::nir_emit_tcs_intrinsic(const fs_builder &bld,
                   brw_imm_ud(4 * REG_SIZE));
       }
 
-      if (indirect_offset.file == BAD_FILE) {
-         /* Constant indexing - use global offset. */
-         inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8, dst, icp_handle);
-         inst->offset = imm_offset;
-         inst->mlen = 1;
-         inst->base_mrf = -1;
-         inst->regs_written = instr->num_components;
-      } else {
-         /* Indirect indexing - use per-slot offsets as well. */
-         const fs_reg srcs[] = { icp_handle, indirect_offset };
-         fs_reg payload = bld.vgrf(BRW_REGISTER_TYPE_UD, 2);
-         bld.LOAD_PAYLOAD(payload, srcs, ARRAY_SIZE(srcs), 0);
+      /* We can only read two double components with each URB read, so
+       * we send two read messages in that case, each one loading up to
+       * two double components.
+       */
+      unsigned num_iterations = 1;
+      unsigned num_components = instr->num_components;
+      fs_reg orig_dst = dst;
+      if (type_sz(dst.type) == 8) {
+         if (instr->num_components > 2) {
+            num_iterations = 2;
+            num_components = 2;
+         }
 
-         inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT, dst, payload);
-         inst->offset = imm_offset;
-         inst->base_mrf = -1;
-         inst->mlen = 2;
-         inst->regs_written = instr->num_components;
+         fs_reg tmp = fs_reg(VGRF, alloc.allocate(4), dst.type);
+         dst = tmp;
       }
 
-      /* Copy the temporary to the destination to deal with writemasking.
-       *
-       * Also attempt to deal with gl_PointSize being in the .w component.
-       */
-      if (inst->offset == 0 && indirect_offset.file == BAD_FILE) {
-         inst->dst = bld.vgrf(dst.type, 4);
-         inst->regs_written = 4;
-         bld.MOV(dst, offset(inst->dst, bld, 3));
+      for (unsigned iter = 0; iter < num_iterations; iter++) {
+         if (indirect_offset.file == BAD_FILE) {
+            /* Constant indexing - use global offset. */
+            inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8, dst, icp_handle);
+            inst->offset = imm_offset;
+            inst->mlen = 1;
+            inst->base_mrf = -1;
+         } else {
+            /* Indirect indexing - use per-slot offsets as well. */
+            const fs_reg srcs[] = { icp_handle, indirect_offset };
+            fs_reg payload = bld.vgrf(BRW_REGISTER_TYPE_UD, 2);
+            bld.LOAD_PAYLOAD(payload, srcs, ARRAY_SIZE(srcs), 0);
+
+            inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT, dst, payload);
+            inst->offset = imm_offset;
+            inst->base_mrf = -1;
+            inst->mlen = 2;
+         }
+         inst->regs_written = num_components * type_sz(dst.type) / 4;
+
+         /* If we are reading 64-bit data using 32-bit read messages, we need
+          * to build proper 64-bit data elements by shuffling the low and high
+          * 32-bit components around, like we do for other things like UBOs
+          * or SSBOs.
+          */
+         if (type_sz(dst.type) == 8) {
+            shuffle_32bit_load_result_to_64bit_data(
+               bld, dst, retype(dst, BRW_REGISTER_TYPE_F), num_components);
+
+            for (unsigned c = 0; c < num_components; c++) {
+               bld.MOV(offset(orig_dst, bld, iter * 2 + c),
+                       offset(dst, bld, c));
+            }
+         }
+
+         /* Copy the temporary to the destination to deal with writemasking.
+          *
+          * Also attempt to deal with gl_PointSize being in the .w component.
+          */
+         if (inst->offset == 0 && indirect_offset.file == BAD_FILE) {
+            assert(type_sz(dst.type) < 8);
+            inst->dst = bld.vgrf(dst.type, 4);
+            inst->regs_written = 4;
+            bld.MOV(dst, offset(inst->dst, bld, 3));
+         }
+
+         /* If we are loading double data and we need a second read message,
+          * adjust the read offset for it.
+          */
+         if (num_iterations > 1) {
+            num_components = instr->num_components - 2;
+            if (indirect_offset.file == BAD_FILE) {
+               imm_offset++;
+            } else {
+               fs_reg new_indirect = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
+               bld.ADD(new_indirect, indirect_offset, brw_imm_ud(1u));
+               indirect_offset = new_indirect;
+            }
+         }
       }
       break;
    }
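
For reference, a scalar model of the layout transform that
shuffle_32bit_load_result_to_64bit_data performs on the read results above.
This only illustrates the data layout, with SIMD8 registers modeled as plain
arrays; the real helper operates on strided GRF regions:

    #include <cstdint>

    static const unsigned SIMD_WIDTH = 8;

    /* The 32-bit read returns, for each 64-bit component, one register of
     * low 32-bit halves followed by one register of high halves. A 64-bit
     * register needs lo/hi adjacent per SIMD channel instead.
     */
    static void shuffle_to_64bit(uint64_t *dst64, const uint32_t *src32,
                                 unsigned components)
    {
       for (unsigned i = 0; i < components; i++) {
          for (unsigned c = 0; c < SIMD_WIDTH; c++) {
             uint32_t lo = src32[(2 * i) * SIMD_WIDTH + c];
             uint32_t hi = src32[(2 * i + 1) * SIMD_WIDTH + c];
             dst64[i * SIMD_WIDTH + c] = ((uint64_t)hi << 32) | lo;
          }
       }
    }
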
@@ -2473,6 +2526,8 @@ fs_visitor::nir_emit_tcs_intrinsic(const fs_builder &bld,
    case nir_intrinsic_store_output:
    case nir_intrinsic_store_per_vertex_output: {
       fs_reg value = get_nir_src(instr->src[0]);
+      bool is_64bit = (instr->src[0].is_ssa ?
+         instr->src[0].ssa->bit_size : instr->src[0].reg.reg->bit_size) == 64;
       fs_reg indirect_offset = get_indirect_offset(instr);
       unsigned imm_offset = instr->const_index[0];
       unsigned swiz = BRW_SWIZZLE_XYZW;
@@ -2540,32 +2595,105 @@ fs_visitor::nir_emit_tcs_intrinsic(const fs_builder &bld,
       unsigned num_components = _mesa_fls(mask);
       enum opcode opcode;
 
-      if (mask != WRITEMASK_XYZW) {
-         srcs[header_regs++] = brw_imm_ud(mask << 16);
-         opcode = indirect_offset.file != BAD_FILE ?
-            SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT :
-            SHADER_OPCODE_URB_WRITE_SIMD8_MASKED;
-      } else {
-         opcode = indirect_offset.file != BAD_FILE ?
-            SHADER_OPCODE_URB_WRITE_SIMD8_PER_SLOT :
-            SHADER_OPCODE_URB_WRITE_SIMD8;
+      /* We can only pack two 64-bit components in a single message, so we
+       * send two messages if we have more components than that.
+       */
+      unsigned num_iterations = 1;
+      unsigned iter_components = num_components;
+      if (is_64bit && instr->num_components > 2) {
+         num_iterations = 2;
+         iter_components = 2;
       }
 
-      for (unsigned i = 0; i < num_components; i++) {
-         if (mask & (1 << i))
-            srcs[header_regs + i] = offset(value, bld, BRW_GET_SWZ(swiz, i));
-      }
+      /* 64-bit data needs to be shuffled before we can write it to the URB.
+       * We will use this temporary to shuffle the components in each
+       * iteration.
+       */
+      fs_reg tmp =
+         fs_reg(VGRF, alloc.allocate(2 * iter_components), value.type);
+
+      for (unsigned iter = 0; iter < num_iterations; iter++) {
+         if (!is_64bit && mask != WRITEMASK_XYZW) {
+            srcs[header_regs++] = brw_imm_ud(mask << 16);
+            opcode = indirect_offset.file != BAD_FILE ?
+               SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT :
+               SHADER_OPCODE_URB_WRITE_SIMD8_MASKED;
+         } else if (is_64bit && ((mask & WRITEMASK_XY) != WRITEMASK_XY)) {
+            /* Expand the 64-bit mask to 32-bit channels. We only handle
+             * two channels in each iteration, so we only care about X/Y.
+             */
+            unsigned mask32 = 0;
+            if (mask & WRITEMASK_X)
+               mask32 |= WRITEMASK_XY;
+            if (mask & WRITEMASK_Y)
+               mask32 |= WRITEMASK_ZW;
+
+            /* If the mask does not include any of the channels X or Y, there
+             * is nothing to do in this iteration, so move on to the next
+             * pair of 64-bit channels.
+             */
+            if (!mask32) {
+               mask >>= 2;
+               imm_offset++;
+               continue;
+            }
 
-      unsigned mlen = header_regs + num_components;
+            srcs[header_regs++] = brw_imm_ud(mask32 << 16);
+            opcode = indirect_offset.file != BAD_FILE ?
+               SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT :
+               SHADER_OPCODE_URB_WRITE_SIMD8_MASKED;
+         } else {
+            opcode = indirect_offset.file != BAD_FILE ?
+               SHADER_OPCODE_URB_WRITE_SIMD8_PER_SLOT :
+               SHADER_OPCODE_URB_WRITE_SIMD8;
+         }
 
-      fs_reg payload =
-         bld.vgrf(BRW_REGISTER_TYPE_UD, mlen);
-      bld.LOAD_PAYLOAD(payload, srcs, mlen, header_regs);
+         for (unsigned i = 0; i < iter_components; i++) {
+            if (!(mask & (1 << i)))
+               continue;
 
-      fs_inst *inst = bld.emit(opcode, bld.null_reg_ud(), payload);
-      inst->offset = imm_offset;
-      inst->mlen = mlen;
-      inst->base_mrf = -1;
+            if (!is_64bit) {
+               srcs[header_regs + i] = offset(value, bld, BRW_GET_SWZ(swiz, i));
+            } else {
+               /* We need to shuffle the 64-bit data to match the layout
+                * expected by our 32-bit URB write messages. We use a temporary
+                * for that.
+                */
+               unsigned channel = BRW_GET_SWZ(swiz, iter * 2 + i);
+               shuffle_64bit_data_for_32bit_write(bld,
+                  retype(offset(tmp, bld, 2 * i), BRW_REGISTER_TYPE_F),
+                  retype(offset(value, bld, 2 * channel), BRW_REGISTER_TYPE_DF),
+                  1);
+
+               /* Now copy the data to the destination */
+               fs_reg dest = fs_reg(VGRF, alloc.allocate(2), value.type);
+               unsigned idx = 2 * i;
+               bld.MOV(dest, offset(tmp, bld, idx));
+               bld.MOV(offset(dest, bld, 1), offset(tmp, bld, idx + 1));
+               srcs[header_regs + idx] = dest;
+               srcs[header_regs + idx + 1] = offset(dest, bld, 1);
+            }
+         }
+
+         unsigned mlen =
+            header_regs + (is_64bit ? 2 * iter_components : iter_components);
+         fs_reg payload =
+            bld.vgrf(BRW_REGISTER_TYPE_UD, mlen);
+         bld.LOAD_PAYLOAD(payload, srcs, mlen, header_regs);
+
+         fs_inst *inst = bld.emit(opcode, bld.null_reg_ud(), payload);
+         inst->offset = imm_offset;
+         inst->mlen = mlen;
+         inst->base_mrf = -1;
+
+         /* If this is a 64-bit attribute, select the next two 64-bit channels
+          * to be handled in the next iteration.
+          */
+         if (is_64bit) {
+            mask >>= 2;
+            imm_offset++;
+         }
+      }
       break;
    }
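
A worked example of the 64-bit mask handling above, using the WRITEMASK_*
values from the driver (X=1, Y=2, Z=4, W=8) and a hypothetical helper that
mirrors the expansion done in the loop:

    /* Only the low two 64-bit channels of the current pair are handled per
     * message; each one expands to two 32-bit channels.
     */
    static unsigned expand_64bit_writemask(unsigned mask)
    {
       unsigned mask32 = 0;
       if (mask & 1)      /* WRITEMASK_X */
          mask32 |= 0x3;  /* WRITEMASK_XY */
       if (mask & 2)      /* WRITEMASK_Y */
          mask32 |= 0xc;  /* WRITEMASK_ZW */
       return mask32;
    }

    /* Storing a dvec4 with writemask .yzw (mask = 0b1110):
     *   iteration 0: expand_64bit_writemask(0b1110) = WRITEMASK_ZW
     *                -> masked message writes dwords 2..3 of the first slot
     *   iteration 1: mask >>= 2 gives 0b0011 -> (mask & XY) == XY
     *                -> unmasked message writes the next slot in full
     */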
 
@@ -2653,8 +2781,8 @@ fs_visitor::nir_emit_tes_intrinsic(const fs_builder &bld,
          if (imm_offset < max_push_slots) {
             fs_reg src = fs_reg(ATTR, imm_offset / 2, dest.type);
             for (int i = 0; i < instr->num_components; i++) {
-               bld.MOV(offset(dest, bld, i),
-                       component(src, 4 * (imm_offset % 2) + i));
+               unsigned comp = 16 / type_sz(dest.type) * (imm_offset % 2) + i;
+               bld.MOV(offset(dest, bld, i), component(src, comp));
             }
             tes_prog_data->base.urb_read_length =
                MAX2(tes_prog_data->base.urb_read_length,
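
The component index above scales with the data size: ATTR registers are
addressed in pairs of URB vec4 slots (imm_offset / 2), and a vec4 slot spans
16 bytes, i.e. 16 / type_sz components of dest.type. Worked out:

    float  (type_sz == 4): comp = 4 * (imm_offset % 2) + i   /* old behavior */
    double (type_sz == 8): comp = 2 * (imm_offset % 2) + i
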
@@ -3125,6 +3253,29 @@ fs_visitor::nir_emit_cs_intrinsic(const fs_builder &bld,
       /* Writemask */
       unsigned writemask = instr->const_index[1];
 
+      /* get_nir_src() retypes to integer. Be wary of 64-bit types though,
+       * since the untyped writes below operate in units of 32 bits, which
+       * means that we need to write twice as many components each time.
+       * Also, we have to shuffle 64-bit data into the appropriate layout
+       * expected by our 32-bit write messages.
+       */
+      unsigned type_size = 4;
+      unsigned bit_size = instr->src[0].is_ssa ?
+         instr->src[0].ssa->bit_size : instr->src[0].reg.reg->bit_size;
+      if (bit_size == 64) {
+         type_size = 8;
+         fs_reg tmp =
+           fs_reg(VGRF, alloc.allocate(alloc.sizes[val_reg.nr]), val_reg.type);
+         shuffle_64bit_data_for_32bit_write(
+            bld,
+            retype(tmp, BRW_REGISTER_TYPE_F),
+            retype(val_reg, BRW_REGISTER_TYPE_DF),
+            instr->num_components);
+         val_reg = tmp;
+      }
+
+      unsigned type_slots = type_size / 4;
+
       /* Combine groups of consecutive enabled channels in one write
        * message. We use ffs to find the first enabled channel and then ffs on
        * the bit-inverse, down-shifted writemask to determine the length of
@@ -3133,22 +3284,29 @@ fs_visitor::nir_emit_cs_intrinsic(const fs_builder &bld,
       while (writemask) {
          unsigned first_component = ffs(writemask) - 1;
          unsigned length = ffs(~(writemask >> first_component)) - 1;
-         fs_reg offset_reg;
 
+         /* We can't write more than two 64-bit components at once. Limit
+          * the length of the write to what we can do and let the next
+          * iteration handle the rest.
+          */
+         if (type_size > 4)
+            length = MIN2(2, length);
+
+         fs_reg offset_reg;
          nir_const_value *const_offset = nir_src_as_const_value(instr->src[1]);
          if (const_offset) {
             offset_reg = brw_imm_ud(instr->const_index[0] + const_offset->u32[0] +
-                                    4 * first_component);
+                                    type_size * first_component);
          } else {
             offset_reg = vgrf(glsl_type::uint_type);
             bld.ADD(offset_reg,
                     retype(get_nir_src(instr->src[1]), BRW_REGISTER_TYPE_UD),
-                    brw_imm_ud(instr->const_index[0] + 4 * first_component));
+                    brw_imm_ud(instr->const_index[0] + type_size * first_component));
          }
 
          emit_untyped_write(bld, surf_index, offset_reg,
-                            offset(val_reg, bld, first_component),
-                            1 /* dims */, length,
+                            offset(val_reg, bld, first_component * type_slots),
+                            1 /* dims */, length * type_slots,
                             BRW_PREDICATE_NONE);
 
          /* Clear the bits in the writemask that we just wrote, then try
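
A minimal scalar sketch of this writemask loop, assuming a hypothetical
emit_write(dword_offset, dword_length) in place of emit_untyped_write() and
with the surface/payload plumbing omitted:

    #include <strings.h>

    #define MIN2(a, b) ((a) < (b) ? (a) : (b))

    static void write_runs(unsigned writemask, unsigned type_size,
                           unsigned base_offset,
                           void (*emit_write)(unsigned, unsigned))
    {
       unsigned type_slots = type_size / 4;   /* dwords per component */
       while (writemask) {
          /* First enabled channel and length of the contiguous run. */
          unsigned first_component = ffs(writemask) - 1;
          unsigned length = ffs(~(writemask >> first_component)) - 1;

          /* At most two 64-bit components per message, as noted above. */
          if (type_size > 4)
             length = MIN2(2, length);

          emit_write(base_offset + type_size * first_component,
                     length * type_slots);

          /* Clear the bits we just wrote and look for another run. */
          writemask &= ~(((1u << length) - 1) << first_component);
       }
    }
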
@@ -3553,6 +3711,14 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr
       for (unsigned j = 0; j < instr->num_components; j++) {
          bld.MOV(offset(dest, bld, j), offset(src, bld, j));
       }
+
+      if (type_sz(src.type) == 8) {
+         shuffle_32bit_load_result_to_64bit_data(bld,
+                                                 dest,
+                                                 retype(dest, BRW_REGISTER_TYPE_F),
+                                                 instr->num_components);
+      }
+
       break;
    }
 
@@ -3584,6 +3750,28 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr
       /* Writemask */
       unsigned writemask = instr->const_index[0];
 
+      /* get_nir_src() retypes to integer. Be wary of 64-bit types though,
+       * since the untyped writes below operate in units of 32 bits, which
+       * means that we need to write twice as many components each time.
+       * Also, we have to shuffle 64-bit data into the appropriate layout
+       * expected by our 32-bit write messages.
+       */
+      unsigned type_size = 4;
+      unsigned bit_size = instr->src[0].is_ssa ?
+         instr->src[0].ssa->bit_size : instr->src[0].reg.reg->bit_size;
+      if (bit_size == 64) {
+         type_size = 8;
+         fs_reg tmp =
+           fs_reg(VGRF, alloc.allocate(alloc.sizes[val_reg.nr]), val_reg.type);
+         shuffle_64bit_data_for_32bit_write(bld,
+            retype(tmp, BRW_REGISTER_TYPE_F),
+            retype(val_reg, BRW_REGISTER_TYPE_DF),
+            instr->num_components);
+         val_reg = tmp;
+      }
+
+      unsigned type_slots = type_size / 4;
+
       /* Combine groups of consecutive enabled channels in one write
        * message. We use ffs to find the first enabled channel and then ffs on
        * the bit-inverse, down-shifted writemask to determine the length of
@@ -3593,20 +3781,28 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr
          unsigned first_component = ffs(writemask) - 1;
          unsigned length = ffs(~(writemask >> first_component)) - 1;
 
+         /* We can't write more than two 64-bit components at once. Limit
+          * the length of the write to what we can do and let the next
+          * iteration handle the rest.
+          */
+         if (type_size > 4)
+            length = MIN2(2, length);
+
          fs_reg offset_reg;
          nir_const_value *const_offset = nir_src_as_const_value(instr->src[2]);
          if (const_offset) {
-            offset_reg = brw_imm_ud(const_offset->u32[0] + 4 * first_component);
+            offset_reg = brw_imm_ud(const_offset->u32[0] +
+                                    type_size * first_component);
          } else {
             offset_reg = vgrf(glsl_type::uint_type);
             bld.ADD(offset_reg,
                     retype(get_nir_src(instr->src[2]), BRW_REGISTER_TYPE_UD),
-                    brw_imm_ud(4 * first_component));
+                    brw_imm_ud(type_size * first_component));
          }
 
          emit_untyped_write(bld, surf_index, offset_reg,
-                            offset(val_reg, bld, first_component),
-                            1 /* dims */, length,
+                            offset(val_reg, bld, first_component * type_slots),
+                            1 /* dims */, length * type_slots,
                             BRW_PREDICATE_NONE);
 
          /* Clear the bits in the writemask that we just wrote, then try
@@ -3626,7 +3823,20 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr
       assert(const_offset && "Indirect output stores not allowed");
       new_dest = offset(new_dest, bld, const_offset->u32[0]);
 
-      for (unsigned j = 0; j < instr->num_components; j++) {
+      unsigned num_components = instr->num_components;
+      unsigned bit_size = instr->src[0].is_ssa ?
+         instr->src[0].ssa->bit_size : instr->src[0].reg.reg->bit_size;
+      if (bit_size == 64) {
+         fs_reg tmp =
+            fs_reg(VGRF, alloc.allocate(2 * num_components),
+                   BRW_REGISTER_TYPE_F);
+         shuffle_64bit_data_for_32bit_write(
+            bld, tmp, retype(src, BRW_REGISTER_TYPE_DF), num_components);
+         src = retype(tmp, src.type);
+         num_components *= 2;
+      }
+
+      for (unsigned j = 0; j < num_components; j++) {
          bld.MOV(offset(new_dest, bld, j), offset(src, bld, j));
       }
       break;
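
For completeness, the inverse of the load-side shuffle: a scalar model of
the layout shuffle_64bit_data_for_32bit_write produces before the 32-bit
MOVs above (illustrative only; the real helper operates on strided GRF
regions per SIMD channel):

    #include <cstdint>

    static const unsigned SIMD_WIDTH = 8;

    /* Each 64-bit component is split into one register of low 32-bit halves
     * followed by one register of high halves, which is why num_components
     * is doubled above: a dvec3 store becomes six 32-bit MOVs.
     */
    static void shuffle_to_32bit(uint32_t *dst32, const uint64_t *src64,
                                 unsigned components)
    {
       for (unsigned i = 0; i < components; i++) {
          for (unsigned c = 0; c < SIMD_WIDTH; c++) {
             uint64_t v = src64[i * SIMD_WIDTH + c];
             dst32[(2 * i) * SIMD_WIDTH + c] = (uint32_t)v;
             dst32[(2 * i + 1) * SIMD_WIDTH + c] = (uint32_t)(v >> 32);
          }
       }
    }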