nir: move data.image.access to data.access
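
NIR moved the per-variable image access qualifier from
var->data.image.access to the generic var->data.access field, so read it
from there instead. A small Converter::convert(enum gl_access_qualifier)
helper is added and getCacheModeFromVar() is routed through it; the same
helper picks the cache mode from nir_intrinsic_access() for the bindless
image intrinsics handled further down. (The remaining hunks in this diff
adapt the converter to related NIR API changes: bindless and non-deref
image intrinsics, nir_op_mov replacing imov/fmov, the per-component
nir_const_value layout, and scalar addressing for kernel inputs.)

As a rough sketch of the access-to-cache-mode mapping added below (the
CacheMode values are the existing nv50 IR ones):

    CacheMode
    Converter::convert(enum gl_access_qualifier access)
    {
       switch (access) {
       case ACCESS_VOLATILE: return CACHE_CV;  // volatile: re-fetch on every access
       case ACCESS_COHERENT: return CACHE_CG;  // coherent: cache at the global level
       default:              return CACHE_CA;  // cache at all levels
       }
    }
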
diff --git a/src/gallium/drivers/nouveau/codegen/nv50_ir_from_nir.cpp b/src/gallium/drivers/nouveau/codegen/nv50_ir_from_nir.cpp
index ed0c5d24b8514d4aa131e276f66775e944d5ad41..083659880699a53c25b8b4ce83581226018863e5 100644
--- a/src/gallium/drivers/nouveau/codegen/nv50_ir_from_nir.cpp
+++ b/src/gallium/drivers/nouveau/codegen/nv50_ir_from_nir.cpp
@@ -53,7 +53,7 @@ using std::tr1::unordered_map;
 using namespace nv50_ir;
 
 int
-type_size(const struct glsl_type *type)
+type_size(const struct glsl_type *type, bool bindless)
 {
    return glsl_count_attribute_slots(type, false);
 }
@@ -71,6 +71,7 @@ private:
    typedef unordered_map<unsigned, uint32_t> NirArrayLMemOffsets;
    typedef unordered_map<unsigned, BasicBlock*> NirBlockMap;
 
+   CacheMode convert(enum gl_access_qualifier);
    TexTarget convert(glsl_sampler_dim, bool isArray, bool isShadow);
    LValues& convert(nir_alu_dest *);
    BasicBlock* convert(nir_block *);
@@ -95,7 +96,10 @@ private:
    // If the found value does not have a constant part, it is returned
    // through the Value parameter.
    uint32_t getIndirect(nir_src *, uint8_t, Value *&);
-   uint32_t getIndirect(nir_intrinsic_instr *, uint8_t s, uint8_t c, Value *&);
+   // isScalar indicates that the addressing is scalar; otherwise vec4
+   // addressing is assumed
+   uint32_t getIndirect(nir_intrinsic_instr *, uint8_t s, uint8_t c, Value *&,
+                        bool isScalar = false);
 
    uint32_t getSlotAddress(nir_intrinsic_instr *, uint8_t idx, uint8_t slot);
 
@@ -347,7 +351,6 @@ Converter::getOperation(nir_op op)
    case nir_op_fadd:
    case nir_op_iadd:
       return OP_ADD;
-   case nir_op_fand:
    case nir_op_iand:
       return OP_AND;
    case nir_op_ifind_msb:
@@ -416,10 +419,8 @@ Converter::getOperation(nir_op op)
    case nir_op_fneg:
    case nir_op_ineg:
       return OP_NEG;
-   case nir_op_fnot:
    case nir_op_inot:
       return OP_NOT;
-   case nir_op_for:
    case nir_op_ior:
       return OP_OR;
    case nir_op_fpow:
@@ -450,12 +451,8 @@ Converter::getOperation(nir_op op)
       return OP_SIN;
    case nir_op_fsqrt:
       return OP_SQRT;
-   case nir_op_fsub:
-   case nir_op_isub:
-      return OP_SUB;
    case nir_op_ftrunc:
       return OP_TRUNC;
-   case nir_op_fxor:
    case nir_op_ixor:
       return OP_XOR;
    default:
@@ -503,20 +500,50 @@ Converter::getOperation(nir_intrinsic_op op)
       return OP_EMIT;
    case nir_intrinsic_end_primitive:
       return OP_RESTART;
+   case nir_intrinsic_bindless_image_atomic_add:
+   case nir_intrinsic_image_atomic_add:
    case nir_intrinsic_image_deref_atomic_add:
+   case nir_intrinsic_bindless_image_atomic_and:
+   case nir_intrinsic_image_atomic_and:
    case nir_intrinsic_image_deref_atomic_and:
+   case nir_intrinsic_bindless_image_atomic_comp_swap:
+   case nir_intrinsic_image_atomic_comp_swap:
    case nir_intrinsic_image_deref_atomic_comp_swap:
+   case nir_intrinsic_bindless_image_atomic_exchange:
+   case nir_intrinsic_image_atomic_exchange:
    case nir_intrinsic_image_deref_atomic_exchange:
-   case nir_intrinsic_image_deref_atomic_max:
-   case nir_intrinsic_image_deref_atomic_min:
+   case nir_intrinsic_bindless_image_atomic_imax:
+   case nir_intrinsic_image_atomic_imax:
+   case nir_intrinsic_image_deref_atomic_imax:
+   case nir_intrinsic_bindless_image_atomic_umax:
+   case nir_intrinsic_image_atomic_umax:
+   case nir_intrinsic_image_deref_atomic_umax:
+   case nir_intrinsic_bindless_image_atomic_imin:
+   case nir_intrinsic_image_atomic_imin:
+   case nir_intrinsic_image_deref_atomic_imin:
+   case nir_intrinsic_bindless_image_atomic_umin:
+   case nir_intrinsic_image_atomic_umin:
+   case nir_intrinsic_image_deref_atomic_umin:
+   case nir_intrinsic_bindless_image_atomic_or:
+   case nir_intrinsic_image_atomic_or:
    case nir_intrinsic_image_deref_atomic_or:
+   case nir_intrinsic_bindless_image_atomic_xor:
+   case nir_intrinsic_image_atomic_xor:
    case nir_intrinsic_image_deref_atomic_xor:
       return OP_SUREDP;
+   case nir_intrinsic_bindless_image_load:
+   case nir_intrinsic_image_load:
    case nir_intrinsic_image_deref_load:
       return OP_SULDP;
+   case nir_intrinsic_bindless_image_samples:
+   case nir_intrinsic_image_samples:
    case nir_intrinsic_image_deref_samples:
+   case nir_intrinsic_bindless_image_size:
+   case nir_intrinsic_image_size:
    case nir_intrinsic_image_deref_size:
       return OP_SUQ;
+   case nir_intrinsic_bindless_image_store:
+   case nir_intrinsic_image_store:
    case nir_intrinsic_image_deref_store:
       return OP_SUSTP;
    default:
@@ -554,38 +581,60 @@ int
 Converter::getSubOp(nir_intrinsic_op op)
 {
    switch (op) {
+   case nir_intrinsic_bindless_image_atomic_add:
+   case nir_intrinsic_image_atomic_add:
    case nir_intrinsic_image_deref_atomic_add:
    case nir_intrinsic_shared_atomic_add:
    case nir_intrinsic_ssbo_atomic_add:
       return  NV50_IR_SUBOP_ATOM_ADD;
+   case nir_intrinsic_bindless_image_atomic_and:
+   case nir_intrinsic_image_atomic_and:
    case nir_intrinsic_image_deref_atomic_and:
    case nir_intrinsic_shared_atomic_and:
    case nir_intrinsic_ssbo_atomic_and:
       return  NV50_IR_SUBOP_ATOM_AND;
+   case nir_intrinsic_bindless_image_atomic_comp_swap:
+   case nir_intrinsic_image_atomic_comp_swap:
    case nir_intrinsic_image_deref_atomic_comp_swap:
    case nir_intrinsic_shared_atomic_comp_swap:
    case nir_intrinsic_ssbo_atomic_comp_swap:
       return  NV50_IR_SUBOP_ATOM_CAS;
+   case nir_intrinsic_bindless_image_atomic_exchange:
+   case nir_intrinsic_image_atomic_exchange:
    case nir_intrinsic_image_deref_atomic_exchange:
    case nir_intrinsic_shared_atomic_exchange:
    case nir_intrinsic_ssbo_atomic_exchange:
       return  NV50_IR_SUBOP_ATOM_EXCH;
+   case nir_intrinsic_bindless_image_atomic_or:
+   case nir_intrinsic_image_atomic_or:
    case nir_intrinsic_image_deref_atomic_or:
    case nir_intrinsic_shared_atomic_or:
    case nir_intrinsic_ssbo_atomic_or:
       return  NV50_IR_SUBOP_ATOM_OR;
-   case nir_intrinsic_image_deref_atomic_max:
+   case nir_intrinsic_bindless_image_atomic_imax:
+   case nir_intrinsic_image_atomic_imax:
+   case nir_intrinsic_image_deref_atomic_imax:
+   case nir_intrinsic_bindless_image_atomic_umax:
+   case nir_intrinsic_image_atomic_umax:
+   case nir_intrinsic_image_deref_atomic_umax:
    case nir_intrinsic_shared_atomic_imax:
    case nir_intrinsic_shared_atomic_umax:
    case nir_intrinsic_ssbo_atomic_imax:
    case nir_intrinsic_ssbo_atomic_umax:
       return  NV50_IR_SUBOP_ATOM_MAX;
-   case nir_intrinsic_image_deref_atomic_min:
+   case nir_intrinsic_bindless_image_atomic_imin:
+   case nir_intrinsic_image_atomic_imin:
+   case nir_intrinsic_image_deref_atomic_imin:
+   case nir_intrinsic_bindless_image_atomic_umin:
+   case nir_intrinsic_image_atomic_umin:
+   case nir_intrinsic_image_deref_atomic_umin:
    case nir_intrinsic_shared_atomic_imin:
    case nir_intrinsic_shared_atomic_umin:
    case nir_intrinsic_ssbo_atomic_imin:
    case nir_intrinsic_ssbo_atomic_umin:
       return  NV50_IR_SUBOP_ATOM_MIN;
+   case nir_intrinsic_bindless_image_atomic_xor:
+   case nir_intrinsic_image_atomic_xor:
    case nir_intrinsic_image_deref_atomic_xor:
    case nir_intrinsic_shared_atomic_xor:
    case nir_intrinsic_ssbo_atomic_xor:
@@ -740,7 +789,7 @@ Converter::getIndirect(nir_src *src, uint8_t idx, Value *&indirect)
 
    if (offset) {
       indirect = NULL;
-      return offset->u32[0];
+      return offset[0].u32;
    }
 
    indirect = getSrc(src, idx, true);
@@ -748,10 +797,10 @@ Converter::getIndirect(nir_src *src, uint8_t idx, Value *&indirect)
 }
 
 uint32_t
-Converter::getIndirect(nir_intrinsic_instr *insn, uint8_t s, uint8_t c, Value *&indirect)
+Converter::getIndirect(nir_intrinsic_instr *insn, uint8_t s, uint8_t c, Value *&indirect, bool isScalar)
 {
    int32_t idx = nir_intrinsic_base(insn) + getIndirect(&insn->src[s], c, indirect);
-   if (indirect)
+   if (indirect && !isScalar)
       indirect = mkOp2v(OP_SHL, TYPE_U32, getSSA(4, FILE_ADDRESS), indirect, loadImm(NULL, 4));
    return idx;
 }
@@ -1128,6 +1177,7 @@ bool Converter::assignSlots() {
 
    info->io.viewportId = -1;
    info->numInputs = 0;
+   info->numOutputs = 0;
 
    // we have to fixup the uniform locations for arrays
    unsigned numImages = 0;
@@ -1139,6 +1189,37 @@ bool Converter::assignSlots() {
       numImages += type->is_array() ? type->arrays_of_arrays_size() : 1;
    }
 
+   info->numSysVals = 0;
+   for (uint8_t i = 0; i < SYSTEM_VALUE_MAX; ++i) {
+      if (!(nir->info.system_values_read & 1ull << i))
+         continue;
+
+      system_val_to_tgsi_semantic(i, &name, &index);
+      info->sv[info->numSysVals].sn = name;
+      info->sv[info->numSysVals].si = index;
+      info->sv[info->numSysVals].input = 0; // TODO inferSysValDirection(sn);
+
+      switch (i) {
+      case SYSTEM_VALUE_INSTANCE_ID:
+         info->io.instanceId = info->numSysVals;
+         break;
+      case SYSTEM_VALUE_TESS_LEVEL_INNER:
+      case SYSTEM_VALUE_TESS_LEVEL_OUTER:
+         info->sv[info->numSysVals].patch = 1;
+         break;
+      case SYSTEM_VALUE_VERTEX_ID:
+         info->io.vertexId = info->numSysVals;
+         break;
+      default:
+         break;
+      }
+
+      info->numSysVals += 1;
+   }
+
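+   // compute shaders have no varyings, so the input/output slot
+   // assignment below does not apply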
+   if (prog->getType() == Program::TYPE_COMPUTE)
+      return true;
+
    nir_foreach_variable(var, &nir->inputs) {
       const glsl_type *type = var->type;
       int slot = var->data.location;
@@ -1203,7 +1284,6 @@ bool Converter::assignSlots() {
       info->numInputs = std::max<uint8_t>(info->numInputs, vary);
    }
 
-   info->numOutputs = 0;
    nir_foreach_variable(var, &nir->outputs) {
       const glsl_type *type = var->type;
       int slot = var->data.location;
@@ -1289,40 +1369,12 @@ bool Converter::assignSlots() {
          else
             info->out[vary].mask |= ((1 << comp) - 1) << frac;
 
-         if (nir->info.outputs_read & 1ll << slot)
+         if (nir->info.outputs_read & 1ull << slot)
             info->out[vary].oread = 1;
       }
       info->numOutputs = std::max<uint8_t>(info->numOutputs, vary);
    }
 
-   info->numSysVals = 0;
-   for (uint8_t i = 0; i < 64; ++i) {
-      if (!(nir->info.system_values_read & 1ll << i))
-         continue;
-
-      system_val_to_tgsi_semantic(i, &name, &index);
-      info->sv[info->numSysVals].sn = name;
-      info->sv[info->numSysVals].si = index;
-      info->sv[info->numSysVals].input = 0; // TODO inferSysValDirection(sn);
-
-      switch (i) {
-      case SYSTEM_VALUE_INSTANCE_ID:
-         info->io.instanceId = info->numSysVals;
-         break;
-      case SYSTEM_VALUE_TESS_LEVEL_INNER:
-      case SYSTEM_VALUE_TESS_LEVEL_OUTER:
-         info->sv[info->numSysVals].patch = 1;
-         break;
-      case SYSTEM_VALUE_VERTEX_ID:
-         info->io.vertexId = info->numSysVals;
-         break;
-      default:
-         break;
-      }
-
-      info->numSysVals += 1;
-   }
-
    if (info->io.genUserClip > 0) {
       info->io.clipDistances = info->io.genUserClip;
 
@@ -1516,8 +1568,6 @@ Converter::parseNIR()
 bool
 Converter::visit(nir_function *function)
 {
-   // we only support emiting the main function for now
-   assert(!strcmp(function->name, "main"));
    assert(function->impl);
 
    // usually the blocks will set everything up, but main is special
@@ -1904,7 +1954,7 @@ Converter::visit(nir_intrinsic_instr *insn)
          }
          case Program::TYPE_GEOMETRY:
          case Program::TYPE_VERTEX: {
-            if (info->io.genUserClip > 0 && idx == clipVertexOutput) {
+            if (info->io.genUserClip > 0 && idx == (uint32_t)clipVertexOutput) {
                mkMov(clipVtx[i], src);
                src = clipVtx[i];
             }
@@ -2017,6 +2067,18 @@ Converter::visit(nir_intrinsic_instr *insn)
       }
       break;
    }
+   case nir_intrinsic_load_kernel_input: {
+      assert(prog->getType() == Program::TYPE_COMPUTE);
+      assert(insn->num_components == 1);
+
+      LValues &newDefs = convert(&insn->dest);
+      const DataType dType = getDType(insn);
+      Value *indirect;
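+      // scalar addressing: the indirect offset is used as-is instead of
+      // being scaled to vec4 slots (see getIndirect())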
+      uint32_t idx = getIndirect(insn, 0, 0, indirect, true);
+
+      mkLoad(dType, newDefs[0], mkSymbol(FILE_SHADER_INPUT, 0, dType, idx), indirect);
+      break;
+   }
    case nir_intrinsic_load_barycentric_at_offset:
    case nir_intrinsic_load_barycentric_at_sample:
    case nir_intrinsic_load_barycentric_centroid:
@@ -2317,12 +2379,116 @@ Converter::visit(nir_intrinsic_instr *insn)
       info->io.globalAccess |= 0x2;
       break;
    }
+   case nir_intrinsic_bindless_image_atomic_add:
+   case nir_intrinsic_bindless_image_atomic_and:
+   case nir_intrinsic_bindless_image_atomic_comp_swap:
+   case nir_intrinsic_bindless_image_atomic_exchange:
+   case nir_intrinsic_bindless_image_atomic_imax:
+   case nir_intrinsic_bindless_image_atomic_umax:
+   case nir_intrinsic_bindless_image_atomic_imin:
+   case nir_intrinsic_bindless_image_atomic_umin:
+   case nir_intrinsic_bindless_image_atomic_or:
+   case nir_intrinsic_bindless_image_atomic_xor:
+   case nir_intrinsic_bindless_image_load:
+   case nir_intrinsic_bindless_image_samples:
+   case nir_intrinsic_bindless_image_size:
+   case nir_intrinsic_bindless_image_store: {
+      std::vector<Value*> srcs, defs;
+      Value *indirect = getSrc(&insn->src[0], 0);
+      DataType ty;
+
+      uint32_t mask = 0;
+      TexInstruction::Target target =
+         convert(nir_intrinsic_image_dim(insn), !!nir_intrinsic_image_array(insn), false);
+      unsigned int argCount = getNIRArgCount(target);
+      uint16_t location = 0;
+
+      if (opInfo.has_dest) {
+         LValues &newDefs = convert(&insn->dest);
+         for (uint8_t i = 0u; i < newDefs.size(); ++i) {
+            defs.push_back(newDefs[i]);
+            mask |= 1 << i;
+         }
+      }
+
+      switch (op) {
+      case nir_intrinsic_bindless_image_atomic_add:
+      case nir_intrinsic_bindless_image_atomic_and:
+      case nir_intrinsic_bindless_image_atomic_comp_swap:
+      case nir_intrinsic_bindless_image_atomic_exchange:
+      case nir_intrinsic_bindless_image_atomic_imax:
+      case nir_intrinsic_bindless_image_atomic_umax:
+      case nir_intrinsic_bindless_image_atomic_imin:
+      case nir_intrinsic_bindless_image_atomic_umin:
+      case nir_intrinsic_bindless_image_atomic_or:
+      case nir_intrinsic_bindless_image_atomic_xor:
+         ty = getDType(insn);
+         mask = 0x1;
+         info->io.globalAccess |= 0x2;
+         break;
+      case nir_intrinsic_bindless_image_load:
+         ty = TYPE_U32;
+         info->io.globalAccess |= 0x1;
+         break;
+      case nir_intrinsic_bindless_image_store:
+         ty = TYPE_U32;
+         mask = 0xf;
+         info->io.globalAccess |= 0x2;
+         break;
+      case nir_intrinsic_bindless_image_samples:
+         mask = 0x8;
+         ty = TYPE_U32;
+         break;
+      case nir_intrinsic_bindless_image_size:
+         ty = TYPE_U32;
+         break;
+      default:
+         unreachable("unhandled image opcode");
+         break;
+      }
+
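+      // srcs as consumed below: 0 = bindless handle (used as indirect),
+      // 1 = coords, 2 = sample index (MS only),
+      // 3 = store value / atomic data, 4 = second comp_swap operand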
+      // coords
+      if (opInfo.num_srcs >= 2)
+         for (unsigned int i = 0u; i < argCount; ++i)
+            srcs.push_back(getSrc(&insn->src[1], i));
+
+      // for MS images the sample index is just another src after the coords
+      if (opInfo.num_srcs >= 3 && target.isMS())
+         srcs.push_back(getSrc(&insn->src[2], 0));
+
+      if (opInfo.num_srcs >= 4) {
+         unsigned components = opInfo.src_components[3] ? opInfo.src_components[3] : insn->num_components;
+         for (uint8_t i = 0u; i < components; ++i)
+            srcs.push_back(getSrc(&insn->src[3], i));
+      }
+
+      if (opInfo.num_srcs >= 5)
+         // 1 for atomic swap
+         for (uint8_t i = 0u; i < opInfo.src_components[4]; ++i)
+            srcs.push_back(getSrc(&insn->src[4], i));
+
+      TexInstruction *texi = mkTex(getOperation(op), target.getEnum(), location, 0, defs, srcs);
+      texi->tex.format = &nv50_ir::TexInstruction::formatTable[convertGLImgFormat(nir_intrinsic_format(insn))];
+      texi->tex.mask = mask;
+      texi->tex.bindless = true;
+      texi->cache = convert(nir_intrinsic_access(insn));
+      texi->setType(ty);
+      texi->subOp = getSubOp(op);
+
+      if (indirect)
+         texi->setIndirectR(indirect);
+
+      break;
+   }
    case nir_intrinsic_image_deref_atomic_add:
    case nir_intrinsic_image_deref_atomic_and:
    case nir_intrinsic_image_deref_atomic_comp_swap:
    case nir_intrinsic_image_deref_atomic_exchange:
-   case nir_intrinsic_image_deref_atomic_max:
-   case nir_intrinsic_image_deref_atomic_min:
+   case nir_intrinsic_image_deref_atomic_imax:
+   case nir_intrinsic_image_deref_atomic_umax:
+   case nir_intrinsic_image_deref_atomic_imin:
+   case nir_intrinsic_image_deref_atomic_umin:
    case nir_intrinsic_image_deref_atomic_or:
    case nir_intrinsic_image_deref_atomic_xor:
    case nir_intrinsic_image_deref_load:
@@ -2356,8 +2522,10 @@ Converter::visit(nir_intrinsic_instr *insn)
       case nir_intrinsic_image_deref_atomic_and:
       case nir_intrinsic_image_deref_atomic_comp_swap:
       case nir_intrinsic_image_deref_atomic_exchange:
-      case nir_intrinsic_image_deref_atomic_max:
-      case nir_intrinsic_image_deref_atomic_min:
+      case nir_intrinsic_image_deref_atomic_imax:
+      case nir_intrinsic_image_deref_atomic_umax:
+      case nir_intrinsic_image_deref_atomic_imin:
+      case nir_intrinsic_image_deref_atomic_umin:
       case nir_intrinsic_image_deref_atomic_or:
       case nir_intrinsic_image_deref_atomic_xor:
          ty = getDType(insn);
@@ -2469,6 +2637,42 @@ Converter::visit(nir_intrinsic_instr *insn)
       mkOp1(OP_RDSV, dType, newDefs[1], mkSysVal(SV_CLOCK, 0))->fixed = 1;
       break;
    }
+   case nir_intrinsic_load_global: {
+      const DataType dType = getDType(insn);
+      LValues &newDefs = convert(&insn->dest);
+      Value *indirectOffset;
+      uint32_t offset = getIndirect(&insn->src[0], 0, indirectOffset);
+
+      for (auto i = 0u; i < insn->num_components; ++i)
+         loadFrom(FILE_MEMORY_GLOBAL, 0, dType, newDefs[i], offset, i, indirectOffset);
+
+      info->io.globalAccess |= 0x1;
+      break;
+   }
+   case nir_intrinsic_store_global: {
+      DataType sType = getSType(insn->src[0], false, false);
+
+      for (auto i = 0u; i < insn->num_components; ++i) {
+         if (!((1u << i) & nir_intrinsic_write_mask(insn)))
+            continue;
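+         // 64-bit components are split and written as two 32-bit stores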
+         if (typeSizeof(sType) == 8) {
+            Value *split[2];
+            mkSplit(split, 4, getSrc(&insn->src[0], i));
+
+            Symbol *sym = mkSymbol(FILE_MEMORY_GLOBAL, 0, TYPE_U32, i * typeSizeof(sType));
+            mkStore(OP_STORE, TYPE_U32, sym, getSrc(&insn->src[1], 0), split[0]);
+
+            sym = mkSymbol(FILE_MEMORY_GLOBAL, 0, TYPE_U32, i * typeSizeof(sType) + 4);
+            mkStore(OP_STORE, TYPE_U32, sym, getSrc(&insn->src[1], 0), split[1]);
+         } else {
+            Symbol *sym = mkSymbol(FILE_MEMORY_GLOBAL, 0, sType, i * typeSizeof(sType));
+            mkStore(OP_STORE, sType, sym, getSrc(&insn->src[1], 0), getSrc(&insn->src[0], i));
+         }
+      }
+
+      info->io.globalAccess |= 0x2;
+      break;
+   }
    default:
       ERROR("unknown nir_intrinsic_op %s\n", nir_intrinsic_infos[op].name);
       return false;
@@ -2516,16 +2720,16 @@ Converter::convert(nir_load_const_instr *insn, uint8_t idx)
 
    switch (insn->def.bit_size) {
    case 64:
-      val = loadImm(getSSA(8), insn->value.u64[idx]);
+      val = loadImm(getSSA(8), insn->value[idx].u64);
       break;
    case 32:
-      val = loadImm(getSSA(4), insn->value.u32[idx]);
+      val = loadImm(getSSA(4), insn->value[idx].u32);
       break;
    case 16:
-      val = loadImm(getSSA(2), insn->value.u16[idx]);
+      val = loadImm(getSSA(2), insn->value[idx].u16);
       break;
    case 8:
-      val = loadImm(getSSA(1), insn->value.u8[idx]);
+      val = loadImm(getSSA(1), insn->value[idx].u8);
       break;
    default:
       unreachable("unhandled bit size!\n");
@@ -2566,7 +2770,6 @@ Converter::visit(nir_alu_instr *insn)
    case nir_op_iabs:
    case nir_op_fadd:
    case nir_op_iadd:
-   case nir_op_fand:
    case nir_op_iand:
    case nir_op_fceil:
    case nir_op_fcos:
@@ -2598,9 +2801,7 @@ Converter::visit(nir_alu_instr *insn)
    case nir_op_umul_high:
    case nir_op_fneg:
    case nir_op_ineg:
-   case nir_op_fnot:
    case nir_op_inot:
-   case nir_op_for:
    case nir_op_ior:
    case nir_op_pack_64_2x32_split:
    case nir_op_fpow:
@@ -2613,11 +2814,8 @@ Converter::visit(nir_alu_instr *insn)
    case nir_op_ushr:
    case nir_op_fsin:
    case nir_op_fsqrt:
-   case nir_op_fsub:
-   case nir_op_isub:
    case nir_op_ftrunc:
    case nir_op_ishl:
-   case nir_op_fxor:
    case nir_op_ixor: {
       DEFAULT_CHECKS;
       LValues &newDefs = convert(&insn->dest);
@@ -2706,8 +2904,7 @@ Converter::visit(nir_alu_instr *insn)
    // those are weird ALU ops and need special handling, because
    //   1. they are always component based
    //   2. they basically just merge multiple values into one data type
-   case nir_op_imov:
-   case nir_op_fmov:
+   case nir_op_mov:
       if (!insn->dest.dest.is_ssa && insn->dest.dest.reg.reg->num_array_elems) {
          nir_reg_dest& reg = insn->dest.dest.reg;
          uint32_t goffset = regToLmemOffset[reg.reg->index];
@@ -3028,7 +3225,7 @@ Converter::handleDeref(nir_deref_instr *deref, Value * &indirect, const nir_vari
       switch (deref->deref_type) {
       case nir_deref_type_array: {
          Value *indirect;
-         uint8_t size = type_size(deref->type);
+         uint8_t size = type_size(deref->type, true);
          result += size * getIndirect(&deref->arr.index, 0, indirect);
 
          if (indirect) {
@@ -3065,13 +3262,22 @@ Converter::handleDeref(nir_deref_instr *deref, Value * &indirect, const nir_vari
 }
 
 CacheMode
-Converter::getCacheModeFromVar(const nir_variable *var)
+Converter::convert(enum gl_access_qualifier access)
 {
-   if (var->data.image.access == ACCESS_VOLATILE)
+   switch (access) {
+   case ACCESS_VOLATILE:
       return CACHE_CV;
-   if (var->data.image.access == ACCESS_COHERENT)
+   case ACCESS_COHERENT:
       return CACHE_CG;
-   return CACHE_CA;
+   default:
+      return CACHE_CA;
+   }
+}
+
+CacheMode
+Converter::getCacheModeFromVar(const nir_variable *var)
+{
+   return convert(var->data.access);
 }
 
 bool
@@ -3111,6 +3317,11 @@ Converter::visit(nir_tex_instr *insn)
       int projIdx = nir_tex_instr_src_index(insn, nir_tex_src_projector);
       int sampOffIdx = nir_tex_instr_src_index(insn, nir_tex_src_sampler_offset);
       int texOffIdx = nir_tex_instr_src_index(insn, nir_tex_src_texture_offset);
+      int sampHandleIdx = nir_tex_instr_src_index(insn, nir_tex_src_sampler_handle);
+      int texHandleIdx = nir_tex_instr_src_index(insn, nir_tex_src_texture_handle);
+
+      bool bindless = sampHandleIdx != -1 || texHandleIdx != -1;
+      assert((sampHandleIdx != -1) == (texHandleIdx != -1));
 
       if (projIdx != -1)
          proj = mkOp1v(OP_RCP, TYPE_F32, getScratch(), getSrc(&insn->src[projIdx].src, 0));
@@ -3154,9 +3365,19 @@ Converter::visit(nir_tex_instr *insn)
          srcs.push_back(getSrc(&insn->src[sampOffIdx].src, 0));
          sampOffIdx = srcs.size() - 1;
       }
+      if (bindless) {
+         // currently we only use the lower 32 bits of the 64-bit handle
+         Value *split[2];
+         Value *handle = getSrc(&insn->src[sampHandleIdx].src, 0);
+
+         mkSplit(split, 4, handle);
+
+         srcs.push_back(split[0]);
+         texOffIdx = srcs.size() - 1;
+      }
 
-      r = insn->texture_index;
-      s = insn->sampler_index;
+      r = bindless ? 0xff : insn->texture_index;
+      s = bindless ? 0x1f : insn->sampler_index;
 
       defs.resize(newDefs.size());
       for (uint8_t d = 0u; d < newDefs.size(); ++d) {
@@ -3169,6 +3390,7 @@ Converter::visit(nir_tex_instr *insn)
       TexInstruction *texi = mkTex(op, target.getEnum(), r, s, defs, srcs);
       texi->tex.levelZero = lz;
       texi->tex.mask = mask;
+      texi->tex.bindless = bindless;
 
       if (texOffIdx != -1)
          texi->tex.rIndirectSrc = texOffIdx;
@@ -3273,7 +3495,7 @@ Converter::run()
    NIR_PASS_V(nir, nir_lower_regs_to_ssa);
    NIR_PASS_V(nir, nir_lower_load_const_to_scalar);
    NIR_PASS_V(nir, nir_lower_vars_to_ssa);
-   NIR_PASS_V(nir, nir_lower_alu_to_scalar);
+   NIR_PASS_V(nir, nir_lower_alu_to_scalar, NULL, NULL);
    NIR_PASS_V(nir, nir_lower_phis_to_scalar);
 
    do {