#else
#include <tr1/unordered_map>
#endif
+#include <cstring>
+#include <list>
#include <vector>
namespace {
using namespace nv50_ir;
int
-type_size(const struct glsl_type *type)
+type_size(const struct glsl_type *type, bool bindless)
{
return glsl_count_attribute_slots(type, false);
}
private:
typedef std::vector<LValue*> LValues;
typedef unordered_map<unsigned, LValues> NirDefMap;
+ typedef unordered_map<unsigned, nir_load_const_instr*> ImmediateMap;
+ typedef unordered_map<unsigned, uint32_t> NirArrayLMemOffsets;
typedef unordered_map<unsigned, BasicBlock*> NirBlockMap;
+ CacheMode convert(enum gl_access_qualifier);
+ TexTarget convert(glsl_sampler_dim, bool isArray, bool isShadow);
LValues& convert(nir_alu_dest *);
BasicBlock* convert(nir_block *);
LValues& convert(nir_dest *);
+ SVSemantic convert(nir_intrinsic_op);
+ Value* convert(nir_load_const_instr*, uint8_t);
LValues& convert(nir_register *);
LValues& convert(nir_ssa_def *);
+ ImgFormat convertGLImgFormat(GLuint);
+
Value* getSrc(nir_alu_src *, uint8_t component = 0);
Value* getSrc(nir_register *, uint8_t);
Value* getSrc(nir_src *, uint8_t, bool indirect = false);
// If the found value does not have a constant part, the Value is returned
// through the Value parameter.
uint32_t getIndirect(nir_src *, uint8_t, Value *&);
- uint32_t getIndirect(nir_intrinsic_instr *, uint8_t s, uint8_t c, Value *&);
+   // isScalar indicates that the addressing is scalar; otherwise vec4
+   // addressing is assumed
+ uint32_t getIndirect(nir_intrinsic_instr *, uint8_t s, uint8_t c, Value *&,
+ bool isScalar = false);
uint32_t getSlotAddress(nir_intrinsic_instr *, uint8_t idx, uint8_t slot);
DataType getDType(nir_alu_instr *);
DataType getDType(nir_intrinsic_instr *);
+ DataType getDType(nir_intrinsic_instr *, bool isSigned);
DataType getDType(nir_op, uint8_t);
std::vector<DataType> getSTypes(nir_alu_instr *);
DataType getSType(nir_src &, bool isFloat, bool isSigned);
+ operation getOperation(nir_intrinsic_op);
operation getOperation(nir_op);
+ operation getOperation(nir_texop);
operation preOperationNeeded(nir_op);
+ int getSubOp(nir_intrinsic_op);
int getSubOp(nir_op);
CondCode getCondCode(nir_op);
bool visit(nir_alu_instr *);
bool visit(nir_block *);
bool visit(nir_cf_node *);
+ bool visit(nir_deref_instr *);
bool visit(nir_function *);
bool visit(nir_if *);
bool visit(nir_instr *);
bool visit(nir_jump_instr *);
bool visit(nir_load_const_instr*);
bool visit(nir_loop *);
+ bool visit(nir_ssa_undef_instr *);
+ bool visit(nir_tex_instr *);
+
+ // tex stuff
+ Value* applyProjection(Value *src, Value *proj);
+ unsigned int getNIRArgCount(TexInstruction::Target&);
+
+ // image stuff
+ uint16_t handleDeref(nir_deref_instr *, Value * & indirect, const nir_variable * &);
+ CacheMode getCacheModeFromVar(const nir_variable *);
nir_shader *nir;
NirDefMap ssaDefs;
NirDefMap regDefs;
+ ImmediateMap immediates;
+ NirArrayLMemOffsets regToLmemOffset;
NirBlockMap blocks;
unsigned int curLoopDepth;
BasicBlock *exit;
Value *zero;
+ Instruction *immInsertPos;
+
+ int clipVertexOutput;
union {
struct {
Converter::Converter(Program *prog, nir_shader *nir, nv50_ir_prog_info *info)
: ConverterCommon(prog, info),
nir(nir),
- curLoopDepth(0)
+ curLoopDepth(0),
+ clipVertexOutput(-1)
{
zero = mkImm((uint32_t)0);
}
DataType
Converter::getDType(nir_intrinsic_instr *insn)
+{
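+   // signed min/max atomics have to be emitted with a signed destination
+   // type, so that the hardware performs a signed comparison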
+ bool isSigned;
+ switch (insn->intrinsic) {
+ case nir_intrinsic_shared_atomic_imax:
+ case nir_intrinsic_shared_atomic_imin:
+ case nir_intrinsic_ssbo_atomic_imax:
+ case nir_intrinsic_ssbo_atomic_imin:
+ isSigned = true;
+ break;
+ default:
+ isSigned = false;
+ break;
+ }
+
+ return getDType(insn, isSigned);
+}
+
+DataType
+Converter::getDType(nir_intrinsic_instr *insn, bool isSigned)
{
if (insn->dest.is_ssa)
- return typeOfSize(insn->dest.ssa.bit_size / 8, false, false);
+ return typeOfSize(insn->dest.ssa.bit_size / 8, false, isSigned);
else
- return typeOfSize(insn->dest.reg.reg->bit_size / 8, false, false);
+ return typeOfSize(insn->dest.reg.reg->bit_size / 8, false, isSigned);
}
DataType
case nir_op_fadd:
case nir_op_iadd:
return OP_ADD;
- case nir_op_fand:
case nir_op_iand:
return OP_AND;
case nir_op_ifind_msb:
case nir_op_fneg:
case nir_op_ineg:
return OP_NEG;
- case nir_op_fnot:
case nir_op_inot:
return OP_NOT;
- case nir_op_for:
case nir_op_ior:
return OP_OR;
case nir_op_fpow:
return OP_SIN;
case nir_op_fsqrt:
return OP_SQRT;
- case nir_op_fsub:
- case nir_op_isub:
- return OP_SUB;
case nir_op_ftrunc:
return OP_TRUNC;
- case nir_op_fxor:
case nir_op_ixor:
return OP_XOR;
default:
}
}
+operation
+Converter::getOperation(nir_texop op)
+{
+ switch (op) {
+ case nir_texop_tex:
+ return OP_TEX;
+ case nir_texop_lod:
+ return OP_TXLQ;
+ case nir_texop_txb:
+ return OP_TXB;
+ case nir_texop_txd:
+ return OP_TXD;
+ case nir_texop_txf:
+ case nir_texop_txf_ms:
+ return OP_TXF;
+ case nir_texop_tg4:
+ return OP_TXG;
+ case nir_texop_txl:
+ return OP_TXL;
+ case nir_texop_query_levels:
+ case nir_texop_texture_samples:
+ case nir_texop_txs:
+ return OP_TXQ;
+ default:
+ ERROR("couldn't get operation for nir_texop %u\n", op);
+ assert(false);
+ return OP_NOP;
+ }
+}
+
+operation
+Converter::getOperation(nir_intrinsic_op op)
+{
+ switch (op) {
+ case nir_intrinsic_emit_vertex:
+ return OP_EMIT;
+ case nir_intrinsic_end_primitive:
+ return OP_RESTART;
+ case nir_intrinsic_bindless_image_atomic_add:
+ case nir_intrinsic_image_atomic_add:
+ case nir_intrinsic_image_deref_atomic_add:
+ case nir_intrinsic_bindless_image_atomic_and:
+ case nir_intrinsic_image_atomic_and:
+ case nir_intrinsic_image_deref_atomic_and:
+ case nir_intrinsic_bindless_image_atomic_comp_swap:
+ case nir_intrinsic_image_atomic_comp_swap:
+ case nir_intrinsic_image_deref_atomic_comp_swap:
+ case nir_intrinsic_bindless_image_atomic_exchange:
+ case nir_intrinsic_image_atomic_exchange:
+ case nir_intrinsic_image_deref_atomic_exchange:
+ case nir_intrinsic_bindless_image_atomic_imax:
+ case nir_intrinsic_image_atomic_imax:
+ case nir_intrinsic_image_deref_atomic_imax:
+ case nir_intrinsic_bindless_image_atomic_umax:
+ case nir_intrinsic_image_atomic_umax:
+ case nir_intrinsic_image_deref_atomic_umax:
+ case nir_intrinsic_bindless_image_atomic_imin:
+ case nir_intrinsic_image_atomic_imin:
+ case nir_intrinsic_image_deref_atomic_imin:
+ case nir_intrinsic_bindless_image_atomic_umin:
+ case nir_intrinsic_image_atomic_umin:
+ case nir_intrinsic_image_deref_atomic_umin:
+ case nir_intrinsic_bindless_image_atomic_or:
+ case nir_intrinsic_image_atomic_or:
+ case nir_intrinsic_image_deref_atomic_or:
+ case nir_intrinsic_bindless_image_atomic_xor:
+ case nir_intrinsic_image_atomic_xor:
+ case nir_intrinsic_image_deref_atomic_xor:
+ return OP_SUREDP;
+ case nir_intrinsic_bindless_image_load:
+ case nir_intrinsic_image_load:
+ case nir_intrinsic_image_deref_load:
+ return OP_SULDP;
+ case nir_intrinsic_bindless_image_samples:
+ case nir_intrinsic_image_samples:
+ case nir_intrinsic_image_deref_samples:
+ case nir_intrinsic_bindless_image_size:
+ case nir_intrinsic_image_size:
+ case nir_intrinsic_image_deref_size:
+ return OP_SUQ;
+ case nir_intrinsic_bindless_image_store:
+ case nir_intrinsic_image_store:
+ case nir_intrinsic_image_deref_store:
+ return OP_SUSTP;
+ default:
+ ERROR("couldn't get operation for nir_intrinsic_op %u\n", op);
+ assert(false);
+ return OP_NOP;
+ }
+}
+
operation
Converter::preOperationNeeded(nir_op op)
{
}
}
+int
+Converter::getSubOp(nir_intrinsic_op op)
+{
+ switch (op) {
+ case nir_intrinsic_bindless_image_atomic_add:
+ case nir_intrinsic_image_atomic_add:
+ case nir_intrinsic_image_deref_atomic_add:
+ case nir_intrinsic_shared_atomic_add:
+ case nir_intrinsic_ssbo_atomic_add:
+ return NV50_IR_SUBOP_ATOM_ADD;
+ case nir_intrinsic_bindless_image_atomic_and:
+ case nir_intrinsic_image_atomic_and:
+ case nir_intrinsic_image_deref_atomic_and:
+ case nir_intrinsic_shared_atomic_and:
+ case nir_intrinsic_ssbo_atomic_and:
+ return NV50_IR_SUBOP_ATOM_AND;
+ case nir_intrinsic_bindless_image_atomic_comp_swap:
+ case nir_intrinsic_image_atomic_comp_swap:
+ case nir_intrinsic_image_deref_atomic_comp_swap:
+ case nir_intrinsic_shared_atomic_comp_swap:
+ case nir_intrinsic_ssbo_atomic_comp_swap:
+ return NV50_IR_SUBOP_ATOM_CAS;
+ case nir_intrinsic_bindless_image_atomic_exchange:
+ case nir_intrinsic_image_atomic_exchange:
+ case nir_intrinsic_image_deref_atomic_exchange:
+ case nir_intrinsic_shared_atomic_exchange:
+ case nir_intrinsic_ssbo_atomic_exchange:
+ return NV50_IR_SUBOP_ATOM_EXCH;
+ case nir_intrinsic_bindless_image_atomic_or:
+ case nir_intrinsic_image_atomic_or:
+ case nir_intrinsic_image_deref_atomic_or:
+ case nir_intrinsic_shared_atomic_or:
+ case nir_intrinsic_ssbo_atomic_or:
+ return NV50_IR_SUBOP_ATOM_OR;
+ case nir_intrinsic_bindless_image_atomic_imax:
+ case nir_intrinsic_image_atomic_imax:
+ case nir_intrinsic_image_deref_atomic_imax:
+ case nir_intrinsic_bindless_image_atomic_umax:
+ case nir_intrinsic_image_atomic_umax:
+ case nir_intrinsic_image_deref_atomic_umax:
+ case nir_intrinsic_shared_atomic_imax:
+ case nir_intrinsic_shared_atomic_umax:
+ case nir_intrinsic_ssbo_atomic_imax:
+ case nir_intrinsic_ssbo_atomic_umax:
+ return NV50_IR_SUBOP_ATOM_MAX;
+ case nir_intrinsic_bindless_image_atomic_imin:
+ case nir_intrinsic_image_atomic_imin:
+ case nir_intrinsic_image_deref_atomic_imin:
+ case nir_intrinsic_bindless_image_atomic_umin:
+ case nir_intrinsic_image_atomic_umin:
+ case nir_intrinsic_image_deref_atomic_umin:
+ case nir_intrinsic_shared_atomic_imin:
+ case nir_intrinsic_shared_atomic_umin:
+ case nir_intrinsic_ssbo_atomic_imin:
+ case nir_intrinsic_ssbo_atomic_umin:
+ return NV50_IR_SUBOP_ATOM_MIN;
+ case nir_intrinsic_bindless_image_atomic_xor:
+ case nir_intrinsic_image_atomic_xor:
+ case nir_intrinsic_image_deref_atomic_xor:
+ case nir_intrinsic_shared_atomic_xor:
+ case nir_intrinsic_ssbo_atomic_xor:
+ return NV50_IR_SUBOP_ATOM_XOR;
+
+ case nir_intrinsic_group_memory_barrier:
+ case nir_intrinsic_memory_barrier:
+ case nir_intrinsic_memory_barrier_atomic_counter:
+ case nir_intrinsic_memory_barrier_buffer:
+ case nir_intrinsic_memory_barrier_image:
+ return NV50_IR_SUBOP_MEMBAR(M, GL);
+ case nir_intrinsic_memory_barrier_shared:
+ return NV50_IR_SUBOP_MEMBAR(M, CTA);
+
+ case nir_intrinsic_vote_all:
+ return NV50_IR_SUBOP_VOTE_ALL;
+ case nir_intrinsic_vote_any:
+ return NV50_IR_SUBOP_VOTE_ANY;
+ case nir_intrinsic_vote_ieq:
+ return NV50_IR_SUBOP_VOTE_UNI;
+ default:
+ return 0;
+ }
+}
+
CondCode
Converter::getCondCode(nir_op op)
{
Value*
Converter::getSrc(nir_ssa_def *src, uint8_t idx)
{
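+   // load_const instructions are not materialized when visited (see
+   // visit(nir_load_const_instr*)); emit the immediate load on demand here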
+ ImmediateMap::iterator iit = immediates.find(src->index);
+ if (iit != immediates.end())
+ return convert((*iit).second, idx);
+
NirDefMap::iterator it = ssaDefs.find(src->index);
if (it == ssaDefs.end()) {
ERROR("SSA value %u not found\n", src->index);
if (offset) {
indirect = NULL;
- return offset->u32[0];
+ return offset[0].u32;
}
indirect = getSrc(src, idx, true);
}
uint32_t
-Converter::getIndirect(nir_intrinsic_instr *insn, uint8_t s, uint8_t c, Value *&indirect)
+Converter::getIndirect(nir_intrinsic_instr *insn, uint8_t s, uint8_t c, Value *&indirect, bool isScalar)
{
int32_t idx = nir_intrinsic_base(insn) + getIndirect(&insn->src[s], c, indirect);
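+   // vec4 addressing: scale the indirect index to a byte offset
+   // (16 bytes per slot)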
- if (indirect)
+ if (indirect && !isScalar)
indirect = mkOp2v(OP_SHL, TYPE_U32, getSSA(4, FILE_ADDRESS), indirect, loadImm(NULL, 4));
return idx;
}
info->io.viewportId = -1;
info->numInputs = 0;
+ info->numOutputs = 0;
// we have to fixup the uniform locations for arrays
unsigned numImages = 0;
numImages += type->is_array() ? type->arrays_of_arrays_size() : 1;
}
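+   // gather all system values read by the shader and translate them into
+   // TGSI-style semantics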
+ info->numSysVals = 0;
+ for (uint8_t i = 0; i < SYSTEM_VALUE_MAX; ++i) {
+ if (!(nir->info.system_values_read & 1ull << i))
+ continue;
+
+ system_val_to_tgsi_semantic(i, &name, &index);
+ info->sv[info->numSysVals].sn = name;
+ info->sv[info->numSysVals].si = index;
+ info->sv[info->numSysVals].input = 0; // TODO inferSysValDirection(sn);
+
+ switch (i) {
+ case SYSTEM_VALUE_INSTANCE_ID:
+ info->io.instanceId = info->numSysVals;
+ break;
+ case SYSTEM_VALUE_TESS_LEVEL_INNER:
+ case SYSTEM_VALUE_TESS_LEVEL_OUTER:
+ info->sv[info->numSysVals].patch = 1;
+ break;
+ case SYSTEM_VALUE_VERTEX_ID:
+ info->io.vertexId = info->numSysVals;
+ break;
+ default:
+ break;
+ }
+
+ info->numSysVals += 1;
+ }
+
+ if (prog->getType() == Program::TYPE_COMPUTE)
+ return true;
+
nir_foreach_variable(var, &nir->inputs) {
const glsl_type *type = var->type;
int slot = var->data.location;
info->numInputs = std::max<uint8_t>(info->numInputs, vary);
}
- info->numOutputs = 0;
nir_foreach_variable(var, &nir->outputs) {
const glsl_type *type = var->type;
int slot = var->data.location;
case TGSI_SEMANTIC_CLIPDIST:
info->io.genUserClip = -1;
break;
+ case TGSI_SEMANTIC_CLIPVERTEX:
+ clipVertexOutput = vary;
+ break;
case TGSI_SEMANTIC_EDGEFLAG:
info->io.edgeFlagOut = vary;
break;
+ case TGSI_SEMANTIC_POSITION:
+ if (clipVertexOutput < 0)
+ clipVertexOutput = vary;
+ break;
default:
break;
}
else
info->out[vary].mask |= ((1 << comp) - 1) << frac;
- if (nir->info.outputs_read & 1ll << slot)
+ if (nir->info.outputs_read & 1ull << slot)
info->out[vary].oread = 1;
}
info->numOutputs = std::max<uint8_t>(info->numOutputs, vary);
}
- info->numSysVals = 0;
- for (uint8_t i = 0; i < 64; ++i) {
- if (!(nir->info.system_values_read & 1ll << i))
- continue;
-
- system_val_to_tgsi_semantic(i, &name, &index);
- info->sv[info->numSysVals].sn = name;
- info->sv[info->numSysVals].si = index;
- info->sv[info->numSysVals].input = 0; // TODO inferSysValDirection(sn);
-
- switch (i) {
- case SYSTEM_VALUE_INSTANCE_ID:
- info->io.instanceId = info->numSysVals;
- break;
- case SYSTEM_VALUE_TESS_LEVEL_INNER:
- case SYSTEM_VALUE_TESS_LEVEL_OUTER:
- info->sv[info->numSysVals].patch = 1;
- break;
- case SYSTEM_VALUE_VERTEX_ID:
- info->io.vertexId = info->numSysVals;
- break;
- default:
- break;
- }
-
- info->numSysVals += 1;
- }
-
if (info->io.genUserClip > 0) {
info->io.clipDistances = info->io.genUserClip;
bool
Converter::parseNIR()
{
+ info->bin.tlsSpace = 0;
info->io.clipDistances = nir->info.clip_distance_array_size;
info->io.cullDistances = nir->info.cull_distance_array_size;
bool
Converter::visit(nir_function *function)
{
- // we only support emiting the main function for now
- assert(!strcmp(function->name, "main"));
assert(function->impl);
// usually the blocks will set everything up, but main is special
setPosition(entry, true);
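+   // keep a copy of the clip vertex around in scratch values, so that the
+   // user clip planes can be emitted right before the exit block
+   // (see handleUserClipPlanes)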
+ if (info->io.genUserClip > 0) {
+ for (int c = 0; c < 4; ++c)
+ clipVtx[c] = getScratch();
+ }
+
switch (prog->getType()) {
case Program::TYPE_TESSELLATION_CONTROL:
outBase = mkOp2v(
break;
}
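+   // nir_registers with array elements can't be mapped to SSA values, so
+   // reserve space for them in local memory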
+ nir_foreach_register(reg, &function->impl->registers) {
+ if (reg->num_array_elems) {
+         // TODO: packed variables would be nice, but MemoryOpt fails;
+         // replace 4 with reg->num_components once that is fixed
+ uint32_t size = 4 * reg->num_array_elems * (reg->bit_size / 8);
+ regToLmemOffset[reg->index] = info->bin.tlsSpace;
+ info->bin.tlsSpace += size;
+ }
+ }
+
nir_index_ssa_defs(function->impl);
foreach_list_typed(nir_cf_node, node, node, &function->impl->body) {
if (!visit(node))
bb->cfg.attach(&exit->cfg, Graph::Edge::TREE);
setPosition(exit, true);
+ if ((prog->getType() == Program::TYPE_VERTEX ||
+ prog->getType() == Program::TYPE_TESSELLATION_EVAL)
+ && info->io.genUserClip > 0)
+ handleUserClipPlanes();
+
// TODO: for non main function this needs to be a OP_RETURN
mkOp(OP_EXIT, TYPE_NONE, NULL)->terminator = 1;
return true;
bool
Converter::visit(nir_instr *insn)
{
+   // we need an insertion point for immediate loads generated on the fly
+ immInsertPos = bb->getExit();
switch (insn->type) {
case nir_instr_type_alu:
return visit(nir_instr_as_alu(insn));
+ case nir_instr_type_deref:
+ return visit(nir_instr_as_deref(insn));
case nir_instr_type_intrinsic:
return visit(nir_instr_as_intrinsic(insn));
case nir_instr_type_jump:
return visit(nir_instr_as_jump(insn));
case nir_instr_type_load_const:
return visit(nir_instr_as_load_const(insn));
+ case nir_instr_type_ssa_undef:
+ return visit(nir_instr_as_ssa_undef(insn));
+ case nir_instr_type_tex:
+ return visit(nir_instr_as_tex(insn));
default:
ERROR("unknown nir_instr type %u\n", insn->type);
return false;
return true;
}
+SVSemantic
+Converter::convert(nir_intrinsic_op intr)
+{
+ switch (intr) {
+ case nir_intrinsic_load_base_vertex:
+ return SV_BASEVERTEX;
+ case nir_intrinsic_load_base_instance:
+ return SV_BASEINSTANCE;
+ case nir_intrinsic_load_draw_id:
+ return SV_DRAWID;
+ case nir_intrinsic_load_front_face:
+ return SV_FACE;
+ case nir_intrinsic_load_helper_invocation:
+ return SV_THREAD_KILL;
+ case nir_intrinsic_load_instance_id:
+ return SV_INSTANCE_ID;
+ case nir_intrinsic_load_invocation_id:
+ return SV_INVOCATION_ID;
+ case nir_intrinsic_load_local_group_size:
+ return SV_NTID;
+ case nir_intrinsic_load_local_invocation_id:
+ return SV_TID;
+ case nir_intrinsic_load_num_work_groups:
+ return SV_NCTAID;
+ case nir_intrinsic_load_patch_vertices_in:
+ return SV_VERTEX_COUNT;
+ case nir_intrinsic_load_primitive_id:
+ return SV_PRIMITIVE_ID;
+ case nir_intrinsic_load_sample_id:
+ return SV_SAMPLE_INDEX;
+ case nir_intrinsic_load_sample_mask_in:
+ return SV_SAMPLE_MASK;
+ case nir_intrinsic_load_sample_pos:
+ return SV_SAMPLE_POS;
+ case nir_intrinsic_load_subgroup_eq_mask:
+ return SV_LANEMASK_EQ;
+ case nir_intrinsic_load_subgroup_ge_mask:
+ return SV_LANEMASK_GE;
+ case nir_intrinsic_load_subgroup_gt_mask:
+ return SV_LANEMASK_GT;
+ case nir_intrinsic_load_subgroup_le_mask:
+ return SV_LANEMASK_LE;
+ case nir_intrinsic_load_subgroup_lt_mask:
+ return SV_LANEMASK_LT;
+ case nir_intrinsic_load_subgroup_invocation:
+ return SV_LANEID;
+ case nir_intrinsic_load_tess_coord:
+ return SV_TESS_COORD;
+ case nir_intrinsic_load_tess_level_inner:
+ return SV_TESS_INNER;
+ case nir_intrinsic_load_tess_level_outer:
+ return SV_TESS_OUTER;
+ case nir_intrinsic_load_vertex_id:
+ return SV_VERTEX_ID;
+ case nir_intrinsic_load_work_group_id:
+ return SV_CTAID;
+ default:
+ ERROR("unknown SVSemantic for nir_intrinsic_op %s\n",
+ nir_intrinsic_infos[intr].name);
+ assert(false);
+ return SV_LAST;
+ }
+}
+
+ImgFormat
+Converter::convertGLImgFormat(GLuint format)
+{
+#define FMT_CASE(a, b) \
+ case GL_ ## a: return nv50_ir::FMT_ ## b
+
+ switch (format) {
+ FMT_CASE(NONE, NONE);
+
+ FMT_CASE(RGBA32F, RGBA32F);
+ FMT_CASE(RGBA16F, RGBA16F);
+ FMT_CASE(RG32F, RG32F);
+ FMT_CASE(RG16F, RG16F);
+ FMT_CASE(R11F_G11F_B10F, R11G11B10F);
+ FMT_CASE(R32F, R32F);
+ FMT_CASE(R16F, R16F);
+
+ FMT_CASE(RGBA32UI, RGBA32UI);
+ FMT_CASE(RGBA16UI, RGBA16UI);
+ FMT_CASE(RGB10_A2UI, RGB10A2UI);
+ FMT_CASE(RGBA8UI, RGBA8UI);
+ FMT_CASE(RG32UI, RG32UI);
+ FMT_CASE(RG16UI, RG16UI);
+ FMT_CASE(RG8UI, RG8UI);
+ FMT_CASE(R32UI, R32UI);
+ FMT_CASE(R16UI, R16UI);
+ FMT_CASE(R8UI, R8UI);
+
+ FMT_CASE(RGBA32I, RGBA32I);
+ FMT_CASE(RGBA16I, RGBA16I);
+ FMT_CASE(RGBA8I, RGBA8I);
+ FMT_CASE(RG32I, RG32I);
+ FMT_CASE(RG16I, RG16I);
+ FMT_CASE(RG8I, RG8I);
+ FMT_CASE(R32I, R32I);
+ FMT_CASE(R16I, R16I);
+ FMT_CASE(R8I, R8I);
+
+ FMT_CASE(RGBA16, RGBA16);
+ FMT_CASE(RGB10_A2, RGB10A2);
+ FMT_CASE(RGBA8, RGBA8);
+ FMT_CASE(RG16, RG16);
+ FMT_CASE(RG8, RG8);
+ FMT_CASE(R16, R16);
+ FMT_CASE(R8, R8);
+
+ FMT_CASE(RGBA16_SNORM, RGBA16_SNORM);
+ FMT_CASE(RGBA8_SNORM, RGBA8_SNORM);
+ FMT_CASE(RG16_SNORM, RG16_SNORM);
+ FMT_CASE(RG8_SNORM, RG8_SNORM);
+ FMT_CASE(R16_SNORM, R16_SNORM);
+ FMT_CASE(R8_SNORM, R8_SNORM);
+
+ FMT_CASE(BGRA_INTEGER, BGRA8);
+ default:
+ ERROR("unknown format %x\n", format);
+ assert(false);
+ return nv50_ir::FMT_NONE;
+ }
+#undef FMT_CASE
+}
+
bool
Converter::visit(nir_intrinsic_instr *insn)
{
nir_intrinsic_op op = insn->intrinsic;
+ const nir_intrinsic_info &opInfo = nir_intrinsic_infos[op];
switch (op) {
+ case nir_intrinsic_load_uniform: {
+ LValues &newDefs = convert(&insn->dest);
+ const DataType dType = getDType(insn);
+ Value *indirect;
+ uint32_t coffset = getIndirect(insn, 0, 0, indirect);
+ for (uint8_t i = 0; i < insn->num_components; ++i) {
+ loadFrom(FILE_MEMORY_CONST, 0, dType, newDefs[i], 16 * coffset, i, indirect);
+ }
+ break;
+ }
+ case nir_intrinsic_store_output:
+ case nir_intrinsic_store_per_vertex_output: {
+ Value *indirect;
+ DataType dType = getSType(insn->src[0], false, false);
+ uint32_t idx = getIndirect(insn, op == nir_intrinsic_store_output ? 1 : 2, 0, indirect);
+
+ for (uint8_t i = 0u; i < insn->num_components; ++i) {
+ if (!((1u << i) & nir_intrinsic_write_mask(insn)))
+ continue;
+
+ uint8_t offset = 0;
+ Value *src = getSrc(&insn->src[0], i);
+ switch (prog->getType()) {
+ case Program::TYPE_FRAGMENT: {
+ if (info->out[idx].sn == TGSI_SEMANTIC_POSITION) {
+            // TGSI uses a different interface than NIR: TGSI stores the
+            // value in the z component, NIR in x
+ offset += 2;
+ src = mkOp1v(OP_SAT, TYPE_F32, getScratch(), src);
+ }
+ break;
+ }
+ case Program::TYPE_GEOMETRY:
+ case Program::TYPE_VERTEX: {
+ if (info->io.genUserClip > 0 && idx == (uint32_t)clipVertexOutput) {
+ mkMov(clipVtx[i], src);
+ src = clipVtx[i];
+ }
+ break;
+ }
+ default:
+ break;
+ }
+
+ storeTo(insn, FILE_SHADER_OUTPUT, OP_EXPORT, dType, src, idx, i + offset, indirect);
+ }
+ break;
+ }
+ case nir_intrinsic_load_input:
+ case nir_intrinsic_load_interpolated_input:
+ case nir_intrinsic_load_output: {
+ LValues &newDefs = convert(&insn->dest);
+
+ // FBFetch
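+      // implemented as a TXF from a 2D MS array texture at the current
+      // position, layer and sample index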
+ if (prog->getType() == Program::TYPE_FRAGMENT &&
+ op == nir_intrinsic_load_output) {
+ std::vector<Value*> defs, srcs;
+ uint8_t mask = 0;
+
+ srcs.push_back(getSSA());
+ srcs.push_back(getSSA());
+ Value *x = mkOp1v(OP_RDSV, TYPE_F32, getSSA(), mkSysVal(SV_POSITION, 0));
+ Value *y = mkOp1v(OP_RDSV, TYPE_F32, getSSA(), mkSysVal(SV_POSITION, 1));
+ mkCvt(OP_CVT, TYPE_U32, srcs[0], TYPE_F32, x)->rnd = ROUND_Z;
+ mkCvt(OP_CVT, TYPE_U32, srcs[1], TYPE_F32, y)->rnd = ROUND_Z;
+
+ srcs.push_back(mkOp1v(OP_RDSV, TYPE_U32, getSSA(), mkSysVal(SV_LAYER, 0)));
+ srcs.push_back(mkOp1v(OP_RDSV, TYPE_U32, getSSA(), mkSysVal(SV_SAMPLE_INDEX, 0)));
+
+ for (uint8_t i = 0u; i < insn->num_components; ++i) {
+ defs.push_back(newDefs[i]);
+ mask |= 1 << i;
+ }
+
+ TexInstruction *texi = mkTex(OP_TXF, TEX_TARGET_2D_MS_ARRAY, 0, 0, defs, srcs);
+ texi->tex.levelZero = 1;
+ texi->tex.mask = mask;
+ texi->tex.useOffsets = 0;
+ texi->tex.r = 0xffff;
+ texi->tex.s = 0xffff;
+
+ info->prop.fp.readsFramebuffer = true;
+ break;
+ }
+
+ const DataType dType = getDType(insn);
+ Value *indirect;
+ bool input = op != nir_intrinsic_load_output;
+ operation nvirOp;
+ uint32_t mode = 0;
+
+ uint32_t idx = getIndirect(insn, op == nir_intrinsic_load_interpolated_input ? 1 : 0, 0, indirect);
+ nv50_ir_varying& vary = input ? info->in[idx] : info->out[idx];
+
+ // see load_barycentric_* handling
+ if (prog->getType() == Program::TYPE_FRAGMENT) {
+ mode = translateInterpMode(&vary, nvirOp);
+ if (op == nir_intrinsic_load_interpolated_input) {
+ ImmediateValue immMode;
+ if (getSrc(&insn->src[0], 1)->getUniqueInsn()->src(0).getImmediate(immMode))
+ mode |= immMode.reg.data.u32;
+ }
+ }
+
+ for (uint8_t i = 0u; i < insn->num_components; ++i) {
+ uint32_t address = getSlotAddress(insn, idx, i);
+ Symbol *sym = mkSymbol(input ? FILE_SHADER_INPUT : FILE_SHADER_OUTPUT, 0, dType, address);
+ if (prog->getType() == Program::TYPE_FRAGMENT) {
+ int s = 1;
+ if (typeSizeof(dType) == 8) {
+ Value *lo = getSSA();
+ Value *hi = getSSA();
+ Instruction *interp;
+
+ interp = mkOp1(nvirOp, TYPE_U32, lo, sym);
+ if (nvirOp == OP_PINTERP)
+ interp->setSrc(s++, fp.position);
+ if (mode & NV50_IR_INTERP_OFFSET)
+ interp->setSrc(s++, getSrc(&insn->src[0], 0));
+ interp->setInterpolate(mode);
+ interp->setIndirect(0, 0, indirect);
+
+ Symbol *sym1 = mkSymbol(input ? FILE_SHADER_INPUT : FILE_SHADER_OUTPUT, 0, dType, address + 4);
+ interp = mkOp1(nvirOp, TYPE_U32, hi, sym1);
+ if (nvirOp == OP_PINTERP)
+ interp->setSrc(s++, fp.position);
+ if (mode & NV50_IR_INTERP_OFFSET)
+ interp->setSrc(s++, getSrc(&insn->src[0], 0));
+ interp->setInterpolate(mode);
+ interp->setIndirect(0, 0, indirect);
+
+ mkOp2(OP_MERGE, dType, newDefs[i], lo, hi);
+ } else {
+ Instruction *interp = mkOp1(nvirOp, dType, newDefs[i], sym);
+ if (nvirOp == OP_PINTERP)
+ interp->setSrc(s++, fp.position);
+ if (mode & NV50_IR_INTERP_OFFSET)
+ interp->setSrc(s++, getSrc(&insn->src[0], 0));
+ interp->setInterpolate(mode);
+ interp->setIndirect(0, 0, indirect);
+ }
+ } else {
+ mkLoad(dType, newDefs[i], sym, indirect)->perPatch = vary.patch;
+ }
+ }
+ break;
+ }
+ case nir_intrinsic_load_kernel_input: {
+ assert(prog->getType() == Program::TYPE_COMPUTE);
+ assert(insn->num_components == 1);
+
+ LValues &newDefs = convert(&insn->dest);
+ const DataType dType = getDType(insn);
+ Value *indirect;
+ uint32_t idx = getIndirect(insn, 0, 0, indirect, true);
+
+ mkLoad(dType, newDefs[0], mkSymbol(FILE_SHADER_INPUT, 0, dType, idx), indirect);
+ break;
+ }
+ case nir_intrinsic_load_barycentric_at_offset:
+ case nir_intrinsic_load_barycentric_at_sample:
+ case nir_intrinsic_load_barycentric_centroid:
+ case nir_intrinsic_load_barycentric_pixel:
+ case nir_intrinsic_load_barycentric_sample: {
+ LValues &newDefs = convert(&insn->dest);
+ uint32_t mode;
+
+ if (op == nir_intrinsic_load_barycentric_centroid ||
+ op == nir_intrinsic_load_barycentric_sample) {
+ mode = NV50_IR_INTERP_CENTROID;
+ } else if (op == nir_intrinsic_load_barycentric_at_offset) {
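+         // clamp the offset to the supported range, convert it to fixed
+         // point by scaling with 4096 and pack both 16 bit components
+         // with INSBF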
+ Value *offs[2];
+ for (uint8_t c = 0; c < 2; c++) {
+ offs[c] = getScratch();
+ mkOp2(OP_MIN, TYPE_F32, offs[c], getSrc(&insn->src[0], c), loadImm(NULL, 0.4375f));
+ mkOp2(OP_MAX, TYPE_F32, offs[c], offs[c], loadImm(NULL, -0.5f));
+ mkOp2(OP_MUL, TYPE_F32, offs[c], offs[c], loadImm(NULL, 4096.0f));
+ mkCvt(OP_CVT, TYPE_S32, offs[c], TYPE_F32, offs[c]);
+ }
+ mkOp3v(OP_INSBF, TYPE_U32, newDefs[0], offs[1], mkImm(0x1010), offs[0]);
+
+ mode = NV50_IR_INTERP_OFFSET;
+ } else if (op == nir_intrinsic_load_barycentric_pixel) {
+ mode = NV50_IR_INTERP_DEFAULT;
+ } else if (op == nir_intrinsic_load_barycentric_at_sample) {
+ info->prop.fp.readsSampleLocations = true;
+ mkOp1(OP_PIXLD, TYPE_U32, newDefs[0], getSrc(&insn->src[0], 0))->subOp = NV50_IR_SUBOP_PIXLD_OFFSET;
+ mode = NV50_IR_INTERP_OFFSET;
+ } else {
+ unreachable("all intrinsics already handled above");
+ }
+
+ loadImm(newDefs[1], mode);
+ break;
+ }
+ case nir_intrinsic_discard:
+ mkOp(OP_DISCARD, TYPE_NONE, NULL);
+ break;
+ case nir_intrinsic_discard_if: {
+ Value *pred = getSSA(1, FILE_PREDICATE);
+ if (insn->num_components > 1) {
+ ERROR("nir_intrinsic_discard_if only with 1 component supported!\n");
+ assert(false);
+ return false;
+ }
+ mkCmp(OP_SET, CC_NE, TYPE_U8, pred, TYPE_U32, getSrc(&insn->src[0], 0), zero);
+ mkOp(OP_DISCARD, TYPE_NONE, NULL)->setPredicate(CC_P, pred);
+ break;
+ }
+ case nir_intrinsic_load_base_vertex:
+ case nir_intrinsic_load_base_instance:
+ case nir_intrinsic_load_draw_id:
+ case nir_intrinsic_load_front_face:
+ case nir_intrinsic_load_helper_invocation:
+ case nir_intrinsic_load_instance_id:
+ case nir_intrinsic_load_invocation_id:
+ case nir_intrinsic_load_local_group_size:
+ case nir_intrinsic_load_local_invocation_id:
+ case nir_intrinsic_load_num_work_groups:
+ case nir_intrinsic_load_patch_vertices_in:
+ case nir_intrinsic_load_primitive_id:
+ case nir_intrinsic_load_sample_id:
+ case nir_intrinsic_load_sample_mask_in:
+ case nir_intrinsic_load_sample_pos:
+ case nir_intrinsic_load_subgroup_eq_mask:
+ case nir_intrinsic_load_subgroup_ge_mask:
+ case nir_intrinsic_load_subgroup_gt_mask:
+ case nir_intrinsic_load_subgroup_le_mask:
+ case nir_intrinsic_load_subgroup_lt_mask:
+ case nir_intrinsic_load_subgroup_invocation:
+ case nir_intrinsic_load_tess_coord:
+ case nir_intrinsic_load_tess_level_inner:
+ case nir_intrinsic_load_tess_level_outer:
+ case nir_intrinsic_load_vertex_id:
+ case nir_intrinsic_load_work_group_id: {
+ const DataType dType = getDType(insn);
+ SVSemantic sv = convert(op);
+ LValues &newDefs = convert(&insn->dest);
+
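+      // system values are read as 32 bit; for 64 bit destinations the value
+      // goes into the low word and is zero-extended via OP_MERGE below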
+ for (uint8_t i = 0u; i < insn->num_components; ++i) {
+ Value *def;
+ if (typeSizeof(dType) == 8)
+ def = getSSA();
+ else
+ def = newDefs[i];
+
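+         // the thread id in a dimension with a block size of 1 is always 0,
+         // so there is no need to read the system value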
+ if (sv == SV_TID && info->prop.cp.numThreads[i] == 1) {
+ loadImm(def, 0u);
+ } else {
+ Symbol *sym = mkSysVal(sv, i);
+ Instruction *rdsv = mkOp1(OP_RDSV, TYPE_U32, def, sym);
+ if (sv == SV_TESS_OUTER || sv == SV_TESS_INNER)
+ rdsv->perPatch = 1;
+ }
+
+ if (typeSizeof(dType) == 8)
+ mkOp2(OP_MERGE, dType, newDefs[i], def, loadImm(getSSA(), 0u));
+ }
+ break;
+ }
+ // constants
+ case nir_intrinsic_load_subgroup_size: {
+ LValues &newDefs = convert(&insn->dest);
+ loadImm(newDefs[0], 32u);
+ break;
+ }
+ case nir_intrinsic_vote_all:
+ case nir_intrinsic_vote_any:
+ case nir_intrinsic_vote_ieq: {
+ LValues &newDefs = convert(&insn->dest);
+ Value *pred = getScratch(1, FILE_PREDICATE);
+ mkCmp(OP_SET, CC_NE, TYPE_U32, pred, TYPE_U32, getSrc(&insn->src[0], 0), zero);
+ mkOp1(OP_VOTE, TYPE_U32, pred, pred)->subOp = getSubOp(op);
+ mkCvt(OP_CVT, TYPE_U32, newDefs[0], TYPE_U8, pred);
+ break;
+ }
+ case nir_intrinsic_ballot: {
+ LValues &newDefs = convert(&insn->dest);
+ Value *pred = getSSA(1, FILE_PREDICATE);
+ mkCmp(OP_SET, CC_NE, TYPE_U32, pred, TYPE_U32, getSrc(&insn->src[0], 0), zero);
+ mkOp1(OP_VOTE, TYPE_U32, newDefs[0], pred)->subOp = NV50_IR_SUBOP_VOTE_ANY;
+ break;
+ }
+ case nir_intrinsic_read_first_invocation:
+ case nir_intrinsic_read_invocation: {
+ LValues &newDefs = convert(&insn->dest);
+ const DataType dType = getDType(insn);
+ Value *tmp = getScratch();
+
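+      // for read_first_invocation, compute the id of the lowest active lane
+      // by ballotting the active lanes, bit-reversing the mask and finding
+      // the highest set bit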
+ if (op == nir_intrinsic_read_first_invocation) {
+ mkOp1(OP_VOTE, TYPE_U32, tmp, mkImm(1))->subOp = NV50_IR_SUBOP_VOTE_ANY;
+ mkOp2(OP_EXTBF, TYPE_U32, tmp, tmp, mkImm(0x2000))->subOp = NV50_IR_SUBOP_EXTBF_REV;
+ mkOp1(OP_BFIND, TYPE_U32, tmp, tmp)->subOp = NV50_IR_SUBOP_BFIND_SAMT;
+ } else
+ tmp = getSrc(&insn->src[1], 0);
+
+ for (uint8_t i = 0; i < insn->num_components; ++i) {
+ mkOp3(OP_SHFL, dType, newDefs[i], getSrc(&insn->src[0], i), tmp, mkImm(0x1f))
+ ->subOp = NV50_IR_SUBOP_SHFL_IDX;
+ }
+ break;
+ }
+ case nir_intrinsic_load_per_vertex_input: {
+ const DataType dType = getDType(insn);
+ LValues &newDefs = convert(&insn->dest);
+ Value *indirectVertex;
+ Value *indirectOffset;
+ uint32_t baseVertex = getIndirect(&insn->src[0], 0, indirectVertex);
+ uint32_t idx = getIndirect(insn, 1, 0, indirectOffset);
+
+ Value *vtxBase = mkOp2v(OP_PFETCH, TYPE_U32, getSSA(4, FILE_ADDRESS),
+ mkImm(baseVertex), indirectVertex);
+ for (uint8_t i = 0u; i < insn->num_components; ++i) {
+ uint32_t address = getSlotAddress(insn, idx, i);
+ loadFrom(FILE_SHADER_INPUT, 0, dType, newDefs[i], address, 0,
+ indirectOffset, vtxBase, info->in[idx].patch);
+ }
+ break;
+ }
+ case nir_intrinsic_load_per_vertex_output: {
+ const DataType dType = getDType(insn);
+ LValues &newDefs = convert(&insn->dest);
+ Value *indirectVertex;
+ Value *indirectOffset;
+ uint32_t baseVertex = getIndirect(&insn->src[0], 0, indirectVertex);
+ uint32_t idx = getIndirect(insn, 1, 0, indirectOffset);
+ Value *vtxBase = NULL;
+
+ if (indirectVertex)
+ vtxBase = indirectVertex;
+ else
+ vtxBase = loadImm(NULL, baseVertex);
+
+ vtxBase = mkOp2v(OP_ADD, TYPE_U32, getSSA(4, FILE_ADDRESS), outBase, vtxBase);
+
+ for (uint8_t i = 0u; i < insn->num_components; ++i) {
+ uint32_t address = getSlotAddress(insn, idx, i);
+ loadFrom(FILE_SHADER_OUTPUT, 0, dType, newDefs[i], address, 0,
+ indirectOffset, vtxBase, info->in[idx].patch);
+ }
+ break;
+ }
+ case nir_intrinsic_emit_vertex:
+ if (info->io.genUserClip > 0)
+ handleUserClipPlanes();
+ // fallthrough
+ case nir_intrinsic_end_primitive: {
+ uint32_t idx = nir_intrinsic_stream_id(insn);
+ mkOp1(getOperation(op), TYPE_U32, NULL, mkImm(idx))->fixed = 1;
+ break;
+ }
+ case nir_intrinsic_load_ubo: {
+ const DataType dType = getDType(insn);
+ LValues &newDefs = convert(&insn->dest);
+ Value *indirectIndex;
+ Value *indirectOffset;
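+      // UBO indices are shifted by one, because constant buffer 0 is used
+      // for uniforms (see nir_intrinsic_load_uniform above)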
+ uint32_t index = getIndirect(&insn->src[0], 0, indirectIndex) + 1;
+ uint32_t offset = getIndirect(&insn->src[1], 0, indirectOffset);
+
+ for (uint8_t i = 0u; i < insn->num_components; ++i) {
+ loadFrom(FILE_MEMORY_CONST, index, dType, newDefs[i], offset, i,
+ indirectOffset, indirectIndex);
+ }
+ break;
+ }
+ case nir_intrinsic_get_buffer_size: {
+ LValues &newDefs = convert(&insn->dest);
+ const DataType dType = getDType(insn);
+ Value *indirectBuffer;
+ uint32_t buffer = getIndirect(&insn->src[0], 0, indirectBuffer);
+
+ Symbol *sym = mkSymbol(FILE_MEMORY_BUFFER, buffer, dType, 0);
+ mkOp1(OP_BUFQ, dType, newDefs[0], sym)->setIndirect(0, 0, indirectBuffer);
+ break;
+ }
+ case nir_intrinsic_store_ssbo: {
+ DataType sType = getSType(insn->src[0], false, false);
+ Value *indirectBuffer;
+ Value *indirectOffset;
+ uint32_t buffer = getIndirect(&insn->src[1], 0, indirectBuffer);
+ uint32_t offset = getIndirect(&insn->src[2], 0, indirectOffset);
+
+ for (uint8_t i = 0u; i < insn->num_components; ++i) {
+ if (!((1u << i) & nir_intrinsic_write_mask(insn)))
+ continue;
+ Symbol *sym = mkSymbol(FILE_MEMORY_BUFFER, buffer, sType,
+ offset + i * typeSizeof(sType));
+ mkStore(OP_STORE, sType, sym, indirectOffset, getSrc(&insn->src[0], i))
+ ->setIndirect(0, 1, indirectBuffer);
+ }
+ info->io.globalAccess |= 0x2;
+ break;
+ }
+ case nir_intrinsic_load_ssbo: {
+ const DataType dType = getDType(insn);
+ LValues &newDefs = convert(&insn->dest);
+ Value *indirectBuffer;
+ Value *indirectOffset;
+ uint32_t buffer = getIndirect(&insn->src[0], 0, indirectBuffer);
+ uint32_t offset = getIndirect(&insn->src[1], 0, indirectOffset);
+
+ for (uint8_t i = 0u; i < insn->num_components; ++i)
+ loadFrom(FILE_MEMORY_BUFFER, buffer, dType, newDefs[i], offset, i,
+ indirectOffset, indirectBuffer);
+
+ info->io.globalAccess |= 0x1;
+ break;
+ }
+ case nir_intrinsic_shared_atomic_add:
+ case nir_intrinsic_shared_atomic_and:
+ case nir_intrinsic_shared_atomic_comp_swap:
+ case nir_intrinsic_shared_atomic_exchange:
+ case nir_intrinsic_shared_atomic_or:
+ case nir_intrinsic_shared_atomic_imax:
+ case nir_intrinsic_shared_atomic_imin:
+ case nir_intrinsic_shared_atomic_umax:
+ case nir_intrinsic_shared_atomic_umin:
+ case nir_intrinsic_shared_atomic_xor: {
+ const DataType dType = getDType(insn);
+ LValues &newDefs = convert(&insn->dest);
+ Value *indirectOffset;
+ uint32_t offset = getIndirect(&insn->src[0], 0, indirectOffset);
+ Symbol *sym = mkSymbol(FILE_MEMORY_SHARED, 0, dType, offset);
+ Instruction *atom = mkOp2(OP_ATOM, dType, newDefs[0], sym, getSrc(&insn->src[1], 0));
+ if (op == nir_intrinsic_shared_atomic_comp_swap)
+ atom->setSrc(2, getSrc(&insn->src[2], 0));
+ atom->setIndirect(0, 0, indirectOffset);
+ atom->subOp = getSubOp(op);
+ break;
+ }
+ case nir_intrinsic_ssbo_atomic_add:
+ case nir_intrinsic_ssbo_atomic_and:
+ case nir_intrinsic_ssbo_atomic_comp_swap:
+ case nir_intrinsic_ssbo_atomic_exchange:
+ case nir_intrinsic_ssbo_atomic_or:
+ case nir_intrinsic_ssbo_atomic_imax:
+ case nir_intrinsic_ssbo_atomic_imin:
+ case nir_intrinsic_ssbo_atomic_umax:
+ case nir_intrinsic_ssbo_atomic_umin:
+ case nir_intrinsic_ssbo_atomic_xor: {
+ const DataType dType = getDType(insn);
+ LValues &newDefs = convert(&insn->dest);
+ Value *indirectBuffer;
+ Value *indirectOffset;
+ uint32_t buffer = getIndirect(&insn->src[0], 0, indirectBuffer);
+ uint32_t offset = getIndirect(&insn->src[1], 0, indirectOffset);
+
+ Symbol *sym = mkSymbol(FILE_MEMORY_BUFFER, buffer, dType, offset);
+ Instruction *atom = mkOp2(OP_ATOM, dType, newDefs[0], sym,
+ getSrc(&insn->src[2], 0));
+ if (op == nir_intrinsic_ssbo_atomic_comp_swap)
+ atom->setSrc(2, getSrc(&insn->src[3], 0));
+ atom->setIndirect(0, 0, indirectOffset);
+ atom->setIndirect(0, 1, indirectBuffer);
+ atom->subOp = getSubOp(op);
+
+ info->io.globalAccess |= 0x2;
+ break;
+ }
+ case nir_intrinsic_bindless_image_atomic_add:
+ case nir_intrinsic_bindless_image_atomic_and:
+ case nir_intrinsic_bindless_image_atomic_comp_swap:
+ case nir_intrinsic_bindless_image_atomic_exchange:
+ case nir_intrinsic_bindless_image_atomic_imax:
+ case nir_intrinsic_bindless_image_atomic_umax:
+ case nir_intrinsic_bindless_image_atomic_imin:
+ case nir_intrinsic_bindless_image_atomic_umin:
+ case nir_intrinsic_bindless_image_atomic_or:
+ case nir_intrinsic_bindless_image_atomic_xor:
+ case nir_intrinsic_bindless_image_load:
+ case nir_intrinsic_bindless_image_samples:
+ case nir_intrinsic_bindless_image_size:
+ case nir_intrinsic_bindless_image_store: {
+ std::vector<Value*> srcs, defs;
+ Value *indirect = getSrc(&insn->src[0], 0);
+ DataType ty;
+
+ uint32_t mask = 0;
+ TexInstruction::Target target =
+ convert(nir_intrinsic_image_dim(insn), !!nir_intrinsic_image_array(insn), false);
+ unsigned int argCount = getNIRArgCount(target);
+ uint16_t location = 0;
+
+ if (opInfo.has_dest) {
+ LValues &newDefs = convert(&insn->dest);
+ for (uint8_t i = 0u; i < newDefs.size(); ++i) {
+ defs.push_back(newDefs[i]);
+ mask |= 1 << i;
+ }
+ }
+
+ switch (op) {
+ case nir_intrinsic_bindless_image_atomic_add:
+ case nir_intrinsic_bindless_image_atomic_and:
+ case nir_intrinsic_bindless_image_atomic_comp_swap:
+ case nir_intrinsic_bindless_image_atomic_exchange:
+ case nir_intrinsic_bindless_image_atomic_imax:
+ case nir_intrinsic_bindless_image_atomic_umax:
+ case nir_intrinsic_bindless_image_atomic_imin:
+ case nir_intrinsic_bindless_image_atomic_umin:
+ case nir_intrinsic_bindless_image_atomic_or:
+ case nir_intrinsic_bindless_image_atomic_xor:
+ ty = getDType(insn);
+ mask = 0x1;
+ info->io.globalAccess |= 0x2;
+ break;
+ case nir_intrinsic_bindless_image_load:
+ ty = TYPE_U32;
+ info->io.globalAccess |= 0x1;
+ break;
+ case nir_intrinsic_bindless_image_store:
+ ty = TYPE_U32;
+ mask = 0xf;
+ info->io.globalAccess |= 0x2;
+ break;
+ case nir_intrinsic_bindless_image_samples:
+ mask = 0x8;
+ ty = TYPE_U32;
+ break;
+ case nir_intrinsic_bindless_image_size:
+ ty = TYPE_U32;
+ break;
+ default:
+ unreachable("unhandled image opcode");
+ break;
+ }
+
+ // coords
+ if (opInfo.num_srcs >= 2)
+ for (unsigned int i = 0u; i < argCount; ++i)
+ srcs.push_back(getSrc(&insn->src[1], i));
+
+      // the MS sample index is just another src added after the coords
+ if (opInfo.num_srcs >= 3 && target.isMS())
+ srcs.push_back(getSrc(&insn->src[2], 0));
+
+ if (opInfo.num_srcs >= 4) {
+ unsigned components = opInfo.src_components[3] ? opInfo.src_components[3] : insn->num_components;
+ for (uint8_t i = 0u; i < components; ++i)
+ srcs.push_back(getSrc(&insn->src[3], i));
+ }
+
+ if (opInfo.num_srcs >= 5)
+         // 1 for atomic swap
+ for (uint8_t i = 0u; i < opInfo.src_components[4]; ++i)
+ srcs.push_back(getSrc(&insn->src[4], i));
+
+ TexInstruction *texi = mkTex(getOperation(op), target.getEnum(), location, 0, defs, srcs);
+ texi->tex.format = &nv50_ir::TexInstruction::formatTable[convertGLImgFormat(nir_intrinsic_format(insn))];
+ texi->tex.mask = mask;
+ texi->tex.bindless = true;
+ texi->cache = convert(nir_intrinsic_access(insn));
+ texi->setType(ty);
+ texi->subOp = getSubOp(op);
+
+ if (indirect)
+ texi->setIndirectR(indirect);
+
+ break;
+ }
+ case nir_intrinsic_image_deref_atomic_add:
+ case nir_intrinsic_image_deref_atomic_and:
+ case nir_intrinsic_image_deref_atomic_comp_swap:
+ case nir_intrinsic_image_deref_atomic_exchange:
+ case nir_intrinsic_image_deref_atomic_imax:
+ case nir_intrinsic_image_deref_atomic_umax:
+ case nir_intrinsic_image_deref_atomic_imin:
+ case nir_intrinsic_image_deref_atomic_umin:
+ case nir_intrinsic_image_deref_atomic_or:
+ case nir_intrinsic_image_deref_atomic_xor:
+ case nir_intrinsic_image_deref_load:
+ case nir_intrinsic_image_deref_samples:
+ case nir_intrinsic_image_deref_size:
+ case nir_intrinsic_image_deref_store: {
+ const nir_variable *tex;
+ std::vector<Value*> srcs, defs;
+ Value *indirect;
+ DataType ty;
+
+ uint32_t mask = 0;
+ nir_deref_instr *deref = nir_src_as_deref(insn->src[0]);
+ const glsl_type *type = deref->type;
+ TexInstruction::Target target =
+ convert((glsl_sampler_dim)type->sampler_dimensionality,
+ type->sampler_array, type->sampler_shadow);
+ unsigned int argCount = getNIRArgCount(target);
+ uint16_t location = handleDeref(deref, indirect, tex);
+
+ if (opInfo.has_dest) {
+ LValues &newDefs = convert(&insn->dest);
+ for (uint8_t i = 0u; i < newDefs.size(); ++i) {
+ defs.push_back(newDefs[i]);
+ mask |= 1 << i;
+ }
+ }
+
+ switch (op) {
+ case nir_intrinsic_image_deref_atomic_add:
+ case nir_intrinsic_image_deref_atomic_and:
+ case nir_intrinsic_image_deref_atomic_comp_swap:
+ case nir_intrinsic_image_deref_atomic_exchange:
+ case nir_intrinsic_image_deref_atomic_imax:
+ case nir_intrinsic_image_deref_atomic_umax:
+ case nir_intrinsic_image_deref_atomic_imin:
+ case nir_intrinsic_image_deref_atomic_umin:
+ case nir_intrinsic_image_deref_atomic_or:
+ case nir_intrinsic_image_deref_atomic_xor:
+ ty = getDType(insn);
+ mask = 0x1;
+ info->io.globalAccess |= 0x2;
+ break;
+ case nir_intrinsic_image_deref_load:
+ ty = TYPE_U32;
+ info->io.globalAccess |= 0x1;
+ break;
+ case nir_intrinsic_image_deref_store:
+ ty = TYPE_U32;
+ mask = 0xf;
+ info->io.globalAccess |= 0x2;
+ break;
+ case nir_intrinsic_image_deref_samples:
+ mask = 0x8;
+ ty = TYPE_U32;
+ break;
+ case nir_intrinsic_image_deref_size:
+ ty = TYPE_U32;
+ break;
+ default:
+ unreachable("unhandled image opcode");
+ break;
+ }
+
+ // coords
+ if (opInfo.num_srcs >= 2)
+ for (unsigned int i = 0u; i < argCount; ++i)
+ srcs.push_back(getSrc(&insn->src[1], i));
+
+      // the MS sample index is just another src added after the coords
+ if (opInfo.num_srcs >= 3 && target.isMS())
+ srcs.push_back(getSrc(&insn->src[2], 0));
+
+ if (opInfo.num_srcs >= 4) {
+ unsigned components = opInfo.src_components[3] ? opInfo.src_components[3] : insn->num_components;
+ for (uint8_t i = 0u; i < components; ++i)
+ srcs.push_back(getSrc(&insn->src[3], i));
+ }
+
+ if (opInfo.num_srcs >= 5)
+         // 1 for atomic swap
+ for (uint8_t i = 0u; i < opInfo.src_components[4]; ++i)
+ srcs.push_back(getSrc(&insn->src[4], i));
+
+ TexInstruction *texi = mkTex(getOperation(op), target.getEnum(), location, 0, defs, srcs);
+ texi->tex.bindless = false;
+ texi->tex.format = &nv50_ir::TexInstruction::formatTable[convertGLImgFormat(tex->data.image.format)];
+ texi->tex.mask = mask;
+ texi->cache = getCacheModeFromVar(tex);
+ texi->setType(ty);
+ texi->subOp = getSubOp(op);
+
+ if (indirect)
+ texi->setIndirectR(indirect);
+
+ break;
+ }
+ case nir_intrinsic_store_shared: {
+ DataType sType = getSType(insn->src[0], false, false);
+ Value *indirectOffset;
+ uint32_t offset = getIndirect(&insn->src[1], 0, indirectOffset);
+
+ for (uint8_t i = 0u; i < insn->num_components; ++i) {
+ if (!((1u << i) & nir_intrinsic_write_mask(insn)))
+ continue;
+ Symbol *sym = mkSymbol(FILE_MEMORY_SHARED, 0, sType, offset + i * typeSizeof(sType));
+ mkStore(OP_STORE, sType, sym, indirectOffset, getSrc(&insn->src[0], i));
+ }
+ break;
+ }
+ case nir_intrinsic_load_shared: {
+ const DataType dType = getDType(insn);
+ LValues &newDefs = convert(&insn->dest);
+ Value *indirectOffset;
+ uint32_t offset = getIndirect(&insn->src[0], 0, indirectOffset);
+
+ for (uint8_t i = 0u; i < insn->num_components; ++i)
+ loadFrom(FILE_MEMORY_SHARED, 0, dType, newDefs[i], offset, i, indirectOffset);
+
+ break;
+ }
+ case nir_intrinsic_barrier: {
+ // TODO: add flag to shader_info
+ info->numBarriers = 1;
+ Instruction *bar = mkOp2(OP_BAR, TYPE_U32, NULL, mkImm(0), mkImm(0));
+ bar->fixed = 1;
+ bar->subOp = NV50_IR_SUBOP_BAR_SYNC;
+ break;
+ }
+ case nir_intrinsic_group_memory_barrier:
+ case nir_intrinsic_memory_barrier:
+ case nir_intrinsic_memory_barrier_atomic_counter:
+ case nir_intrinsic_memory_barrier_buffer:
+ case nir_intrinsic_memory_barrier_image:
+ case nir_intrinsic_memory_barrier_shared: {
+ Instruction *bar = mkOp(OP_MEMBAR, TYPE_NONE, NULL);
+ bar->fixed = 1;
+ bar->subOp = getSubOp(op);
+ break;
+ }
+ case nir_intrinsic_shader_clock: {
+ const DataType dType = getDType(insn);
+ LValues &newDefs = convert(&insn->dest);
+
+ loadImm(newDefs[0], 0u);
+ mkOp1(OP_RDSV, dType, newDefs[1], mkSysVal(SV_CLOCK, 0))->fixed = 1;
+ break;
+ }
+ case nir_intrinsic_load_global: {
+ const DataType dType = getDType(insn);
+ LValues &newDefs = convert(&insn->dest);
+ Value *indirectOffset;
+ uint32_t offset = getIndirect(&insn->src[0], 0, indirectOffset);
+
+ for (auto i = 0u; i < insn->num_components; ++i)
+ loadFrom(FILE_MEMORY_GLOBAL, 0, dType, newDefs[i], offset, i, indirectOffset);
+
+ info->io.globalAccess |= 0x1;
+ break;
+ }
+ case nir_intrinsic_store_global: {
+ DataType sType = getSType(insn->src[0], false, false);
+
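+      // 64 bit stores are split into two 32 bit stores of the low and high
+      // words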
+ for (auto i = 0u; i < insn->num_components; ++i) {
+ if (!((1u << i) & nir_intrinsic_write_mask(insn)))
+ continue;
+ if (typeSizeof(sType) == 8) {
+ Value *split[2];
+ mkSplit(split, 4, getSrc(&insn->src[0], i));
+
+ Symbol *sym = mkSymbol(FILE_MEMORY_GLOBAL, 0, TYPE_U32, i * typeSizeof(sType));
+ mkStore(OP_STORE, TYPE_U32, sym, getSrc(&insn->src[1], 0), split[0]);
+
+ sym = mkSymbol(FILE_MEMORY_GLOBAL, 0, TYPE_U32, i * typeSizeof(sType) + 4);
+ mkStore(OP_STORE, TYPE_U32, sym, getSrc(&insn->src[1], 0), split[1]);
+ } else {
+ Symbol *sym = mkSymbol(FILE_MEMORY_GLOBAL, 0, sType, i * typeSizeof(sType));
+ mkStore(OP_STORE, sType, sym, getSrc(&insn->src[1], 0), getSrc(&insn->src[0], i));
+ }
+ }
+
+ info->io.globalAccess |= 0x2;
+ break;
+ }
default:
ERROR("unknown nir_intrinsic_op %s\n", nir_intrinsic_infos[op].name);
return false;
return true;
}
+Value*
+Converter::convert(nir_load_const_instr *insn, uint8_t idx)
+{
+ Value *val;
+
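+   // emit the immediate load at the recorded insertion point, so it ends up
+   // in front of the instruction that uses it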
+ if (immInsertPos)
+ setPosition(immInsertPos, true);
+ else
+ setPosition(bb, false);
+
+ switch (insn->def.bit_size) {
+ case 64:
+ val = loadImm(getSSA(8), insn->value[idx].u64);
+ break;
+ case 32:
+ val = loadImm(getSSA(4), insn->value[idx].u32);
+ break;
+ case 16:
+ val = loadImm(getSSA(2), insn->value[idx].u16);
+ break;
+ case 8:
+ val = loadImm(getSSA(1), insn->value[idx].u8);
+ break;
+ default:
+ unreachable("unhandled bit size!\n");
+ }
+ setPosition(bb, true);
+ return val;
+}
+
bool
Converter::visit(nir_load_const_instr *insn)
{
assert(insn->def.bit_size <= 64);
-
- LValues &newDefs = convert(&insn->def);
- for (int i = 0; i < insn->def.num_components; i++) {
- switch (insn->def.bit_size) {
- case 64:
- loadImm(newDefs[i], insn->value.u64[i]);
- break;
- case 32:
- loadImm(newDefs[i], insn->value.u32[i]);
- break;
- case 16:
- loadImm(newDefs[i], insn->value.u16[i]);
- break;
- case 8:
- loadImm(newDefs[i], insn->value.u8[i]);
- break;
- }
- }
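+   // don't load the immediate here; it gets materialized on demand when a
+   // source refers to it (see convert(nir_load_const_instr*, uint8_t))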
+ immediates[insn->def.index] = insn;
return true;
}
case nir_op_iabs:
case nir_op_fadd:
case nir_op_iadd:
- case nir_op_fand:
case nir_op_iand:
case nir_op_fceil:
case nir_op_fcos:
case nir_op_umul_high:
case nir_op_fneg:
case nir_op_ineg:
- case nir_op_fnot:
case nir_op_inot:
- case nir_op_for:
case nir_op_ior:
case nir_op_pack_64_2x32_split:
case nir_op_fpow:
case nir_op_ushr:
case nir_op_fsin:
case nir_op_fsqrt:
- case nir_op_fsub:
- case nir_op_isub:
case nir_op_ftrunc:
case nir_op_ishl:
- case nir_op_fxor:
case nir_op_ixor: {
DEFAULT_CHECKS;
LValues &newDefs = convert(&insn->dest);
// those are weird ALU ops and need special handling, because
// 1. they are always component based
// 2. they basically just merge multiple values into one data type
- case nir_op_imov:
- case nir_op_fmov:
+ case nir_op_mov:
+ if (!insn->dest.dest.is_ssa && insn->dest.dest.reg.reg->num_array_elems) {
+ nir_reg_dest& reg = insn->dest.dest.reg;
+ uint32_t goffset = regToLmemOffset[reg.reg->index];
+ uint8_t comps = reg.reg->num_components;
+ uint8_t size = reg.reg->bit_size / 8;
+ uint8_t csize = 4 * size; // TODO after fixing MemoryOpts: comps * size;
+ uint32_t aoffset = csize * reg.base_offset;
+ Value *indirect = NULL;
+
+ if (reg.indirect)
+ indirect = mkOp2v(OP_MUL, TYPE_U32, getSSA(4, FILE_ADDRESS),
+ getSrc(reg.indirect, 0), mkImm(csize));
+
+ for (uint8_t i = 0u; i < comps; ++i) {
+ if (!((1u << i) & insn->dest.write_mask))
+ continue;
+
+ Symbol *sym = mkSymbol(FILE_MEMORY_LOCAL, 0, dType, goffset + aoffset + i * size);
+ mkStore(OP_STORE, dType, sym, indirect, getSrc(&insn->src[0], i));
+ }
+ break;
+ } else if (!insn->src[0].src.is_ssa && insn->src[0].src.reg.reg->num_array_elems) {
+ LValues &newDefs = convert(&insn->dest);
+ nir_reg_src& reg = insn->src[0].src.reg;
+ uint32_t goffset = regToLmemOffset[reg.reg->index];
+ // uint8_t comps = reg.reg->num_components;
+ uint8_t size = reg.reg->bit_size / 8;
+ uint8_t csize = 4 * size; // TODO after fixing MemoryOpts: comps * size;
+ uint32_t aoffset = csize * reg.base_offset;
+ Value *indirect = NULL;
+
+ if (reg.indirect)
+ indirect = mkOp2v(OP_MUL, TYPE_U32, getSSA(4, FILE_ADDRESS), getSrc(reg.indirect, 0), mkImm(csize));
+
+ for (uint8_t i = 0u; i < newDefs.size(); ++i)
+ loadFrom(FILE_MEMORY_LOCAL, 0, dType, newDefs[i], goffset + aoffset, i, indirect);
+
+ break;
+ } else {
+ LValues &newDefs = convert(&insn->dest);
+ for (LValues::size_type c = 0u; c < newDefs.size(); ++c) {
+ mkMov(newDefs[c], getSrc(&insn->src[0], c), dType);
+ }
+ }
+ break;
case nir_op_vec2:
case nir_op_vec3:
case nir_op_vec4: {
}
#undef DEFAULT_CHECKS
+bool
+Converter::visit(nir_ssa_undef_instr *insn)
+{
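+   // an OP_NOP with a def is the closest thing we have to an undefined value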
+ LValues &newDefs = convert(&insn->def);
+ for (uint8_t i = 0u; i < insn->def.num_components; ++i) {
+ mkOp(OP_NOP, TYPE_NONE, newDefs[i]);
+ }
+ return true;
+}
+
+#define CASE_SAMPLER(ty) \
+ case GLSL_SAMPLER_DIM_ ## ty : \
+ if (isArray && !isShadow) \
+ return TEX_TARGET_ ## ty ## _ARRAY; \
+ else if (!isArray && isShadow) \
+ return TEX_TARGET_## ty ## _SHADOW; \
+ else if (isArray && isShadow) \
+ return TEX_TARGET_## ty ## _ARRAY_SHADOW; \
+ else \
+ return TEX_TARGET_ ## ty
+
+TexTarget
+Converter::convert(glsl_sampler_dim dim, bool isArray, bool isShadow)
+{
+ switch (dim) {
+ CASE_SAMPLER(1D);
+ CASE_SAMPLER(2D);
+ CASE_SAMPLER(CUBE);
+ case GLSL_SAMPLER_DIM_3D:
+ return TEX_TARGET_3D;
+ case GLSL_SAMPLER_DIM_MS:
+ if (isArray)
+ return TEX_TARGET_2D_MS_ARRAY;
+ return TEX_TARGET_2D_MS;
+ case GLSL_SAMPLER_DIM_RECT:
+ if (isShadow)
+ return TEX_TARGET_RECT_SHADOW;
+ return TEX_TARGET_RECT;
+ case GLSL_SAMPLER_DIM_BUF:
+ return TEX_TARGET_BUFFER;
+ case GLSL_SAMPLER_DIM_EXTERNAL:
+ return TEX_TARGET_2D;
+ default:
+ ERROR("unknown glsl_sampler_dim %u\n", dim);
+ assert(false);
+ return TEX_TARGET_COUNT;
+ }
+}
+#undef CASE_SAMPLER
+
+Value*
+Converter::applyProjection(Value *src, Value *proj)
+{
+ if (!proj)
+ return src;
+ return mkOp2v(OP_MUL, TYPE_F32, getScratch(), src, proj);
+}
+
+unsigned int
+Converter::getNIRArgCount(TexInstruction::Target& target)
+{
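+   // NIR supplies fewer coordinate sources than codegen's getArgCount() for
+   // cube arrays and MS targets, so adjust for those here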
+ unsigned int result = target.getArgCount();
+ if (target.isCube() && target.isArray())
+ result--;
+ if (target.isMS())
+ result--;
+ return result;
+}
+
+uint16_t
+Converter::handleDeref(nir_deref_instr *deref, Value * &indirect, const nir_variable * &tex)
+{
+ typedef std::pair<uint32_t,Value*> DerefPair;
+ std::list<DerefPair> derefs;
+
+ uint16_t result = 0;
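+   // walk the deref chain from the tail up to the variable, accumulating
+   // constant offsets and collecting the indirect parts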
+ while (deref->deref_type != nir_deref_type_var) {
+ switch (deref->deref_type) {
+ case nir_deref_type_array: {
+ Value *indirect;
+ uint8_t size = type_size(deref->type, true);
+ result += size * getIndirect(&deref->arr.index, 0, indirect);
+
+ if (indirect) {
+ derefs.push_front(std::make_pair(size, indirect));
+ }
+
+ break;
+ }
+ case nir_deref_type_struct: {
+ result += nir_deref_instr_parent(deref)->type->struct_location_offset(deref->strct.index);
+ break;
+ }
+ case nir_deref_type_var:
+ default:
+ unreachable("nir_deref_type_var reached in handleDeref!");
+ break;
+ }
+ deref = nir_deref_instr_parent(deref);
+ }
+
+ indirect = NULL;
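+   // fold all collected indirect parts into a single indirect offset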
+ for (std::list<DerefPair>::const_iterator it = derefs.begin(); it != derefs.end(); ++it) {
+ Value *offset = mkOp2v(OP_MUL, TYPE_U32, getSSA(), loadImm(getSSA(), it->first), it->second);
+ if (indirect)
+ indirect = mkOp2v(OP_ADD, TYPE_U32, getSSA(), indirect, offset);
+ else
+ indirect = offset;
+ }
+
+ tex = nir_deref_instr_get_variable(deref);
+ assert(tex);
+
+ return result + tex->data.driver_location;
+}
+
+CacheMode
+Converter::convert(enum gl_access_qualifier access)
+{
+ switch (access) {
+ case ACCESS_VOLATILE:
+ return CACHE_CV;
+ case ACCESS_COHERENT:
+ return CACHE_CG;
+ default:
+ return CACHE_CA;
+ }
+}
+
+CacheMode
+Converter::getCacheModeFromVar(const nir_variable *var)
+{
+ return convert(var->data.access);
+}
+
+bool
+Converter::visit(nir_tex_instr *insn)
+{
+ switch (insn->op) {
+ case nir_texop_lod:
+ case nir_texop_query_levels:
+ case nir_texop_tex:
+ case nir_texop_texture_samples:
+ case nir_texop_tg4:
+ case nir_texop_txb:
+ case nir_texop_txd:
+ case nir_texop_txf:
+ case nir_texop_txf_ms:
+ case nir_texop_txl:
+ case nir_texop_txs: {
+ LValues &newDefs = convert(&insn->dest);
+ std::vector<Value*> srcs;
+ std::vector<Value*> defs;
+ std::vector<nir_src*> offsets;
+ uint8_t mask = 0;
+ bool lz = false;
+ Value *proj = NULL;
+ TexInstruction::Target target = convert(insn->sampler_dim, insn->is_array, insn->is_shadow);
+ operation op = getOperation(insn->op);
+
+ int r, s;
+ int biasIdx = nir_tex_instr_src_index(insn, nir_tex_src_bias);
+ int compIdx = nir_tex_instr_src_index(insn, nir_tex_src_comparator);
+ int coordsIdx = nir_tex_instr_src_index(insn, nir_tex_src_coord);
+ int ddxIdx = nir_tex_instr_src_index(insn, nir_tex_src_ddx);
+ int ddyIdx = nir_tex_instr_src_index(insn, nir_tex_src_ddy);
+ int msIdx = nir_tex_instr_src_index(insn, nir_tex_src_ms_index);
+ int lodIdx = nir_tex_instr_src_index(insn, nir_tex_src_lod);
+ int offsetIdx = nir_tex_instr_src_index(insn, nir_tex_src_offset);
+ int projIdx = nir_tex_instr_src_index(insn, nir_tex_src_projector);
+ int sampOffIdx = nir_tex_instr_src_index(insn, nir_tex_src_sampler_offset);
+ int texOffIdx = nir_tex_instr_src_index(insn, nir_tex_src_texture_offset);
+ int sampHandleIdx = nir_tex_instr_src_index(insn, nir_tex_src_sampler_handle);
+ int texHandleIdx = nir_tex_instr_src_index(insn, nir_tex_src_texture_handle);
+
+ bool bindless = sampHandleIdx != -1 || texHandleIdx != -1;
+ assert((sampHandleIdx != -1) == (texHandleIdx != -1));
+
+ if (projIdx != -1)
+ proj = mkOp1v(OP_RCP, TYPE_F32, getScratch(), getSrc(&insn->src[projIdx].src, 0));
+
+ srcs.resize(insn->coord_components);
+ for (uint8_t i = 0u; i < insn->coord_components; ++i)
+ srcs[i] = applyProjection(getSrc(&insn->src[coordsIdx].src, i), proj);
+
+      // sometimes we get fewer args than target.getArgCount, but codegen expects the latter
+ if (insn->coord_components) {
+ uint32_t argCount = target.getArgCount();
+
+ if (target.isMS())
+ argCount -= 1;
+
+ for (uint32_t i = 0u; i < (argCount - insn->coord_components); ++i)
+ srcs.push_back(getSSA());
+ }
+
+ if (insn->op == nir_texop_texture_samples)
+ srcs.push_back(zero);
+ else if (!insn->num_srcs)
+ srcs.push_back(loadImm(NULL, 0));
+ if (biasIdx != -1)
+ srcs.push_back(getSrc(&insn->src[biasIdx].src, 0));
+ if (lodIdx != -1)
+ srcs.push_back(getSrc(&insn->src[lodIdx].src, 0));
+ else if (op == OP_TXF)
+ lz = true;
+ if (msIdx != -1)
+ srcs.push_back(getSrc(&insn->src[msIdx].src, 0));
+ if (offsetIdx != -1)
+ offsets.push_back(&insn->src[offsetIdx].src);
+ if (compIdx != -1)
+ srcs.push_back(applyProjection(getSrc(&insn->src[compIdx].src, 0), proj));
+ if (texOffIdx != -1) {
+ srcs.push_back(getSrc(&insn->src[texOffIdx].src, 0));
+ texOffIdx = srcs.size() - 1;
+ }
+ if (sampOffIdx != -1) {
+ srcs.push_back(getSrc(&insn->src[sampOffIdx].src, 0));
+ sampOffIdx = srcs.size() - 1;
+ }
+ if (bindless) {
+      // currently we only use the lower 32 bits of the 64 bit handle
+ Value *split[2];
+ Value *handle = getSrc(&insn->src[sampHandleIdx].src, 0);
+
+ mkSplit(split, 4, handle);
+
+ srcs.push_back(split[0]);
+ texOffIdx = srcs.size() - 1;
+ }
+
+ r = bindless ? 0xff : insn->texture_index;
+ s = bindless ? 0x1f : insn->sampler_index;
+
+ defs.resize(newDefs.size());
+ for (uint8_t d = 0u; d < newDefs.size(); ++d) {
+ defs[d] = newDefs[d];
+ mask |= 1 << d;
+ }
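+      // implicit derivatives are only available in fragment shaders, and MS
+      // targets have no LOD, so force level zero in those cases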
+ if (target.isMS() || (op == OP_TEX && prog->getType() != Program::TYPE_FRAGMENT))
+ lz = true;
+
+ TexInstruction *texi = mkTex(op, target.getEnum(), r, s, defs, srcs);
+ texi->tex.levelZero = lz;
+ texi->tex.mask = mask;
+ texi->tex.bindless = bindless;
+
+ if (texOffIdx != -1)
+ texi->tex.rIndirectSrc = texOffIdx;
+ if (sampOffIdx != -1)
+ texi->tex.sIndirectSrc = sampOffIdx;
+
+ switch (insn->op) {
+ case nir_texop_tg4:
+ if (!target.isShadow())
+ texi->tex.gatherComp = insn->component;
+ break;
+ case nir_texop_txs:
+ texi->tex.query = TXQ_DIMS;
+ break;
+ case nir_texop_texture_samples:
+ texi->tex.mask = 0x4;
+ texi->tex.query = TXQ_TYPE;
+ break;
+ case nir_texop_query_levels:
+ texi->tex.mask = 0x8;
+ texi->tex.query = TXQ_DIMS;
+ break;
+ default:
+ break;
+ }
+
+ texi->tex.useOffsets = offsets.size();
+ if (texi->tex.useOffsets) {
+ for (uint8_t s = 0; s < texi->tex.useOffsets; ++s) {
+ for (uint32_t c = 0u; c < 3; ++c) {
+ uint8_t s2 = std::min(c, target.getDim() - 1);
+ texi->offset[s][c].set(getSrc(offsets[s], s2));
+ texi->offset[s][c].setInsn(texi);
+ }
+ }
+ }
+
+ if (op == OP_TXG && offsetIdx == -1) {
+ if (nir_tex_instr_has_explicit_tg4_offsets(insn)) {
+ texi->tex.useOffsets = 4;
+ setPosition(texi, false);
+ for (uint8_t i = 0; i < 4; ++i) {
+ for (uint8_t j = 0; j < 2; ++j) {
+ texi->offset[i][j].set(loadImm(NULL, insn->tg4_offsets[i][j]));
+ texi->offset[i][j].setInsn(texi);
+ }
+ }
+ setPosition(texi, true);
+ }
+ }
+
+ if (ddxIdx != -1 && ddyIdx != -1) {
+ for (uint8_t c = 0u; c < target.getDim() + target.isCube(); ++c) {
+ texi->dPdx[c].set(getSrc(&insn->src[ddxIdx].src, c));
+ texi->dPdy[c].set(getSrc(&insn->src[ddyIdx].src, c));
+ }
+ }
+
+ break;
+ }
+ default:
+ ERROR("unknown nir_texop %u\n", insn->op);
+ return false;
+ }
+ return true;
+}
+
+bool
+Converter::visit(nir_deref_instr *deref)
+{
+   // we just ignore these, because image intrinsics are the only place where
+   // we should end up with deref sources, and those have to backtrack anyway
+   // to get at the nir_variable. This code just exists to handle some special
+   // cases.
+ switch (deref->deref_type) {
+ case nir_deref_type_array:
+ case nir_deref_type_struct:
+ case nir_deref_type_var:
+ break;
+ default:
+ ERROR("unknown nir_deref_instr %u\n", deref->deref_type);
+ return false;
+ }
+ return true;
+}
+
bool
Converter::run()
{
if (prog->dbgFlags & NV50_IR_DEBUG_VERBOSE)
nir_print_shader(nir, stderr);
+ struct nir_lower_subgroups_options subgroup_options = {
+ .subgroup_size = 32,
+ .ballot_bit_size = 32,
+ };
+
NIR_PASS_V(nir, nir_lower_io, nir_var_all, type_size, (nir_lower_io_options)0);
+ NIR_PASS_V(nir, nir_lower_subgroups, &subgroup_options);
NIR_PASS_V(nir, nir_lower_regs_to_ssa);
NIR_PASS_V(nir, nir_lower_load_const_to_scalar);
NIR_PASS_V(nir, nir_lower_vars_to_ssa);
- NIR_PASS_V(nir, nir_lower_alu_to_scalar);
+ NIR_PASS_V(nir, nir_lower_alu_to_scalar, NULL, NULL);
NIR_PASS_V(nir, nir_lower_phis_to_scalar);
do {