#else
#include <tr1/unordered_map>
#endif
+#include <cstring>
+#include <list>
#include <vector>
namespace {
using namespace nv50_ir;
int
-type_size(const struct glsl_type *type)
+type_size(const struct glsl_type *type, bool bindless)
{
return glsl_count_attribute_slots(type, false);
}
private:
typedef std::vector<LValue*> LValues;
typedef unordered_map<unsigned, LValues> NirDefMap;
+ typedef unordered_map<unsigned, nir_load_const_instr*> ImmediateMap;
typedef unordered_map<unsigned, uint32_t> NirArrayLMemOffsets;
typedef unordered_map<unsigned, BasicBlock*> NirBlockMap;
+ CacheMode convert(enum gl_access_qualifier);
TexTarget convert(glsl_sampler_dim, bool isArray, bool isShadow);
LValues& convert(nir_alu_dest *);
BasicBlock* convert(nir_block *);
LValues& convert(nir_dest *);
SVSemantic convert(nir_intrinsic_op);
+ Value* convert(nir_load_const_instr*, uint8_t);
LValues& convert(nir_register *);
LValues& convert(nir_ssa_def *);
+ ImgFormat convertGLImgFormat(GLuint);
+
Value* getSrc(nir_alu_src *, uint8_t component = 0);
Value* getSrc(nir_register *, uint8_t);
Value* getSrc(nir_src *, uint8_t, bool indirect = false);
   // If the found value does not have a constant part, the Value gets
   // returned through the Value parameter.
uint32_t getIndirect(nir_src *, uint8_t, Value *&);
- uint32_t getIndirect(nir_intrinsic_instr *, uint8_t s, uint8_t c, Value *&);
+   // isScalar indicates that the addressing is scalar; otherwise vec4
+   // addressing is assumed
+ uint32_t getIndirect(nir_intrinsic_instr *, uint8_t s, uint8_t c, Value *&,
+ bool isScalar = false);
uint32_t getSlotAddress(nir_intrinsic_instr *, uint8_t idx, uint8_t slot);
DataType getDType(nir_alu_instr *);
DataType getDType(nir_intrinsic_instr *);
+ DataType getDType(nir_intrinsic_instr *, bool isSigned);
DataType getDType(nir_op, uint8_t);
std::vector<DataType> getSTypes(nir_alu_instr *);
bool visit(nir_alu_instr *);
bool visit(nir_block *);
bool visit(nir_cf_node *);
+ bool visit(nir_deref_instr *);
bool visit(nir_function *);
bool visit(nir_if *);
bool visit(nir_instr *);
// tex stuff
Value* applyProjection(Value *src, Value *proj);
+ unsigned int getNIRArgCount(TexInstruction::Target&);
+
+ // image stuff
+ uint16_t handleDeref(nir_deref_instr *, Value * & indirect, const nir_variable * &);
+ CacheMode getCacheModeFromVar(const nir_variable *);
nir_shader *nir;
NirDefMap ssaDefs;
NirDefMap regDefs;
+ ImmediateMap immediates;
NirArrayLMemOffsets regToLmemOffset;
NirBlockMap blocks;
unsigned int curLoopDepth;
BasicBlock *exit;
Value *zero;
+ Instruction *immInsertPos;
int clipVertexOutput;
DataType
Converter::getDType(nir_intrinsic_instr *insn)
+{
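+   // only the signed atomic min/max intrinsics need a signed result type;
+   // everything else is treated as unsigned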
+ bool isSigned;
+ switch (insn->intrinsic) {
+ case nir_intrinsic_shared_atomic_imax:
+ case nir_intrinsic_shared_atomic_imin:
+ case nir_intrinsic_ssbo_atomic_imax:
+ case nir_intrinsic_ssbo_atomic_imin:
+ isSigned = true;
+ break;
+ default:
+ isSigned = false;
+ break;
+ }
+
+ return getDType(insn, isSigned);
+}
+
+DataType
+Converter::getDType(nir_intrinsic_instr *insn, bool isSigned)
{
if (insn->dest.is_ssa)
- return typeOfSize(insn->dest.ssa.bit_size / 8, false, false);
+ return typeOfSize(insn->dest.ssa.bit_size / 8, false, isSigned);
else
- return typeOfSize(insn->dest.reg.reg->bit_size / 8, false, false);
+ return typeOfSize(insn->dest.reg.reg->bit_size / 8, false, isSigned);
}
DataType
case nir_op_fadd:
case nir_op_iadd:
return OP_ADD;
- case nir_op_fand:
case nir_op_iand:
return OP_AND;
case nir_op_ifind_msb:
case nir_op_fneg:
case nir_op_ineg:
return OP_NEG;
- case nir_op_fnot:
case nir_op_inot:
return OP_NOT;
- case nir_op_for:
case nir_op_ior:
return OP_OR;
case nir_op_fpow:
return OP_SIN;
case nir_op_fsqrt:
return OP_SQRT;
- case nir_op_fsub:
- case nir_op_isub:
- return OP_SUB;
case nir_op_ftrunc:
return OP_TRUNC;
- case nir_op_fxor:
case nir_op_ixor:
return OP_XOR;
default:
return OP_EMIT;
case nir_intrinsic_end_primitive:
return OP_RESTART;
+ case nir_intrinsic_bindless_image_atomic_add:
+ case nir_intrinsic_image_atomic_add:
+ case nir_intrinsic_image_deref_atomic_add:
+ case nir_intrinsic_bindless_image_atomic_and:
+ case nir_intrinsic_image_atomic_and:
+ case nir_intrinsic_image_deref_atomic_and:
+ case nir_intrinsic_bindless_image_atomic_comp_swap:
+ case nir_intrinsic_image_atomic_comp_swap:
+ case nir_intrinsic_image_deref_atomic_comp_swap:
+ case nir_intrinsic_bindless_image_atomic_exchange:
+ case nir_intrinsic_image_atomic_exchange:
+ case nir_intrinsic_image_deref_atomic_exchange:
+ case nir_intrinsic_bindless_image_atomic_imax:
+ case nir_intrinsic_image_atomic_imax:
+ case nir_intrinsic_image_deref_atomic_imax:
+ case nir_intrinsic_bindless_image_atomic_umax:
+ case nir_intrinsic_image_atomic_umax:
+ case nir_intrinsic_image_deref_atomic_umax:
+ case nir_intrinsic_bindless_image_atomic_imin:
+ case nir_intrinsic_image_atomic_imin:
+ case nir_intrinsic_image_deref_atomic_imin:
+ case nir_intrinsic_bindless_image_atomic_umin:
+ case nir_intrinsic_image_atomic_umin:
+ case nir_intrinsic_image_deref_atomic_umin:
+ case nir_intrinsic_bindless_image_atomic_or:
+ case nir_intrinsic_image_atomic_or:
+ case nir_intrinsic_image_deref_atomic_or:
+ case nir_intrinsic_bindless_image_atomic_xor:
+ case nir_intrinsic_image_atomic_xor:
+ case nir_intrinsic_image_deref_atomic_xor:
+ return OP_SUREDP;
+ case nir_intrinsic_bindless_image_load:
+ case nir_intrinsic_image_load:
+ case nir_intrinsic_image_deref_load:
+ return OP_SULDP;
+ case nir_intrinsic_bindless_image_samples:
+ case nir_intrinsic_image_samples:
+ case nir_intrinsic_image_deref_samples:
+ case nir_intrinsic_bindless_image_size:
+ case nir_intrinsic_image_size:
+ case nir_intrinsic_image_deref_size:
+ return OP_SUQ;
+ case nir_intrinsic_bindless_image_store:
+ case nir_intrinsic_image_store:
+ case nir_intrinsic_image_deref_store:
+ return OP_SUSTP;
default:
ERROR("couldn't get operation for nir_intrinsic_op %u\n", op);
assert(false);
Converter::getSubOp(nir_intrinsic_op op)
{
switch (op) {
+ case nir_intrinsic_bindless_image_atomic_add:
+ case nir_intrinsic_image_atomic_add:
+ case nir_intrinsic_image_deref_atomic_add:
+ case nir_intrinsic_shared_atomic_add:
+ case nir_intrinsic_ssbo_atomic_add:
+ return NV50_IR_SUBOP_ATOM_ADD;
+ case nir_intrinsic_bindless_image_atomic_and:
+ case nir_intrinsic_image_atomic_and:
+ case nir_intrinsic_image_deref_atomic_and:
+ case nir_intrinsic_shared_atomic_and:
+ case nir_intrinsic_ssbo_atomic_and:
+ return NV50_IR_SUBOP_ATOM_AND;
+ case nir_intrinsic_bindless_image_atomic_comp_swap:
+ case nir_intrinsic_image_atomic_comp_swap:
+ case nir_intrinsic_image_deref_atomic_comp_swap:
+ case nir_intrinsic_shared_atomic_comp_swap:
+ case nir_intrinsic_ssbo_atomic_comp_swap:
+ return NV50_IR_SUBOP_ATOM_CAS;
+ case nir_intrinsic_bindless_image_atomic_exchange:
+ case nir_intrinsic_image_atomic_exchange:
+ case nir_intrinsic_image_deref_atomic_exchange:
+ case nir_intrinsic_shared_atomic_exchange:
+ case nir_intrinsic_ssbo_atomic_exchange:
+ return NV50_IR_SUBOP_ATOM_EXCH;
+ case nir_intrinsic_bindless_image_atomic_or:
+ case nir_intrinsic_image_atomic_or:
+ case nir_intrinsic_image_deref_atomic_or:
+ case nir_intrinsic_shared_atomic_or:
+ case nir_intrinsic_ssbo_atomic_or:
+ return NV50_IR_SUBOP_ATOM_OR;
+ case nir_intrinsic_bindless_image_atomic_imax:
+ case nir_intrinsic_image_atomic_imax:
+ case nir_intrinsic_image_deref_atomic_imax:
+ case nir_intrinsic_bindless_image_atomic_umax:
+ case nir_intrinsic_image_atomic_umax:
+ case nir_intrinsic_image_deref_atomic_umax:
+ case nir_intrinsic_shared_atomic_imax:
+ case nir_intrinsic_shared_atomic_umax:
+ case nir_intrinsic_ssbo_atomic_imax:
+ case nir_intrinsic_ssbo_atomic_umax:
+ return NV50_IR_SUBOP_ATOM_MAX;
+ case nir_intrinsic_bindless_image_atomic_imin:
+ case nir_intrinsic_image_atomic_imin:
+ case nir_intrinsic_image_deref_atomic_imin:
+ case nir_intrinsic_bindless_image_atomic_umin:
+ case nir_intrinsic_image_atomic_umin:
+ case nir_intrinsic_image_deref_atomic_umin:
+ case nir_intrinsic_shared_atomic_imin:
+ case nir_intrinsic_shared_atomic_umin:
+ case nir_intrinsic_ssbo_atomic_imin:
+ case nir_intrinsic_ssbo_atomic_umin:
+ return NV50_IR_SUBOP_ATOM_MIN;
+ case nir_intrinsic_bindless_image_atomic_xor:
+ case nir_intrinsic_image_atomic_xor:
+ case nir_intrinsic_image_deref_atomic_xor:
+ case nir_intrinsic_shared_atomic_xor:
+ case nir_intrinsic_ssbo_atomic_xor:
+ return NV50_IR_SUBOP_ATOM_XOR;
+
+ case nir_intrinsic_group_memory_barrier:
+ case nir_intrinsic_memory_barrier:
+ case nir_intrinsic_memory_barrier_atomic_counter:
+ case nir_intrinsic_memory_barrier_buffer:
+ case nir_intrinsic_memory_barrier_image:
+ return NV50_IR_SUBOP_MEMBAR(M, GL);
+ case nir_intrinsic_memory_barrier_shared:
+ return NV50_IR_SUBOP_MEMBAR(M, CTA);
+
case nir_intrinsic_vote_all:
return NV50_IR_SUBOP_VOTE_ALL;
case nir_intrinsic_vote_any:
Value*
Converter::getSrc(nir_ssa_def *src, uint8_t idx)
{
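+   // load_const instructions are not converted eagerly; materialize the
+   // immediate here, at its point of use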
+ ImmediateMap::iterator iit = immediates.find(src->index);
+ if (iit != immediates.end())
+ return convert((*iit).second, idx);
+
NirDefMap::iterator it = ssaDefs.find(src->index);
if (it == ssaDefs.end()) {
ERROR("SSA value %u not found\n", src->index);
if (offset) {
indirect = NULL;
- return offset->u32[0];
+ return offset[0].u32;
}
indirect = getSrc(src, idx, true);
}
uint32_t
-Converter::getIndirect(nir_intrinsic_instr *insn, uint8_t s, uint8_t c, Value *&indirect)
+Converter::getIndirect(nir_intrinsic_instr *insn, uint8_t s, uint8_t c, Value *&indirect, bool isScalar)
{
int32_t idx = nir_intrinsic_base(insn) + getIndirect(&insn->src[s], c, indirect);
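+   // for vec4 addressing, turn the slot-based indirect into a byte offset
+   // (16 bytes per slot); scalar addressing is used as-is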
- if (indirect)
+ if (indirect && !isScalar)
indirect = mkOp2v(OP_SHL, TYPE_U32, getSSA(4, FILE_ADDRESS), indirect, loadImm(NULL, 4));
return idx;
}
info->io.viewportId = -1;
info->numInputs = 0;
+ info->numOutputs = 0;
// we have to fixup the uniform locations for arrays
unsigned numImages = 0;
numImages += type->is_array() ? type->arrays_of_arrays_size() : 1;
}
+ info->numSysVals = 0;
+ for (uint8_t i = 0; i < SYSTEM_VALUE_MAX; ++i) {
+ if (!(nir->info.system_values_read & 1ull << i))
+ continue;
+
+ system_val_to_tgsi_semantic(i, &name, &index);
+ info->sv[info->numSysVals].sn = name;
+ info->sv[info->numSysVals].si = index;
+ info->sv[info->numSysVals].input = 0; // TODO inferSysValDirection(sn);
+
+ switch (i) {
+ case SYSTEM_VALUE_INSTANCE_ID:
+ info->io.instanceId = info->numSysVals;
+ break;
+ case SYSTEM_VALUE_TESS_LEVEL_INNER:
+ case SYSTEM_VALUE_TESS_LEVEL_OUTER:
+ info->sv[info->numSysVals].patch = 1;
+ break;
+ case SYSTEM_VALUE_VERTEX_ID:
+ info->io.vertexId = info->numSysVals;
+ break;
+ default:
+ break;
+ }
+
+ info->numSysVals += 1;
+ }
+
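+   // compute shaders have no varying inputs or outputs to collect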
+ if (prog->getType() == Program::TYPE_COMPUTE)
+ return true;
+
nir_foreach_variable(var, &nir->inputs) {
const glsl_type *type = var->type;
int slot = var->data.location;
info->numInputs = std::max<uint8_t>(info->numInputs, vary);
}
- info->numOutputs = 0;
nir_foreach_variable(var, &nir->outputs) {
const glsl_type *type = var->type;
int slot = var->data.location;
else
info->out[vary].mask |= ((1 << comp) - 1) << frac;
- if (nir->info.outputs_read & 1ll << slot)
+ if (nir->info.outputs_read & 1ull << slot)
info->out[vary].oread = 1;
}
info->numOutputs = std::max<uint8_t>(info->numOutputs, vary);
}
- info->numSysVals = 0;
- for (uint8_t i = 0; i < 64; ++i) {
- if (!(nir->info.system_values_read & 1ll << i))
- continue;
-
- system_val_to_tgsi_semantic(i, &name, &index);
- info->sv[info->numSysVals].sn = name;
- info->sv[info->numSysVals].si = index;
- info->sv[info->numSysVals].input = 0; // TODO inferSysValDirection(sn);
-
- switch (i) {
- case SYSTEM_VALUE_INSTANCE_ID:
- info->io.instanceId = info->numSysVals;
- break;
- case SYSTEM_VALUE_TESS_LEVEL_INNER:
- case SYSTEM_VALUE_TESS_LEVEL_OUTER:
- info->sv[info->numSysVals].patch = 1;
- break;
- case SYSTEM_VALUE_VERTEX_ID:
- info->io.vertexId = info->numSysVals;
- break;
- default:
- break;
- }
-
- info->numSysVals += 1;
- }
-
if (info->io.genUserClip > 0) {
info->io.clipDistances = info->io.genUserClip;
bool
Converter::visit(nir_function *function)
{
- // we only support emiting the main function for now
- assert(!strcmp(function->name, "main"));
assert(function->impl);
// usually the blocks will set everything up, but main is special
bb->cfg.attach(&exit->cfg, Graph::Edge::TREE);
setPosition(exit, true);
- if (info->io.genUserClip > 0)
+ if ((prog->getType() == Program::TYPE_VERTEX ||
+ prog->getType() == Program::TYPE_TESSELLATION_EVAL)
+ && info->io.genUserClip > 0)
handleUserClipPlanes();
// TODO: for non main function this needs to be a OP_RETURN
bool
Converter::visit(nir_instr *insn)
{
+   // we need an insertion point for on-the-fly generated immediate loads
+ immInsertPos = bb->getExit();
switch (insn->type) {
case nir_instr_type_alu:
return visit(nir_instr_as_alu(insn));
+ case nir_instr_type_deref:
+ return visit(nir_instr_as_deref(insn));
case nir_instr_type_intrinsic:
return visit(nir_instr_as_intrinsic(insn));
case nir_instr_type_jump:
}
}
+ImgFormat
+Converter::convertGLImgFormat(GLuint format)
+{
+#define FMT_CASE(a, b) \
+ case GL_ ## a: return nv50_ir::FMT_ ## b
+
+ switch (format) {
+ FMT_CASE(NONE, NONE);
+
+ FMT_CASE(RGBA32F, RGBA32F);
+ FMT_CASE(RGBA16F, RGBA16F);
+ FMT_CASE(RG32F, RG32F);
+ FMT_CASE(RG16F, RG16F);
+ FMT_CASE(R11F_G11F_B10F, R11G11B10F);
+ FMT_CASE(R32F, R32F);
+ FMT_CASE(R16F, R16F);
+
+ FMT_CASE(RGBA32UI, RGBA32UI);
+ FMT_CASE(RGBA16UI, RGBA16UI);
+ FMT_CASE(RGB10_A2UI, RGB10A2UI);
+ FMT_CASE(RGBA8UI, RGBA8UI);
+ FMT_CASE(RG32UI, RG32UI);
+ FMT_CASE(RG16UI, RG16UI);
+ FMT_CASE(RG8UI, RG8UI);
+ FMT_CASE(R32UI, R32UI);
+ FMT_CASE(R16UI, R16UI);
+ FMT_CASE(R8UI, R8UI);
+
+ FMT_CASE(RGBA32I, RGBA32I);
+ FMT_CASE(RGBA16I, RGBA16I);
+ FMT_CASE(RGBA8I, RGBA8I);
+ FMT_CASE(RG32I, RG32I);
+ FMT_CASE(RG16I, RG16I);
+ FMT_CASE(RG8I, RG8I);
+ FMT_CASE(R32I, R32I);
+ FMT_CASE(R16I, R16I);
+ FMT_CASE(R8I, R8I);
+
+ FMT_CASE(RGBA16, RGBA16);
+ FMT_CASE(RGB10_A2, RGB10A2);
+ FMT_CASE(RGBA8, RGBA8);
+ FMT_CASE(RG16, RG16);
+ FMT_CASE(RG8, RG8);
+ FMT_CASE(R16, R16);
+ FMT_CASE(R8, R8);
+
+ FMT_CASE(RGBA16_SNORM, RGBA16_SNORM);
+ FMT_CASE(RGBA8_SNORM, RGBA8_SNORM);
+ FMT_CASE(RG16_SNORM, RG16_SNORM);
+ FMT_CASE(RG8_SNORM, RG8_SNORM);
+ FMT_CASE(R16_SNORM, R16_SNORM);
+ FMT_CASE(R8_SNORM, R8_SNORM);
+
+ FMT_CASE(BGRA_INTEGER, BGRA8);
+ default:
+ ERROR("unknown format %x\n", format);
+ assert(false);
+ return nv50_ir::FMT_NONE;
+ }
+#undef FMT_CASE
+}
+
bool
Converter::visit(nir_intrinsic_instr *insn)
{
nir_intrinsic_op op = insn->intrinsic;
+ const nir_intrinsic_info &opInfo = nir_intrinsic_infos[op];
switch (op) {
case nir_intrinsic_load_uniform: {
}
break;
}
+ case Program::TYPE_GEOMETRY:
case Program::TYPE_VERTEX: {
- if (info->io.genUserClip > 0 && idx == clipVertexOutput) {
+ if (info->io.genUserClip > 0 && idx == (uint32_t)clipVertexOutput) {
mkMov(clipVtx[i], src);
src = clipVtx[i];
}
}
break;
}
+ case nir_intrinsic_load_kernel_input: {
+ assert(prog->getType() == Program::TYPE_COMPUTE);
+ assert(insn->num_components == 1);
+
+ LValues &newDefs = convert(&insn->dest);
+ const DataType dType = getDType(insn);
+ Value *indirect;
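+      // kernel inputs are byte-addressed, hence the scalar addressing mode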
+ uint32_t idx = getIndirect(insn, 0, 0, indirect, true);
+
+ mkLoad(dType, newDefs[0], mkSymbol(FILE_SHADER_INPUT, 0, dType, idx), indirect);
+ break;
+ }
case nir_intrinsic_load_barycentric_at_offset:
case nir_intrinsic_load_barycentric_at_sample:
case nir_intrinsic_load_barycentric_centroid:
}
break;
}
+ case nir_intrinsic_load_per_vertex_output: {
+ const DataType dType = getDType(insn);
+ LValues &newDefs = convert(&insn->dest);
+ Value *indirectVertex;
+ Value *indirectOffset;
+ uint32_t baseVertex = getIndirect(&insn->src[0], 0, indirectVertex);
+ uint32_t idx = getIndirect(insn, 1, 0, indirectOffset);
+ Value *vtxBase = NULL;
+
+ if (indirectVertex)
+ vtxBase = indirectVertex;
+ else
+ vtxBase = loadImm(NULL, baseVertex);
+
+ vtxBase = mkOp2v(OP_ADD, TYPE_U32, getSSA(4, FILE_ADDRESS), outBase, vtxBase);
+
+ for (uint8_t i = 0u; i < insn->num_components; ++i) {
+ uint32_t address = getSlotAddress(insn, idx, i);
+ loadFrom(FILE_SHADER_OUTPUT, 0, dType, newDefs[i], address, 0,
+ indirectOffset, vtxBase, info->in[idx].patch);
+ }
+ break;
+ }
case nir_intrinsic_emit_vertex:
+ if (info->io.genUserClip > 0)
+ handleUserClipPlanes();
+ // fallthrough
case nir_intrinsic_end_primitive: {
uint32_t idx = nir_intrinsic_stream_id(insn);
mkOp1(getOperation(op), TYPE_U32, NULL, mkImm(idx))->fixed = 1;
}
break;
}
+ case nir_intrinsic_get_buffer_size: {
+ LValues &newDefs = convert(&insn->dest);
+ const DataType dType = getDType(insn);
+ Value *indirectBuffer;
+ uint32_t buffer = getIndirect(&insn->src[0], 0, indirectBuffer);
+
+ Symbol *sym = mkSymbol(FILE_MEMORY_BUFFER, buffer, dType, 0);
+ mkOp1(OP_BUFQ, dType, newDefs[0], sym)->setIndirect(0, 0, indirectBuffer);
+ break;
+ }
+ case nir_intrinsic_store_ssbo: {
+ DataType sType = getSType(insn->src[0], false, false);
+ Value *indirectBuffer;
+ Value *indirectOffset;
+ uint32_t buffer = getIndirect(&insn->src[1], 0, indirectBuffer);
+ uint32_t offset = getIndirect(&insn->src[2], 0, indirectOffset);
+
+ for (uint8_t i = 0u; i < insn->num_components; ++i) {
+ if (!((1u << i) & nir_intrinsic_write_mask(insn)))
+ continue;
+ Symbol *sym = mkSymbol(FILE_MEMORY_BUFFER, buffer, sType,
+ offset + i * typeSizeof(sType));
+ mkStore(OP_STORE, sType, sym, indirectOffset, getSrc(&insn->src[0], i))
+ ->setIndirect(0, 1, indirectBuffer);
+ }
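+      // globalAccess bits: 0x1 marks reads, 0x2 marks writes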
+ info->io.globalAccess |= 0x2;
+ break;
+ }
+ case nir_intrinsic_load_ssbo: {
+ const DataType dType = getDType(insn);
+ LValues &newDefs = convert(&insn->dest);
+ Value *indirectBuffer;
+ Value *indirectOffset;
+ uint32_t buffer = getIndirect(&insn->src[0], 0, indirectBuffer);
+ uint32_t offset = getIndirect(&insn->src[1], 0, indirectOffset);
+
+ for (uint8_t i = 0u; i < insn->num_components; ++i)
+ loadFrom(FILE_MEMORY_BUFFER, buffer, dType, newDefs[i], offset, i,
+ indirectOffset, indirectBuffer);
+
+ info->io.globalAccess |= 0x1;
+ break;
+ }
+ case nir_intrinsic_shared_atomic_add:
+ case nir_intrinsic_shared_atomic_and:
+ case nir_intrinsic_shared_atomic_comp_swap:
+ case nir_intrinsic_shared_atomic_exchange:
+ case nir_intrinsic_shared_atomic_or:
+ case nir_intrinsic_shared_atomic_imax:
+ case nir_intrinsic_shared_atomic_imin:
+ case nir_intrinsic_shared_atomic_umax:
+ case nir_intrinsic_shared_atomic_umin:
+ case nir_intrinsic_shared_atomic_xor: {
+ const DataType dType = getDType(insn);
+ LValues &newDefs = convert(&insn->dest);
+ Value *indirectOffset;
+ uint32_t offset = getIndirect(&insn->src[0], 0, indirectOffset);
+ Symbol *sym = mkSymbol(FILE_MEMORY_SHARED, 0, dType, offset);
+ Instruction *atom = mkOp2(OP_ATOM, dType, newDefs[0], sym, getSrc(&insn->src[1], 0));
+ if (op == nir_intrinsic_shared_atomic_comp_swap)
+ atom->setSrc(2, getSrc(&insn->src[2], 0));
+ atom->setIndirect(0, 0, indirectOffset);
+ atom->subOp = getSubOp(op);
+ break;
+ }
+ case nir_intrinsic_ssbo_atomic_add:
+ case nir_intrinsic_ssbo_atomic_and:
+ case nir_intrinsic_ssbo_atomic_comp_swap:
+ case nir_intrinsic_ssbo_atomic_exchange:
+ case nir_intrinsic_ssbo_atomic_or:
+ case nir_intrinsic_ssbo_atomic_imax:
+ case nir_intrinsic_ssbo_atomic_imin:
+ case nir_intrinsic_ssbo_atomic_umax:
+ case nir_intrinsic_ssbo_atomic_umin:
+ case nir_intrinsic_ssbo_atomic_xor: {
+ const DataType dType = getDType(insn);
+ LValues &newDefs = convert(&insn->dest);
+ Value *indirectBuffer;
+ Value *indirectOffset;
+ uint32_t buffer = getIndirect(&insn->src[0], 0, indirectBuffer);
+ uint32_t offset = getIndirect(&insn->src[1], 0, indirectOffset);
+
+ Symbol *sym = mkSymbol(FILE_MEMORY_BUFFER, buffer, dType, offset);
+ Instruction *atom = mkOp2(OP_ATOM, dType, newDefs[0], sym,
+ getSrc(&insn->src[2], 0));
+ if (op == nir_intrinsic_ssbo_atomic_comp_swap)
+ atom->setSrc(2, getSrc(&insn->src[3], 0));
+ atom->setIndirect(0, 0, indirectOffset);
+ atom->setIndirect(0, 1, indirectBuffer);
+ atom->subOp = getSubOp(op);
+
+ info->io.globalAccess |= 0x2;
+ break;
+ }
+ case nir_intrinsic_bindless_image_atomic_add:
+ case nir_intrinsic_bindless_image_atomic_and:
+ case nir_intrinsic_bindless_image_atomic_comp_swap:
+ case nir_intrinsic_bindless_image_atomic_exchange:
+ case nir_intrinsic_bindless_image_atomic_imax:
+ case nir_intrinsic_bindless_image_atomic_umax:
+ case nir_intrinsic_bindless_image_atomic_imin:
+ case nir_intrinsic_bindless_image_atomic_umin:
+ case nir_intrinsic_bindless_image_atomic_or:
+ case nir_intrinsic_bindless_image_atomic_xor:
+ case nir_intrinsic_bindless_image_load:
+ case nir_intrinsic_bindless_image_samples:
+ case nir_intrinsic_bindless_image_size:
+ case nir_intrinsic_bindless_image_store: {
+ std::vector<Value*> srcs, defs;
+ Value *indirect = getSrc(&insn->src[0], 0);
+ DataType ty;
+
+ uint32_t mask = 0;
+ TexInstruction::Target target =
+ convert(nir_intrinsic_image_dim(insn), !!nir_intrinsic_image_array(insn), false);
+ unsigned int argCount = getNIRArgCount(target);
+ uint16_t location = 0;
+
+ if (opInfo.has_dest) {
+ LValues &newDefs = convert(&insn->dest);
+ for (uint8_t i = 0u; i < newDefs.size(); ++i) {
+ defs.push_back(newDefs[i]);
+ mask |= 1 << i;
+ }
+ }
+
+ switch (op) {
+ case nir_intrinsic_bindless_image_atomic_add:
+ case nir_intrinsic_bindless_image_atomic_and:
+ case nir_intrinsic_bindless_image_atomic_comp_swap:
+ case nir_intrinsic_bindless_image_atomic_exchange:
+ case nir_intrinsic_bindless_image_atomic_imax:
+ case nir_intrinsic_bindless_image_atomic_umax:
+ case nir_intrinsic_bindless_image_atomic_imin:
+ case nir_intrinsic_bindless_image_atomic_umin:
+ case nir_intrinsic_bindless_image_atomic_or:
+ case nir_intrinsic_bindless_image_atomic_xor:
+ ty = getDType(insn);
+ mask = 0x1;
+ info->io.globalAccess |= 0x2;
+ break;
+ case nir_intrinsic_bindless_image_load:
+ ty = TYPE_U32;
+ info->io.globalAccess |= 0x1;
+ break;
+ case nir_intrinsic_bindless_image_store:
+ ty = TYPE_U32;
+ mask = 0xf;
+ info->io.globalAccess |= 0x2;
+ break;
+ case nir_intrinsic_bindless_image_samples:
+ mask = 0x8;
+ ty = TYPE_U32;
+ break;
+ case nir_intrinsic_bindless_image_size:
+ ty = TYPE_U32;
+ break;
+ default:
+ unreachable("unhandled image opcode");
+ break;
+ }
+
+ // coords
+ if (opInfo.num_srcs >= 2)
+ for (unsigned int i = 0u; i < argCount; ++i)
+ srcs.push_back(getSrc(&insn->src[1], i));
+
+      // the sample index is just another src added after coords
+ if (opInfo.num_srcs >= 3 && target.isMS())
+ srcs.push_back(getSrc(&insn->src[2], 0));
+
+ if (opInfo.num_srcs >= 4) {
+ unsigned components = opInfo.src_components[3] ? opInfo.src_components[3] : insn->num_components;
+ for (uint8_t i = 0u; i < components; ++i)
+ srcs.push_back(getSrc(&insn->src[3], i));
+ }
+
+ if (opInfo.num_srcs >= 5)
+      // 1 for atomic swap
+ for (uint8_t i = 0u; i < opInfo.src_components[4]; ++i)
+ srcs.push_back(getSrc(&insn->src[4], i));
+
+ TexInstruction *texi = mkTex(getOperation(op), target.getEnum(), location, 0, defs, srcs);
+ texi->tex.format = &nv50_ir::TexInstruction::formatTable[convertGLImgFormat(nir_intrinsic_format(insn))];
+ texi->tex.mask = mask;
+ texi->tex.bindless = true;
+ texi->cache = convert(nir_intrinsic_access(insn));
+ texi->setType(ty);
+ texi->subOp = getSubOp(op);
+
+ if (indirect)
+ texi->setIndirectR(indirect);
+
+ break;
+ }
+ case nir_intrinsic_image_deref_atomic_add:
+ case nir_intrinsic_image_deref_atomic_and:
+ case nir_intrinsic_image_deref_atomic_comp_swap:
+ case nir_intrinsic_image_deref_atomic_exchange:
+ case nir_intrinsic_image_deref_atomic_imax:
+ case nir_intrinsic_image_deref_atomic_umax:
+ case nir_intrinsic_image_deref_atomic_imin:
+ case nir_intrinsic_image_deref_atomic_umin:
+ case nir_intrinsic_image_deref_atomic_or:
+ case nir_intrinsic_image_deref_atomic_xor:
+ case nir_intrinsic_image_deref_load:
+ case nir_intrinsic_image_deref_samples:
+ case nir_intrinsic_image_deref_size:
+ case nir_intrinsic_image_deref_store: {
+ const nir_variable *tex;
+ std::vector<Value*> srcs, defs;
+ Value *indirect;
+ DataType ty;
+
+ uint32_t mask = 0;
+ nir_deref_instr *deref = nir_src_as_deref(insn->src[0]);
+ const glsl_type *type = deref->type;
+ TexInstruction::Target target =
+ convert((glsl_sampler_dim)type->sampler_dimensionality,
+ type->sampler_array, type->sampler_shadow);
+ unsigned int argCount = getNIRArgCount(target);
+ uint16_t location = handleDeref(deref, indirect, tex);
+
+ if (opInfo.has_dest) {
+ LValues &newDefs = convert(&insn->dest);
+ for (uint8_t i = 0u; i < newDefs.size(); ++i) {
+ defs.push_back(newDefs[i]);
+ mask |= 1 << i;
+ }
+ }
+
+ switch (op) {
+ case nir_intrinsic_image_deref_atomic_add:
+ case nir_intrinsic_image_deref_atomic_and:
+ case nir_intrinsic_image_deref_atomic_comp_swap:
+ case nir_intrinsic_image_deref_atomic_exchange:
+ case nir_intrinsic_image_deref_atomic_imax:
+ case nir_intrinsic_image_deref_atomic_umax:
+ case nir_intrinsic_image_deref_atomic_imin:
+ case nir_intrinsic_image_deref_atomic_umin:
+ case nir_intrinsic_image_deref_atomic_or:
+ case nir_intrinsic_image_deref_atomic_xor:
+ ty = getDType(insn);
+ mask = 0x1;
+ info->io.globalAccess |= 0x2;
+ break;
+ case nir_intrinsic_image_deref_load:
+ ty = TYPE_U32;
+ info->io.globalAccess |= 0x1;
+ break;
+ case nir_intrinsic_image_deref_store:
+ ty = TYPE_U32;
+ mask = 0xf;
+ info->io.globalAccess |= 0x2;
+ break;
+ case nir_intrinsic_image_deref_samples:
+ mask = 0x8;
+ ty = TYPE_U32;
+ break;
+ case nir_intrinsic_image_deref_size:
+ ty = TYPE_U32;
+ break;
+ default:
+ unreachable("unhandled image opcode");
+ break;
+ }
+
+ // coords
+ if (opInfo.num_srcs >= 2)
+ for (unsigned int i = 0u; i < argCount; ++i)
+ srcs.push_back(getSrc(&insn->src[1], i));
+
+      // the sample index is just another src added after coords
+ if (opInfo.num_srcs >= 3 && target.isMS())
+ srcs.push_back(getSrc(&insn->src[2], 0));
+
+ if (opInfo.num_srcs >= 4) {
+ unsigned components = opInfo.src_components[3] ? opInfo.src_components[3] : insn->num_components;
+ for (uint8_t i = 0u; i < components; ++i)
+ srcs.push_back(getSrc(&insn->src[3], i));
+ }
+
+ if (opInfo.num_srcs >= 5)
+      // 1 for atomic swap
+ for (uint8_t i = 0u; i < opInfo.src_components[4]; ++i)
+ srcs.push_back(getSrc(&insn->src[4], i));
+
+ TexInstruction *texi = mkTex(getOperation(op), target.getEnum(), location, 0, defs, srcs);
+ texi->tex.bindless = false;
+ texi->tex.format = &nv50_ir::TexInstruction::formatTable[convertGLImgFormat(tex->data.image.format)];
+ texi->tex.mask = mask;
+ texi->cache = getCacheModeFromVar(tex);
+ texi->setType(ty);
+ texi->subOp = getSubOp(op);
+
+ if (indirect)
+ texi->setIndirectR(indirect);
+
+ break;
+ }
+ case nir_intrinsic_store_shared: {
+ DataType sType = getSType(insn->src[0], false, false);
+ Value *indirectOffset;
+ uint32_t offset = getIndirect(&insn->src[1], 0, indirectOffset);
+
+ for (uint8_t i = 0u; i < insn->num_components; ++i) {
+ if (!((1u << i) & nir_intrinsic_write_mask(insn)))
+ continue;
+ Symbol *sym = mkSymbol(FILE_MEMORY_SHARED, 0, sType, offset + i * typeSizeof(sType));
+ mkStore(OP_STORE, sType, sym, indirectOffset, getSrc(&insn->src[0], i));
+ }
+ break;
+ }
+ case nir_intrinsic_load_shared: {
+ const DataType dType = getDType(insn);
+ LValues &newDefs = convert(&insn->dest);
+ Value *indirectOffset;
+ uint32_t offset = getIndirect(&insn->src[0], 0, indirectOffset);
+
+ for (uint8_t i = 0u; i < insn->num_components; ++i)
+ loadFrom(FILE_MEMORY_SHARED, 0, dType, newDefs[i], offset, i, indirectOffset);
+
+ break;
+ }
+ case nir_intrinsic_barrier: {
+ // TODO: add flag to shader_info
+ info->numBarriers = 1;
+ Instruction *bar = mkOp2(OP_BAR, TYPE_U32, NULL, mkImm(0), mkImm(0));
+ bar->fixed = 1;
+ bar->subOp = NV50_IR_SUBOP_BAR_SYNC;
+ break;
+ }
+ case nir_intrinsic_group_memory_barrier:
+ case nir_intrinsic_memory_barrier:
+ case nir_intrinsic_memory_barrier_atomic_counter:
+ case nir_intrinsic_memory_barrier_buffer:
+ case nir_intrinsic_memory_barrier_image:
+ case nir_intrinsic_memory_barrier_shared: {
+ Instruction *bar = mkOp(OP_MEMBAR, TYPE_NONE, NULL);
+ bar->fixed = 1;
+ bar->subOp = getSubOp(op);
+ break;
+ }
+ case nir_intrinsic_shader_clock: {
+ const DataType dType = getDType(insn);
+ LValues &newDefs = convert(&insn->dest);
+
+ loadImm(newDefs[0], 0u);
+ mkOp1(OP_RDSV, dType, newDefs[1], mkSysVal(SV_CLOCK, 0))->fixed = 1;
+ break;
+ }
+ case nir_intrinsic_load_global: {
+ const DataType dType = getDType(insn);
+ LValues &newDefs = convert(&insn->dest);
+ Value *indirectOffset;
+ uint32_t offset = getIndirect(&insn->src[0], 0, indirectOffset);
+
+ for (auto i = 0u; i < insn->num_components; ++i)
+ loadFrom(FILE_MEMORY_GLOBAL, 0, dType, newDefs[i], offset, i, indirectOffset);
+
+ info->io.globalAccess |= 0x1;
+ break;
+ }
+ case nir_intrinsic_store_global: {
+ DataType sType = getSType(insn->src[0], false, false);
+
+ for (auto i = 0u; i < insn->num_components; ++i) {
+ if (!((1u << i) & nir_intrinsic_write_mask(insn)))
+ continue;
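+      // 64-bit values are written as two 32-bit stores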
+ if (typeSizeof(sType) == 8) {
+ Value *split[2];
+ mkSplit(split, 4, getSrc(&insn->src[0], i));
+
+ Symbol *sym = mkSymbol(FILE_MEMORY_GLOBAL, 0, TYPE_U32, i * typeSizeof(sType));
+ mkStore(OP_STORE, TYPE_U32, sym, getSrc(&insn->src[1], 0), split[0]);
+
+ sym = mkSymbol(FILE_MEMORY_GLOBAL, 0, TYPE_U32, i * typeSizeof(sType) + 4);
+ mkStore(OP_STORE, TYPE_U32, sym, getSrc(&insn->src[1], 0), split[1]);
+ } else {
+ Symbol *sym = mkSymbol(FILE_MEMORY_GLOBAL, 0, sType, i * typeSizeof(sType));
+ mkStore(OP_STORE, sType, sym, getSrc(&insn->src[1], 0), getSrc(&insn->src[0], i));
+ }
+ }
+
+ info->io.globalAccess |= 0x2;
+ break;
+ }
default:
ERROR("unknown nir_intrinsic_op %s\n", nir_intrinsic_infos[op].name);
return false;
return true;
}
+Value*
+Converter::convert(nir_load_const_instr *insn, uint8_t idx)
+{
+ Value *val;
+
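+   // emit the immediate load right before the instruction currently being
+   // visited (immInsertPos is saved at the start of visit(nir_instr *))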
+ if (immInsertPos)
+ setPosition(immInsertPos, true);
+ else
+ setPosition(bb, false);
+
+ switch (insn->def.bit_size) {
+ case 64:
+ val = loadImm(getSSA(8), insn->value[idx].u64);
+ break;
+ case 32:
+ val = loadImm(getSSA(4), insn->value[idx].u32);
+ break;
+ case 16:
+ val = loadImm(getSSA(2), insn->value[idx].u16);
+ break;
+ case 8:
+ val = loadImm(getSSA(1), insn->value[idx].u8);
+ break;
+ default:
+ unreachable("unhandled bit size!\n");
+ }
+ setPosition(bb, true);
+ return val;
+}
+
bool
Converter::visit(nir_load_const_instr *insn)
{
assert(insn->def.bit_size <= 64);
-
- LValues &newDefs = convert(&insn->def);
- for (int i = 0; i < insn->def.num_components; i++) {
- switch (insn->def.bit_size) {
- case 64:
- loadImm(newDefs[i], insn->value.u64[i]);
- break;
- case 32:
- loadImm(newDefs[i], insn->value.u32[i]);
- break;
- case 16:
- loadImm(newDefs[i], insn->value.u16[i]);
- break;
- case 8:
- loadImm(newDefs[i], insn->value.u8[i]);
- break;
- }
- }
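+   // defer: immediates are emitted lazily at their uses (see getSrc)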
+ immediates[insn->def.index] = insn;
return true;
}
case nir_op_iabs:
case nir_op_fadd:
case nir_op_iadd:
- case nir_op_fand:
case nir_op_iand:
case nir_op_fceil:
case nir_op_fcos:
case nir_op_umul_high:
case nir_op_fneg:
case nir_op_ineg:
- case nir_op_fnot:
case nir_op_inot:
- case nir_op_for:
case nir_op_ior:
case nir_op_pack_64_2x32_split:
case nir_op_fpow:
case nir_op_ushr:
case nir_op_fsin:
case nir_op_fsqrt:
- case nir_op_fsub:
- case nir_op_isub:
case nir_op_ftrunc:
case nir_op_ishl:
- case nir_op_fxor:
case nir_op_ixor: {
DEFAULT_CHECKS;
LValues &newDefs = convert(&insn->dest);
// those are weird ALU ops and need special handling, because
   // 1. they are always component based
// 2. they basically just merge multiple values into one data type
- case nir_op_imov:
- case nir_op_fmov:
+ case nir_op_mov:
if (!insn->dest.dest.is_ssa && insn->dest.dest.reg.reg->num_array_elems) {
nir_reg_dest& reg = insn->dest.dest.reg;
uint32_t goffset = regToLmemOffset[reg.reg->index];
return mkOp2v(OP_MUL, TYPE_F32, getScratch(), src, proj);
}
+unsigned int
+Converter::getNIRArgCount(TexInstruction::Target& target)
+{
+ unsigned int result = target.getArgCount();
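+   // NIR passes the MS sample index as a separate source rather than as a
+   // coordinate, and cube arrays appear to use one coordinate less than the
+   // target's argument count suggests, so drop both here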
+ if (target.isCube() && target.isArray())
+ result--;
+ if (target.isMS())
+ result--;
+ return result;
+}
+
+uint16_t
+Converter::handleDeref(nir_deref_instr *deref, Value * &indirect, const nir_variable * &tex)
+{
+   typedef std::pair<uint32_t, Value*> DerefPair;
+ std::list<DerefPair> derefs;
+
+ uint16_t result = 0;
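+   // walk the deref chain up to the variable, accumulating constant offsets
+   // and collecting one indirect term per indirectly indexed array level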
+ while (deref->deref_type != nir_deref_type_var) {
+ switch (deref->deref_type) {
+ case nir_deref_type_array: {
+ Value *indirect;
+ uint8_t size = type_size(deref->type, true);
+ result += size * getIndirect(&deref->arr.index, 0, indirect);
+
+ if (indirect) {
+ derefs.push_front(std::make_pair(size, indirect));
+ }
+
+ break;
+ }
+ case nir_deref_type_struct: {
+ result += nir_deref_instr_parent(deref)->type->struct_location_offset(deref->strct.index);
+ break;
+ }
+ case nir_deref_type_var:
+ default:
+ unreachable("nir_deref_type_var reached in handleDeref!");
+ break;
+ }
+ deref = nir_deref_instr_parent(deref);
+ }
+
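+   // fold all collected indirect terms into a single offset value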
+ indirect = NULL;
+ for (std::list<DerefPair>::const_iterator it = derefs.begin(); it != derefs.end(); ++it) {
+ Value *offset = mkOp2v(OP_MUL, TYPE_U32, getSSA(), loadImm(getSSA(), it->first), it->second);
+ if (indirect)
+ indirect = mkOp2v(OP_ADD, TYPE_U32, getSSA(), indirect, offset);
+ else
+ indirect = offset;
+ }
+
+ tex = nir_deref_instr_get_variable(deref);
+ assert(tex);
+
+ return result + tex->data.driver_location;
+}
+
+CacheMode
+Converter::convert(enum gl_access_qualifier access)
+{
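+   // volatile accesses must bypass the caches (CV), coherent ones only go
+   // through L2 (CG), everything else can use the full cache hierarchy (CA)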
+ switch (access) {
+ case ACCESS_VOLATILE:
+ return CACHE_CV;
+ case ACCESS_COHERENT:
+ return CACHE_CG;
+ default:
+ return CACHE_CA;
+ }
+}
+
+CacheMode
+Converter::getCacheModeFromVar(const nir_variable *var)
+{
+ return convert(var->data.access);
+}
+
bool
Converter::visit(nir_tex_instr *insn)
{
int projIdx = nir_tex_instr_src_index(insn, nir_tex_src_projector);
int sampOffIdx = nir_tex_instr_src_index(insn, nir_tex_src_sampler_offset);
int texOffIdx = nir_tex_instr_src_index(insn, nir_tex_src_texture_offset);
+ int sampHandleIdx = nir_tex_instr_src_index(insn, nir_tex_src_sampler_handle);
+ int texHandleIdx = nir_tex_instr_src_index(insn, nir_tex_src_texture_handle);
+
+ bool bindless = sampHandleIdx != -1 || texHandleIdx != -1;
+ assert((sampHandleIdx != -1) == (texHandleIdx != -1));
if (projIdx != -1)
proj = mkOp1v(OP_RCP, TYPE_F32, getScratch(), getSrc(&insn->src[projIdx].src, 0));
srcs.push_back(getSrc(&insn->src[sampOffIdx].src, 0));
sampOffIdx = srcs.size() - 1;
}
+ if (bindless) {
+ // currently we use the lower bits
+ Value *split[2];
+ Value *handle = getSrc(&insn->src[sampHandleIdx].src, 0);
+
+ mkSplit(split, 4, handle);
+
+ srcs.push_back(split[0]);
+ texOffIdx = srcs.size() - 1;
+ }
- r = insn->texture_index;
- s = insn->sampler_index;
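+   // for bindless the handle is passed through an indirect source (see
+   // texOffIdx above); r and s just get fixed placeholder values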
+ r = bindless ? 0xff : insn->texture_index;
+ s = bindless ? 0x1f : insn->sampler_index;
defs.resize(newDefs.size());
for (uint8_t d = 0u; d < newDefs.size(); ++d) {
TexInstruction *texi = mkTex(op, target.getEnum(), r, s, defs, srcs);
texi->tex.levelZero = lz;
texi->tex.mask = mask;
+ texi->tex.bindless = bindless;
if (texOffIdx != -1)
texi->tex.rIndirectSrc = texOffIdx;
}
}
+ if (op == OP_TXG && offsetIdx == -1) {
+ if (nir_tex_instr_has_explicit_tg4_offsets(insn)) {
+ texi->tex.useOffsets = 4;
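+         // load the four explicit TG4 offsets as immediates placed right
+         // before the tex instruction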
+ setPosition(texi, false);
+ for (uint8_t i = 0; i < 4; ++i) {
+ for (uint8_t j = 0; j < 2; ++j) {
+ texi->offset[i][j].set(loadImm(NULL, insn->tg4_offsets[i][j]));
+ texi->offset[i][j].setInsn(texi);
+ }
+ }
+ setPosition(texi, true);
+ }
+ }
+
if (ddxIdx != -1 && ddyIdx != -1) {
for (uint8_t c = 0u; c < target.getDim() + target.isCube(); ++c) {
texi->dPdx[c].set(getSrc(&insn->src[ddxIdx].src, c));
return true;
}
+bool
+Converter::visit(nir_deref_instr *deref)
+{
+   // we just ignore those, because image intrinsics are the only place where
+   // we should end up with deref sources, and those have to backtrack anyway
+ // to get the nir_variable. This code just exists to handle some special
+ // cases.
+ switch (deref->deref_type) {
+ case nir_deref_type_array:
+ case nir_deref_type_struct:
+ case nir_deref_type_var:
+ break;
+ default:
+ ERROR("unknown nir_deref_instr %u\n", deref->deref_type);
+ return false;
+ }
+ return true;
+}
+
bool
Converter::run()
{
NIR_PASS_V(nir, nir_lower_regs_to_ssa);
NIR_PASS_V(nir, nir_lower_load_const_to_scalar);
NIR_PASS_V(nir, nir_lower_vars_to_ssa);
- NIR_PASS_V(nir, nir_lower_alu_to_scalar);
+ NIR_PASS_V(nir, nir_lower_alu_to_scalar, NULL, NULL);
NIR_PASS_V(nir, nir_lower_phis_to_scalar);
do {