struct ir3_compile {
struct ir3_compiler *compiler;
- const struct tgsi_token *tokens;
struct nir_shader *s;
struct ir3 *ir;
/* mapping from nir_register to defining instruction: */
struct hash_table *def_ht;
- /* mapping from nir_variable to ir3_array: */
- struct hash_table *var_ht;
unsigned num_arrays;
/* a common pattern for indirect addressing is to request the
*/
bool unminify_coords;
- /* for looking up which system value is which */
- unsigned sysval_semantics[8];
+ /* on a4xx, for array textures we need to add 0.5 to the array
+ * index coordinate:
+ */
+ bool array_index_add_half;
+
+ /* on a4xx, bitmask of samplers which need astc+srgb workaround: */
+ unsigned astc_srgb;
+
+ unsigned max_texture_index;
/* set if we encounter something we can't handle yet, so we
* can bail cleanly and fallback to TGSI compiler f/e
ctx->flat_bypass = true;
ctx->levels_add_one = false;
ctx->unminify_coords = false;
+ ctx->array_index_add_half = true;
+
+ if (so->type == SHADER_VERTEX)
+ ctx->astc_srgb = so->key.vastc_srgb;
+ else if (so->type == SHADER_FRAGMENT)
+ ctx->astc_srgb = so->key.fastc_srgb;
+
} else {
/* no special handling for "flat" */
ctx->flat_bypass = false;
ctx->levels_add_one = true;
ctx->unminify_coords = true;
+ ctx->array_index_add_half = false;
}
ctx->compiler = compiler;
ctx->so = so;
ctx->def_ht = _mesa_hash_table_create(ctx,
_mesa_hash_pointer, _mesa_key_pointer_equal);
- ctx->var_ht = _mesa_hash_table_create(ctx,
- _mesa_hash_pointer, _mesa_key_pointer_equal);
ctx->block_ht = _mesa_hash_table_create(ctx,
_mesa_hash_pointer, _mesa_key_pointer_equal);
ralloc_free(ctx);
}
-/* global per-array information: */
-struct ir3_array {
- unsigned length, aid;
-};
-
-/* per-block array state: */
-struct ir3_array_value {
- /* TODO drop length/aid, and just have ptr back to ir3_array */
- unsigned length, aid;
- /* initial array element values are phi's, other than for the
- * entry block. The phi src's get added later in a resolve step
- * after we have visited all the blocks, to account for back
- * edges in the cfg.
- */
- struct ir3_instruction **phis;
- /* current array element values (as block is processed). When
- * the array phi's are resolved, it will contain the array state
- * at exit of block, so successor blocks can use it to add their
- * phi srcs.
- */
- struct ir3_instruction *arr[];
-};
-
-/* track array assignments per basic block. When an array is read
- * outside of the same basic block, we can use NIR's dominance-frontier
- * information to figure out where phi nodes are needed.
- */
-struct ir3_nir_block_data {
- unsigned foo;
- /* indexed by array-id (aid): */
- struct ir3_array_value *arrs[];
-};
-
-static struct ir3_nir_block_data *
-get_block_data(struct ir3_compile *ctx, struct ir3_block *block)
-{
- if (!block->data) {
- struct ir3_nir_block_data *bd = ralloc_size(ctx, sizeof(*bd) +
- ((ctx->num_arrays + 1) * sizeof(bd->arrs[0])));
- block->data = bd;
- }
- return block->data;
-}
-
static void
declare_var(struct ir3_compile *ctx, nir_variable *var)
{
+	/* Each NIR array variable becomes an ir3_array: assign it the next
+	 * sequential array id and link it onto the shader's array_list,
+	 * replacing the old nir_variable -> ir3_array hashtable mapping.
+	 */
unsigned length = glsl_get_length(var->type) * 4; /* always vec4, at least with ttn */
struct ir3_array *arr = ralloc(ctx, struct ir3_array);
+	arr->id = ++ctx->num_arrays;
arr->length = length;
-	arr->aid = ++ctx->num_arrays;
-	_mesa_hash_table_insert(ctx->var_ht, var, arr);
+	arr->var = var;
+	list_addtail(&arr->node, &ctx->ir->array_list);
}
-static nir_block *
-nir_block_pred(nir_block *block)
-{
- assert(block->predecessors->entries < 2);
- if (block->predecessors->entries == 0)
- return NULL;
- return (nir_block *)_mesa_set_next_entry(block->predecessors, NULL)->key;
-}
-
-static struct ir3_array_value *
+static struct ir3_array *
get_var(struct ir3_compile *ctx, nir_variable *var)
{
- struct hash_entry *entry = _mesa_hash_table_search(ctx->var_ht, var);
- struct ir3_block *block = ctx->block;
- struct ir3_nir_block_data *bd = get_block_data(ctx, block);
- struct ir3_array *arr = entry->data;
-
- if (!bd->arrs[arr->aid]) {
- struct ir3_array_value *av = ralloc_size(bd, sizeof(*av) +
- (arr->length * sizeof(av->arr[0])));
- struct ir3_array_value *defn = NULL;
- nir_block *pred_block;
-
- av->length = arr->length;
- av->aid = arr->aid;
-
- /* For loops, we have to consider that we have not visited some
- * of the blocks who should feed into the phi (ie. back-edges in
- * the cfg).. for example:
- *
- * loop {
- * block { load_var; ... }
- * if then block {} else block {}
- * block { store_var; ... }
- * if then block {} else block {}
- * block {...}
- * }
- *
- * We can skip the phi if we can chase the block predecessors
- * until finding the block previously defining the array without
- * crossing a block that has more than one predecessor.
- *
- * Otherwise create phi's and resolve them as a post-pass after
- * all the blocks have been visited (to handle back-edges).
- */
-
- for (pred_block = block->nblock;
- pred_block && (pred_block->predecessors->entries < 2) && !defn;
- pred_block = nir_block_pred(pred_block)) {
- struct ir3_block *pblock = get_block(ctx, pred_block);
- struct ir3_nir_block_data *pbd = pblock->data;
- if (!pbd)
- continue;
- defn = pbd->arrs[arr->aid];
- }
-
- if (defn) {
- /* only one possible definer: */
- for (unsigned i = 0; i < arr->length; i++)
- av->arr[i] = defn->arr[i];
- } else if (pred_block) {
- /* not the first block, and multiple potential definers: */
- av->phis = ralloc_size(av, arr->length * sizeof(av->phis[0]));
-
- for (unsigned i = 0; i < arr->length; i++) {
- struct ir3_instruction *phi;
-
- phi = ir3_instr_create2(block, -1, OPC_META_PHI,
- 1 + ctx->impl->num_blocks);
- ir3_reg_create(phi, 0, 0); /* dst */
-
- /* phi's should go at head of block: */
- list_delinit(&phi->node);
- list_add(&phi->node, &block->instr_list);
-
- av->phis[i] = av->arr[i] = phi;
- }
- } else {
- /* Some shaders end up reading array elements without
- * first writing.. so initialize things to prevent null
- * instr ptrs later:
- */
- for (unsigned i = 0; i < arr->length; i++)
- av->arr[i] = create_immed(block, 0);
- }
-
- bd->arrs[arr->aid] = av;
- }
-
- return bd->arrs[arr->aid];
-}
-
-static void
-add_array_phi_srcs(struct ir3_compile *ctx, nir_block *nblock,
- struct ir3_array_value *av, BITSET_WORD *visited)
-{
- struct ir3_block *block;
- struct ir3_nir_block_data *bd;
-
- if (BITSET_TEST(visited, nblock->index))
- return;
-
- BITSET_SET(visited, nblock->index);
-
- block = get_block(ctx, nblock);
- bd = block->data;
-
- if (bd && bd->arrs[av->aid]) {
- struct ir3_array_value *dav = bd->arrs[av->aid];
- for (unsigned i = 0; i < av->length; i++) {
- ir3_reg_create(av->phis[i], 0, IR3_REG_SSA)->instr =
- dav->arr[i];
- }
- } else {
- /* didn't find defn, recurse predecessors: */
- struct set_entry *entry;
- set_foreach(nblock->predecessors, entry) {
- add_array_phi_srcs(ctx, (nir_block *)entry->key, av, visited);
- }
- }
-}
-
-static void
-resolve_array_phis(struct ir3_compile *ctx, struct ir3_block *block)
-{
- struct ir3_nir_block_data *bd = block->data;
- unsigned bitset_words = BITSET_WORDS(ctx->impl->num_blocks);
-
- if (!bd)
- return;
-
- /* TODO use nir dom_frontier to help us with this? */
-
- for (unsigned i = 1; i <= ctx->num_arrays; i++) {
- struct ir3_array_value *av = bd->arrs[i];
- BITSET_WORD visited[bitset_words];
- struct set_entry *entry;
-
- if (!(av && av->phis))
- continue;
-
- memset(visited, 0, sizeof(visited));
- set_foreach(block->nblock->predecessors, entry) {
- add_array_phi_srcs(ctx, (nir_block *)entry->key, av, visited);
- }
+ list_for_each_entry (struct ir3_array, arr, &ctx->ir->array_list, node) {
+ if (arr->var == var)
+ return arr;
}
+ compile_error(ctx, "bogus var: %s\n", var->name);
+ return NULL;
}
/* allocate a n element value array (to be populated by caller) and
static struct ir3_instruction **
get_dst(struct ir3_compile *ctx, nir_dest *dst, unsigned n)
{
+ compile_assert(ctx, dst->is_ssa);
if (dst->is_ssa) {
return __get_dst(ctx, &dst->ssa, n);
} else {
get_src(struct ir3_compile *ctx, nir_src *src)
{
struct hash_entry *entry;
+ compile_assert(ctx, src->is_ssa);
if (src->is_ssa) {
entry = _mesa_hash_table_search(ctx->def_ht, src->ssa);
} else {
{
struct ir3_instruction *mov;
- mov = ir3_instr_create(block, 1, 0);
+ mov = ir3_instr_create(block, OPC_MOV);
mov->cat1.src_type = TYPE_U32;
mov->cat1.dst_type = TYPE_U32;
ir3_reg_create(mov, 0, 0);
{
struct ir3_instruction *mov;
- mov = ir3_instr_create(ctx->block, 1, 0);
+ mov = ir3_instr_create(ctx->block, OPC_MOV);
/* TODO get types right? */
mov->cat1.src_type = TYPE_F32;
mov->cat1.dst_type = TYPE_F32;
}
static struct ir3_instruction *
-create_uniform_indirect(struct ir3_compile *ctx, unsigned n,
+create_uniform_indirect(struct ir3_compile *ctx, int n,
struct ir3_instruction *address)
{
struct ir3_instruction *mov;
- mov = ir3_instr_create(ctx->block, 1, 0);
+ mov = ir3_instr_create(ctx->block, OPC_MOV);
mov->cat1.src_type = TYPE_U32;
mov->cat1.dst_type = TYPE_U32;
ir3_reg_create(mov, 0, 0);
- ir3_reg_create(mov, n, IR3_REG_CONST | IR3_REG_RELATIV);
+ ir3_reg_create(mov, 0, IR3_REG_CONST | IR3_REG_RELATIV)->array.offset = n;
ir3_instr_set_address(mov, address);
if (arrsz == 0)
return NULL;
- collect = ir3_instr_create2(block, -1, OPC_META_FI, 1 + arrsz);
+ collect = ir3_instr_create2(block, OPC_META_FI, 1 + arrsz);
ir3_reg_create(collect, 0, 0); /* dst */
for (unsigned i = 0; i < arrsz; i++)
ir3_reg_create(collect, 0, IR3_REG_SSA)->instr = arr[i];
}
static struct ir3_instruction *
-create_indirect_load(struct ir3_compile *ctx, unsigned arrsz, unsigned n,
+create_indirect_load(struct ir3_compile *ctx, unsigned arrsz, int n,
struct ir3_instruction *address, struct ir3_instruction *collect)
{
struct ir3_block *block = ctx->block;
struct ir3_instruction *mov;
struct ir3_register *src;
- mov = ir3_instr_create(block, 1, 0);
+ mov = ir3_instr_create(block, OPC_MOV);
mov->cat1.src_type = TYPE_U32;
mov->cat1.dst_type = TYPE_U32;
ir3_reg_create(mov, 0, 0);
src = ir3_reg_create(mov, 0, IR3_REG_SSA | IR3_REG_RELATIV);
src->instr = collect;
src->size = arrsz;
- src->offset = n;
+ src->array.offset = n;
ir3_instr_set_address(mov, address);
return mov;
}
+/* Load one (scalar) element of an array variable.  Relative (indirect)
+ * addressing is used iff address!=NULL, otherwise n is a direct offset.
+ * The src reg is linked to arr->last_write so the mov carries a
+ * dependency on the most recent store to the array.
+ */
static struct ir3_instruction *
-create_indirect_store(struct ir3_compile *ctx, unsigned arrsz, unsigned n,
- struct ir3_instruction *src, struct ir3_instruction *address,
- struct ir3_instruction *collect)
+create_var_load(struct ir3_compile *ctx, struct ir3_array *arr, int n,
+ struct ir3_instruction *address)
+{
+ struct ir3_block *block = ctx->block;
+ struct ir3_instruction *mov;
+ struct ir3_register *src;
+
+ mov = ir3_instr_create(block, OPC_MOV);
+ mov->cat1.src_type = TYPE_U32;
+ mov->cat1.dst_type = TYPE_U32;
+ ir3_reg_create(mov, 0, 0);
+ src = ir3_reg_create(mov, 0, IR3_REG_ARRAY |
+ COND(address, IR3_REG_RELATIV));
+ src->instr = arr->last_write;
+ src->size = arr->length;
+ src->array.id = arr->id;
+ src->array.offset = n;
+
+ if (address)
+ ir3_instr_set_address(mov, address);
+
+ /* reads also update last_access, so a later store is ordered
+ * after this load:
+ */
+ arr->last_access = mov;
+
+ return mov;
+}
+
+/* Store one (scalar) element of an array variable.  Relative (indirect)
+ * addressing is used iff address!=NULL, otherwise n is a direct offset.
+ * The dst reg is linked to arr->last_access to order this store after
+ * any preceding load/store of the array.
+ */
+static struct ir3_instruction *
+create_var_store(struct ir3_compile *ctx, struct ir3_array *arr, int n,
+ struct ir3_instruction *src, struct ir3_instruction *address)
{
struct ir3_block *block = ctx->block;
struct ir3_instruction *mov;
struct ir3_register *dst;
- mov = ir3_instr_create(block, 1, 0);
+ mov = ir3_instr_create(block, OPC_MOV);
mov->cat1.src_type = TYPE_U32;
mov->cat1.dst_type = TYPE_U32;
- dst = ir3_reg_create(mov, 0, IR3_REG_RELATIV);
- dst->size = arrsz;
- dst->offset = n;
+ dst = ir3_reg_create(mov, 0, IR3_REG_ARRAY |
+ COND(address, IR3_REG_RELATIV));
+ dst->instr = arr->last_access;
+ dst->size = arr->length;
+ dst->array.id = arr->id;
+ dst->array.offset = n;
ir3_reg_create(mov, 0, IR3_REG_SSA)->instr = src;
- mov->fanin = collect;
ir3_instr_set_address(mov, address);
+ /* this store becomes the new producer for subsequent loads/stores: */
+ arr->last_write = arr->last_access = mov;
+
return mov;
}
{
struct ir3_instruction *in;
- in = ir3_instr_create(block, -1, OPC_META_INPUT);
+ in = ir3_instr_create(block, OPC_META_INPUT);
in->inout.block = block;
ir3_reg_create(in, n, 0);
}
}
+/* NOTE: this creates the "TGSI" style fragface (ie. input slot
+ * VARYING_SLOT_FACE). For NIR style nir_intrinsic_load_front_face
+ * we can just use the value from hw directly (since it is boolean)
+ */
static struct ir3_instruction *
create_frag_face(struct ir3_compile *ctx, unsigned comp)
{
*/
static void
split_dest(struct ir3_block *block, struct ir3_instruction **dst,
- struct ir3_instruction *src, unsigned n)
+ struct ir3_instruction *src, unsigned base, unsigned n)
{
struct ir3_instruction *prev = NULL;
for (int i = 0, j = 0; i < n; i++) {
- struct ir3_instruction *split =
- ir3_instr_create(block, -1, OPC_META_FO);
+ struct ir3_instruction *split = ir3_instr_create(block, OPC_META_FO);
ir3_reg_create(split, 0, IR3_REG_SSA);
ir3_reg_create(split, 0, IR3_REG_SSA)->instr = src;
- split->fo.off = i;
+ split->fo.off = i + base;
if (prev) {
split->cp.left = prev;
}
prev = split;
- if (src->regs[0]->wrmask & (1 << i))
+ if (src->regs[0]->wrmask & (1 << (i + base)))
dst[j++] = split;
}
}
nir_const_value *const_offset;
/* UBO addresses are the first driver params: */
unsigned ubo = regid(ctx->so->first_driver_param + IR3_UBOS_OFF, 0);
- unsigned off = intr->const_index[0];
+ int off = 0;
/* First src is ubo index, which could either be an immed or not: */
src0 = get_src(ctx, &intr->src[0])[0];
const_offset = nir_src_as_const_value(intr->src[1]);
if (const_offset) {
- off += const_offset->u[0];
+ off += const_offset->u32[0];
} else {
/* For load_ubo_indirect, second src is indirect offset: */
src1 = get_src(ctx, &intr->src[1])[0];
{
nir_deref_var *dvar = intr->variables[0];
nir_deref_array *darr = nir_deref_as_array(dvar->deref.child);
- struct ir3_array_value *arr = get_var(ctx, dvar->var);
+ struct ir3_array *arr = get_var(ctx, dvar->var);
compile_assert(ctx, dvar->deref.child &&
(dvar->deref.child->deref_type == nir_deref_type_array));
for (int i = 0; i < intr->num_components; i++) {
unsigned n = darr->base_offset * 4 + i;
compile_assert(ctx, n < arr->length);
- dst[i] = arr->arr[n];
+ dst[i] = create_var_load(ctx, arr, n, NULL);
}
break;
case nir_deref_array_type_indirect: {
/* for indirect, we need to collect all the array elements: */
- struct ir3_instruction *collect =
- create_collect(ctx->block, arr->arr, arr->length);
struct ir3_instruction *addr =
get_addr(ctx, get_src(ctx, &darr->indirect)[0]);
for (int i = 0; i < intr->num_components; i++) {
unsigned n = darr->base_offset * 4 + i;
compile_assert(ctx, n < arr->length);
- dst[i] = create_indirect_load(ctx, arr->length, n, addr, collect);
+ dst[i] = create_var_load(ctx, arr, n, addr);
}
break;
}
{
nir_deref_var *dvar = intr->variables[0];
nir_deref_array *darr = nir_deref_as_array(dvar->deref.child);
- struct ir3_array_value *arr = get_var(ctx, dvar->var);
- struct ir3_instruction **src;
+ struct ir3_array *arr = get_var(ctx, dvar->var);
+ struct ir3_instruction *addr, **src;
+ unsigned wrmask = nir_intrinsic_write_mask(intr);
compile_assert(ctx, dvar->deref.child &&
(dvar->deref.child->deref_type == nir_deref_type_array));
switch (darr->deref_array_type) {
case nir_deref_array_type_direct:
- /* direct access does not require anything special: */
- for (int i = 0; i < intr->num_components; i++) {
- /* ttn doesn't generate partial writemasks */
- assert(intr->const_index[0] ==
- (1 << intr->num_components) - 1);
-
- unsigned n = darr->base_offset * 4 + i;
- compile_assert(ctx, n < arr->length);
- arr->arr[n] = src[i];
- }
+ addr = NULL;
break;
- case nir_deref_array_type_indirect: {
- /* for indirect, create indirect-store and fan that out: */
- struct ir3_instruction *collect =
- create_collect(ctx->block, arr->arr, arr->length);
- struct ir3_instruction *addr =
- get_addr(ctx, get_src(ctx, &darr->indirect)[0]);
- for (int i = 0; i < intr->num_components; i++) {
- /* ttn doesn't generate partial writemasks */
- assert(intr->const_index[0] ==
- (1 << intr->num_components) - 1);
-
- struct ir3_instruction *store;
- unsigned n = darr->base_offset * 4 + i;
- compile_assert(ctx, n < arr->length);
-
- store = create_indirect_store(ctx, arr->length,
- n, src[i], addr, collect);
-
- store->fanin->fi.aid = arr->aid;
-
- /* TODO: probably split this out to be used for
- * store_output_indirect? or move this into
- * create_indirect_store()?
- */
- for (int j = i; j < arr->length; j += intr->num_components) {
- struct ir3_instruction *split;
-
- split = ir3_instr_create(ctx->block, -1, OPC_META_FO);
- split->fo.off = j;
- ir3_reg_create(split, 0, 0);
- ir3_reg_create(split, 0, IR3_REG_SSA)->instr = store;
-
- arr->arr[j] = split;
- }
- }
- /* fixup fanout/split neighbors: */
- for (int i = 0; i < arr->length; i++) {
- arr->arr[i]->cp.right = (i < (arr->length - 1)) ?
- arr->arr[i+1] : NULL;
- arr->arr[i]->cp.left = (i > 0) ?
- arr->arr[i-1] : NULL;
- }
+ case nir_deref_array_type_indirect:
+ addr = get_addr(ctx, get_src(ctx, &darr->indirect)[0]);
break;
- }
default:
compile_error(ctx, "Unhandled store deref type: %u\n",
darr->deref_array_type);
- break;
+ return;
+ }
+
+ for (int i = 0; i < intr->num_components; i++) {
+ if (!(wrmask & (1 << i)))
+ continue;
+ unsigned n = darr->base_offset * 4 + i;
+ compile_assert(ctx, n < arr->length);
+ create_var_store(ctx, arr, n, src[i], addr);
}
}
const nir_intrinsic_info *info = &nir_intrinsic_infos[intr->intrinsic];
struct ir3_instruction **dst, **src;
struct ir3_block *b = ctx->block;
- unsigned idx = intr->const_index[0];
nir_const_value *const_offset;
+ int idx;
if (info->has_dest) {
dst = get_dst(ctx, &intr->dest, intr->num_components);
switch (intr->intrinsic) {
case nir_intrinsic_load_uniform:
+ idx = nir_intrinsic_base(intr);
const_offset = nir_src_as_const_value(intr->src[0]);
if (const_offset) {
- idx += const_offset->u[0];
+ idx += const_offset->u32[0];
for (int i = 0; i < intr->num_components; i++) {
unsigned n = idx * 4 + i;
dst[i] = create_uniform(ctx, n);
} else {
src = get_src(ctx, &intr->src[0]);
for (int i = 0; i < intr->num_components; i++) {
- unsigned n = idx * 4 + i;
+ int n = idx * 4 + i;
dst[i] = create_uniform_indirect(ctx, n,
get_addr(ctx, src[0]));
}
emit_intrinsic_load_ubo(ctx, intr, dst);
break;
case nir_intrinsic_load_input:
+ idx = nir_intrinsic_base(intr);
const_offset = nir_src_as_const_value(intr->src[0]);
if (const_offset) {
- idx += const_offset->u[0];
+ idx += const_offset->u32[0];
for (int i = 0; i < intr->num_components; i++) {
unsigned n = idx * 4 + i;
dst[i] = ctx->ir->inputs[n];
emit_intrinsic_store_var(ctx, intr);
break;
case nir_intrinsic_store_output:
+ idx = nir_intrinsic_base(intr);
const_offset = nir_src_as_const_value(intr->src[1]);
compile_assert(ctx, const_offset != NULL);
- idx += const_offset->u[0];
+ idx += const_offset->u32[0];
src = get_src(ctx, &intr->src[0]);
for (int i = 0; i < intr->num_components; i++) {
break;
case nir_intrinsic_load_vertex_id_zero_base:
if (!ctx->vertex_id) {
- ctx->vertex_id = create_input(ctx->block, 0);
+ ctx->vertex_id = create_input(b, 0);
add_sysval_input(ctx, SYSTEM_VALUE_VERTEX_ID_ZERO_BASE,
ctx->vertex_id);
}
break;
case nir_intrinsic_load_instance_id:
if (!ctx->instance_id) {
- ctx->instance_id = create_input(ctx->block, 0);
+ ctx->instance_id = create_input(b, 0);
add_sysval_input(ctx, SYSTEM_VALUE_INSTANCE_ID,
ctx->instance_id);
}
dst[0] = ctx->instance_id;
break;
case nir_intrinsic_load_user_clip_plane:
+ idx = nir_intrinsic_ucp_id(intr);
for (int i = 0; i < intr->num_components; i++) {
unsigned n = idx * 4 + i;
dst[i] = create_driver_param(ctx, IR3_DP_UCP0_X + n);
}
break;
+ case nir_intrinsic_load_front_face:
+ if (!ctx->frag_face) {
+ ctx->so->frag_face = true;
+ ctx->frag_face = create_input(b, 0);
+ ctx->frag_face->regs[0]->flags |= IR3_REG_HALF;
+ }
+ /* for fragface, we always get -1 or 0, but that is inverse
+ * of what nir expects (where ~0 is true). Unfortunately
+ * trying to widen from half to full in add.s seems to do a
+ * non-sign-extending widen (resulting in something that
+ * gets interpreted as float Inf??)
+ */
+ dst[0] = ir3_COV(b, ctx->frag_face, TYPE_S16, TYPE_S32);
+ dst[0] = ir3_ADD_S(b, dst[0], 0, create_immed(b, 1), 0);
+ break;
case nir_intrinsic_discard_if:
case nir_intrinsic_discard: {
struct ir3_instruction *cond, *kill;
struct ir3_instruction **dst = get_dst_ssa(ctx, &instr->def,
instr->def.num_components);
for (int i = 0; i < instr->def.num_components; i++)
- dst[i] = create_immed(ctx->block, instr->value.u[i]);
+ dst[i] = create_immed(ctx->block, instr->value.u32[i]);
}
static void
ddy = get_src(ctx, &tex->src[i].src);
break;
default:
- compile_error(ctx, "Unhandled NIR tex serc type: %d\n",
+ compile_error(ctx, "Unhandled NIR tex src type: %d\n",
tex->src[i].src_type);
return;
}
}
/* the array coord for cube arrays needs 0.5 added to it */
- if (tex->sampler_dim == GLSL_SAMPLER_DIM_CUBE && tex->is_array &&
- opc != OPC_ISAML)
- coord[3] = ir3_ADD_F(b, coord[3], 0, create_immed(b, fui(0.5)), 0);
+ if (ctx->array_index_add_half && tex->is_array && (opc != OPC_ISAML))
+ coord[coords] = ir3_ADD_F(b, coord[coords], 0, create_immed(b, fui(0.5)), 0);
/*
* lay out the first argument in the proper order:
if (opc == OPC_GETLOD)
type = TYPE_U32;
- sam = ir3_SAM(b, opc, type, TGSI_WRITEMASK_XYZW,
- flags, tex->sampler_index, tex->sampler_index,
- create_collect(b, src0, nsrc0),
- create_collect(b, src1, nsrc1));
+ unsigned tex_idx = tex->texture_index;
+
+ ctx->max_texture_index = MAX2(ctx->max_texture_index, tex_idx);
+
+ struct ir3_instruction *col0 = create_collect(b, src0, nsrc0);
+ struct ir3_instruction *col1 = create_collect(b, src1, nsrc1);
- split_dest(b, dst, sam, 4);
+ sam = ir3_SAM(b, opc, type, TGSI_WRITEMASK_XYZW, flags,
+ tex_idx, tex_idx, col0, col1);
+
+ if ((ctx->astc_srgb & (1 << tex_idx)) && !nir_tex_instr_is_query(tex)) {
+ /* only need first 3 components: */
+ sam->regs[0]->wrmask = 0x7;
+ split_dest(b, dst, sam, 0, 3);
+
+ /* we need to sample the alpha separately with a non-ASTC
+ * texture state:
+ */
+ sam = ir3_SAM(b, opc, type, TGSI_WRITEMASK_W, flags,
+ tex_idx, tex_idx, col0, col1);
+
+ array_insert(ctx->ir->astc_srgb, sam);
+
+ /* fixup .w component: */
+ split_dest(b, &dst[3], sam, 3, 1);
+ } else {
+ /* normal (non-workaround) case: */
+ split_dest(b, dst, sam, 0, 4);
+ }
/* GETLOD returns results in 4.8 fixed point */
if (opc == OPC_GETLOD) {
dst = get_dst(ctx, &tex->dest, 1);
sam = ir3_SAM(b, OPC_GETINFO, TYPE_U32, TGSI_WRITEMASK_Z, 0,
- tex->sampler_index, tex->sampler_index, NULL, NULL);
+ tex->texture_index, tex->texture_index, NULL, NULL);
/* even though there is only one component, since it ends
* up in .z rather than .x, we need a split_dest()
*/
- split_dest(b, dst, sam, 3);
+ split_dest(b, dst, sam, 0, 3);
/* The # of levels comes from getinfo.z. We need to add 1 to it, since
* the value in TEX_CONST_0 is zero-based.
lod = get_src(ctx, &tex->src[0].src)[0];
sam = ir3_SAM(b, OPC_GETSIZE, TYPE_U32, TGSI_WRITEMASK_XYZW, flags,
- tex->sampler_index, tex->sampler_index, lod, NULL);
+ tex->texture_index, tex->texture_index, lod, NULL);
- split_dest(b, dst, sam, 4);
+ split_dest(b, dst, sam, 0, 4);
/* Array size actually ends up in .w rather than .z. This doesn't
* matter for miplevel 0, but for higher mips the value in z is
dst = get_dst(ctx, &nphi->dest, 1);
- phi = ir3_instr_create2(ctx->block, -1, OPC_META_PHI,
+ phi = ir3_instr_create2(ctx->block, OPC_META_PHI,
1 + exec_list_length(&nphi->srcs));
ir3_reg_create(phi, 0, 0); /* dst */
phi->phi.nphi = nphi;
nir_phi_instr *nphi;
/* phi's only come at start of block: */
- if (!(is_meta(instr) && (instr->opc == OPC_META_PHI)))
+ if (instr->opc != OPC_META_PHI)
break;
if (!instr->phi.nphi)
foreach_list_typed(nir_phi_src, nsrc, node, &nphi->srcs) {
struct ir3_instruction *src = get_src(ctx, &nsrc->src)[0];
+
+ /* NOTE: src might not be in the same block as it comes from
+ * according to the phi.. but in the end the backend assumes
+ * it will be able to assign the same register to each (which
+ * only works if it is assigned in the src block), so insert
+ * an extra mov to make sure the phi src is assigned in the
+ * block it comes from:
+ */
+ src = ir3_MOV(get_block(ctx, nsrc->pred), src, TYPE_U32);
+
ir3_reg_create(instr, 0, IR3_REG_SSA)->instr = src;
}
}
-
- resolve_array_phis(ctx, block);
}
static void
case VARYING_SLOT_CLIP_DIST0:
case VARYING_SLOT_CLIP_DIST1:
break;
+ case VARYING_SLOT_CLIP_VERTEX:
+ /* handled entirely in nir_lower_clip: */
+ return;
default:
if (slot >= VARYING_SLOT_VAR0)
break;
if (ctx->so->type == SHADER_FRAGMENT) {
// TODO maybe a helper for fi since we need it a few places..
struct ir3_instruction *instr;
- instr = ir3_instr_create(ctx->block, -1, OPC_META_FI);
+ instr = ir3_instr_create(ctx->block, OPC_META_FI);
ir3_reg_create(instr, 0, 0);
ir3_reg_create(instr, 0, IR3_REG_SSA); /* r0.x */
ir3_reg_create(instr, 0, IR3_REG_SSA); /* r0.y */
setup_output(ctx, var);
}
- /* Setup variables (which should only be arrays): */
+ /* Setup global variables (which should only be arrays): */
nir_foreach_variable(var, &ctx->s->globals) {
declare_var(ctx, var);
}
+ /* Setup local variables (which should only be arrays): */
+ /* NOTE: need to do something more clever when we support >1 fxn */
+ nir_foreach_variable(var, &fxn->locals) {
+ declare_var(ctx, var);
+ }
+
/* And emit the body: */
ctx->impl = fxn;
emit_function(ctx, fxn);
ir->inputs = inputs;
}
+/* Fixup tex sampler state for astc/srgb workaround instructions. We
+ * need to assign the tex state indexes for these after we know the
+ * max tex index.
+ */
+static void
+fixup_astc_srgb(struct ir3_compile *ctx)
+{
+ struct ir3_shader_variant *so = ctx->so;
+ /* indexed by original tex idx, value is newly assigned alpha sampler
+ * state tex idx. Zero is invalid since there is at least one sampler
+ * if we get here.
+ */
+ unsigned alt_tex_state[16] = {0};
+ /* alternate tex-state slots start just past the highest tex index
+ * seen while emitting the shader:
+ */
+ unsigned tex_idx = ctx->max_texture_index + 1;
+ unsigned idx = 0;
+
+ so->astc_srgb.base = tex_idx;
+
+ /* walk all the workaround sam instructions recorded during emit,
+ * assigning one alternate slot per distinct original tex idx and
+ * rewriting each sam to use its alternate slot:
+ */
+ for (unsigned i = 0; i < ctx->ir->astc_srgb_count; i++) {
+ struct ir3_instruction *sam = ctx->ir->astc_srgb[i];
+
+ compile_assert(ctx, sam->cat5.tex < ARRAY_SIZE(alt_tex_state));
+
+ if (alt_tex_state[sam->cat5.tex] == 0) {
+ /* assign new alternate/alpha tex state slot: */
+ alt_tex_state[sam->cat5.tex] = tex_idx++;
+ so->astc_srgb.orig_idx[idx++] = sam->cat5.tex;
+ so->astc_srgb.count++;
+ }
+
+ sam->cat5.tex = alt_tex_state[sam->cat5.tex];
+ }
+}
+
int
ir3_compile_shader_nir(struct ir3_compiler *compiler,
struct ir3_shader_variant *so)
* in which case we need to propagate the half-reg flag
* up to the definer so that RA sees it:
*/
- if (is_meta(out) && (out->opc == OPC_META_FO)) {
+ if (out->opc == OPC_META_FO) {
out = out->regs[1]->instr;
out->regs[0]->flags |= IR3_REG_HALF;
}
- if (out->category == 1) {
+ if (out->opc == OPC_MOV) {
out->cat1.dst_type = half_type(out->cat1.dst_type);
}
}
so->inputs[i].compmask = compmask;
}
+ if (ctx->astc_srgb)
+ fixup_astc_srgb(ctx);
+
/* We need to do legalize after (for frag shader's) the "bary.f"
* offsets (inloc) have been assigned.
*/