#include "util/u_math.h"
#include "ir3_compiler.h"
+#include "ir3_image.h"
#include "ir3_shader.h"
#include "ir3_nir.h"
#include "instr-a3xx.h"
#include "ir3.h"
+#include "ir3_context.h"
-/* for conditionally setting boolean flag(s): */
-#define COND(bool, val) ((bool) ? (val) : 0)
-
-#define DBG(fmt, ...) \
- do { debug_printf("%s:%d: "fmt "\n", \
- __FUNCTION__, __LINE__, ##__VA_ARGS__); } while (0)
-
-struct ir3_context {
- struct ir3_compiler *compiler;
-
- struct nir_shader *s;
-
- struct nir_instr *cur_instr; /* current instruction, just for debug */
-
- struct ir3 *ir;
- struct ir3_shader_variant *so;
-
- struct ir3_block *block; /* the current block */
- struct ir3_block *in_block; /* block created for shader inputs */
-
- nir_function_impl *impl;
-
-	/* For fragment shaders, varyings are not actual shader inputs;
- * instead the hw passes a varying-coord which is used with
- * bary.f.
- *
-	 * But NIR doesn't know that; it still declares varyings as
- * inputs. So we do all the input tracking normally and fix
- * things up after compile_instructions()
- *
- * NOTE that frag_vcoord is the hardware position (possibly it
-	 * is actually an index or tag or some such.. it is *not* a
-	 * value that can be directly used for gl_FragCoord..)
- */
- struct ir3_instruction *frag_vcoord;
-
- /* for fragment shaders, for gl_FrontFacing and gl_FragCoord: */
- struct ir3_instruction *frag_face, *frag_coord;
-
- /* For vertex shaders, keep track of the system values sources */
- struct ir3_instruction *vertex_id, *basevertex, *instance_id;
-
- /* For fragment shaders: */
- struct ir3_instruction *samp_id, *samp_mask_in;
-
- /* Compute shader inputs: */
- struct ir3_instruction *local_invocation_id, *work_group_id;
-
- /* mapping from nir_register to defining instruction: */
- struct hash_table *def_ht;
-
- unsigned num_arrays;
-
- /* a common pattern for indirect addressing is to request the
- * same address register multiple times. To avoid generating
- * duplicate instruction sequences (which our backend does not
-	 * try to clean up, since that should be done at the NIR level)
- * we cache the address value generated for a given src value:
- *
-	 * Note that we have to cache these per alignment, since the same
-	 * src used for an array of vec1 cannot also be used for an
- * array of vec4.
- */
- struct hash_table *addr_ht[4];
-
- /* last dst array, for indirect we need to insert a var-store.
- */
- struct ir3_instruction **last_dst;
- unsigned last_dst_n;
-
- /* maps nir_block to ir3_block, mostly for the purposes of
- * figuring out the blocks successors
- */
- struct hash_table *block_ht;
-
- /* on a4xx, bitmask of samplers which need astc+srgb workaround: */
- unsigned astc_srgb;
-
- unsigned samples; /* bitmask of x,y sample shifts */
-
- unsigned max_texture_index;
-
- /* set if we encounter something we can't handle yet, so we
-	 * can bail cleanly and fall back to the TGSI compiler f/e
- */
- bool error;
-};
-
-/* gpu pointer size in units of 32bit registers/slots */
-static unsigned pointer_size(struct ir3_context *ctx)
-{
- return (ctx->compiler->gpu_id >= 500) ? 2 : 1;
-}
-
-static struct ir3_instruction * create_immed(struct ir3_block *block, uint32_t val);
-static struct ir3_block * get_block(struct ir3_context *ctx, const nir_block *nblock);
-
-
-static struct ir3_context *
-compile_init(struct ir3_compiler *compiler,
- struct ir3_shader_variant *so)
-{
- struct ir3_context *ctx = rzalloc(NULL, struct ir3_context);
-
- if (compiler->gpu_id >= 400) {
- if (so->type == MESA_SHADER_VERTEX) {
- ctx->astc_srgb = so->key.vastc_srgb;
- } else if (so->type == MESA_SHADER_FRAGMENT) {
- ctx->astc_srgb = so->key.fastc_srgb;
- }
-
- } else {
- if (so->type == MESA_SHADER_VERTEX) {
- ctx->samples = so->key.vsamples;
- } else if (so->type == MESA_SHADER_FRAGMENT) {
- ctx->samples = so->key.fsamples;
- }
- }
-
- ctx->compiler = compiler;
- ctx->so = so;
- ctx->def_ht = _mesa_hash_table_create(ctx,
- _mesa_hash_pointer, _mesa_key_pointer_equal);
- ctx->block_ht = _mesa_hash_table_create(ctx,
- _mesa_hash_pointer, _mesa_key_pointer_equal);
-
- /* TODO: maybe generate some sort of bitmask of what key
-	 * lowers vs what the shader has (ie. no need to run texture
-	 * clamp lowering if there are no texture sample instrs)..
-	 * although this should be done further up the stack to avoid
- * creating duplicate variants..
- */
-
- if (ir3_key_lowers_nir(&so->key)) {
- nir_shader *s = nir_shader_clone(ctx, so->shader->nir);
- ctx->s = ir3_optimize_nir(so->shader, s, &so->key);
- } else {
- /* fast-path for shader key that lowers nothing in NIR: */
- ctx->s = so->shader->nir;
- }
-
- /* this needs to be the last pass run, so do this here instead of
- * in ir3_optimize_nir():
- */
- NIR_PASS_V(ctx->s, nir_lower_locals_to_regs);
- NIR_PASS_V(ctx->s, nir_convert_from_ssa, true);
-
- if (ir3_shader_debug & IR3_DBG_DISASM) {
- printf("dump nir%dv%d: type=%d, k={cts=%u,hp=%u}",
- so->shader->id, so->id, so->type,
- so->key.color_two_side, so->key.half_precision);
- nir_print_shader(ctx->s, stdout);
- }
-
- if (shader_debug_enabled(so->type)) {
- fprintf(stderr, "NIR (final form) for %s shader:\n",
- _mesa_shader_stage_to_string(so->type));
- nir_print_shader(ctx->s, stderr);
- }
-
- ir3_nir_scan_driver_consts(ctx->s, &so->const_layout);
-
- so->num_uniforms = ctx->s->num_uniforms;
- so->num_ubos = ctx->s->info.num_ubos;
-
- /* Layout of constant registers, each section aligned to vec4. Note
- * that pointer size (ubo, etc) changes depending on generation.
- *
- * user consts
- * UBO addresses
- * SSBO sizes
- * if (vertex shader) {
- * driver params (IR3_DP_*)
- * if (stream_output.num_outputs > 0)
- * stream-out addresses
- * }
- * immediates
- *
- * Immediates go last mostly because they are inserted in the CP pass
- * after the nir -> ir3 frontend.
- */
- unsigned constoff = align(ctx->s->num_uniforms, 4);
- unsigned ptrsz = pointer_size(ctx);
-
- memset(&so->constbase, ~0, sizeof(so->constbase));
-
- if (so->num_ubos > 0) {
- so->constbase.ubo = constoff;
- constoff += align(ctx->s->info.num_ubos * ptrsz, 4) / 4;
- }
-
- if (so->const_layout.ssbo_size.count > 0) {
- unsigned cnt = so->const_layout.ssbo_size.count;
- so->constbase.ssbo_sizes = constoff;
- constoff += align(cnt, 4) / 4;
- }
-
- if (so->const_layout.image_dims.count > 0) {
- unsigned cnt = so->const_layout.image_dims.count;
- so->constbase.image_dims = constoff;
- constoff += align(cnt, 4) / 4;
- }
-
- unsigned num_driver_params = 0;
- if (so->type == MESA_SHADER_VERTEX) {
- num_driver_params = IR3_DP_VS_COUNT;
- } else if (so->type == MESA_SHADER_COMPUTE) {
- num_driver_params = IR3_DP_CS_COUNT;
- }
-
- so->constbase.driver_param = constoff;
- constoff += align(num_driver_params, 4) / 4;
-
- if ((so->type == MESA_SHADER_VERTEX) &&
- (compiler->gpu_id < 500) &&
- so->shader->stream_output.num_outputs > 0) {
- so->constbase.tfbo = constoff;
- constoff += align(IR3_MAX_SO_BUFFERS * ptrsz, 4) / 4;
- }
-
- so->constbase.immediate = constoff;
-
- return ctx;
-}
-
-static void
-compile_error(struct ir3_context *ctx, const char *format, ...)
-{
- struct hash_table *errors = NULL;
- va_list ap;
- va_start(ap, format);
- if (ctx->cur_instr) {
- errors = _mesa_hash_table_create(NULL,
- _mesa_hash_pointer,
- _mesa_key_pointer_equal);
- char *msg = ralloc_vasprintf(errors, format, ap);
- _mesa_hash_table_insert(errors, ctx->cur_instr, msg);
- } else {
- _debug_vprintf(format, ap);
- }
- va_end(ap);
- nir_print_shader_annotated(ctx->s, stdout, errors);
- ralloc_free(errors);
- ctx->error = true;
- debug_assert(0);
-}
-
-#define compile_assert(ctx, cond) do { \
- if (!(cond)) compile_error((ctx), "failed assert: "#cond"\n"); \
- } while (0)
-
-static void
-compile_free(struct ir3_context *ctx)
-{
- ralloc_free(ctx);
-}
-
-static void
-declare_array(struct ir3_context *ctx, nir_register *reg)
-{
- struct ir3_array *arr = rzalloc(ctx, struct ir3_array);
- arr->id = ++ctx->num_arrays;
-	/* NOTE: sometimes we get non-array regs, for example for arrays of
- * length 1. See fs-const-array-of-struct-of-array.shader_test. So
- * treat a non-array as if it was an array of length 1.
- *
- * It would be nice if there was a nir pass to convert arrays of
- * length 1 to ssa.
- */
- arr->length = reg->num_components * MAX2(1, reg->num_array_elems);
- compile_assert(ctx, arr->length > 0);
- arr->r = reg;
- list_addtail(&arr->node, &ctx->ir->array_list);
-}
-
-static struct ir3_array *
-get_array(struct ir3_context *ctx, nir_register *reg)
-{
- list_for_each_entry (struct ir3_array, arr, &ctx->ir->array_list, node) {
- if (arr->r == reg)
- return arr;
- }
- compile_error(ctx, "bogus reg: %s\n", reg->name);
- return NULL;
-}
-
-/* relative (indirect) if address!=NULL */
-static struct ir3_instruction *
-create_array_load(struct ir3_context *ctx, struct ir3_array *arr, int n,
- struct ir3_instruction *address)
-{
- struct ir3_block *block = ctx->block;
- struct ir3_instruction *mov;
- struct ir3_register *src;
-
- mov = ir3_instr_create(block, OPC_MOV);
- mov->cat1.src_type = TYPE_U32;
- mov->cat1.dst_type = TYPE_U32;
- mov->barrier_class = IR3_BARRIER_ARRAY_R;
- mov->barrier_conflict = IR3_BARRIER_ARRAY_W;
- ir3_reg_create(mov, 0, 0);
- src = ir3_reg_create(mov, 0, IR3_REG_ARRAY |
- COND(address, IR3_REG_RELATIV));
- src->instr = arr->last_write;
- src->size = arr->length;
- src->array.id = arr->id;
- src->array.offset = n;
-
- if (address)
- ir3_instr_set_address(mov, address);
-
- return mov;
-}
-
-/* relative (indirect) if address!=NULL */
-static void
-create_array_store(struct ir3_context *ctx, struct ir3_array *arr, int n,
- struct ir3_instruction *src, struct ir3_instruction *address)
-{
- struct ir3_block *block = ctx->block;
- struct ir3_instruction *mov;
- struct ir3_register *dst;
-
- /* if not relative store, don't create an extra mov, since that
- * ends up being difficult for cp to remove.
- */
- if (!address) {
- dst = src->regs[0];
-
- src->barrier_class |= IR3_BARRIER_ARRAY_W;
- src->barrier_conflict |= IR3_BARRIER_ARRAY_R | IR3_BARRIER_ARRAY_W;
-
- dst->flags |= IR3_REG_ARRAY;
- dst->instr = arr->last_write;
- dst->size = arr->length;
- dst->array.id = arr->id;
- dst->array.offset = n;
-
- arr->last_write = src;
-
- array_insert(block, block->keeps, src);
-
- return;
- }
-
- mov = ir3_instr_create(block, OPC_MOV);
- mov->cat1.src_type = TYPE_U32;
- mov->cat1.dst_type = TYPE_U32;
- mov->barrier_class = IR3_BARRIER_ARRAY_W;
- mov->barrier_conflict = IR3_BARRIER_ARRAY_R | IR3_BARRIER_ARRAY_W;
- dst = ir3_reg_create(mov, 0, IR3_REG_ARRAY |
- COND(address, IR3_REG_RELATIV));
- dst->instr = arr->last_write;
- dst->size = arr->length;
- dst->array.id = arr->id;
- dst->array.offset = n;
- ir3_reg_create(mov, 0, IR3_REG_SSA)->instr = src;
-
- if (address)
- ir3_instr_set_address(mov, address);
-
- arr->last_write = mov;
-
- /* the array store may only matter to something in an earlier
-	 * block (ie. loops), but since arrays are not in SSA, the
-	 * depth pass won't know this.. so keep all array stores:
- */
- array_insert(block, block->keeps, mov);
-}
-
-static inline type_t utype_for_size(unsigned bit_size)
-{
- switch (bit_size) {
- case 32: return TYPE_U32;
- case 16: return TYPE_U16;
- case 8: return TYPE_U8;
- default: unreachable("bad bitsize"); return ~0;
- }
-}
-
-static inline type_t utype_src(nir_src src)
-{ return utype_for_size(nir_src_bit_size(src)); }
-
-static inline type_t utype_dst(nir_dest dst)
-{ return utype_for_size(nir_dest_bit_size(dst)); }
-
-/* allocate a n element value array (to be populated by caller) and
- * insert in def_ht
- */
-static struct ir3_instruction **
-get_dst_ssa(struct ir3_context *ctx, nir_ssa_def *dst, unsigned n)
-{
- struct ir3_instruction **value =
- ralloc_array(ctx->def_ht, struct ir3_instruction *, n);
- _mesa_hash_table_insert(ctx->def_ht, dst, value);
- return value;
-}
-
-static struct ir3_instruction **
-get_dst(struct ir3_context *ctx, nir_dest *dst, unsigned n)
-{
- struct ir3_instruction **value;
-
- if (dst->is_ssa) {
- value = get_dst_ssa(ctx, &dst->ssa, n);
- } else {
- value = ralloc_array(ctx, struct ir3_instruction *, n);
- }
-
-	/* NOTE: in the non-ssa case, we don't really need to store last_dst,
-	 * but this helps us catch cases where a put_dst() call is forgotten
- */
- compile_assert(ctx, !ctx->last_dst);
- ctx->last_dst = value;
- ctx->last_dst_n = n;
-
- return value;
-}
-
-static struct ir3_instruction * get_addr(struct ir3_context *ctx, struct ir3_instruction *src, int align);
-
-static struct ir3_instruction * const *
-get_src(struct ir3_context *ctx, nir_src *src)
-{
- if (src->is_ssa) {
- struct hash_entry *entry;
- entry = _mesa_hash_table_search(ctx->def_ht, src->ssa);
- compile_assert(ctx, entry);
- return entry->data;
- } else {
- nir_register *reg = src->reg.reg;
- struct ir3_array *arr = get_array(ctx, reg);
- unsigned num_components = arr->r->num_components;
- struct ir3_instruction *addr = NULL;
- struct ir3_instruction **value =
- ralloc_array(ctx, struct ir3_instruction *, num_components);
-
- if (src->reg.indirect)
- addr = get_addr(ctx, get_src(ctx, src->reg.indirect)[0],
- reg->num_components);
-
- for (unsigned i = 0; i < num_components; i++) {
- unsigned n = src->reg.base_offset * reg->num_components + i;
- compile_assert(ctx, n < arr->length);
- value[i] = create_array_load(ctx, arr, n, addr);
- }
-
- return value;
- }
-}
-
-static void
-put_dst(struct ir3_context *ctx, nir_dest *dst)
-{
- unsigned bit_size = nir_dest_bit_size(*dst);
-
- if (bit_size < 32) {
- for (unsigned i = 0; i < ctx->last_dst_n; i++) {
- struct ir3_instruction *dst = ctx->last_dst[i];
- dst->regs[0]->flags |= IR3_REG_HALF;
- if (ctx->last_dst[i]->opc == OPC_META_FO)
- dst->regs[1]->instr->regs[0]->flags |= IR3_REG_HALF;
- }
- }
-
- if (!dst->is_ssa) {
- nir_register *reg = dst->reg.reg;
- struct ir3_array *arr = get_array(ctx, reg);
- unsigned num_components = ctx->last_dst_n;
- struct ir3_instruction *addr = NULL;
-
- if (dst->reg.indirect)
- addr = get_addr(ctx, get_src(ctx, dst->reg.indirect)[0],
- reg->num_components);
-
- for (unsigned i = 0; i < num_components; i++) {
- unsigned n = dst->reg.base_offset * reg->num_components + i;
- compile_assert(ctx, n < arr->length);
- if (!ctx->last_dst[i])
- continue;
- create_array_store(ctx, arr, n, ctx->last_dst[i], addr);
- }
-
- ralloc_free(ctx->last_dst);
- }
- ctx->last_dst = NULL;
- ctx->last_dst_n = 0;
-}
-
-static struct ir3_instruction *
-create_immed_typed(struct ir3_block *block, uint32_t val, type_t type)
-{
- struct ir3_instruction *mov;
- unsigned flags = (type_size(type) < 32) ? IR3_REG_HALF : 0;
-
- mov = ir3_instr_create(block, OPC_MOV);
- mov->cat1.src_type = type;
- mov->cat1.dst_type = type;
- ir3_reg_create(mov, 0, flags);
- ir3_reg_create(mov, 0, IR3_REG_IMMED)->uim_val = val;
-
- return mov;
-}
-
-static struct ir3_instruction *
-create_immed(struct ir3_block *block, uint32_t val)
-{
- return create_immed_typed(block, val, TYPE_U32);
-}
-
-static struct ir3_instruction *
-create_addr(struct ir3_block *block, struct ir3_instruction *src, int align)
-{
- struct ir3_instruction *instr, *immed;
-
- /* TODO in at least some cases, the backend could probably be
- * made clever enough to propagate IR3_REG_HALF..
- */
- instr = ir3_COV(block, src, TYPE_U32, TYPE_S16);
- instr->regs[0]->flags |= IR3_REG_HALF;
-
- switch(align){
- case 1:
- /* src *= 1: */
- break;
- case 2:
- /* src *= 2 => src <<= 1: */
- immed = create_immed(block, 1);
- immed->regs[0]->flags |= IR3_REG_HALF;
-
- instr = ir3_SHL_B(block, instr, 0, immed, 0);
- instr->regs[0]->flags |= IR3_REG_HALF;
- instr->regs[1]->flags |= IR3_REG_HALF;
- break;
- case 3:
- /* src *= 3: */
- immed = create_immed(block, 3);
- immed->regs[0]->flags |= IR3_REG_HALF;
-
- instr = ir3_MULL_U(block, instr, 0, immed, 0);
- instr->regs[0]->flags |= IR3_REG_HALF;
- instr->regs[1]->flags |= IR3_REG_HALF;
- break;
- case 4:
- /* src *= 4 => src <<= 2: */
- immed = create_immed(block, 2);
- immed->regs[0]->flags |= IR3_REG_HALF;
-
- instr = ir3_SHL_B(block, instr, 0, immed, 0);
- instr->regs[0]->flags |= IR3_REG_HALF;
- instr->regs[1]->flags |= IR3_REG_HALF;
- break;
- default:
- unreachable("bad align");
- return NULL;
- }
-
- instr = ir3_MOV(block, instr, TYPE_S16);
- instr->regs[0]->num = regid(REG_A0, 0);
- instr->regs[0]->flags |= IR3_REG_HALF;
- instr->regs[1]->flags |= IR3_REG_HALF;
-
- return instr;
-}
-
-/* caches addr values to avoid generating multiple cov/shl/mova
- * sequences for each use of a given NIR-level src as an address
- */
-static struct ir3_instruction *
-get_addr(struct ir3_context *ctx, struct ir3_instruction *src, int align)
-{
- struct ir3_instruction *addr;
- unsigned idx = align - 1;
-
- compile_assert(ctx, idx < ARRAY_SIZE(ctx->addr_ht));
-
- if (!ctx->addr_ht[idx]) {
- ctx->addr_ht[idx] = _mesa_hash_table_create(ctx,
- _mesa_hash_pointer, _mesa_key_pointer_equal);
- } else {
- struct hash_entry *entry;
- entry = _mesa_hash_table_search(ctx->addr_ht[idx], src);
- if (entry)
- return entry->data;
- }
-
- addr = create_addr(ctx->block, src, align);
- _mesa_hash_table_insert(ctx->addr_ht[idx], src, addr);
-
- return addr;
-}
-
-static struct ir3_instruction *
-get_predicate(struct ir3_context *ctx, struct ir3_instruction *src)
-{
- struct ir3_block *b = ctx->block;
- struct ir3_instruction *cond;
-
- /* NOTE: only cmps.*.* can write p0.x: */
- cond = ir3_CMPS_S(b, src, 0, create_immed(b, 0), 0);
- cond->cat2.condition = IR3_COND_NE;
-
- /* condition always goes in predicate register: */
- cond->regs[0]->num = regid(REG_P0, 0);
-
- return cond;
-}
-
-static struct ir3_instruction *
-create_uniform(struct ir3_context *ctx, unsigned n)
-{
- struct ir3_instruction *mov;
-
- mov = ir3_instr_create(ctx->block, OPC_MOV);
- /* TODO get types right? */
- mov->cat1.src_type = TYPE_F32;
- mov->cat1.dst_type = TYPE_F32;
- ir3_reg_create(mov, 0, 0);
- ir3_reg_create(mov, n, IR3_REG_CONST);
-
- return mov;
-}
-
-static struct ir3_instruction *
-create_uniform_indirect(struct ir3_context *ctx, int n,
- struct ir3_instruction *address)
-{
- struct ir3_instruction *mov;
-
- mov = ir3_instr_create(ctx->block, OPC_MOV);
- mov->cat1.src_type = TYPE_U32;
- mov->cat1.dst_type = TYPE_U32;
- ir3_reg_create(mov, 0, 0);
- ir3_reg_create(mov, 0, IR3_REG_CONST | IR3_REG_RELATIV)->array.offset = n;
-
- ir3_instr_set_address(mov, address);
-
- return mov;
-}
-
-static struct ir3_instruction *
-create_collect(struct ir3_context *ctx, struct ir3_instruction *const *arr,
- unsigned arrsz)
-{
- struct ir3_block *block = ctx->block;
- struct ir3_instruction *collect;
-
- if (arrsz == 0)
- return NULL;
-
- unsigned flags = arr[0]->regs[0]->flags & IR3_REG_HALF;
-
- collect = ir3_instr_create2(block, OPC_META_FI, 1 + arrsz);
- ir3_reg_create(collect, 0, flags); /* dst */
- for (unsigned i = 0; i < arrsz; i++) {
- struct ir3_instruction *elem = arr[i];
-
- /* Since arrays are pre-colored in RA, we can't assume that
- * things will end up in the right place. (Ie. if a collect
- * joins elements from two different arrays.) So insert an
- * extra mov.
- *
- * We could possibly skip this if all the collected elements
- * are contiguous elements in a single array.. not sure how
- * likely that is to happen.
- *
- * Fixes a problem with glamor shaders, that in effect do
- * something like:
- *
- * if (foo)
- * texcoord = ..
- * else
- * texcoord = ..
- * color = texture2D(tex, texcoord);
- *
- * In this case, texcoord will end up as nir registers (which
-	 * translate to ir3 arrays of length 1), and we can't assume
- * the two (or more) arrays will get allocated in consecutive
- * scalar registers.
- *
- */
- if (elem->regs[0]->flags & IR3_REG_ARRAY) {
- type_t type = (flags & IR3_REG_HALF) ? TYPE_U16 : TYPE_U32;
- elem = ir3_MOV(block, elem, type);
- }
-
- compile_assert(ctx, (elem->regs[0]->flags & IR3_REG_HALF) == flags);
- ir3_reg_create(collect, 0, IR3_REG_SSA | flags)->instr = elem;
- }
-
- return collect;
-}
static struct ir3_instruction *
create_indirect_load(struct ir3_context *ctx, unsigned arrsz, int n,
/* NOTE: dp is in scalar, but there can be >4 dp components: */
unsigned n = ctx->so->constbase.driver_param;
unsigned r = regid(n + dp / 4, dp % 4);
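+	/* (e.g. dp=5 lands at regid(n + 1, 1), ie. component .y of the
+	 * second driver-param vec4)
+	 */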
- return create_uniform(ctx, r);
-}
-
-/* helper for instructions that produce multiple consecutive scalar
- * outputs which need to have a split/fanout meta instruction inserted
- */
-static void
-split_dest(struct ir3_block *block, struct ir3_instruction **dst,
- struct ir3_instruction *src, unsigned base, unsigned n)
-{
- struct ir3_instruction *prev = NULL;
-
- if ((n == 1) && (src->regs[0]->wrmask == 0x1)) {
- dst[0] = src;
- return;
- }
-
- for (int i = 0, j = 0; i < n; i++) {
- struct ir3_instruction *split = ir3_instr_create(block, OPC_META_FO);
- ir3_reg_create(split, 0, IR3_REG_SSA);
- ir3_reg_create(split, 0, IR3_REG_SSA)->instr = src;
- split->fo.off = i + base;
-
- if (prev) {
- split->cp.left = prev;
- split->cp.left_cnt++;
- prev->cp.right = split;
- prev->cp.right_cnt++;
- }
- prev = split;
-
- if (src->regs[0]->wrmask & (1 << (i + base)))
- dst[j++] = split;
- }
+ return create_uniform(ctx->block, r);
}
/*
src_type = TYPE_F16;
break;
default:
- compile_error(ctx, "invalid src bit size: %u", src_bitsize);
+ ir3_context_error(ctx, "invalid src bit size: %u", src_bitsize);
}
break;
src_type = TYPE_S8;
break;
default:
- compile_error(ctx, "invalid src bit size: %u", src_bitsize);
+ ir3_context_error(ctx, "invalid src bit size: %u", src_bitsize);
}
break;
src_type = TYPE_U8;
break;
default:
- compile_error(ctx, "invalid src bit size: %u", src_bitsize);
+ ir3_context_error(ctx, "invalid src bit size: %u", src_bitsize);
}
break;
default:
- compile_error(ctx, "invalid conversion op: %u", op);
+ ir3_context_error(ctx, "invalid conversion op: %u", op);
}
switch (op) {
break;
default:
- compile_error(ctx, "invalid conversion op: %u", op);
+ ir3_context_error(ctx, "invalid conversion op: %u", op);
}
return ir3_COV(ctx->block, src, src_type, dst_type);
wrmask = alu->dest.write_mask;
}
- dst = get_dst(ctx, &alu->dest.dest, dst_sz);
+ dst = ir3_get_dst(ctx, &alu->dest.dest, dst_sz);
/* Vectors are special in that they have non-scalarized writemasks,
* and just take the first swizzle channel for each argument in
compile_assert(ctx, !asrc->abs);
compile_assert(ctx, !asrc->negate);
- src[i] = get_src(ctx, &asrc->src)[asrc->swizzle[0]];
+ src[i] = ir3_get_src(ctx, &asrc->src)[asrc->swizzle[0]];
if (!src[i])
src[i] = create_immed(ctx->block, 0);
dst[i] = ir3_MOV(b, src[i], TYPE_U32);
}
- put_dst(ctx, &alu->dest.dest);
+ ir3_put_dst(ctx, &alu->dest.dest);
return;
}
if ((alu->op == nir_op_imov) || (alu->op == nir_op_fmov)) {
type_t type = (alu->op == nir_op_imov) ? TYPE_U32 : TYPE_F32;
nir_alu_src *asrc = &alu->src[0];
- struct ir3_instruction *const *src0 = get_src(ctx, &asrc->src);
+ struct ir3_instruction *const *src0 = ir3_get_src(ctx, &asrc->src);
for (unsigned i = 0; i < dst_sz; i++) {
if (wrmask & (1 << i)) {
}
}
- put_dst(ctx, &alu->dest.dest);
+ ir3_put_dst(ctx, &alu->dest.dest);
return;
}
compile_assert(ctx, !asrc->abs);
compile_assert(ctx, !asrc->negate);
- src[i] = get_src(ctx, &asrc->src)[asrc->swizzle[chan]];
+ src[i] = ir3_get_src(ctx, &asrc->src)[asrc->swizzle[chan]];
bs[i] = nir_src_bit_size(asrc->src);
compile_assert(ctx, src[i]);
dst[0]->cat5.type = TYPE_F32;
break;
break;
- case nir_op_flt:
+ case nir_op_flt32:
dst[0] = ir3_CMPS_F(b, src[0], 0, src[1], 0);
dst[0]->cat2.condition = IR3_COND_LT;
dst[0] = ir3_n2b(b, dst[0]);
break;
- case nir_op_fge:
+ case nir_op_fge32:
dst[0] = ir3_CMPS_F(b, src[0], 0, src[1], 0);
dst[0]->cat2.condition = IR3_COND_GE;
dst[0] = ir3_n2b(b, dst[0]);
break;
- case nir_op_feq:
+ case nir_op_feq32:
dst[0] = ir3_CMPS_F(b, src[0], 0, src[1], 0);
dst[0]->cat2.condition = IR3_COND_EQ;
dst[0] = ir3_n2b(b, dst[0]);
break;
- case nir_op_fne:
+ case nir_op_fne32:
dst[0] = ir3_CMPS_F(b, src[0], 0, src[1], 0);
dst[0]->cat2.condition = IR3_COND_NE;
dst[0] = ir3_n2b(b, dst[0]);
case nir_op_ishr:
dst[0] = ir3_ASHR_B(b, src[0], 0, src[1], 0);
break;
- case nir_op_isign: {
- /* maybe this would be sane to lower in nir.. */
- struct ir3_instruction *neg, *pos;
-
- neg = ir3_CMPS_S(b, src[0], 0, create_immed(b, 0), 0);
- neg->cat2.condition = IR3_COND_LT;
-
- pos = ir3_CMPS_S(b, src[0], 0, create_immed(b, 0), 0);
- pos->cat2.condition = IR3_COND_GT;
-
- dst[0] = ir3_SUB_U(b, pos, 0, neg, 0);
-
- break;
- }
case nir_op_isub:
dst[0] = ir3_SUB_U(b, src[0], 0, src[1], 0);
break;
case nir_op_ushr:
dst[0] = ir3_SHR_B(b, src[0], 0, src[1], 0);
break;
- case nir_op_ilt:
+ case nir_op_ilt32:
dst[0] = ir3_CMPS_S(b, src[0], 0, src[1], 0);
dst[0]->cat2.condition = IR3_COND_LT;
dst[0] = ir3_n2b(b, dst[0]);
break;
- case nir_op_ige:
+ case nir_op_ige32:
dst[0] = ir3_CMPS_S(b, src[0], 0, src[1], 0);
dst[0]->cat2.condition = IR3_COND_GE;
dst[0] = ir3_n2b(b, dst[0]);
break;
- case nir_op_ieq:
+ case nir_op_ieq32:
dst[0] = ir3_CMPS_S(b, src[0], 0, src[1], 0);
dst[0]->cat2.condition = IR3_COND_EQ;
dst[0] = ir3_n2b(b, dst[0]);
break;
- case nir_op_ine:
+ case nir_op_ine32:
dst[0] = ir3_CMPS_S(b, src[0], 0, src[1], 0);
dst[0]->cat2.condition = IR3_COND_NE;
dst[0] = ir3_n2b(b, dst[0]);
break;
- case nir_op_ult:
+ case nir_op_ult32:
dst[0] = ir3_CMPS_U(b, src[0], 0, src[1], 0);
dst[0]->cat2.condition = IR3_COND_LT;
dst[0] = ir3_n2b(b, dst[0]);
break;
- case nir_op_uge:
+ case nir_op_uge32:
dst[0] = ir3_CMPS_U(b, src[0], 0, src[1], 0);
dst[0]->cat2.condition = IR3_COND_GE;
dst[0] = ir3_n2b(b, dst[0]);
break;
- case nir_op_bcsel: {
+ case nir_op_b32csel: {
struct ir3_instruction *cond = ir3_b2n(b, src[0]);
compile_assert(ctx, bs[1] == bs[2]);
/* the boolean condition is 32b even if src[1] and src[2] are
break;
default:
- compile_error(ctx, "Unhandled ALU op: %s\n",
+ ir3_context_error(ctx, "Unhandled ALU op: %s\n",
nir_op_infos[alu->op].name);
break;
}
- put_dst(ctx, &alu->dest.dest);
+ ir3_put_dst(ctx, &alu->dest.dest);
}
/* handles direct/indirect UBO reads: */
nir_const_value *const_offset;
/* UBO addresses are the first driver params: */
unsigned ubo = regid(ctx->so->constbase.ubo, 0);
- const unsigned ptrsz = pointer_size(ctx);
+ const unsigned ptrsz = ir3_pointer_size(ctx);
int off = 0;
/* First src is ubo index, which could either be an immed or not: */
- src0 = get_src(ctx, &intr->src[0])[0];
+ src0 = ir3_get_src(ctx, &intr->src[0])[0];
if (is_same_type_mov(src0) &&
(src0->regs[1]->flags & IR3_REG_IMMED)) {
- base_lo = create_uniform(ctx, ubo + (src0->regs[1]->iim_val * ptrsz));
- base_hi = create_uniform(ctx, ubo + (src0->regs[1]->iim_val * ptrsz) + 1);
+ base_lo = create_uniform(b, ubo + (src0->regs[1]->iim_val * ptrsz));
+ base_hi = create_uniform(b, ubo + (src0->regs[1]->iim_val * ptrsz) + 1);
} else {
- base_lo = create_uniform_indirect(ctx, ubo, get_addr(ctx, src0, 4));
- base_hi = create_uniform_indirect(ctx, ubo + 1, get_addr(ctx, src0, 4));
+ base_lo = create_uniform_indirect(b, ubo, ir3_get_addr(ctx, src0, 4));
+ base_hi = create_uniform_indirect(b, ubo + 1, ir3_get_addr(ctx, src0, 4));
}
/* note: on 32bit gpu's base_hi is ignored and DCE'd */
off += const_offset->u32[0];
} else {
/* For load_ubo_indirect, second src is indirect offset: */
- src1 = get_src(ctx, &intr->src[1])[0];
+ src1 = ir3_get_src(ctx, &intr->src[1])[0];
/* and add offset to addr: */
addr = ir3_ADD_S(b, addr, 0, src1, 0);
* if (addr < base_lo)
* base_hi++
*/
- carry = ir3_CMPS_U(b, addr, 0, base_lo, 0);
- carry->cat2.condition = IR3_COND_LT;
- base_hi = ir3_ADD_S(b, base_hi, 0, carry, 0);
-
- addr = create_collect(ctx, (struct ir3_instruction*[]){ addr, base_hi }, 2);
- }
-
- for (int i = 0; i < intr->num_components; i++) {
- struct ir3_instruction *load =
- ir3_LDG(b, addr, 0, create_immed(b, 1), 0);
- load->cat6.type = TYPE_U32;
- load->cat6.src_offset = off + i * 4; /* byte offset */
- dst[i] = load;
- }
-}
-
-/* src[] = { buffer_index, offset }. No const_index */
-static void
-emit_intrinsic_load_ssbo(struct ir3_context *ctx, nir_intrinsic_instr *intr,
- struct ir3_instruction **dst)
-{
- struct ir3_block *b = ctx->block;
- struct ir3_instruction *ldgb, *src0, *src1, *offset;
- nir_const_value *const_offset;
-
- /* can this be non-const buffer_index? how do we handle that? */
- const_offset = nir_src_as_const_value(intr->src[0]);
- compile_assert(ctx, const_offset);
-
- offset = get_src(ctx, &intr->src[1])[0];
-
- /* src0 is uvec2(offset*4, 0), src1 is offset.. nir already *= 4: */
- src0 = create_collect(ctx, (struct ir3_instruction*[]){
- offset,
- create_immed(b, 0),
- }, 2);
- src1 = ir3_SHR_B(b, offset, 0, create_immed(b, 2), 0);
-
- ldgb = ir3_LDGB(b, create_immed(b, const_offset->u32[0]), 0,
- src0, 0, src1, 0);
- ldgb->regs[0]->wrmask = MASK(intr->num_components);
- ldgb->cat6.iim_val = intr->num_components;
- ldgb->cat6.d = 4;
- ldgb->cat6.type = TYPE_U32;
- ldgb->barrier_class = IR3_BARRIER_BUFFER_R;
- ldgb->barrier_conflict = IR3_BARRIER_BUFFER_W;
-
- split_dest(b, dst, ldgb, 0, intr->num_components);
-}
-
-/* src[] = { value, block_index, offset }. const_index[] = { write_mask } */
-static void
-emit_intrinsic_store_ssbo(struct ir3_context *ctx, nir_intrinsic_instr *intr)
-{
- struct ir3_block *b = ctx->block;
- struct ir3_instruction *stgb, *src0, *src1, *src2, *offset;
- nir_const_value *const_offset;
- /* TODO handle wrmask properly, see _store_shared().. but I think
- * it is more a PITA than that, since blob ends up loading the
- * masked components and writing them back out.
- */
- unsigned wrmask = intr->const_index[0];
- unsigned ncomp = ffs(~wrmask) - 1;
-
- /* can this be non-const buffer_index? how do we handle that? */
- const_offset = nir_src_as_const_value(intr->src[1]);
- compile_assert(ctx, const_offset);
-
- offset = get_src(ctx, &intr->src[2])[0];
-
- /* src0 is value, src1 is offset, src2 is uvec2(offset*4, 0)..
- * nir already *= 4:
- */
- src0 = create_collect(ctx, get_src(ctx, &intr->src[0]), ncomp);
- src1 = ir3_SHR_B(b, offset, 0, create_immed(b, 2), 0);
- src2 = create_collect(ctx, (struct ir3_instruction*[]){
- offset,
- create_immed(b, 0),
- }, 2);
+ carry = ir3_CMPS_U(b, addr, 0, base_lo, 0);
+ carry->cat2.condition = IR3_COND_LT;
+ base_hi = ir3_ADD_S(b, base_hi, 0, carry, 0);
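+	/* (cmps.u produces 0/1, so the add above implements the carry
+	 * into base_hi)
+	 */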
- stgb = ir3_STGB(b, create_immed(b, const_offset->u32[0]), 0,
- src0, 0, src1, 0, src2, 0);
- stgb->cat6.iim_val = ncomp;
- stgb->cat6.d = 4;
- stgb->cat6.type = TYPE_U32;
- stgb->barrier_class = IR3_BARRIER_BUFFER_W;
- stgb->barrier_conflict = IR3_BARRIER_BUFFER_R | IR3_BARRIER_BUFFER_W;
+ addr = ir3_create_collect(ctx, (struct ir3_instruction*[]){ addr, base_hi }, 2);
+ }
- array_insert(b, b->keeps, stgb);
+ for (int i = 0; i < intr->num_components; i++) {
+ struct ir3_instruction *load =
+ ir3_LDG(b, addr, 0, create_immed(b, 1), 0);
+ load->cat6.type = TYPE_U32;
+ load->cat6.src_offset = off + i * 4; /* byte offset */
+ dst[i] = load;
+ }
}
/* src[] = { block_index } */
debug_assert(ctx->so->const_layout.ssbo_size.mask & (1 << blk_idx));
- dst[0] = create_uniform(ctx, idx);
-}
-
-/*
- * SSBO atomic intrinsics
- *
- * All of the SSBO atomic memory operations read a value from memory,
- * compute a new value using one of the operations below, write the new
- * value to memory, and return the original value read.
- *
- * All operations take 3 sources except CompSwap that takes 4. These
- * sources represent:
- *
- * 0: The SSBO buffer index.
- * 1: The offset into the SSBO buffer of the variable that the atomic
- * operation will operate on.
- * 2: The data parameter to the atomic function (i.e. the value to add
- * in ssbo_atomic_add, etc).
- * 3: For CompSwap only: the second data parameter.
- */
-static struct ir3_instruction *
-emit_intrinsic_atomic_ssbo(struct ir3_context *ctx, nir_intrinsic_instr *intr)
-{
- struct ir3_block *b = ctx->block;
- struct ir3_instruction *atomic, *ssbo, *src0, *src1, *src2, *offset;
- nir_const_value *const_offset;
- type_t type = TYPE_U32;
-
- /* can this be non-const buffer_index? how do we handle that? */
- const_offset = nir_src_as_const_value(intr->src[0]);
- compile_assert(ctx, const_offset);
- ssbo = create_immed(b, const_offset->u32[0]);
-
- offset = get_src(ctx, &intr->src[1])[0];
-
- /* src0 is data (or uvec2(data, compare))
- * src1 is offset
- * src2 is uvec2(offset*4, 0) (appears to be 64b byte offset)
- *
- * Note that nir already multiplies the offset by four
- */
- src0 = get_src(ctx, &intr->src[2])[0];
- src1 = ir3_SHR_B(b, offset, 0, create_immed(b, 2), 0);
- src2 = create_collect(ctx, (struct ir3_instruction*[]){
- offset,
- create_immed(b, 0),
- }, 2);
-
- switch (intr->intrinsic) {
- case nir_intrinsic_ssbo_atomic_add:
- atomic = ir3_ATOMIC_ADD_G(b, ssbo, 0, src0, 0, src1, 0, src2, 0);
- break;
- case nir_intrinsic_ssbo_atomic_imin:
- atomic = ir3_ATOMIC_MIN_G(b, ssbo, 0, src0, 0, src1, 0, src2, 0);
- type = TYPE_S32;
- break;
- case nir_intrinsic_ssbo_atomic_umin:
- atomic = ir3_ATOMIC_MIN_G(b, ssbo, 0, src0, 0, src1, 0, src2, 0);
- break;
- case nir_intrinsic_ssbo_atomic_imax:
- atomic = ir3_ATOMIC_MAX_G(b, ssbo, 0, src0, 0, src1, 0, src2, 0);
- type = TYPE_S32;
- break;
- case nir_intrinsic_ssbo_atomic_umax:
- atomic = ir3_ATOMIC_MAX_G(b, ssbo, 0, src0, 0, src1, 0, src2, 0);
- break;
- case nir_intrinsic_ssbo_atomic_and:
- atomic = ir3_ATOMIC_AND_G(b, ssbo, 0, src0, 0, src1, 0, src2, 0);
- break;
- case nir_intrinsic_ssbo_atomic_or:
- atomic = ir3_ATOMIC_OR_G(b, ssbo, 0, src0, 0, src1, 0, src2, 0);
- break;
- case nir_intrinsic_ssbo_atomic_xor:
- atomic = ir3_ATOMIC_XOR_G(b, ssbo, 0, src0, 0, src1, 0, src2, 0);
- break;
- case nir_intrinsic_ssbo_atomic_exchange:
- atomic = ir3_ATOMIC_XCHG_G(b, ssbo, 0, src0, 0, src1, 0, src2, 0);
- break;
- case nir_intrinsic_ssbo_atomic_comp_swap:
- /* for cmpxchg, src0 is [ui]vec2(data, compare): */
- src0 = create_collect(ctx, (struct ir3_instruction*[]){
- get_src(ctx, &intr->src[3])[0],
- src0,
- }, 2);
- atomic = ir3_ATOMIC_CMPXCHG_G(b, ssbo, 0, src0, 0, src1, 0, src2, 0);
- break;
- default:
- unreachable("boo");
- }
-
- atomic->cat6.iim_val = 1;
- atomic->cat6.d = 4;
- atomic->cat6.type = type;
- atomic->barrier_class = IR3_BARRIER_BUFFER_W;
- atomic->barrier_conflict = IR3_BARRIER_BUFFER_R | IR3_BARRIER_BUFFER_W;
-
- /* even if nothing consume the result, we can't DCE the instruction: */
- array_insert(b, b->keeps, atomic);
-
- return atomic;
+ dst[0] = create_uniform(ctx->block, idx);
}
/* src[] = { offset }. const_index[] = { base } */
struct ir3_instruction *ldl, *offset;
unsigned base;
- offset = get_src(ctx, &intr->src[0])[0];
+ offset = ir3_get_src(ctx, &intr->src[0])[0];
base = nir_intrinsic_base(intr);
ldl = ir3_LDL(b, offset, 0, create_immed(b, intr->num_components), 0);
ldl->barrier_class = IR3_BARRIER_SHARED_R;
ldl->barrier_conflict = IR3_BARRIER_SHARED_W;
- split_dest(b, dst, ldl, 0, intr->num_components);
+ ir3_split_dest(b, dst, ldl, 0, intr->num_components);
}
/* src[] = { value, offset }. const_index[] = { base, write_mask } */
struct ir3_instruction * const *value;
unsigned base, wrmask;
- value = get_src(ctx, &intr->src[0]);
- offset = get_src(ctx, &intr->src[1])[0];
+ value = ir3_get_src(ctx, &intr->src[0]);
+ offset = ir3_get_src(ctx, &intr->src[1])[0];
base = nir_intrinsic_base(intr);
wrmask = nir_intrinsic_write_mask(intr);
unsigned length = ffs(~(wrmask >> first_component)) - 1;
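+		/* (e.g. wrmask=0b0110 with first_component=1 gives
+		 * length = ffs(~0b11) - 1 = 2 contiguous components)
+		 */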
stl = ir3_STL(b, offset, 0,
- create_collect(ctx, &value[first_component], length), 0,
+ ir3_create_collect(ctx, &value[first_component], length), 0,
create_immed(b, length), 0);
stl->cat6.dst_offset = first_component + base;
stl->cat6.type = utype_src(intr->src[0]);
struct ir3_instruction *atomic, *src0, *src1;
type_t type = TYPE_U32;
- src0 = get_src(ctx, &intr->src[0])[0]; /* offset */
- src1 = get_src(ctx, &intr->src[1])[0]; /* value */
+ src0 = ir3_get_src(ctx, &intr->src[0])[0]; /* offset */
+ src1 = ir3_get_src(ctx, &intr->src[1])[0]; /* value */
switch (intr->intrinsic) {
case nir_intrinsic_shared_atomic_add:
break;
case nir_intrinsic_shared_atomic_comp_swap:
/* for cmpxchg, src1 is [ui]vec2(data, compare): */
- src1 = create_collect(ctx, (struct ir3_instruction*[]){
- get_src(ctx, &intr->src[2])[0],
+ src1 = ir3_create_collect(ctx, (struct ir3_instruction*[]){
+ ir3_get_src(ctx, &intr->src[2])[0],
src1,
}, 2);
atomic = ir3_ATOMIC_CMPXCHG(b, src0, 0, src1, 0);
return atomic;
}
-/* Images get mapped into SSBO/image state (for store/atomic) and texture
- * state block (for load). To simplify things, invert the image id and
- * map it from end of state block, ie. image 0 becomes num-1, image 1
- * becomes num-2, etc. This potentially avoids needing to re-emit texture
- * state when switching shaders.
- *
- * TODO is the max # of samplers and SSBOs the same? This shouldn't be hard-
- * coded. Also, since all the gl shader stages (ie. everything but CS)
- * share the same SSBO/image state block, this might require some more
- * logic if we supported images in anything other than FS..
- */
-static unsigned
-get_image_slot(struct ir3_context *ctx, nir_deref_instr *deref)
-{
- unsigned int loc = 0;
- unsigned inner_size = 1;
-
- while (deref->deref_type != nir_deref_type_var) {
- assert(deref->deref_type == nir_deref_type_array);
- nir_const_value *const_index = nir_src_as_const_value(deref->arr.index);
- assert(const_index);
-
- /* Go to the next instruction */
- deref = nir_deref_instr_parent(deref);
-
- assert(glsl_type_is_array(deref->type));
- const unsigned array_len = glsl_get_length(deref->type);
- loc += MIN2(const_index->u32[0], array_len - 1) * inner_size;
-
- /* Update the inner size */
- inner_size *= array_len;
- }
-
- loc += deref->var->data.driver_location;
-
- /* TODO figure out real limit per generation, and don't hardcode: */
- const unsigned max_samplers = 16;
- return max_samplers - loc - 1;
-}
-
-/* see tex_info() for equiv logic for texture instructions.. it would be
- * nice if this could be better unified..
+/* TODO handle the actual indirect/dynamic case, which is going to be
+ * awkward to handle with the image_mapping table..
*/
-static unsigned
-get_image_coords(const nir_variable *var, unsigned *flagsp)
-{
- const struct glsl_type *type = glsl_without_array(var->type);
- unsigned coords, flags = 0;
-
- switch (glsl_get_sampler_dim(type)) {
- case GLSL_SAMPLER_DIM_1D:
- case GLSL_SAMPLER_DIM_BUF:
- coords = 1;
- break;
- case GLSL_SAMPLER_DIM_2D:
- case GLSL_SAMPLER_DIM_RECT:
- case GLSL_SAMPLER_DIM_EXTERNAL:
- case GLSL_SAMPLER_DIM_MS:
- coords = 2;
- break;
- case GLSL_SAMPLER_DIM_3D:
- case GLSL_SAMPLER_DIM_CUBE:
- flags |= IR3_INSTR_3D;
- coords = 3;
- break;
- default:
- unreachable("bad sampler dim");
- return 0;
- }
-
- if (glsl_sampler_type_is_array(type)) {
- /* note: unlike tex_info(), adjust # of coords to include array idx: */
- coords++;
- flags |= IR3_INSTR_A;
- }
-
- if (flagsp)
- *flagsp = flags;
-
- return coords;
-}
-
-static type_t
-get_image_type(const nir_variable *var)
-{
- switch (glsl_get_sampler_result_type(glsl_without_array(var->type))) {
- case GLSL_TYPE_UINT:
- return TYPE_U32;
- case GLSL_TYPE_INT:
- return TYPE_S32;
- case GLSL_TYPE_FLOAT:
- return TYPE_F32;
- default:
- unreachable("bad sampler type.");
- return 0;
- }
-}
-
static struct ir3_instruction *
-get_image_offset(struct ir3_context *ctx, const nir_variable *var,
- struct ir3_instruction * const *coords, bool byteoff)
+get_image_samp_tex_src(struct ir3_context *ctx, nir_intrinsic_instr *intr)
{
- struct ir3_block *b = ctx->block;
- struct ir3_instruction *offset;
- unsigned ncoords = get_image_coords(var, NULL);
-
- /* to calculate the byte offset (yes, uggg) we need (up to) three
- * const values to know the bytes per pixel, and y and z stride:
- */
- unsigned cb = regid(ctx->so->constbase.image_dims, 0) +
- ctx->so->const_layout.image_dims.off[var->data.driver_location];
+ unsigned slot = ir3_get_image_slot(nir_src_as_deref(intr->src[0]));
+ unsigned tex_idx = ir3_image_to_tex(&ctx->so->image_mapping, slot);
+ struct ir3_instruction *texture, *sampler;
- debug_assert(ctx->so->const_layout.image_dims.mask &
- (1 << var->data.driver_location));
+ texture = create_immed_typed(ctx->block, tex_idx, TYPE_U16);
+ sampler = create_immed_typed(ctx->block, tex_idx, TYPE_U16);
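+
+	/* note: both halves of the (sampler, texture) pair use the same
+	 * remapped tex idx; the sampler state shouldn't actually matter
+	 * for the isam/getsize instructions that consume this.
+	 */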
- /* offset = coords.x * bytes_per_pixel: */
- offset = ir3_MUL_S(b, coords[0], 0, create_uniform(ctx, cb + 0), 0);
- if (ncoords > 1) {
- /* offset += coords.y * y_pitch: */
- offset = ir3_MAD_S24(b, create_uniform(ctx, cb + 1), 0,
- coords[1], 0, offset, 0);
- }
- if (ncoords > 2) {
- /* offset += coords.z * z_pitch: */
- offset = ir3_MAD_S24(b, create_uniform(ctx, cb + 2), 0,
- coords[2], 0, offset, 0);
- }
-
- if (!byteoff) {
- /* Some cases, like atomics, seem to use dword offset instead
- * of byte offsets.. blob just puts an extra shr.b in there
- * in those cases:
- */
- offset = ir3_SHR_B(b, offset, 0, create_immed(b, 2), 0);
- }
-
- return create_collect(ctx, (struct ir3_instruction*[]){
- offset,
- create_immed(b, 0),
+ return ir3_create_collect(ctx, (struct ir3_instruction*[]){
+ sampler,
+ texture,
}, 2);
}
{
struct ir3_block *b = ctx->block;
const nir_variable *var = nir_intrinsic_get_var(intr, 0);
+ struct ir3_instruction *samp_tex = get_image_samp_tex_src(ctx, intr);
struct ir3_instruction *sam;
- struct ir3_instruction * const *src0 = get_src(ctx, &intr->src[1]);
+ struct ir3_instruction * const *src0 = ir3_get_src(ctx, &intr->src[1]);
struct ir3_instruction *coords[4];
- unsigned flags, ncoords = get_image_coords(var, &flags);
- unsigned tex_idx = get_image_slot(ctx, nir_src_as_deref(intr->src[0]));
- type_t type = get_image_type(var);
+ unsigned flags, ncoords = ir3_get_image_coords(var, &flags);
+ type_t type = ir3_get_image_type(var);
/* hmm, this seems a bit odd, but it is what blob does and (at least
* a5xx) just faults on bogus addresses otherwise:
coords[ncoords++] = create_immed(b, 0);
sam = ir3_SAM(b, OPC_ISAM, type, 0b1111, flags,
- tex_idx, tex_idx, create_collect(ctx, coords, ncoords), NULL);
+ samp_tex, ir3_create_collect(ctx, coords, ncoords), NULL);
sam->barrier_class = IR3_BARRIER_IMAGE_R;
sam->barrier_conflict = IR3_BARRIER_IMAGE_W;
- split_dest(b, dst, sam, 0, 4);
-}
-
-/* src[] = { deref, coord, sample_index, value }. const_index[] = {} */
-static void
-emit_intrinsic_store_image(struct ir3_context *ctx, nir_intrinsic_instr *intr)
-{
- struct ir3_block *b = ctx->block;
- const nir_variable *var = nir_intrinsic_get_var(intr, 0);
- struct ir3_instruction *stib, *offset;
- struct ir3_instruction * const *value = get_src(ctx, &intr->src[3]);
- struct ir3_instruction * const *coords = get_src(ctx, &intr->src[1]);
- unsigned ncoords = get_image_coords(var, NULL);
- unsigned tex_idx = get_image_slot(ctx, nir_src_as_deref(intr->src[0]));
-
- /* src0 is value
- * src1 is coords
- * src2 is 64b byte offset
- */
-
- offset = get_image_offset(ctx, var, coords, true);
-
- /* NOTE: stib seems to take byte offset, but stgb.typed can be used
- * too and takes a dword offset.. not quite sure yet why blob uses
- * one over the other in various cases.
- */
-
- stib = ir3_STIB(b, create_immed(b, tex_idx), 0,
- create_collect(ctx, value, 4), 0,
- create_collect(ctx, coords, ncoords), 0,
- offset, 0);
- stib->cat6.iim_val = 4;
- stib->cat6.d = ncoords;
- stib->cat6.type = get_image_type(var);
- stib->cat6.typed = true;
- stib->barrier_class = IR3_BARRIER_IMAGE_W;
- stib->barrier_conflict = IR3_BARRIER_IMAGE_R | IR3_BARRIER_IMAGE_W;
-
- array_insert(b, b->keeps, stib);
+ ir3_split_dest(b, dst, sam, 0, 4);
}
static void
{
struct ir3_block *b = ctx->block;
const nir_variable *var = nir_intrinsic_get_var(intr, 0);
- unsigned tex_idx = get_image_slot(ctx, nir_src_as_deref(intr->src[0]));
+ struct ir3_instruction *samp_tex = get_image_samp_tex_src(ctx, intr);
struct ir3_instruction *sam, *lod;
- unsigned flags, ncoords = get_image_coords(var, &flags);
+ unsigned flags, ncoords = ir3_get_image_coords(var, &flags);
lod = create_immed(b, 0);
sam = ir3_SAM(b, OPC_GETSIZE, TYPE_U32, 0b1111, flags,
- tex_idx, tex_idx, lod, NULL);
+ samp_tex, lod, NULL);
/* Array size actually ends up in .w rather than .z. This doesn't
* matter for miplevel 0, but for higher mips the value in z is
*/
struct ir3_instruction *tmp[4];
- split_dest(b, tmp, sam, 0, 4);
+ ir3_split_dest(b, tmp, sam, 0, 4);
/* get_size instruction returns size in bytes instead of texels
* for imageBuffer, so we need to divide it by the pixel size
*/
unsigned cb = regid(ctx->so->constbase.image_dims, 0) +
ctx->so->const_layout.image_dims.off[var->data.driver_location];
- struct ir3_instruction *aux = create_uniform(ctx, cb + 1);
+ struct ir3_instruction *aux = create_uniform(b, cb + 1);
tmp[0] = ir3_SHR_B(b, tmp[0], 0, aux, 0);
}
}
}
-/* src[] = { deref, coord, sample_index, value, compare }. const_index[] = {} */
-static struct ir3_instruction *
-emit_intrinsic_atomic_image(struct ir3_context *ctx, nir_intrinsic_instr *intr)
-{
- struct ir3_block *b = ctx->block;
- const nir_variable *var = nir_intrinsic_get_var(intr, 0);
- struct ir3_instruction *atomic, *image, *src0, *src1, *src2;
- struct ir3_instruction * const *coords = get_src(ctx, &intr->src[1]);
- unsigned ncoords = get_image_coords(var, NULL);
-
- image = create_immed(b, get_image_slot(ctx, nir_src_as_deref(intr->src[0])));
-
- /* src0 is value (or uvec2(value, compare))
- * src1 is coords
- * src2 is 64b byte offset
- */
- src0 = get_src(ctx, &intr->src[3])[0];
- src1 = create_collect(ctx, coords, ncoords);
- src2 = get_image_offset(ctx, var, coords, false);
-
- switch (intr->intrinsic) {
- case nir_intrinsic_image_deref_atomic_add:
- atomic = ir3_ATOMIC_ADD_G(b, image, 0, src0, 0, src1, 0, src2, 0);
- break;
- case nir_intrinsic_image_deref_atomic_min:
- atomic = ir3_ATOMIC_MIN_G(b, image, 0, src0, 0, src1, 0, src2, 0);
- break;
- case nir_intrinsic_image_deref_atomic_max:
- atomic = ir3_ATOMIC_MAX_G(b, image, 0, src0, 0, src1, 0, src2, 0);
- break;
- case nir_intrinsic_image_deref_atomic_and:
- atomic = ir3_ATOMIC_AND_G(b, image, 0, src0, 0, src1, 0, src2, 0);
- break;
- case nir_intrinsic_image_deref_atomic_or:
- atomic = ir3_ATOMIC_OR_G(b, image, 0, src0, 0, src1, 0, src2, 0);
- break;
- case nir_intrinsic_image_deref_atomic_xor:
- atomic = ir3_ATOMIC_XOR_G(b, image, 0, src0, 0, src1, 0, src2, 0);
- break;
- case nir_intrinsic_image_deref_atomic_exchange:
- atomic = ir3_ATOMIC_XCHG_G(b, image, 0, src0, 0, src1, 0, src2, 0);
- break;
- case nir_intrinsic_image_deref_atomic_comp_swap:
- /* for cmpxchg, src0 is [ui]vec2(data, compare): */
- src0 = create_collect(ctx, (struct ir3_instruction*[]){
- get_src(ctx, &intr->src[4])[0],
- src0,
- }, 2);
- atomic = ir3_ATOMIC_CMPXCHG_G(b, image, 0, src0, 0, src1, 0, src2, 0);
- break;
- default:
- unreachable("boo");
- }
-
- atomic->cat6.iim_val = 1;
- atomic->cat6.d = ncoords;
- atomic->cat6.type = get_image_type(var);
- atomic->cat6.typed = true;
- atomic->barrier_class = IR3_BARRIER_IMAGE_W;
- atomic->barrier_conflict = IR3_BARRIER_IMAGE_R | IR3_BARRIER_IMAGE_W;
-
- /* even if nothing consume the result, we can't DCE the instruction: */
- array_insert(b, b->keeps, atomic);
-
- return atomic;
-}
-
static void
emit_intrinsic_barrier(struct ir3_context *ctx, nir_intrinsic_instr *intr)
{
if (info->has_dest) {
unsigned n = nir_intrinsic_dest_components(intr);
- dst = get_dst(ctx, &intr->dest, n);
+ dst = ir3_get_dst(ctx, &intr->dest, n);
} else {
dst = NULL;
}
idx += const_offset->u32[0];
for (int i = 0; i < intr->num_components; i++) {
unsigned n = idx * 4 + i;
- dst[i] = create_uniform(ctx, n);
+ dst[i] = create_uniform(b, n);
}
} else {
- src = get_src(ctx, &intr->src[0]);
+ src = ir3_get_src(ctx, &intr->src[0]);
for (int i = 0; i < intr->num_components; i++) {
int n = idx * 4 + i;
- dst[i] = create_uniform_indirect(ctx, n,
- get_addr(ctx, src[0], 4));
+ dst[i] = create_uniform_indirect(b, n,
+ ir3_get_addr(ctx, src[0], 4));
}
/* NOTE: if relative addressing is used, we set
* constlen in the compiler (to worst-case value)
dst[i] = ctx->ir->inputs[n];
}
} else {
- src = get_src(ctx, &intr->src[0]);
+ src = ir3_get_src(ctx, &intr->src[0]);
struct ir3_instruction *collect =
- create_collect(ctx, ctx->ir->inputs, ctx->ir->ninputs);
- struct ir3_instruction *addr = get_addr(ctx, src[0], 4);
+ ir3_create_collect(ctx, ctx->ir->inputs, ctx->ir->ninputs);
+ struct ir3_instruction *addr = ir3_get_addr(ctx, src[0], 4);
for (int i = 0; i < intr->num_components; i++) {
unsigned n = idx * 4 + i + comp;
dst[i] = create_indirect_load(ctx, ctx->ir->ninputs,
}
}
break;
- case nir_intrinsic_load_ssbo:
- emit_intrinsic_load_ssbo(ctx, intr, dst);
+	/* All SSBO intrinsics should have been lowered by the 'lower_io_offsets'
+	 * pass and replaced by an ir3-specific version that adds the
+	 * dword-offset in the last source.
+ */
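+	/* (ie. load_ssbo_ir3 takes src[] = { buffer_index, offset,
+	 * dword-offset } vs. load_ssbo's { buffer_index, offset })
+	 */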
+ case nir_intrinsic_load_ssbo_ir3:
+ ctx->funcs->emit_intrinsic_load_ssbo(ctx, intr, dst);
break;
- case nir_intrinsic_store_ssbo:
- emit_intrinsic_store_ssbo(ctx, intr);
+ case nir_intrinsic_store_ssbo_ir3:
+ ctx->funcs->emit_intrinsic_store_ssbo(ctx, intr);
break;
case nir_intrinsic_get_buffer_size:
emit_intrinsic_ssbo_size(ctx, intr, dst);
break;
- case nir_intrinsic_ssbo_atomic_add:
- case nir_intrinsic_ssbo_atomic_imin:
- case nir_intrinsic_ssbo_atomic_umin:
- case nir_intrinsic_ssbo_atomic_imax:
- case nir_intrinsic_ssbo_atomic_umax:
- case nir_intrinsic_ssbo_atomic_and:
- case nir_intrinsic_ssbo_atomic_or:
- case nir_intrinsic_ssbo_atomic_xor:
- case nir_intrinsic_ssbo_atomic_exchange:
- case nir_intrinsic_ssbo_atomic_comp_swap:
- dst[0] = emit_intrinsic_atomic_ssbo(ctx, intr);
+ case nir_intrinsic_ssbo_atomic_add_ir3:
+ case nir_intrinsic_ssbo_atomic_imin_ir3:
+ case nir_intrinsic_ssbo_atomic_umin_ir3:
+ case nir_intrinsic_ssbo_atomic_imax_ir3:
+ case nir_intrinsic_ssbo_atomic_umax_ir3:
+ case nir_intrinsic_ssbo_atomic_and_ir3:
+ case nir_intrinsic_ssbo_atomic_or_ir3:
+ case nir_intrinsic_ssbo_atomic_xor_ir3:
+ case nir_intrinsic_ssbo_atomic_exchange_ir3:
+ case nir_intrinsic_ssbo_atomic_comp_swap_ir3:
+ dst[0] = ctx->funcs->emit_intrinsic_atomic_ssbo(ctx, intr);
break;
case nir_intrinsic_load_shared:
emit_intrinsic_load_shared(ctx, intr, dst);
emit_intrinsic_load_image(ctx, intr, dst);
break;
case nir_intrinsic_image_deref_store:
- emit_intrinsic_store_image(ctx, intr);
+ ctx->funcs->emit_intrinsic_store_image(ctx, intr);
break;
case nir_intrinsic_image_deref_size:
emit_intrinsic_image_size(ctx, intr, dst);
case nir_intrinsic_image_deref_atomic_xor:
case nir_intrinsic_image_deref_atomic_exchange:
case nir_intrinsic_image_deref_atomic_comp_swap:
- dst[0] = emit_intrinsic_atomic_image(ctx, intr);
+ dst[0] = ctx->funcs->emit_intrinsic_atomic_image(ctx, intr);
break;
case nir_intrinsic_barrier:
case nir_intrinsic_memory_barrier:
compile_assert(ctx, const_offset != NULL);
idx += const_offset->u32[0];
- src = get_src(ctx, &intr->src[0]);
+ src = ir3_get_src(ctx, &intr->src[0]);
for (int i = 0; i < intr->num_components; i++) {
unsigned n = idx * 4 + i + comp;
ctx->ir->outputs[n] = src[i];
add_sysval_input_compmask(ctx, SYSTEM_VALUE_LOCAL_INVOCATION_ID,
0x7, ctx->local_invocation_id);
}
- split_dest(b, dst, ctx->local_invocation_id, 0, 3);
+ ir3_split_dest(b, dst, ctx->local_invocation_id, 0, 3);
break;
case nir_intrinsic_load_work_group_id:
if (!ctx->work_group_id) {
0x7, ctx->work_group_id);
ctx->work_group_id->regs[0]->flags |= IR3_REG_HIGH;
}
- split_dest(b, dst, ctx->work_group_id, 0, 3);
+ ir3_split_dest(b, dst, ctx->work_group_id, 0, 3);
break;
case nir_intrinsic_load_num_work_groups:
for (int i = 0; i < intr->num_components; i++) {
if (intr->intrinsic == nir_intrinsic_discard_if) {
/* conditional discard: */
- src = get_src(ctx, &intr->src[0]);
+ src = ir3_get_src(ctx, &intr->src[0]);
cond = ir3_b2n(b, src[0]);
} else {
/* unconditional discard: */
break;
}
default:
- compile_error(ctx, "Unhandled intrinsic type: %s\n",
+ ir3_context_error(ctx, "Unhandled intrinsic type: %s\n",
nir_intrinsic_infos[intr->intrinsic].name);
break;
}
if (info->has_dest)
- put_dst(ctx, &intr->dest);
+ ir3_put_dst(ctx, &intr->dest);
}
static void
emit_load_const(struct ir3_context *ctx, nir_load_const_instr *instr)
{
- struct ir3_instruction **dst = get_dst_ssa(ctx, &instr->def,
+ struct ir3_instruction **dst = ir3_get_dst_ssa(ctx, &instr->def,
instr->def.num_components);
type_t type = (instr->def.bit_size < 32) ? TYPE_U16 : TYPE_U32;
static void
emit_undef(struct ir3_context *ctx, nir_ssa_undef_instr *undef)
{
- struct ir3_instruction **dst = get_dst_ssa(ctx, &undef->def,
+ struct ir3_instruction **dst = ir3_get_dst_ssa(ctx, &undef->def,
undef->def.num_components);
type_t type = (undef->def.bit_size < 32) ? TYPE_U16 : TYPE_U32;
*coordsp = coords;
}
+/* Get the sampler/texture idx as a hvec2.  This can either be dynamic
+ * or immediate (in which case it will get lowered later to a non-.s2en
+ * version of the tex instruction, which encodes tex/samp as immediates:
+ */
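+/* E.g. a fixed tex/samp pair just collects two U16 immediates, while
+ * an indirect access instead feeds the cov'd (u32 -> u16) dynamic
+ * index and the instruction stays in .s2en form.
+ */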
+static struct ir3_instruction *
+get_tex_samp_tex_src(struct ir3_context *ctx, nir_tex_instr *tex)
+{
+ int texture_idx = nir_tex_instr_src_index(tex, nir_tex_src_texture_offset);
+ int sampler_idx = nir_tex_instr_src_index(tex, nir_tex_src_sampler_offset);
+ struct ir3_instruction *texture, *sampler;
+
+ if (texture_idx >= 0) {
+ texture = ir3_get_src(ctx, &tex->src[texture_idx].src)[0];
+ texture = ir3_COV(ctx->block, texture, TYPE_U32, TYPE_U16);
+ } else {
+		/* TODO what to do for the dynamic case?  We only need the
+		 * max index for the astc+srgb workaround, so this may not
+		 * matter as long as we don't enable indirect samplers on
+		 * a4xx.
+ */
+ ctx->max_texture_index = MAX2(ctx->max_texture_index, tex->texture_index);
+ texture = create_immed_typed(ctx->block, tex->texture_index, TYPE_U16);
+ }
+
+ if (sampler_idx >= 0) {
+ sampler = ir3_get_src(ctx, &tex->src[sampler_idx].src)[0];
+ sampler = ir3_COV(ctx->block, sampler, TYPE_U32, TYPE_U16);
+ } else {
+ sampler = create_immed_typed(ctx->block, tex->sampler_index, TYPE_U16);
+ }
+
+ return ir3_create_collect(ctx, (struct ir3_instruction*[]){
+ sampler,
+ texture,
+ }, 2);
+}
+
static void
emit_tex(struct ir3_context *ctx, nir_tex_instr *tex)
{
struct ir3_instruction * const *coord, * const *off, * const *ddx, * const *ddy;
struct ir3_instruction *lod, *compare, *proj, *sample_index;
bool has_bias = false, has_lod = false, has_proj = false, has_off = false;
- unsigned i, coords, flags;
+ unsigned i, coords, flags, ncomp;
unsigned nsrc0 = 0, nsrc1 = 0;
type_t type;
opc_t opc = 0;
+ ncomp = nir_dest_num_components(tex->dest);
+
coord = off = ddx = ddy = NULL;
lod = proj = compare = sample_index = NULL;
- /* TODO: might just be one component for gathers? */
- dst = get_dst(ctx, &tex->dest, 4);
+ dst = ir3_get_dst(ctx, &tex->dest, ncomp);
for (unsigned i = 0; i < tex->num_srcs; i++) {
switch (tex->src[i].src_type) {
case nir_tex_src_coord:
- coord = get_src(ctx, &tex->src[i].src);
+ coord = ir3_get_src(ctx, &tex->src[i].src);
break;
case nir_tex_src_bias:
- lod = get_src(ctx, &tex->src[i].src)[0];
+ lod = ir3_get_src(ctx, &tex->src[i].src)[0];
has_bias = true;
break;
case nir_tex_src_lod:
- lod = get_src(ctx, &tex->src[i].src)[0];
+ lod = ir3_get_src(ctx, &tex->src[i].src)[0];
has_lod = true;
break;
case nir_tex_src_comparator: /* shadow comparator */
- compare = get_src(ctx, &tex->src[i].src)[0];
+ compare = ir3_get_src(ctx, &tex->src[i].src)[0];
break;
case nir_tex_src_projector:
- proj = get_src(ctx, &tex->src[i].src)[0];
+ proj = ir3_get_src(ctx, &tex->src[i].src)[0];
has_proj = true;
break;
case nir_tex_src_offset:
- off = get_src(ctx, &tex->src[i].src);
+ off = ir3_get_src(ctx, &tex->src[i].src);
has_off = true;
break;
case nir_tex_src_ddx:
- ddx = get_src(ctx, &tex->src[i].src);
+ ddx = ir3_get_src(ctx, &tex->src[i].src);
break;
case nir_tex_src_ddy:
- ddy = get_src(ctx, &tex->src[i].src);
+ ddy = ir3_get_src(ctx, &tex->src[i].src);
break;
case nir_tex_src_ms_index:
- sample_index = get_src(ctx, &tex->src[i].src)[0];
+ sample_index = ir3_get_src(ctx, &tex->src[i].src)[0];
+ break;
+ case nir_tex_src_texture_offset:
+ case nir_tex_src_sampler_offset:
+		/* handled in get_tex_samp_tex_src() */
break;
default:
- compile_error(ctx, "Unhandled NIR tex src type: %d\n",
+ ir3_context_error(ctx, "Unhandled NIR tex src type: %d\n",
tex->src[i].src_type);
return;
}
case nir_texop_texture_samples:
case nir_texop_samples_identical:
case nir_texop_txf_ms_mcs:
- compile_error(ctx, "Unhandled NIR tex type: %d\n", tex->op);
+ ir3_context_error(ctx, "Unhandled NIR tex type: %d\n", tex->op);
return;
}
nsrc0 = i;
- /* NOTE a3xx (and possibly a4xx?) might be different, using isaml
- * with scaled x coord according to requested sample:
- */
- if (tex->op == nir_texop_txf_ms) {
- if (ctx->compiler->txf_ms_with_isaml) {
- /* the samples are laid out in x dimension as
- * 0 1 2 3
- * x_ms = (x << ms) + sample_index;
- */
- struct ir3_instruction *ms;
- ms = create_immed(b, (ctx->samples >> (2 * tex->texture_index)) & 3);
-
- src0[0] = ir3_SHL_B(b, src0[0], 0, ms, 0);
- src0[0] = ir3_ADD_U(b, src0[0], 0, sample_index, 0);
-
- opc = OPC_ISAML;
- } else {
- src0[nsrc0++] = sample_index;
- }
- }
-
/* scale up integer coords for TXF based on the LOD */
if (ctx->compiler->unminify_coords && (opc == OPC_ISAML)) {
assert(has_lod);
if (coords == 1) {
/* hw doesn't do 1d, so we treat it as 2d with
* height of 1, and patch up the y coord.
- * TODO: y coord should be (int)0 in some cases..
*/
- src0[nsrc0++] = create_immed(b, fui(0.5));
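+ /* isam fetches with unnormalized integer coords, so the dummy
+ * y coord must be integer 0 rather than the 0.5 texel center
+ * used with float coords:
+ */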
+ if (is_isam(opc)) {
+ src0[nsrc0++] = create_immed(b, 0);
+ } else {
+ src0[nsrc0++] = create_immed(b, fui(0.5));
+ }
}
if (tex->is_shadow && tex->op != nir_texop_lod)
struct ir3_instruction *idx = coord[coords];
/* the array coord for cube arrays needs 0.5 added to it */
- if (ctx->compiler->array_index_add_half && (opc != OPC_ISAML))
+ if (ctx->compiler->array_index_add_half && !is_isam(opc))
idx = ir3_ADD_F(b, idx, 0, create_immed(b, fui(0.5)), 0);
src0[nsrc0++] = idx;
src0[nsrc0++] = create_immed(b, fui(0.0));
}
+ /* NOTE a3xx (and possibly a4xx?) might be different, using isaml
+ * with scaled x coord according to requested sample:
+ */
+ if (tex->op == nir_texop_txf_ms) {
+ if (ctx->compiler->txf_ms_with_isaml) {
+ /* the samples are laid out in x dimension as
+ * 0 1 2 3
+ * x_ms = (x << ms) + sample_index;
+ */
+ struct ir3_instruction *ms;
+ ms = create_immed(b, (ctx->samples >> (2 * tex->texture_index)) & 3);
+
+ src0[0] = ir3_SHL_B(b, src0[0], 0, ms, 0);
+ src0[0] = ir3_ADD_U(b, src0[0], 0, sample_index, 0);
+
+ opc = OPC_ISAML;
+ } else {
+ src0[nsrc0++] = sample_index;
+ }
+ }
+
/*
* second argument (if applicable):
* - offsets
if (opc == OPC_GETLOD)
type = TYPE_U32;
- unsigned tex_idx = tex->texture_index;
+ struct ir3_instruction *samp_tex = get_tex_samp_tex_src(ctx, tex);
+ struct ir3_instruction *col0 = ir3_create_collect(ctx, src0, nsrc0);
+ struct ir3_instruction *col1 = ir3_create_collect(ctx, src1, nsrc1);
- ctx->max_texture_index = MAX2(ctx->max_texture_index, tex_idx);
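+ /* only write the dest components that NIR actually uses: */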
+ sam = ir3_SAM(b, opc, type, MASK(ncomp), flags,
+ samp_tex, col0, col1);
- struct ir3_instruction *col0 = create_collect(ctx, src0, nsrc0);
- struct ir3_instruction *col1 = create_collect(ctx, src1, nsrc1);
-
- sam = ir3_SAM(b, opc, type, 0b1111, flags,
- tex_idx, tex_idx, col0, col1);
-
- if ((ctx->astc_srgb & (1 << tex_idx)) && !nir_tex_instr_is_query(tex)) {
+ if ((ctx->astc_srgb & (1 << tex->texture_index)) && !nir_tex_instr_is_query(tex)) {
/* only need first 3 components: */
sam->regs[0]->wrmask = 0x7;
- split_dest(b, dst, sam, 0, 3);
+ ir3_split_dest(b, dst, sam, 0, 3);
/* we need to sample the alpha separately with a non-ASTC
* texture state:
*/
sam = ir3_SAM(b, opc, type, 0b1000, flags,
- tex_idx, tex_idx, col0, col1);
+ samp_tex, col0, col1);
array_insert(ctx->ir, ctx->ir->astc_srgb, sam);
/* fixup .w component: */
- split_dest(b, &dst[3], sam, 3, 1);
+ ir3_split_dest(b, &dst[3], sam, 3, 1);
} else {
/* normal (non-workaround) case: */
- split_dest(b, dst, sam, 0, 4);
+ ir3_split_dest(b, dst, sam, 0, ncomp);
}
/* GETLOD returns results in 4.8 fixed point */
}
}
- put_dst(ctx, &tex->dest);
+ ir3_put_dst(ctx, &tex->dest);
}
static void
struct ir3_block *b = ctx->block;
struct ir3_instruction **dst, *sam;
- dst = get_dst(ctx, &tex->dest, 1);
+ dst = ir3_get_dst(ctx, &tex->dest, 1);
sam = ir3_SAM(b, OPC_GETINFO, TYPE_U32, 0b0100, 0,
- tex->texture_index, tex->texture_index, NULL, NULL);
+ get_tex_samp_tex_src(ctx, tex), NULL, NULL);
/* even though there is only one component, since it ends
 * up in .z rather than .x, we need an ir3_split_dest()
*/
- split_dest(b, dst, sam, 0, 3);
+ ir3_split_dest(b, dst, sam, 0, 3);
/* The # of levels comes from getinfo.z. We need to add 1 to it, since
* the value in TEX_CONST_0 is zero-based.
if (ctx->compiler->levels_add_one)
dst[0] = ir3_ADD_U(b, dst[0], 0, create_immed(b, 1), 0);
- put_dst(ctx, &tex->dest);
+ ir3_put_dst(ctx, &tex->dest);
}
static void
if (tex->sampler_dim == GLSL_SAMPLER_DIM_CUBE)
coords = 2;
- dst = get_dst(ctx, &tex->dest, 4);
+ dst = ir3_get_dst(ctx, &tex->dest, 4);
compile_assert(ctx, tex->num_srcs == 1);
compile_assert(ctx, tex->src[0].src_type == nir_tex_src_lod);
- lod = get_src(ctx, &tex->src[0].src)[0];
+ lod = ir3_get_src(ctx, &tex->src[0].src)[0];
sam = ir3_SAM(b, OPC_GETSIZE, TYPE_U32, 0b1111, flags,
- tex->texture_index, tex->texture_index, lod, NULL);
+ get_tex_samp_tex_src(ctx, tex), lod, NULL);
- split_dest(b, dst, sam, 0, 4);
+ ir3_split_dest(b, dst, sam, 0, 4);
/* Array size actually ends up in .w rather than .z. This doesn't
* matter for miplevel 0, but for higher mips the value in z is
}
}
- put_dst(ctx, &tex->dest);
+ ir3_put_dst(ctx, &tex->dest);
}
static void
*/
break;
default:
- compile_error(ctx, "Unhandled NIR jump type: %d\n", jump->type);
+ ir3_context_error(ctx, "Unhandled NIR jump type: %d\n", jump->type);
break;
}
}
break;
case nir_instr_type_phi:
/* we have converted phi webs to regs in NIR by now */
- compile_error(ctx, "Unexpected NIR instruction type: %d\n", instr->type);
+ ir3_context_error(ctx, "Unexpected NIR instruction type: %d\n", instr->type);
break;
case nir_instr_type_call:
case nir_instr_type_parallel_copy:
- compile_error(ctx, "Unhandled NIR instruction type: %d\n", instr->type);
+ ir3_context_error(ctx, "Unhandled NIR instruction type: %d\n", instr->type);
break;
}
}
static void
emit_if(struct ir3_context *ctx, nir_if *nif)
{
- struct ir3_instruction *condition = get_src(ctx, &nif->condition)[0];
+ struct ir3_instruction *condition = ir3_get_src(ctx, &nif->condition)[0];
ctx->block->condition =
- get_predicate(ctx, ir3_b2n(condition->block, condition));
+ ir3_get_predicate(ctx, ir3_b2n(condition->block, condition));
emit_cf_list(ctx, &nif->then_list);
emit_cf_list(ctx, &nif->else_list);
emit_cf_list(ctx, &nloop->body);
}
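+/* track the depth of nested flow control, since the deepest nesting
+ * determines the branch-stack size the hw needs to reserve (reported
+ * via so->branchstack below):
+ */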
+static void
+stack_push(struct ir3_context *ctx)
+{
+ ctx->stack++;
+ ctx->max_stack = MAX2(ctx->max_stack, ctx->stack);
+}
+
+static void
+stack_pop(struct ir3_context *ctx)
+{
+ compile_assert(ctx, ctx->stack > 0);
+ ctx->stack--;
+}
+
static void
emit_cf_list(struct ir3_context *ctx, struct exec_list *list)
{
emit_block(ctx, nir_cf_node_as_block(node));
break;
case nir_cf_node_if:
+ stack_push(ctx);
emit_if(ctx, nir_cf_node_as_if(node));
+ stack_pop(ctx);
break;
case nir_cf_node_loop:
+ stack_push(ctx);
emit_loop(ctx, nir_cf_node_as_loop(node));
+ stack_pop(ctx);
break;
case nir_cf_node_function:
- compile_error(ctx, "TODO\n");
+ ir3_context_error(ctx, "TODO\n");
break;
}
}
unsigned stride = strmout->stride[i];
struct ir3_instruction *base, *off;
- base = create_uniform(ctx, regid(v->constbase.tfbo, i));
+ base = create_uniform(ctx->block, regid(v->constbase.tfbo, i));
/* 24-bit should be enough: */
off = ir3_MUL_U(ctx->block, vtxcnt, 0,
{
nir_metadata_require(impl, nir_metadata_block_index);
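+ /* if/loop nesting should be balanced across the function body: */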
+ compile_assert(ctx, ctx->stack == 0);
+
emit_cf_list(ctx, &impl->body);
emit_block(ctx, impl->end_block);
+ compile_assert(ctx, ctx->stack == 0);
+
/* at this point, we should have a single empty block,
* into which we emit the 'end' instruction.
*/
/* defer add_sysval_input() until after all inputs created */
}
- split_dest(block, &instr, ctx->frag_coord, comp, 1);
+ ir3_split_dest(block, &instr, ctx->frag_coord, comp, 1);
switch (comp) {
case 0: /* .x */
struct ir3_shader_variant *so = ctx->so;
unsigned ncomp = glsl_get_components(in->type);
unsigned n = in->data.driver_location;
+ unsigned frac = in->data.location_frac;
unsigned slot = in->data.location;
- /* let's pretend things other than vec4 don't exist: */
- ncomp = MAX2(ncomp, 4);
-
 /* skip unread inputs, we could end up with (for example) unsplit
 * matrix/etc inputs when they are not read, so just silently
* skip these.
if (ncomp > 4)
return;
- compile_assert(ctx, ncomp == 4);
-
so->inputs[n].slot = slot;
- so->inputs[n].compmask = (1 << ncomp) - 1;
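+ /* NOTE: this mask covers components [0..frac+ncomp), ie. it also
+ * includes any unused components below location_frac:
+ */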
+ so->inputs[n].compmask = (1 << (ncomp + frac)) - 1;
so->inputs_count = MAX2(so->inputs_count, n + 1);
so->inputs[n].interpolate = in->data.interpolation;
if (ctx->so->type == MESA_SHADER_FRAGMENT) {
for (int i = 0; i < ncomp; i++) {
struct ir3_instruction *instr = NULL;
- unsigned idx = (n * 4) + i;
+ unsigned idx = (n * 4) + i + frac;
if (slot == VARYING_SLOT_POS) {
so->inputs[n].bary = false;
}
} else if (ctx->so->type == MESA_SHADER_VERTEX) {
for (int i = 0; i < ncomp; i++) {
- unsigned idx = (n * 4) + i;
+ unsigned idx = (n * 4) + i + frac;
compile_assert(ctx, idx < ctx->ir->ninputs);
ctx->ir->inputs[idx] = create_input(ctx, idx);
}
} else {
- compile_error(ctx, "unknown shader type: %d\n", ctx->so->type);
+ ir3_context_error(ctx, "unknown shader type: %d\n", ctx->so->type);
}
if (so->inputs[n].bary || (ctx->so->type == MESA_SHADER_VERTEX)) {
struct ir3_shader_variant *so = ctx->so;
unsigned ncomp = glsl_get_components(out->type);
unsigned n = out->data.driver_location;
+ unsigned frac = out->data.location_frac;
unsigned slot = out->data.location;
unsigned comp = 0;
- /* let's pretend things other than vec4 don't exist: */
- ncomp = MAX2(ncomp, 4);
- compile_assert(ctx, ncomp == 4);
-
if (ctx->so->type == MESA_SHADER_FRAGMENT) {
switch (slot) {
case FRAG_RESULT_DEPTH:
default:
if (slot >= FRAG_RESULT_DATA0)
break;
- compile_error(ctx, "unknown FS output name: %s\n",
+ ir3_context_error(ctx, "unknown FS output name: %s\n",
gl_frag_result_name(slot));
}
} else if (ctx->so->type == MESA_SHADER_VERTEX) {
break;
if ((VARYING_SLOT_TEX0 <= slot) && (slot <= VARYING_SLOT_TEX7))
break;
- compile_error(ctx, "unknown VS output name: %s\n",
+ ir3_context_error(ctx, "unknown VS output name: %s\n",
gl_varying_slot_name(slot));
}
} else {
- compile_error(ctx, "unknown shader type: %d\n", ctx->so->type);
+ ir3_context_error(ctx, "unknown shader type: %d\n", ctx->so->type);
}
compile_assert(ctx, n < ARRAY_SIZE(so->outputs));
so->outputs_count = MAX2(so->outputs_count, n + 1);
for (int i = 0; i < ncomp; i++) {
- unsigned idx = (n * 4) + i;
+ unsigned idx = (n * 4) + i + frac;
compile_assert(ctx, idx < ctx->ir->noutputs);
ctx->ir->outputs[idx] = create_immed(ctx->block, fui(0.0));
}
+
+ /* if varying packing doesn't happen, we could end up with "holes"
+ * in the output.  Since the per-generation code that sets up the
+ * varying linkage registers doesn't expect more than one varying
+ * per vec4 slot, pad the holes.
+ *
+ * Note that this should probably generate a performance warning of
+ * some sort.
+ */
+ for (int i = 0; i < frac; i++) {
+ unsigned idx = (n * 4) + i;
+ if (!ctx->ir->outputs[idx]) {
+ ctx->ir->outputs[idx] = create_immed(ctx->block, fui(0.0));
+ }
+ }
}
static int
[MESA_SHADER_FRAGMENT] = 24, // TODO
[MESA_SHADER_VERTEX] = 16,
[MESA_SHADER_COMPUTE] = 16, // TODO how many do we actually need?
+ [MESA_SHADER_KERNEL] = 16, // TODO how many do we actually need?
};
static void
struct ir3_instruction *xy[2];
vcoord = create_input_compmask(ctx, 0, 0x3);
- split_dest(ctx->block, xy, vcoord, 0, 2);
+ ir3_split_dest(ctx->block, xy, vcoord, 0, 2);
- ctx->frag_vcoord = create_collect(ctx, xy, 2);
+ ctx->frag_vcoord = ir3_create_collect(ctx, xy, 2);
}
/* Setup inputs: */
setup_output(ctx, var);
}
+ /* Find # of samplers: */
+ nir_foreach_variable(var, &ctx->s->uniforms) {
+ ctx->so->num_samp += glsl_type_get_sampler_count(var->type);
+ /* just assume that we'll be reading from any image.. a
+ * write-only image wouldn't need to be counted, but it
+ * isn't clear there is a good way to know that here:
+ */
+ ctx->so->num_samp += glsl_type_get_image_count(var->type);
+ }
+
/* Setup registers (which should only be arrays): */
nir_foreach_register(reg, &ctx->s->registers) {
- declare_array(ctx, reg);
+ ir3_declare_array(ctx, reg);
}
/* NOTE: need to do something more clever when we support >1 fxn */
nir_foreach_register(reg, &fxn->registers) {
- declare_array(ctx, reg);
+ ir3_declare_array(ctx, reg);
}
/* And emit the body: */
ctx->impl = fxn;
assert(!so->ir);
- ctx = compile_init(compiler, so);
+ ctx = ir3_context_init(compiler, so);
if (!ctx) {
DBG("INIT failed!");
ret = -1;
ir3_print(ir);
}
+ /* do Sethi–Ullman numbering before scheduling, giving the
+ * scheduler a per-subtree estimate of register demand:
+ */
+ ir3_sun(ir);
+
ret = ir3_sched(ir);
if (ret) {
DBG("SCHED failed!");
goto out;
}
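+ /* on a6xx, the dests of atomic instructions need some fixup
+ * after scheduling:
+ */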
+ if (compiler->gpu_id >= 600) {
+ ir3_a6xx_fixup_atomic_dests(ir, so);
+ }
+
if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
printf("AFTER SCHED:\n");
ir3_print(ir);
/* fixup input/outputs: */
for (i = 0; i < so->outputs_count; i++) {
- so->outputs[i].regid = ir->outputs[i*4]->regs[0]->num;
+ /* sometimes we get outputs that don't write the .x coord, like:
+ *
+ * decl_var shader_out INTERP_MODE_NONE float Color (VARYING_SLOT_VAR9.z, 1, 0)
+ *
+ * Presumably the result of varying packing and then eliminating
+ * some unneeded varyings? Just skip ahead to the first valid
+ * component of the output.
+ */
+ for (unsigned j = 0; j < 4; j++) {
+ struct ir3_instruction *instr = ir->outputs[(i*4) + j];
+ if (instr) {
+ so->outputs[i].regid = instr->regs[0]->num;
+ break;
+ }
+ }
}
/* Note that some or all channels of an input may be unused: */
 /* We need to do legalize after (for frag shaders) the "bary.f"
* offsets (inloc) have been assigned.
*/
- ir3_legalize(ir, &so->num_samp, &so->has_ssbo, &max_bary);
+ ir3_legalize(ir, &so->has_ssbo, &max_bary);
if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
printf("AFTER LEGALIZE:\n");
ir3_print(ir);
}
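+
+ /* the max depth of if/loop nesting seen during emit determines
+ * how much branch-stack the hw needs to reserve:
+ */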
+ so->branchstack = ctx->max_stack;
+
/* Note that actual_in counts inputs that are not bary.f'd for FS: */
if (so->type == MESA_SHADER_VERTEX)
so->total_in = actual_in;
else
so->total_in = max_bary + 1;
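+
+ /* record the max Sethi–Ullman number computed by ir3_sun(): */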
+ so->max_sun = ir->max_sun;
+
out:
if (ret) {
if (so->ir)
ir3_destroy(so->ir);
so->ir = NULL;
}
- compile_free(ctx);
+ ir3_context_free(ctx);
return ret;
}