-/* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */
-
/*
* Copyright (C) 2014 Rob Clark <robclark@freedesktop.org>
*
ir3_destroy(v->ir);
if (v->bo)
fd_bo_del(v->bo);
+ if (v->immediates)
+ free(v->immediates);
free(v);
}
static void
fixup_regfootprint(struct ir3_shader_variant *v)
{
- if (v->type == SHADER_VERTEX) {
- unsigned i;
- for (i = 0; i < v->inputs_count; i++) {
- /* skip frag inputs fetch via bary.f since their reg's are
- * not written by gpu before shader starts (and in fact the
- * regid's might not even be valid)
- */
- if (v->inputs[i].bary)
- continue;
+ unsigned i;
- if (v->inputs[i].compmask) {
- int32_t regid = (v->inputs[i].regid + 3) >> 2;
- v->info.max_reg = MAX2(v->info.max_reg, regid);
- }
- }
- for (i = 0; i < v->outputs_count; i++) {
- int32_t regid = (v->outputs[i].regid + 3) >> 2;
+ for (i = 0; i < v->inputs_count; i++) {
+		/* skip frag inputs fetched via bary.f, since their regs are
+		 * not written by the gpu before the shader starts (and in
+		 * fact the regids might not even be valid):
+		 */
+ if (v->inputs[i].bary)
+ continue;
+
+ /* ignore high regs that are global to all threads in a warp
+ * (they exist by default) (a5xx+)
+ */
+ if (v->inputs[i].regid >= regid(48,0))
+ continue;
+
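+		/* regid addresses scalar components (four per register), so
+		 * the highest written component picks the vec4 reg index:
+		 */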
+ if (v->inputs[i].compmask) {
+ unsigned n = util_last_bit(v->inputs[i].compmask) - 1;
+ int32_t regid = (v->inputs[i].regid + n) >> 2;
v->info.max_reg = MAX2(v->info.max_reg, regid);
}
- } else if (v->type == SHADER_FRAGMENT) {
- /* NOTE: not sure how to turn pos_regid off.. but this could
- * be, for example, r1.x while max reg used by the shader is
- * r0.*, in which case we need to fixup the reg footprint:
- */
- v->info.max_reg = MAX2(v->info.max_reg, v->pos_regid >> 2);
- if (v->frag_coord)
- debug_assert(v->info.max_reg >= 0); /* hard coded r0.x */
- if (v->frag_face)
- debug_assert(v->info.max_half_reg >= 0); /* hr0.x */
+ }
+
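+	/* outputs are conservatively assumed to write all four components
+	 * of their register, hence the fixed "+ 3":
+	 */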
+ for (i = 0; i < v->outputs_count; i++) {
+ int32_t regid = (v->outputs[i].regid + 3) >> 2;
+ v->info.max_reg = MAX2(v->info.max_reg, regid);
}
}
if (fd_mesa_debug & FD_DBG_DISASM) {
struct ir3_shader_key key = v->key;
- DBG("disassemble: type=%d, k={bp=%u,cts=%u,hp=%u}", v->type,
- key.binning_pass, key.color_two_side, key.half_precision);
- ir3_shader_disasm(v, bin);
+ printf("disassemble: type=%d, k={bp=%u,cts=%u,hp=%u}", v->type,
+ v->binning_pass, key.color_two_side, key.half_precision);
+ ir3_shader_disasm(v, bin, stdout);
+ }
+
+ if (shader_debug_enabled(v->shader->type)) {
+ fprintf(stderr, "Native code for unnamed %s shader %s:\n",
+ shader_stage_name(v->shader->type), v->shader->nir->info.name);
+ if (v->shader->type == SHADER_FRAGMENT)
+ fprintf(stderr, "SIMD0\n");
+ ir3_shader_disasm(v, bin, stderr);
}
free(bin);
pipe_debug_message(debug, SHADER_INFO, "\n"
"SHADER-DB: %s prog %d/%d: %u instructions, %u dwords\n"
"SHADER-DB: %s prog %d/%d: %u half, %u full\n"
- "SHADER-DB: %s prog %d/%d: %u const, %u constlen\n",
+ "SHADER-DB: %s prog %d/%d: %u const, %u constlen\n"
+ "SHADER-DB: %s prog %d/%d: %u (ss), %u (sy)\n",
ir3_shader_stage(v->shader),
v->shader->id, v->id,
			v->info.instrs_count,
			v->info.sizedwords,
			ir3_shader_stage(v->shader),
			v->shader->id, v->id,
			v->info.max_half_reg + 1,
			v->info.max_reg + 1,
			ir3_shader_stage(v->shader),
			v->shader->id, v->id,
			v->info.max_const + 1,
- v->constlen);
+ v->constlen,
+ ir3_shader_stage(v->shader),
+ v->shader->id, v->id,
+ v->info.ss, v->info.sy);
}
static struct ir3_shader_variant *
-create_variant(struct ir3_shader *shader, struct ir3_shader_key key)
+create_variant(struct ir3_shader *shader, struct ir3_shader_key key,
+ bool binning_pass)
{
struct ir3_shader_variant *v = CALLOC_STRUCT(ir3_shader_variant);
int ret;
v->id = ++shader->variant_count;
v->shader = shader;
+ v->binning_pass = binning_pass;
v->key = key;
v->type = shader->type;
return NULL;
}
-struct ir3_shader_variant *
-ir3_shader_variant(struct ir3_shader *shader, struct ir3_shader_key key,
+static inline struct ir3_shader_variant *
+shader_variant(struct ir3_shader *shader, struct ir3_shader_key key,
struct pipe_debug_callback *debug)
{
struct ir3_shader_variant *v;
*/
switch (shader->type) {
case SHADER_FRAGMENT:
- key.binning_pass = false;
if (key.has_per_samp) {
key.vsaturate_s = 0;
key.vsaturate_t = 0;
key.vsaturate_r = 0;
key.vastc_srgb = 0;
+ key.vsamples = 0;
}
break;
case SHADER_VERTEX:
key.fsaturate_t = 0;
key.fsaturate_r = 0;
key.fastc_srgb = 0;
+ key.fsamples = 0;
}
break;
default:
return v;
/* compile new variant if it doesn't exist already: */
- v = create_variant(shader, key);
+ v = create_variant(shader, key, false);
if (v) {
v->next = shader->variants;
shader->variants = v;
}
+struct ir3_shader_variant *
+ir3_shader_variant(struct ir3_shader *shader, struct ir3_shader_key key,
+ bool binning_pass, struct pipe_debug_callback *debug)
+{
+ struct ir3_shader_variant *v =
+ shader_variant(shader, key, debug);
+
+	if (v && binning_pass) {
+ if (!v->binning)
+ v->binning = create_variant(shader, key, true);
+ return v->binning;
+ }
+
+ return v;
+}
+
void
ir3_shader_destroy(struct ir3_shader *shader)
{
}
nir = ir3_tgsi_to_nir(cso->tokens);
}
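+	/* lower i/o variables to offset-based load/store intrinsics, with
+	 * ir3_glsl_type_size() providing the per-type slot counts:
+	 */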
+ NIR_PASS_V(nir, nir_lower_io, nir_var_all, ir3_glsl_type_size,
+ (nir_lower_io_options)0);
/* do first pass optimization, ignoring the key: */
shader->nir = ir3_optimize_nir(shader, nir, NULL);
if (fd_mesa_debug & FD_DBG_DISASM) {
*/
static struct ir3_shader_key key;
memset(&key, 0, sizeof(key));
- ir3_shader_variant(shader, key, debug);
+ ir3_shader_variant(shader, key, false, debug);
}
return shader;
}
if (cso->ir_type == PIPE_SHADER_IR_NIR) {
/* we take ownership of the reference: */
nir = (nir_shader *)cso->prog;
+
+ NIR_PASS_V(nir, nir_lower_io, nir_var_all, ir3_glsl_type_size,
+ (nir_lower_io_options)0);
} else {
debug_assert(cso->ir_type == PIPE_SHADER_IR_TGSI);
if (fd_mesa_debug & FD_DBG_DISASM) {
/* do first pass optimization, ignoring the key: */
shader->nir = ir3_optimize_nir(shader, nir, NULL);
if (fd_mesa_debug & FD_DBG_DISASM) {
- DBG("dump nir%d: type=%d", shader->id, shader->type);
+ printf("dump nir%d: type=%d\n", shader->id, shader->type);
nir_print_shader(shader->nir, stdout);
}
return shader;
}
-static void dump_reg(const char *name, uint32_t r)
+static void dump_reg(FILE *out, const char *name, uint32_t r)
{
if (r != regid(63,0))
- debug_printf("; %s: r%d.%c\n", name, r >> 2, "xyzw"[r & 0x3]);
+ fprintf(out, "; %s: r%d.%c\n", name, r >> 2, "xyzw"[r & 0x3]);
}
-static void dump_output(struct ir3_shader_variant *so,
+static void dump_output(FILE *out, struct ir3_shader_variant *so,
unsigned slot, const char *name)
{
uint32_t regid;
regid = ir3_find_output_regid(so, slot);
- dump_reg(name, regid);
+ dump_reg(out, name, regid);
}
void
-ir3_shader_disasm(struct ir3_shader_variant *so, uint32_t *bin)
+ir3_shader_disasm(struct ir3_shader_variant *so, uint32_t *bin, FILE *out)
{
struct ir3 *ir = so->ir;
struct ir3_register *reg;
for (i = 0; i < ir->ninputs; i++) {
if (!ir->inputs[i]) {
- debug_printf("; in%d unused\n", i);
+ fprintf(out, "; in%d unused\n", i);
continue;
}
reg = ir->inputs[i]->regs[0];
regid = reg->num;
- debug_printf("@in(%sr%d.%c)\tin%d\n",
+ fprintf(out, "@in(%sr%d.%c)\tin%d\n",
(reg->flags & IR3_REG_HALF) ? "h" : "",
(regid >> 2), "xyzw"[regid & 0x3], i);
}
for (i = 0; i < ir->noutputs; i++) {
if (!ir->outputs[i]) {
- debug_printf("; out%d unused\n", i);
+ fprintf(out, "; out%d unused\n", i);
continue;
}
/* kill shows up as a virtual output.. skip it! */
continue;
reg = ir->outputs[i]->regs[0];
regid = reg->num;
- debug_printf("@out(%sr%d.%c)\tout%d\n",
+ fprintf(out, "@out(%sr%d.%c)\tout%d\n",
(reg->flags & IR3_REG_HALF) ? "h" : "",
(regid >> 2), "xyzw"[regid & 0x3], i);
}
for (i = 0; i < so->immediates_count; i++) {
- debug_printf("@const(c%d.x)\t", so->constbase.immediate + i);
- debug_printf("0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
+ fprintf(out, "@const(c%d.x)\t", so->constbase.immediate + i);
+ fprintf(out, "0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
so->immediates[i].val[0],
so->immediates[i].val[1],
so->immediates[i].val[2],
so->immediates[i].val[3]);
}
- disasm_a3xx(bin, so->info.sizedwords, 0, so->type);
+ disasm_a3xx(bin, so->info.sizedwords, 0, out);
switch (so->type) {
case SHADER_VERTEX:
- debug_printf("; %s: outputs:", type);
+ fprintf(out, "; %s: outputs:", type);
for (i = 0; i < so->outputs_count; i++) {
uint8_t regid = so->outputs[i].regid;
- debug_printf(" r%d.%c (%s)",
+ fprintf(out, " r%d.%c (%s)",
(regid >> 2), "xyzw"[regid & 0x3],
gl_varying_slot_name(so->outputs[i].slot));
}
- debug_printf("\n");
- debug_printf("; %s: inputs:", type);
+ fprintf(out, "\n");
+ fprintf(out, "; %s: inputs:", type);
for (i = 0; i < so->inputs_count; i++) {
uint8_t regid = so->inputs[i].regid;
- debug_printf(" r%d.%c (cm=%x,il=%u,b=%u)",
+ fprintf(out, " r%d.%c (cm=%x,il=%u,b=%u)",
(regid >> 2), "xyzw"[regid & 0x3],
so->inputs[i].compmask,
so->inputs[i].inloc,
so->inputs[i].bary);
}
- debug_printf("\n");
+ fprintf(out, "\n");
break;
case SHADER_FRAGMENT:
- debug_printf("; %s: outputs:", type);
+ fprintf(out, "; %s: outputs:", type);
for (i = 0; i < so->outputs_count; i++) {
uint8_t regid = so->outputs[i].regid;
- debug_printf(" r%d.%c (%s)",
+ fprintf(out, " r%d.%c (%s)",
(regid >> 2), "xyzw"[regid & 0x3],
gl_frag_result_name(so->outputs[i].slot));
}
- debug_printf("\n");
- debug_printf("; %s: inputs:", type);
+ fprintf(out, "\n");
+ fprintf(out, "; %s: inputs:", type);
for (i = 0; i < so->inputs_count; i++) {
uint8_t regid = so->inputs[i].regid;
- debug_printf(" r%d.%c (%s,cm=%x,il=%u,b=%u)",
+ fprintf(out, " r%d.%c (%s,cm=%x,il=%u,b=%u)",
(regid >> 2), "xyzw"[regid & 0x3],
gl_varying_slot_name(so->inputs[i].slot),
so->inputs[i].compmask,
so->inputs[i].inloc,
so->inputs[i].bary);
}
- debug_printf("\n");
+ fprintf(out, "\n");
break;
default:
/* TODO */
}
/* print generic shader info: */
- debug_printf("; %s prog %d/%d: %u instructions, %d half, %d full\n",
+ fprintf(out, "; %s prog %d/%d: %u instructions, %d half, %d full\n",
type, so->shader->id, so->id,
so->info.instrs_count,
so->info.max_half_reg + 1,
so->info.max_reg + 1);
- debug_printf("; %d const, %u constlen\n",
+ fprintf(out, "; %d const, %u constlen\n",
so->info.max_const + 1,
so->constlen);
+ fprintf(out, "; %u (ss), %u (sy)\n", so->info.ss, so->info.sy);
+
/* print shader type specific info: */
switch (so->type) {
case SHADER_VERTEX:
- dump_output(so, VARYING_SLOT_POS, "pos");
- dump_output(so, VARYING_SLOT_PSIZ, "psize");
+ dump_output(out, so, VARYING_SLOT_POS, "pos");
+ dump_output(out, so, VARYING_SLOT_PSIZ, "psize");
break;
case SHADER_FRAGMENT:
- dump_reg("pos (bary)", so->pos_regid);
- dump_output(so, FRAG_RESULT_DEPTH, "posz");
+ dump_reg(out, "pos (bary)",
+ ir3_find_sysval_regid(so, SYSTEM_VALUE_VARYING_COORD));
+ dump_output(out, so, FRAG_RESULT_DEPTH, "posz");
if (so->color0_mrt) {
- dump_output(so, FRAG_RESULT_COLOR, "color");
+ dump_output(out, so, FRAG_RESULT_COLOR, "color");
} else {
- dump_output(so, FRAG_RESULT_DATA0, "data0");
- dump_output(so, FRAG_RESULT_DATA1, "data1");
- dump_output(so, FRAG_RESULT_DATA2, "data2");
- dump_output(so, FRAG_RESULT_DATA3, "data3");
- dump_output(so, FRAG_RESULT_DATA4, "data4");
- dump_output(so, FRAG_RESULT_DATA5, "data5");
- dump_output(so, FRAG_RESULT_DATA6, "data6");
- dump_output(so, FRAG_RESULT_DATA7, "data7");
+ dump_output(out, so, FRAG_RESULT_DATA0, "data0");
+ dump_output(out, so, FRAG_RESULT_DATA1, "data1");
+ dump_output(out, so, FRAG_RESULT_DATA2, "data2");
+ dump_output(out, so, FRAG_RESULT_DATA3, "data3");
+ dump_output(out, so, FRAG_RESULT_DATA4, "data4");
+ dump_output(out, so, FRAG_RESULT_DATA5, "data5");
+ dump_output(out, so, FRAG_RESULT_DATA6, "data6");
+ dump_output(out, so, FRAG_RESULT_DATA7, "data7");
}
/* these two are hard-coded since we don't know how to
* program them to anything but all 0's...
*/
if (so->frag_coord)
- debug_printf("; fragcoord: r0.x\n");
+ fprintf(out, "; fragcoord: r0.x\n");
if (so->frag_face)
- debug_printf("; fragface: hr0.x\n");
+ fprintf(out, "; fragface: hr0.x\n");
break;
default:
/* TODO */
break;
}
- debug_printf("\n");
+ fprintf(out, "\n");
}
uint64_t
#include "freedreno_resource.h"
+static inline bool
+is_stateobj(struct fd_ringbuffer *ring)
+{
+ /* XXX this is an ugly way to differentiate.. */
+ return !!(ring->flags & FD_RINGBUFFER_STREAMING);
+}
+
+static inline void
+ring_wfi(struct fd_batch *batch, struct fd_ringbuffer *ring)
+{
+ /* when we emit const state via ring (IB2) we need a WFI, but when
+ * it is emit'd via stateobj, we don't
+ */
+ if (is_stateobj(ring))
+ return;
+
+ fd_wfi(batch, ring);
+}
+
static void
emit_user_consts(struct fd_context *ctx, const struct ir3_shader_variant *v,
struct fd_ringbuffer *ring, struct fd_constbuf_stateobj *constbuf)
{
const unsigned index = 0; /* user consts are index 0 */
- /* TODO save/restore dirty_mask for binning pass instead: */
- uint32_t dirty_mask = constbuf->enabled_mask;
- if (dirty_mask & (1 << index)) {
+ if (constbuf->enabled_mask & (1 << index)) {
struct pipe_constant_buffer *cb = &constbuf->cb[index];
unsigned size = align(cb->buffer_size, 4) / 4; /* size in dwords */
size = MIN2(size, 4 * max_const);
if (size > 0) {
- fd_wfi(ctx->batch, ring);
+ ring_wfi(ctx->batch, ring);
ctx->emit_const(ring, v->type, 0,
cb->buffer_offset, size,
cb->user_buffer, cb->buffer);
- constbuf->dirty_mask &= ~(1 << index);
}
}
}
}
}
- fd_wfi(ctx->batch, ring);
+ ring_wfi(ctx->batch, ring);
ctx->emit_const_bo(ring, v->type, false, offset * 4, params, prscs, offsets);
}
}
+static void
+emit_ssbo_sizes(struct fd_context *ctx, const struct ir3_shader_variant *v,
+ struct fd_ringbuffer *ring, struct fd_shaderbuf_stateobj *sb)
+{
+ uint32_t offset = v->constbase.ssbo_sizes;
+ if (v->constlen > offset) {
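+		/* consts are emitted in vec4 units, so pad the size array out
+		 * to a multiple of four dwords:
+		 */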
+ uint32_t sizes[align(v->const_layout.ssbo_size.count, 4)];
+ unsigned mask = v->const_layout.ssbo_size.mask;
+
+ while (mask) {
+ unsigned index = u_bit_scan(&mask);
+ unsigned off = v->const_layout.ssbo_size.off[index];
+ sizes[off] = sb->sb[index].buffer_size;
+ }
+
+ ring_wfi(ctx->batch, ring);
+ ctx->emit_const(ring, v->type, offset * 4,
+ 0, ARRAY_SIZE(sizes), sizes, NULL);
+ }
+}
+
+static void
+emit_image_dims(struct fd_context *ctx, const struct ir3_shader_variant *v,
+ struct fd_ringbuffer *ring, struct fd_shaderimg_stateobj *si)
+{
+ uint32_t offset = v->constbase.image_dims;
+ if (v->constlen > offset) {
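+		/* likewise padded out to vec4 (four dword) granularity: */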
+ uint32_t dims[align(v->const_layout.image_dims.count, 4)];
+ unsigned mask = v->const_layout.image_dims.mask;
+
+ while (mask) {
+ struct pipe_image_view *img;
+ struct fd_resource *rsc;
+ unsigned index = u_bit_scan(&mask);
+ unsigned off = v->const_layout.image_dims.off[index];
+
+ img = &si->si[index];
+ rsc = fd_resource(img->resource);
+
+ dims[off + 0] = util_format_get_blocksize(img->format);
+ if (img->resource->target != PIPE_BUFFER) {
+ unsigned lvl = img->u.tex.level;
+ /* note for 2d/cube/etc images, even if re-interpreted
+ * as a different color format, the pixel size should
+ * be the same, so use original dimensions for y and z
+ * stride:
+ */
+ dims[off + 1] = rsc->slices[lvl].pitch * rsc->cpp;
+ /* see corresponding logic in fd_resource_offset(): */
+ if (rsc->layer_first) {
+ dims[off + 2] = rsc->layer_size;
+ } else {
+ dims[off + 2] = rsc->slices[lvl].size0;
+ }
+ } else {
+ /* For buffer-backed images, the log2 of the format's
+ * bytes-per-pixel is placed on the 2nd slot. This is useful
+ * when emitting image_size instructions, for which we need
+ * to divide by bpp for image buffers. Since the bpp
+ * can only be power-of-two, the division is implemented
+ * as a SHR, and for that it is handy to have the log2 of
+ * bpp as a constant. (log2 = first-set-bit - 1)
+ */
+ dims[off + 1] = ffs(dims[off + 0]) - 1;
+ }
+ }
+
+ ring_wfi(ctx->batch, ring);
+ ctx->emit_const(ring, v->type, offset * 4,
+ 0, ARRAY_SIZE(dims), dims, NULL);
+ }
+}
+
static void
emit_immediates(struct fd_context *ctx, const struct ir3_shader_variant *v,
struct fd_ringbuffer *ring)
size *= 4;
if (size > 0) {
- fd_wfi(ctx->batch, ring);
+ ring_wfi(ctx->batch, ring);
ctx->emit_const(ring, v->type, base,
0, size, v->immediates[0].val, NULL);
}
}
}
- fd_wfi(ctx->batch, ring);
+ ring_wfi(ctx->batch, ring);
ctx->emit_const_bo(ring, v->type, true, offset * 4, params, prscs, offsets);
}
}
if (ctx->screen->gpu_id >= 500)
return 0;
- if (v->key.binning_pass)
+ if (v->binning_pass)
return 0;
if (v->shader->stream_output.num_outputs == 0)
return 0;
return maxvtxcnt;
}
-void
-ir3_emit_vs_consts(const struct ir3_shader_variant *v, struct fd_ringbuffer *ring,
- struct fd_context *ctx, const struct pipe_draw_info *info)
+static void
+emit_common_consts(const struct ir3_shader_variant *v, struct fd_ringbuffer *ring,
+ struct fd_context *ctx, enum pipe_shader_type t)
{
- enum fd_dirty_shader_state dirty = ctx->dirty_shader[PIPE_SHADER_VERTEX];
-
- debug_assert(v->type == SHADER_VERTEX);
+ enum fd_dirty_shader_state dirty = ctx->dirty_shader[t];
+
+ /* When we use CP_SET_DRAW_STATE objects to emit constant state,
+ * if we emit any of it we need to emit all. This is because
+ * we are using the same state-group-id each time for uniform
+ * state, and if previous update is never evaluated (due to no
+ * visible primitives in the current tile) then the new stateobj
+ * completely replaces the old one.
+ *
+ * Possibly if we split up different parts of the const state to
+ * different state-objects we could avoid this.
+ */
+ if (dirty && is_stateobj(ring))
+ dirty = ~0;
if (dirty & (FD_DIRTY_SHADER_PROG | FD_DIRTY_SHADER_CONST)) {
struct fd_constbuf_stateobj *constbuf;
bool shader_dirty;
- constbuf = &ctx->constbuf[PIPE_SHADER_VERTEX];
+ constbuf = &ctx->constbuf[t];
shader_dirty = !!(dirty & FD_DIRTY_SHADER_PROG);
emit_user_consts(ctx, v, ring, constbuf);
emit_immediates(ctx, v, ring);
}
+ if (dirty & (FD_DIRTY_SHADER_PROG | FD_DIRTY_SHADER_SSBO)) {
+ struct fd_shaderbuf_stateobj *sb = &ctx->shaderbuf[t];
+ emit_ssbo_sizes(ctx, v, ring, sb);
+ }
+
+ if (dirty & (FD_DIRTY_SHADER_PROG | FD_DIRTY_SHADER_IMAGE)) {
+ struct fd_shaderimg_stateobj *si = &ctx->shaderimg[t];
+ emit_image_dims(ctx, v, ring, si);
+ }
+}
+
+void
+ir3_emit_vs_consts(const struct ir3_shader_variant *v, struct fd_ringbuffer *ring,
+ struct fd_context *ctx, const struct pipe_draw_info *info)
+{
+ debug_assert(v->type == SHADER_VERTEX);
+
+ emit_common_consts(v, ring, ctx, PIPE_SHADER_VERTEX);
+
/* emit driver params every time: */
/* TODO skip emit if shader doesn't use driver params to avoid WFI.. */
if (info) {
vertex_params_size = ARRAY_SIZE(vertex_params);
}
- fd_wfi(ctx->batch, ring);
- ctx->emit_const(ring, SHADER_VERTEX, offset * 4, 0,
- vertex_params_size, vertex_params, NULL);
+ ring_wfi(ctx->batch, ring);
+
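+	/* regid(63, 0) is the "unused" sentinel (compare dump_reg() above): */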
+ bool needs_vtxid_base =
+ ir3_find_sysval_regid(v, SYSTEM_VALUE_VERTEX_ID_ZERO_BASE) != regid(63, 0);
+
+ /* for indirect draw, we need to copy VTXID_BASE from
+ * indirect-draw parameters buffer.. which is annoying
+ * and means we can't easily emit these consts in cmd
+ * stream so need to copy them to bo.
+ */
+ if (info->indirect && needs_vtxid_base) {
+ struct pipe_draw_indirect_info *indirect = info->indirect;
+ struct pipe_resource *vertex_params_rsc =
+ pipe_buffer_create(&ctx->screen->base,
+ PIPE_BIND_CONSTANT_BUFFER, PIPE_USAGE_STREAM,
+ vertex_params_size * 4);
+		unsigned src_off = info->indirect->offset;
+ void *ptr;
+
+ ptr = fd_bo_map(fd_resource(vertex_params_rsc)->bo);
+ memcpy(ptr, vertex_params, vertex_params_size * 4);
+
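+		/* the indirect buffer follows GL's indirect command layouts:
+		 *   non-indexed: { count, instance_count, start, base_instance }
+		 *   indexed:     { count, instance_count, start, index_bias, base_instance }
+		 */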
+ if (info->index_size) {
+ /* indexed draw, index_bias is 4th field: */
+ src_off += 3 * 4;
+ } else {
+ /* non-indexed draw, start is 3rd field: */
+ src_off += 2 * 4;
+ }
+
+ /* copy index_bias or start from draw params: */
+ ctx->mem_to_mem(ring, vertex_params_rsc, 0,
+ indirect->buffer, src_off, 1);
+
+ ctx->emit_const(ring, SHADER_VERTEX, offset * 4, 0,
+ vertex_params_size, NULL, vertex_params_rsc);
+
+ pipe_resource_reference(&vertex_params_rsc, NULL);
+ } else {
+ ctx->emit_const(ring, SHADER_VERTEX, offset * 4, 0,
+ vertex_params_size, vertex_params, NULL);
+ }
/* if needed, emit stream-out buffer addresses: */
if (vertex_params[IR3_DP_VTXCNT_MAX] > 0) {
ir3_emit_fs_consts(const struct ir3_shader_variant *v, struct fd_ringbuffer *ring,
struct fd_context *ctx)
{
- enum fd_dirty_shader_state dirty = ctx->dirty_shader[PIPE_SHADER_FRAGMENT];
-
debug_assert(v->type == SHADER_FRAGMENT);
- if (dirty & (FD_DIRTY_SHADER_PROG | FD_DIRTY_SHADER_CONST)) {
- struct fd_constbuf_stateobj *constbuf;
- bool shader_dirty;
-
- constbuf = &ctx->constbuf[PIPE_SHADER_FRAGMENT];
- shader_dirty = !!(dirty & FD_DIRTY_SHADER_PROG);
-
- emit_user_consts(ctx, v, ring, constbuf);
- emit_ubos(ctx, v, ring, constbuf);
- if (shader_dirty)
- emit_immediates(ctx, v, ring);
- }
+ emit_common_consts(v, ring, ctx, PIPE_SHADER_FRAGMENT);
}
/* emit compute-shader consts: */
ir3_emit_cs_consts(const struct ir3_shader_variant *v, struct fd_ringbuffer *ring,
struct fd_context *ctx, const struct pipe_grid_info *info)
{
- enum fd_dirty_shader_state dirty = ctx->dirty_shader[PIPE_SHADER_COMPUTE];
-
- if (dirty & (FD_DIRTY_SHADER_PROG | FD_DIRTY_SHADER_CONST)) {
- struct fd_constbuf_stateobj *constbuf;
- bool shader_dirty;
-
- constbuf = &ctx->constbuf[PIPE_SHADER_COMPUTE];
- shader_dirty = !!(dirty & FD_DIRTY_SHADER_PROG);
+ debug_assert(v->type == SHADER_COMPUTE);
- emit_user_consts(ctx, v, ring, constbuf);
- emit_ubos(ctx, v, ring, constbuf);
- if (shader_dirty)
- emit_immediates(ctx, v, ring);
- }
+ emit_common_consts(v, ring, ctx, PIPE_SHADER_COMPUTE);
/* emit compute-shader driver-params: */
uint32_t offset = v->constbase.driver_param;
if (v->constlen > offset) {
- uint32_t compute_params[IR3_DP_CS_COUNT] = {
- [IR3_DP_NUM_WORK_GROUPS_X] = info->grid[0],
- [IR3_DP_NUM_WORK_GROUPS_Y] = info->grid[1],
- [IR3_DP_NUM_WORK_GROUPS_Z] = info->grid[2],
- /* do we need work-group-size? */
- };
-
- fd_wfi(ctx->batch, ring);
- ctx->emit_const(ring, SHADER_COMPUTE, offset * 4, 0,
- ARRAY_SIZE(compute_params), compute_params, NULL);
+ ring_wfi(ctx->batch, ring);
+
+ if (info->indirect) {
+ struct pipe_resource *indirect = NULL;
+ unsigned indirect_offset;
+
+ /* This is a bit awkward, but CP_LOAD_STATE.EXT_SRC_ADDR needs
+ * to be aligned more strongly than 4 bytes. So in this case
+ * we need a temporary buffer to copy NumWorkGroups.xyz to.
+ *
+ * TODO if previous compute job is writing to info->indirect,
+ * we might need a WFI.. but since we currently flush for each
+ * compute job, we are probably ok for now.
+ */
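+			/* the 0xf mask checks 16-byte alignment: */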
+ if (info->indirect_offset & 0xf) {
+ indirect = pipe_buffer_create(&ctx->screen->base,
+ PIPE_BIND_COMMAND_ARGS_BUFFER, PIPE_USAGE_STREAM,
+ 0x1000);
+ indirect_offset = 0;
+
+ ctx->mem_to_mem(ring, indirect, 0, info->indirect,
+ info->indirect_offset, 3);
+ } else {
+ pipe_resource_reference(&indirect, info->indirect);
+ indirect_offset = info->indirect_offset;
+ }
+
+ ctx->emit_const(ring, SHADER_COMPUTE, offset * 4,
+ indirect_offset, 4, NULL, indirect);
+
+ pipe_resource_reference(&indirect, NULL);
+ } else {
+ uint32_t compute_params[IR3_DP_CS_COUNT] = {
+ [IR3_DP_NUM_WORK_GROUPS_X] = info->grid[0],
+ [IR3_DP_NUM_WORK_GROUPS_Y] = info->grid[1],
+ [IR3_DP_NUM_WORK_GROUPS_Z] = info->grid[2],
+ [IR3_DP_LOCAL_GROUP_SIZE_X] = info->block[0],
+ [IR3_DP_LOCAL_GROUP_SIZE_Y] = info->block[1],
+ [IR3_DP_LOCAL_GROUP_SIZE_Z] = info->block[2],
+ };
+
+ ctx->emit_const(ring, SHADER_COMPUTE, offset * 4, 0,
+ ARRAY_SIZE(compute_params), compute_params, NULL);
+ }
}
}