#include "util/u_memory.h"
#include "util/u_helpers.h"
#include "util/u_format.h"
+#include "util/u_viewport.h"
#include "freedreno_resource.h"
+#include "freedreno_query_hw.h"
#include "fd3_emit.h"
#include "fd3_blend.h"
static void
fd3_emit_const_bo(struct fd_ringbuffer *ring, enum shader_t type, boolean write,
- uint32_t regid, uint32_t num, struct fd_bo **bos, uint32_t *offsets)
+ uint32_t regid, uint32_t num, struct pipe_resource **prscs, uint32_t *offsets)
{
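+ /* note: const buffers may now be backed by pipe_resources rather than raw bo's; the backing bo is resolved per-slot below */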
uint32_t i;
CP_LOAD_STATE_1_STATE_TYPE(ST_CONSTANTS));
for (i = 0; i < num; i++) {
- if (bos[i]) {
+ if (prscs[i]) {
if (write) {
- OUT_RELOCW(ring, bos[i], offsets[i], 0, 0);
+ OUT_RELOCW(ring, fd_resource(prscs[i])->bo, offsets[i], 0, 0);
} else {
- OUT_RELOC(ring, bos[i], offsets[i], 0, 0);
+ OUT_RELOC(ring, fd_resource(prscs[i])->bo, offsets[i], 0, 0);
}
} else {
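+ /* no buffer bound at this slot: emit a recognizable bogus address (with the slot index encoded) rather than a stale reloc */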
OUT_RING(ring, 0xbad00000 | (i << 16));
[SB_FRAG_TEX] = REG_A3XX_TPL1_TP_FS_BORDER_COLOR_BASE_ADDR,
};
struct fd3_context *fd3_ctx = fd3_context(ctx);
- unsigned i, j, off;
- void *ptr;
-
- u_upload_alloc(fd3_ctx->border_color_uploader,
- 0, 2 * PIPE_MAX_SAMPLERS * BORDERCOLOR_SIZE, &off,
- &fd3_ctx->border_color_buf,
- &ptr);
+ bool needs_border = false;
+ unsigned i, j;
if (tex->num_samplers > 0) {
/* output sampler state: */
const struct fd3_sampler_stateobj *sampler = tex->samplers[i] ?
fd3_sampler_stateobj(tex->samplers[i]) :
&dummy_sampler;
- uint16_t *bcolor = (uint16_t *)((uint8_t *)ptr +
- (BORDERCOLOR_SIZE * tex_off[sb]) +
- (BORDERCOLOR_SIZE * i));
- uint32_t *bcolor32 = (uint32_t *)&bcolor[16];
-
- /*
- * XXX HACK ALERT XXX
- *
- * The border colors need to be swizzled in a particular
- * format-dependent order. Even though samplers don't know about
- * formats, we can assume that with a GL state tracker, there's a
- * 1:1 correspondence between sampler and texture. Take advantage
- * of that knowledge.
- */
- if (i < tex->num_textures && tex->textures[i]) {
- const struct util_format_description *desc =
- util_format_description(tex->textures[i]->format);
- for (j = 0; j < 4; j++) {
- if (desc->swizzle[j] >= 4)
- continue;
-
- const struct util_format_channel_description *chan =
- &desc->channel[desc->swizzle[j]];
- int size = chan->size;
-
- /* The Z16 texture format we use seems to look in the
- * 32-bit border color slots
- */
- if (desc->colorspace == UTIL_FORMAT_COLORSPACE_ZS)
- size = 32;
-
- /* Formats like R11G11B10 or RGB9_E5 don't specify
- * per-channel sizes properly.
- */
- if (desc->layout == UTIL_FORMAT_LAYOUT_OTHER)
- size = 16;
-
- if (chan->pure_integer && size > 16)
- bcolor32[desc->swizzle[j] + 4] =
- sampler->base.border_color.i[j];
- else if (size > 16)
- bcolor32[desc->swizzle[j]] =
- fui(sampler->base.border_color.f[j]);
- else if (chan->pure_integer)
- bcolor[desc->swizzle[j] + 8] =
- sampler->base.border_color.i[j];
- else
- bcolor[desc->swizzle[j]] =
- util_float_to_half(sampler->base.border_color.f[j]);
- }
- }
OUT_RING(ring, sampler->texsamp0);
OUT_RING(ring, sampler->texsamp1);
+
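+ /* track whether any bound sampler actually uses a border color, so the upload below can be skipped when none do: */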
+ needs_border |= sampler->needs_border;
}
}
fd3_pipe_sampler_view(tex->textures[i]) :
&dummy_view;
struct fd_resource *rsc = fd_resource(view->base.texture);
- unsigned start = fd_sampler_first_level(&view->base);
- unsigned end = fd_sampler_last_level(&view->base);;
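+ /* buffer textures have no mip slices; emit a single reloc at the view's offset into the bo: */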
+ if (rsc && rsc->base.b.target == PIPE_BUFFER) {
+ OUT_RELOC(ring, rsc->bo, view->base.u.buf.offset, 0, 0);
+ j = 1;
+ } else {
+ unsigned start = fd_sampler_first_level(&view->base);
+ unsigned end = fd_sampler_last_level(&view->base);
-    for (j = 0; j < (end - start + 1); j++) {
-        struct fd_resource_slice *slice =
-            fd_resource_slice(rsc, j + start);
-        OUT_RELOC(ring, rsc->bo, slice->offset, 0, 0);
+        for (j = 0; j < (end - start + 1); j++) {
+            struct fd_resource_slice *slice =
+                fd_resource_slice(rsc, j + start);
+            OUT_RELOC(ring, rsc->bo, slice->offset, 0, 0);
+        }
}
/* pad the remaining entries w/ null: */
}
}
- OUT_PKT0(ring, bcolor_reg[sb], 1);
- OUT_RELOC(ring, fd_resource(fd3_ctx->border_color_buf)->bo, off, 0, 0);
+ if (needs_border) {
+ unsigned off;
+ void *ptr;
+
+ u_upload_alloc(fd3_ctx->border_color_uploader,
+ 0, BORDER_COLOR_UPLOAD_SIZE,
+ BORDER_COLOR_UPLOAD_SIZE, &off,
+ &fd3_ctx->border_color_buf,
+ &ptr);
+
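+ /* the format-dependent border color swizzling that used to be open-coded here now lives in the shared fd_setup_border_colors() helper: */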
+ fd_setup_border_colors(tex, ptr, tex_off[sb]);
- u_upload_unmap(fd3_ctx->border_color_uploader);
+ OUT_PKT0(ring, bcolor_reg[sb], 1);
+ OUT_RELOC(ring, fd_resource(fd3_ctx->border_color_buf)->bo, off, 0, 0);
+
+ u_upload_unmap(fd3_ctx->border_color_uploader);
+ }
}
/* emit texture state for mem->gmem restore operation.. eventually it would
OUT_RING(ring, A3XX_TEX_CONST_0_FMT(fd3_pipe2tex(format)) |
A3XX_TEX_CONST_0_TYPE(A3XX_TEX_2D) |
- fd3_tex_swiz(format, PIPE_SWIZZLE_RED, PIPE_SWIZZLE_GREEN,
- PIPE_SWIZZLE_BLUE, PIPE_SWIZZLE_ALPHA));
+ fd3_tex_swiz(format, PIPE_SWIZZLE_X, PIPE_SWIZZLE_Y,
+ PIPE_SWIZZLE_Z, PIPE_SWIZZLE_W));
OUT_RING(ring, A3XX_TEX_CONST_1_FETCHSIZE(TFETCH_DISABLE) |
A3XX_TEX_CONST_1_WIDTH(psurf[i]->width) |
A3XX_TEX_CONST_1_HEIGHT(psurf[i]->height));
int32_t i, j, last = -1;
uint32_t total_in = 0;
const struct fd_vertex_state *vtx = emit->vtx;
- struct ir3_shader_variant *vp = fd3_emit_get_vp(emit);
+ const struct ir3_shader_variant *vp = fd3_emit_get_vp(emit);
unsigned vertex_regid = regid(63, 0);
unsigned instance_regid = regid(63, 0);
unsigned vtxcnt_regid = regid(63, 0);
+ /* Note that sysvals come *after* normal inputs: */
for (i = 0; i < vp->inputs_count; i++) {
- uint8_t semantic = sem2name(vp->inputs[i].semantic);
- if (semantic == TGSI_SEMANTIC_VERTEXID_NOBASE)
- vertex_regid = vp->inputs[i].regid;
- else if (semantic == TGSI_SEMANTIC_INSTANCEID)
- instance_regid = vp->inputs[i].regid;
- else if (semantic == IR3_SEMANTIC_VTXCNT)
- vtxcnt_regid = vp->inputs[i].regid;
- else if (i < vtx->vtx->num_elements && vp->inputs[i].compmask)
+ if (!vp->inputs[i].compmask)
+ continue;
+ if (vp->inputs[i].sysval) {
+ switch (vp->inputs[i].slot) {
+ case SYSTEM_VALUE_BASE_VERTEX:
+ /* handled elsewhere */
+ break;
+ case SYSTEM_VALUE_VERTEX_ID_ZERO_BASE:
+ vertex_regid = vp->inputs[i].regid;
+ break;
+ case SYSTEM_VALUE_INSTANCE_ID:
+ instance_regid = vp->inputs[i].regid;
+ break;
+ case SYSTEM_VALUE_VERTEX_CNT:
+ vtxcnt_regid = vp->inputs[i].regid;
+ break;
+ default:
+ unreachable("invalid system value");
+ break;
+ }
+ } else if (i < vtx->vtx->num_elements) {
last = i;
+ }
}
- /* hw doesn't like to be configured for zero vbo's, it seems: */
- if ((vtx->vtx->num_elements == 0) &&
- (vertex_regid == regid(63, 0)) &&
- (instance_regid == regid(63, 0)) &&
- (vtxcnt_regid == regid(63, 0)))
- return;
-
for (i = 0, j = 0; i <= last; i++) {
- assert(sem2name(vp->inputs[i].semantic) == 0);
+ assert(!vp->inputs[i].sysval);
if (vp->inputs[i].compmask) {
struct pipe_vertex_element *elem = &vtx->vtx->pipe[i];
const struct pipe_vertex_buffer *vb =
}
}
+ /* hw doesn't like to be configured for zero vbo's, it seems: */
+ if (last < 0) {
+ /* just recycle the shader bo, we just need to point to *something*
+ * valid:
+ */
+ struct fd_bo *dummy_vbo = vp->bo;
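+ /* SWITCHNEXT presumably still needs to be set if any sysval register is live, so the fetch advances to them: */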
+ bool switchnext = (vertex_regid != regid(63, 0)) ||
+ (instance_regid != regid(63, 0)) ||
+ (vtxcnt_regid != regid(63, 0));
+
+ OUT_PKT0(ring, REG_A3XX_VFD_FETCH(0), 2);
+ OUT_RING(ring, A3XX_VFD_FETCH_INSTR_0_FETCHSIZE(0) |
+ A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE(0) |
+ COND(switchnext, A3XX_VFD_FETCH_INSTR_0_SWITCHNEXT) |
+ A3XX_VFD_FETCH_INSTR_0_INDEXCODE(0) |
+ A3XX_VFD_FETCH_INSTR_0_STEPRATE(1));
+ OUT_RELOC(ring, dummy_vbo, 0, 0, 0);
+
+ OUT_PKT0(ring, REG_A3XX_VFD_DECODE_INSTR(0), 1);
+ OUT_RING(ring, A3XX_VFD_DECODE_INSTR_CONSTFILL |
+ A3XX_VFD_DECODE_INSTR_WRITEMASK(0x1) |
+ A3XX_VFD_DECODE_INSTR_FORMAT(VFMT_8_UNORM) |
+ A3XX_VFD_DECODE_INSTR_SWAP(XYZW) |
+ A3XX_VFD_DECODE_INSTR_REGID(regid(0,0)) |
+ A3XX_VFD_DECODE_INSTR_SHIFTCNT(1) |
+ A3XX_VFD_DECODE_INSTR_LASTCOMPVALID |
+ COND(switchnext, A3XX_VFD_DECODE_INSTR_SWITCHNEXT));
+
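+ /* account for the dummy fetch/decode pair so the VFD_CONTROL programming below stays consistent: */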
+ total_in = 1;
+ j = 1;
+ }
+
OUT_PKT0(ring, REG_A3XX_VFD_CONTROL_0, 2);
OUT_RING(ring, A3XX_VFD_CONTROL_0_TOTALATTRTOVS(total_in) |
A3XX_VFD_CONTROL_0_PACKETSIZE(2) |
fd3_emit_state(struct fd_context *ctx, struct fd_ringbuffer *ring,
struct fd3_emit *emit)
{
- struct ir3_shader_variant *vp = fd3_emit_get_vp(emit);
- struct ir3_shader_variant *fp = fd3_emit_get_fp(emit);
+ const struct ir3_shader_variant *vp = fd3_emit_get_vp(emit);
+ const struct ir3_shader_variant *fp = fd3_emit_get_fp(emit);
uint32_t dirty = emit->dirty;
emit_marker(ring, 5);
A3XX_RB_MSAA_CONTROL_SAMPLE_MASK(ctx->sample_mask));
}
- if ((dirty & (FD_DIRTY_ZSA | FD_DIRTY_PROG)) && !emit->key.binning_pass) {
- uint32_t val = fd3_zsa_stateobj(ctx->zsa)->rb_render_control;
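+ /* RB_RENDER_CONTROL now also carries blend-state bits (dual-source enable), so re-emit it on blend changes too: */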
+ if ((dirty & (FD_DIRTY_ZSA | FD_DIRTY_PROG | FD_DIRTY_BLEND_DUAL)) &&
+ !emit->key.binning_pass) {
+ uint32_t val = fd3_zsa_stateobj(ctx->zsa)->rb_render_control |
+ fd3_blend_stateobj(ctx->blend)->rb_render_control;
val |= COND(fp->frag_face, A3XX_RB_RENDER_CONTROL_FACENESS);
val |= COND(fp->frag_coord, A3XX_RB_RENDER_CONTROL_XCOORD |
*/
OUT_PKT0(ring, REG_A3XX_RB_RENDER_CONTROL, 1);
- OUT_RINGP(ring, val, &fd3_context(ctx)->rbrc_patches);
+ OUT_RINGP(ring, val, &ctx->batch->rbrc_patches);
}
if (dirty & (FD_DIRTY_ZSA | FD_DIRTY_STENCIL_REF)) {
A3XX_RB_STENCILREFMASK_BF_STENCILREF(sr->ref_value[1]));
}
- if (dirty & (FD_DIRTY_ZSA | FD_DIRTY_PROG)) {
+ if (dirty & (FD_DIRTY_ZSA | FD_DIRTY_RASTERIZER | FD_DIRTY_PROG)) {
uint32_t val = fd3_zsa_stateobj(ctx->zsa)->rb_depth_control;
if (fp->writes_pos) {
val |= A3XX_RB_DEPTH_CONTROL_FRAG_WRITES_Z;
if (fp->has_kill) {
val |= A3XX_RB_DEPTH_CONTROL_EARLY_Z_DISABLE;
}
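+ /* depth_clip disabled in gallium means depth *clamp*, which maps onto Z_CLAMP_ENABLE: */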
+ if (!ctx->rasterizer->depth_clip) {
+ val |= A3XX_RB_DEPTH_CONTROL_Z_CLAMP_ENABLE;
+ }
OUT_PKT0(ring, REG_A3XX_RB_DEPTH_CONTROL, 1);
OUT_RING(ring, val);
}
val |= COND(fp->frag_coord, A3XX_GRAS_CL_CLIP_CNTL_ZCOORD |
A3XX_GRAS_CL_CLIP_CNTL_WCOORD);
/* TODO only use if prog doesn't use clipvertex/clipdist */
- val |= MIN2(util_bitcount(ctx->rasterizer->clip_plane_enable), 6) << 26;
+ val |= A3XX_GRAS_CL_CLIP_CNTL_NUM_USER_CLIP_PLANES(
+ MIN2(util_bitcount(ctx->rasterizer->clip_plane_enable), 6));
OUT_PKT0(ring, REG_A3XX_GRAS_CL_CLIP_CNTL, 1);
OUT_RING(ring, val);
}
int i = ffs(planes) - 1;
planes &= ~(1U << i);
- fd_wfi(ctx, ring);
+ fd_wfi(ctx->batch, ring);
OUT_PKT0(ring, REG_A3XX_GRAS_CL_USER_PLANE(count++), 4);
OUT_RING(ring, fui(ctx->ucp.ucp[i][0]));
OUT_RING(ring, fui(ctx->ucp.ucp[i][1]));
OUT_RING(ring, A3XX_GRAS_SC_WINDOW_SCISSOR_BR_X(scissor->maxx - 1) |
A3XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(scissor->maxy - 1));
- ctx->max_scissor.minx = MIN2(ctx->max_scissor.minx, scissor->minx);
- ctx->max_scissor.miny = MIN2(ctx->max_scissor.miny, scissor->miny);
- ctx->max_scissor.maxx = MAX2(ctx->max_scissor.maxx, scissor->maxx);
- ctx->max_scissor.maxy = MAX2(ctx->max_scissor.maxy, scissor->maxy);
+ ctx->batch->max_scissor.minx = MIN2(ctx->batch->max_scissor.minx, scissor->minx);
+ ctx->batch->max_scissor.miny = MIN2(ctx->batch->max_scissor.miny, scissor->miny);
+ ctx->batch->max_scissor.maxx = MAX2(ctx->batch->max_scissor.maxx, scissor->maxx);
+ ctx->batch->max_scissor.maxy = MAX2(ctx->batch->max_scissor.maxy, scissor->maxy);
}
if (dirty & FD_DIRTY_VIEWPORT) {
- fd_wfi(ctx, ring);
+ fd_wfi(ctx->batch, ring);
OUT_PKT0(ring, REG_A3XX_GRAS_CL_VPORT_XOFFSET, 6);
OUT_RING(ring, A3XX_GRAS_CL_VPORT_XOFFSET(ctx->viewport.translate[0] - 0.5));
OUT_RING(ring, A3XX_GRAS_CL_VPORT_XSCALE(ctx->viewport.scale[0]));
OUT_RING(ring, A3XX_GRAS_CL_VPORT_ZSCALE(ctx->viewport.scale[2]));
}
- if (dirty & (FD_DIRTY_PROG | FD_DIRTY_FRAMEBUFFER)) {
- struct pipe_framebuffer_state *pfb = &ctx->framebuffer;
- fd3_program_emit(ring, emit, pfb->nr_cbufs, pfb->cbufs);
+ if (dirty & (FD_DIRTY_VIEWPORT | FD_DIRTY_RASTERIZER | FD_DIRTY_FRAMEBUFFER)) {
+ float zmin, zmax;
+ int depth = 24;
+ if (ctx->batch->framebuffer.zsbuf) {
+ depth = util_format_get_component_bits(
+ pipe_surface_format(ctx->batch->framebuffer.zsbuf),
+ UTIL_FORMAT_COLORSPACE_ZS, 0);
+ }
+ util_viewport_zmin_zmax(&ctx->viewport, ctx->rasterizer->clip_halfz,
+ &zmin, &zmax);
+
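+ /* Z_CLAMP_MIN/MAX appear to take fixed-point values scaled to the depth buffer's bit depth (defaulting to 24b when there is no zsbuf): */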
+ OUT_PKT0(ring, REG_A3XX_RB_Z_CLAMP_MIN, 2);
+ if (depth == 32) {
+ OUT_RING(ring, (uint32_t)(zmin * 0xffffffff));
+ OUT_RING(ring, (uint32_t)(zmax * 0xffffffff));
+ } else if (depth == 16) {
+ OUT_RING(ring, (uint32_t)(zmin * 0xffff));
+ OUT_RING(ring, (uint32_t)(zmax * 0xffff));
+ } else {
+ OUT_RING(ring, (uint32_t)(zmin * 0xffffff));
+ OUT_RING(ring, (uint32_t)(zmax * 0xffffff));
+ }
+ }
+
+ if (dirty & (FD_DIRTY_PROG | FD_DIRTY_FRAMEBUFFER | FD_DIRTY_BLEND_DUAL)) {
+ struct pipe_framebuffer_state *pfb = &ctx->batch->framebuffer;
+ int nr_cbufs = pfb->nr_cbufs;
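+ /* dual-source blending consumes a second FS color output, so count one extra cbuf when it is enabled: */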
+ if (fd3_blend_stateobj(ctx->blend)->rb_render_control &
+ A3XX_RB_RENDER_CONTROL_DUAL_COLOR_IN_ENABLE)
+ nr_cbufs++;
+ fd3_program_emit(ring, emit, nr_cbufs, pfb->cbufs);
}
/* TODO we should not need this or fd_wfi() before emit_constants():
OUT_RING(ring, HLSQ_FLUSH);
if (emit->prog == &ctx->prog) { /* evil hack to deal sanely with clear path */
- ir3_emit_consts(vp, ring, emit->info, dirty);
+ ir3_emit_consts(vp, ring, ctx, emit->info, dirty);
if (!emit->key.binning_pass)
- ir3_emit_consts(fp, ring, emit->info, dirty);
- /* mark clean after emitting consts: */
- ctx->prog.dirty = 0;
+ ir3_emit_consts(fp, ring, ctx, emit->info, dirty);
}
- if ((dirty & (FD_DIRTY_BLEND | FD_DIRTY_FRAMEBUFFER)) && ctx->blend) {
+ if (dirty & (FD_DIRTY_BLEND | FD_DIRTY_FRAMEBUFFER)) {
struct fd3_blend_stateobj *blend = fd3_blend_stateobj(ctx->blend);
uint32_t i;
for (i = 0; i < ARRAY_SIZE(blend->rb_mrt); i++) {
- enum pipe_format format = pipe_surface_format(ctx->framebuffer.cbufs[i]);
+ enum pipe_format format =
+ pipe_surface_format(ctx->batch->framebuffer.cbufs[i]);
const struct util_format_description *desc =
util_format_description(format);
bool is_float = util_format_is_float(format);
}
if (dirty & (FD_DIRTY_VERTTEX | FD_DIRTY_FRAGTEX))
- fd_wfi(ctx, ring);
+ fd_wfi(ctx->batch, ring);
if (dirty & FD_DIRTY_VERTTEX) {
if (vp->has_samp)
* state, there could have been a context switch between ioctls):
*/
void
-fd3_emit_restore(struct fd_context *ctx)
+fd3_emit_restore(struct fd_batch *batch, struct fd_ringbuffer *ring)
{
+ struct fd_context *ctx = batch->ctx;
struct fd3_context *fd3_ctx = fd3_context(ctx);
- struct fd_ringbuffer *ring = ctx->ring;
int i;
if (ctx->screen->gpu_id == 320) {
OUT_RING(ring, 0x00000000);
}
- fd_wfi(ctx, ring);
+ fd_wfi(batch, ring);
OUT_PKT3(ring, CP_INVALIDATE_STATE, 1);
OUT_RING(ring, 0x00007fff);
OUT_RING(ring, A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY(0) |
A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY(0));
- OUT_PKT0(ring, REG_A3XX_UCHE_CACHE_INVALIDATE0_REG, 2);
- OUT_RING(ring, A3XX_UCHE_CACHE_INVALIDATE0_REG_ADDR(0));
- OUT_RING(ring, A3XX_UCHE_CACHE_INVALIDATE1_REG_ADDR(0) |
- A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE(INVALIDATE) |
- A3XX_UCHE_CACHE_INVALIDATE1_REG_ENTIRE_CACHE);
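+ /* the open-coded UCHE invalidate formerly here is now handled by the cache-flush helper: */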
+ fd3_emit_cache_flush(batch, ring);
OUT_PKT0(ring, REG_A3XX_GRAS_CL_CLIP_CNTL, 1);
OUT_RING(ring, 0x00000000); /* GRAS_CL_CLIP_CNTL */
OUT_PKT0(ring, REG_A3XX_PC_VSTREAM_CONTROL, 1);
OUT_RING(ring, 0x00000000);
- fd_event_write(ctx, ring, CACHE_FLUSH);
+ fd_event_write(batch, ring, CACHE_FLUSH);
if (is_a3xx_p0(ctx->screen)) {
OUT_PKT3(ring, CP_DRAW_INDX, 3);
OUT_RING(ring, 0x00000000);
OUT_RING(ring, 0x00000000);
- fd_wfi(ctx, ring);
+ fd_wfi(batch, ring);
- ctx->needs_rb_fbd = true;
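+ /* hw query state is now tracked per-batch; (re)enable queries as part of restore: */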
+ fd_hw_query_enable(batch, ring);
+}
+
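+/* chain to a subordinate ring (e.g. a stateobj); the 'true' presumably selects the prefetching variant of the CP indirect-buffer packet */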
+static void
+fd3_emit_ib(struct fd_ringbuffer *ring, struct fd_ringbuffer *target)
+{
+ __OUT_IB(ring, true, target);
}
void
struct fd_context *ctx = fd_context(pctx);
ctx->emit_const = fd3_emit_const;
ctx->emit_const_bo = fd3_emit_const_bo;
+ ctx->emit_ib = fd3_emit_ib;
}