#include "fd6_emit.h"
#include "fd6_program.h"
#include "fd6_format.h"
+#include "fd6_vsc.h"
#include "fd6_zsa.h"
-/* some bits in common w/ a4xx: */
-#include "a4xx/fd4_draw.h"
+#include "fd6_pack.h"
static void
-draw_emit_indirect(struct fd_batch *batch, struct fd_ringbuffer *ring,
- enum pc_di_primtype primtype,
- enum pc_di_vis_cull_mode vismode,
+draw_emit_indirect(struct fd_ringbuffer *ring,
+ struct CP_DRAW_INDX_OFFSET_0 *draw0,
const struct pipe_draw_info *info,
unsigned index_offset)
{
if (info->index_size) {
struct pipe_resource *idx = info->index.resource;
- unsigned max_indicies = (idx->width0 - info->indirect->offset) /
- info->index_size;
-
- OUT_PKT7(ring, CP_DRAW_INDX_INDIRECT, 6);
- OUT_RINGP(ring, DRAW4(primtype, DI_SRC_SEL_DMA,
- fd4_size2indextype(info->index_size), 0),
- &batch->draw_patches);
- OUT_RELOC(ring, fd_resource(idx)->bo,
- index_offset, 0, 0);
- // XXX: Check A5xx vs A6xx
- OUT_RING(ring, A5XX_CP_DRAW_INDX_INDIRECT_3_MAX_INDICES(max_indicies));
- OUT_RELOC(ring, ind->bo, info->indirect->offset, 0, 0);
+ unsigned max_indices = (idx->width0 - index_offset) / info->index_size;
+
+ OUT_PKT(ring, CP_DRAW_INDX_INDIRECT,
+ pack_CP_DRAW_INDX_OFFSET_0(*draw0),
+ A5XX_CP_DRAW_INDX_INDIRECT_INDX_BASE(
+ fd_resource(idx)->bo, index_offset),
+ A5XX_CP_DRAW_INDX_INDIRECT_3(.max_indices = max_indices),
+ A5XX_CP_DRAW_INDX_INDIRECT_INDIRECT(
+ ind->bo, info->indirect->offset)
+ );
} else {
- OUT_PKT7(ring, CP_DRAW_INDIRECT, 3);
- OUT_RINGP(ring, DRAW4(primtype, DI_SRC_SEL_AUTO_INDEX, 0, 0),
- &batch->draw_patches);
- OUT_RELOC(ring, ind->bo, info->indirect->offset, 0, 0);
+ OUT_PKT(ring, CP_DRAW_INDIRECT,
+ pack_CP_DRAW_INDX_OFFSET_0(*draw0),
+ A5XX_CP_DRAW_INDIRECT_INDIRECT(
+ ind->bo, info->indirect->offset)
+ );
}
}
static void
-draw_emit(struct fd_batch *batch, struct fd_ringbuffer *ring,
- enum pc_di_primtype primtype,
- enum pc_di_vis_cull_mode vismode,
+draw_emit(struct fd_ringbuffer *ring,
+ struct CP_DRAW_INDX_OFFSET_0 *draw0,
const struct pipe_draw_info *info,
unsigned index_offset)
{
assert(!info->has_user_indices);
struct pipe_resource *idx_buffer = info->index.resource;
- uint32_t idx_size = info->index_size * info->count;
- uint32_t idx_offset = index_offset + info->start * info->index_size;
-
- /* leave vis mode blank for now, it will be patched up when
- * we know if we are binning or not
- */
- uint32_t draw = CP_DRAW_INDX_OFFSET_0_PRIM_TYPE(primtype) |
- CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(DI_SRC_SEL_DMA) |
- CP_DRAW_INDX_OFFSET_0_INDEX_SIZE(fd4_size2indextype(info->index_size)) |
- 0x2000;
-
- OUT_PKT7(ring, CP_DRAW_INDX_OFFSET, 7);
- if (vismode == USE_VISIBILITY) {
- OUT_RINGP(ring, draw, &batch->draw_patches);
- } else {
- OUT_RING(ring, draw);
- }
- OUT_RING(ring, info->instance_count); /* NumInstances */
- OUT_RING(ring, info->count); /* NumIndices */
- OUT_RING(ring, 0x0); /* XXX */
- OUT_RELOC(ring, fd_resource(idx_buffer)->bo, idx_offset, 0, 0);
- OUT_RING (ring, idx_size);
+ unsigned max_indices = (idx_buffer->width0 - index_offset) / info->index_size;
+
+ OUT_PKT(ring, CP_DRAW_INDX_OFFSET,
+ pack_CP_DRAW_INDX_OFFSET_0(*draw0),
+ CP_DRAW_INDX_OFFSET_1(.num_instances = info->instance_count),
+ CP_DRAW_INDX_OFFSET_2(.num_indices = info->count),
+ CP_DRAW_INDX_OFFSET_3(.first_indx = info->start),
+ A5XX_CP_DRAW_INDX_OFFSET_INDX_BASE(
+ fd_resource(idx_buffer)->bo, index_offset),
+ A5XX_CP_DRAW_INDX_OFFSET_6(.max_indices = max_indices)
+ );
} else {
- /* leave vis mode blank for now, it will be patched up when
- * we know if we are binning or not
- */
- uint32_t draw = CP_DRAW_INDX_OFFSET_0_PRIM_TYPE(primtype) |
- CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(DI_SRC_SEL_AUTO_INDEX) |
- 0x2000;
-
- OUT_PKT7(ring, CP_DRAW_INDX_OFFSET, 3);
- if (vismode == USE_VISIBILITY) {
- OUT_RINGP(ring, draw, &batch->draw_patches);
- } else {
- OUT_RING(ring, draw);
- }
- OUT_RING(ring, info->instance_count); /* NumInstances */
- OUT_RING(ring, info->count); /* NumIndices */
+ OUT_PKT(ring, CP_DRAW_INDX_OFFSET,
+ pack_CP_DRAW_INDX_OFFSET_0(*draw0),
+ CP_DRAW_INDX_OFFSET_1(.num_instances = info->instance_count),
+ CP_DRAW_INDX_OFFSET_2(.num_indices = info->count)
+ );
}
}
-static void
-draw_impl(struct fd_context *ctx, struct fd_ringbuffer *ring,
- struct fd6_emit *emit, unsigned index_offset)
-{
- const struct pipe_draw_info *info = emit->info;
- enum pc_di_primtype primtype = ctx->primtypes[info->mode];
-
- fd6_emit_state(ring, emit);
-
- if (emit->dirty & (FD_DIRTY_VTXBUF | FD_DIRTY_VTXSTATE))
- fd6_emit_vertex_bufs(ring, emit);
-
- OUT_PKT4(ring, REG_A6XX_VFD_INDEX_OFFSET, 2);
- OUT_RING(ring, info->index_size ? info->index_bias : info->start); /* VFD_INDEX_OFFSET */
- OUT_RING(ring, info->start_instance); /* VFD_INSTANCE_START_OFFSET */
-
- OUT_PKT4(ring, REG_A6XX_PC_RESTART_INDEX, 1);
- OUT_RING(ring, info->primitive_restart ? /* PC_RESTART_INDEX */
- info->restart_index : 0xffffffff);
-
- /* for debug after a lock up, write a unique counter value
- * to scratch7 for each draw, to make it easier to match up
- * register dumps to cmdstream. The combination of IB
- * (scratch6) and DRAW is enough to "triangulate" the
- * particular draw that caused lockup.
- */
- emit_marker6(ring, 7);
-
- if (info->indirect) {
- draw_emit_indirect(ctx->batch, ring, primtype,
- emit->binning_pass ? IGNORE_VISIBILITY : USE_VISIBILITY,
- info, index_offset);
- } else {
- draw_emit(ctx->batch, ring, primtype,
- emit->binning_pass ? IGNORE_VISIBILITY : USE_VISIBILITY,
- info, index_offset);
- }
-
- emit_marker6(ring, 7);
- fd_reset_wfi(ctx->batch);
-}
-
/* fixup dirty shader state in case some "unrelated" (from the state-
* tracker's perspective) state change causes us to switch to a
* different variant.
}
}
+static void
+fixup_draw_state(struct fd_context *ctx, struct fd6_emit *emit)
+{
+ if (ctx->last.dirty ||
+ (ctx->last.primitive_restart != emit->primitive_restart)) {
+ /* rasterizer state is affected by primitive-restart: */
+ ctx->dirty |= FD_DIRTY_RASTERIZER;
+ ctx->last.primitive_restart = emit->primitive_restart;
+ }
+}
+
static bool
fd6_draw_vbo(struct fd_context *ctx, const struct pipe_draw_info *info,
unsigned index_offset)
{
struct fd6_context *fd6_ctx = fd6_context(ctx);
+ struct ir3_shader *gs = ctx->prog.gs;
struct fd6_emit emit = {
.ctx = ctx,
.vtx = &ctx->vtx,
.info = info,
.key = {
- .vs = ctx->prog.vp,
- .fs = ctx->prog.fp,
+ .vs = ctx->prog.vs,
+ .gs = ctx->prog.gs,
+ .fs = ctx->prog.fs,
.key = {
.color_two_side = ctx->rasterizer->light_twoside,
.vclamp_color = ctx->rasterizer->clamp_vertex_color,
.fclamp_color = ctx->rasterizer->clamp_fragment_color,
.rasterflat = ctx->rasterizer->flatshade,
.ucp_enables = ctx->rasterizer->clip_plane_enable,
- .has_per_samp = (fd6_ctx->fsaturate || fd6_ctx->vsaturate ||
- fd6_ctx->fastc_srgb || fd6_ctx->vastc_srgb),
+ .has_per_samp = (fd6_ctx->fsaturate || fd6_ctx->vsaturate),
.vsaturate_s = fd6_ctx->vsaturate_s,
.vsaturate_t = fd6_ctx->vsaturate_t,
.vsaturate_r = fd6_ctx->vsaturate_r,
.fsaturate_s = fd6_ctx->fsaturate_s,
.fsaturate_t = fd6_ctx->fsaturate_t,
.fsaturate_r = fd6_ctx->fsaturate_r,
- .vastc_srgb = fd6_ctx->vastc_srgb,
- .fastc_srgb = fd6_ctx->fastc_srgb,
+ .layer_zero = !gs || !(gs->nir->info.outputs_written & VARYING_BIT_LAYER),
.vsamples = ctx->tex[PIPE_SHADER_VERTEX].samples,
.fsamples = ctx->tex[PIPE_SHADER_FRAGMENT].samples,
- }
+ .sample_shading = (ctx->min_samples > 1),
+ .msaa = (ctx->framebuffer.samples > 1),
+ },
},
.rasterflat = ctx->rasterizer->flatshade,
.sprite_coord_enable = ctx->rasterizer->sprite_coord_enable,
.sprite_coord_mode = ctx->rasterizer->sprite_coord_mode,
+ .primitive_restart = info->primitive_restart && info->index_size,
};
- fixup_shader_state(ctx, &emit.key.key);
+ if (!(ctx->prog.vs && ctx->prog.fs))
+ return false;
- unsigned dirty = ctx->dirty;
+ if (info->mode == PIPE_PRIM_PATCHES) {
+ emit.key.hs = ctx->prog.hs;
+ emit.key.ds = ctx->prog.ds;
- if (!(dirty & FD_DIRTY_PROG)) {
+ if (!(ctx->prog.hs && ctx->prog.ds))
+ return false;
+
+ shader_info *ds_info = &emit.key.ds->nir->info;
+ emit.key.key.tessellation = ir3_tess_mode(ds_info->tess.primitive_mode);
+ }
+
+ if (emit.key.gs)
+ emit.key.key.has_gs = true;
+
+ if (!(emit.key.hs || emit.key.ds || emit.key.gs || info->indirect))
+ fd6_vsc_update_sizes(ctx->batch, info);
+
+ fixup_shader_state(ctx, &emit.key.key);
+
+ if (!(ctx->dirty & FD_DIRTY_PROG)) {
emit.prog = fd6_ctx->prog;
} else {
fd6_ctx->prog = fd6_emit_get_prog(&emit);
}
+ /* bail if compile failed: */
+ if (!fd6_ctx->prog)
+ return false;
+
+ fixup_draw_state(ctx, &emit);
+
+ emit.dirty = ctx->dirty; /* *after* fixup_shader_state() */
+ emit.bs = fd6_emit_get_prog(&emit)->bs;
emit.vs = fd6_emit_get_prog(&emit)->vs;
+ emit.hs = fd6_emit_get_prog(&emit)->hs;
+ emit.ds = fd6_emit_get_prog(&emit)->ds;
+ emit.gs = fd6_emit_get_prog(&emit)->gs;
emit.fs = fd6_emit_get_prog(&emit)->fs;
- const struct ir3_shader_variant *vp = emit.vs;
- const struct ir3_shader_variant *fp = emit.fs;
+ ctx->stats.vs_regs += ir3_shader_halfregs(emit.vs);
+ ctx->stats.hs_regs += COND(emit.hs, ir3_shader_halfregs(emit.hs));
+ ctx->stats.ds_regs += COND(emit.ds, ir3_shader_halfregs(emit.ds));
+ ctx->stats.gs_regs += COND(emit.gs, ir3_shader_halfregs(emit.gs));
+ ctx->stats.fs_regs += ir3_shader_halfregs(emit.fs);
- /* do regular pass first, since that is more likely to fail compiling: */
+ struct fd_ringbuffer *ring = ctx->batch->draw;
- if (!vp || !fp)
- return false;
+ struct CP_DRAW_INDX_OFFSET_0 draw0 = {
+ .prim_type = ctx->primtypes[info->mode],
+ .vis_cull = USE_VISIBILITY,
+ .gs_enable = !!emit.key.gs,
+ };
- ctx->stats.vs_regs += ir3_shader_halfregs(vp);
- ctx->stats.fs_regs += ir3_shader_halfregs(fp);
+ if (info->index_size) {
+ draw0.source_select = DI_SRC_SEL_DMA;
+ draw0.index_size = fd4_size2indextype(info->index_size);
+ } else {
+ draw0.source_select = DI_SRC_SEL_AUTO_INDEX;
+ }
- /* figure out whether we need to disable LRZ write for binning
- * pass using draw pass's fp:
- */
- emit.no_lrz_write = fp->writes_pos || fp->has_kill;
+ if (info->mode == PIPE_PRIM_PATCHES) {
+ shader_info *ds_info = &emit.ds->shader->nir->info;
+ uint32_t factor_stride;
+
+ switch (ds_info->tess.primitive_mode) {
+ case GL_ISOLINES:
+ draw0.patch_type = TESS_ISOLINES;
+ factor_stride = 12;
+ break;
+ case GL_TRIANGLES:
+ draw0.patch_type = TESS_TRIANGLES;
+ factor_stride = 20;
+ break;
+ case GL_QUADS:
+ draw0.patch_type = TESS_QUADS;
+ factor_stride = 28;
+ break;
+ default:
+ unreachable("bad tessmode");
+ }
+
+ draw0.prim_type = DI_PT_PATCHES0 + info->vertices_per_patch;
+ draw0.tess_enable = true;
+
+ ctx->batch->tessellation = true;
+ ctx->batch->tessparam_size = MAX2(ctx->batch->tessparam_size,
+ emit.hs->output_size * 4 * info->count);
+ ctx->batch->tessfactor_size = MAX2(ctx->batch->tessfactor_size,
+ factor_stride * info->count);
+
+ if (!ctx->batch->tess_addrs_constobj) {
+ /* Reserve space for the bo addresses - we'll write them later in
+ * setup_tess_buffers(). We need 2 bo addresses, but indirect
+ * constant upload needs at least 4 vec4s.
+ */
+ unsigned size = 4 * 16;
+
+ ctx->batch->tess_addrs_constobj = fd_submit_new_ringbuffer(
+ ctx->batch->submit, size, FD_RINGBUFFER_STREAMING);
+
+ ctx->batch->tess_addrs_constobj->cur += size;
+ }
+ }
- emit.binning_pass = false;
- emit.dirty = dirty;
+ uint32_t index_start = info->index_size ? info->index_bias : info->start;
+ if (ctx->last.dirty || (ctx->last.index_start != index_start)) {
+ OUT_PKT4(ring, REG_A6XX_VFD_INDEX_OFFSET, 1);
+ OUT_RING(ring, index_start); /* VFD_INDEX_OFFSET */
+ ctx->last.index_start = index_start;
+ }
+
+ if (ctx->last.dirty || (ctx->last.instance_start != info->start_instance)) {
+ OUT_PKT4(ring, REG_A6XX_VFD_INSTANCE_START_OFFSET, 1);
+ OUT_RING(ring, info->start_instance); /* VFD_INSTANCE_START_OFFSET */
+ ctx->last.instance_start = info->start_instance;
+ }
+
+ uint32_t restart_index = info->primitive_restart ? info->restart_index : 0xffffffff;
+ if (ctx->last.dirty || (ctx->last.restart_index != restart_index)) {
+ OUT_PKT4(ring, REG_A6XX_PC_RESTART_INDEX, 1);
+ OUT_RING(ring, restart_index); /* PC_RESTART_INDEX */
+ ctx->last.restart_index = restart_index;
+ }
+
+ fd6_emit_state(ring, &emit);
- draw_impl(ctx, ctx->batch->draw, &emit, index_offset);
+ /* for debug after a lock up, write a unique counter value
+ * to scratch7 for each draw, to make it easier to match up
+ * register dumps to cmdstream. The combination of IB
+ * (scratch6) and DRAW is enough to "triangulate" the
+ * particular draw that caused lockup.
+ */
+ emit_marker6(ring, 7);
- /* and now binning pass: */
- emit.binning_pass = true;
- emit.dirty = dirty & ~(FD_DIRTY_BLEND);
- emit.vs = fd6_emit_get_prog(&emit)->bs;
+ if (info->indirect) {
+ draw_emit_indirect(ring, &draw0, info, index_offset);
+ } else {
+ draw_emit(ring, &draw0, info, index_offset);
+ }
- draw_impl(ctx, ctx->batch->binning, &emit, index_offset);
+ emit_marker6(ring, 7);
+ fd_reset_wfi(ctx->batch);
if (emit.streamout_mask) {
struct fd_ringbuffer *ring = ctx->batch->draw;
return true;
}
-static bool is_z32(enum pipe_format format)
-{
- switch (format) {
- case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT:
- case PIPE_FORMAT_Z32_UNORM:
- case PIPE_FORMAT_Z32_FLOAT:
- return true;
- default:
- return false;
- }
-}
-
static void
fd6_clear_lrz(struct fd_batch *batch, struct fd_resource *zsbuf, double depth)
{
struct fd_ringbuffer *ring;
+ struct fd6_context *fd6_ctx = fd6_context(batch->ctx);
- // TODO mid-frame clears (ie. app doing crazy stuff)?? Maybe worth
- // splitting both clear and lrz clear out into their own rb's. And
- // just throw away any draws prior to clear. (Anything not fullscreen
- // clear, just fallback to generic path that treats it as a normal
- // draw
-
- if (!batch->lrz_clear) {
- batch->lrz_clear = fd_ringbuffer_new(batch->ctx->pipe, 0x1000);
- fd_ringbuffer_set_parent(batch->lrz_clear, batch->gmem);
- }
-
- ring = batch->lrz_clear;
+ ring = fd_batch_get_prologue(batch);
emit_marker6(ring, 7);
OUT_PKT7(ring, CP_SET_MARKER, 1);
- OUT_RING(ring, A2XX_CP_SET_MARKER_0_MODE(RM6_BYPASS));
+ OUT_RING(ring, A6XX_CP_SET_MARKER_0_MODE(RM6_BYPASS));
emit_marker6(ring, 7);
- OUT_PKT4(ring, REG_A6XX_RB_CCU_CNTL, 1);
- OUT_RING(ring, 0x10000000);
+ OUT_WFI5(ring);
- OUT_PKT4(ring, REG_A6XX_HLSQ_UPDATE_CNTL, 1);
- OUT_RING(ring, 0x7ffff);
+ OUT_PKT4(ring, REG_A6XX_RB_CCU_CNTL, 1);
+ OUT_RING(ring, fd6_ctx->magic.RB_CCU_CNTL_bypass);
+
+ OUT_REG(ring, A6XX_HLSQ_INVALIDATE_CMD(
+ .vs_state = true,
+ .hs_state = true,
+ .ds_state = true,
+ .gs_state = true,
+ .fs_state = true,
+ .cs_state = true,
+ .gfx_ibo = true,
+ .cs_ibo = true,
+ .gfx_shared_const = true,
+ .gfx_bindless = 0x1f,
+ .cs_bindless = 0x1f
+ ));
emit_marker6(ring, 7);
OUT_PKT7(ring, CP_SET_MARKER, 1);
- OUT_RING(ring, A2XX_CP_SET_MARKER_0_MODE(0xc));
+ OUT_RING(ring, A6XX_CP_SET_MARKER_0_MODE(RM6_BLIT2DSCALE));
emit_marker6(ring, 7);
- OUT_PKT4(ring, REG_A6XX_RB_UNKNOWN_8C01, 1);
+ OUT_PKT4(ring, REG_A6XX_RB_2D_UNKNOWN_8C01, 1);
OUT_RING(ring, 0x0);
OUT_PKT4(ring, REG_A6XX_SP_PS_2D_SRC_INFO, 13);
OUT_RING(ring, 0x00000000);
OUT_RING(ring, 0x00000000);
- OUT_PKT4(ring, REG_A6XX_SP_UNKNOWN_ACC0, 1);
+ OUT_PKT4(ring, REG_A6XX_SP_2D_DST_FORMAT, 1);
OUT_RING(ring, 0x0000f410);
OUT_PKT4(ring, REG_A6XX_GRAS_2D_BLIT_CNTL, 1);
- OUT_RING(ring, A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT(RB6_R16_UNORM) |
+ OUT_RING(ring, A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT(FMT6_16_UNORM) |
0x4f00080);
OUT_PKT4(ring, REG_A6XX_RB_2D_BLIT_CNTL, 1);
- OUT_RING(ring, A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT(RB6_R16_UNORM) |
+ OUT_RING(ring, A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT(FMT6_16_UNORM) |
0x4f00080);
- fd6_event_write(batch, ring, UNK_1D, true);
+ fd6_event_write(batch, ring, PC_CCU_FLUSH_COLOR_TS, true);
fd6_event_write(batch, ring, PC_CCU_INVALIDATE_COLOR, false);
OUT_PKT4(ring, REG_A6XX_RB_2D_SRC_SOLID_C0, 4);
OUT_RING(ring, 0x00000000);
OUT_PKT4(ring, REG_A6XX_RB_2D_DST_INFO, 9);
- OUT_RING(ring, A6XX_RB_2D_DST_INFO_COLOR_FORMAT(RB6_R16_UNORM) |
+ OUT_RING(ring, A6XX_RB_2D_DST_INFO_COLOR_FORMAT(FMT6_16_UNORM) |
A6XX_RB_2D_DST_INFO_TILE_MODE(TILE6_LINEAR) |
A6XX_RB_2D_DST_INFO_COLOR_SWAP(WZYX));
- OUT_RELOCW(ring, zsbuf->lrz, 0, 0, 0);
- OUT_RING(ring, A6XX_RB_2D_DST_SIZE_PITCH(zsbuf->lrz_pitch * 2));
+ OUT_RELOC(ring, zsbuf->lrz, 0, 0, 0);
+ OUT_RING(ring, A6XX_RB_2D_DST_PITCH(zsbuf->lrz_pitch * 2).value);
OUT_RING(ring, 0x00000000);
OUT_RING(ring, 0x00000000);
OUT_RING(ring, 0x00000000);
OUT_RING(ring, 0x00000000);
OUT_RING(ring, 0x00000000);
- OUT_PKT4(ring, REG_A6XX_GRAS_2D_SRC_TL_X, 4);
- OUT_RING(ring, A6XX_GRAS_2D_SRC_TL_X_X(0));
- OUT_RING(ring, A6XX_GRAS_2D_SRC_BR_X_X(0));
- OUT_RING(ring, A6XX_GRAS_2D_SRC_TL_Y_Y(0));
- OUT_RING(ring, A6XX_GRAS_2D_SRC_BR_Y_Y(0));
+ OUT_REG(ring,
+ A6XX_GRAS_2D_SRC_TL_X(0),
+ A6XX_GRAS_2D_SRC_BR_X(0),
+ A6XX_GRAS_2D_SRC_TL_Y(0),
+ A6XX_GRAS_2D_SRC_BR_Y(0));
OUT_PKT4(ring, REG_A6XX_GRAS_2D_DST_TL, 2);
OUT_RING(ring, A6XX_GRAS_2D_DST_TL_X(0) |
OUT_WFI5(ring);
OUT_PKT4(ring, REG_A6XX_RB_UNKNOWN_8E04, 1);
- OUT_RING(ring, 0x1000000);
+ OUT_RING(ring, fd6_ctx->magic.RB_UNKNOWN_8E04_blit);
OUT_PKT7(ring, CP_BLIT, 1);
OUT_RING(ring, CP_BLIT_0_OP(BLIT_OP_SCALE));
OUT_WFI5(ring);
OUT_PKT4(ring, REG_A6XX_RB_UNKNOWN_8E04, 1);
- OUT_RING(ring, 0x0);
+ OUT_RING(ring, 0x0); /* RB_UNKNOWN_8E04 */
- fd6_event_write(batch, ring, UNK_1D, true);
- fd6_event_write(batch, ring, FACENESS_FLUSH, true);
+ fd6_event_write(batch, ring, PC_CCU_FLUSH_COLOR_TS, true);
+ fd6_event_write(batch, ring, PC_CCU_FLUSH_DEPTH_TS, true);
fd6_event_write(batch, ring, CACHE_FLUSH_TS, true);
- fd6_cache_flush(batch, ring);
+ fd6_cache_inv(batch, ring);
+}
+
+static bool is_z32(enum pipe_format format)
+{
+ switch (format) {
+ case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT:
+ case PIPE_FORMAT_Z32_UNORM:
+ case PIPE_FORMAT_Z32_FLOAT:
+ return true;
+ default:
+ return false;
+ }
}
static bool
const union pipe_color_union *color, double depth, unsigned stencil)
{
struct pipe_framebuffer_state *pfb = &ctx->batch->framebuffer;
- struct pipe_scissor_state *scissor = fd_context_get_scissor(ctx);
- struct fd_ringbuffer *ring = ctx->batch->draw;
-
- if ((buffers & (PIPE_CLEAR_DEPTH | PIPE_CLEAR_STENCIL)) &&
- is_z32(pfb->zsbuf->format))
+ const bool has_depth = pfb->zsbuf;
+ unsigned color_buffers = buffers >> 2;
+
+ /* If we're clearing after draws, fall back to 3D pipe clears. We could
+ * use blitter clears in the draw batch but then we'd have to patch up the
+ * gmem offsets. This doesn't seem like a useful thing to optimize for,
+ * however. */
+ if (ctx->batch->num_draws > 0)
return false;
- OUT_PKT4(ring, REG_A6XX_RB_BLIT_SCISSOR_TL, 2);
- OUT_RING(ring, A6XX_RB_BLIT_SCISSOR_TL_X(scissor->minx) |
- A6XX_RB_BLIT_SCISSOR_TL_Y(scissor->miny));
- OUT_RING(ring, A6XX_RB_BLIT_SCISSOR_BR_X(scissor->maxx - 1) |
- A6XX_RB_BLIT_SCISSOR_BR_Y(scissor->maxy - 1));
-
- if (buffers & PIPE_CLEAR_COLOR) {
- for (int i = 0; i < pfb->nr_cbufs; i++) {
- union util_color uc = {0};
-
- if (!pfb->cbufs[i])
- continue;
-
- if (!(buffers & (PIPE_CLEAR_COLOR0 << i)))
- continue;
-
- enum pipe_format pfmt = pfb->cbufs[i]->format;
-
- // XXX I think RB_CLEAR_COLOR_DWn wants to take into account SWAP??
- union pipe_color_union swapped;
- switch (fd6_pipe2swap(pfmt)) {
- case WZYX:
- swapped.ui[0] = color->ui[0];
- swapped.ui[1] = color->ui[1];
- swapped.ui[2] = color->ui[2];
- swapped.ui[3] = color->ui[3];
- break;
- case WXYZ:
- swapped.ui[2] = color->ui[0];
- swapped.ui[1] = color->ui[1];
- swapped.ui[0] = color->ui[2];
- swapped.ui[3] = color->ui[3];
- break;
- case ZYXW:
- swapped.ui[3] = color->ui[0];
- swapped.ui[0] = color->ui[1];
- swapped.ui[1] = color->ui[2];
- swapped.ui[2] = color->ui[3];
- break;
- case XYZW:
- swapped.ui[3] = color->ui[0];
- swapped.ui[2] = color->ui[1];
- swapped.ui[1] = color->ui[2];
- swapped.ui[0] = color->ui[3];
- break;
- }
-
- if (util_format_is_pure_uint(pfmt)) {
- util_format_write_4ui(pfmt, swapped.ui, 0, &uc, 0, 0, 0, 1, 1);
- } else if (util_format_is_pure_sint(pfmt)) {
- util_format_write_4i(pfmt, swapped.i, 0, &uc, 0, 0, 0, 1, 1);
- } else {
- util_pack_color(swapped.f, pfmt, &uc);
- }
-
- OUT_PKT4(ring, REG_A6XX_RB_BLIT_DST_INFO, 1);
- OUT_RING(ring, A6XX_RB_BLIT_DST_INFO_TILE_MODE(TILE6_LINEAR) |
- A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT(fd6_pipe2color(pfmt)));
-
- OUT_PKT4(ring, REG_A6XX_RB_BLIT_INFO, 1);
- OUT_RING(ring, A6XX_RB_BLIT_INFO_GMEM |
- A6XX_RB_BLIT_INFO_CLEAR_MASK(0xf));
-
- OUT_PKT4(ring, REG_A6XX_RB_BLIT_BASE_GMEM, 1);
- OUT_RINGP(ring, i, &ctx->batch->gmem_patches);
-
- OUT_PKT4(ring, REG_A6XX_RB_UNKNOWN_88D0, 1);
- OUT_RING(ring, 0);
-
- OUT_PKT4(ring, REG_A6XX_RB_BLIT_CLEAR_COLOR_DW0, 4);
- OUT_RING(ring, uc.ui[0]);
- OUT_RING(ring, uc.ui[1]);
- OUT_RING(ring, uc.ui[2]);
- OUT_RING(ring, uc.ui[3]);
-
- fd6_emit_blit(ctx->batch, ring);
- }
- }
-
- if (pfb->zsbuf && (buffers & (PIPE_CLEAR_DEPTH | PIPE_CLEAR_STENCIL))) {
- enum pipe_format pfmt = pfb->zsbuf->format;
- uint32_t clear = util_pack_z_stencil(pfmt, depth, stencil);
- uint32_t mask = 0;
-
- if (buffers & PIPE_CLEAR_DEPTH)
- mask |= 0x1;
-
- if (buffers & PIPE_CLEAR_STENCIL)
- mask |= 0x2;
-
- OUT_PKT4(ring, REG_A6XX_RB_BLIT_DST_INFO, 1);
- OUT_RING(ring, A6XX_RB_BLIT_DST_INFO_TILE_MODE(TILE6_LINEAR) |
- A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT(fd6_pipe2color(pfmt)));
-
- OUT_PKT4(ring, REG_A6XX_RB_BLIT_INFO, 1);
- OUT_RING(ring, A6XX_RB_BLIT_INFO_GMEM |
- // XXX UNK0 for separate stencil ??
- A6XX_RB_BLIT_INFO_DEPTH |
- A6XX_RB_BLIT_INFO_CLEAR_MASK(mask));
-
- OUT_PKT4(ring, REG_A6XX_RB_BLIT_BASE_GMEM, 1);
- OUT_RINGP(ring, MAX_RENDER_TARGETS, &ctx->batch->gmem_patches);
-
- OUT_PKT4(ring, REG_A6XX_RB_UNKNOWN_88D0, 1);
- OUT_RING(ring, 0);
-
- OUT_PKT4(ring, REG_A6XX_RB_BLIT_CLEAR_COLOR_DW0, 1);
- OUT_RING(ring, clear);
-
- fd6_emit_blit(ctx->batch, ring);
-
- if (pfb->zsbuf && (buffers & PIPE_CLEAR_DEPTH)) {
- struct fd_resource *zsbuf = fd_resource(pfb->zsbuf->texture);
- if (zsbuf->lrz) {
- zsbuf->lrz_valid = true;
- fd6_clear_lrz(ctx->batch, zsbuf, depth);
- }
+ foreach_bit(i, color_buffers)
+ ctx->batch->clear_color[i] = *color;
+ if (buffers & PIPE_CLEAR_DEPTH)
+ ctx->batch->clear_depth = depth;
+ if (buffers & PIPE_CLEAR_STENCIL)
+ ctx->batch->clear_stencil = stencil;
+
+ ctx->batch->fast_cleared |= buffers;
+
+ if (has_depth && (buffers & PIPE_CLEAR_DEPTH)) {
+ struct fd_resource *zsbuf = fd_resource(pfb->zsbuf->texture);
+ if (zsbuf->lrz && !is_z32(pfb->zsbuf->format)) {
+ zsbuf->lrz_valid = true;
+ zsbuf->lrz_direction = FD_LRZ_UNKNOWN;
+ fd6_clear_lrz(ctx->batch, zsbuf, depth);
}
}