X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fgallium%2Fdrivers%2Ffreedreno%2Fa4xx%2Ffd4_emit.c;h=f0a1fdea0df9a279e78bd3b3512cbe9dd48b8fd3;hb=fd6ed7b5628678ada0db3bf6ae1bcf80628c6947;hp=0144ba492ea20063e9e7d975e38978cd21010c51;hpb=173871dfb988c3e9fb74a8016d2b024619a5d918;p=mesa.git

diff --git a/src/gallium/drivers/freedreno/a4xx/fd4_emit.c b/src/gallium/drivers/freedreno/a4xx/fd4_emit.c
index 0144ba492ea..f0a1fdea0df 100644
--- a/src/gallium/drivers/freedreno/a4xx/fd4_emit.c
+++ b/src/gallium/drivers/freedreno/a4xx/fd4_emit.c
@@ -31,6 +31,7 @@
 #include "util/u_memory.h"
 #include "util/u_helpers.h"
 #include "util/u_format.h"
+#include "util/u_viewport.h"
 
 #include "freedreno_resource.h"
 #include "freedreno_query_hw.h"
@@ -44,46 +45,41 @@
 #include "fd4_format.h"
 #include "fd4_zsa.h"
 
-static const enum adreno_state_block sb[] = {
-	[SHADER_VERTEX]   = SB_VERT_SHADER,
-	[SHADER_FRAGMENT] = SB_FRAG_SHADER,
-};
-
 /* regid:          base const register
  * prsc or dwords: buffer containing constant values
  * sizedwords:     size of const value buffer
  */
-void
+static void
 fd4_emit_const(struct fd_ringbuffer *ring, enum shader_t type,
 		uint32_t regid, uint32_t offset, uint32_t sizedwords,
 		const uint32_t *dwords, struct pipe_resource *prsc)
 {
 	uint32_t i, sz;
-	enum adreno_state_src src;
+	enum a4xx_state_src src;
 
 	debug_assert((regid % 4) == 0);
 	debug_assert((sizedwords % 4) == 0);
 
 	if (prsc) {
 		sz = 0;
-		src = 0x2; // TODO ??
+		src = SS4_INDIRECT;
 	} else {
 		sz = sizedwords;
-		src = SS_DIRECT;
+		src = SS4_DIRECT;
 	}
 
-	OUT_PKT3(ring, CP_LOAD_STATE, 2 + sz);
-	OUT_RING(ring, CP_LOAD_STATE_0_DST_OFF(regid/4) |
-			CP_LOAD_STATE_0_STATE_SRC(src) |
-			CP_LOAD_STATE_0_STATE_BLOCK(sb[type]) |
-			CP_LOAD_STATE_0_NUM_UNIT(sizedwords/4));
+	OUT_PKT3(ring, CP_LOAD_STATE4, 2 + sz);
+	OUT_RING(ring, CP_LOAD_STATE4_0_DST_OFF(regid/4) |
+			CP_LOAD_STATE4_0_STATE_SRC(src) |
+			CP_LOAD_STATE4_0_STATE_BLOCK(fd4_stage2shadersb(type)) |
+			CP_LOAD_STATE4_0_NUM_UNIT(sizedwords/4));
 	if (prsc) {
 		struct fd_bo *bo = fd_resource(prsc)->bo;
 		OUT_RELOC(ring, bo, offset,
-				CP_LOAD_STATE_1_STATE_TYPE(ST_CONSTANTS), 0);
+				CP_LOAD_STATE4_1_STATE_TYPE(ST4_CONSTANTS), 0);
 	} else {
-		OUT_RING(ring, CP_LOAD_STATE_1_EXT_SRC_ADDR(0) |
-				CP_LOAD_STATE_1_STATE_TYPE(ST_CONSTANTS));
+		OUT_RING(ring, CP_LOAD_STATE4_1_EXT_SRC_ADDR(0) |
+				CP_LOAD_STATE4_1_STATE_TYPE(ST4_CONSTANTS));
 		dwords = (uint32_t *)&((uint8_t *)dwords)[offset];
 	}
 	for (i = 0; i < sz; i++) {
@@ -93,54 +89,49 @@ fd4_emit_const(struct fd_ringbuffer *ring, enum shader_t type,
 
 static void
 fd4_emit_const_bo(struct fd_ringbuffer *ring, enum shader_t type, boolean write,
-		uint32_t regid, uint32_t num, struct fd_bo **bos, uint32_t *offsets)
+		uint32_t regid, uint32_t num, struct pipe_resource **prscs, uint32_t *offsets)
 {
+	uint32_t anum = align(num, 4);
 	uint32_t i;
 
 	debug_assert((regid % 4) == 0);
-	debug_assert((num % 4) == 0);
 
-	OUT_PKT3(ring, CP_LOAD_STATE, 2 + num);
-	OUT_RING(ring, CP_LOAD_STATE_0_DST_OFF(regid/4) |
-			CP_LOAD_STATE_0_STATE_SRC(SS_DIRECT) |
-			CP_LOAD_STATE_0_STATE_BLOCK(sb[type]) |
-			CP_LOAD_STATE_0_NUM_UNIT(num/4));
-	OUT_RING(ring, CP_LOAD_STATE_1_EXT_SRC_ADDR(0) |
-			CP_LOAD_STATE_1_STATE_TYPE(ST_CONSTANTS));
+	OUT_PKT3(ring, CP_LOAD_STATE4, 2 + anum);
+	OUT_RING(ring, CP_LOAD_STATE4_0_DST_OFF(regid/4) |
+			CP_LOAD_STATE4_0_STATE_SRC(SS4_DIRECT) |
+			CP_LOAD_STATE4_0_STATE_BLOCK(fd4_stage2shadersb(type)) |
+			CP_LOAD_STATE4_0_NUM_UNIT(anum/4));
+	OUT_RING(ring, CP_LOAD_STATE4_1_EXT_SRC_ADDR(0) |
+			CP_LOAD_STATE4_1_STATE_TYPE(ST4_CONSTANTS));
 
 	for (i = 0; i < num; i++) {
-		if (bos[i]) {
+		if (prscs[i]) {
 			if (write) {
-				OUT_RELOCW(ring, bos[i], offsets[i], 0, 0);
+				OUT_RELOCW(ring, fd_resource(prscs[i])->bo, offsets[i], 0, 0);
 			} else {
-				OUT_RELOC(ring, bos[i], offsets[i], 0, 0);
+				OUT_RELOC(ring, fd_resource(prscs[i])->bo, offsets[i], 0, 0);
			}
 		} else {
 			OUT_RING(ring, 0xbad00000 | (i << 16));
 		}
 	}
+
+	for (; i < anum; i++)
+		OUT_RING(ring, 0xffffffff);
 }
 
 static void
 emit_textures(struct fd_context *ctx, struct fd_ringbuffer *ring,
-		enum adreno_state_block sb, struct fd_texture_stateobj *tex,
+		enum a4xx_state_block sb, struct fd_texture_stateobj *tex,
 		const struct ir3_shader_variant *v)
 {
 	static const uint32_t bcolor_reg[] = {
-			[SB_VERT_TEX] = REG_A4XX_TPL1_TP_VS_BORDER_COLOR_BASE_ADDR,
-			[SB_FRAG_TEX] = REG_A4XX_TPL1_TP_FS_BORDER_COLOR_BASE_ADDR,
+			[SB4_VS_TEX] = REG_A4XX_TPL1_TP_VS_BORDER_COLOR_BASE_ADDR,
+			[SB4_FS_TEX] = REG_A4XX_TPL1_TP_FS_BORDER_COLOR_BASE_ADDR,
 	};
 	struct fd4_context *fd4_ctx = fd4_context(ctx);
-	unsigned i, off;
-	void *ptr;
-
-	u_upload_alloc(fd4_ctx->border_color_uploader,
-			0, BORDER_COLOR_UPLOAD_SIZE,
-			BORDER_COLOR_UPLOAD_SIZE, &off,
-			&fd4_ctx->border_color_buf,
-			&ptr);
-
-	fd_setup_border_colors(tex, ptr, 0);
+	bool needs_border = false;
+	unsigned i;
 
 	if (tex->num_samplers > 0) {
 		int num_samplers;
@@ -152,13 +143,13 @@ emit_textures(struct fd_context *ctx, struct fd_ringbuffer *ring,
 		num_samplers = align(tex->num_samplers, 2);
 
 		/* output sampler state: */
-		OUT_PKT3(ring, CP_LOAD_STATE, 2 + (2 * num_samplers));
-		OUT_RING(ring, CP_LOAD_STATE_0_DST_OFF(0) |
-				CP_LOAD_STATE_0_STATE_SRC(SS_DIRECT) |
-				CP_LOAD_STATE_0_STATE_BLOCK(sb) |
-				CP_LOAD_STATE_0_NUM_UNIT(num_samplers));
-		OUT_RING(ring, CP_LOAD_STATE_1_STATE_TYPE(ST_SHADER) |
-				CP_LOAD_STATE_1_EXT_SRC_ADDR(0));
+		OUT_PKT3(ring, CP_LOAD_STATE4, 2 + (2 * num_samplers));
+		OUT_RING(ring, CP_LOAD_STATE4_0_DST_OFF(0) |
+				CP_LOAD_STATE4_0_STATE_SRC(SS4_DIRECT) |
+				CP_LOAD_STATE4_0_STATE_BLOCK(sb) |
+				CP_LOAD_STATE4_0_NUM_UNIT(num_samplers));
+		OUT_RING(ring, CP_LOAD_STATE4_1_STATE_TYPE(ST4_SHADER) |
+				CP_LOAD_STATE4_1_EXT_SRC_ADDR(0));
 		for (i = 0; i < tex->num_samplers; i++) {
 			static const struct fd4_sampler_stateobj dummy_sampler = {};
 			const struct fd4_sampler_stateobj *sampler = tex->samplers[i] ?
@@ -166,6 +157,8 @@ emit_textures(struct fd_context *ctx, struct fd_ringbuffer *ring,
 					&dummy_sampler;
 			OUT_RING(ring, sampler->texsamp0);
 			OUT_RING(ring, sampler->texsamp1);
+
+			needs_border |= sampler->needs_border;
 		}
 
 		for (; i < num_samplers; i++) {
@@ -178,13 +171,13 @@ emit_textures(struct fd_context *ctx, struct fd_ringbuffer *ring,
 		unsigned num_textures = tex->num_textures + v->astc_srgb.count;
 
 		/* emit texture state: */
-		OUT_PKT3(ring, CP_LOAD_STATE, 2 + (8 * num_textures));
-		OUT_RING(ring, CP_LOAD_STATE_0_DST_OFF(0) |
-				CP_LOAD_STATE_0_STATE_SRC(SS_DIRECT) |
-				CP_LOAD_STATE_0_STATE_BLOCK(sb) |
-				CP_LOAD_STATE_0_NUM_UNIT(num_textures));
-		OUT_RING(ring, CP_LOAD_STATE_1_STATE_TYPE(ST_CONSTANTS) |
-				CP_LOAD_STATE_1_EXT_SRC_ADDR(0));
+		OUT_PKT3(ring, CP_LOAD_STATE4, 2 + (8 * num_textures));
+		OUT_RING(ring, CP_LOAD_STATE4_0_DST_OFF(0) |
+				CP_LOAD_STATE4_0_STATE_SRC(SS4_DIRECT) |
+				CP_LOAD_STATE4_0_STATE_BLOCK(sb) |
+				CP_LOAD_STATE4_0_NUM_UNIT(num_textures));
+		OUT_RING(ring, CP_LOAD_STATE4_1_STATE_TYPE(ST4_CONSTANTS) |
+				CP_LOAD_STATE4_1_EXT_SRC_ADDR(0));
 		for (i = 0; i < tex->num_textures; i++) {
 			static const struct fd4_pipe_sampler_view dummy_view = {};
 			const struct fd4_pipe_sampler_view *view = tex->textures[i] ?
@@ -235,10 +228,22 @@ emit_textures(struct fd_context *ctx, struct fd_ringbuffer *ring,
 		debug_assert(v->astc_srgb.count == 0);
 	}
 
-	OUT_PKT0(ring, bcolor_reg[sb], 1);
-	OUT_RELOC(ring, fd_resource(fd4_ctx->border_color_buf)->bo, off, 0, 0);
+	if (needs_border) {
+		unsigned off;
+		void *ptr;
 
-	u_upload_unmap(fd4_ctx->border_color_uploader);
+		u_upload_alloc(fd4_ctx->border_color_uploader,
+				0, BORDER_COLOR_UPLOAD_SIZE,
+				BORDER_COLOR_UPLOAD_SIZE, &off,
+				&fd4_ctx->border_color_buf,
+				&ptr);
+
+		fd_setup_border_colors(tex, ptr, 0);
+		OUT_PKT0(ring, bcolor_reg[sb], 1);
+		OUT_RELOC(ring, fd_resource(fd4_ctx->border_color_buf)->bo, off, 0, 0);
+
+		u_upload_unmap(fd4_ctx->border_color_uploader);
+	}
 }
 
 /* emit texture state for mem->gmem restore operation.. eventually it would
@@ -257,13 +262,13 @@ fd4_emit_gmem_restore_tex(struct fd_ringbuffer *ring, unsigned nr_bufs,
 	}
 
 	/* output sampler state: */
-	OUT_PKT3(ring, CP_LOAD_STATE, 2 + (2 * nr_bufs));
-	OUT_RING(ring, CP_LOAD_STATE_0_DST_OFF(0) |
-			CP_LOAD_STATE_0_STATE_SRC(SS_DIRECT) |
-			CP_LOAD_STATE_0_STATE_BLOCK(SB_FRAG_TEX) |
-			CP_LOAD_STATE_0_NUM_UNIT(nr_bufs));
-	OUT_RING(ring, CP_LOAD_STATE_1_STATE_TYPE(ST_SHADER) |
-			CP_LOAD_STATE_1_EXT_SRC_ADDR(0));
+	OUT_PKT3(ring, CP_LOAD_STATE4, 2 + (2 * nr_bufs));
+	OUT_RING(ring, CP_LOAD_STATE4_0_DST_OFF(0) |
+			CP_LOAD_STATE4_0_STATE_SRC(SS4_DIRECT) |
+			CP_LOAD_STATE4_0_STATE_BLOCK(SB4_FS_TEX) |
+			CP_LOAD_STATE4_0_NUM_UNIT(nr_bufs));
+	OUT_RING(ring, CP_LOAD_STATE4_1_STATE_TYPE(ST4_SHADER) |
+			CP_LOAD_STATE4_1_EXT_SRC_ADDR(0));
 	for (i = 0; i < nr_bufs; i++) {
 		OUT_RING(ring, A4XX_TEX_SAMP_0_XY_MAG(A4XX_TEX_NEAREST) |
 				A4XX_TEX_SAMP_0_XY_MIN(A4XX_TEX_NEAREST) |
@@ -274,30 +279,31 @@ fd4_emit_gmem_restore_tex(struct fd_ringbuffer *ring, unsigned nr_bufs,
 	}
 
 	/* emit texture state: */
-	OUT_PKT3(ring, CP_LOAD_STATE, 2 + (8 * nr_bufs));
-	OUT_RING(ring, CP_LOAD_STATE_0_DST_OFF(0) |
-			CP_LOAD_STATE_0_STATE_SRC(SS_DIRECT) |
-			CP_LOAD_STATE_0_STATE_BLOCK(SB_FRAG_TEX) |
-			CP_LOAD_STATE_0_NUM_UNIT(nr_bufs));
-	OUT_RING(ring, CP_LOAD_STATE_1_STATE_TYPE(ST_CONSTANTS) |
-			CP_LOAD_STATE_1_EXT_SRC_ADDR(0));
+	OUT_PKT3(ring, CP_LOAD_STATE4, 2 + (8 * nr_bufs));
+	OUT_RING(ring, CP_LOAD_STATE4_0_DST_OFF(0) |
+			CP_LOAD_STATE4_0_STATE_SRC(SS4_DIRECT) |
+			CP_LOAD_STATE4_0_STATE_BLOCK(SB4_FS_TEX) |
+			CP_LOAD_STATE4_0_NUM_UNIT(nr_bufs));
+	OUT_RING(ring, CP_LOAD_STATE4_1_STATE_TYPE(ST4_CONSTANTS) |
+			CP_LOAD_STATE4_1_EXT_SRC_ADDR(0));
 	for (i = 0; i < nr_bufs; i++) {
 		if (bufs[i]) {
 			struct fd_resource *rsc = fd_resource(bufs[i]->texture);
-			/* note: PIPE_BUFFER disallowed for surfaces */
-			unsigned lvl = bufs[i]->u.tex.level;
-			struct fd_resource_slice *slice = fd_resource_slice(rsc, lvl);
-			uint32_t offset = fd_resource_offset(rsc, lvl, bufs[i]->u.tex.first_layer);
-			enum pipe_format format = fd4_gmem_restore_format(bufs[i]->format);
+			enum pipe_format format = fd_gmem_restore_format(bufs[i]->format);
 
 			/* The restore blit_zs shader expects stencil in sampler 0,
 			 * and depth in sampler 1
 			 */
 			if (rsc->stencil && (i == 0)) {
 				rsc = rsc->stencil;
-				format = fd4_gmem_restore_format(rsc->base.b.format);
+				format = fd_gmem_restore_format(rsc->base.b.format);
 			}
 
+			/* note: PIPE_BUFFER disallowed for surfaces */
+			unsigned lvl = bufs[i]->u.tex.level;
+			struct fd_resource_slice *slice = fd_resource_slice(rsc, lvl);
+			unsigned offset = fd_resource_offset(rsc, lvl, bufs[i]->u.tex.first_layer);
+
 			/* z32 restore is accomplished using depth write. If there is
 			 * no stencil component (ie. PIPE_FORMAT_Z32_FLOAT_S8X24_UINT)
 			 * then no render target:
@@ -493,12 +499,12 @@ fd4_emit_state(struct fd_context *ctx, struct fd_ringbuffer *ring,
 {
 	const struct ir3_shader_variant *vp = fd4_emit_get_vp(emit);
 	const struct ir3_shader_variant *fp = fd4_emit_get_fp(emit);
-	uint32_t dirty = emit->dirty;
+	const enum fd_dirty_3d_state dirty = emit->dirty;
 
 	emit_marker(ring, 5);
 
 	if ((dirty & FD_DIRTY_FRAMEBUFFER) && !emit->key.binning_pass) {
-		struct pipe_framebuffer_state *pfb = &ctx->framebuffer;
+		struct pipe_framebuffer_state *pfb = &ctx->batch->framebuffer;
 		unsigned char mrt_comp[A4XX_MAX_RENDER_TARGETS] = {0};
 
 		for (unsigned i = 0; i < A4XX_MAX_RENDER_TARGETS; i++) {
@@ -518,7 +524,7 @@ fd4_emit_state(struct fd_context *ctx, struct fd_ringbuffer *ring,
 
 	if (dirty & (FD_DIRTY_ZSA | FD_DIRTY_FRAMEBUFFER)) {
 		struct fd4_zsa_stateobj *zsa = fd4_zsa_stateobj(ctx->zsa);
-		struct pipe_framebuffer_state *pfb = &ctx->framebuffer;
+		struct pipe_framebuffer_state *pfb = &ctx->batch->framebuffer;
 		uint32_t rb_alpha_control = zsa->rb_alpha_control;
 
 		if (util_format_is_pure_integer(pipe_surface_format(pfb->cbufs[0])))
@@ -543,12 +549,14 @@ fd4_emit_state(struct fd_context *ctx, struct fd_ringbuffer *ring,
 				A4XX_RB_STENCILREFMASK_BF_STENCILREF(sr->ref_value[1]));
 	}
 
-	if (dirty & (FD_DIRTY_ZSA | FD_DIRTY_PROG)) {
+	if (dirty & (FD_DIRTY_ZSA | FD_DIRTY_RASTERIZER | FD_DIRTY_PROG)) {
 		struct fd4_zsa_stateobj *zsa = fd4_zsa_stateobj(ctx->zsa);
 		bool fragz = fp->has_kill | fp->writes_pos;
+		bool clamp = !ctx->rasterizer->depth_clip;
 
 		OUT_PKT0(ring, REG_A4XX_RB_DEPTH_CONTROL, 1);
 		OUT_RING(ring, zsa->rb_depth_control |
+				COND(clamp, A4XX_RB_DEPTH_CONTROL_Z_CLAMP_ENABLE) |
 				COND(fragz, A4XX_RB_DEPTH_CONTROL_EARLY_Z_DISABLE) |
 				COND(fragz && fp->frag_coord, A4XX_RB_DEPTH_CONTROL_FORCE_FRAGZ_TO_FS));
 
@@ -618,14 +626,14 @@ fd4_emit_state(struct fd_context *ctx, struct fd_ringbuffer *ring,
 		OUT_RING(ring, A4XX_GRAS_SC_WINDOW_SCISSOR_TL_X(scissor->minx) |
 				A4XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(scissor->miny));
 
-		ctx->max_scissor.minx = MIN2(ctx->max_scissor.minx, scissor->minx);
-		ctx->max_scissor.miny = MIN2(ctx->max_scissor.miny, scissor->miny);
-		ctx->max_scissor.maxx = MAX2(ctx->max_scissor.maxx, scissor->maxx);
-		ctx->max_scissor.maxy = MAX2(ctx->max_scissor.maxy, scissor->maxy);
+		ctx->batch->max_scissor.minx = MIN2(ctx->batch->max_scissor.minx, scissor->minx);
+		ctx->batch->max_scissor.miny = MIN2(ctx->batch->max_scissor.miny, scissor->miny);
+		ctx->batch->max_scissor.maxx = MAX2(ctx->batch->max_scissor.maxx, scissor->maxx);
+		ctx->batch->max_scissor.maxy = MAX2(ctx->batch->max_scissor.maxy, scissor->maxy);
 	}
 
 	if (dirty & FD_DIRTY_VIEWPORT) {
-		fd_wfi(ctx, ring);
+		fd_wfi(ctx->batch, ring);
 		OUT_PKT0(ring, REG_A4XX_GRAS_CL_VPORT_XOFFSET_0, 6);
 		OUT_RING(ring, A4XX_GRAS_CL_VPORT_XOFFSET_0(ctx->viewport.translate[0]));
 		OUT_RING(ring, A4XX_GRAS_CL_VPORT_XSCALE_0(ctx->viewport.scale[0]));
@@ -635,8 +643,32 @@ fd4_emit_state(struct fd_context *ctx, struct fd_ringbuffer *ring,
 		OUT_RING(ring, A4XX_GRAS_CL_VPORT_ZSCALE_0(ctx->viewport.scale[2]));
 	}
 
+	if (dirty & (FD_DIRTY_VIEWPORT | FD_DIRTY_RASTERIZER | FD_DIRTY_FRAMEBUFFER)) {
+		float zmin, zmax;
+		int depth = 24;
+		if (ctx->batch->framebuffer.zsbuf) {
+			depth = util_format_get_component_bits(
+					pipe_surface_format(ctx->batch->framebuffer.zsbuf),
+					UTIL_FORMAT_COLORSPACE_ZS, 0);
+		}
+		util_viewport_zmin_zmax(&ctx->viewport, ctx->rasterizer->clip_halfz,
+				&zmin, &zmax);
+
+		OUT_PKT0(ring, REG_A4XX_RB_VPORT_Z_CLAMP(0), 2);
+		if (depth == 32) {
+			OUT_RING(ring, fui(zmin));
+			OUT_RING(ring, fui(zmax));
+		} else if (depth == 16) {
+			OUT_RING(ring, (uint32_t)(zmin * 0xffff));
+			OUT_RING(ring, (uint32_t)(zmax * 0xffff));
+		} else {
+			OUT_RING(ring, (uint32_t)(zmin * 0xffffff));
+			OUT_RING(ring, (uint32_t)(zmax * 0xffffff));
+		}
+	}
+
 	if (dirty & (FD_DIRTY_PROG | FD_DIRTY_FRAMEBUFFER)) {
-		struct pipe_framebuffer_state *pfb = &ctx->framebuffer;
+		struct pipe_framebuffer_state *pfb = &ctx->batch->framebuffer;
 		unsigned n = pfb->nr_cbufs;
 		/* if we have depth/stencil, we need at least on MRT: */
 		if (pfb->zsbuf)
@@ -645,14 +677,9 @@ fd4_emit_state(struct fd_context *ctx, struct fd_ringbuffer *ring,
 	}
 
 	if (emit->prog == &ctx->prog) { /* evil hack to deal sanely with clear path */
-		ir3_emit_consts(vp, ring, ctx, emit->info, dirty);
-		if (!emit->key.binning_pass)
-			ir3_emit_consts(fp, ring, ctx, emit->info, dirty);
-		/* mark clean after emitting consts.. a bit ugly, but since binning
-		 * pass is emitted first, we want to do this only for main draw:
-		 */
+		ir3_emit_vs_consts(vp, ring, ctx, emit->info);
 		if (!emit->key.binning_pass)
-			ctx->prog.dirty = 0;
+			ir3_emit_fs_consts(fp, ring, ctx);
 	}
 
 	if ((dirty & FD_DIRTY_BLEND)) {
@@ -661,7 +688,7 @@ fd4_emit_state(struct fd_context *ctx, struct fd_ringbuffer *ring,
 
 		for (i = 0; i < A4XX_MAX_RENDER_TARGETS; i++) {
 			enum pipe_format format = pipe_surface_format(
-					ctx->framebuffer.cbufs[i]);
+					ctx->batch->framebuffer.cbufs[i]);
 			bool is_int = util_format_is_pure_integer(format);
 			bool has_alpha = util_format_has_alpha(format);
 			uint32_t control = blend->rb_mrt[i].control;
@@ -713,31 +740,21 @@ fd4_emit_state(struct fd_context *ctx, struct fd_ringbuffer *ring,
 		OUT_RING(ring, A4XX_RB_BLEND_ALPHA_F32(bcolor->color[3]));
 	}
 
-	if (dirty & FD_DIRTY_VERTTEX) {
-		if (vp->has_samp)
-			emit_textures(ctx, ring, SB_VERT_TEX, &ctx->verttex, vp);
-		else
-			dirty &= ~FD_DIRTY_VERTTEX;
-	}
+	if (ctx->dirty_shader[PIPE_SHADER_VERTEX] & FD_DIRTY_SHADER_TEX)
+		emit_textures(ctx, ring, SB4_VS_TEX, &ctx->tex[PIPE_SHADER_VERTEX], vp);
 
-	if (dirty & FD_DIRTY_FRAGTEX) {
-		if (fp->has_samp)
-			emit_textures(ctx, ring, SB_FRAG_TEX, &ctx->fragtex, fp);
-		else
-			dirty &= ~FD_DIRTY_FRAGTEX;
-	}
-
-	ctx->dirty &= ~dirty;
+	if (ctx->dirty_shader[PIPE_SHADER_FRAGMENT] & FD_DIRTY_SHADER_TEX)
+		emit_textures(ctx, ring, SB4_FS_TEX, &ctx->tex[PIPE_SHADER_FRAGMENT], fp);
 }
 
 /* emit setup at begin of new cmdstream buffer (don't rely on previous
  * state, there could have been a context switch between ioctls):
  */
 void
-fd4_emit_restore(struct fd_context *ctx)
+fd4_emit_restore(struct fd_batch *batch, struct fd_ringbuffer *ring)
 {
+	struct fd_context *ctx = batch->ctx;
 	struct fd4_context *fd4_ctx = fd4_context(ctx);
-	struct fd_ringbuffer *ring = ctx->ring;
 
 	OUT_PKT0(ring, REG_A4XX_RBBM_PERFCTR_CTL, 1);
 	OUT_RING(ring, 0x00000001);
@@ -845,10 +862,10 @@ fd4_emit_restore(struct fd_context *ctx)
 
 	/* we don't use this yet.. probably best to disable.. */
 	OUT_PKT3(ring, CP_SET_DRAW_STATE, 2);
-	OUT_RING(ring, CP_SET_DRAW_STATE_0_COUNT(0) |
-			CP_SET_DRAW_STATE_0_DISABLE_ALL_GROUPS |
-			CP_SET_DRAW_STATE_0_GROUP_ID(0));
-	OUT_RING(ring, CP_SET_DRAW_STATE_1_ADDR(0));
+	OUT_RING(ring, CP_SET_DRAW_STATE__0_COUNT(0) |
+			CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS |
+			CP_SET_DRAW_STATE__0_GROUP_ID(0));
+	OUT_RING(ring, CP_SET_DRAW_STATE__1_ADDR_LO(0));
 
 	OUT_PKT0(ring, REG_A4XX_SP_VS_PVT_MEM_PARAM, 2);
 	OUT_RING(ring, 0x08000001); /* SP_VS_PVT_MEM_PARAM */
@@ -884,16 +901,13 @@ fd4_emit_restore(struct fd_context *ctx)
 	OUT_PKT0(ring, REG_A4XX_GRAS_ALPHA_CONTROL, 1);
 	OUT_RING(ring, 0x0);
 
-	fd_hw_query_enable(ctx, ring);
-
-	ctx->needs_rb_fbd = true;
+	fd_hw_query_enable(batch, ring);
 }
 
 static void
-fd4_emit_ib(struct fd_ringbuffer *ring, struct fd_ringmarker *start,
-		struct fd_ringmarker *end)
+fd4_emit_ib(struct fd_ringbuffer *ring, struct fd_ringbuffer *target)
 {
-	__OUT_IB(ring, true, start, end);
+	__OUT_IB(ring, true, target);
 }
 
 void