X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fgallium%2Fdrivers%2Ffreedreno%2Ffreedreno_state.c;h=05717da95995e5cec6a50a2194e440c45d8ca234;hb=d1d2b13518c190cf6db45dfb45b07f1246769767;hp=27869295622c6693307de94a3c923abcbe4017f5;hpb=d5d80b37392c7f15c4fb39b6b1826230239930fd;p=mesa.git diff --git a/src/gallium/drivers/freedreno/freedreno_state.c b/src/gallium/drivers/freedreno/freedreno_state.c index 27869295622..05717da9599 100644 --- a/src/gallium/drivers/freedreno/freedreno_state.c +++ b/src/gallium/drivers/freedreno/freedreno_state.c @@ -27,6 +27,7 @@ */ #include "pipe/p_state.h" +#include "util/u_dual_blend.h" #include "util/u_string.h" #include "util/u_memory.h" #include "util/u_helpers.h" @@ -36,6 +37,7 @@ #include "freedreno_resource.h" #include "freedreno_texture.h" #include "freedreno_gmem.h" +#include "freedreno_query_hw.h" #include "freedreno_util.h" /* All the generic state handling.. In case of CSO's that are specific @@ -65,7 +67,9 @@ static void fd_set_clip_state(struct pipe_context *pctx, const struct pipe_clip_state *clip) { - DBG("TODO: "); + struct fd_context *ctx = fd_context(pctx); + ctx->ucp = *clip; + ctx->dirty |= FD_DIRTY_UCP; } static void @@ -85,30 +89,123 @@ fd_set_sample_mask(struct pipe_context *pctx, unsigned sample_mask) * index>0 will be UBO's.. well, I'll worry about that later */ static void -fd_set_constant_buffer(struct pipe_context *pctx, uint shader, uint index, - struct pipe_constant_buffer *cb) +fd_set_constant_buffer(struct pipe_context *pctx, + enum pipe_shader_type shader, uint index, + const struct pipe_constant_buffer *cb) { struct fd_context *ctx = fd_context(pctx); struct fd_constbuf_stateobj *so = &ctx->constbuf[shader]; + util_copy_constant_buffer(&so->cb[index], cb); + /* Note that the state tracker can unbind constant buffers by * passing NULL here. 
 	 */
 	if (unlikely(!cb)) {
 		so->enabled_mask &= ~(1 << index);
 		so->dirty_mask &= ~(1 << index);
-		pipe_resource_reference(&so->cb[index].buffer, NULL);
 		return;
 	}
 
-	pipe_resource_reference(&so->cb[index].buffer, cb->buffer);
-	so->cb[index].buffer_offset = cb->buffer_offset;
-	so->cb[index].buffer_size = cb->buffer_size;
-	so->cb[index].user_buffer = cb->user_buffer;
-
 	so->enabled_mask |= 1 << index;
 	so->dirty_mask |= 1 << index;
-	ctx->dirty |= FD_DIRTY_CONSTBUF;
+	ctx->dirty_shader[shader] |= FD_DIRTY_SHADER_CONST;
+	ctx->dirty |= FD_DIRTY_CONST;
+}
+
+static void
+fd_set_shader_buffers(struct pipe_context *pctx,
+		enum pipe_shader_type shader,
+		unsigned start, unsigned count,
+		const struct pipe_shader_buffer *buffers)
+{
+	struct fd_context *ctx = fd_context(pctx);
+	struct fd_shaderbuf_stateobj *so = &ctx->shaderbuf[shader];
+	unsigned mask = 0;
+
+	if (buffers) {
+		for (unsigned i = 0; i < count; i++) {
+			unsigned n = i + start;
+			struct pipe_shader_buffer *buf = &so->sb[n];
+
+			if ((buf->buffer == buffers[i].buffer) &&
+					(buf->buffer_offset == buffers[i].buffer_offset) &&
+					(buf->buffer_size == buffers[i].buffer_size))
+				continue;
+
+			mask |= BIT(n);
+
+			buf->buffer_offset = buffers[i].buffer_offset;
+			buf->buffer_size = buffers[i].buffer_size;
+			pipe_resource_reference(&buf->buffer, buffers[i].buffer);
+
+			if (buf->buffer)
+				so->enabled_mask |= BIT(n);
+			else
+				so->enabled_mask &= ~BIT(n);
+		}
+	} else {
+		mask = (BIT(count) - 1) << start;
+
+		for (unsigned i = 0; i < count; i++) {
+			unsigned n = i + start;
+			struct pipe_shader_buffer *buf = &so->sb[n];
+
+			pipe_resource_reference(&buf->buffer, NULL);
+		}
+
+		so->enabled_mask &= ~mask;
+	}
+
+	so->dirty_mask |= mask;
+	ctx->dirty_shader[shader] |= FD_DIRTY_SHADER_SSBO;
+}
+
+static void
+fd_set_shader_images(struct pipe_context *pctx,
+		enum pipe_shader_type shader,
+		unsigned start, unsigned count,
+		const struct pipe_image_view *images)
+{
+	struct fd_context *ctx = fd_context(pctx);
+	struct fd_shaderimg_stateobj *so = &ctx->shaderimg[shader];
+
+	unsigned mask = 0;
+
+	if (images) {
+		for (unsigned i = 0; i < count; i++) {
+			unsigned n = i + start;
+			struct pipe_image_view *buf = &so->si[n];
+
+			if ((buf->resource == images[i].resource) &&
+					(buf->format == images[i].format) &&
+					(buf->access == images[i].access) &&
+					!memcmp(&buf->u, &images[i].u, sizeof(buf->u)))
+				continue;
+
+			mask |= BIT(n);
+			util_copy_image_view(buf, &images[i]);
+
+			if (buf->resource)
+				so->enabled_mask |= BIT(n);
+			else
+				so->enabled_mask &= ~BIT(n);
+		}
+	} else {
+		mask = (BIT(count) - 1) << start;
+
+		for (unsigned i = 0; i < count; i++) {
+			unsigned n = i + start;
+			struct pipe_image_view *img = &so->si[n];
+
+			pipe_resource_reference(&img->resource, NULL);
+		}
+
+		so->enabled_mask &= ~mask;
+	}
+
+	so->dirty_mask |= mask;
+	ctx->dirty_shader[shader] |= FD_DIRTY_SHADER_IMAGE;
 }
 
 static void
@@ -116,18 +213,41 @@ fd_set_framebuffer_state(struct pipe_context *pctx,
 		const struct pipe_framebuffer_state *framebuffer)
 {
 	struct fd_context *ctx = fd_context(pctx);
-	struct pipe_framebuffer_state *cso = &ctx->framebuffer;
+	struct pipe_framebuffer_state *cso;
 
-	DBG("%d: cbufs[0]=%p, zsbuf=%p", ctx->needs_flush,
-			framebuffer->cbufs[0], framebuffer->zsbuf);
+	if (ctx->screen->reorder) {
+		struct fd_batch *batch, *old_batch = NULL;
 
-	fd_context_render(pctx);
+		fd_batch_reference(&old_batch, ctx->batch);
 
-	util_copy_framebuffer_state(cso, framebuffer);
+		if (likely(old_batch))
+			fd_batch_set_stage(old_batch, FD_STAGE_NULL);
 
-	if ((cso->width != framebuffer->width) ||
-			(cso->height != framebuffer->height))
-		ctx->needs_rb_fbd = true;
+		batch = fd_batch_from_fb(&ctx->screen->batch_cache, ctx, framebuffer);
+		fd_batch_reference(&ctx->batch, NULL);
+		fd_reset_wfi(batch);
+		ctx->batch = batch;
+		fd_context_all_dirty(ctx);
+
+		if (old_batch && old_batch->blit && !old_batch->back_blit) {
+			/* for blits, there is not really much point in hanging on
+			 * to the uncommitted batch (ie. you probably don't blit
+			 * multiple times to the same surface), so we might as
+			 * well go ahead and flush this one:
+			 */
+			fd_batch_flush(old_batch, false, false);
+		}
+
+		fd_batch_reference(&old_batch, NULL);
+	} else {
+		DBG("%d: cbufs[0]=%p, zsbuf=%p", ctx->batch->needs_flush,
+				framebuffer->cbufs[0], framebuffer->zsbuf);
+		fd_batch_flush(ctx->batch, false, false);
+	}
+
+	cso = &ctx->batch->framebuffer;
+
+	util_copy_framebuffer_state(cso, framebuffer);
 
 	ctx->dirty |= FD_DIRTY_FRAMEBUFFER;
 
@@ -184,14 +304,16 @@ fd_set_vertex_buffers(struct pipe_context *pctx,
 	 * we need to mark VTXSTATE as dirty as well to trigger patching
 	 * and re-emitting the vtx shader:
 	 */
-	for (i = 0; i < count; i++) {
-		bool new_enabled = vb && (vb[i].buffer || vb[i].user_buffer);
-		bool old_enabled = so->vb[i].buffer || so->vb[i].user_buffer;
-		uint32_t new_stride = vb ? vb[i].stride : 0;
-		uint32_t old_stride = so->vb[i].stride;
-		if ((new_enabled != old_enabled) || (new_stride != old_stride)) {
-			ctx->dirty |= FD_DIRTY_VTXSTATE;
-			break;
+	if (ctx->screen->gpu_id < 300) {
+		for (i = 0; i < count; i++) {
+			bool new_enabled = vb && vb[i].buffer.resource;
+			bool old_enabled = so->vb[i].buffer.resource != NULL;
+			uint32_t new_stride = vb ? vb[i].stride : 0;
+			uint32_t old_stride = so->vb[i].stride;
+			if ((new_enabled != old_enabled) || (new_stride != old_stride)) {
+				ctx->dirty |= FD_DIRTY_VTXSTATE;
+				break;
+			}
 		}
 	}
 
@@ -201,30 +323,21 @@ fd_set_vertex_buffers(struct pipe_context *pctx,
 	ctx->dirty |= FD_DIRTY_VTXBUF;
 }
 
-static void
-fd_set_index_buffer(struct pipe_context *pctx,
-		const struct pipe_index_buffer *ib)
-{
-	struct fd_context *ctx = fd_context(pctx);
-
-	if (ib) {
-		pipe_resource_reference(&ctx->indexbuf.buffer, ib->buffer);
-		ctx->indexbuf.index_size = ib->index_size;
-		ctx->indexbuf.offset = ib->offset;
-		ctx->indexbuf.user_buffer = ib->user_buffer;
-	} else {
-		pipe_resource_reference(&ctx->indexbuf.buffer, NULL);
-	}
-
-	ctx->dirty |= FD_DIRTY_INDEXBUF;
-}
-
 static void
 fd_blend_state_bind(struct pipe_context *pctx, void *hwcso)
 {
 	struct fd_context *ctx = fd_context(pctx);
+	struct pipe_blend_state *cso = hwcso;
+	bool old_is_dual = ctx->blend ?
+		ctx->blend->rt[0].blend_enable && util_blend_state_is_dual(ctx->blend, 0) :
+		false;
+	bool new_is_dual = cso ?
+		cso->rt[0].blend_enable && util_blend_state_is_dual(cso, 0) :
+		false;
 	ctx->blend = hwcso;
 	ctx->dirty |= FD_DIRTY_BLEND;
+	if (old_is_dual != new_is_dual)
+		ctx->dirty |= FD_DIRTY_BLEND_DUAL;
 }
 
 static void
@@ -237,8 +350,18 @@
 fd_rasterizer_state_bind(struct pipe_context *pctx, void *hwcso)
 {
 	struct fd_context *ctx = fd_context(pctx);
+	struct pipe_scissor_state *old_scissor = fd_context_get_scissor(ctx);
+
 	ctx->rasterizer = hwcso;
 	ctx->dirty |= FD_DIRTY_RASTERIZER;
+
+	/* if scissor enable bit changed we need to mark scissor
+	 * state as dirty as well:
+	 * NOTE: we can do a shallow compare, since we only care
+	 * if it changed to/from &ctx->disable_scissor
+	 */
+	if (old_scissor != fd_context_get_scissor(ctx))
+		ctx->dirty |= FD_DIRTY_SCISSOR;
 }
 
 static void
@@ -290,6 +413,137 @@ fd_vertex_state_bind(struct pipe_context *pctx, void *hwcso)
 	ctx->dirty |= FD_DIRTY_VTXSTATE;
 }
 
+static struct pipe_stream_output_target *
+fd_create_stream_output_target(struct pipe_context *pctx,
+		struct pipe_resource *prsc, unsigned buffer_offset,
+		unsigned buffer_size)
+{
+	struct pipe_stream_output_target *target;
+	struct fd_resource *rsc = fd_resource(prsc);
+
+	target = CALLOC_STRUCT(pipe_stream_output_target);
+	if (!target)
+		return NULL;
+
+	pipe_reference_init(&target->reference, 1);
+	pipe_resource_reference(&target->buffer, prsc);
+
+	target->context = pctx;
+	target->buffer_offset = buffer_offset;
+	target->buffer_size = buffer_size;
+
+	assert(rsc->base.target == PIPE_BUFFER);
+	util_range_add(&rsc->valid_buffer_range,
+		buffer_offset, buffer_offset + buffer_size);
+
+	return target;
+}
+
+static void
+fd_stream_output_target_destroy(struct pipe_context *pctx,
+		struct pipe_stream_output_target *target)
+{
+	pipe_resource_reference(&target->buffer, NULL);
+	FREE(target);
+}
+
+static void
+fd_set_stream_output_targets(struct pipe_context *pctx,
+		unsigned num_targets, struct pipe_stream_output_target **targets,
+		const unsigned *offsets)
+{
+	struct fd_context *ctx = fd_context(pctx);
+	struct fd_streamout_stateobj *so = &ctx->streamout;
+	unsigned i;
+
+	debug_assert(num_targets <= ARRAY_SIZE(so->targets));
+
+	for (i = 0; i < num_targets; i++) {
+		boolean changed = targets[i] != so->targets[i];
+		boolean append = (offsets[i] == (unsigned)-1);
+
+		if (!changed && append)
+			continue;
+
+		if (!append)
+			so->offsets[i] = offsets[i];
+
+		pipe_so_target_reference(&so->targets[i], targets[i]);
+	}
+
+	for (; i < so->num_targets; i++) {
+		pipe_so_target_reference(&so->targets[i], NULL);
+	}
+
+	so->num_targets = num_targets;
+
+	ctx->dirty |= FD_DIRTY_STREAMOUT;
+}
+
+static void
+fd_bind_compute_state(struct pipe_context *pctx, void *state)
+{
+	struct fd_context *ctx = fd_context(pctx);
+	ctx->compute = state;
+	ctx->dirty_shader[PIPE_SHADER_COMPUTE] |= FD_DIRTY_SHADER_PROG;
+}
+
+static void
+fd_set_compute_resources(struct pipe_context *pctx,
+		unsigned start, unsigned count, struct pipe_surface **prscs)
+{
+	// TODO
+}
+
+/* used by clover to bind global objects, returning the bo address
+ * via handles[n]
+ */
+static void
+fd_set_global_binding(struct pipe_context *pctx,
+		unsigned first, unsigned count, struct pipe_resource **prscs,
+		uint32_t **handles)
+{
+	struct fd_context *ctx = fd_context(pctx);
+	struct fd_global_bindings_stateobj *so = &ctx->global_bindings;
+	unsigned mask = 0;
+
+	if (prscs) {
+		for (unsigned i = 0; i < count; i++) {
+			unsigned n = i + first;
+
+			mask |= BIT(n);
+
+			pipe_resource_reference(&so->buf[n], prscs[i]);
+
+			if (so->buf[n]) {
+				struct fd_resource *rsc = fd_resource(so->buf[n]);
+				uint64_t iova = fd_bo_get_iova(rsc->bo);
+				// TODO need to scream if iova > 32b or fix gallium API..
+				*handles[i] += iova;
+			}
+
+			if (prscs[i])
+				so->enabled_mask |= BIT(n);
+			else
+				so->enabled_mask &= ~BIT(n);
+		}
+	} else {
+		mask = (BIT(count) - 1) << first;
+
+		for (unsigned i = 0; i < count; i++) {
+			unsigned n = i + first;
+			if (so->buf[n]) {
+				struct fd_resource *rsc = fd_resource(so->buf[n]);
+				fd_bo_put_iova(rsc->bo);
+			}
+			pipe_resource_reference(&so->buf[n], NULL);
+		}
+
+		so->enabled_mask &= ~mask;
+	}
+
+}
+
 void
 fd_state_init(struct pipe_context *pctx)
 {
@@ -298,13 +552,14 @@
 	pctx->set_clip_state = fd_set_clip_state;
 	pctx->set_sample_mask = fd_set_sample_mask;
 	pctx->set_constant_buffer = fd_set_constant_buffer;
+	pctx->set_shader_buffers = fd_set_shader_buffers;
+	pctx->set_shader_images = fd_set_shader_images;
 	pctx->set_framebuffer_state = fd_set_framebuffer_state;
 	pctx->set_polygon_stipple = fd_set_polygon_stipple;
 	pctx->set_scissor_states = fd_set_scissor_states;
 	pctx->set_viewport_states = fd_set_viewport_states;
 	pctx->set_vertex_buffers = fd_set_vertex_buffers;
-	pctx->set_index_buffer = fd_set_index_buffer;
 
 	pctx->bind_blend_state = fd_blend_state_bind;
 	pctx->delete_blend_state = fd_blend_state_delete;
@@ -318,4 +573,14 @@
 	pctx->create_vertex_elements_state = fd_vertex_state_create;
 	pctx->delete_vertex_elements_state = fd_vertex_state_delete;
 	pctx->bind_vertex_elements_state = fd_vertex_state_bind;
+
+	pctx->create_stream_output_target = fd_create_stream_output_target;
+	pctx->stream_output_target_destroy = fd_stream_output_target_destroy;
+	pctx->set_stream_output_targets = fd_set_stream_output_targets;
+
+	if (has_compute(fd_screen(pctx->screen))) {
+		pctx->bind_compute_state = fd_bind_compute_state;
+		pctx->set_compute_resources = fd_set_compute_resources;
+		pctx->set_global_binding = fd_set_global_binding;
+	}
 }
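
Note: fd_set_shader_buffers(), fd_set_shader_images() and fd_set_global_binding() in the diff above all follow the same per-slot mask idiom: a NULL array unbinds the whole [start, start+count) range in one go; otherwise each incoming slot is compared against what is already bound, and only slots that actually changed get BIT(n) accumulated into dirty_mask, with enabled_mask tracking which slots currently hold a buffer. A minimal self-contained sketch of that idiom (hypothetical names and types, not the gallium structs or the driver's real code):

#include <stdio.h>

#define BIT(n) (1u << (n))
#define MAX_SLOTS 8

struct binding_state {
	const void *bound[MAX_SLOTS];   /* stand-in for pipe_resource pointers */
	unsigned enabled_mask;          /* which slots currently hold a buffer */
	unsigned dirty_mask;            /* which slots changed since last emit */
};

static void
set_buffers(struct binding_state *so, unsigned start, unsigned count,
		const void * const *bufs)
{
	unsigned mask = 0;

	if (bufs) {
		for (unsigned i = 0; i < count; i++) {
			unsigned n = i + start;

			if (so->bound[n] == bufs[i])
				continue;               /* unchanged slot: skip, don't dirty */

			mask |= BIT(n);
			so->bound[n] = bufs[i];

			if (bufs[i])
				so->enabled_mask |= BIT(n);
			else
				so->enabled_mask &= ~BIT(n);
		}
	} else {
		/* NULL array unbinds the whole [start, start+count) range: */
		mask = (BIT(count) - 1) << start;
		for (unsigned i = 0; i < count; i++)
			so->bound[i + start] = NULL;
		so->enabled_mask &= ~mask;
	}

	so->dirty_mask |= mask;
}

int
main(void)
{
	struct binding_state so = { {0}, 0, 0 };
	int a, b;
	const void *bufs[] = { &a, &b };

	set_buffers(&so, 2, 2, bufs);   /* bind slots 2 and 3 */
	printf("enabled=0x%x dirty=0x%x\n", so.enabled_mask, so.dirty_mask);

	set_buffers(&so, 2, 2, NULL);   /* unbind the same range */
	printf("enabled=0x%x dirty=0x%x\n", so.enabled_mask, so.dirty_mask);
	return 0;
}

The early continue on unchanged slots is what keeps redundant state-tracker calls from dirtying, and later re-emitting, state that did not actually move.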
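
Note: fd_blend_state_bind() dirties FD_DIRTY_BLEND on every bind, but FD_DIRTY_BLEND_DUAL only when util_blend_state_is_dual() differs between the outgoing and incoming CSO, i.e. when dual-source blending (a SRC1_* blend factor consuming the shader's second color output) actually toggles. A rough stand-alone sketch of that transition-only dirtying; is_dual() here is a simplified stand-in for util_blend_state_is_dual(), and the types are hypothetical:

#include <stdbool.h>
#include <stdio.h>

enum { DIRTY_BLEND = 1 << 0, DIRTY_BLEND_DUAL = 1 << 1 };

struct blend_cso { bool blend_enable; bool uses_src1_factor; };
struct context { const struct blend_cso *blend; unsigned dirty; };

/* stand-in for util_blend_state_is_dual(): true when RT0 blends
 * using the second color output (a SRC1_* factor): */
static bool
is_dual(const struct blend_cso *cso)
{
	return cso && cso->blend_enable && cso->uses_src1_factor;
}

static void
bind_blend(struct context *ctx, const struct blend_cso *cso)
{
	bool old_is_dual = is_dual(ctx->blend);
	bool new_is_dual = is_dual(cso);

	ctx->blend = cso;
	ctx->dirty |= DIRTY_BLEND;

	/* only flag the expensive reconfiguration when dual-source
	 * blending actually toggles: */
	if (old_is_dual != new_is_dual)
		ctx->dirty |= DIRTY_BLEND_DUAL;
}

int
main(void)
{
	struct context ctx = { 0 };
	struct blend_cso plain = { true, false };
	struct blend_cso dual = { true, true };

	bind_blend(&ctx, &plain);
	printf("after plain bind: dirty=0x%x\n", ctx.dirty);   /* 0x1 */

	ctx.dirty = 0;
	bind_blend(&ctx, &dual);
	printf("after dual bind:  dirty=0x%x\n", ctx.dirty);   /* 0x3 */
	return 0;
}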
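
Note: fd_set_stream_output_targets() treats offsets[i] == (unsigned)-1 as gallium's "append" request: the driver keeps its internally tracked write offset for that target instead of rebasing it. A tiny sketch of just that offset bookkeeping (hypothetical stand-alone types, not the driver's structs):

#include <stdio.h>

#define SO_APPEND ((unsigned)-1)
#define MAX_SO_TARGETS 4

struct so_state {
	unsigned offsets[MAX_SO_TARGETS];
	unsigned num_targets;
};

static void
set_so_targets(struct so_state *so, unsigned num, const unsigned *offsets)
{
	for (unsigned i = 0; i < num; i++) {
		if (offsets[i] != SO_APPEND)
			so->offsets[i] = offsets[i];   /* explicit rebase */
		/* else: leave so->offsets[i] alone and keep appending */
	}
	so->num_targets = num;
}

int
main(void)
{
	struct so_state so = { {0}, 0 };
	unsigned first[] = { 0 };
	unsigned append[] = { SO_APPEND };

	set_so_targets(&so, 1, first);    /* start writing at offset 0 */
	so.offsets[0] += 64;              /* pretend a draw emitted 64 bytes */
	set_so_targets(&so, 1, append);   /* rebind, continuing at 64 */
	printf("offset after rebind: %u\n", so.offsets[0]);
	return 0;
}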