#include "freedreno_resource.h"
#include "freedreno_texture.h"
#include "freedreno_gmem.h"
+#include "freedreno_query_hw.h"
#include "freedreno_util.h"
/* All the generic state handling.. In case of CSO's that are specific
 * to the GPU version, when the bind and the delete are common they can
 * be in here.
 */

/* index>0 will be UBO's.. well, I'll worry about that later */
static void
-fd_set_constant_buffer(struct pipe_context *pctx, uint shader, uint index,
- struct pipe_constant_buffer *cb)
+fd_set_constant_buffer(struct pipe_context *pctx,
+ enum pipe_shader_type shader, uint index,
+ const struct pipe_constant_buffer *cb)
{
struct fd_context *ctx = fd_context(pctx);
struct fd_constbuf_stateobj *so = &ctx->constbuf[shader];
+ util_copy_constant_buffer(&so->cb[index], cb);
+
/* Note that the state tracker can unbind constant buffers by
* passing NULL here.
*/
if (unlikely(!cb)) {
so->enabled_mask &= ~(1 << index);
so->dirty_mask &= ~(1 << index);
- pipe_resource_reference(&so->cb[index].buffer, NULL);
return;
}
- pipe_resource_reference(&so->cb[index].buffer, cb->buffer);
- so->cb[index].buffer_offset = cb->buffer_offset;
- so->cb[index].buffer_size = cb->buffer_size;
- so->cb[index].user_buffer = cb->user_buffer;
-
so->enabled_mask |= 1 << index;
so->dirty_mask |= 1 << index;
- ctx->dirty |= FD_DIRTY_CONSTBUF;
+ ctx->dirty_shader[shader] |= FD_DIRTY_SHADER_CONST;
+ ctx->dirty |= FD_DIRTY_CONST;
}
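
The open-coded reference/copy deleted above collapses into a single
util_copy_constant_buffer() call, which also covers the unbind case:
when cb is NULL it drops the old buffer reference itself, which is why
the explicit pipe_resource_reference(..., NULL) before the early return
can go away. A minimal sketch of that helper, modeled on the one in
util/u_inlines.h (the exact in-tree definition may differ):

	static inline void
	util_copy_constant_buffer(struct pipe_constant_buffer *dst,
			const struct pipe_constant_buffer *src)
	{
		if (src) {
			pipe_resource_reference(&dst->buffer, src->buffer);
			dst->buffer_offset = src->buffer_offset;
			dst->buffer_size = src->buffer_size;
			dst->user_buffer = src->user_buffer;
		} else {
			/* unbind: release the old reference and clear the slot */
			pipe_resource_reference(&dst->buffer, NULL);
			dst->buffer_offset = 0;
			dst->buffer_size = 0;
			dst->user_buffer = NULL;
		}
	}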
static void
fd_set_framebuffer_state(struct pipe_context *pctx,
		const struct pipe_framebuffer_state *framebuffer)
{
struct fd_context *ctx = fd_context(pctx);
- struct pipe_framebuffer_state *cso = &ctx->framebuffer;
-
- DBG("%d: cbufs[0]=%p, zsbuf=%p", ctx->needs_flush,
- 		framebuffer->cbufs[0], framebuffer->zsbuf);
-
- fd_context_render(pctx);
-
- if ((cso->width != framebuffer->width) ||
- 		(cso->height != framebuffer->height))
- 	ctx->needs_rb_fbd = true;
+ struct pipe_framebuffer_state *cso;
+
+ if (ctx->screen->reorder) {
+ 	struct fd_batch *batch, *old_batch = NULL;
+
+ 	fd_batch_reference(&old_batch, ctx->batch);
+
+ 	if (likely(old_batch))
+ 		fd_batch_set_stage(old_batch, FD_STAGE_NULL);
+
+ 	batch = fd_batch_from_fb(&ctx->screen->batch_cache, ctx, framebuffer);
+ 	fd_batch_reference(&ctx->batch, NULL);
+ 	fd_reset_wfi(batch);
+ 	ctx->batch = batch;
+ 	fd_context_all_dirty(ctx);
+
+ 	if (old_batch && old_batch->blit && !old_batch->back_blit) {
+ 		/* for blits, there is not really much point in hanging on
+ 		 * to the uncommitted batch (ie. you probably don't blit
+ 		 * multiple times to the same surface), so we might as
+ 		 * well go ahead and flush this one:
+ 		 */
+ 		fd_batch_flush(old_batch, false);
+ 	}
+
+ 	fd_batch_reference(&old_batch, NULL);
+ } else {
+ 	DBG("%d: cbufs[0]=%p, zsbuf=%p", ctx->batch->needs_flush,
+ 			framebuffer->cbufs[0], framebuffer->zsbuf);
+ 	fd_batch_flush(ctx->batch, false);
+ }
+
+ cso = &ctx->batch->framebuffer;
util_copy_framebuffer_state(cso, framebuffer);
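
After switching to the new batch, the context marks all state dirty,
since the new batch's command stream has recorded none of the current
state yet. fd_context_all_dirty() is essentially the following (a
sketch assuming the per-shader dirty bits used elsewhere in this
patch; the actual inline helper may differ):

	static inline void
	fd_context_all_dirty(struct fd_context *ctx)
	{
		ctx->dirty = ~0;
		for (unsigned i = 0; i < PIPE_SHADER_TYPES; i++)
			ctx->dirty_shader[i] = ~0;
	}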
/* on a2xx, pitch is encoded in the vtx fetch instruction, so
 * we need to mark VTXSTATE as dirty as well to trigger patching
* and re-emitting the vtx shader:
*/
- for (i = 0; i < count; i++) {
- 	bool new_enabled = vb && (vb[i].buffer || vb[i].user_buffer);
- 	bool old_enabled = so->vb[i].buffer || so->vb[i].user_buffer;
- 	uint32_t new_stride = vb ? vb[i].stride : 0;
- 	uint32_t old_stride = so->vb[i].stride;
- 	if ((new_enabled != old_enabled) || (new_stride != old_stride)) {
- 		ctx->dirty |= FD_DIRTY_VTXSTATE;
- 		break;
+ if (ctx->screen->gpu_id < 300) {
+ 	for (i = 0; i < count; i++) {
+ 		bool new_enabled = vb && (vb[i].buffer || vb[i].user_buffer);
+ 		bool old_enabled = so->vb[i].buffer || so->vb[i].user_buffer;
+ 		uint32_t new_stride = vb ? vb[i].stride : 0;
+ 		uint32_t old_stride = so->vb[i].stride;
+ 		if ((new_enabled != old_enabled) || (new_stride != old_stride)) {
+ 			ctx->dirty |= FD_DIRTY_VTXSTATE;
+ 			break;
+ 		}
}
}
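
In other words: on a2xx (gpu_id < 300) the vertex stride is baked into
the shader's fetch instructions, so toggling a buffer slot or changing
its stride forces the vertex shader to be re-patched and re-emitted,
while a3xx and later take the stride from VBO state and can skip the
scan entirely. Distilled into a hypothetical predicate (illustrative
only; no such helper exists in the driver):

	/* Hypothetical: would an a2xx-style patched vertex shader need
	 * re-patching for the incoming vertex buffer state? */
	static bool
	a2xx_needs_vtx_repatch(const struct fd_vertexbuf_stateobj *so,
			const struct pipe_vertex_buffer *vb, unsigned count)
	{
		for (unsigned i = 0; i < count; i++) {
			bool new_enabled = vb && (vb[i].buffer || vb[i].user_buffer);
			bool old_enabled = so->vb[i].buffer || so->vb[i].user_buffer;
			uint32_t new_stride = vb ? vb[i].stride : 0;

			if ((new_enabled != old_enabled) ||
					(new_stride != so->vb[i].stride))
				return true;
		}
		return false;
	}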
static struct pipe_stream_output_target *
fd_create_stream_output_target(struct pipe_context *pctx,
		struct pipe_resource *prsc, unsigned buffer_offset,
		unsigned buffer_size)
{
struct pipe_stream_output_target *target;
+ struct fd_resource *rsc = fd_resource(prsc);
target = CALLOC_STRUCT(pipe_stream_output_target);
if (!target)
	return NULL;

pipe_reference_init(&target->reference, 1);
pipe_resource_reference(&target->buffer, prsc);

target->context = pctx;
target->buffer_offset = buffer_offset;
target->buffer_size = buffer_size;
+ assert(rsc->base.b.target == PIPE_BUFFER);
+ util_range_add(&rsc->valid_buffer_range,
+ buffer_offset, buffer_offset + buffer_size);
+
return target;
}
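
The util_range_add() call records that stream output may write
[buffer_offset, buffer_offset + buffer_size), so later CPU maps of
ranges the GPU has never written can skip synchronization. A sketch of
the consuming side, using the real util_ranges_intersect() helper from
util/u_range.h (the surrounding transfer-map logic is illustrative,
not freedreno's exact code):

	/* Illustrative only: how a transfer_map path can consult the
	 * valid range before deciding whether to stall: */
	if (prsc->target == PIPE_BUFFER &&
			!util_ranges_intersect(&rsc->valid_buffer_range,
					box->x, box->x + box->width)) {
		/* No GPU write has ever touched the mapped span, so it is
		 * safe to map without waiting on pending rendering. */
		usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
	}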