static void
nvc0_memory_barrier(struct pipe_context *pipe, unsigned flags)
{
   struct nvc0_context *nvc0 = nvc0_context(pipe);
+  struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   int i, s;

   if (flags & PIPE_BARRIER_MAPPED_BUFFER) {
      for (i = 0; i < nvc0->num_vtxbufs; ++i) {
         if (!nvc0->vtxbuf[i].buffer)
            continue;
         if (nvc0->vtxbuf[i].buffer->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT)
-           nvc0->base.vbo_dirty = TRUE;
+           nvc0->base.vbo_dirty = true;
      }

      if (nvc0->idxbuf.buffer &&
          nvc0->idxbuf.buffer->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT)
-        nvc0->base.vbo_dirty = TRUE;
+        nvc0->base.vbo_dirty = true;

      for (s = 0; s < 5 && !nvc0->cb_dirty; ++s) {
         uint32_t valid = nvc0->constbuf_valid[s];

         while (valid && !nvc0->cb_dirty) {
            const unsigned i = ffs(valid) - 1;
            struct pipe_resource *res;

            valid &= ~(1 << i);
            if (nvc0->constbuf[s][i].user)
               continue;
            res = nvc0->constbuf[s][i].u.buf;
            if (res->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT)
-              nvc0->cb_dirty = TRUE;
+              nvc0->cb_dirty = true;
         }
      }
   }
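+
+  /* Presumably a shader-side cache flush: MEM_BARRIER with these magic bits
+   * should make prior shader buffer (SSBO) writes visible to later reads. */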
+  if (flags & PIPE_BARRIER_SHADER_BUFFER) {
+     IMMED_NVC0(push, NVC0_3D(MEM_BARRIER), 0x1011);
+  }
}

static void
nvc0_context_unreference_resources(struct nvc0_context *nvc0)
{
@@ ... @@
   for (s = 0; s < 2; ++s) {
      for (i = 0; i < NVC0_MAX_SURFACES; ++i)
         pipe_surface_reference(&nvc0->surfaces[s][i], NULL);
   }
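+
+  /* drop the references still held by the shader buffer (SSBO) bindings */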
+  for (s = 0; s < 6; ++s)
+     for (i = 0; i < NVC0_MAX_BUFFERS; ++i)
+        pipe_resource_reference(&nvc0->buffers[s][i].buffer, NULL);
+
   for (i = 0; i < nvc0->num_tfbbufs; ++i)
      pipe_so_target_reference(&nvc0->tfbbuf[i], NULL);

   for (i = 0; i < nvc0->global_residents.size / sizeof(struct pipe_resource *); ++i) {
      struct pipe_resource **res = util_dynarray_element(
         &nvc0->global_residents, struct pipe_resource *, i);
      pipe_resource_reference(res, NULL);
   }
   util_dynarray_fini(&nvc0->global_residents);
+
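+  /* the empty tess ctrl shader created in nvc0_create is owned by this
+   * context, so it has to be torn down along with it */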
+  if (nvc0->tcp_empty)
+     nvc0->base.pipe.delete_tcs_state(&nvc0->base.pipe, nvc0->tcp_empty);
}

static void
nvc0_destroy(struct pipe_context *pipe)
{
   struct nvc0_context *nvc0 = nvc0_context(pipe);
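
   /* Hand the pipeline state over to the screen so the next context that
    * takes over the pushbuf (see nvc0_create) can start from it instead of
    * a fully dirty state; the tfb pointer is cleared because that state
    * object dies with this context. */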
-  if (nvc0->screen->cur_ctx == nvc0)
+  if (nvc0->screen->cur_ctx == nvc0) {
      nvc0->screen->cur_ctx = NULL;
+     nvc0->screen->save_state = nvc0->state;
+     nvc0->screen->save_state.tfb = NULL;
+  }
+
   /* Unset bufctx, we don't want to revalidate any resources after the flush.
    * Other contexts will always set their bufctx again on action calls.
    */
   nouveau_pushbuf_bufctx(nvc0->base.pushbuf, NULL);
   nouveau_pushbuf_kick(nvc0->base.pushbuf, nvc0->base.pushbuf->channel);
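
@@ ... @@
/* default kick_notify: called whenever the pushbuf is submitted to the
 * kernel; advance the fence and note that all queued state has reached
 * the hardware. */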
void
nvc0_default_kick_notify(struct nouveau_pushbuf *push)
{
   struct nvc0_screen *screen = push->user_priv;

   if (screen) {
      nouveau_fence_next(&screen->base);
-     nouveau_fence_update(&screen->base, TRUE);
+     nouveau_fence_update(&screen->base, true);
      if (screen->cur_ctx)
-        screen->cur_ctx->state.flushed = TRUE;
+        screen->cur_ctx->state.flushed = true;
      NOUVEAU_DRV_STAT(&screen->base, pushbuf_count, 1);
   }
}

@@ ... @@ nvc0_invalidate_resource_storage()
   }

-  if (res->bind & (PIPE_BIND_VERTEX_BUFFER |
-                   PIPE_BIND_INDEX_BUFFER |
-                   PIPE_BIND_CONSTANT_BUFFER |
-                   PIPE_BIND_STREAM_OUTPUT |
-                   PIPE_BIND_COMMAND_ARGS_BUFFER |
-                   PIPE_BIND_SAMPLER_VIEW)) {
+  if (res->target == PIPE_BUFFER) {
      for (i = 0; i < nvc0->num_vtxbufs; ++i) {
         if (nvc0->vtxbuf[i].buffer == res) {
            nvc0->dirty |= NVC0_NEW_ARRAYS;
            nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_VTX);
            if (!--ref)
               return ref;
         }
      }
@@ ... @@
      }
+
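+     /* also scan the new shader buffer slots: mark the binding dirty and
+      * drop its bufctx reference so it is re-validated on the next draw */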
+     for (s = 0; s < 5; ++s) {
+        for (i = 0; i < NVC0_MAX_BUFFERS; ++i) {
+           if (nvc0->buffers[s][i].buffer == res) {
+              nvc0->buffers_dirty[s] |= 1 << i;
+              nvc0->dirty |= NVC0_NEW_BUFFERS;
+              nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_BUF);
+              if (!--ref)
+                 return ref;
+           }
+        }
+     }
   }

   return ref;
}

static void
nvc0_context_get_sample_position(struct pipe_context *, unsigned, unsigned,
                                 float *);

struct pipe_context *
-nvc0_create(struct pipe_screen *pscreen, void *priv)
+nvc0_create(struct pipe_screen *pscreen, void *priv, unsigned ctxflags)
{
   struct nvc0_screen *screen = nvc0_screen(pscreen);
   struct nvc0_context *nvc0;
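
   /* ctxflags carries the PIPE_CONTEXT_* creation flags that gallium's
    * pipe_screen::context_create now passes through to drivers */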

@@ ... @@
   pipe->memory_barrier = nvc0_memory_barrier;
   pipe->get_sample_position = nvc0_context_get_sample_position;

-  if (!screen->cur_ctx) {
-     screen->cur_ctx = nvc0;
-     nouveau_pushbuf_bufctx(screen->base.pushbuf, nvc0->bufctx);
-  }
-  screen->base.pushbuf->kick_notify = nvc0_default_kick_notify;
-
+  nouveau_context_init(&nvc0->base);
   nvc0_init_query_functions(nvc0);
   nvc0_init_surface_functions(nvc0);
   nvc0_init_state_functions(nvc0);

@@ ... @@
   /* shader builtin library is per-screen, but we need a context for m2mf */
   nvc0_program_library_upload(nvc0);
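+  /* tessellation on this hardware appears to always want a tess ctrl
+   * program bound, so create an empty one to fall back on */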
+  nvc0_program_init_tcp_empty(nvc0);
+  if (!nvc0->tcp_empty)
+     goto out_err;
+  /* set the empty tctl prog on next draw in case one is never set */
+  nvc0->dirty |= NVC0_NEW_TCTLPROG;
+
+  /* now that there are no more opportunities for errors, set the current
+   * context if there isn't already one.
+   */
+  if (!screen->cur_ctx) {
+     nvc0->state = screen->save_state;
+     screen->cur_ctx = nvc0;
+     nouveau_pushbuf_bufctx(screen->base.pushbuf, nvc0->bufctx);
+  }
+  screen->base.pushbuf->kick_notify = nvc0_default_kick_notify;

   /* add permanently resident buffers to bufctxts */
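   /* NV_VRAM_DOMAIN resolves to VRAM when the device has dedicated memory
    * and to GART on VRAM-less chips (e.g. Tegra), rather than hardcoding
    * NOUVEAU_BO_VRAM */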
-  flags = NOUVEAU_BO_VRAM | NOUVEAU_BO_RD;
+  flags = NV_VRAM_DOMAIN(&screen->base) | NOUVEAU_BO_RD;
   BCTX_REFN_bo(nvc0->bufctx_3d, SCREEN, flags, screen->text);
   BCTX_REFN_bo(nvc0->bufctx_3d, SCREEN, flags, screen->uniform_bo);
@@ ... @@
      BCTX_REFN_bo(nvc0->bufctx_cp, CP_SCREEN, flags, screen->parm);
   }

-  flags = NOUVEAU_BO_VRAM | NOUVEAU_BO_RDWR;
+  flags = NV_VRAM_DOMAIN(&screen->base) | NOUVEAU_BO_RDWR;

   if (screen->poly_cache)
      BCTX_REFN_bo(nvc0->bufctx_3d, SCREEN, flags, screen->poly_cache);

@@ ... @@
void
nvc0_bufctx_fence(struct nvc0_context *nvc0, struct nouveau_bufctx *bufctx,
-                 boolean on_flush)
+                 bool on_flush)
{
   struct nouveau_list *list = on_flush ? &bufctx->current : &bufctx->pending;
   struct nouveau_list *it;