if (nv50->framebuffer.cbufs[i] &&
nv50->framebuffer.cbufs[i]->texture == res) {
nv50->dirty_3d |= NV50_NEW_3D_FRAMEBUFFER;
- nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_FB);
+ nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_3D_FB);
if (!--ref)
return ref;
}
if (nv50->framebuffer.zsbuf &&
nv50->framebuffer.zsbuf->texture == res) {
nv50->dirty_3d |= NV50_NEW_3D_FRAMEBUFFER;
- nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_FB);
+ nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_3D_FB);
if (!--ref)
return ref;
}
for (i = 0; i < nv50->num_vtxbufs; ++i) {
if (nv50->vtxbuf[i].buffer == res) {
nv50->dirty_3d |= NV50_NEW_3D_ARRAYS;
- nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_VERTEX);
+ nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_3D_VERTEX);
if (!--ref)
return ref;
}
if (nv50->idxbuf.buffer == res) {
/* Just rebind to the bufctx as there is no separate dirty bit */
- nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_INDEX);
- BCTX_REFN(nv50->bufctx_3d, INDEX, nv04_resource(res), RD);
+ nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_3D_INDEX);
+ BCTX_REFN(nv50->bufctx_3d, 3D_INDEX, nv04_resource(res), RD);
if (!--ref)
return ref;
}
if (nv50->textures[s][i] &&
nv50->textures[s][i]->texture == res) {
nv50->dirty_3d |= NV50_NEW_3D_TEXTURES;
- nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_TEXTURES);
+ nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_3D_TEXTURES);
if (!--ref)
return ref;
}
nv50->constbuf[s][i].u.buf == res) {
nv50->dirty_3d |= NV50_NEW_3D_CONSTBUF;
nv50->constbuf_dirty[s] |= 1 << i;
- nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_CB(s, i));
+ nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_3D_CB(s, i));
if (!--ref)
return ref;
}
flags = NOUVEAU_BO_VRAM | NOUVEAU_BO_RD;
- BCTX_REFN_bo(nv50->bufctx_3d, SCREEN, flags, screen->code);
- BCTX_REFN_bo(nv50->bufctx_3d, SCREEN, flags, screen->uniforms);
- BCTX_REFN_bo(nv50->bufctx_3d, SCREEN, flags, screen->txc);
- BCTX_REFN_bo(nv50->bufctx_3d, SCREEN, flags, screen->stack_bo);
+ BCTX_REFN_bo(nv50->bufctx_3d, 3D_SCREEN, flags, screen->code);
+ BCTX_REFN_bo(nv50->bufctx_3d, 3D_SCREEN, flags, screen->uniforms);
+ BCTX_REFN_bo(nv50->bufctx_3d, 3D_SCREEN, flags, screen->txc);
+ BCTX_REFN_bo(nv50->bufctx_3d, 3D_SCREEN, flags, screen->stack_bo);
if (screen->compute) {
BCTX_REFN_bo(nv50->bufctx_cp, CP_SCREEN, flags, screen->code);
BCTX_REFN_bo(nv50->bufctx_cp, CP_SCREEN, flags, screen->txc);
flags = NOUVEAU_BO_GART | NOUVEAU_BO_WR;
- BCTX_REFN_bo(nv50->bufctx_3d, SCREEN, flags, screen->fence.bo);
+ BCTX_REFN_bo(nv50->bufctx_3d, 3D_SCREEN, flags, screen->fence.bo);
BCTX_REFN_bo(nv50->bufctx, FENCE, flags, screen->fence.bo);
if (screen->compute)
BCTX_REFN_bo(nv50->bufctx_cp, CP_SCREEN, flags, screen->fence.bo);
#define NV50_NEW_CP_GLOBALS (1 << 1)
/* 3d bufctx (during draw_vbo, blit_3d) */
-#define NV50_BIND_FB          0
-#define NV50_BIND_VERTEX      1
-#define NV50_BIND_VERTEX_TMP  2
-#define NV50_BIND_INDEX       3
-#define NV50_BIND_TEXTURES    4
-#define NV50_BIND_CB(s, i)   (5 + 16 * (s) + (i))
-#define NV50_BIND_SO         53
-#define NV50_BIND_SCREEN     54
-#define NV50_BIND_TLS        55
-#define NV50_BIND_3D_COUNT   56
+#define NV50_BIND_3D_FB          0
+#define NV50_BIND_3D_VERTEX      1
+#define NV50_BIND_3D_VERTEX_TMP  2
+#define NV50_BIND_3D_INDEX       3
+#define NV50_BIND_3D_TEXTURES    4
+#define NV50_BIND_3D_CB(s, i)   (5 + 16 * (s) + (i))
+#define NV50_BIND_3D_SO         53
+#define NV50_BIND_3D_SCREEN     54
+#define NV50_BIND_3D_TLS        55
+#define NV50_BIND_3D_COUNT      56
/* compute bufctx (during launch_grid) */
#define NV50_BIND_CP_GLOBAL 0
BEGIN_NV04(push, NV50_3D(SET_PROGRAM_CB), 1);
PUSH_DATA (push, (b << 12) | (i << 8) | p | 1);
- BCTX_REFN(nv50->bufctx_3d, CB(s, i), res, RD);
+ BCTX_REFN(nv50->bufctx_3d, 3D_CB(s, i), res, RD);
nv50->cb_dirty = 1; /* Force cache flush for UBO. */
} else {
if (prog && prog->tls_space) {
if (nv50->state.new_tls_space)
- nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_TLS);
+ nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_3D_TLS);
if (!nv50->state.tls_required || nv50->state.new_tls_space)
- BCTX_REFN_bo(nv50->bufctx_3d, TLS, flags, nv50->screen->tls_bo);
+ BCTX_REFN_bo(nv50->bufctx_3d, 3D_TLS, flags, nv50->screen->tls_bo);
nv50->state.new_tls_space = false;
nv50->state.tls_required |= 1 << stage;
} else {
if (nv50->state.tls_required == (1 << stage))
- nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_TLS);
+ nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_3D_TLS);
nv50->state.tls_required &= ~(1 << stage);
}
}
BEGIN_NV04(push, NV50_3D(STRMOUT_BUFFERS_CTRL), 1);
PUSH_DATA (push, ctrl);
- nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_SO);
+ nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_3D_SO);
for (i = 0; i < nv50->num_so_targets; ++i) {
struct nv50_so_target *targ = nv50_so_target(nv50->so_target[i]);
prims = MIN2(prims, limit);
}
targ->stride = so->stride[i];
- BCTX_REFN(nv50->bufctx_3d, SO, buf, WR);
+ BCTX_REFN(nv50->bufctx_3d, 3D_SO, buf, WR);
}
if (prims != ~0) {
BEGIN_NV04(push, NV50_3D(STRMOUT_PRIMITIVE_LIMIT), 1);
nv50->num_textures[s] = nr;
- nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_TEXTURES);
+ nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_3D_TEXTURES);
nv50->dirty_3d |= NV50_NEW_3D_TEXTURES;
}
nv50->constbuf[s][i].u.buf = NULL;
else
if (nv50->constbuf[s][i].u.buf)
- nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_CB(s, i));
+ nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_3D_CB(s, i));
pipe_resource_reference(&nv50->constbuf[s][i].u.buf, res);
{
struct nv50_context *nv50 = nv50_context(pipe);
- nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_FB);
+ nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_3D_FB);
util_copy_framebuffer_state(&nv50->framebuffer, fb);
struct nv50_context *nv50 = nv50_context(pipe);
unsigned i;
- nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_VERTEX);
+ nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_3D_VERTEX);
nv50->dirty_3d |= NV50_NEW_3D_ARRAYS;
util_set_vertex_buffers_count(nv50->vtxbuf, &nv50->num_vtxbufs, vb,
struct nv50_context *nv50 = nv50_context(pipe);
if (nv50->idxbuf.buffer)
- nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_INDEX);
+ nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_3D_INDEX);
if (ib) {
pipe_resource_reference(&nv50->idxbuf.buffer, ib->buffer);
nv50->idxbuf.index_size = ib->index_size;
if (ib->buffer) {
nv50->idxbuf.offset = ib->offset;
- BCTX_REFN(nv50->bufctx_3d, INDEX, nv04_resource(ib->buffer), RD);
+ BCTX_REFN(nv50->bufctx_3d, 3D_INDEX, nv04_resource(ib->buffer), RD);
} else {
nv50->idxbuf.user_buffer = ib->user_buffer;
}
unsigned ms_mode = NV50_3D_MULTISAMPLE_MODE_MS1;
uint32_t array_size = 0xffff, array_mode = 0;
- nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_FB);
+ nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_3D_FB);
BEGIN_NV04(push, NV50_3D(RT_CONTROL), 1);
PUSH_DATA (push, (076543210 << 4) | fb->nr_cbufs);
mt->base.status &= ~NOUVEAU_BUFFER_STATUS_GPU_READING;
/* only register for writing, otherwise we'd always serialize here */
- BCTX_REFN(nv50->bufctx_3d, FB, &mt->base, WR);
+ BCTX_REFN(nv50->bufctx_3d, 3D_FB, &mt->base, WR);
}
if (fb->zsbuf) {
mt->base.status |= NOUVEAU_BUFFER_STATUS_GPU_WRITING;
mt->base.status &= ~NOUVEAU_BUFFER_STATUS_GPU_READING;
- BCTX_REFN(nv50->bufctx_3d, FB, &mt->base, WR);
+ BCTX_REFN(nv50->bufctx_3d, 3D_FB, &mt->base, WR);
} else {
BEGIN_NV04(push, NV50_3D(ZETA_ENABLE), 1);
PUSH_DATA (push, 0);
ctx->saved.dirty_3d = nv50->dirty_3d;
- nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_FB);
- nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_TEXTURES);
+ nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_3D_FB);
+ nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_3D_TEXTURES);
nv50->dirty_3d =
NV50_NEW_3D_FRAMEBUFFER | NV50_NEW_3D_MIN_SAMPLES |
nv50->base.pipe.render_condition(&nv50->base.pipe, nv50->cond_query,
nv50->cond_cond, nv50->cond_mode);
- nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_FB);
- nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_TEXTURES);
+ nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_3D_FB);
+ nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_3D_TEXTURES);
nv50->dirty_3d = blit->saved.dirty_3d |
(NV50_NEW_3D_FRAMEBUFFER | NV50_NEW_3D_SCISSOR | NV50_NEW_3D_SAMPLE_MASK |
res->status &= ~NOUVEAU_BUFFER_STATUS_GPU_WRITING;
res->status |= NOUVEAU_BUFFER_STATUS_GPU_READING;
- BCTX_REFN(nv50->bufctx_3d, TEXTURES, res, RD);
+ BCTX_REFN(nv50->bufctx_3d, 3D_TEXTURES, res, RD);
BEGIN_NV04(push, NV50_3D(BIND_TIC(s)), 1);
PUSH_DATA (push, (tic->id << 9) | (i << 1) | 1);
addrs[b] = nouveau_scratch_data(&nv50->base, vb->user_buffer, base, size,
&bo);
if (addrs[b])
- BCTX_REFN_bo(nv50->bufctx_3d, VERTEX_TMP, NOUVEAU_BO_GART |
+ BCTX_REFN_bo(nv50->bufctx_3d, 3D_VERTEX_TMP, NOUVEAU_BO_GART |
NOUVEAU_BO_RD, bo);
}
nv50->base.vbo_dirty = true;
address[b] = nouveau_scratch_data(&nv50->base, vb->user_buffer,
base, size, &bo);
if (address[b])
- BCTX_REFN_bo(nv50->bufctx_3d, VERTEX_TMP, bo_flags, bo);
+ BCTX_REFN_bo(nv50->bufctx_3d, 3D_VERTEX_TMP, bo_flags, bo);
}
BEGIN_NV04(push, NV50_3D(VERTEX_ARRAY_LIMIT_HIGH(i)), 2);
nv50_release_user_vbufs(struct nv50_context *nv50)
{
if (nv50->vbo_user) {
- nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_VERTEX_TMP);
+ nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_3D_VERTEX_TMP);
nouveau_scratch_done(&nv50->base);
}
}
struct nv04_resource *buf = nv04_resource(vb->buffer);
if (!(refd & (1 << b))) {
refd |= 1 << b;
- BCTX_REFN(nv50->bufctx_3d, VERTEX, buf, RD);
+ BCTX_REFN(nv50->bufctx_3d, 3D_VERTEX, buf, RD);
}
address = buf->address + vb->buffer_offset + ve->pipe.src_offset;
limit = buf->address + buf->base.width0 - 1;
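
Note: every hunk above follows the same nouveau bufctx pattern. When a piece of bound state changes, its binding point is cleared with nouveau_bufctx_reset(); the matching validate code then re-references the current buffers with BCTX_REFN()/BCTX_REFN_bo() so they stay in the buffer list that is validated with the next pushbuf submission. Below is a minimal sketch of that pattern using the renamed 3D binding points, assuming the usual nv50 driver headers; example_validate_fb is a hypothetical function written for illustration, not code from this patch.

/* Hypothetical sketch of the reset + re-reference pattern; not part of the patch. */
static void
example_validate_fb(struct nv50_context *nv50)
{
   struct pipe_framebuffer_state *fb = &nv50->framebuffer;
   unsigned i;

   /* Drop the buffers previously attached to the framebuffer binding point. */
   nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_3D_FB);

   /* Re-reference the current render targets for writing so they are
    * revalidated with the next pushbuf submission. */
   for (i = 0; i < fb->nr_cbufs; ++i) {
      struct nv50_miptree *mt;
      if (!fb->cbufs[i])
         continue;
      mt = nv50_miptree(fb->cbufs[i]->texture);
      BCTX_REFN(nv50->bufctx_3d, 3D_FB, &mt->base, WR);
   }
}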