BCTX_REFN_bo(nv50->bufctx_3d, SCREEN, flags, screen->fence.bo);
BCTX_REFN_bo(nv50->bufctx, FENCE, flags, screen->fence.bo);
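+ /* size of the context's runtime scratch buffers (2 MiB), used here for
+  * temporary uploads of user vertex data
+  */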
+ nv50->base.scratch.bo_size = 2 << 20;
+
return pipe;
out_err:
#define NV50_CODE_BO_SIZE_LOG2 19
-#define NV50_SCRATCH_SIZE (2 << 20)
-#define NV50_SCRATCH_NR_BUFFERS 2
-
#define NV50_SCREEN_RESIDENT_BO_COUNT 5
struct nv50_blitctx;
static void
nv50_prevalidate_vbufs(struct nv50_context *nv50)
{
+ const uint32_t bo_flags = NOUVEAU_BO_RD | NOUVEAU_BO_GART;
+ struct nouveau_bo *bo;
struct pipe_vertex_buffer *vb;
struct nv04_resource *buf;
int i;
continue;
buf = nv04_resource(vb->buffer);
- /* NOTE: user buffers with temporary storage count as mapped by GPU */
- if (!nouveau_resource_mapped_by_gpu(vb->buffer)) {
+ if (nouveau_resource_mapped_by_gpu(vb->buffer)) {
+ BCTX_REFN(nv50->bufctx_3d, VERTEX, buf, RD);
+ } else {
if (nv50->vbo_push_hint) {
nv50->vbo_fifo = ~0;
- continue;
+ return;
+ }
+ nv50->base.vbo_dirty = TRUE;
+
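+ /* user pointer: copy the accessed range into transient scratch memory and
+  * reference the returned bo through the temporary VERTEX_TMP bin
+  */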
+ if (buf->status & NOUVEAU_BUFFER_STATUS_USER_MEMORY) {
+ assert(vb->stride > vb->buffer_offset);
+ nv50->vbo_user |= 1 << i;
+ nv50_vbuf_range(nv50, i, &base, &size);
+ bo = nouveau_scratch_data(&nv50->base, buf, base, size);
+ if (bo)
+ BCTX_REFN_bo(nv50->bufctx_3d, VERTEX_TMP, bo_flags, bo);
} else {
- if (buf->status & NOUVEAU_BUFFER_STATUS_USER_MEMORY) {
- nv50->vbo_user |= 1 << i;
- assert(vb->stride > vb->buffer_offset);
- nv50_vbuf_range(nv50, i, &base, &size);
- nouveau_user_buffer_upload(&nv50->base, buf, base, size);
- } else {
- nouveau_buffer_migrate(&nv50->base, buf, NOUVEAU_BO_GART);
- }
- nv50->base.vbo_dirty = TRUE;
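+ /* regular system-memory buffer: migrate it to GART and reference it only
+  * if the migration succeeded
+  */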
+ if (nouveau_buffer_migrate(&nv50->base, buf, NOUVEAU_BO_GART))
+ BCTX_REFN(nv50->bufctx_3d, VERTEX, buf, RD);
}
}
- BCTX_REFN(nv50->bufctx_3d, VERTEX, buf, RD);
}
}
static void
nv50_update_user_vbufs(struct nv50_context *nv50)
{
+ const uint32_t bo_flags = NOUVEAU_BO_RD | NOUVEAU_BO_GART;
+ struct nouveau_bo *bo;
struct nouveau_pushbuf *push = nv50->base.pushbuf;
uint32_t base, offset, size;
int i;
uint32_t written = 0;
- nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_VERTEX);
-
for (i = 0; i < nv50->vertex->num_elements; ++i) {
struct pipe_vertex_element *ve = &nv50->vertex->element[i].pipe;
const int b = ve->vertex_buffer_index;
struct pipe_vertex_buffer *vb = &nv50->vtxbuf[b];
struct nv04_resource *buf = nv04_resource(vb->buffer);
- if (!(nv50->vbo_user & (1 << b))) {
- BCTX_REFN(nv50->bufctx_3d, VERTEX, buf, RD);
+ if (!(nv50->vbo_user & (1 << b)))
continue;
- }
if (!vb->stride) {
nv50_emit_vtxattr(nv50, vb, ve, i);
if (!(written & (1 << b))) {
written |= 1 << b;
- nouveau_user_buffer_upload(&nv50->base, buf, base, size);
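+ /* re-upload the accessed range of the user array into scratch memory */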
+ bo = nouveau_scratch_data(&nv50->base, buf, base, size);
+ if (bo)
+ BCTX_REFN_bo(nv50->bufctx_3d, VERTEX_TMP, bo_flags, bo);
}
offset = vb->buffer_offset + ve->src_offset;
BEGIN_NV04(push, NV50_3D(VERTEX_ARRAY_START_HIGH(i)), 2);
PUSH_DATAh(push, buf->address + offset);
PUSH_DATA (push, buf->address + offset);
-
- BCTX_REFN(nv50->bufctx_3d, VERTEX, buf, RD);
}
nv50->base.vbo_dirty = TRUE;
}
static INLINE void
nv50_release_user_vbufs(struct nv50_context *nv50)
{
- uint32_t vbo_user = nv50->vbo_user;
-
- while (vbo_user) {
- int i = ffs(vbo_user) - 1;
- vbo_user &= ~(1 << i);
-
- nouveau_buffer_release_gpu_storage(nv04_resource(nv50->vtxbuf[i].buffer));
+ if (nv50->vbo_user) {
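+ /* drop the temporary scratch references and tell the scratch allocator
+  * that the uploaded user data is no longer needed
+  */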
+ nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_VERTEX_TMP);
+ nouveau_scratch_done(&nv50->base);
}
}
BCTX_REFN_bo(nvc0->bufctx_3d, SCREEN, flags, screen->fence.bo);
BCTX_REFN_bo(nvc0->bufctx, FENCE, flags, screen->fence.bo);
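+ /* size of the context's runtime scratch buffers (2 MiB), used here for
+  * temporary uploads of user vertex data
+  */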
+ nvc0->base.scratch.bo_size = 2 << 20;
+
return pipe;
out_err:
#define NVC0_BIND_FB 0
#define NVC0_BIND_VTX 1
-#define NVC0_BIND_IDX 2
-#define NVC0_BIND_TEX 3
-#define NVC0_BIND_CB(s, i) (4 + 16 * (s) + (i))
-#define NVC0_BIND_TFB 84
-#define NVC0_BIND_SCREEN 85
-#define NVC0_BIND_TLS 86
-#define NVC0_BIND_COUNT 87
+#define NVC0_BIND_VTX_TMP 2
+#define NVC0_BIND_IDX 3
+#define NVC0_BIND_TEX 4
+#define NVC0_BIND_CB(s, i) (5 + 16 * (s) + (i))
+#define NVC0_BIND_TFB 85
+#define NVC0_BIND_SCREEN 86
+#define NVC0_BIND_TLS 87
+#define NVC0_BIND_COUNT 88
#define NVC0_BIND_2D 0
#define NVC0_BIND_M2MF 0
#define NVC0_BIND_FENCE 1
screen->base.fence.emit = nvc0_screen_fence_emit;
screen->base.fence.update = nvc0_screen_fence_update;
- for (i = 0; i < NVC0_SCRATCH_NR_BUFFERS; ++i) {
- ret = nouveau_bo_new(dev, NOUVEAU_BO_GART, 0, NVC0_SCRATCH_SIZE, NULL,
- &screen->scratch.bo[i]);
- if (ret)
- goto fail;
- }
-
ret = nouveau_object_new(chan, 0xbeef9039, NVC0_M2MF_CLASS, NULL, 0,
&screen->m2mf);
if (ret)
struct nvc0_context;
-#define NVC0_SCRATCH_SIZE (2 << 20)
-#define NVC0_SCRATCH_NR_BUFFERS 2
-
#define NVC0_SCREEN_RESIDENT_BO_COUNT 5

struct nvc0_blitctx;
struct nvc0_screen {
struct nvc0_blitctx *blitctx;
- struct {
- struct nouveau_bo *bo[NVC0_SCRATCH_NR_BUFFERS];
- uint8_t *buf;
- int index;
- uint32_t offset;
- } scratch;
-
struct {
void **entries;
int next;
static void
nvc0_prevalidate_vbufs(struct nvc0_context *nvc0)
{
+ const uint32_t bo_flags = NOUVEAU_BO_RD | NOUVEAU_BO_GART;
+ struct nouveau_bo *bo;
struct pipe_vertex_buffer *vb;
struct nv04_resource *buf;
int i;
continue;
buf = nv04_resource(vb->buffer);
- /* NOTE: user buffers with temporary storage count as mapped by GPU */
if (!nouveau_resource_mapped_by_gpu(vb->buffer)) {
if (nvc0->vbo_push_hint) {
nvc0->vbo_fifo = ~0;
+ return;
+ }
+ nvc0->base.vbo_dirty = TRUE;
+
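+ /* user pointer: copy the accessed range into transient scratch memory and
+  * reference the returned bo through the temporary VTX_TMP bin
+  */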
+ if (buf->status & NOUVEAU_BUFFER_STATUS_USER_MEMORY) {
+ assert(vb->stride > vb->buffer_offset);
+ nvc0->vbo_user |= 1 << i;
+ nvc0_vbuf_range(nvc0, i, &base, &size);
+ bo = nouveau_scratch_data(&nvc0->base, buf, base, size);
+ if (bo)
+ BCTX_REFN_bo(nvc0->bufctx_3d, VTX_TMP, bo_flags, bo);
continue;
} else {
- if (buf->status & NOUVEAU_BUFFER_STATUS_USER_MEMORY) {
- nvc0->vbo_user |= 1 << i;
- assert(vb->stride > vb->buffer_offset);
- nvc0_vbuf_range(nvc0, i, &base, &size);
- nouveau_user_buffer_upload(&nvc0->base, buf, base, size);
- } else {
- nouveau_buffer_migrate(&nvc0->base, buf, NOUVEAU_BO_GART);
- }
- nvc0->base.vbo_dirty = TRUE;
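+ /* regular system-memory buffer: migrate it to GART for GPU access */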
+ if (!nouveau_buffer_migrate(&nvc0->base, buf, NOUVEAU_BO_GART))
+    continue;
}
}
BCTX_REFN(nvc0->bufctx_3d, VTX, buf, RD);
static void
nvc0_update_user_vbufs(struct nvc0_context *nvc0)
{
+ const uint32_t bo_flags = NOUVEAU_BO_RD | NOUVEAU_BO_GART;
+ struct nouveau_bo *bo;
struct nouveau_pushbuf *push = nvc0->base.pushbuf;
uint32_t base, offset, size;
int i;
uint32_t written = 0;
- /* TODO: use separate bufctx bin for user buffers
- */
- nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_VTX);
-
PUSH_SPACE(push, nvc0->vertex->num_elements * 8);
for (i = 0; i < nvc0->vertex->num_elements; ++i) {
struct pipe_vertex_buffer *vb = &nvc0->vtxbuf[b];
struct nv04_resource *buf = nv04_resource(vb->buffer);
- if (!(nvc0->vbo_user & (1 << b))) {
- BCTX_REFN(nvc0->bufctx_3d, VTX, buf, RD);
+ if (!(nvc0->vbo_user & (1 << b)))
continue;
- }
if (!vb->stride) {
nvc0_emit_vtxattr(nvc0, vb, ve, i);
if (!(written & (1 << b))) {
written |= 1 << b;
- nouveau_user_buffer_upload(&nvc0->base, buf, base, size);
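+ /* re-upload the accessed range of the user array into scratch memory */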
+ bo = nouveau_scratch_data(&nvc0->base, buf, base, size);
+ if (bo)
+ BCTX_REFN_bo(nvc0->bufctx_3d, VTX_TMP, bo_flags, bo);
}
offset = vb->buffer_offset + ve->src_offset;
PUSH_DATA (push, buf->address + base + size - 1);
PUSH_DATAh(push, buf->address + offset);
PUSH_DATA (push, buf->address + offset);
-
- BCTX_REFN(nvc0->bufctx_3d, VTX, buf, RD);
}
nvc0->base.vbo_dirty = TRUE;
}
static INLINE void
nvc0_release_user_vbufs(struct nvc0_context *nvc0)
{
- uint32_t vbo_user = nvc0->vbo_user;
-
- while (vbo_user) {
- int i = ffs(vbo_user) - 1;
- vbo_user &= ~(1 << i);
-
- nouveau_buffer_release_gpu_storage(nv04_resource(nvc0->vtxbuf[i].buffer));
+ if (nvc0->vbo_user) {
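+ /* drop the temporary scratch references and tell the scratch allocator
+  * that the uploaded user data is no longer needed
+  */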
+ nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_VTX_TMP);
+ nouveau_scratch_done(&nvc0->base);
}
}