return FALSE;
if (unlikely(buf->base.bind & PIPE_BIND_SHARED))
return FALSE;
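+   /* A persistent mapping must stay valid for its whole lifetime, so the
+    * backing storage can never be discarded and reallocated behind it. */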
+ if (unlikely(usage & PIPE_TRANSFER_PERSISTENT))
+ return FALSE;
return buf->mm && nouveau_buffer_busy(buf, PIPE_TRANSFER_WRITE);
}
!util_ranges_intersect(&buf->valid_buffer_range, box->x, box->x + box->width))
usage |= PIPE_TRANSFER_DISCARD_RANGE | PIPE_TRANSFER_UNSYNCHRONIZED;
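+   /* Synchronizing persistent mappings is the client's job (fences,
+    * glMemoryBarrier), so do not stall on the GPU for them here. */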
+ if (usage & PIPE_TRANSFER_PERSISTENT)
+ usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
+
if (buf->domain == NOUVEAU_BO_VRAM) {
if (usage & NOUVEAU_TRANSFER_DISCARD) {
/* Set up a staging area for the user to write to. It will be copied
pipe_reference_init(&buffer->base.reference, 1);
buffer->base.screen = pscreen;
- if (buffer->base.bind &
- (screen->vidmem_bindings & screen->sysmem_bindings)) {
+ if (buffer->base.flags & (PIPE_RESOURCE_FLAG_MAP_PERSISTENT |
+ PIPE_RESOURCE_FLAG_MAP_COHERENT)) {
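+      /* Persistently mapped buffers must stay CPU-accessible at a stable
+       * address, so place them in GART, which both CPU and GPU can reach. */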
+ buffer->domain = NOUVEAU_BO_GART;
+ } else if (buffer->base.bind &
+ (screen->vidmem_bindings & screen->sysmem_bindings)) {
switch (buffer->base.usage) {
case PIPE_USAGE_DEFAULT:
case PIPE_USAGE_IMMUTABLE:
#include "nv30/nv30_resource.h"
#include "nv30/nv30_transfer.h"
+static void
+nv30_memory_barrier(struct pipe_context *pipe, unsigned flags)
+{
+ struct nv30_context *nv30 = nv30_context(pipe);
+ int i;
+
+ if (flags & PIPE_BARRIER_MAPPED_BUFFER) {
+ for (i = 0; i < nv30->num_vtxbufs; ++i) {
+ if (!nv30->vtxbuf[i].buffer)
+ continue;
+ if (nv30->vtxbuf[i].buffer->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT)
+ nv30->base.vbo_dirty = TRUE;
+ }
+
+ if (nv30->idxbuf.buffer &&
+ nv30->idxbuf.buffer->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT)
+ nv30->base.vbo_dirty = TRUE;
+ }
+}
+
static struct pipe_resource *
nv30_resource_create(struct pipe_screen *pscreen,
const struct pipe_resource *tmpl)
pipe->resource_copy_region = nv30_resource_copy_region;
pipe->blit = nv30_blit;
pipe->flush_resource = nv30_flush_resource;
+ pipe->memory_barrier = nv30_memory_barrier;
}
case PIPE_CAP_TGSI_TEXCOORD:
case PIPE_CAP_USER_CONSTANT_BUFFERS:
case PIPE_CAP_USER_INDEX_BUFFERS:
+ case PIPE_CAP_BUFFER_MAP_PERSISTENT_COHERENT:
return 1;
case PIPE_CAP_USER_VERTEX_BUFFERS:
return 0;
case PIPE_CAP_TGSI_VS_LAYER:
case PIPE_CAP_MAX_TEXTURE_GATHER_COMPONENTS:
case PIPE_CAP_TEXTURE_GATHER_SM5:
- case PIPE_CAP_BUFFER_MAP_PERSISTENT_COHERENT:
case PIPE_CAP_FAKE_SW_MSAA:
case PIPE_CAP_TEXTURE_QUERY_LOD:
case PIPE_CAP_SAMPLE_SHADING:
{
struct nv30_context *nv30 = nv30_context(pipe);
struct nouveau_pushbuf *push = nv30->base.pushbuf;
+ int i;
   /* For picking only a few vertices from a large user buffer, push is better;
    * if the index count is larger and we expect repeated vertices, suggest upload.
return;
}
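+   /* Coherently mapped buffers may be written by the CPU at any time with no
+    * barrier, so re-check them on every draw and invalidate vertex caches. */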
+ for (i = 0; i < nv30->num_vtxbufs && !nv30->base.vbo_dirty; ++i) {
+ if (!nv30->vtxbuf[i].buffer)
+ continue;
+ if (nv30->vtxbuf[i].buffer->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT)
+ nv30->base.vbo_dirty = TRUE;
+ }
+
+ if (!nv30->base.vbo_dirty && nv30->idxbuf.buffer &&
+ nv30->idxbuf.buffer->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT)
+ nv30->base.vbo_dirty = TRUE;
+
if (nv30->base.vbo_dirty) {
BEGIN_NV04(push, NV30_3D(VTX_CACHE_INVALIDATE_1710), 1);
PUSH_DATA (push, 0);
PUSH_DATA (push, 0x20);
}
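+/* pipe->memory_barrier() hook; same logic as the nv30 version above. */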
+static void
+nv50_memory_barrier(struct pipe_context *pipe, unsigned flags)
+{
+ struct nv50_context *nv50 = nv50_context(pipe);
+ int i;
+
+ if (flags & PIPE_BARRIER_MAPPED_BUFFER) {
+ for (i = 0; i < nv50->num_vtxbufs; ++i) {
+ if (!nv50->vtxbuf[i].buffer)
+ continue;
+ if (nv50->vtxbuf[i].buffer->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT)
+ nv50->base.vbo_dirty = TRUE;
+ }
+
+ if (nv50->idxbuf.buffer &&
+ nv50->idxbuf.buffer->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT)
+ nv50->base.vbo_dirty = TRUE;
+ }
+}
+
void
nv50_default_kick_notify(struct nouveau_pushbuf *push)
{
pipe->flush = nv50_flush;
pipe->texture_barrier = nv50_texture_barrier;
+ pipe->memory_barrier = nv50_memory_barrier;
pipe->get_sample_position = nv50_context_get_sample_position;
if (!screen->cur_ctx) {
case PIPE_CAP_MIXED_FRAMEBUFFER_SIZES:
case PIPE_CAP_ANISOTROPIC_FILTER:
case PIPE_CAP_TEXTURE_BUFFER_OBJECTS:
+ case PIPE_CAP_BUFFER_MAP_PERSISTENT_COHERENT:
return 1;
case PIPE_CAP_MAX_TEXTURE_BUFFER_SIZE:
return 65536;
return PIPE_ENDIAN_LITTLE;
case PIPE_CAP_TGSI_VS_LAYER:
case PIPE_CAP_TEXTURE_GATHER_SM5:
- case PIPE_CAP_BUFFER_MAP_PERSISTENT_COHERENT:
case PIPE_CAP_FAKE_SW_MSAA:
return 0;
case PIPE_CAP_MAX_VIEWPORTS:
{
struct nv50_context *nv50 = nv50_context(pipe);
struct nouveau_pushbuf *push = nv50->base.pushbuf;
+ int i;
/* NOTE: caller must ensure that (min_index + index_bias) is >= 0 */
nv50->vb_elt_first = info->min_index + info->index_bias;
PUSH_DATA (push, info->start_instance);
}
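+   /* As on nv30: coherent mappings force a per-draw vertex cache check. */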
+ for (i = 0; i < nv50->num_vtxbufs && !nv50->base.vbo_dirty; ++i) {
+ if (!nv50->vtxbuf[i].buffer)
+ continue;
+ if (nv50->vtxbuf[i].buffer->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT)
+ nv50->base.vbo_dirty = TRUE;
+ }
+
+ if (!nv50->base.vbo_dirty && nv50->idxbuf.buffer &&
+ nv50->idxbuf.buffer->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT)
+ nv50->base.vbo_dirty = TRUE;
+
if (nv50->base.vbo_dirty) {
BEGIN_NV04(push, NV50_3D(VERTEX_ARRAY_FLUSH), 1);
PUSH_DATA (push, 0);
IMMED_NVC0(push, NVC0_3D(TEX_CACHE_CTL), 0);
}
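+/* pipe->memory_barrier() hook; same logic as the nv30/nv50 versions. */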
+static void
+nvc0_memory_barrier(struct pipe_context *pipe, unsigned flags)
+{
+ struct nvc0_context *nvc0 = nvc0_context(pipe);
+ int i;
+
+ if (flags & PIPE_BARRIER_MAPPED_BUFFER) {
+ for (i = 0; i < nvc0->num_vtxbufs; ++i) {
+ if (!nvc0->vtxbuf[i].buffer)
+ continue;
+ if (nvc0->vtxbuf[i].buffer->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT)
+ nvc0->base.vbo_dirty = TRUE;
+ }
+
+ if (nvc0->idxbuf.buffer &&
+ nvc0->idxbuf.buffer->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT)
+ nvc0->base.vbo_dirty = TRUE;
+ }
+}
+
static void
nvc0_context_unreference_resources(struct nvc0_context *nvc0)
{
pipe->flush = nvc0_flush;
pipe->texture_barrier = nvc0_texture_barrier;
+ pipe->memory_barrier = nvc0_memory_barrier;
pipe->get_sample_position = nvc0_context_get_sample_position;
if (!screen->cur_ctx) {
case PIPE_CAP_TEXTURE_BARRIER:
case PIPE_CAP_QUADS_FOLLOW_PROVOKING_VERTEX_CONVENTION:
case PIPE_CAP_START_INSTANCE:
+ case PIPE_CAP_BUFFER_MAP_PERSISTENT_COHERENT:
return 1;
case PIPE_CAP_TGSI_CAN_COMPACT_CONSTANTS:
return 0; /* state trackers will know better */
return PIPE_ENDIAN_LITTLE;
case PIPE_CAP_TGSI_VS_LAYER:
case PIPE_CAP_TEXTURE_GATHER_SM5:
- case PIPE_CAP_BUFFER_MAP_PERSISTENT_COHERENT:
case PIPE_CAP_FAKE_SW_MSAA:
return 0;
case PIPE_CAP_MAX_VIEWPORTS:
{
struct nvc0_context *nvc0 = nvc0_context(pipe);
struct nouveau_pushbuf *push = nvc0->base.pushbuf;
+ int i;
/* NOTE: caller must ensure that (min_index + index_bias) is >= 0 */
nvc0->vb_elt_first = info->min_index + info->index_bias;
PUSH_DATA (push, info->start_instance);
}
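+   /* As on nv30/nv50: coherent mappings force a per-draw vertex cache check. */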
+ for (i = 0; i < nvc0->num_vtxbufs && !nvc0->base.vbo_dirty; ++i) {
+ if (!nvc0->vtxbuf[i].buffer)
+ continue;
+ if (nvc0->vtxbuf[i].buffer->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT)
+ nvc0->base.vbo_dirty = TRUE;
+ }
+
+ if (!nvc0->base.vbo_dirty && nvc0->idxbuf.buffer &&
+ nvc0->idxbuf.buffer->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT)
+ nvc0->base.vbo_dirty = TRUE;
+
if (nvc0->base.vbo_dirty) {
IMMED_NVC0(push, NVC0_3D(VERTEX_ARRAY_FLUSH), 0);
nvc0->base.vbo_dirty = FALSE;
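
For reference, a minimal sketch of the client-side pattern these hooks enable,
written against the Gallium interface of the same era (helpers from
util/u_inlines.h). The "screen" and "pipe" variables and "buf_size" are
illustrative assumptions, not part of the patch:

   /* Sketch only: assumes an initialized pipe_screen *screen and
    * pipe_context *pipe; buf_size is a placeholder. */
   struct pipe_resource templ = {0};
   struct pipe_transfer *xfer;
   struct pipe_resource *res;
   uint8_t *map;

   templ.target = PIPE_BUFFER;
   templ.format = PIPE_FORMAT_R8_UNORM;
   templ.bind = PIPE_BIND_VERTEX_BUFFER;
   templ.usage = PIPE_USAGE_DYNAMIC;
   templ.width0 = buf_size;
   templ.height0 = templ.depth0 = templ.array_size = 1;
   /* These flags are what routes the buffer into GART above. */
   templ.flags = PIPE_RESOURCE_FLAG_MAP_PERSISTENT |
                 PIPE_RESOURCE_FLAG_MAP_COHERENT;
   res = screen->resource_create(screen, &templ);

   map = pipe_buffer_map_range(pipe, res, 0, buf_size,
                               PIPE_TRANSFER_WRITE |
                               PIPE_TRANSFER_PERSISTENT |
                               PIPE_TRANSFER_COHERENT,
                               &xfer);
   /* CPU writes through map remain visible across draws. With a persistent
    * but non-coherent mapping, the client would instead flush writes with
    * pipe->memory_barrier(pipe, PIPE_BARRIER_MAPPED_BUFFER). */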