* even if the buffer is currently referenced by hardware - they
* just queue the upload as dma rather than mapping the underlying
* buffer directly.
+ *
+ * If the buffer is mapped, suppress implicit buffer range invalidation
+ * by using PIPE_TRANSFER_MAP_DIRECTLY.
*/
- pipe_buffer_write(st_context(ctx)->pipe,
- st_obj->buffer,
- offset, size, data);
+ struct pipe_context *pipe = st_context(ctx)->pipe;
+
+ pipe->buffer_subdata(pipe, st_obj->buffer,
+ _mesa_bufferobj_mapped(obj, MAP_USER) ?
+ PIPE_TRANSFER_MAP_DIRECTLY : 0,
+ offset, size, data);
}
struct pipe_screen *screen = pipe->screen;
struct st_buffer_object *st_obj = st_buffer_object(obj);
struct st_memory_object *st_mem_obj = st_memory_object(memObj);
+ bool is_mapped = _mesa_bufferobj_mapped(obj, MAP_USER);
if (target != GL_EXTERNAL_VIRTUAL_MEMORY_BUFFER_AMD &&
size && st_obj->buffer &&
/* Just discard the old contents and write new data.
* This should be the same as creating a new buffer, but we avoid
* a lot of validation in Mesa.
+ *
+ * If the buffer is mapped, we can't discard it.
+ *
+ * PIPE_TRANSFER_MAP_DIRECTLY suppresses implicit buffer range
+ * invalidation.
*/
pipe->buffer_subdata(pipe, st_obj->buffer,
- PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE,
+ is_mapped ? PIPE_TRANSFER_MAP_DIRECTLY :
+ PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE,
0, size, data);
return GL_TRUE;
+ } else if (is_mapped) {
+ return GL_TRUE; /* can't reallocate, nothing to do */
} else if (screen->get_param(screen, PIPE_CAP_INVALIDATE_BUFFER)) {
pipe->invalidate_resource(pipe, st_obj->buffer);
return GL_TRUE;
if (offset != 0 || size != obj->Size)
return;
- /* Nothing to invalidate. */
- if (!st_obj->buffer)
+ /* Nothing to invalidate, or the buffer is mapped and can't be invalidated. */
+ if (!st_obj->buffer || _mesa_bufferobj_mapped(obj, MAP_USER))
return;
pipe->invalidate_resource(pipe, st_obj->buffer);
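
For reference (not part of the patch): the GL-level pattern these checks guard
against is a persistently mapped buffer that the application keeps updating or
invalidating through the non-mapped entry points. A minimal sketch, assuming
GL 4.4 / ARB_buffer_storage, with a hypothetical buffer name and size:

   GLuint buf;
   glGenBuffers(1, &buf);
   glBindBuffer(GL_ARRAY_BUFFER, buf);
   glBufferStorage(GL_ARRAY_BUFFER, 4096, NULL,
                   GL_MAP_WRITE_BIT | GL_MAP_PERSISTENT_BIT |
                   GL_MAP_COHERENT_BIT | GL_DYNAMIC_STORAGE_BIT);

   /* Keep a persistent user mapping alive. */
   void *ptr = glMapBufferRange(GL_ARRAY_BUFFER, 0, 4096,
                                GL_MAP_WRITE_BIT | GL_MAP_PERSISTENT_BIT |
                                GL_MAP_COHERENT_BIT);

   /* Upload through the non-mapped path while still mapped: the state
    * tracker must not discard or reallocate the storage here. */
   static const float data[4] = { 0.0f, 1.0f, 2.0f, 3.0f };
   glBufferSubData(GL_ARRAY_BUFFER, 0, sizeof(data), data);

   /* Invalidation while mapped must likewise leave the backing
    * resource alone. */
   glInvalidateBufferData(buf);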