#include <inttypes.h> /* for PRId64 macro */
#include "main/errors.h"
-#include "main/imports.h"
+
#include "main/mtypes.h"
#include "main/arrayobj.h"
#include "main/bufferobj.h"
* even if the buffer is currently referenced by hardware - they
* just queue the upload as dma rather than mapping the underlying
* buffer directly.
+ *
+ * If the buffer is mapped, suppress implicit buffer range invalidation
+ * by using PIPE_TRANSFER_MAP_DIRECTLY.
*/
- pipe_buffer_write(st_context(ctx)->pipe,
- st_obj->buffer,
- offset, size, data);
+ struct pipe_context *pipe = st_context(ctx)->pipe;
+
+ pipe->buffer_subdata(pipe, st_obj->buffer,
+ _mesa_bufferobj_mapped(obj, MAP_USER) ?
+ PIPE_TRANSFER_MAP_DIRECTLY : 0,
+ offset, size, data);
}
* usage hint, return a pipe_resource_usage value (PIPE_USAGE_DYNAMIC,
* STREAM, etc).
*/
-static const enum pipe_resource_usage
+static enum pipe_resource_usage
buffer_usage(GLenum target, GLboolean immutable,
GLbitfield storageFlags, GLenum usage)
{
}
}
else {
+ /* These are often read by the CPU, so enable CPU caches. */
+ if (target == GL_PIXEL_PACK_BUFFER ||
+ target == GL_PIXEL_UNPACK_BUFFER)
+ return PIPE_USAGE_STAGING;
+
/* BufferData */
switch (usage) {
case GL_DYNAMIC_DRAW:
return PIPE_USAGE_DYNAMIC;
case GL_STREAM_DRAW:
case GL_STREAM_COPY:
- /* XXX: Remove this test and fall-through when we have PBO unpacking
- * acceleration. Right now, PBO unpacking is done by the CPU, so we
- * have to make sure CPU reads are fast.
- */
- if (target != GL_PIXEL_UNPACK_BUFFER_ARB) {
- return PIPE_USAGE_STREAM;
- }
- /* fall through */
+ return PIPE_USAGE_STREAM;
case GL_STATIC_READ:
case GL_DYNAMIC_READ:
case GL_STREAM_READ:
struct pipe_screen *screen = pipe->screen;
struct st_buffer_object *st_obj = st_buffer_object(obj);
struct st_memory_object *st_mem_obj = st_memory_object(memObj);
+ bool is_mapped = _mesa_bufferobj_mapped(obj, MAP_USER);
+
+ if (size > UINT32_MAX || offset > UINT32_MAX) {
+ /* pipe_resource.width0 is 32 bits only and increasing it
+ * to 64 bits doesn't make much sense since hw support
+ * for > 4GB resources is limited.
+ */
+ st_obj->Base.Size = 0;
+ return GL_FALSE;
+ }
if (target != GL_EXTERNAL_VIRTUAL_MEMORY_BUFFER_AMD &&
size && st_obj->buffer &&
/* Just discard the old contents and write new data.
* This should be the same as creating a new buffer, but we avoid
* a lot of validation in Mesa.
+ *
+ * If the buffer is mapped, we can't discard it.
+ *
+        * PIPE_TRANSFER_MAP_DIRECTLY suppresses implicit buffer range
+ * invalidation.
*/
pipe->buffer_subdata(pipe, st_obj->buffer,
- PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE,
+ is_mapped ? PIPE_TRANSFER_MAP_DIRECTLY :
+ PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE,
0, size, data);
return GL_TRUE;
+ } else if (is_mapped) {
+ return GL_TRUE; /* can't reallocate, nothing to do */
} else if (screen->get_param(screen, PIPE_CAP_INVALIDATE_BUFFER)) {
pipe->invalidate_resource(pipe, st_obj->buffer);
return GL_TRUE;
if (offset != 0 || size != obj->Size)
return;
- /* Nothing to invalidate. */
- if (!st_obj->buffer)
+ /* If the buffer is mapped, we can't invalidate it. */
+ if (!st_obj->buffer || _mesa_bufferobj_mapped(obj, MAP_USER))
return;
pipe->invalidate_resource(pipe, st_obj->buffer);
if (access & MESA_MAP_NOWAIT_BIT)
flags |= PIPE_TRANSFER_DONTBLOCK;
+ if (access & MESA_MAP_THREAD_SAFE_BIT)
+ flags |= PIPE_TRANSFER_THREAD_SAFE;
return flags;
}
/* buffer should not already be mapped */
assert(!_mesa_check_disallowed_mapping(src));
- assert(!_mesa_check_disallowed_mapping(dst));
+ /* dst can be mapped, just not the same range as the target range */
u_box_1d(readOffset, size, &box);