/**************************************************************************
- *
- * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ *
+ * Copyright 2007 VMware, Inc.
* All Rights Reserved.
- *
+ *
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
- *
+ *
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
- *
+ *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
+ *
 **************************************************************************/
+#include <inttypes.h> /* for PRId64 macro */
+
+#include "main/errors.h"
#include "main/imports.h"
#include "main/mtypes.h"
#include "main/arrayobj.h"
#include "st_context.h"
#include "st_cb_bufferobjects.h"
+#include "st_cb_memoryobjects.h"
#include "st_debug.h"
+#include "st_util.h"
#include "pipe/p_context.h"
#include "pipe/p_defines.h"
 * internal structure were somehow shared.
*/
static struct gl_buffer_object *
-st_bufferobj_alloc(struct gl_context *ctx, GLuint name, GLenum target)
+st_bufferobj_alloc(struct gl_context *ctx, GLuint name)
{
struct st_buffer_object *st_obj = ST_CALLOC_STRUCT(st_buffer_object);
if (!st_obj)
return NULL;
- _mesa_initialize_buffer_object(ctx, &st_obj->Base, name, target);
+ _mesa_initialize_buffer_object(ctx, &st_obj->Base, name);
return &st_obj->Base;
}
struct st_buffer_object *st_obj = st_buffer_object(obj);
assert(obj->RefCount == 0);
- assert(st_obj->transfer == NULL);
+ _mesa_buffer_unmap_all_mappings(ctx, obj);
- if (st_obj->buffer)
+ if (st_obj->buffer)
pipe_resource_reference(&st_obj->buffer, NULL);
- free(st_obj);
+ _mesa_delete_buffer_object(ctx, obj);
}
*/
static void
st_bufferobj_subdata(struct gl_context *ctx,
- GLintptrARB offset,
- GLsizeiptrARB size,
- const GLvoid * data, struct gl_buffer_object *obj)
+ GLintptrARB offset,
+ GLsizeiptrARB size,
+ const void * data, struct gl_buffer_object *obj)
{
struct st_buffer_object *st_obj = st_buffer_object(obj);
/* we may be called from VBO code, so double-check params here */
- ASSERT(offset >= 0);
- ASSERT(size >= 0);
- ASSERT(offset + size <= obj->Size);
+ assert(offset >= 0);
+ assert(size >= 0);
+ assert(offset + size <= obj->Size);
if (!size)
return;
* even if the buffer is currently referenced by hardware - they
* just queue the upload as dma rather than mapping the underlying
* buffer directly.
+ *
+ * If the buffer is mapped, suppress implicit buffer range invalidation
+ * by using PIPE_TRANSFER_MAP_DIRECTLY.
*/
- pipe_buffer_write(st_context(ctx)->pipe,
- st_obj->buffer,
- offset, size, data);
+ struct pipe_context *pipe = st_context(ctx)->pipe;
+
+ pipe->buffer_subdata(pipe, st_obj->buffer,
+ _mesa_bufferobj_mapped(obj, MAP_USER) ?
+ PIPE_TRANSFER_MAP_DIRECTLY : 0,
+ offset, size, data);
}
st_bufferobj_get_subdata(struct gl_context *ctx,
GLintptrARB offset,
GLsizeiptrARB size,
- GLvoid * data, struct gl_buffer_object *obj)
+ void * data, struct gl_buffer_object *obj)
{
struct st_buffer_object *st_obj = st_buffer_object(obj);
/* we may be called from VBO code, so double-check params here */
- ASSERT(offset >= 0);
- ASSERT(size >= 0);
- ASSERT(offset + size <= obj->Size);
+ assert(offset >= 0);
+ assert(size >= 0);
+ assert(offset + size <= obj->Size);
if (!size)
return;
/**
- * Allocate space for and store data in a buffer object. Any data that was
- * previously stored in the buffer object is lost. If data is NULL,
- * memory will be allocated, but no copy will occur.
- * Called via ctx->Driver.BufferData().
- * \return GL_TRUE for success, GL_FALSE if out of memory
+ * Return bitmask of PIPE_BIND_x flags corresponding to a GL buffer target.
*/
-static GLboolean
-st_bufferobj_data(struct gl_context *ctx,
- GLenum target,
- GLsizeiptrARB size,
- const GLvoid * data,
- GLenum usage,
- struct gl_buffer_object *obj)
+static unsigned
+buffer_target_to_bind_flags(GLenum target)
{
- struct st_context *st = st_context(ctx);
- struct pipe_context *pipe = st->pipe;
- struct st_buffer_object *st_obj = st_buffer_object(obj);
- unsigned bind, pipe_usage;
-
- st_obj->Base.Size = size;
- st_obj->Base.Usage = usage;
-
- switch(target) {
+ switch (target) {
case GL_PIXEL_PACK_BUFFER_ARB:
case GL_PIXEL_UNPACK_BUFFER_ARB:
- bind = PIPE_BIND_RENDER_TARGET | PIPE_BIND_SAMPLER_VIEW;
- break;
+ return PIPE_BIND_RENDER_TARGET | PIPE_BIND_SAMPLER_VIEW;
case GL_ARRAY_BUFFER_ARB:
- bind = PIPE_BIND_VERTEX_BUFFER;
- break;
+ return PIPE_BIND_VERTEX_BUFFER;
case GL_ELEMENT_ARRAY_BUFFER_ARB:
- bind = PIPE_BIND_INDEX_BUFFER;
- break;
+ return PIPE_BIND_INDEX_BUFFER;
case GL_TEXTURE_BUFFER:
- bind = PIPE_BIND_SAMPLER_VIEW;
- break;
+ return PIPE_BIND_SAMPLER_VIEW;
case GL_TRANSFORM_FEEDBACK_BUFFER:
- bind = PIPE_BIND_STREAM_OUTPUT;
- break;
+ return PIPE_BIND_STREAM_OUTPUT;
case GL_UNIFORM_BUFFER:
- bind = PIPE_BIND_CONSTANT_BUFFER;
- break;
+ return PIPE_BIND_CONSTANT_BUFFER;
+ case GL_DRAW_INDIRECT_BUFFER:
+ case GL_PARAMETER_BUFFER_ARB:
+ return PIPE_BIND_COMMAND_ARGS_BUFFER;
+ case GL_ATOMIC_COUNTER_BUFFER:
+ case GL_SHADER_STORAGE_BUFFER:
+ return PIPE_BIND_SHADER_BUFFER;
+ case GL_QUERY_BUFFER:
+ return PIPE_BIND_QUERY_BUFFER;
default:
- bind = 0;
+ return 0;
}
+}
- switch (usage) {
- case GL_STATIC_DRAW:
- case GL_STATIC_READ:
- case GL_STATIC_COPY:
- pipe_usage = PIPE_USAGE_STATIC;
- break;
- case GL_DYNAMIC_DRAW:
- case GL_DYNAMIC_READ:
- case GL_DYNAMIC_COPY:
- pipe_usage = PIPE_USAGE_DYNAMIC;
- break;
- case GL_STREAM_DRAW:
- case GL_STREAM_READ:
- case GL_STREAM_COPY:
- pipe_usage = PIPE_USAGE_STREAM;
- break;
- default:
- pipe_usage = PIPE_USAGE_DEFAULT;
+
+/**
+ * Return bitmask of PIPE_RESOURCE_x flags corresponding to GL_MAP_x flags.
+ */
+static unsigned
+storage_flags_to_buffer_flags(GLbitfield storageFlags)
+{
+ unsigned flags = 0;
+ if (storageFlags & GL_MAP_PERSISTENT_BIT)
+ flags |= PIPE_RESOURCE_FLAG_MAP_PERSISTENT;
+ if (storageFlags & GL_MAP_COHERENT_BIT)
+ flags |= PIPE_RESOURCE_FLAG_MAP_COHERENT;
+ if (storageFlags & GL_SPARSE_STORAGE_BIT_ARB)
+ flags |= PIPE_RESOURCE_FLAG_SPARSE;
+ return flags;
+}
+
+
+/**
+ * From a buffer object's target, immutability flag, storage flags and
+ * usage hint, return a pipe_resource_usage value (PIPE_USAGE_DYNAMIC,
+ * STREAM, etc).
+ */
+static enum pipe_resource_usage
+buffer_usage(GLenum target, GLboolean immutable,
+ GLbitfield storageFlags, GLenum usage)
+{
+ if (immutable) {
+ /* BufferStorage */
+ if (storageFlags & GL_CLIENT_STORAGE_BIT) {
+ if (storageFlags & GL_MAP_READ_BIT)
+ return PIPE_USAGE_STAGING;
+ else
+ return PIPE_USAGE_STREAM;
+ } else {
+ return PIPE_USAGE_DEFAULT;
+ }
}
+ else {
+ /* BufferData */
+ switch (usage) {
+ case GL_DYNAMIC_DRAW:
+ case GL_DYNAMIC_COPY:
+ return PIPE_USAGE_DYNAMIC;
+ case GL_STREAM_DRAW:
+ case GL_STREAM_COPY:
+ /* XXX: Remove this test and fall-through when we have PBO unpacking
+ * acceleration. Right now, PBO unpacking is done by the CPU, so we
+ * have to make sure CPU reads are fast.
+ */
+ if (target != GL_PIXEL_UNPACK_BUFFER_ARB) {
+ return PIPE_USAGE_STREAM;
+ }
+ /* fall through */
+ case GL_STATIC_READ:
+ case GL_DYNAMIC_READ:
+ case GL_STREAM_READ:
+ return PIPE_USAGE_STAGING;
+ case GL_STATIC_DRAW:
+ case GL_STATIC_COPY:
+ default:
+ return PIPE_USAGE_DEFAULT;
+ }
+ }
+}
+
+
+static ALWAYS_INLINE GLboolean
+bufferobj_data(struct gl_context *ctx,
+ GLenum target,
+ GLsizeiptrARB size,
+ const void *data,
+ struct gl_memory_object *memObj,
+ GLuint64 offset,
+ GLenum usage,
+ GLbitfield storageFlags,
+ struct gl_buffer_object *obj)
+{
+ struct st_context *st = st_context(ctx);
+ struct pipe_context *pipe = st->pipe;
+ struct pipe_screen *screen = pipe->screen;
+ struct st_buffer_object *st_obj = st_buffer_object(obj);
+ struct st_memory_object *st_mem_obj = st_memory_object(memObj);
+ bool is_mapped = _mesa_bufferobj_mapped(obj, MAP_USER);
+
+ if (target != GL_EXTERNAL_VIRTUAL_MEMORY_BUFFER_AMD &&
+ size && st_obj->buffer &&
+ st_obj->Base.Size == size &&
+ st_obj->Base.Usage == usage &&
+ st_obj->Base.StorageFlags == storageFlags) {
+ if (data) {
+ /* Just discard the old contents and write new data.
+ * This should be the same as creating a new buffer, but we avoid
+ * a lot of validation in Mesa.
+ *
+ * If the buffer is mapped, we can't discard it.
+ *
       * PIPE_TRANSFER_MAP_DIRECTLY suppresses implicit buffer range
+ * invalidation.
+ */
+ pipe->buffer_subdata(pipe, st_obj->buffer,
+ is_mapped ? PIPE_TRANSFER_MAP_DIRECTLY :
+ PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE,
+ 0, size, data);
+ return GL_TRUE;
+ } else if (is_mapped) {
+ return GL_TRUE; /* can't reallocate, nothing to do */
+ } else if (screen->get_param(screen, PIPE_CAP_INVALIDATE_BUFFER)) {
+ pipe->invalidate_resource(pipe, st_obj->buffer);
+ return GL_TRUE;
+ }
+ }
+
+ st_obj->Base.Size = size;
+ st_obj->Base.Usage = usage;
+ st_obj->Base.StorageFlags = storageFlags;
pipe_resource_reference( &st_obj->buffer, NULL );
+ const unsigned bindings = buffer_target_to_bind_flags(target);
+
if (ST_DEBUG & DEBUG_BUFFER) {
- debug_printf("Create buffer size %lu bind 0x%x\n", size, bind);
+ debug_printf("Create buffer size %" PRId64 " bind 0x%x\n",
+ (int64_t) size, bindings);
}
if (size != 0) {
- st_obj->buffer = pipe_buffer_create(pipe->screen, bind,
- pipe_usage, size);
+ struct pipe_resource buffer;
+
+ memset(&buffer, 0, sizeof buffer);
+ buffer.target = PIPE_BUFFER;
+ buffer.format = PIPE_FORMAT_R8_UNORM; /* want TYPELESS or similar */
+ buffer.bind = bindings;
+ buffer.usage =
+ buffer_usage(target, st_obj->Base.Immutable, storageFlags, usage);
+ buffer.flags = storage_flags_to_buffer_flags(storageFlags);
+ buffer.width0 = size;
+ buffer.height0 = 1;
+ buffer.depth0 = 1;
+ buffer.array_size = 1;
+
+ if (st_mem_obj) {
+ st_obj->buffer = screen->resource_from_memobj(screen, &buffer,
+ st_mem_obj->memory,
+ offset);
+ }
+ else if (target == GL_EXTERNAL_VIRTUAL_MEMORY_BUFFER_AMD) {
+ st_obj->buffer =
+ screen->resource_from_user_memory(screen, &buffer, (void*)data);
+ }
+ else {
+ st_obj->buffer = screen->resource_create(screen, &buffer);
+
+ if (st_obj->buffer && data)
+ pipe_buffer_write(pipe, st_obj->buffer, 0, size, data);
+ }
if (!st_obj->buffer) {
/* out of memory */
st_obj->Base.Size = 0;
return GL_FALSE;
}
-
- if (data)
- pipe_buffer_write(pipe, st_obj->buffer, 0, size, data);
- return GL_TRUE;
}
+ /* The current buffer may be bound, so we have to revalidate all atoms that
+ * might be using it.
+ */
+ if (st_obj->Base.UsageHistory & USAGE_ARRAY_BUFFER)
+ ctx->NewDriverState |= ST_NEW_VERTEX_ARRAYS;
+ /* if (st_obj->Base.UsageHistory & USAGE_ELEMENT_ARRAY_BUFFER) */
+ /* ctx->NewDriverState |= TODO: Handle indices as gallium state; */
+ if (st_obj->Base.UsageHistory & USAGE_UNIFORM_BUFFER)
+ ctx->NewDriverState |= ST_NEW_UNIFORM_BUFFER;
+ if (st_obj->Base.UsageHistory & USAGE_SHADER_STORAGE_BUFFER)
+ ctx->NewDriverState |= ST_NEW_STORAGE_BUFFER;
+ if (st_obj->Base.UsageHistory & USAGE_TEXTURE_BUFFER)
+ ctx->NewDriverState |= ST_NEW_SAMPLER_VIEWS | ST_NEW_IMAGE_UNITS;
+ if (st_obj->Base.UsageHistory & USAGE_ATOMIC_COUNTER_BUFFER)
+ ctx->NewDriverState |= ctx->DriverFlags.NewAtomicBuffer;
+
return GL_TRUE;
}
+/**
+ * Allocate space for and store data in a buffer object. Any data that was
+ * previously stored in the buffer object is lost. If data is NULL,
+ * memory will be allocated, but no copy will occur.
+ * Called via ctx->Driver.BufferData().
+ * \return GL_TRUE for success, GL_FALSE if out of memory
+ */
+static GLboolean
+st_bufferobj_data(struct gl_context *ctx,
+ GLenum target,
+ GLsizeiptrARB size,
+ const void *data,
+ GLenum usage,
+ GLbitfield storageFlags,
+ struct gl_buffer_object *obj)
+{
+ return bufferobj_data(ctx, target, size, data, NULL, 0, usage, storageFlags, obj);
+}
+
+static GLboolean
+st_bufferobj_data_mem(struct gl_context *ctx,
+ GLenum target,
+ GLsizeiptrARB size,
+ struct gl_memory_object *memObj,
+ GLuint64 offset,
+ GLenum usage,
+ struct gl_buffer_object *bufObj)
+{
+ return bufferobj_data(ctx, target, size, NULL, memObj, offset, usage, 0, bufObj);
+}
/**
- * Called via glMapBufferRange().
+ * Called via glInvalidateBuffer(Sub)Data.
*/
-static void *
-st_bufferobj_map_range(struct gl_context *ctx,
- GLintptr offset, GLsizeiptr length, GLbitfield access,
- struct gl_buffer_object *obj)
+static void
+st_bufferobj_invalidate(struct gl_context *ctx,
+ struct gl_buffer_object *obj,
+ GLintptr offset,
+ GLsizeiptr size)
{
- struct pipe_context *pipe = st_context(ctx)->pipe;
+ struct st_context *st = st_context(ctx);
+ struct pipe_context *pipe = st->pipe;
struct st_buffer_object *st_obj = st_buffer_object(obj);
- enum pipe_transfer_usage flags = 0x0;
+
+ /* We ignore partial invalidates. */
+ if (offset != 0 || size != obj->Size)
+ return;
+
+ /* If the buffer is mapped, we can't invalidate it. */
+ if (!st_obj->buffer || _mesa_bufferobj_mapped(obj, MAP_USER))
+ return;
+
+ pipe->invalidate_resource(pipe, st_obj->buffer);
+}
+
+
+/**
+ * Convert GLbitfield of GL_MAP_x flags to gallium pipe_transfer_usage flags.
+ * \param wholeBuffer is the whole buffer being mapped?
+ */
+enum pipe_transfer_usage
+st_access_flags_to_transfer_flags(GLbitfield access, bool wholeBuffer)
+{
+ enum pipe_transfer_usage flags = 0;
if (access & GL_MAP_WRITE_BIT)
flags |= PIPE_TRANSFER_WRITE;
flags |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
}
else if (access & GL_MAP_INVALIDATE_RANGE_BIT) {
- if (offset == 0 && length == obj->Size)
+ if (wholeBuffer)
flags |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
else
flags |= PIPE_TRANSFER_DISCARD_RANGE;
}
-
+
if (access & GL_MAP_UNSYNCHRONIZED_BIT)
flags |= PIPE_TRANSFER_UNSYNCHRONIZED;
+ if (access & GL_MAP_PERSISTENT_BIT)
+ flags |= PIPE_TRANSFER_PERSISTENT;
+
+ if (access & GL_MAP_COHERENT_BIT)
+ flags |= PIPE_TRANSFER_COHERENT;
+
/* ... other flags ...
- */
+ */
if (access & MESA_MAP_NOWAIT_BIT)
flags |= PIPE_TRANSFER_DONTBLOCK;
+ return flags;
+}
+
+
+/**
+ * Called via glMapBufferRange().
+ */
+static void *
+st_bufferobj_map_range(struct gl_context *ctx,
+ GLintptr offset, GLsizeiptr length, GLbitfield access,
+ struct gl_buffer_object *obj,
+ gl_map_buffer_index index)
+{
+ struct pipe_context *pipe = st_context(ctx)->pipe;
+ struct st_buffer_object *st_obj = st_buffer_object(obj);
+
assert(offset >= 0);
assert(length >= 0);
assert(offset < obj->Size);
assert(offset + length <= obj->Size);
- obj->Pointer = pipe_buffer_map_range(pipe,
- st_obj->buffer,
- offset, length,
- flags,
- &st_obj->transfer);
- if (obj->Pointer) {
- obj->Offset = offset;
- obj->Length = length;
- obj->AccessFlags = access;
+ const enum pipe_transfer_usage transfer_flags =
+ st_access_flags_to_transfer_flags(access,
+ offset == 0 && length == obj->Size);
+
+ obj->Mappings[index].Pointer = pipe_buffer_map_range(pipe,
+ st_obj->buffer,
+ offset, length,
+ transfer_flags,
+ &st_obj->transfer[index]);
+ if (obj->Mappings[index].Pointer) {
+ obj->Mappings[index].Offset = offset;
+ obj->Mappings[index].Length = length;
+ obj->Mappings[index].AccessFlags = access;
}
else {
- st_obj->transfer = NULL;
+ st_obj->transfer[index] = NULL;
}
- return obj->Pointer;
+ return obj->Mappings[index].Pointer;
}
static void
st_bufferobj_flush_mapped_range(struct gl_context *ctx,
GLintptr offset, GLsizeiptr length,
- struct gl_buffer_object *obj)
+ struct gl_buffer_object *obj,
+ gl_map_buffer_index index)
{
struct pipe_context *pipe = st_context(ctx)->pipe;
struct st_buffer_object *st_obj = st_buffer_object(obj);
/* Subrange is relative to mapped range */
assert(offset >= 0);
assert(length >= 0);
- assert(offset + length <= obj->Length);
- assert(obj->Pointer);
-
+ assert(offset + length <= obj->Mappings[index].Length);
+ assert(obj->Mappings[index].Pointer);
+
if (!length)
return;
- pipe_buffer_flush_mapped_range(pipe, st_obj->transfer,
- obj->Offset + offset, length);
+ pipe_buffer_flush_mapped_range(pipe, st_obj->transfer[index],
+ obj->Mappings[index].Offset + offset,
+ length);
}
* Called via glUnmapBufferARB().
*/
static GLboolean
-st_bufferobj_unmap(struct gl_context *ctx, struct gl_buffer_object *obj)
+st_bufferobj_unmap(struct gl_context *ctx, struct gl_buffer_object *obj,
+ gl_map_buffer_index index)
{
struct pipe_context *pipe = st_context(ctx)->pipe;
struct st_buffer_object *st_obj = st_buffer_object(obj);
- if (obj->Length)
- pipe_buffer_unmap(pipe, st_obj->transfer);
+ if (obj->Mappings[index].Length)
+ pipe_buffer_unmap(pipe, st_obj->transfer[index]);
- st_obj->transfer = NULL;
- obj->Pointer = NULL;
- obj->Offset = 0;
- obj->Length = 0;
+ st_obj->transfer[index] = NULL;
+ obj->Mappings[index].Pointer = NULL;
+ obj->Mappings[index].Offset = 0;
+ obj->Mappings[index].Length = 0;
return GL_TRUE;
}
struct st_buffer_object *dstObj = st_buffer_object(dst);
struct pipe_box box;
- if(!size)
+ if (!size)
return;
/* buffer should not already be mapped */
- assert(!src->Pointer);
- assert(!dst->Pointer);
+ assert(!_mesa_check_disallowed_mapping(src));
+ assert(!_mesa_check_disallowed_mapping(dst));
u_box_1d(readOffset, size, &box);
srcObj->buffer, 0, &box);
}
-
-/* TODO: if buffer wasn't created with appropriate usage flags, need
- * to recreate it now and copy contents -- or possibly create a
- * gallium entrypoint to extend the usage flags and let the driver
- * decide if a copy is necessary.
+/**
+ * Called via glClearBufferSubData().
*/
-void
-st_bufferobj_validate_usage(struct st_context *st,
- struct st_buffer_object *obj,
- unsigned usage)
+static void
+st_clear_buffer_subdata(struct gl_context *ctx,
+ GLintptr offset, GLsizeiptr size,
+ const void *clearValue,
+ GLsizeiptr clearValueSize,
+ struct gl_buffer_object *bufObj)
{
+ struct pipe_context *pipe = st_context(ctx)->pipe;
+ struct st_buffer_object *buf = st_buffer_object(bufObj);
+ static const char zeros[16] = {0};
+
+ if (!pipe->clear_buffer) {
+ _mesa_ClearBufferSubData_sw(ctx, offset, size,
+ clearValue, clearValueSize, bufObj);
+ return;
+ }
+
+ if (!clearValue)
+ clearValue = zeros;
+
+ pipe->clear_buffer(pipe, buf->buffer, offset, size,
+ clearValue, clearValueSize);
}
+static void
+st_bufferobj_page_commitment(struct gl_context *ctx,
+ struct gl_buffer_object *bufferObj,
+ GLintptr offset, GLsizeiptr size,
+ GLboolean commit)
+{
+ struct pipe_context *pipe = st_context(ctx)->pipe;
+ struct st_buffer_object *buf = st_buffer_object(bufferObj);
+ struct pipe_box box;
+
+ u_box_1d(offset, size, &box);
+
+ if (!pipe->resource_commit(pipe, buf->buffer, 0, &box, commit)) {
+ _mesa_error(ctx, GL_OUT_OF_MEMORY, "glBufferPageCommitmentARB(out of memory)");
+ return;
+ }
+}
void
-st_init_bufferobject_functions(struct dd_function_table *functions)
+st_init_bufferobject_functions(struct pipe_screen *screen,
+ struct dd_function_table *functions)
{
functions->NewBufferObject = st_bufferobj_alloc;
functions->DeleteBuffer = st_bufferobj_free;
functions->BufferData = st_bufferobj_data;
+ functions->BufferDataMem = st_bufferobj_data_mem;
functions->BufferSubData = st_bufferobj_subdata;
functions->GetBufferSubData = st_bufferobj_get_subdata;
functions->MapBufferRange = st_bufferobj_map_range;
functions->FlushMappedBufferRange = st_bufferobj_flush_mapped_range;
functions->UnmapBuffer = st_bufferobj_unmap;
functions->CopyBufferSubData = st_copy_buffer_subdata;
+ functions->ClearBufferSubData = st_clear_buffer_subdata;
+ functions->BufferPageCommitment = st_bufferobj_page_commitment;
- /* For GL_APPLE_vertex_array_object */
- functions->NewArrayObject = _mesa_new_array_object;
- functions->DeleteArrayObject = _mesa_delete_array_object;
+ if (screen->get_param(screen, PIPE_CAP_INVALIDATE_BUFFER))
+ functions->InvalidateBufferSubData = st_bufferobj_invalidate;
}