#include "intel_regions.h"
static GLboolean
-intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj);
+intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj,
+ gl_map_buffer_index index);
/** Allocates a new drm_intel_bo to store the data for the buffer object. */
static void
* to the spec, but Mesa doesn't do UnmapBuffer for us at context destroy
* (though it does if you call glDeleteBuffers)
*/
- if (obj->Pointer)
- intel_bufferobj_unmap(ctx, obj);
+ _mesa_buffer_unmap_all_mappings(ctx, obj);
free(intel_obj->sys_buffer);
intel_obj->Base.Usage = usage;
intel_obj->Base.StorageFlags = storageFlags;
- assert(!obj->Pointer); /* Mesa should have unmapped it */
+ assert(!obj->Mappings[MAP_USER].Pointer); /* Mesa should have unmapped it */
+ assert(!obj->Mappings[MAP_INTERNAL].Pointer);
if (intel_obj->buffer != NULL)
release_buffer(intel_obj);
static void *
intel_bufferobj_map_range(struct gl_context * ctx,
GLintptr offset, GLsizeiptr length,
- GLbitfield access, struct gl_buffer_object *obj)
+ GLbitfield access, struct gl_buffer_object *obj,
+ gl_map_buffer_index index)
{
struct intel_context *intel = intel_context(ctx);
struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
/* _mesa_MapBufferRange (GL entrypoint) sets these, but the vbo module also
* internally uses our functions directly.
*/
- obj->Offset = offset;
- obj->Length = length;
- obj->AccessFlags = access;
+ obj->Mappings[index].Offset = offset;
+ obj->Mappings[index].Length = length;
+ obj->Mappings[index].AccessFlags = access;
if (intel_obj->sys_buffer) {
const bool read_only =
release_buffer(intel_obj);
if (!intel_obj->buffer || intel_obj->source) {
- obj->Pointer = intel_obj->sys_buffer + offset;
- return obj->Pointer;
+ obj->Mappings[index].Pointer = intel_obj->sys_buffer + offset;
+ return obj->Mappings[index].Pointer;
}
free(intel_obj->sys_buffer);
}
if (intel_obj->buffer == NULL) {
- obj->Pointer = NULL;
+ obj->Mappings[index].Pointer = NULL;
return NULL;
}
const unsigned extra = (uintptr_t) offset % alignment;
if (access & GL_MAP_FLUSH_EXPLICIT_BIT) {
- intel_obj->range_map_buffer = _mesa_align_malloc(length + extra,
- alignment);
- obj->Pointer = intel_obj->range_map_buffer + extra;
+ intel_obj->range_map_buffer[index] =
+ _mesa_align_malloc(length + extra, alignment);
+ obj->Mappings[index].Pointer =
+ intel_obj->range_map_buffer[index] + extra;
} else {
- intel_obj->range_map_bo = drm_intel_bo_alloc(intel->bufmgr,
- "range map",
- length + extra,
- alignment);
+ intel_obj->range_map_bo[index] = drm_intel_bo_alloc(intel->bufmgr,
+ "range map",
+ length + extra,
+ alignment);
if (!(access & GL_MAP_READ_BIT)) {
- drm_intel_gem_bo_map_gtt(intel_obj->range_map_bo);
+ drm_intel_gem_bo_map_gtt(intel_obj->range_map_bo[index]);
} else {
- drm_intel_bo_map(intel_obj->range_map_bo,
+ drm_intel_bo_map(intel_obj->range_map_bo[index],
(access & GL_MAP_WRITE_BIT) != 0);
}
- obj->Pointer = intel_obj->range_map_bo->virtual + extra;
+ obj->Mappings[index].Pointer =
+ intel_obj->range_map_bo[index]->virtual + extra;
}
- return obj->Pointer;
+ return obj->Mappings[index].Pointer;
}
if (access & GL_MAP_UNSYNCHRONIZED_BIT)
drm_intel_bo_map(intel_obj->buffer, (access & GL_MAP_WRITE_BIT) != 0);
}
- obj->Pointer = intel_obj->buffer->virtual + offset;
- return obj->Pointer;
+ obj->Mappings[index].Pointer = intel_obj->buffer->virtual + offset;
+ return obj->Mappings[index].Pointer;
}
/* Ideally we'd use a BO to avoid taking up cache space for the temporary
static void
intel_bufferobj_flush_mapped_range(struct gl_context *ctx,
GLintptr offset, GLsizeiptr length,
- struct gl_buffer_object *obj)
+ struct gl_buffer_object *obj,
+ gl_map_buffer_index index)
{
struct intel_context *intel = intel_context(ctx);
struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
/* Unless we're in the range map using a temporary system buffer,
* there's no work to do.
*/
- if (intel_obj->range_map_buffer == NULL)
+ if (intel_obj->range_map_buffer[index] == NULL)
return;
if (length == 0)
* former points to the actual mapping while the latter may be offset to
* meet alignment guarantees.
*/
- drm_intel_bo_subdata(temp_bo, 0, length, obj->Pointer);
+ drm_intel_bo_subdata(temp_bo, 0, length, obj->Mappings[index].Pointer);
intel_emit_linear_blit(intel,
- intel_obj->buffer, obj->Offset + offset,
+ intel_obj->buffer,
+ obj->Mappings[index].Offset + offset,
temp_bo, 0,
length);
* Called via glUnmapBuffer().
*/
static GLboolean
-intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj)
+intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj,
+ gl_map_buffer_index index)
{
struct intel_context *intel = intel_context(ctx);
struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
assert(intel_obj);
- assert(obj->Pointer);
+ assert(obj->Mappings[index].Pointer);
if (intel_obj->sys_buffer != NULL) {
/* always keep the mapping around. */
- } else if (intel_obj->range_map_buffer != NULL) {
+ } else if (intel_obj->range_map_buffer[index] != NULL) {
/* Since we've emitted some blits to buffers that will (likely) be used
* in rendering operations in other cache domains in this batch, emit a
* flush. Once again, we wish for a domain tracker in libdrm to cover
* usage inside of a batchbuffer.
*/
intel_batchbuffer_emit_mi_flush(intel);
- _mesa_align_free(intel_obj->range_map_buffer);
- intel_obj->range_map_buffer = NULL;
- } else if (intel_obj->range_map_bo != NULL) {
- const unsigned extra = obj->Pointer - intel_obj->range_map_bo->virtual;
+ _mesa_align_free(intel_obj->range_map_buffer[index]);
+ intel_obj->range_map_buffer[index] = NULL;
+ } else if (intel_obj->range_map_bo[index] != NULL) {
+ const unsigned extra = obj->Mappings[index].Pointer -
+ intel_obj->range_map_bo[index]->virtual;
- drm_intel_bo_unmap(intel_obj->range_map_bo);
+ drm_intel_bo_unmap(intel_obj->range_map_bo[index]);
intel_emit_linear_blit(intel,
- intel_obj->buffer, obj->Offset,
- intel_obj->range_map_bo, extra,
- obj->Length);
+ intel_obj->buffer, obj->Mappings[index].Offset,
+ intel_obj->range_map_bo[index], extra,
+ obj->Mappings[index].Length);
/* Since we've emitted some blits to buffers that will (likely) be used
* in rendering operations in other cache domains in this batch, emit a
*/
intel_batchbuffer_emit_mi_flush(intel);
- drm_intel_bo_unreference(intel_obj->range_map_bo);
- intel_obj->range_map_bo = NULL;
+ drm_intel_bo_unreference(intel_obj->range_map_bo[index]);
+ intel_obj->range_map_bo[index] = NULL;
} else if (intel_obj->buffer != NULL) {
drm_intel_bo_unmap(intel_obj->buffer);
}
- obj->Pointer = NULL;
- obj->Offset = 0;
- obj->Length = 0;
+ obj->Mappings[index].Pointer = NULL;
+ obj->Mappings[index].Offset = 0;
+ obj->Mappings[index].Length = 0;
return true;
}
char *ptr = intel_bufferobj_map_range(ctx, 0, dst->Size,
GL_MAP_READ_BIT |
GL_MAP_WRITE_BIT,
- dst);
+ dst, MAP_INTERNAL);
memmove(ptr + write_offset, ptr + read_offset, size);
- intel_bufferobj_unmap(ctx, dst);
+ intel_bufferobj_unmap(ctx, dst, MAP_INTERNAL);
} else {
const char *src_ptr;
char *dst_ptr;
src_ptr = intel_bufferobj_map_range(ctx, 0, src->Size,
- GL_MAP_READ_BIT, src);
+ GL_MAP_READ_BIT, src,
+ MAP_INTERNAL);
dst_ptr = intel_bufferobj_map_range(ctx, 0, dst->Size,
- GL_MAP_WRITE_BIT, dst);
+ GL_MAP_WRITE_BIT, dst,
+ MAP_INTERNAL);
memcpy(dst_ptr + write_offset, src_ptr + read_offset, size);
- intel_bufferobj_unmap(ctx, src);
- intel_bufferobj_unmap(ctx, dst);
+ intel_bufferobj_unmap(ctx, src, MAP_INTERNAL);
+ intel_bufferobj_unmap(ctx, dst, MAP_INTERNAL);
}
return;
}
/** System memory buffer data, if not using a BO to store the data. */
void *sys_buffer;
- drm_intel_bo *range_map_bo;
- void *range_map_buffer;
- unsigned int range_map_offset;
- GLsizei range_map_size;
+ drm_intel_bo *range_map_bo[MAP_COUNT];
+ void *range_map_buffer[MAP_COUNT];
bool source;
};
buf = (GLubyte *) ctx->Driver.MapBufferRange(ctx, 0, unpack->BufferObj->Size,
GL_MAP_READ_BIT,
- unpack->BufferObj);
+ unpack->BufferObj,
+ MAP_INTERNAL);
if (!buf) {
_mesa_error(ctx, GL_INVALID_OPERATION, "glBitmap(PBO is mapped)");
return NULL;
if (_mesa_is_bufferobj(unpack->BufferObj)) {
/* done with PBO so unmap it now */
- ctx->Driver.UnmapBuffer(ctx, unpack->BufferObj);
+ ctx->Driver.UnmapBuffer(ctx, unpack->BufferObj, MAP_INTERNAL);
}
intel_check_front_buffer_rendering(intel);
offset,
ib_size,
GL_MAP_READ_BIT,
- bufferobj);
+ bufferobj,
+ MAP_INTERNAL);
intel_upload_data(brw, map, ib_size, ib_type_size, &bo, &offset);
brw->ib.start_vertex_offset = offset / ib_type_size;
- ctx->Driver.UnmapBuffer(ctx, bufferobj);
+ ctx->Driver.UnmapBuffer(ctx, bufferobj, MAP_INTERNAL);
} else {
/* Use CMD_3D_PRIM's start_vertex_offset to avoid re-uploading
* the index buffer state when we're just moving the start index
}
static GLboolean
-intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj);
+intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj,
+ gl_map_buffer_index index);
static void
intel_bufferobj_mark_gpu_usage(struct intel_buffer_object *intel_obj,
* to the spec, but Mesa doesn't do UnmapBuffer for us at context destroy
* (though it does if you call glDeleteBuffers)
*/
- if (obj->Pointer)
- intel_bufferobj_unmap(ctx, obj);
+ _mesa_buffer_unmap_all_mappings(ctx, obj);
drm_intel_bo_unreference(intel_obj->buffer);
free(intel_obj);
intel_obj->Base.Usage = usage;
intel_obj->Base.StorageFlags = storageFlags;
- assert(!obj->Pointer); /* Mesa should have unmapped it */
+ assert(!obj->Mappings[MAP_USER].Pointer); /* Mesa should have unmapped it */
+ assert(!obj->Mappings[MAP_INTERNAL].Pointer);
if (intel_obj->buffer != NULL)
release_buffer(intel_obj);
static void *
intel_bufferobj_map_range(struct gl_context * ctx,
GLintptr offset, GLsizeiptr length,
- GLbitfield access, struct gl_buffer_object *obj)
+ GLbitfield access, struct gl_buffer_object *obj,
+ gl_map_buffer_index index)
{
struct brw_context *brw = brw_context(ctx);
struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
/* _mesa_MapBufferRange (GL entrypoint) sets these, but the vbo module also
* internally uses our functions directly.
*/
- obj->Offset = offset;
- obj->Length = length;
- obj->AccessFlags = access;
+ obj->Mappings[index].Offset = offset;
+ obj->Mappings[index].Length = length;
+ obj->Mappings[index].AccessFlags = access;
if (intel_obj->buffer == NULL) {
- obj->Pointer = NULL;
+ obj->Mappings[index].Pointer = NULL;
return NULL;
}
const unsigned extra = (uintptr_t) offset % alignment;
if (access & GL_MAP_FLUSH_EXPLICIT_BIT) {
- intel_obj->range_map_buffer = _mesa_align_malloc(length + extra,
- alignment);
- obj->Pointer = intel_obj->range_map_buffer + extra;
+ intel_obj->range_map_buffer[index] = _mesa_align_malloc(length + extra,
+ alignment);
+ obj->Mappings[index].Pointer =
+ intel_obj->range_map_buffer[index] + extra;
} else {
- intel_obj->range_map_bo = drm_intel_bo_alloc(brw->bufmgr,
- "range map",
- length + extra,
- alignment);
+ intel_obj->range_map_bo[index] = drm_intel_bo_alloc(brw->bufmgr,
+ "range map",
+ length + extra,
+ alignment);
if (!(access & GL_MAP_READ_BIT)) {
- drm_intel_gem_bo_map_gtt(intel_obj->range_map_bo);
+ drm_intel_gem_bo_map_gtt(intel_obj->range_map_bo[index]);
} else {
- drm_intel_bo_map(intel_obj->range_map_bo,
+ drm_intel_bo_map(intel_obj->range_map_bo[index],
(access & GL_MAP_WRITE_BIT) != 0);
}
- obj->Pointer = intel_obj->range_map_bo->virtual + extra;
+ obj->Mappings[index].Pointer =
+ intel_obj->range_map_bo[index]->virtual + extra;
}
- return obj->Pointer;
+ return obj->Mappings[index].Pointer;
}
if (access & GL_MAP_UNSYNCHRONIZED_BIT)
intel_bufferobj_mark_inactive(intel_obj);
}
- obj->Pointer = intel_obj->buffer->virtual + offset;
- return obj->Pointer;
+ obj->Mappings[index].Pointer = intel_obj->buffer->virtual + offset;
+ return obj->Mappings[index].Pointer;
}
/**
static void
intel_bufferobj_flush_mapped_range(struct gl_context *ctx,
GLintptr offset, GLsizeiptr length,
- struct gl_buffer_object *obj)
+ struct gl_buffer_object *obj,
+ gl_map_buffer_index index)
{
struct brw_context *brw = brw_context(ctx);
struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
/* Unless we're in the range map using a temporary system buffer,
* there's no work to do.
*/
- if (intel_obj->range_map_buffer == NULL)
+ if (intel_obj->range_map_buffer[index] == NULL)
return;
if (length == 0)
* former points to the actual mapping while the latter may be offset to
* meet alignment guarantees.
*/
- drm_intel_bo_subdata(temp_bo, 0, length, obj->Pointer);
+ drm_intel_bo_subdata(temp_bo, 0, length, obj->Mappings[index].Pointer);
intel_emit_linear_blit(brw,
- intel_obj->buffer, obj->Offset + offset,
+ intel_obj->buffer,
+ obj->Mappings[index].Offset + offset,
temp_bo, 0,
length);
- intel_bufferobj_mark_gpu_usage(intel_obj, obj->Offset + offset, length);
+ intel_bufferobj_mark_gpu_usage(intel_obj,
+ obj->Mappings[index].Offset + offset,
+ length);
drm_intel_bo_unreference(temp_bo);
}
* Implements glUnmapBuffer().
*/
static GLboolean
-intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj)
+intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj,
+ gl_map_buffer_index index)
{
struct brw_context *brw = brw_context(ctx);
struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
assert(intel_obj);
- assert(obj->Pointer);
- if (intel_obj->range_map_buffer != NULL) {
+ assert(obj->Mappings[index].Pointer);
+ if (intel_obj->range_map_buffer[index] != NULL) {
/* Since we've emitted some blits to buffers that will (likely) be used
* in rendering operations in other cache domains in this batch, emit a
* flush. Once again, we wish for a domain tracker in libdrm to cover
* usage inside of a batchbuffer.
*/
intel_batchbuffer_emit_mi_flush(brw);
- _mesa_align_free(intel_obj->range_map_buffer);
- intel_obj->range_map_buffer = NULL;
+ _mesa_align_free(intel_obj->range_map_buffer[index]);
+ intel_obj->range_map_buffer[index] = NULL;
- } else if (intel_obj->range_map_bo != NULL) {
+ } else if (intel_obj->range_map_bo[index] != NULL) {
- const unsigned extra = obj->Pointer - intel_obj->range_map_bo->virtual;
+ const unsigned extra = obj->Mappings[index].Pointer -
+ intel_obj->range_map_bo[index]->virtual;
- drm_intel_bo_unmap(intel_obj->range_map_bo);
+ drm_intel_bo_unmap(intel_obj->range_map_bo[index]);
intel_emit_linear_blit(brw,
- intel_obj->buffer, obj->Offset,
- intel_obj->range_map_bo, extra,
- obj->Length);
- intel_bufferobj_mark_gpu_usage(intel_obj, obj->Offset, obj->Length);
+ intel_obj->buffer, obj->Mappings[index].Offset,
+ intel_obj->range_map_bo[index], extra,
+ obj->Mappings[index].Length);
+ intel_bufferobj_mark_gpu_usage(intel_obj, obj->Mappings[index].Offset,
+ obj->Mappings[index].Length);
/* Since we've emitted some blits to buffers that will (likely) be used
* in rendering operations in other cache domains in this batch, emit a
*/
intel_batchbuffer_emit_mi_flush(brw);
- drm_intel_bo_unreference(intel_obj->range_map_bo);
- intel_obj->range_map_bo = NULL;
+ drm_intel_bo_unreference(intel_obj->range_map_bo[index]);
+ intel_obj->range_map_bo[index] = NULL;
} else if (intel_obj->buffer != NULL) {
drm_intel_bo_unmap(intel_obj->buffer);
}
- obj->Pointer = NULL;
- obj->Offset = 0;
- obj->Length = 0;
+ obj->Mappings[index].Pointer = NULL;
+ obj->Mappings[index].Offset = 0;
+ obj->Mappings[index].Length = 0;
return true;
}
struct gl_buffer_object Base;
drm_intel_bo *buffer; /* the low-level buffer manager's buffer handle */
- drm_intel_bo *range_map_bo;
- void *range_map_buffer;
- unsigned int range_map_offset;
+ drm_intel_bo *range_map_bo[MAP_COUNT];
+ void *range_map_buffer[MAP_COUNT];
/** @{
* Tracking for what range of the BO may currently be in use by the GPU.
buf = (GLubyte *) ctx->Driver.MapBufferRange(ctx, 0, unpack->BufferObj->Size,
GL_MAP_READ_BIT,
- unpack->BufferObj);
+ unpack->BufferObj,
+ MAP_INTERNAL);
if (!buf) {
_mesa_error(ctx, GL_INVALID_OPERATION, "glBitmap(PBO is mapped)");
return NULL;
if (_mesa_is_bufferobj(unpack->BufferObj)) {
/* done with PBO so unmap it now */
- ctx->Driver.UnmapBuffer(ctx, unpack->BufferObj);
+ ctx->Driver.UnmapBuffer(ctx, unpack->BufferObj, MAP_INTERNAL);
}
intel_check_front_buffer_rendering(brw);
static void *
nouveau_bufferobj_map_range(struct gl_context *ctx, GLintptr offset,
GLsizeiptr length, GLbitfield access,
- struct gl_buffer_object *obj)
+ struct gl_buffer_object *obj,
+ gl_map_buffer_index index)
{
unsigned flags = 0;
char *map;
- assert(!obj->Pointer);
+ assert(!obj->Mappings[index].Pointer);
if (!(access & GL_MAP_UNSYNCHRONIZED_BIT)) {
if (access & GL_MAP_READ_BIT)
if (!map)
return NULL;
- obj->Pointer = map + offset;
- obj->Offset = offset;
- obj->Length = length;
- obj->AccessFlags = access;
+ obj->Mappings[index].Pointer = map + offset;
+ obj->Mappings[index].Offset = offset;
+ obj->Mappings[index].Length = length;
+ obj->Mappings[index].AccessFlags = access;
- return obj->Pointer;
+ return obj->Mappings[index].Pointer;
}
static GLboolean
-nouveau_bufferobj_unmap(struct gl_context *ctx, struct gl_buffer_object *obj)
+nouveau_bufferobj_unmap(struct gl_context *ctx, struct gl_buffer_object *obj,
+ gl_map_buffer_index index)
{
- assert(obj->Pointer);
+ assert(obj->Mappings[index].Pointer);
- obj->Pointer = NULL;
- obj->Offset = 0;
- obj->Length = 0;
- obj->AccessFlags = 0;
+ obj->Mappings[index].Pointer = NULL;
+ obj->Mappings[index].Offset = 0;
+ obj->Mappings[index].Length = 0;
+ obj->Mappings[index].AccessFlags = 0;
return GL_TRUE;
}
struct gl_buffer_object *obj)
{
struct radeon_buffer_object *radeon_obj = get_radeon_buffer_object(obj);
+ int i;
- if (obj->Pointer) {
- radeon_bo_unmap(radeon_obj->bo);
+ for (i = 0; i < MAP_COUNT; i++) {
+ if (obj->Mappings[i].Pointer) {
+ radeon_bo_unmap(radeon_obj->bo);
+ }
}
if (radeon_obj->bo) {
static void *
radeonMapBufferRange(struct gl_context * ctx,
GLintptr offset, GLsizeiptr length,
- GLbitfield access, struct gl_buffer_object *obj)
+ GLbitfield access, struct gl_buffer_object *obj,
+ gl_map_buffer_index index)
{
struct radeon_buffer_object *radeon_obj = get_radeon_buffer_object(obj);
const GLboolean write_only =
}
if (radeon_obj->bo == NULL) {
- obj->Pointer = NULL;
+ obj->Mappings[index].Pointer = NULL;
return NULL;
}
- obj->Offset = offset;
- obj->Length = length;
- obj->AccessFlags = access;
+ obj->Mappings[index].Offset = offset;
+ obj->Mappings[index].Length = length;
+ obj->Mappings[index].AccessFlags = access;
radeon_bo_map(radeon_obj->bo, write_only);
- obj->Pointer = radeon_obj->bo->ptr + offset;
- return obj->Pointer;
+ obj->Mappings[index].Pointer = radeon_obj->bo->ptr + offset;
+ return obj->Mappings[index].Pointer;
}
*/
static GLboolean
radeonUnmapBuffer(struct gl_context * ctx,
- struct gl_buffer_object *obj)
+ struct gl_buffer_object *obj,
+ gl_map_buffer_index index)
{
struct radeon_buffer_object *radeon_obj = get_radeon_buffer_object(obj);
radeon_bo_unmap(radeon_obj->bo);
}
- obj->Pointer = NULL;
- obj->Offset = 0;
- obj->Length = 0;
+ obj->Mappings[index].Pointer = NULL;
+ obj->Mappings[index].Offset = 0;
+ obj->Mappings[index].Length = 0;
return GL_TRUE;
}
buf = (GLubyte *) ctx->Driver.MapBufferRange(ctx, 0,
unpack->BufferObj->Size,
GL_MAP_READ_BIT,
- unpack->BufferObj);
+ unpack->BufferObj,
+ MAP_INTERNAL);
if (!buf) {
- /* buffer is already mapped - that's an error */
- _mesa_error(ctx, GL_INVALID_OPERATION,
- "glDrawPixels(PBO is mapped)");
- return;
+ return; /* error */
}
pixels = ADD_POINTERS(buf, pixels);
}
}
if (_mesa_is_bufferobj(unpack->BufferObj)) {
- ctx->Driver.UnmapBuffer(ctx, unpack->BufferObj);
+ ctx->Driver.UnmapBuffer(ctx, unpack->BufferObj, MAP_INTERNAL);
}
}
else {
buf = (GLubyte *) ctx->Driver.MapBufferRange(ctx, 0,
unpack->BufferObj->Size,
GL_MAP_READ_BIT,
- unpack->BufferObj);
+ unpack->BufferObj,
+ MAP_INTERNAL);
if (!buf) {
- /* buffer is already mapped - that's an error */
- _mesa_error(ctx, GL_INVALID_OPERATION,
- "glDrawPixels(PBO is mapped)");
- return;
+ return; /* error */
}
pixels = ADD_POINTERS(buf, pixels);
}
}
if (unpack->BufferObj->Name) {
- ctx->Driver.UnmapBuffer(ctx, unpack->BufferObj);
+ ctx->Driver.UnmapBuffer(ctx, unpack->BufferObj, MAP_INTERNAL);
}
}
else {
static void
check_vbo(AEcontext *actx, struct gl_buffer_object *vbo)
{
- if (_mesa_is_bufferobj(vbo) && !_mesa_bufferobj_mapped(vbo)) {
+ if (_mesa_is_bufferobj(vbo) &&
+ !_mesa_bufferobj_mapped(vbo, MAP_INTERNAL)) {
GLuint i;
for (i = 0; i < actx->nr_vbos; i++)
if (actx->vbo[i] == vbo)
ctx->Driver.MapBufferRange(ctx, 0,
actx->vbo[i]->Size,
GL_MAP_READ_BIT,
- actx->vbo[i]);
+ actx->vbo[i],
+ MAP_INTERNAL);
if (actx->nr_vbos)
actx->mapped_vbos = GL_TRUE;
assert (!actx->NewState);
for (i = 0; i < actx->nr_vbos; i++)
- ctx->Driver.UnmapBuffer(ctx, actx->vbo[i]);
+ ctx->Driver.UnmapBuffer(ctx, actx->vbo[i], MAP_INTERNAL);
actx->mapped_vbos = GL_FALSE;
}
/* emit generic attribute elements */
for (at = actx->attribs; at->func; at++) {
const GLubyte *src
- = ADD_POINTERS(at->array->BufferObj->Pointer, at->array->Ptr)
+ = ADD_POINTERS(at->array->BufferObj->Mappings[MAP_INTERNAL].Pointer,
+ at->array->Ptr)
+ elt * at->array->StrideB;
at->func(at->index, src);
}
/* emit conventional arrays elements */
for (aa = actx->arrays; aa->offset != -1 ; aa++) {
const GLubyte *src
- = ADD_POINTERS(aa->array->BufferObj->Pointer, aa->array->Ptr)
+ = ADD_POINTERS(aa->array->BufferObj->Mappings[MAP_INTERNAL].Pointer,
+ aa->array->Ptr)
+ elt * aa->array->StrideB;
CALL_by_offset(disp, (array_func), aa->offset, ((const void *) src));
}
if (_mesa_is_bufferobj(elementBuf)) {
/* elements are in a user-defined buffer object. need to map it */
map = ctx->Driver.MapBufferRange(ctx, 0, elementBuf->Size,
- GL_MAP_READ_BIT, elementBuf);
+ GL_MAP_READ_BIT, elementBuf,
+ MAP_INTERNAL);
/* Actual address is the sum of pointers */
indices = (const GLvoid *) ADD_POINTERS(map, (const GLubyte *) indices);
}
}
if (map) {
- ctx->Driver.UnmapBuffer(ctx, elementBuf);
+ ctx->Driver.UnmapBuffer(ctx, elementBuf, MAP_INTERNAL);
}
return max;
bufferobj_range_mapped(const struct gl_buffer_object *obj,
GLintptr offset, GLsizeiptr size)
{
- if (_mesa_bufferobj_mapped(obj)) {
+ if (_mesa_bufferobj_mapped(obj, MAP_USER)) {
const GLintptr end = offset + size;
- const GLintptr mapEnd = obj->Offset + obj->Length;
+ const GLintptr mapEnd = obj->Mappings[MAP_USER].Offset +
+ obj->Mappings[MAP_USER].Length;
- if (!(end <= obj->Offset || offset >= mapEnd)) {
+ if (!(end <= obj->Mappings[MAP_USER].Offset || offset >= mapEnd)) {
return true;
}
}
return NULL;
}
- if (bufObj->AccessFlags & GL_MAP_PERSISTENT_BIT)
+ if (bufObj->Mappings[MAP_USER].AccessFlags & GL_MAP_PERSISTENT_BIT)
return bufObj;
if (mappedRange) {
}
}
else {
- if (_mesa_bufferobj_mapped(bufObj)) {
+ if (_mesa_bufferobj_mapped(bufObj, MAP_USER)) {
_mesa_error(ctx, GL_INVALID_OPERATION, "%s", caller);
return NULL;
}
obj->RefCount = 1;
obj->Name = name;
obj->Usage = GL_STATIC_DRAW_ARB;
- obj->AccessFlags = 0;
}
GLsizeiptr i;
GLubyte *dest;
- if (_mesa_bufferobj_mapped(bufObj)) {
- GLubyte *data = malloc(size);
- GLubyte *dataStart = data;
- if (data == NULL) {
- _mesa_error(ctx, GL_OUT_OF_MEMORY, "glClearBuffer[Sub]Data");
- return;
- }
-
- if (clearValue == NULL) {
- /* Clear with zeros, per the spec */
- memset(data, 0, size);
- }
- else {
- for (i = 0; i < size/clearValueSize; ++i) {
- memcpy(data, clearValue, clearValueSize);
- data += clearValueSize;
- }
- }
- ctx->Driver.BufferSubData(ctx, offset, size, dataStart, bufObj);
- return;
- }
-
ASSERT(ctx->Driver.MapBufferRange);
dest = ctx->Driver.MapBufferRange(ctx, offset, size,
GL_MAP_WRITE_BIT |
GL_MAP_INVALIDATE_RANGE_BIT,
- bufObj);
+ bufObj, MAP_INTERNAL);
if (!dest) {
_mesa_error(ctx, GL_OUT_OF_MEMORY, "glClearBuffer[Sub]Data");
if (clearValue == NULL) {
/* Clear with zeros, per the spec */
memset(dest, 0, size);
- ctx->Driver.UnmapBuffer(ctx, bufObj);
+ ctx->Driver.UnmapBuffer(ctx, bufObj, MAP_INTERNAL);
return;
}
dest += clearValueSize;
}
- ctx->Driver.UnmapBuffer(ctx, bufObj);
+ ctx->Driver.UnmapBuffer(ctx, bufObj, MAP_INTERNAL);
}
static void *
_mesa_buffer_map_range( struct gl_context *ctx, GLintptr offset,
GLsizeiptr length, GLbitfield access,
- struct gl_buffer_object *bufObj )
+ struct gl_buffer_object *bufObj,
+ gl_map_buffer_index index)
{
(void) ctx;
- assert(!_mesa_bufferobj_mapped(bufObj));
+ assert(!_mesa_bufferobj_mapped(bufObj, index));
/* Just return a direct pointer to the data */
- bufObj->Pointer = bufObj->Data + offset;
- bufObj->Length = length;
- bufObj->Offset = offset;
- bufObj->AccessFlags = access;
- return bufObj->Pointer;
+ bufObj->Mappings[index].Pointer = bufObj->Data + offset;
+ bufObj->Mappings[index].Length = length;
+ bufObj->Mappings[index].Offset = offset;
+ bufObj->Mappings[index].AccessFlags = access;
+ return bufObj->Mappings[index].Pointer;
}
static void
_mesa_buffer_flush_mapped_range( struct gl_context *ctx,
GLintptr offset, GLsizeiptr length,
- struct gl_buffer_object *obj )
+ struct gl_buffer_object *obj,
+ gl_map_buffer_index index)
{
(void) ctx;
(void) offset;
* \sa glUnmapBufferARB, dd_function_table::UnmapBuffer
*/
static GLboolean
-_mesa_buffer_unmap( struct gl_context *ctx, struct gl_buffer_object *bufObj )
+_mesa_buffer_unmap(struct gl_context *ctx, struct gl_buffer_object *bufObj,
+ gl_map_buffer_index index)
{
(void) ctx;
- /* XXX we might assert here that bufObj->Pointer is non-null */
+ /* XXX we might assert here that bufObj->Mappings[index].Pointer is non-null */
- bufObj->Pointer = NULL;
- bufObj->Length = 0;
- bufObj->Offset = 0;
- bufObj->AccessFlags = 0x0;
+ bufObj->Mappings[index].Pointer = NULL;
+ bufObj->Mappings[index].Length = 0;
+ bufObj->Mappings[index].Offset = 0;
+ bufObj->Mappings[index].AccessFlags = 0x0;
return GL_TRUE;
}
{
GLubyte *srcPtr, *dstPtr;
- /* the buffers should not be mapped */
- assert(!_mesa_bufferobj_mapped(src));
- assert(!_mesa_bufferobj_mapped(dst));
-
if (src == dst) {
srcPtr = dstPtr = ctx->Driver.MapBufferRange(ctx, 0, src->Size,
GL_MAP_READ_BIT |
- GL_MAP_WRITE_BIT, src);
+ GL_MAP_WRITE_BIT, src,
+ MAP_INTERNAL);
if (!srcPtr)
return;
dstPtr += writeOffset;
} else {
srcPtr = ctx->Driver.MapBufferRange(ctx, readOffset, size,
- GL_MAP_READ_BIT, src);
+ GL_MAP_READ_BIT, src,
+ MAP_INTERNAL);
dstPtr = ctx->Driver.MapBufferRange(ctx, writeOffset, size,
(GL_MAP_WRITE_BIT |
- GL_MAP_INVALIDATE_RANGE_BIT), dst);
+ GL_MAP_INVALIDATE_RANGE_BIT), dst,
+ MAP_INTERNAL);
}
/* Note: the src and dst regions will never overlap. Trying to do so
if (srcPtr && dstPtr)
memcpy(dstPtr, srcPtr, size);
- ctx->Driver.UnmapBuffer(ctx, src);
+ ctx->Driver.UnmapBuffer(ctx, src, MAP_INTERNAL);
if (dst != src)
- ctx->Driver.UnmapBuffer(ctx, dst);
+ ctx->Driver.UnmapBuffer(ctx, dst, MAP_INTERNAL);
}
}
+void
+_mesa_buffer_unmap_all_mappings(struct gl_context *ctx,
+ struct gl_buffer_object *bufObj)
+{
+ int i;
+
+ for (i = 0; i < MAP_COUNT; i++) {
+ if (_mesa_bufferobj_mapped(bufObj, i)) {
+ ctx->Driver.UnmapBuffer(ctx, bufObj, i);
+ ASSERT(bufObj->Mappings[i].Pointer == NULL);
+ bufObj->Mappings[i].AccessFlags = 0;
+ }
+ }
+}
+
/**********************************************************************/
/* API Functions */
ASSERT(bufObj->Name == ids[i] || bufObj == &DummyBufferObject);
- if (_mesa_bufferobj_mapped(bufObj)) {
- /* if mapped, unmap it now */
- ctx->Driver.UnmapBuffer(ctx, bufObj);
- bufObj->AccessFlags = 0;
- bufObj->Pointer = NULL;
- }
+ _mesa_buffer_unmap_all_mappings(ctx, bufObj);
/* unbind any vertex pointers bound to this buffer */
for (j = 0; j < Elements(vao->VertexBinding); j++) {
return;
}
- if (_mesa_bufferobj_mapped(bufObj)) {
- /* Unmap the existing buffer. We'll replace it now. Not an error. */
- ctx->Driver.UnmapBuffer(ctx, bufObj);
- bufObj->AccessFlags = 0;
- ASSERT(bufObj->Pointer == NULL);
- }
+ /* Unmap the existing buffer. We'll replace it now. Not an error. */
+ _mesa_buffer_unmap_all_mappings(ctx, bufObj);
FLUSH_VERTICES(ctx, _NEW_BUFFER_OBJECT);
return;
}
- if (_mesa_bufferobj_mapped(bufObj)) {
- /* Unmap the existing buffer. We'll replace it now. Not an error. */
- ctx->Driver.UnmapBuffer(ctx, bufObj);
- bufObj->AccessFlags = 0;
- ASSERT(bufObj->Pointer == NULL);
- }
+ /* Unmap the existing buffer. We'll replace it now. Not an error. */
+ _mesa_buffer_unmap_all_mappings(ctx, bufObj);
FLUSH_VERTICES(ctx, _NEW_BUFFER_OBJECT);
return NULL;
}
- if (_mesa_bufferobj_mapped(bufObj)) {
+ if (_mesa_bufferobj_mapped(bufObj, MAP_USER)) {
_mesa_error(ctx, GL_INVALID_OPERATION, "glMapBufferARB(already mapped)");
return NULL;
}
}
ASSERT(ctx->Driver.MapBufferRange);
- map = ctx->Driver.MapBufferRange(ctx, 0, bufObj->Size, accessFlags, bufObj);
+ map = ctx->Driver.MapBufferRange(ctx, 0, bufObj->Size, accessFlags, bufObj,
+ MAP_USER);
if (!map) {
_mesa_error(ctx, GL_OUT_OF_MEMORY, "glMapBufferARB(map failed)");
return NULL;
* This is important because other modules (like VBO) might call
* the driver function directly.
*/
- ASSERT(bufObj->Pointer == map);
- ASSERT(bufObj->Length == bufObj->Size);
- ASSERT(bufObj->Offset == 0);
- bufObj->AccessFlags = accessFlags;
+ ASSERT(bufObj->Mappings[MAP_USER].Pointer == map);
+ ASSERT(bufObj->Mappings[MAP_USER].Length == bufObj->Size);
+ ASSERT(bufObj->Mappings[MAP_USER].Offset == 0);
+ bufObj->Mappings[MAP_USER].AccessFlags = accessFlags;
}
if (access == GL_WRITE_ONLY_ARB || access == GL_READ_WRITE_ARB)
}
#endif
- return bufObj->Pointer;
+ return bufObj->Mappings[MAP_USER].Pointer;
}
if (!bufObj)
return GL_FALSE;
- if (!_mesa_bufferobj_mapped(bufObj)) {
+ if (!_mesa_bufferobj_mapped(bufObj, MAP_USER)) {
_mesa_error(ctx, GL_INVALID_OPERATION, "glUnmapBufferARB");
return GL_FALSE;
}
}
#endif
- status = ctx->Driver.UnmapBuffer( ctx, bufObj );
- bufObj->AccessFlags = 0;
- ASSERT(bufObj->Pointer == NULL);
- ASSERT(bufObj->Offset == 0);
- ASSERT(bufObj->Length == 0);
+ status = ctx->Driver.UnmapBuffer(ctx, bufObj, MAP_USER);
+ bufObj->Mappings[MAP_USER].AccessFlags = 0;
+ ASSERT(bufObj->Mappings[MAP_USER].Pointer == NULL);
+ ASSERT(bufObj->Mappings[MAP_USER].Offset == 0);
+ ASSERT(bufObj->Mappings[MAP_USER].Length == 0);
return status;
}
*params = bufObj->Usage;
return;
case GL_BUFFER_ACCESS_ARB:
- *params = simplified_access_mode(ctx, bufObj->AccessFlags);
+ *params = simplified_access_mode(ctx,
+ bufObj->Mappings[MAP_USER].AccessFlags);
return;
case GL_BUFFER_MAPPED_ARB:
- *params = _mesa_bufferobj_mapped(bufObj);
+ *params = _mesa_bufferobj_mapped(bufObj, MAP_USER);
return;
case GL_BUFFER_ACCESS_FLAGS:
if (!ctx->Extensions.ARB_map_buffer_range)
goto invalid_pname;
- *params = bufObj->AccessFlags;
+ *params = bufObj->Mappings[MAP_USER].AccessFlags;
return;
case GL_BUFFER_MAP_OFFSET:
if (!ctx->Extensions.ARB_map_buffer_range)
goto invalid_pname;
- *params = (GLint) bufObj->Offset;
+ *params = (GLint) bufObj->Mappings[MAP_USER].Offset;
return;
case GL_BUFFER_MAP_LENGTH:
if (!ctx->Extensions.ARB_map_buffer_range)
goto invalid_pname;
- *params = (GLint) bufObj->Length;
+ *params = (GLint) bufObj->Mappings[MAP_USER].Length;
return;
case GL_BUFFER_IMMUTABLE_STORAGE:
if (!ctx->Extensions.ARB_buffer_storage)
*params = bufObj->Usage;
return;
case GL_BUFFER_ACCESS_ARB:
- *params = simplified_access_mode(ctx, bufObj->AccessFlags);
+ *params = simplified_access_mode(ctx,
+ bufObj->Mappings[MAP_USER].AccessFlags);
return;
case GL_BUFFER_ACCESS_FLAGS:
if (!ctx->Extensions.ARB_map_buffer_range)
goto invalid_pname;
- *params = bufObj->AccessFlags;
+ *params = bufObj->Mappings[MAP_USER].AccessFlags;
return;
case GL_BUFFER_MAPPED_ARB:
- *params = _mesa_bufferobj_mapped(bufObj);
+ *params = _mesa_bufferobj_mapped(bufObj, MAP_USER);
return;
case GL_BUFFER_MAP_OFFSET:
if (!ctx->Extensions.ARB_map_buffer_range)
goto invalid_pname;
- *params = bufObj->Offset;
+ *params = bufObj->Mappings[MAP_USER].Offset;
return;
case GL_BUFFER_MAP_LENGTH:
if (!ctx->Extensions.ARB_map_buffer_range)
goto invalid_pname;
- *params = bufObj->Length;
+ *params = bufObj->Mappings[MAP_USER].Length;
return;
case GL_BUFFER_IMMUTABLE_STORAGE:
if (!ctx->Extensions.ARB_buffer_storage)
if (!bufObj)
return;
- *params = bufObj->Pointer;
+ *params = bufObj->Mappings[MAP_USER].Pointer;
}
return NULL;
}
- if (_mesa_bufferobj_mapped(bufObj)) {
+ if (_mesa_bufferobj_mapped(bufObj, MAP_USER)) {
_mesa_error(ctx, GL_INVALID_OPERATION,
"glMapBufferRange(buffer already mapped)");
return NULL;
/* Mapping zero bytes should return a non-null pointer. */
if (!length) {
static long dummy = 0;
- bufObj->Pointer = &dummy;
- bufObj->Length = length;
- bufObj->Offset = offset;
- bufObj->AccessFlags = access;
- return bufObj->Pointer;
+ bufObj->Mappings[MAP_USER].Pointer = &dummy;
+ bufObj->Mappings[MAP_USER].Length = length;
+ bufObj->Mappings[MAP_USER].Offset = offset;
+ bufObj->Mappings[MAP_USER].AccessFlags = access;
+ return bufObj->Mappings[MAP_USER].Pointer;
}
ASSERT(ctx->Driver.MapBufferRange);
- map = ctx->Driver.MapBufferRange(ctx, offset, length, access, bufObj);
+ map = ctx->Driver.MapBufferRange(ctx, offset, length, access, bufObj,
+ MAP_USER);
if (!map) {
_mesa_error(ctx, GL_OUT_OF_MEMORY, "glMapBufferARB(map failed)");
}
* This is important because other modules (like VBO) might call
* the driver function directly.
*/
- ASSERT(bufObj->Pointer == map);
- ASSERT(bufObj->Length == length);
- ASSERT(bufObj->Offset == offset);
- ASSERT(bufObj->AccessFlags == access);
+ ASSERT(bufObj->Mappings[MAP_USER].Pointer == map);
+ ASSERT(bufObj->Mappings[MAP_USER].Length == length);
+ ASSERT(bufObj->Mappings[MAP_USER].Offset == offset);
+ ASSERT(bufObj->Mappings[MAP_USER].AccessFlags == access);
}
return map;
if (!bufObj)
return;
- if (!_mesa_bufferobj_mapped(bufObj)) {
+ if (!_mesa_bufferobj_mapped(bufObj, MAP_USER)) {
/* buffer is not mapped */
_mesa_error(ctx, GL_INVALID_OPERATION,
"glFlushMappedBufferRange(buffer is not mapped)");
return;
}
- if ((bufObj->AccessFlags & GL_MAP_FLUSH_EXPLICIT_BIT) == 0) {
+ if ((bufObj->Mappings[MAP_USER].AccessFlags &
+ GL_MAP_FLUSH_EXPLICIT_BIT) == 0) {
_mesa_error(ctx, GL_INVALID_OPERATION,
"glFlushMappedBufferRange(GL_MAP_FLUSH_EXPLICIT_BIT not set)");
return;
}
- if (offset + length > bufObj->Length) {
+ if (offset + length > bufObj->Mappings[MAP_USER].Length) {
_mesa_error(ctx, GL_INVALID_VALUE,
"glFlushMappedBufferRange(offset %ld + length %ld > mapped length %ld)",
- (long)offset, (long)length, (long)bufObj->Length);
+ (long)offset, (long)length,
+ (long)bufObj->Mappings[MAP_USER].Length);
return;
}
- ASSERT(bufObj->AccessFlags & GL_MAP_WRITE_BIT);
+ ASSERT(bufObj->Mappings[MAP_USER].AccessFlags & GL_MAP_WRITE_BIT);
if (ctx->Driver.FlushMappedBufferRange)
- ctx->Driver.FlushMappedBufferRange(ctx, offset, length, bufObj);
+ ctx->Driver.FlushMappedBufferRange(ctx, offset, length, bufObj,
+ MAP_USER);
}
* currently mapped by MapBufferRange, unless it was mapped
* with MAP_PERSISTENT_BIT set in the MapBufferRange access flags."
*/
- if (!(bufObj->AccessFlags & GL_MAP_PERSISTENT_BIT) &&
+ if (!(bufObj->Mappings[MAP_USER].AccessFlags & GL_MAP_PERSISTENT_BIT) &&
bufferobj_range_mapped(bufObj, offset, length)) {
_mesa_error(ctx, GL_INVALID_OPERATION,
"glInvalidateBufferSubData(intersection with mapped "
*/
-/** Is the given buffer object currently mapped? */
+/** Is the given buffer object currently mapped by the GL user? */
static inline GLboolean
-_mesa_bufferobj_mapped(const struct gl_buffer_object *obj)
+_mesa_bufferobj_mapped(const struct gl_buffer_object *obj,
+ gl_map_buffer_index index)
{
- return obj->Pointer != NULL;
+ return obj->Mappings[index].Pointer != NULL;
}
/** Can we not use this buffer while mapped? */
static inline GLboolean
_mesa_check_disallowed_mapping(const struct gl_buffer_object *obj)
{
- return _mesa_bufferobj_mapped(obj) &&
- !(obj->AccessFlags & GL_MAP_PERSISTENT_BIT);
+ return _mesa_bufferobj_mapped(obj, MAP_USER) &&
+ !(obj->Mappings[MAP_USER].AccessFlags &
+ GL_MAP_PERSISTENT_BIT);
}
/**
extern void
_mesa_init_buffer_object_functions(struct dd_function_table *driver);
+extern void
+_mesa_buffer_unmap_all_mappings(struct gl_context *ctx,
+ struct gl_buffer_object *bufObj);
/*
* API functions
*/
void * (*MapBufferRange)( struct gl_context *ctx, GLintptr offset,
GLsizeiptr length, GLbitfield access,
- struct gl_buffer_object *obj);
+ struct gl_buffer_object *obj,
+ gl_map_buffer_index index);
void (*FlushMappedBufferRange)(struct gl_context *ctx,
GLintptr offset, GLsizeiptr length,
- struct gl_buffer_object *obj);
+ struct gl_buffer_object *obj,
+ gl_map_buffer_index index);
GLboolean (*UnmapBuffer)( struct gl_context *ctx,
- struct gl_buffer_object *obj );
+ struct gl_buffer_object *obj,
+ gl_map_buffer_index index);
/*@}*/
/**
map = (GLubyte *)
ctx->Driver.MapBufferRange(ctx, 0, unpack->BufferObj->Size,
- GL_MAP_READ_BIT, unpack->BufferObj);
+ GL_MAP_READ_BIT, unpack->BufferObj,
+ MAP_INTERNAL);
if (!map) {
/* unable to map src buffer! */
_mesa_error(ctx, GL_INVALID_OPERATION, "unable to map PBO");
image = _mesa_unpack_image(dimensions, width, height, depth,
format, type, src, unpack);
- ctx->Driver.UnmapBuffer(ctx, unpack->BufferObj);
+ ctx->Driver.UnmapBuffer(ctx, unpack->BufferObj, MAP_INTERNAL);
if (!image) {
_mesa_error(ctx, GL_OUT_OF_MEMORY, "display list construction");
};
+typedef enum {
+ MAP_USER,
+ MAP_INTERNAL,
+
+ MAP_COUNT
+} gl_map_buffer_index;
+
+
+/**
+ * Fields describing a mapped buffer range.
+ */
+struct gl_buffer_mapping {
+ GLbitfield AccessFlags; /**< Mask of GL_MAP_x_BIT flags */
+ GLvoid *Pointer; /**< User-space address of mapping */
+ GLintptr Offset; /**< Mapped offset */
+ GLsizeiptr Length; /**< Mapped length */
+};
+
+
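
A minimal sketch, not part of the patch itself, of what the two slots buy: an
internal mapping made through MAP_INTERNAL coexists with an application
mapping held in MAP_USER, since each path reads and writes only its own
Mappings[] entry. The helper name below and the direct driver-hook calls are
illustrative only.

   /* Hypothetical helper, shown only to illustrate the new index parameter. */
   static void
   example_map_internally_while_user_mapped(struct gl_context *ctx,
                                            struct gl_buffer_object *obj)
   {
      /* Application mapping, recorded in obj->Mappings[MAP_USER]. */
      void *user = ctx->Driver.MapBufferRange(ctx, 0, obj->Size,
                                              GL_MAP_WRITE_BIT |
                                              GL_MAP_PERSISTENT_BIT,
                                              obj, MAP_USER);

      /* Internal mapping for a driver/core fallback path, recorded in
       * obj->Mappings[MAP_INTERNAL]; unmapping it leaves MAP_USER intact.
       */
      void *tmp = ctx->Driver.MapBufferRange(ctx, 0, obj->Size,
                                             GL_MAP_READ_BIT, obj,
                                             MAP_INTERNAL);
      (void) user;
      (void) tmp;
      ctx->Driver.UnmapBuffer(ctx, obj, MAP_INTERNAL);
      /* obj->Mappings[MAP_USER].Pointer is still valid here. */
   }
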
/**
* GL_ARB_vertex/pixel_buffer_object buffer object
*/
GLbitfield StorageFlags; /**< GL_MAP_PERSISTENT_BIT, etc. */
GLsizeiptrARB Size; /**< Size of buffer storage in bytes */
GLubyte *Data; /**< Location of storage either in RAM or VRAM. */
- /** Fields describing a mapped buffer */
- /*@{*/
- GLbitfield AccessFlags; /**< Mask of GL_MAP_x_BIT flags */
- GLvoid *Pointer; /**< User-space address of mapping */
- GLintptr Offset; /**< Mapped offset */
- GLsizeiptr Length; /**< Mapped length */
- /*@}*/
GLboolean DeletePending; /**< true if buffer object is removed from the hash */
GLboolean Written; /**< Ever written to? (for debugging) */
GLboolean Purgeable; /**< Is the buffer purgeable under memory pressure? */
GLboolean Immutable; /**< GL_ARB_buffer_storage */
+
+ struct gl_buffer_mapping Mappings[MAP_COUNT];
};
buf = (GLubyte *) ctx->Driver.MapBufferRange(ctx, 0,
unpack->BufferObj->Size,
GL_MAP_READ_BIT,
- unpack->BufferObj);
+ unpack->BufferObj,
+ MAP_INTERNAL);
if (!buf)
return NULL;
{
ASSERT(unpack != &ctx->Pack); /* catch pack/unpack mismatch */
if (_mesa_is_bufferobj(unpack->BufferObj)) {
- ctx->Driver.UnmapBuffer(ctx, unpack->BufferObj);
+ ctx->Driver.UnmapBuffer(ctx, unpack->BufferObj, MAP_INTERNAL);
}
}
buf = (GLubyte *) ctx->Driver.MapBufferRange(ctx, 0,
pack->BufferObj->Size,
GL_MAP_WRITE_BIT,
- pack->BufferObj);
+ pack->BufferObj,
+ MAP_INTERNAL);
if (!buf)
return NULL;
{
ASSERT(pack != &ctx->Unpack); /* catch pack/unpack mismatch */
if (_mesa_is_bufferobj(pack->BufferObj)) {
- ctx->Driver.UnmapBuffer(ctx, pack->BufferObj);
+ ctx->Driver.UnmapBuffer(ctx, pack->BufferObj, MAP_INTERNAL);
}
}
buf = (GLubyte *) ctx->Driver.MapBufferRange(ctx, 0,
unpack->BufferObj->Size,
GL_MAP_READ_BIT,
- unpack->BufferObj);
+ unpack->BufferObj,
+ MAP_INTERNAL);
if (!buf) {
_mesa_error(ctx, GL_INVALID_OPERATION, "%s%uD(PBO is mapped)", funcName,
dimensions);
buf = (GLubyte*) ctx->Driver.MapBufferRange(ctx, 0,
packing->BufferObj->Size,
GL_MAP_READ_BIT,
- packing->BufferObj);
+ packing->BufferObj,
+ MAP_INTERNAL);
if (!buf) {
_mesa_error(ctx, GL_INVALID_OPERATION, "%s%uD(PBO is mapped)", funcName,
dimensions);
const struct gl_pixelstore_attrib *unpack)
{
if (_mesa_is_bufferobj(unpack->BufferObj)) {
- ctx->Driver.UnmapBuffer(ctx, unpack->BufferObj);
+ ctx->Driver.UnmapBuffer(ctx, unpack->BufferObj, MAP_INTERNAL);
}
}
{
struct gl_buffer_object *bufObj = (struct gl_buffer_object *) data;
struct gl_context *ctx = (struct gl_context *) userData;
- if (_mesa_bufferobj_mapped(bufObj)) {
- ctx->Driver.UnmapBuffer(ctx, bufObj);
- bufObj->Pointer = NULL;
- }
+
+ _mesa_buffer_unmap_all_mappings(ctx, bufObj);
_mesa_reference_buffer_object(ctx, &bufObj, NULL);
}
*/
GLubyte *buf = (GLubyte *)
ctx->Driver.MapBufferRange(ctx, 0, ctx->Pack.BufferObj->Size,
- GL_MAP_WRITE_BIT, ctx->Pack.BufferObj);
+ GL_MAP_WRITE_BIT, ctx->Pack.BufferObj,
+ MAP_INTERNAL);
if (!buf) {
/* out of memory or other unexpected error */
_mesa_error(ctx, GL_OUT_OF_MEMORY, "glGetTexImage(map PBO failed)");
}
if (_mesa_is_bufferobj(ctx->Pack.BufferObj)) {
- ctx->Driver.UnmapBuffer(ctx, ctx->Pack.BufferObj);
+ ctx->Driver.UnmapBuffer(ctx, ctx->Pack.BufferObj, MAP_INTERNAL);
}
}
/* pack texture image into a PBO */
GLubyte *buf = (GLubyte *)
ctx->Driver.MapBufferRange(ctx, 0, ctx->Pack.BufferObj->Size,
- GL_MAP_WRITE_BIT, ctx->Pack.BufferObj);
+ GL_MAP_WRITE_BIT, ctx->Pack.BufferObj,
+ MAP_INTERNAL);
if (!buf) {
/* out of memory or other unexpected error */
_mesa_error(ctx, GL_OUT_OF_MEMORY,
}
if (_mesa_is_bufferobj(ctx->Pack.BufferObj)) {
- ctx->Driver.UnmapBuffer(ctx, ctx->Pack.BufferObj);
+ ctx->Driver.UnmapBuffer(ctx, ctx->Pack.BufferObj, MAP_INTERNAL);
}
}
struct st_buffer_object *st_obj = st_buffer_object(obj);
assert(obj->RefCount == 0);
- assert(st_obj->transfer == NULL);
+ _mesa_buffer_unmap_all_mappings(ctx, obj);
if (st_obj->buffer)
pipe_resource_reference(&st_obj->buffer, NULL);
static void *
st_bufferobj_map_range(struct gl_context *ctx,
GLintptr offset, GLsizeiptr length, GLbitfield access,
- struct gl_buffer_object *obj)
+ struct gl_buffer_object *obj,
+ gl_map_buffer_index index)
{
struct pipe_context *pipe = st_context(ctx)->pipe;
struct st_buffer_object *st_obj = st_buffer_object(obj);
assert(offset < obj->Size);
assert(offset + length <= obj->Size);
- obj->Pointer = pipe_buffer_map_range(pipe,
+ obj->Mappings[index].Pointer = pipe_buffer_map_range(pipe,
st_obj->buffer,
offset, length,
flags,
- &st_obj->transfer);
- if (obj->Pointer) {
- obj->Offset = offset;
- obj->Length = length;
- obj->AccessFlags = access;
+ &st_obj->transfer[index]);
+ if (obj->Mappings[index].Pointer) {
+ obj->Mappings[index].Offset = offset;
+ obj->Mappings[index].Length = length;
+ obj->Mappings[index].AccessFlags = access;
}
else {
- st_obj->transfer = NULL;
+ st_obj->transfer[index] = NULL;
}
- return obj->Pointer;
+ return obj->Mappings[index].Pointer;
}
static void
st_bufferobj_flush_mapped_range(struct gl_context *ctx,
GLintptr offset, GLsizeiptr length,
- struct gl_buffer_object *obj)
+ struct gl_buffer_object *obj,
+ gl_map_buffer_index index)
{
struct pipe_context *pipe = st_context(ctx)->pipe;
struct st_buffer_object *st_obj = st_buffer_object(obj);
/* Subrange is relative to mapped range */
assert(offset >= 0);
assert(length >= 0);
- assert(offset + length <= obj->Length);
- assert(obj->Pointer);
+ assert(offset + length <= obj->Mappings[index].Length);
+ assert(obj->Mappings[index].Pointer);
if (!length)
return;
- pipe_buffer_flush_mapped_range(pipe, st_obj->transfer,
- obj->Offset + offset, length);
+ pipe_buffer_flush_mapped_range(pipe, st_obj->transfer[index],
+ obj->Mappings[index].Offset + offset,
+ length);
}
* Called via glUnmapBufferARB().
*/
static GLboolean
-st_bufferobj_unmap(struct gl_context *ctx, struct gl_buffer_object *obj)
+st_bufferobj_unmap(struct gl_context *ctx, struct gl_buffer_object *obj,
+ gl_map_buffer_index index)
{
struct pipe_context *pipe = st_context(ctx)->pipe;
struct st_buffer_object *st_obj = st_buffer_object(obj);
- if (obj->Length)
- pipe_buffer_unmap(pipe, st_obj->transfer);
+ if (obj->Mappings[index].Length)
+ pipe_buffer_unmap(pipe, st_obj->transfer[index]);
- st_obj->transfer = NULL;
- obj->Pointer = NULL;
- obj->Offset = 0;
- obj->Length = 0;
+ st_obj->transfer[index] = NULL;
+ obj->Mappings[index].Pointer = NULL;
+ obj->Mappings[index].Offset = 0;
+ obj->Mappings[index].Length = 0;
return GL_TRUE;
}
{
struct gl_buffer_object Base;
struct pipe_resource *buffer; /* GPU storage */
- struct pipe_transfer *transfer; /* In-progress map information */
+ struct pipe_transfer *transfer[MAP_COUNT];
};
const void *ptr;
if (inputs[i]->BufferObj->Name) {
- if (!inputs[i]->BufferObj->Pointer) {
+ if (!inputs[i]->BufferObj->Mappings[MAP_INTERNAL].Pointer) {
bo[*nr_bo] = inputs[i]->BufferObj;
(*nr_bo)++;
ctx->Driver.MapBufferRange(ctx, 0, inputs[i]->BufferObj->Size,
GL_MAP_READ_BIT,
- inputs[i]->BufferObj);
+ inputs[i]->BufferObj,
+ MAP_INTERNAL);
- assert(inputs[i]->BufferObj->Pointer);
+ assert(inputs[i]->BufferObj->Mappings[MAP_INTERNAL].Pointer);
}
- ptr = ADD_POINTERS(inputs[i]->BufferObj->Pointer,
+ ptr = ADD_POINTERS(inputs[i]->BufferObj->Mappings[MAP_INTERNAL].Pointer,
inputs[i]->Ptr);
}
else
return;
}
- if (_mesa_is_bufferobj(ib->obj) && !_mesa_bufferobj_mapped(ib->obj)) {
+ if (_mesa_is_bufferobj(ib->obj) &&
+ !_mesa_bufferobj_mapped(ib->obj, MAP_INTERNAL)) {
/* if the buffer object isn't mapped yet, map it now */
bo[*nr_bo] = ib->obj;
(*nr_bo)++;
ptr = ctx->Driver.MapBufferRange(ctx, (GLsizeiptr) ib->ptr,
ib->count * vbo_sizeof_ib_type(ib->type),
- GL_MAP_READ_BIT, ib->obj);
- assert(ib->obj->Pointer);
+ GL_MAP_READ_BIT, ib->obj,
+ MAP_INTERNAL);
+ assert(ib->obj->Mappings[MAP_INTERNAL].Pointer);
} else {
/* user-space elements, or buffer already mapped */
- ptr = ADD_POINTERS(ib->obj->Pointer, ib->ptr);
+ ptr = ADD_POINTERS(ib->obj->Mappings[MAP_INTERNAL].Pointer, ib->ptr);
}
if (ib->type == GL_UNSIGNED_INT && VB->Primitive[0].basevertex == 0) {
{
GLuint i;
for (i = 0; i < nr_bo; i++) {
- ctx->Driver.UnmapBuffer(ctx, bo[i]);
+ ctx->Driver.UnmapBuffer(ctx, bo[i], MAP_INTERNAL);
}
}
/* Free the vertex buffer. Unmap first if needed.
*/
- if (_mesa_bufferobj_mapped(exec->vtx.bufferobj)) {
- ctx->Driver.UnmapBuffer(ctx, exec->vtx.bufferobj);
+ if (_mesa_bufferobj_mapped(exec->vtx.bufferobj, MAP_INTERNAL)) {
+ ctx->Driver.UnmapBuffer(ctx, exec->vtx.bufferobj, MAP_INTERNAL);
}
_mesa_reference_buffer_object(ctx, &exec->vtx.bufferobj, NULL);
}
if (_mesa_is_bufferobj(ib->obj)) {
GLsizeiptr size = MIN2(count * index_size, ib->obj->Size);
indices = ctx->Driver.MapBufferRange(ctx, (GLintptr) indices, size,
- GL_MAP_READ_BIT, ib->obj);
+ GL_MAP_READ_BIT, ib->obj,
+ MAP_INTERNAL);
}
switch (ib->type) {
}
if (_mesa_is_bufferobj(ib->obj)) {
- ctx->Driver.UnmapBuffer(ctx, ib->obj);
+ ctx->Driver.UnmapBuffer(ctx, ib->obj, MAP_INTERNAL);
}
}
if (array->Enabled) {
const void *data = array->Ptr;
if (_mesa_is_bufferobj(array->BufferObj)) {
- if (!array->BufferObj->Pointer) {
+ if (!array->BufferObj->Mappings[MAP_INTERNAL].Pointer) {
/* need to map now */
- array->BufferObj->Pointer =
+ array->BufferObj->Mappings[MAP_INTERNAL].Pointer =
ctx->Driver.MapBufferRange(ctx, 0, array->BufferObj->Size,
- GL_MAP_READ_BIT, array->BufferObj);
+ GL_MAP_READ_BIT, array->BufferObj,
+ MAP_INTERNAL);
}
- data = ADD_POINTERS(data, array->BufferObj->Pointer);
+ data = ADD_POINTERS(data,
+ array->BufferObj->Mappings[MAP_INTERNAL].Pointer);
}
switch (array->Type) {
case GL_FLOAT:
{
if (array->Enabled &&
_mesa_is_bufferobj(array->BufferObj) &&
- _mesa_bufferobj_mapped(array->BufferObj)) {
- ctx->Driver.UnmapBuffer(ctx, array->BufferObj);
+ _mesa_bufferobj_mapped(array->BufferObj, MAP_INTERNAL)) {
+ ctx->Driver.UnmapBuffer(ctx, array->BufferObj, MAP_INTERNAL);
}
}
elemMap = ctx->Driver.MapBufferRange(ctx, 0,
ctx->Array.VAO->IndexBufferObj->Size,
GL_MAP_READ_BIT,
- ctx->Array.VAO->IndexBufferObj);
+ ctx->Array.VAO->IndexBufferObj,
+ MAP_INTERNAL);
elements = ADD_POINTERS(elements, elemMap);
}
}
if (_mesa_is_bufferobj(vao->IndexBufferObj)) {
- ctx->Driver.UnmapBuffer(ctx, ctx->Array.VAO->IndexBufferObj);
+ ctx->Driver.UnmapBuffer(ctx, ctx->Array.VAO->IndexBufferObj,
+ MAP_INTERNAL);
}
for (k = 0; k < Elements(vao->_VertexAttrib); k++) {
if (bufName) {
GLubyte *p = ctx->Driver.MapBufferRange(ctx, 0, bufObj->Size,
- GL_MAP_READ_BIT, bufObj);
+ GL_MAP_READ_BIT, bufObj,
+ MAP_INTERNAL);
int offset = (int) (GLintptr) exec->array.inputs[i]->Ptr;
float *f = (float *) (p + offset);
int *k = (int *) f;
for (i = 0; i < n; i++) {
printf(" float[%d] = 0x%08x %f\n", i, k[i], f[i]);
}
- ctx->Driver.UnmapBuffer(ctx, bufObj);
+ ctx->Driver.UnmapBuffer(ctx, bufObj, MAP_INTERNAL);
}
}
}
ctx->Driver.MapBufferRange(ctx, 0,
ctx->Array.VAO->IndexBufferObj->Size,
GL_MAP_READ_BIT,
- ctx->Array.VAO->IndexBufferObj);
+ ctx->Array.VAO->IndexBufferObj,
+ MAP_INTERNAL);
switch (type) {
case GL_UNSIGNED_BYTE:
{
;
}
- ctx->Driver.UnmapBuffer(ctx, ctx->Array.VAO->IndexBufferObj);
+ ctx->Driver.UnmapBuffer(ctx, ctx->Array.VAO->IndexBufferObj,
+ MAP_INTERNAL);
}
#endif
if (_mesa_is_bufferobj(exec->vtx.bufferobj)) {
/* a real buffer obj: Ptr is an offset, not a pointer*/
- assert(exec->vtx.bufferobj->Pointer); /* buf should be mapped */
+ assert(exec->vtx.bufferobj->Mappings[MAP_INTERNAL].Pointer);
assert(offset >= 0);
- arrays[attr].Ptr = (GLubyte *)exec->vtx.bufferobj->Offset + offset;
+ arrays[attr].Ptr = (GLubyte *)
+ exec->vtx.bufferobj->Mappings[MAP_INTERNAL].Offset + offset;
}
else {
/* Ptr into ordinary app memory */
struct gl_context *ctx = exec->ctx;
if (ctx->Driver.FlushMappedBufferRange) {
- GLintptr offset = exec->vtx.buffer_used - exec->vtx.bufferobj->Offset;
- GLsizeiptr length = (exec->vtx.buffer_ptr - exec->vtx.buffer_map) * sizeof(float);
+ GLintptr offset = exec->vtx.buffer_used -
+ exec->vtx.bufferobj->Mappings[MAP_INTERNAL].Offset;
+ GLsizeiptr length = (exec->vtx.buffer_ptr - exec->vtx.buffer_map) *
+ sizeof(float);
if (length)
ctx->Driver.FlushMappedBufferRange(ctx, offset, length,
- exec->vtx.bufferobj);
+ exec->vtx.bufferobj,
+ MAP_INTERNAL);
}
exec->vtx.buffer_used += (exec->vtx.buffer_ptr -
assert(exec->vtx.buffer_used <= VBO_VERT_BUFFER_SIZE);
assert(exec->vtx.buffer_ptr != NULL);
- ctx->Driver.UnmapBuffer(ctx, exec->vtx.bufferobj);
+ ctx->Driver.UnmapBuffer(ctx, exec->vtx.bufferobj, MAP_INTERNAL);
exec->vtx.buffer_map = NULL;
exec->vtx.buffer_ptr = NULL;
exec->vtx.max_vert = 0;
(VBO_VERT_BUFFER_SIZE -
exec->vtx.buffer_used),
accessRange,
- exec->vtx.bufferobj);
+ exec->vtx.bufferobj,
+ MAP_INTERNAL);
exec->vtx.buffer_ptr = exec->vtx.buffer_map;
}
else {
(GLfloat *)ctx->Driver.MapBufferRange(ctx,
0, VBO_VERT_BUFFER_SIZE,
accessRange,
- exec->vtx.bufferobj);
+ exec->vtx.bufferobj,
+ MAP_INTERNAL);
}
else {
_mesa_error(ctx, GL_OUT_OF_MEMORY, "VBO allocation");
struct _mesa_prim temp_prim;
struct vbo_context *vbo = vbo_context(ctx);
vbo_draw_func draw_prims_func = vbo->draw_prims;
- GLboolean map_ib = ib->obj->Name && !ib->obj->Pointer;
+ GLboolean map_ib = ib->obj->Name &&
+ !ib->obj->Mappings[MAP_INTERNAL].Pointer;
void *ptr;
/* If there is an indirect buffer, map it and extract the draw params */
struct _mesa_index_buffer new_ib = *ib;
const uint32_t *indirect_params;
if (!ctx->Driver.MapBufferRange(ctx, 0, indirect->Size, GL_MAP_READ_BIT,
- indirect)) {
+ indirect, MAP_INTERNAL)) {
/* something went wrong with mapping, give up */
_mesa_error(ctx, GL_OUT_OF_MEMORY,
}
assert(nr_prims == 1);
- indirect_params = (const uint32_t *) ADD_POINTERS(indirect->Pointer,
- new_prim.indirect_offset);
+ indirect_params = (const uint32_t *)
+ ADD_POINTERS(indirect->Mappings[MAP_INTERNAL].Pointer,
+ new_prim.indirect_offset);
new_prim.is_indirect = 0;
new_prim.count = indirect_params[0];
prims = &new_prim;
ib = &new_ib;
- ctx->Driver.UnmapBuffer(ctx, indirect);
+ ctx->Driver.UnmapBuffer(ctx, indirect, MAP_INTERNAL);
}
/* Find the sub-primitives. These are regions in the index buffer which
*/
if (map_ib) {
ctx->Driver.MapBufferRange(ctx, 0, ib->obj->Size, GL_MAP_READ_BIT,
- ib->obj);
+ ib->obj, MAP_INTERNAL);
}
- ptr = ADD_POINTERS(ib->obj->Pointer, ib->ptr);
+ ptr = ADD_POINTERS(ib->obj->Mappings[MAP_INTERNAL].Pointer, ib->ptr);
sub_prims = find_sub_primitives(ptr, vbo_sizeof_ib_type(ib->type),
0, ib->count, restart_index,
&num_sub_prims);
if (map_ib) {
- ctx->Driver.UnmapBuffer(ctx, ib->obj);
+ ctx->Driver.UnmapBuffer(ctx, ib->obj, MAP_INTERNAL);
}
/* Loop over the primitives, and use the located sub-primitives to draw
} else if (ib) {
/* Unfortunately need to adjust each index individually.
*/
- GLboolean map_ib = ib->obj->Name && !ib->obj->Pointer;
+ GLboolean map_ib = ib->obj->Name &&
+ !ib->obj->Mappings[MAP_INTERNAL].Pointer;
void *ptr;
if (map_ib)
ctx->Driver.MapBufferRange(ctx, 0, ib->obj->Size, GL_MAP_READ_BIT,
- ib->obj);
+ ib->obj, MAP_INTERNAL);
- ptr = ADD_POINTERS(ib->obj->Pointer, ib->ptr);
+ ptr = ADD_POINTERS(ib->obj->Mappings[MAP_INTERNAL].Pointer, ib->ptr);
/* Some users might prefer it if we translated elements to
* GLuints here. Others wouldn't...
}
if (map_ib)
- ctx->Driver.UnmapBuffer(ctx, ib->obj);
+ ctx->Driver.UnmapBuffer(ctx, ib->obj, MAP_INTERNAL);
tmp_ib.obj = ctx->Shared->NullBufferObj;
tmp_ib.ptr = tmp_indices;
GLsizeiptr size = vertex_store->bufferobj->Size - offset;
GLfloat *range = (GLfloat *)
ctx->Driver.MapBufferRange(ctx, offset, size, access,
- vertex_store->bufferobj);
+ vertex_store->bufferobj,
+ MAP_INTERNAL);
if (range) {
/* compute address of start of whole buffer (needed elsewhere) */
vertex_store->buffer = range - vertex_store->used;
if (vertex_store->bufferobj->Size > 0) {
GLintptr offset = 0;
GLsizeiptr length = vertex_store->used * sizeof(GLfloat)
- - vertex_store->bufferobj->Offset;
+ - vertex_store->bufferobj->Mappings[MAP_INTERNAL].Offset;
/* Explicitly flush the region we wrote to */
ctx->Driver.FlushMappedBufferRange(ctx, offset, length,
- vertex_store->bufferobj);
+ vertex_store->bufferobj,
+ MAP_INTERNAL);
- ctx->Driver.UnmapBuffer(ctx, vertex_store->bufferobj);
+ ctx->Driver.UnmapBuffer(ctx, vertex_store->bufferobj, MAP_INTERNAL);
}
vertex_store->buffer = NULL;
}
{
GET_CURRENT_CONTEXT(ctx);
struct vbo_save_context *save = &vbo_context(ctx)->save;
+ struct gl_buffer_object *indexbuf = ctx->Array.VAO->IndexBufferObj;
GLint i;
if (!_mesa_is_valid_prim_mode(ctx, mode)) {
_ae_map_vbos(ctx);
- if (_mesa_is_bufferobj(ctx->Array.VAO->IndexBufferObj))
+ if (_mesa_is_bufferobj(indexbuf))
indices =
- ADD_POINTERS(ctx->Array.VAO->IndexBufferObj->Pointer, indices);
+ ADD_POINTERS(indexbuf->Mappings[MAP_INTERNAL].Pointer, indices);
vbo_save_NotifyBegin(ctx, (mode | VBO_SAVE_PRIM_WEAK |
VBO_SAVE_PRIM_NO_CURRENT_UPDATE));
ctx->Driver.MapBufferRange(ctx, 0,
list->vertex_store->bufferobj->Size,
GL_MAP_READ_BIT, /* ? */
- list->vertex_store->bufferobj);
+ list->vertex_store->bufferobj,
+ MAP_INTERNAL);
vbo_loopback_vertex_list(ctx,
(const GLfloat *)(buffer + list->buffer_offset),
list->wrap_count,
list->vertex_size);
- ctx->Driver.UnmapBuffer(ctx, list->vertex_store->bufferobj);
+ ctx->Driver.UnmapBuffer(ctx, list->vertex_store->bufferobj,
+ MAP_INTERNAL);
}
copy->varying[j].size = attr_size(copy->array[i]);
copy->vertex_size += attr_size(copy->array[i]);
- if (_mesa_is_bufferobj(vbo) && !_mesa_bufferobj_mapped(vbo))
- ctx->Driver.MapBufferRange(ctx, 0, vbo->Size, GL_MAP_READ_BIT, vbo);
+ if (_mesa_is_bufferobj(vbo) &&
+ !_mesa_bufferobj_mapped(vbo, MAP_INTERNAL))
+ ctx->Driver.MapBufferRange(ctx, 0, vbo->Size, GL_MAP_READ_BIT, vbo,
+ MAP_INTERNAL);
- copy->varying[j].src_ptr = ADD_POINTERS(vbo->Pointer,
- copy->array[i]->Ptr);
+ copy->varying[j].src_ptr =
+ ADD_POINTERS(vbo->Mappings[MAP_INTERNAL].Pointer,
+ copy->array[i]->Ptr);
copy->dstarray_ptr[i] = &copy->varying[j].dstarray;
}
* do it internally.
*/
if (_mesa_is_bufferobj(copy->ib->obj) &&
- !_mesa_bufferobj_mapped(copy->ib->obj))
+ !_mesa_bufferobj_mapped(copy->ib->obj, MAP_INTERNAL))
ctx->Driver.MapBufferRange(ctx, 0, copy->ib->obj->Size, GL_MAP_READ_BIT,
- copy->ib->obj);
+ copy->ib->obj, MAP_INTERNAL);
- srcptr = (const GLubyte *) ADD_POINTERS(copy->ib->obj->Pointer,
- copy->ib->ptr);
+ srcptr = (const GLubyte *)
+ ADD_POINTERS(copy->ib->obj->Mappings[MAP_INTERNAL].Pointer,
+ copy->ib->ptr);
switch (copy->ib->type) {
case GL_UNSIGNED_BYTE:
*/
for (i = 0; i < copy->nr_varying; i++) {
struct gl_buffer_object *vbo = copy->varying[i].array->BufferObj;
- if (_mesa_is_bufferobj(vbo) && _mesa_bufferobj_mapped(vbo))
- ctx->Driver.UnmapBuffer(ctx, vbo);
+ if (_mesa_is_bufferobj(vbo) && _mesa_bufferobj_mapped(vbo, MAP_INTERNAL))
+ ctx->Driver.UnmapBuffer(ctx, vbo, MAP_INTERNAL);
}
/* Unmap index buffer:
*/
if (_mesa_is_bufferobj(copy->ib->obj) &&
- _mesa_bufferobj_mapped(copy->ib->obj)) {
- ctx->Driver.UnmapBuffer(ctx, copy->ib->obj);
+ _mesa_bufferobj_mapped(copy->ib->obj, MAP_INTERNAL)) {
+ ctx->Driver.UnmapBuffer(ctx, copy->ib->obj, MAP_INTERNAL);
}
}