/**************************************************************************
- *
- * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ *
+ * Copyright 2003 VMware, Inc.
* All Rights Reserved.
- *
+ *
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
- *
+ *
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
- *
+ *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
+ *
**************************************************************************/
/**
#include "intel_buffer_objects.h"
#include "intel_batchbuffer.h"
+/**
+ * Map a buffer object; issue performance warnings if mapping causes stalls.
+ *
+ * This matches the drm_intel_bo_map API, but takes an additional human-readable
+ * name for the buffer object to use in the performance debug message.
+ */
+int
+brw_bo_map(struct brw_context *brw,
+ drm_intel_bo *bo, int write_enable,
+ const char *bo_name)
+{
+ if (likely(!brw->perf_debug) || !drm_intel_bo_busy(bo))
+ return drm_intel_bo_map(bo, write_enable);
+
+ double start_time = get_time();
+
+ int ret = drm_intel_bo_map(bo, write_enable);
+
+ perf_debug("CPU mapping a busy %s BO stalled and took %.03f ms.\n",
+ bo_name, (get_time() - start_time) * 1000);
+
+ return ret;
+}
+
+int
+brw_bo_map_gtt(struct brw_context *brw, drm_intel_bo *bo, const char *bo_name)
+{
+ if (likely(!brw->perf_debug) || !drm_intel_bo_busy(bo))
+ return drm_intel_gem_bo_map_gtt(bo);
+
+ double start_time = get_time();
+
+ int ret = drm_intel_gem_bo_map_gtt(bo);
+
+ perf_debug("GTT mapping a busy %s BO stalled and took %.03f ms.\n",
+ bo_name, (get_time() - start_time) * 1000);
+
+ return ret;
+}
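
For reference, a minimal sketch of a caller doing a CPU readback through the new wrapper; the surrounding function and the "query object" name are illustrative assumptions, not part of this patch:

static bool
read_back_result(struct brw_context *brw, drm_intel_bo *bo, uint64_t *out)
{
   /* Map for reading; brw_bo_map() emits a perf_debug warning if this
    * stalls on the GPU.
    */
   if (brw_bo_map(brw, bo, false, "query object") != 0)
      return false;

   memcpy(out, bo->virtual, sizeof(*out));
   drm_intel_bo_unmap(bo);
   return true;
}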
+
static GLboolean
-intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj);
+intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj,
+ gl_map_buffer_index index);
+
+static void
+intel_bufferobj_mark_gpu_usage(struct intel_buffer_object *intel_obj,
+ uint32_t offset, uint32_t size)
+{
+ intel_obj->gpu_active_start = MIN2(intel_obj->gpu_active_start, offset);
+ intel_obj->gpu_active_end = MAX2(intel_obj->gpu_active_end, offset + size);
+}
+
+static void
+intel_bufferobj_mark_inactive(struct intel_buffer_object *intel_obj)
+{
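+ /* start > end encodes "no busy range": the next
+ * intel_bufferobj_mark_gpu_usage() call collapses MIN2/MAX2 to exactly
+ * the newly used range.
+ */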
+ intel_obj->gpu_active_start = ~0;
+ intel_obj->gpu_active_end = 0;
+}
/** Allocates a new drm_intel_bo to store the data for the buffer object. */
static void
/* The buffer might be bound as a uniform buffer, so we need to mark
 * that state dirty.
 */
brw->state.dirty.brw |= BRW_NEW_UNIFORM_BUFFER;
+
+ intel_bufferobj_mark_inactive(intel_obj);
}
static void
release_buffer(struct intel_buffer_object *intel_obj)
{
drm_intel_bo_unreference(intel_obj->buffer);
intel_obj->buffer = NULL;
- intel_obj->offset = 0;
}
/* Buffer objects are automatically unmapped when deleting according
 * to the spec, but Mesa doesn't do UnmapBuffer for us at context destroy
 * time (though it does if you call glDeleteBuffers)
*/
- if (obj->Pointer)
- intel_bufferobj_unmap(ctx, obj);
+ _mesa_buffer_unmap_all_mappings(ctx, obj);
drm_intel_bo_unreference(intel_obj->buffer);
free(intel_obj);
GLenum target,
GLsizeiptrARB size,
const GLvoid * data,
- GLenum usage, struct gl_buffer_object *obj)
+ GLenum usage,
+ GLbitfield storageFlags,
+ struct gl_buffer_object *obj)
{
struct brw_context *brw = brw_context(ctx);
struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
intel_obj->Base.Size = size;
intel_obj->Base.Usage = usage;
+ intel_obj->Base.StorageFlags = storageFlags;
- assert(!obj->Pointer); /* Mesa should have unmapped it */
+ assert(!obj->Mappings[MAP_USER].Pointer); /* Mesa should have unmapped it */
+ assert(!obj->Mappings[MAP_INTERNAL].Pointer);
if (intel_obj->buffer != NULL)
release_buffer(intel_obj);
assert(intel_obj);
+ /* See if we can write the data into the user's BO without
+ * synchronization. This avoids GPU stalls in unfortunately common
+ * usage patterns (uploading sequentially into a BO, with draw calls in
+ * between each upload).
+ *
+ * Once we've hit this path, we mark this GL BO as preferring stalls to
+ * blits, so that we can hopefully hit this path again in the future
+ * (otherwise, an app that stalls only occasionally would end up
+ * blitting all the time, at the cost of bandwidth).
+ */
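+ /* Illustrative client pattern (an assumed typical usage, not code from
+ * this driver):
+ *
+ *    for (i = 0; i < n; i++) {
+ *       glBufferSubData(GL_ARRAY_BUFFER, i * obj_size, obj_size, data[i]);
+ *       glDrawArrays(GL_TRIANGLES, i * obj_verts, obj_verts);
+ *    }
+ *
+ * Each upload lands outside the range the GPU is still reading, so we
+ * can write it in place without synchronizing.
+ */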
+ if (brw->has_llc) {
+ if (offset + size <= intel_obj->gpu_active_start ||
+ intel_obj->gpu_active_end <= offset) {
+ drm_intel_gem_bo_map_unsynchronized(intel_obj->buffer);
+ memcpy(intel_obj->buffer->virtual + offset, data, size);
+ drm_intel_bo_unmap(intel_obj->buffer);
+
+ if (intel_obj->gpu_active_end > intel_obj->gpu_active_start)
+ intel_obj->prefer_stall_to_blit = true;
+ return;
+ }
+ }
+
busy =
drm_intel_bo_busy(intel_obj->buffer) ||
drm_intel_bo_references(brw->batch.bo, intel_obj->buffer);
if (busy) {
if (size == intel_obj->Base.Size) {
- /* Replace the current busy bo with fresh data. */
+ /* Replace the current busy bo so the subdata doesn't stall. */
drm_intel_bo_unreference(intel_obj->buffer);
intel_bufferobj_alloc_buffer(brw, intel_obj);
- drm_intel_bo_subdata(intel_obj->buffer, 0, size, data);
- } else {
- perf_debug("Using a blit copy to avoid stalling on %ldb "
- "glBufferSubData() to a busy buffer object.\n",
- (long)size);
+ } else if (!intel_obj->prefer_stall_to_blit) {
+ perf_debug("Using a blit copy to avoid stalling on "
+ "glBufferSubData(%ld, %ld) (%ldkb) to a busy "
+ "(%d-%d) buffer object.\n",
+ (long)offset, (long)offset + size, (long)(size/1024),
+ intel_obj->gpu_active_start,
+ intel_obj->gpu_active_end);
drm_intel_bo *temp_bo =
drm_intel_bo_alloc(brw->bufmgr, "subdata temp", size, 64);
drm_intel_bo_subdata(temp_bo, 0, size, data);

intel_emit_linear_blit(brw,
                       intel_obj->buffer, offset,
                       temp_bo, 0,
                       size);
drm_intel_bo_unreference(temp_bo);
+ return;
+ } else {
+ perf_debug("Stalling on glBufferSubData(%ld, %ld) (%ldkb) to a busy "
+ "(%d-%d) buffer object. Use glMapBufferRange() to "
+ "avoid this.\n",
+ (long)offset, (long)offset + size, (long)(size/1024),
+ intel_obj->gpu_active_start,
+ intel_obj->gpu_active_end);
+ intel_batchbuffer_flush(brw);
}
- } else {
- drm_intel_bo_subdata(intel_obj->buffer, offset, size, data);
}
+
+ drm_intel_bo_subdata(intel_obj->buffer, offset, size, data);
+ intel_bufferobj_mark_inactive(intel_obj);
}
intel_batchbuffer_flush(brw);
}
drm_intel_bo_get_subdata(intel_obj->buffer, offset, size, data);
+
+ intel_bufferobj_mark_inactive(intel_obj);
}
static void *
intel_bufferobj_map_range(struct gl_context * ctx,
GLintptr offset, GLsizeiptr length,
- GLbitfield access, struct gl_buffer_object *obj)
+ GLbitfield access, struct gl_buffer_object *obj,
+ gl_map_buffer_index index)
{
struct brw_context *brw = brw_context(ctx);
struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
/* _mesa_MapBufferRange (GL entrypoint) sets these, but the vbo module also
* internally uses our functions directly.
*/
- obj->Offset = offset;
- obj->Length = length;
- obj->AccessFlags = access;
+ obj->Mappings[index].Offset = offset;
+ obj->Mappings[index].Length = length;
+ obj->Mappings[index].AccessFlags = access;
if (intel_obj->buffer == NULL) {
- obj->Pointer = NULL;
+ obj->Mappings[index].Pointer = NULL;
return NULL;
}
/* If the user is mapping a range of an active buffer object but
 * doesn't require the current contents of that range, make a new
* BO, and we'll copy what they put in there out at unmap or
* FlushRange time.
+ *
+ * That is, unless they're looking for a persistent mapping -- we would
+ * need to do blits in the MemoryBarrier call, and it's easier to just do a
+ * GPU stall and do a mapping.
*/
- if ((access & GL_MAP_INVALIDATE_RANGE_BIT) &&
+ if (!(access & (GL_MAP_UNSYNCHRONIZED_BIT | GL_MAP_PERSISTENT_BIT)) &&
+ (access & GL_MAP_INVALIDATE_RANGE_BIT) &&
drm_intel_bo_busy(intel_obj->buffer)) {
- if (access & GL_MAP_FLUSH_EXPLICIT_BIT) {
- intel_obj->range_map_buffer = malloc(length);
- obj->Pointer = intel_obj->range_map_buffer;
+ /* Ensure that the base alignment of the allocation meets the alignment
+ * guarantees the driver has advertised to the application.
+ */
+ const unsigned alignment = ctx->Const.MinMapBufferAlignment;
+
+ intel_obj->map_extra[index] = (uintptr_t) offset % alignment;
+ intel_obj->range_map_bo[index] = drm_intel_bo_alloc(brw->bufmgr,
+ "BO blit temp",
+ length +
+ intel_obj->map_extra[index],
+ alignment);
+ if (brw->has_llc) {
+ drm_intel_bo_map(intel_obj->range_map_bo[index],
+ (access & GL_MAP_WRITE_BIT) != 0);
} else {
- intel_obj->range_map_bo = drm_intel_bo_alloc(brw->bufmgr,
- "range map",
- length, 64);
- if (!(access & GL_MAP_READ_BIT)) {
- drm_intel_gem_bo_map_gtt(intel_obj->range_map_bo);
- } else {
- drm_intel_bo_map(intel_obj->range_map_bo,
- (access & GL_MAP_WRITE_BIT) != 0);
- }
- obj->Pointer = intel_obj->range_map_bo->virtual;
+ drm_intel_gem_bo_map_gtt(intel_obj->range_map_bo[index]);
}
- return obj->Pointer;
+ obj->Mappings[index].Pointer =
+ intel_obj->range_map_bo[index]->virtual + intel_obj->map_extra[index];
+ return obj->Mappings[index].Pointer;
}
if (access & GL_MAP_UNSYNCHRONIZED_BIT)
drm_intel_gem_bo_map_unsynchronized(intel_obj->buffer);
- else if (!(access & GL_MAP_READ_BIT)) {
+ else if (!brw->has_llc && (!(access & GL_MAP_READ_BIT) ||
+ (access & GL_MAP_PERSISTENT_BIT))) {
drm_intel_gem_bo_map_gtt(intel_obj->buffer);
+ intel_bufferobj_mark_inactive(intel_obj);
} else {
drm_intel_bo_map(intel_obj->buffer, (access & GL_MAP_WRITE_BIT) != 0);
+ intel_bufferobj_mark_inactive(intel_obj);
}
- obj->Pointer = intel_obj->buffer->virtual + offset;
- return obj->Pointer;
+ obj->Mappings[index].Pointer = intel_obj->buffer->virtual + offset;
+ return obj->Mappings[index].Pointer;
}
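
For reference, a client write that hits the temporary-BO path above might look like this (a sketch; offset, length, and new_data are hypothetical):

/* While the buffer is still busy on the GPU, this sequence never stalls:
 * the write goes into the freshly allocated BO and is blitted into place
 * at glFlushMappedBufferRange() time.
 */
void *p = glMapBufferRange(GL_ARRAY_BUFFER, offset, length,
                           GL_MAP_WRITE_BIT |
                           GL_MAP_INVALIDATE_RANGE_BIT |
                           GL_MAP_FLUSH_EXPLICIT_BIT);
memcpy(p, new_data, length);
glFlushMappedBufferRange(GL_ARRAY_BUFFER, 0, length);
glUnmapBuffer(GL_ARRAY_BUFFER);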
/**
static void
intel_bufferobj_flush_mapped_range(struct gl_context *ctx,
GLintptr offset, GLsizeiptr length,
- struct gl_buffer_object *obj)
+ struct gl_buffer_object *obj,
+ gl_map_buffer_index index)
{
struct brw_context *brw = brw_context(ctx);
struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
- drm_intel_bo *temp_bo;
+ GLbitfield access = obj->Mappings[index].AccessFlags;
+
+ assert(access & GL_MAP_FLUSH_EXPLICIT_BIT);
- /* Unless we're in the range map using a temporary system buffer,
- * there's no work to do.
+ /* If we gave a direct mapping of the buffer instead of using a temporary,
+ * then there's nothing to do.
*/
- if (intel_obj->range_map_buffer == NULL)
+ if (intel_obj->range_map_bo[index] == NULL)
return;
if (length == 0)
return;
- temp_bo = drm_intel_bo_alloc(brw->bufmgr, "range map flush", length, 64);
-
- drm_intel_bo_subdata(temp_bo, 0, length, intel_obj->range_map_buffer);
-
+ /* Note that we're not unmapping our buffer while executing the blit. We
+ * need to have a mapping still at the end of this call, since the user
+ * gets to make further modifications and glFlushMappedBufferRange() calls.
+ * This is safe, because:
+ *
+ * - On LLC platforms, we're using a CPU mapping that's coherent with the
+ * GPU (except for the render caches), so the kernel doesn't need to do
+ * any flushing work for us except for what happens at batch exec time
+ * anyway.
+ *
+ * - On non-LLC platforms, we're using a GTT mapping that writes directly
+ * to system memory (except for the chipset cache that gets flushed at
+ * batch exec time).
+ *
+ * In both cases we don't need to stall for the previous blit to complete
+ * so we can re-map (and we definitely don't want to, since that would be
+ * slow): If the user edits a part of their buffer that's previously been
+ * blitted, then our lack of synchronization is fine, because either
+ * they'll get some too-new data in the first blit and not do another blit
+ * of that area (but in that case the results are undefined), or they'll do
+ * another blit of that area and the complete newer data will land the
+ * second time.
+ */
intel_emit_linear_blit(brw,
- intel_obj->buffer, obj->Offset + offset,
- temp_bo, 0,
+ intel_obj->buffer,
+ obj->Mappings[index].Offset + offset,
+ intel_obj->range_map_bo[index],
+ intel_obj->map_extra[index] + offset,
length);
-
- drm_intel_bo_unreference(temp_bo);
+ intel_bufferobj_mark_gpu_usage(intel_obj,
+ obj->Mappings[index].Offset + offset,
+ length);
}
/**
 * Implements glUnmapBuffer().
*/
static GLboolean
-intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj)
+intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj,
+ gl_map_buffer_index index)
{
struct brw_context *brw = brw_context(ctx);
struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
assert(intel_obj);
- assert(obj->Pointer);
- if (intel_obj->range_map_buffer != NULL) {
- /* Since we've emitted some blits to buffers that will (likely) be used
- * in rendering operations in other cache domains in this batch, emit a
- * flush. Once again, we wish for a domain tracker in libdrm to cover
- * usage inside of a batchbuffer.
- */
- intel_batchbuffer_emit_mi_flush(brw);
- free(intel_obj->range_map_buffer);
- intel_obj->range_map_buffer = NULL;
- } else if (intel_obj->range_map_bo != NULL) {
- drm_intel_bo_unmap(intel_obj->range_map_bo);
-
- intel_emit_linear_blit(brw,
- intel_obj->buffer, obj->Offset,
- intel_obj->range_map_bo, 0,
- obj->Length);
+ assert(obj->Mappings[index].Pointer);
+ if (intel_obj->range_map_bo[index] != NULL) {
+ drm_intel_bo_unmap(intel_obj->range_map_bo[index]);
+
+ if (!(obj->Mappings[index].AccessFlags & GL_MAP_FLUSH_EXPLICIT_BIT)) {
+ intel_emit_linear_blit(brw,
+ intel_obj->buffer, obj->Mappings[index].Offset,
+ intel_obj->range_map_bo[index],
+ intel_obj->map_extra[index],
+ obj->Mappings[index].Length);
+ intel_bufferobj_mark_gpu_usage(intel_obj, obj->Mappings[index].Offset,
+ obj->Mappings[index].Length);
+ }
/* Since we've emitted some blits to buffers that will (likely) be used
 * in rendering operations in other cache domains in this batch, emit a
 * flush. Once again, we wish for a domain tracker in libdrm to cover
 * usage inside of a batchbuffer.
 */
intel_batchbuffer_emit_mi_flush(brw);
- drm_intel_bo_unreference(intel_obj->range_map_bo);
- intel_obj->range_map_bo = NULL;
+ drm_intel_bo_unreference(intel_obj->range_map_bo[index]);
+ intel_obj->range_map_bo[index] = NULL;
} else if (intel_obj->buffer != NULL) {
drm_intel_bo_unmap(intel_obj->buffer);
}
- obj->Pointer = NULL;
- obj->Offset = 0;
- obj->Length = 0;
+ obj->Mappings[index].Pointer = NULL;
+ obj->Mappings[index].Offset = 0;
+ obj->Mappings[index].Length = 0;
return true;
}
+/**
+ * Gets a pointer to the object's BO, and marks the given range as being used
+ * on the GPU.
+ *
+ * Anywhere that uses buffer objects in the pipeline should be using this to
+ * mark the range of the buffer that is being accessed by the pipeline.
+ */
drm_intel_bo *
intel_bufferobj_buffer(struct brw_context *brw,
struct intel_buffer_object *intel_obj,
- GLuint flag)
+ uint32_t offset, uint32_t size)
{
+ /* This is needed so that things like transform feedback and texture
+ * buffer objects, which need a BO but don't want draw-time checks for
+ * its existence, can always get a BO from a GL buffer object.
+ */
if (intel_obj->buffer == NULL)
intel_bufferobj_alloc_buffer(brw, intel_obj);
- return intel_obj->buffer;
-}
+ intel_bufferobj_mark_gpu_usage(intel_obj, offset, size);
-drm_intel_bo *
-intel_bufferobj_source(struct brw_context *brw,
- struct intel_buffer_object *intel_obj,
- GLuint align, GLuint *offset)
-{
- *offset = intel_obj->offset;
return intel_obj->buffer;
}
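
For illustration, a hypothetical draw-time caller (ib_offset and ib_size are assumed names):

/* Fetch the BO backing an index buffer and record the range the GPU
 * will read.  A later glBufferSubData() outside [ib_offset,
 * ib_offset + ib_size) can then take the unsynchronized-write path.
 */
drm_intel_bo *bo = intel_bufferobj_buffer(brw, intel_buffer_object(obj),
                                          ib_offset, ib_size);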
struct intel_buffer_object *intel_src = intel_buffer_object(src);
struct intel_buffer_object *intel_dst = intel_buffer_object(dst);
drm_intel_bo *src_bo, *dst_bo;
- GLuint src_offset;
if (size == 0)
return;
- dst_bo = intel_bufferobj_buffer(brw, intel_dst, INTEL_WRITE_PART);
- src_bo = intel_bufferobj_source(brw, intel_src, 64, &src_offset);
+ dst_bo = intel_bufferobj_buffer(brw, intel_dst, write_offset, size);
+ src_bo = intel_bufferobj_buffer(brw, intel_src, read_offset, size);
intel_emit_linear_blit(brw,
dst_bo, write_offset,
- src_bo, read_offset + src_offset, size);
+ src_bo, read_offset, size);
/* Since we've emitted some blits to buffers that will (likely) be used
* in rendering operations in other cache domains in this batch, emit a