}
static GLboolean
-intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj);
+intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj,
+ gl_map_buffer_index index);
static void
intel_bufferobj_mark_gpu_usage(struct intel_buffer_object *intel_obj,
* to the spec, but Mesa doesn't do UnmapBuffer for us at context destroy
* (though it does if you call glDeleteBuffers)
*/
- if (obj->Pointer)
- intel_bufferobj_unmap(ctx, obj);
+ _mesa_buffer_unmap_all_mappings(ctx, obj);
drm_intel_bo_unreference(intel_obj->buffer);
free(intel_obj);
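
Note on the hunk above: the core helper it delegates to is expected to walk every mapping slot and call back into the driver's unmap hook for each live mapping. A minimal sketch of the assumed shape (the authoritative version lives in core Mesa's bufferobj.c, not in this patch):

/* Assumed shape of the core-Mesa helper: unmap both the user and the
 * internal mapping, if either is still live. */
void
_mesa_buffer_unmap_all_mappings(struct gl_context *ctx,
                                struct gl_buffer_object *obj)
{
   for (int i = 0; i < MAP_COUNT; i++) {
      if (obj->Mappings[i].Pointer)
         ctx->Driver.UnmapBuffer(ctx, obj, (gl_map_buffer_index) i);
   }
}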
GLenum target,
GLsizeiptrARB size,
const GLvoid * data,
- GLenum usage, struct gl_buffer_object *obj)
+ GLenum usage,
+ GLbitfield storageFlags,
+ struct gl_buffer_object *obj)
{
struct brw_context *brw = brw_context(ctx);
struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
intel_obj->Base.Size = size;
intel_obj->Base.Usage = usage;
+ intel_obj->Base.StorageFlags = storageFlags;
- assert(!obj->Pointer); /* Mesa should have unmapped it */
+ assert(!obj->Mappings[MAP_USER].Pointer); /* Mesa should have unmapped it */
+ assert(!obj->Mappings[MAP_INTERNAL].Pointer);
if (intel_obj->buffer != NULL)
release_buffer(intel_obj);
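
The Mappings[] array and the MAP_USER/MAP_INTERNAL constants used by the new asserts come from core Mesa, as does the new storageFlags parameter: glBufferStorage() passes its flags argument through, while plain glBufferData() is assumed to supply a default mutable-storage set. An abridged sketch of the state involved, with field names as assumed from mtypes.h:

typedef enum {
   MAP_USER,     /* mapping requested through the GL API */
   MAP_INTERNAL, /* mapping made by Mesa itself, e.g. by the vbo module */
   MAP_COUNT
} gl_map_buffer_index;

struct gl_buffer_mapping {
   GLbitfield AccessFlags;  /* GL_MAP_* bits for this mapping */
   GLvoid *Pointer;         /* pointer handed out, NULL if unmapped */
   GLintptr Offset;         /* start of the mapped range */
   GLsizeiptr Length;       /* length of the mapped range */
};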
static void *
intel_bufferobj_map_range(struct gl_context * ctx,
GLintptr offset, GLsizeiptr length,
- GLbitfield access, struct gl_buffer_object *obj)
+ GLbitfield access, struct gl_buffer_object *obj,
+ gl_map_buffer_index index)
{
struct brw_context *brw = brw_context(ctx);
struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
/* _mesa_MapBufferRange (GL entrypoint) sets these, but the vbo module also
* internally uses our functions directly.
*/
- obj->Offset = offset;
- obj->Length = length;
- obj->AccessFlags = access;
+ obj->Mappings[index].Offset = offset;
+ obj->Mappings[index].Length = length;
+ obj->Mappings[index].AccessFlags = access;
if (intel_obj->buffer == NULL) {
- obj->Pointer = NULL;
+ obj->Mappings[index].Pointer = NULL;
return NULL;
}
* doesn't require the current contents of that range, make a new
* BO, and we'll copy what they put in there out at unmap or
* FlushRange time.
+ *
+ * That is, unless they're looking for a persistent mapping -- we would
+ * need to do blits in the MemoryBarrier call, and it's easier to just do a
+ * GPU stall and do a mapping.
*/
- if (!(access & GL_MAP_UNSYNCHRONIZED_BIT) &&
+ if (!(access & (GL_MAP_UNSYNCHRONIZED_BIT | GL_MAP_PERSISTENT_BIT)) &&
(access & GL_MAP_INVALIDATE_RANGE_BIT) &&
drm_intel_bo_busy(intel_obj->buffer)) {
/* Ensure that the base alignment of the allocation meets the alignment
* guarantees the driver has advertised to the application.
*/
const unsigned alignment = ctx->Const.MinMapBufferAlignment;
- const unsigned extra = (uintptr_t) offset % alignment;
- if (access & GL_MAP_FLUSH_EXPLICIT_BIT) {
- intel_obj->range_map_buffer = _mesa_align_malloc(length + extra,
+ intel_obj->map_extra[index] = (uintptr_t) offset % alignment;
+ intel_obj->range_map_bo[index] = drm_intel_bo_alloc(brw->bufmgr,
+ "BO blit temp",
+ length +
+ intel_obj->map_extra[index],
alignment);
- obj->Pointer = intel_obj->range_map_buffer + extra;
+ if (brw->has_llc) {
+ drm_intel_bo_map(intel_obj->range_map_bo[index],
+ (access & GL_MAP_WRITE_BIT) != 0);
} else {
- intel_obj->range_map_bo = drm_intel_bo_alloc(brw->bufmgr,
- "range map",
- length + extra,
- alignment);
- if (!(access & GL_MAP_READ_BIT)) {
- drm_intel_gem_bo_map_gtt(intel_obj->range_map_bo);
- } else {
- drm_intel_bo_map(intel_obj->range_map_bo,
- (access & GL_MAP_WRITE_BIT) != 0);
- }
- obj->Pointer = intel_obj->range_map_bo->virtual + extra;
+ drm_intel_gem_bo_map_gtt(intel_obj->range_map_bo[index]);
}
- return obj->Pointer;
+ obj->Mappings[index].Pointer =
+ intel_obj->range_map_bo[index]->virtual + intel_obj->map_extra[index];
+ return obj->Mappings[index].Pointer;
}
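
A worked example of the map_extra arithmetic above, with made-up numbers: GL_MIN_MAP_BUFFER_ALIGNMENT requires that the returned pointer minus the map offset be a multiple of the advertised alignment, and carrying the offset's residue into the temporary BO is what preserves that guarantee.

/* Hypothetical values: alignment 64, user maps at offset 100. */
const unsigned alignment = 64;           /* ctx->Const.MinMapBufferAlignment */
GLintptr offset = 100;
uintptr_t extra = offset % alignment;    /* 36 */
uintptr_t bo_virtual = 0x1000;           /* temp BO CPU address, 64-aligned */
uintptr_t ptr = bo_virtual + extra;      /* 0x1024 */
/* ptr - offset = 0x1024 - 100 = 0xfc0, a multiple of 64, so the spec's
 * alignment guarantee holds even though the data lives in a temporary BO
 * rather than at offset 100 of the real buffer. */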
if (access & GL_MAP_UNSYNCHRONIZED_BIT)
drm_intel_gem_bo_map_unsynchronized(intel_obj->buffer);
- else if (!(access & GL_MAP_READ_BIT)) {
+ else if (!brw->has_llc && (!(access & GL_MAP_READ_BIT) ||
+ (access & GL_MAP_PERSISTENT_BIT))) {
drm_intel_gem_bo_map_gtt(intel_obj->buffer);
intel_bufferobj_mark_inactive(intel_obj);
} else {
intel_bufferobj_mark_inactive(intel_obj);
}
- obj->Pointer = intel_obj->buffer->virtual + offset;
- return obj->Pointer;
+ obj->Mappings[index].Pointer = intel_obj->buffer->virtual + offset;
+ return obj->Mappings[index].Pointer;
}
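
On the direct-map tail above, non-LLC parts now take the GTT path for persistent maps as well as write-only ones, so writes go straight to memory; the unsynchronized branch is typically reached by streaming patterns like the following (standard GL usage, not part of this patch; stream_offset, chunk_size and vertices stand in for the caller's state):

void *p = glMapBufferRange(GL_ARRAY_BUFFER, stream_offset, chunk_size,
                           GL_MAP_WRITE_BIT | GL_MAP_UNSYNCHRONIZED_BIT);
memcpy(p, vertices, chunk_size);   /* caller promises no overlap with GPU use */
glUnmapBuffer(GL_ARRAY_BUFFER);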
/**
 * Implements glFlushMappedBufferRange().
 */
static void
intel_bufferobj_flush_mapped_range(struct gl_context *ctx,
GLintptr offset, GLsizeiptr length,
- struct gl_buffer_object *obj)
+ struct gl_buffer_object *obj,
+ gl_map_buffer_index index)
{
struct brw_context *brw = brw_context(ctx);
struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
- drm_intel_bo *temp_bo;
+ GLbitfield access = obj->Mappings[index].AccessFlags;
- /* Unless we're in the range map using a temporary system buffer,
- * there's no work to do.
+ assert(access & GL_MAP_FLUSH_EXPLICIT_BIT);
+
+ /* If we gave a direct mapping of the buffer instead of using a temporary,
+ * then there's nothing to do.
*/
- if (intel_obj->range_map_buffer == NULL)
+ if (intel_obj->range_map_bo[index] == NULL)
return;
if (length == 0)
return;
- temp_bo = drm_intel_bo_alloc(brw->bufmgr, "range map flush", length, 64);
-
- /* Use obj->Pointer instead of intel_obj->range_map_buffer because the
- * former points to the actual mapping while the latter may be offset to
- * meet alignment guarantees.
+ /* Note that we're not unmapping our buffer while executing the blit. We
+ * need to have a mapping still at the end of this call, since the user
+ * gets to make further modifications and glFlushMappedBufferRange() calls.
+ * This is safe, because:
+ *
+ * - On LLC platforms, we're using a CPU mapping that's coherent with the
+ * GPU (except for the render caches), so the kernel doesn't need to do
+ * any flushing work for us except for what happens at batch exec time
+ * anyway.
+ *
+ * - On non-LLC platforms, we're using a GTT mapping that writes directly
+ * to system memory (except for the chipset cache that gets flushed at
+ * batch exec time).
+ *
+ * In both cases we don't need to stall for the previous blit to complete
+ * so we can re-map (and we definitely don't want to, since that would be
+ * slow): If the user edits a part of their buffer that's previously been
+ * blitted, then our lack of synchronization is fine, because either
+ * they'll get some too-new data in the first blit and not do another blit
+ * of that area (but in that case the results are undefined), or they'll do
+ * another blit of that area and the complete newer data will land the
+ * second time.
*/
- drm_intel_bo_subdata(temp_bo, 0, length, obj->Pointer);
-
intel_emit_linear_blit(brw,
- intel_obj->buffer, obj->Offset + offset,
- temp_bo, 0,
+ intel_obj->buffer,
+ obj->Mappings[index].Offset + offset,
+ intel_obj->range_map_bo[index],
+ intel_obj->map_extra[index] + offset,
length);
- intel_bufferobj_mark_gpu_usage(intel_obj, obj->Offset + offset, length);
-
- drm_intel_bo_unreference(temp_bo);
+ intel_bufferobj_mark_gpu_usage(intel_obj,
+ obj->Mappings[index].Offset + offset,
+ length);
}
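
For reference, the explicit-flush path corresponds to this standard GL calling pattern (illustrative, not from this patch; new_data is the caller's). When the busy-buffer temporary path was taken at map time, each flush below becomes one linear blit; with a direct mapping they are no-ops:

char *p = glMapBufferRange(GL_ARRAY_BUFFER, 0, 4096,
                           GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_RANGE_BIT |
                           GL_MAP_FLUSH_EXPLICIT_BIT);
memcpy(p + 256, new_data, 128);                       /* edit a sub-range */
glFlushMappedBufferRange(GL_ARRAY_BUFFER, 256, 128);  /* blit just that part */
/* ... further edits and flushes; the mapping stays valid throughout ... */
glUnmapBuffer(GL_ARRAY_BUFFER);   /* no implicit blit: FLUSH_EXPLICIT was set */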
/**
 * Implements glUnmapBuffer().
 */
static GLboolean
-intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj)
+intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj,
+ gl_map_buffer_index index)
{
struct brw_context *brw = brw_context(ctx);
struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
assert(intel_obj);
- assert(obj->Pointer);
- if (intel_obj->range_map_buffer != NULL) {
- /* Since we've emitted some blits to buffers that will (likely) be used
- * in rendering operations in other cache domains in this batch, emit a
- * flush. Once again, we wish for a domain tracker in libdrm to cover
- * usage inside of a batchbuffer.
- */
- intel_batchbuffer_emit_mi_flush(brw);
- _mesa_align_free(intel_obj->range_map_buffer);
- intel_obj->range_map_buffer = NULL;
- } else if (intel_obj->range_map_bo != NULL) {
- const unsigned extra = obj->Pointer - intel_obj->range_map_bo->virtual;
-
- drm_intel_bo_unmap(intel_obj->range_map_bo);
-
- intel_emit_linear_blit(brw,
- intel_obj->buffer, obj->Offset,
- intel_obj->range_map_bo, extra,
- obj->Length);
- intel_bufferobj_mark_gpu_usage(intel_obj, obj->Offset, obj->Length);
+ assert(obj->Mappings[index].Pointer);
+ if (intel_obj->range_map_bo[index] != NULL) {
+ drm_intel_bo_unmap(intel_obj->range_map_bo[index]);
+
+ if (!(obj->Mappings[index].AccessFlags & GL_MAP_FLUSH_EXPLICIT_BIT)) {
+ intel_emit_linear_blit(brw,
+ intel_obj->buffer, obj->Mappings[index].Offset,
+ intel_obj->range_map_bo[index],
+ intel_obj->map_extra[index],
+ obj->Mappings[index].Length);
+ intel_bufferobj_mark_gpu_usage(intel_obj, obj->Mappings[index].Offset,
+ obj->Mappings[index].Length);
+ }
      /* Since we've emitted some blits to buffers that will (likely) be used
       * in rendering operations in other cache domains in this batch, emit a
       * flush.  Once again, we wish for a domain tracker in libdrm to cover
       * usage inside of a batchbuffer.
       */
intel_batchbuffer_emit_mi_flush(brw);
- drm_intel_bo_unreference(intel_obj->range_map_bo);
- intel_obj->range_map_bo = NULL;
+ drm_intel_bo_unreference(intel_obj->range_map_bo[index]);
+ intel_obj->range_map_bo[index] = NULL;
} else if (intel_obj->buffer != NULL) {
drm_intel_bo_unmap(intel_obj->buffer);
}
- obj->Pointer = NULL;
- obj->Offset = 0;
- obj->Length = 0;
+ obj->Mappings[index].Pointer = NULL;
+ obj->Mappings[index].Offset = 0;
+ obj->Mappings[index].Length = 0;
return true;
}
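
Finally, the persistent case that the map path now steers away from the temporary BO looks like this on the API side (standard ARB_buffer_storage usage; the fencing discipline is the application's responsibility):

GLuint buf;
glGenBuffers(1, &buf);
glBindBuffer(GL_ARRAY_BUFFER, buf);

GLbitfield flags = GL_MAP_WRITE_BIT | GL_MAP_PERSISTENT_BIT |
                   GL_MAP_COHERENT_BIT;
glBufferStorage(GL_ARRAY_BUFFER, 4096, NULL, flags);
void *p = glMapBufferRange(GL_ARRAY_BUFFER, 0, 4096, flags);

/* The pointer must stay valid across draws, which is why the driver can
 * never substitute a blit-at-unmap temporary here. */
glDrawArrays(GL_TRIANGLES, 0, 3);
GLsync fence = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
glClientWaitSync(fence, GL_SYNC_FLUSH_COMMANDS_BIT, UINT64_MAX);
glDeleteSync(fence);
/* now safe to rewrite the region that draw consumed */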