diff --git a/src/mesa/drivers/dri/intel/intel_buffer_objects.c b/src/mesa/drivers/dri/intel/intel_buffer_objects.c
index aed0e45a28f..3b7015b5ad3 100644
--- a/src/mesa/drivers/dri/intel/intel_buffer_objects.c
+++ b/src/mesa/drivers/dri/intel/intel_buffer_objects.c
@@ -28,9 +28,11 @@
 #include "main/imports.h"
 #include "main/mtypes.h"
+#include "main/macros.h"
 #include "main/bufferobj.h"
 
 #include "intel_context.h"
+#include "intel_blit.h"
 #include "intel_buffer_objects.h"
 #include "intel_batchbuffer.h"
 #include "intel_regions.h"
@@ -128,9 +130,10 @@ intel_bufferobj_free(GLcontext * ctx, struct gl_buffer_object *obj)
  * Allocate space for and store data in a buffer object.  Any data that was
  * previously stored in the buffer object is lost.  If data is NULL,
  * memory will be allocated, but no copy will occur.
- * Called via glBufferDataARB().
+ * Called via ctx->Driver.BufferData().
+ * \return GL_TRUE for success, GL_FALSE if out of memory
  */
-static void
+static GLboolean
 intel_bufferobj_data(GLcontext * ctx,
                      GLenum target,
                      GLsizeiptrARB size,
@@ -165,15 +168,19 @@ intel_bufferobj_data(GLcontext * ctx,
       if (intel_obj->sys_buffer != NULL) {
          if (data != NULL)
            memcpy(intel_obj->sys_buffer, data, size);
-         return;
+         return GL_TRUE;
       }
    }
 #endif
       intel_bufferobj_alloc_buffer(intel, intel_obj);
+      if (!intel_obj->buffer)
+         return GL_FALSE;
 
       if (data != NULL)
          dri_bo_subdata(intel_obj->buffer, 0, size, data);
    }
+
+   return GL_TRUE;
 }
@@ -200,8 +207,26 @@ intel_bufferobj_subdata(GLcontext * ctx,
 
    if (intel_obj->sys_buffer)
      memcpy((char *)intel_obj->sys_buffer + offset, data, size);
-   else
-      dri_bo_subdata(intel_obj->buffer, offset, size, data);
+   else {
+      /* Blit via a temporary BO rather than stalling on a busy buffer. */
+      if (drm_intel_bo_busy(intel_obj->buffer) ||
+          drm_intel_bo_references(intel->batch->buf, intel_obj->buffer)) {
+         drm_intel_bo *temp_bo;
+
+         temp_bo = drm_intel_bo_alloc(intel->bufmgr, "subdata temp", size, 64);
+
+         drm_intel_bo_subdata(temp_bo, 0, size, data);
+
+         intel_emit_linear_blit(intel,
+                                intel_obj->buffer, offset,
+                                temp_bo, 0,
+                                size);
+
+         drm_intel_bo_unreference(temp_bo);
+      } else {
+         dri_bo_subdata(intel_obj->buffer, offset, size, data);
+      }
+   }
 }
@@ -218,7 +243,10 @@ intel_bufferobj_get_subdata(GLcontext * ctx,
    struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
 
    assert(intel_obj);
-   dri_bo_get_subdata(intel_obj->buffer, offset, size, data);
+   if (intel_obj->sys_buffer)
+      memcpy(data, (char *)intel_obj->sys_buffer + offset, size);
+   else
+      dri_bo_get_subdata(intel_obj->buffer, offset, size, data);
 }
@@ -240,9 +268,15 @@ intel_bufferobj_map(GLcontext * ctx,
 
    if (intel_obj->sys_buffer) {
       obj->Pointer = intel_obj->sys_buffer;
+      obj->Length = obj->Size;
+      obj->Offset = 0;
       return obj->Pointer;
    }
 
+   /* Flush any existing batchbuffer that might reference this data. */
+   if (drm_intel_bo_references(intel->batch->buf, intel_obj->buffer))
+      intelFlush(ctx);
+
    if (intel_obj->region)
       intel_bufferobj_cow(intel, intel_obj);
@@ -260,29 +294,206 @@ intel_bufferobj_map(GLcontext * ctx,
    }
 
    obj->Pointer = intel_obj->buffer->virtual;
+   obj->Length = obj->Size;
+   obj->Offset = 0;
+
    return obj->Pointer;
 }
 
+/**
+ * Called via glMapBufferRange().
+ *
+ * The goal of this extension is to allow apps to accumulate their rendering
+ * at the same time as they accumulate new contents in their buffer objects.
+ * Without it, you'd end up blocking on execution of rendering every time
+ * you mapped the buffer to put new data in.
+ *
+ * We support it in three ways: if the access is unsynchronized, don't
+ * bother flushing the batchbuffer before mapping the buffer, which can save
+ * blocking in many cases.  If we would still block, and they allow the
+ * whole buffer to be invalidated, then just allocate a new buffer to
+ * replace the old one.  If not, and we'd block, and they allow the subrange
+ * of the buffer to be invalidated, then we can make a new little BO, let
+ * them write into that, and blit it into the real BO at unmap time.
+ */
+static void *
+intel_bufferobj_map_range(GLcontext * ctx,
+                          GLenum target, GLintptr offset, GLsizeiptr length,
+                          GLbitfield access, struct gl_buffer_object *obj)
+{
+   struct intel_context *intel = intel_context(ctx);
+   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
+
+   assert(intel_obj);
+
+   /* _mesa_MapBufferRange (GL entrypoint) sets these, but the vbo module
+    * also internally uses our functions directly.
+    */
+   obj->Offset = offset;
+   obj->Length = length;
+   obj->AccessFlags = access;
+
+   if (intel_obj->sys_buffer) {
+      obj->Pointer = intel_obj->sys_buffer + offset;
+      return obj->Pointer;
+   }
+
+   if (intel_obj->region)
+      intel_bufferobj_cow(intel, intel_obj);
+
+   /* If the mapping is synchronized with other GL operations, flush
+    * the batchbuffer so that GEM knows about the buffer access for later
+    * syncing.
+    */
+   if (!(access & GL_MAP_UNSYNCHRONIZED_BIT) &&
+       drm_intel_bo_references(intel->batch->buf, intel_obj->buffer))
+      intelFlush(ctx);
+
+   if (intel_obj->buffer == NULL) {
+      obj->Pointer = NULL;
+      return NULL;
+   }
+
+   /* If the user doesn't care about the existing buffer contents and
+    * mapping would cause us to block, then throw out the old buffer.
+    */
+   if (!(access & GL_MAP_UNSYNCHRONIZED_BIT) &&
+       (access & GL_MAP_INVALIDATE_BUFFER_BIT) &&
+       drm_intel_bo_busy(intel_obj->buffer)) {
+      drm_intel_bo_unreference(intel_obj->buffer);
+      intel_obj->buffer = dri_bo_alloc(intel->bufmgr, "bufferobj",
+                                       intel_obj->Base.Size, 64);
+   }
+
+   /* If the user is mapping a range of an active buffer object but
+    * doesn't require the current contents of that range, give them a
+    * temporary staging allocation, and copy what they put in it into
+    * the real BO at unmap or FlushRange time.
+    */
+   if ((access & GL_MAP_INVALIDATE_RANGE_BIT) &&
+       drm_intel_bo_busy(intel_obj->buffer)) {
+      if (access & GL_MAP_FLUSH_EXPLICIT_BIT) {
+         intel_obj->range_map_buffer = _mesa_malloc(length);
+         obj->Pointer = intel_obj->range_map_buffer;
+      } else {
+         intel_obj->range_map_bo = drm_intel_bo_alloc(intel->bufmgr,
+                                                      "range map",
+                                                      length, 64);
+         if (!(access & GL_MAP_READ_BIT) &&
+             intel->intelScreen->kernel_exec_fencing) {
+            drm_intel_gem_bo_map_gtt(intel_obj->range_map_bo);
+            intel_obj->mapped_gtt = GL_TRUE;
+         } else {
+            drm_intel_bo_map(intel_obj->range_map_bo,
+                             (access & GL_MAP_WRITE_BIT) != 0);
+            intel_obj->mapped_gtt = GL_FALSE;
+         }
+         obj->Pointer = intel_obj->range_map_bo->virtual;
+      }
+      return obj->Pointer;
+   }
+
+   if (!(access & GL_MAP_READ_BIT) &&
+       intel->intelScreen->kernel_exec_fencing) {
+      drm_intel_gem_bo_map_gtt(intel_obj->buffer);
+      intel_obj->mapped_gtt = GL_TRUE;
+   } else {
+      drm_intel_bo_map(intel_obj->buffer, (access & GL_MAP_WRITE_BIT) != 0);
+      intel_obj->mapped_gtt = GL_FALSE;
+   }
+
+   obj->Pointer = intel_obj->buffer->virtual + offset;
+   return obj->Pointer;
+}
+
+/* Ideally we'd use a BO to avoid taking up cache space for the temporary
+ * data, but FlushMappedBufferRange may be followed by further writes to
+ * the pointer, so we would have to re-map after emitting our blit, which
+ * would defeat the point.
+ */
+static void
+intel_bufferobj_flush_mapped_range(GLcontext *ctx, GLenum target,
+                                   GLintptr offset, GLsizeiptr length,
+                                   struct gl_buffer_object *obj)
+{
+   struct intel_context *intel = intel_context(ctx);
+   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
+   drm_intel_bo *temp_bo;
+
+   /* Unless we're in the range map using a temporary system buffer,
+    * there's no work to do.
+    */
+   if (intel_obj->range_map_buffer == NULL)
+      return;
+
+   temp_bo = drm_intel_bo_alloc(intel->bufmgr, "range map flush", length, 64);
+
+   drm_intel_bo_subdata(temp_bo, 0, length, intel_obj->range_map_buffer);
+
+   intel_emit_linear_blit(intel,
+                          intel_obj->buffer, obj->Offset + offset,
+                          temp_bo, 0,
+                          length);
+
+   drm_intel_bo_unreference(temp_bo);
+}
+
 /**
- * Called via glMapBufferARB().
+ * Called via glUnmapBuffer().
  */
 static GLboolean
 intel_bufferobj_unmap(GLcontext * ctx,
                       GLenum target, struct gl_buffer_object *obj)
 {
+   struct intel_context *intel = intel_context(ctx);
    struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
 
    assert(intel_obj);
-   if (intel_obj->buffer != NULL) {
-      assert(obj->Pointer);
+   assert(obj->Pointer);
+   if (intel_obj->sys_buffer != NULL) {
+      /* Always keep the mapping around. */
+   } else if (intel_obj->range_map_buffer != NULL) {
+      /* Since we've emitted some blits to buffers that will (likely) be used
+       * in rendering operations in other cache domains in this batch, emit a
+       * flush.  Once again, we wish for a domain tracker in libdrm to cover
+       * usage inside of a batchbuffer.
+       */
+      intel_batchbuffer_emit_mi_flush(intel->batch);
+      free(intel_obj->range_map_buffer);
+      intel_obj->range_map_buffer = NULL;
+   } else if (intel_obj->range_map_bo != NULL) {
+      if (intel_obj->mapped_gtt) {
+         drm_intel_gem_bo_unmap_gtt(intel_obj->range_map_bo);
+      } else {
+         drm_intel_bo_unmap(intel_obj->range_map_bo);
+      }
+
+      intel_emit_linear_blit(intel,
+                             intel_obj->buffer, obj->Offset,
+                             intel_obj->range_map_bo, 0,
+                             obj->Length);
+
+      /* Since we've emitted some blits to buffers that will (likely) be used
+       * in rendering operations in other cache domains in this batch, emit a
+       * flush.  Once again, we wish for a domain tracker in libdrm to cover
+       * usage inside of a batchbuffer.
+       */
+      intel_batchbuffer_emit_mi_flush(intel->batch);
+
+      drm_intel_bo_unreference(intel_obj->range_map_bo);
+      intel_obj->range_map_bo = NULL;
+   } else if (intel_obj->buffer != NULL) {
       if (intel_obj->mapped_gtt) {
          drm_intel_gem_bo_unmap_gtt(intel_obj->buffer);
       } else {
          drm_intel_bo_unmap(intel_obj->buffer);
       }
-      obj->Pointer = NULL;
    }
+   obj->Pointer = NULL;
+   obj->Offset = 0;
+   obj->Length = 0;
+
    return GL_TRUE;
 }
@@ -319,6 +530,65 @@ intel_bufferobj_buffer(struct intel_context *intel,
    return intel_obj->buffer;
 }
 
+static void
+intel_bufferobj_copy_subdata(GLcontext *ctx,
+                             struct gl_buffer_object *src,
+                             struct gl_buffer_object *dst,
+                             GLintptr read_offset, GLintptr write_offset,
+                             GLsizeiptr size)
+{
+   struct intel_context *intel = intel_context(ctx);
+   struct intel_buffer_object *intel_src = intel_buffer_object(src);
+   struct intel_buffer_object *intel_dst = intel_buffer_object(dst);
+   drm_intel_bo *src_bo, *dst_bo;
+
+   if (size == 0)
+      return;
+
+   /* If we're in system memory, just map and memcpy. */
+   if (intel_src->sys_buffer || intel_dst->sys_buffer) {
+      /* The same buffer may be used as both source and destination, but
+       * the copied regions must not overlap.
+       */
+      if (src == dst) {
+         char *ptr = intel_bufferobj_map(ctx, GL_COPY_WRITE_BUFFER,
+                                         GL_READ_WRITE, dst);
+         memcpy(ptr + write_offset, ptr + read_offset, size);
+         intel_bufferobj_unmap(ctx, GL_COPY_WRITE_BUFFER, dst);
+      } else {
+         const char *src_ptr;
+         char *dst_ptr;
+
+         src_ptr = intel_bufferobj_map(ctx, GL_COPY_READ_BUFFER,
+                                       GL_READ_ONLY, src);
+         dst_ptr = intel_bufferobj_map(ctx, GL_COPY_WRITE_BUFFER,
+                                       GL_WRITE_ONLY, dst);
+
+         memcpy(dst_ptr + write_offset, src_ptr + read_offset, size);
+
+         intel_bufferobj_unmap(ctx, GL_COPY_READ_BUFFER, src);
+         intel_bufferobj_unmap(ctx, GL_COPY_WRITE_BUFFER, dst);
+      }
+      return;
+   }
+
+   /* Otherwise, we have real BOs, so blit them. */
+
+   dst_bo = intel_bufferobj_buffer(intel, intel_dst, INTEL_WRITE_PART);
+   src_bo = intel_bufferobj_buffer(intel, intel_src, INTEL_READ);
+
+   intel_emit_linear_blit(intel,
+                          dst_bo, write_offset,
+                          src_bo, read_offset, size);
+
+   /* Since we've emitted some blits to buffers that will (likely) be used
+    * in rendering operations in other cache domains in this batch, emit a
+    * flush.  Once again, we wish for a domain tracker in libdrm to cover
+    * usage inside of a batchbuffer.
+    */
+   intel_batchbuffer_emit_mi_flush(intel->batch);
+}
+
 void
 intelInitBufferObjectFuncs(struct dd_function_table *functions)
 {
@@ -328,5 +598,8 @@ intelInitBufferObjectFuncs(struct dd_function_table *functions)
    functions->BufferSubData = intel_bufferobj_subdata;
    functions->GetBufferSubData = intel_bufferobj_get_subdata;
    functions->MapBuffer = intel_bufferobj_map;
+   functions->MapBufferRange = intel_bufferobj_map_range;
+   functions->FlushMappedBufferRange = intel_bufferobj_flush_mapped_range;
    functions->UnmapBuffer = intel_bufferobj_unmap;
+   functions->CopyBufferSubData = intel_bufferobj_copy_subdata;
 }
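
For reference, here is a minimal, hypothetical client-side sketch (not part of the change above) of how the invalidate-buffer and invalidate-range strategies described in the new map_range comment are reached; GL_MAP_UNSYNCHRONIZED_BIT would be the third strategy, for apps doing their own fencing. It assumes a context exposing GL_ARB_map_buffer_range with visible prototypes (e.g. GL_GLEXT_PROTOTYPES or a loader); the function and variable names are invented for illustration.

/* Hypothetical GL client code exercising the new map-range paths. */
#include <string.h>
#define GL_GLEXT_PROTOTYPES
#include <GL/gl.h>
#include <GL/glext.h>

static void
upload(GLuint vbo, const void *data, GLintptr offset,
       GLsizeiptr size, GLsizeiptr buf_size)
{
   GLbitfield access = GL_MAP_WRITE_BIT;
   void *ptr;

   glBindBuffer(GL_ARRAY_BUFFER, vbo);

   if (offset == 0 && size == buf_size) {
      /* Whole-buffer orphaning: lets the driver drop a busy BO and
       * allocate a fresh one instead of blocking.
       */
      access |= GL_MAP_INVALIDATE_BUFFER_BIT;
   } else {
      /* Subrange invalidation: lets the driver hand back a staging
       * allocation and blit it into the real BO at unmap/flush time.
       */
      access |= GL_MAP_INVALIDATE_RANGE_BIT | GL_MAP_FLUSH_EXPLICIT_BIT;
   }

   ptr = glMapBufferRange(GL_ARRAY_BUFFER, offset, size, access);
   memcpy(ptr, data, size);

   /* With FLUSH_EXPLICIT, the offset is relative to the mapped range. */
   if (access & GL_MAP_FLUSH_EXPLICIT_BIT)
      glFlushMappedBufferRange(GL_ARRAY_BUFFER, 0, size);

   glUnmapBuffer(GL_ARRAY_BUFFER);
}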
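
Similarly hypothetical, a GL_ARB_copy_buffer snippet reaching the new CopyBufferSubData hook (src_vbo and dst_vbo are illustrative names); the driver takes the memcpy path if either object lives in system memory and the blit path otherwise:

static void
clone_buffer(GLuint src_vbo, GLuint dst_vbo, GLsizeiptr size)
{
   glBindBuffer(GL_COPY_READ_BUFFER, src_vbo);
   glBindBuffer(GL_COPY_WRITE_BUFFER, dst_vbo);

   /* Ranges within a single buffer must not overlap, matching the
    * assumption in the driver's memcpy path above.
    */
   glCopyBufferSubData(GL_COPY_READ_BUFFER, GL_COPY_WRITE_BUFFER,
                       0, 0, size);
}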