intel_obj->gpu_active_end = 0;
}
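+/**
+ * Grow the (conservative) range of the buffer known to contain defined
+ * data. Writes that land entirely outside this range can safely be done
+ * without synchronization, since nothing the GPU could validly read is
+ * overwritten.
+ */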
+static void
+mark_buffer_valid_data(struct intel_buffer_object *intel_obj,
+ uint32_t offset, uint32_t size)
+{
+ intel_obj->valid_data_start = MIN2(intel_obj->valid_data_start, offset);
+ intel_obj->valid_data_end = MAX2(intel_obj->valid_data_end, offset + size);
+}
+
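+/**
+ * Reset the valid-data range to empty, e.g. when a fresh BO with
+ * undefined contents replaces the old one.
+ */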
+static void
+mark_buffer_invalid(struct intel_buffer_object *intel_obj)
+{
+ intel_obj->valid_data_start = ~0;
+ intel_obj->valid_data_end = 0;
+}
+
/** Allocates a new brw_bo to store the data for the buffer object. */
static void
alloc_buffer_object(struct brw_context *brw,
struct intel_buffer_object *intel_obj)
{
- intel_obj->buffer = brw_bo_alloc(brw->bufmgr, "bufferobj",
- intel_obj->Base.Size, 64);
+ const struct gl_context *ctx = &brw->ctx;
+
+ uint64_t size = intel_obj->Base.Size;
+ if (ctx->Const.RobustAccess) {
+ /* Pad out buffer objects with an extra 2kB (half a page).
+ *
+ * When pushing UBOs, we need to safeguard against 3DSTATE_CONSTANT_*
+ * reading out-of-bounds memory. The application might bind a UBO that's
+ * smaller than what the program expects. Ideally, we'd bind an extra
+ * push buffer containing zeros, but we have a limited number of those,
+ * so it's not always viable. Our only safe option is to pad all buffer
+ * objects by the maximum push data length, so that it will never read
+ * past the end of a BO.
+ *
+ * This is unfortunate, but it should result in at most 1 extra page,
+ * which probably isn't too terrible.
+ */
+ size += 64 * 32; /* max read length of 64 256-bit (32-byte) units = 2048 bytes */
+ }
+ intel_obj->buffer = brw_bo_alloc(brw->bufmgr, "bufferobj", size, 64);
/* the buffer might be bound as a uniform buffer, need to update it
*/
brw->ctx.NewDriverState |= BRW_NEW_ATOMIC_BUFFER;
mark_buffer_inactive(intel_obj);
+ mark_buffer_invalid(intel_obj);
}
static void
struct intel_buffer_object *obj = CALLOC_STRUCT(intel_buffer_object);
if (!obj) {
_mesa_error_no_memory(__func__);
+ return NULL;
}
_mesa_initialize_buffer_object(ctx, &obj->Base, name);
if (!intel_obj->buffer)
return false;
- if (data != NULL)
+ if (data != NULL) {
brw_bo_subdata(intel_obj->buffer, 0, size, data);
+ mark_buffer_valid_data(intel_obj, 0, size);
+ }
}
return true;
* up with blitting all the time, at the cost of bandwidth)
*/
if (offset + size <= intel_obj->gpu_active_start ||
- intel_obj->gpu_active_end <= offset) {
- if (brw->has_llc) {
- brw_bo_map_unsynchronized(brw, intel_obj->buffer);
- memcpy(intel_obj->buffer->virtual + offset, data, size);
- brw_bo_unmap(intel_obj->buffer);
-
- if (intel_obj->gpu_active_end > intel_obj->gpu_active_start)
- intel_obj->prefer_stall_to_blit = true;
- return;
- } else {
- perf_debug("BufferSubData could be unsynchronized, but !LLC doesn't support it yet\n");
- }
+ intel_obj->gpu_active_end <= offset ||
+ offset + size <= intel_obj->valid_data_start ||
+ intel_obj->valid_data_end <= offset) {
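+ /* The write targets an idle range of the BO, or a range whose
+ * contents are still undefined, so an unsynchronized write can't
+ * disturb anything the GPU is legitimately using.
+ */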
+ void *map = brw_bo_map(brw, intel_obj->buffer, MAP_WRITE | MAP_ASYNC);
+
+ if (unlikely(!map)) {
+ _mesa_error_no_memory(__func__);
+ return;
+ }
+
+ memcpy(map + offset, data, size);
+ brw_bo_unmap(intel_obj->buffer);
+
+ if (intel_obj->gpu_active_end > intel_obj->gpu_active_start)
+ intel_obj->prefer_stall_to_blit = true;
+
+ mark_buffer_valid_data(intel_obj, offset, size);
+ return;
}
busy =
brw_batch_references(&brw->batch, intel_obj->buffer);
if (busy) {
- if (size == intel_obj->Base.Size) {
+ if (size == intel_obj->Base.Size ||
+ (intel_obj->valid_data_start >= offset &&
+ intel_obj->valid_data_end <= offset + size)) {
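+ /* The new data covers every byte of defined contents, so nothing
+ * is lost by orphaning the old BO and starting fresh.
+ */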
/* Replace the current busy bo so the subdata doesn't stall. */
brw_bo_unreference(intel_obj->buffer);
alloc_buffer_object(brw, intel_obj);
} else if (!intel_obj->prefer_stall_to_blit) {
perf_debug("Using a blit copy to avoid stalling on "
"glBufferSubData(%ld, %ld) (%ldkb) to a busy "
- "(%d-%d) buffer object.\n",
+ "(%d-%d) / valid (%d-%d) buffer object.\n",
(long)offset, (long)offset + size, (long)(size/1024),
intel_obj->gpu_active_start,
- intel_obj->gpu_active_end);
+ intel_obj->gpu_active_end,
+ intel_obj->valid_data_start,
+ intel_obj->valid_data_end);
struct brw_bo *temp_bo =
brw_bo_alloc(brw->bufmgr, "subdata temp", size, 64);
size);
brw_bo_unreference(temp_bo);
+ mark_buffer_valid_data(intel_obj, offset, size);
return;
} else {
perf_debug("Stalling on glBufferSubData(%ld, %ld) (%ldkb) to a busy "
brw_bo_subdata(intel_obj->buffer, offset, size, data);
mark_buffer_inactive(intel_obj);
+ mark_buffer_valid_data(intel_obj, offset, size);
}
if (brw_batch_references(&brw->batch, intel_obj->buffer)) {
intel_batchbuffer_flush(brw);
}
- brw_bo_get_subdata(intel_obj->buffer, offset, size, data);
+
+ void *map = brw_bo_map(brw, intel_obj->buffer, MAP_READ);
+
+ if (unlikely(!map)) {
+ _mesa_error_no_memory(__func__);
+ return;
+ }
+
+ memcpy(data, map + offset, size);
+ brw_bo_unmap(intel_obj->buffer);
mark_buffer_inactive(intel_obj);
}
assert(intel_obj);
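+ /* brw_bo_map()'s MAP_* flags are defined with the same values as the
+ * corresponding GL_MAP_* bits, so the GL access mask can be passed
+ * straight through; these asserts verify that assumption.
+ */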
+ STATIC_ASSERT(GL_MAP_UNSYNCHRONIZED_BIT == MAP_ASYNC);
+ STATIC_ASSERT(GL_MAP_WRITE_BIT == MAP_WRITE);
+ STATIC_ASSERT(GL_MAP_READ_BIT == MAP_READ);
+ STATIC_ASSERT(GL_MAP_PERSISTENT_BIT == MAP_PERSISTENT);
+ STATIC_ASSERT(GL_MAP_COHERENT_BIT == MAP_COHERENT);
+ assert((access & MAP_INTERNAL_MASK) == 0);
+
/* _mesa_MapBufferRange (GL entrypoint) sets these, but the vbo module also
* internally uses our functions directly.
*/
}
}
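+ /* A writable mapping conservatively marks the whole range as holding
+ * defined data, since we can't know which bytes the application will
+ * actually write.
+ */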
+ if (access & MAP_WRITE)
+ mark_buffer_valid_data(intel_obj, offset, length);
+
/* If the user is mapping a range of an active buffer object but
* doesn't require the current contents of that range, make a new
* BO, and we'll copy what they put in there out at unmap or
length +
intel_obj->map_extra[index],
alignment);
- if (brw->has_llc) {
- brw_bo_map(brw, intel_obj->range_map_bo[index],
- (access & GL_MAP_WRITE_BIT) != 0);
- } else {
- brw_bo_map_gtt(brw, intel_obj->range_map_bo[index]);
- }
- obj->Mappings[index].Pointer =
- intel_obj->range_map_bo[index]->virtual + intel_obj->map_extra[index];
+ void *map = brw_bo_map(brw, intel_obj->range_map_bo[index], access);
+ obj->Mappings[index].Pointer = map + intel_obj->map_extra[index];
return obj->Mappings[index].Pointer;
}
- if (access & GL_MAP_UNSYNCHRONIZED_BIT) {
- if (!brw->has_llc && brw->perf_debug &&
- brw_bo_busy(intel_obj->buffer)) {
- perf_debug("MapBufferRange with GL_MAP_UNSYNCHRONIZED_BIT stalling (it's actually synchronized on non-LLC platforms)\n");
- }
- brw_bo_map_unsynchronized(brw, intel_obj->buffer);
- } else if (!brw->has_llc && (!(access & GL_MAP_READ_BIT) ||
- (access & GL_MAP_PERSISTENT_BIT))) {
- brw_bo_map_gtt(brw, intel_obj->buffer);
- mark_buffer_inactive(intel_obj);
- } else {
- brw_bo_map(brw, intel_obj->buffer, (access & GL_MAP_WRITE_BIT) != 0);
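+ /* brw_bo_map() picks a mapping type (CPU, WC, or GTT) appropriate
+ * for the access flags and the platform.
+ */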
+ void *map = brw_bo_map(brw, intel_obj->buffer, access);
+ if (!(access & GL_MAP_UNSYNCHRONIZED_BIT)) {
mark_buffer_inactive(intel_obj);
}
- obj->Mappings[index].Pointer = intel_obj->buffer->virtual + offset;
+ obj->Mappings[index].Pointer = map + offset;
return obj->Mappings[index].Pointer;
}
struct brw_bo *
intel_bufferobj_buffer(struct brw_context *brw,
struct intel_buffer_object *intel_obj,
- uint32_t offset, uint32_t size)
+ uint32_t offset, uint32_t size, bool write)
{
/* This is needed so that things like transform feedback and texture buffer
* objects that need a BO but don't want to check that they exist for
mark_buffer_gpu_usage(intel_obj, offset, size);
+ /* If writing, (conservatively) mark this section as having valid data. */
+ if (write)
+ mark_buffer_valid_data(intel_obj, offset, size);
+
return intel_obj->buffer;
}
if (size == 0)
return;
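+ /* The blit writes the destination range and only reads the source. */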
- dst_bo = intel_bufferobj_buffer(brw, intel_dst, write_offset, size);
- src_bo = intel_bufferobj_buffer(brw, intel_src, read_offset, size);
+ dst_bo = intel_bufferobj_buffer(brw, intel_dst, write_offset, size, true);
+ src_bo = intel_bufferobj_buffer(brw, intel_src, read_offset, size, false);
intel_emit_linear_blit(brw,
dst_bo, write_offset,