/*
 * Copyright 2003 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
/**
 * @file intel_buffer_objects.c
 *
 * This provides core GL buffer object functionality.
 */
#include "main/imports.h"
#include "main/mtypes.h"
#include "main/macros.h"
#include "main/bufferobj.h"

#include "brw_context.h"
#include "intel_blit.h"
#include "intel_buffer_objects.h"
#include "intel_batchbuffer.h"
static void
mark_buffer_gpu_usage(struct intel_buffer_object *intel_obj,
                      uint32_t offset, uint32_t size)
{
   intel_obj->gpu_active_start = MIN2(intel_obj->gpu_active_start, offset);
   intel_obj->gpu_active_end = MAX2(intel_obj->gpu_active_end, offset + size);
}
static void
mark_buffer_inactive(struct intel_buffer_object *intel_obj)
{
   intel_obj->gpu_active_start = ~0;
   intel_obj->gpu_active_end = 0;
}
static void
mark_buffer_valid_data(struct intel_buffer_object *intel_obj,
                       uint32_t offset, uint32_t size)
{
   intel_obj->valid_data_start = MIN2(intel_obj->valid_data_start, offset);
   intel_obj->valid_data_end = MAX2(intel_obj->valid_data_end, offset + size);
}
static void
mark_buffer_invalid(struct intel_buffer_object *intel_obj)
{
   intel_obj->valid_data_start = ~0;
   intel_obj->valid_data_end = 0;
}
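/* Both range trackers above use an inverted empty interval: start is
 * initialized to ~0 and end to 0, so the first MIN2/MAX2 update snaps the
 * range to exactly the first interval seen.  An illustrative sequence
 * (hypothetical offsets and sizes, not from real usage):
 *
 *    mark_buffer_inactive(obj);             // start = ~0,  end = 0
 *    mark_buffer_gpu_usage(obj, 256, 64);   // start = 256, end = 320
 *    mark_buffer_gpu_usage(obj, 64, 32);    // start = 64,  end = 320
 */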
/** Allocates a new brw_bo to store the data for the buffer object. */
static void
alloc_buffer_object(struct brw_context *brw,
                    struct intel_buffer_object *intel_obj)
{
   const struct gl_context *ctx = &brw->ctx;

   uint64_t size = intel_obj->Base.Size;
   if (ctx->Const.RobustAccess) {
      /* Pad out buffer objects with an extra 2kB (half a page).
       *
       * When pushing UBOs, we need to safeguard against 3DSTATE_CONSTANT_*
       * reading out of bounds memory.  The application might bind a UBO that's
       * smaller than what the program expects.  Ideally, we'd bind an extra
       * push buffer containing zeros, but we have a limited number of those,
       * so it's not always viable.  Our only safe option is to pad all buffer
       * objects by the maximum push data length, so that it will never read
       * past the end of a BO.
       *
       * This is unfortunate, but it should result in at most 1 extra page,
       * which probably isn't too terrible.
       */
      size += 64 * 32; /* max read length of 64 256-bit units */
   }
   intel_obj->buffer = brw_bo_alloc(brw->bufmgr, "bufferobj", size, 64);

   /* The buffer might be bound as a uniform or other state buffer; flag the
    * affected state dirty so it is re-emitted pointing at the new BO.
    */
   if (intel_obj->Base.UsageHistory & USAGE_UNIFORM_BUFFER)
      brw->ctx.NewDriverState |= BRW_NEW_UNIFORM_BUFFER;
   if (intel_obj->Base.UsageHistory & USAGE_SHADER_STORAGE_BUFFER)
      brw->ctx.NewDriverState |= BRW_NEW_UNIFORM_BUFFER;
   if (intel_obj->Base.UsageHistory & USAGE_TEXTURE_BUFFER)
      brw->ctx.NewDriverState |= BRW_NEW_TEXTURE_BUFFER;
   if (intel_obj->Base.UsageHistory & USAGE_ATOMIC_COUNTER_BUFFER)
      brw->ctx.NewDriverState |= BRW_NEW_ATOMIC_BUFFER;

   mark_buffer_inactive(intel_obj);
   mark_buffer_invalid(intel_obj);
}
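/* For reference, the robust-access pad above works out to 64 * 32 bytes =
 * 2048 bytes: 64 constant-buffer reads of one 256-bit (32-byte) unit each,
 * i.e. the "extra 2kB (half a page)" from the comment, assuming 4kB pages.
 */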
static void
release_buffer(struct intel_buffer_object *intel_obj)
{
   brw_bo_unreference(intel_obj->buffer);
   intel_obj->buffer = NULL;
}
/**
 * The NewBufferObject() driver hook.
 *
 * Allocates a new intel_buffer_object structure and initializes it.
 *
 * There is some duplication between mesa's bufferobjects and our
 * bufmgr buffers.  Both have an integer handle and a hashtable to
 * lookup an opaque structure.  It would be nice if the handles and
 * internal structure were somehow shared.
 */
static struct gl_buffer_object *
brw_new_buffer_object(struct gl_context * ctx, GLuint name)
{
   struct intel_buffer_object *obj = CALLOC_STRUCT(intel_buffer_object);
   if (!obj) {
      _mesa_error_no_memory(__func__);
      return NULL;
   }

   _mesa_initialize_buffer_object(ctx, &obj->Base, name);

   obj->buffer = NULL;

   return &obj->Base;
}
/**
 * The DeleteBuffer() driver hook.
 *
 * Deletes a single OpenGL buffer object.  Used by glDeleteBuffers().
 */
static void
brw_delete_buffer(struct gl_context * ctx, struct gl_buffer_object *obj)
{
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);

   /* Buffer objects are automatically unmapped when deleting according
    * to the spec, but Mesa doesn't do UnmapBuffer for us at context destroy
    * (though it does if you call glDeleteBuffers).
    */
   _mesa_buffer_unmap_all_mappings(ctx, obj);

   brw_bo_unreference(intel_obj->buffer);
   _mesa_delete_buffer_object(ctx, obj);
}
/**
 * The BufferData() driver hook.
 *
 * Implements glBufferData(), which recreates a buffer object's data store
 * and populates it with the given data, if present.
 *
 * Any data that was previously stored in the buffer object is lost.
 *
 * \return true for success, false if out of memory
 */
static GLboolean
brw_buffer_data(struct gl_context *ctx,
                GLenum target,
                GLsizeiptr size,
                const GLvoid *data,
                GLenum usage,
                GLbitfield storageFlags,
                struct gl_buffer_object *obj)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   /* Part of the ABI, but this function doesn't use it.
    */
   (void) target;

   intel_obj->Base.Size = size;
   intel_obj->Base.Usage = usage;
   intel_obj->Base.StorageFlags = storageFlags;

   assert(!obj->Mappings[MAP_USER].Pointer); /* Mesa should have unmapped it */
   assert(!obj->Mappings[MAP_INTERNAL].Pointer);

   if (intel_obj->buffer != NULL)
      release_buffer(intel_obj);

   if (size != 0) {
      alloc_buffer_object(brw, intel_obj);
      if (!intel_obj->buffer)
         return false;

      if (data != NULL) {
         brw_bo_subdata(intel_obj->buffer, 0, size, data);
         mark_buffer_valid_data(intel_obj, 0, size);
      }
   }

   return true;
}
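/* An illustrative GL-side call that reaches this hook through core Mesa
 * (standard GL API; buffer name and size are hypothetical):
 *
 *    GLuint buf;
 *    glGenBuffers(1, &buf);
 *    glBindBuffer(GL_ARRAY_BUFFER, buf);
 *    // NULL data allocates the 4096-byte store without uploading anything,
 *    // so the brw_bo_subdata() above is skipped and the data stays invalid.
 *    glBufferData(GL_ARRAY_BUFFER, 4096, NULL, GL_DYNAMIC_DRAW);
 */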
/**
 * The BufferSubData() driver hook.
 *
 * Implements glBufferSubData(), which replaces a portion of the data in a
 * buffer object.
 *
 * If the data range specified by (size + offset) extends beyond the end of
 * the buffer or if data is NULL, no copy is performed.
 */
static void
brw_buffer_subdata(struct gl_context *ctx,
                   GLintptr offset,
                   GLsizeiptr size,
                   const GLvoid *data,
                   struct gl_buffer_object *obj)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
   bool busy;

   if (size == 0)
      return;

   assert(intel_obj);

   /* See if we can write the data unsynchronized into the user's BO.  This
    * avoids GPU stalls in unfortunately common user patterns (uploading
    * sequentially into a BO, with draw calls in between each upload).
    *
    * Once we've hit this path, we mark this GL BO as preferring stalling to
    * blits, so that we can hopefully hit this path again in the future
    * (otherwise, an app that might occasionally stall but mostly not will end
    * up with blitting all the time, at the cost of bandwidth).
    */
   if (offset + size <= intel_obj->gpu_active_start ||
       intel_obj->gpu_active_end <= offset ||
       offset + size <= intel_obj->valid_data_start ||
       intel_obj->valid_data_end <= offset) {
      void *map = brw_bo_map(brw, intel_obj->buffer, MAP_WRITE | MAP_ASYNC);
      memcpy(map + offset, data, size);
      brw_bo_unmap(intel_obj->buffer);

      if (intel_obj->gpu_active_end > intel_obj->gpu_active_start)
         intel_obj->prefer_stall_to_blit = true;

      mark_buffer_valid_data(intel_obj, offset, size);
      return;
   }

   busy =
      brw_bo_busy(intel_obj->buffer) ||
      brw_batch_references(&brw->batch, intel_obj->buffer);

   if (busy) {
      if (size == intel_obj->Base.Size ||
          (intel_obj->valid_data_start >= offset &&
           intel_obj->valid_data_end <= offset + size)) {
         /* Replace the current busy bo so the subdata doesn't stall. */
         brw_bo_unreference(intel_obj->buffer);
         alloc_buffer_object(brw, intel_obj);
      } else if (!intel_obj->prefer_stall_to_blit) {
         perf_debug("Using a blit copy to avoid stalling on "
                    "glBufferSubData(%ld, %ld) (%ldkb) to a busy "
                    "(%d-%d) / valid (%d-%d) buffer object.\n",
                    (long)offset, (long)offset + size, (long)(size/1024),
                    intel_obj->gpu_active_start,
                    intel_obj->gpu_active_end,
                    intel_obj->valid_data_start,
                    intel_obj->valid_data_end);
         struct brw_bo *temp_bo =
            brw_bo_alloc(brw->bufmgr, "subdata temp", size, 64);

         brw_bo_subdata(temp_bo, 0, size, data);

         intel_emit_linear_blit(brw,
                                intel_obj->buffer, offset,
                                temp_bo, 0,
                                size);

         brw_bo_unreference(temp_bo);
         mark_buffer_valid_data(intel_obj, offset, size);
         return;
      } else {
         perf_debug("Stalling on glBufferSubData(%ld, %ld) (%ldkb) to a busy "
                    "(%d-%d) buffer object.  Use glMapBufferRange() to "
                    "avoid this.\n",
                    (long)offset, (long)offset + size, (long)(size/1024),
                    intel_obj->gpu_active_start,
                    intel_obj->gpu_active_end);
         intel_batchbuffer_flush(brw);
      }
   }

   brw_bo_subdata(intel_obj->buffer, offset, size, data);
   mark_buffer_inactive(intel_obj);
   mark_buffer_valid_data(intel_obj, offset, size);
}
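/* A sketch of the app pattern the unsynchronized path above is tuned for
 * (standard GL API; loop bounds and sizes are hypothetical):
 *
 *    for (int i = 0; i < n; i++) {
 *       // Each upload lands entirely past gpu_active_end / valid_data_end,
 *       // so it takes the async memcpy path with no stall and no blit.
 *       glBufferSubData(GL_ARRAY_BUFFER, i * 1024, 1024, chunk[i]);
 *       glDrawArrays(GL_TRIANGLES, 0, 3);
 *    }
 */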
/**
 * The GetBufferSubData() driver hook.
 *
 * Implements glGetBufferSubData(), which copies a subrange of a buffer
 * object into user memory.
 */
static void
brw_get_buffer_subdata(struct gl_context *ctx,
                       GLintptr offset,
                       GLsizeiptr size,
                       GLvoid *data,
                       struct gl_buffer_object *obj)
{
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
   struct brw_context *brw = brw_context(ctx);

   assert(intel_obj);
   if (brw_batch_references(&brw->batch, intel_obj->buffer)) {
      intel_batchbuffer_flush(brw);
   }

   void *map = brw_bo_map(brw, intel_obj->buffer, MAP_READ);

   if (unlikely(!map)) {
      _mesa_error_no_memory(__func__);
      return;
   }

   memcpy(data, map + offset, size);
   brw_bo_unmap(intel_obj->buffer);

   mark_buffer_inactive(intel_obj);
}
/**
 * The MapBufferRange() driver hook.
 *
 * This implements both glMapBufferRange() and glMapBuffer().
 *
 * The goal of this extension is to allow apps to accumulate their rendering
 * at the same time as they accumulate their buffer object.  Without it,
 * you'd end up blocking on execution of rendering every time you mapped
 * the buffer to put new data in.
 *
 * We support it in 3 ways: If unsynchronized, then don't bother
 * flushing the batchbuffer before mapping the buffer, which can save blocking
 * in many cases.  If we would still block, and they allow the whole buffer
 * to be invalidated, then just allocate a new buffer to replace the old one.
 * If not, and we'd block, and they allow the subrange of the buffer to be
 * invalidated, then we can make a new little BO, let them write into that,
 * and blit it into the real BO at unmap time.
 */
static void *
brw_map_buffer_range(struct gl_context *ctx,
                     GLintptr offset, GLsizeiptr length,
                     GLbitfield access, struct gl_buffer_object *obj,
                     gl_map_buffer_index index)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);

   STATIC_ASSERT(GL_MAP_UNSYNCHRONIZED_BIT == MAP_ASYNC);
   STATIC_ASSERT(GL_MAP_WRITE_BIT == MAP_WRITE);
   STATIC_ASSERT(GL_MAP_READ_BIT == MAP_READ);
   STATIC_ASSERT(GL_MAP_PERSISTENT_BIT == MAP_PERSISTENT);
   STATIC_ASSERT(GL_MAP_COHERENT_BIT == MAP_COHERENT);
   assert((access & MAP_INTERNAL_MASK) == 0);

   /* _mesa_MapBufferRange (GL entrypoint) sets these, but the vbo module also
    * internally uses our functions directly.
    */
   obj->Mappings[index].Offset = offset;
   obj->Mappings[index].Length = length;
   obj->Mappings[index].AccessFlags = access;

   if (intel_obj->buffer == NULL) {
      obj->Mappings[index].Pointer = NULL;
      return NULL;
   }

   /* If the access is synchronized (like a normal buffer mapping), then get
    * things flushed out so the later mapping syncs appropriately through GEM.
    * If the user doesn't care about existing buffer contents and mapping would
    * cause us to block, then throw out the old buffer.
    *
    * If they set INVALIDATE_BUFFER, we can pitch the current contents to
    * achieve the required synchronization.
    */
   if (!(access & GL_MAP_UNSYNCHRONIZED_BIT)) {
      if (brw_batch_references(&brw->batch, intel_obj->buffer)) {
         if (access & GL_MAP_INVALIDATE_BUFFER_BIT) {
            brw_bo_unreference(intel_obj->buffer);
            alloc_buffer_object(brw, intel_obj);
         } else {
            perf_debug("Stalling on the GPU for mapping a busy buffer "
                       "object\n");
            intel_batchbuffer_flush(brw);
         }
      } else if (brw_bo_busy(intel_obj->buffer) &&
                 (access & GL_MAP_INVALIDATE_BUFFER_BIT)) {
         brw_bo_unreference(intel_obj->buffer);
         alloc_buffer_object(brw, intel_obj);
      }
   }

   if (access & MAP_WRITE)
      mark_buffer_valid_data(intel_obj, offset, length);

   /* If the user is mapping a range of an active buffer object but
    * doesn't require the current contents of that range, make a new
    * BO, and we'll copy what they put in there out at unmap or
    * flush time.
    *
    * That is, unless they're looking for a persistent mapping -- we would
    * need to do blits in the MemoryBarrier call, and it's easier to just do a
    * GPU stall and do a mapping.
    */
   if (!(access & (GL_MAP_UNSYNCHRONIZED_BIT | GL_MAP_PERSISTENT_BIT)) &&
       (access & GL_MAP_INVALIDATE_RANGE_BIT) &&
       brw_bo_busy(intel_obj->buffer)) {
      /* Ensure that the base alignment of the allocation meets the alignment
       * guarantees the driver has advertised to the application.
       */
      const unsigned alignment = ctx->Const.MinMapBufferAlignment;

      intel_obj->map_extra[index] = (uintptr_t) offset % alignment;
      intel_obj->range_map_bo[index] = brw_bo_alloc(brw->bufmgr,
                                                    "BO blit temp",
                                                    length +
                                                    intel_obj->map_extra[index],
                                                    alignment);

      void *map = brw_bo_map(brw, intel_obj->range_map_bo[index], access);
      obj->Mappings[index].Pointer = map + intel_obj->map_extra[index];
      return obj->Mappings[index].Pointer;
   }

   void *map = brw_bo_map(brw, intel_obj->buffer, access);
   if (!(access & GL_MAP_UNSYNCHRONIZED_BIT)) {
      mark_buffer_inactive(intel_obj);
   }

   obj->Mappings[index].Pointer = map + offset;
   return obj->Mappings[index].Pointer;
}
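/* An illustrative mapping that takes the temporary-BO path above: the BO is
 * busy, the caller discards the range, and the access is neither
 * unsynchronized nor persistent (standard GL API; offsets are hypothetical):
 *
 *    void *p = glMapBufferRange(GL_ARRAY_BUFFER, 1024, 512,
 *                               GL_MAP_WRITE_BIT |
 *                               GL_MAP_INVALIDATE_RANGE_BIT);
 *    memcpy(p, src, 512);
 *    glUnmapBuffer(GL_ARRAY_BUFFER);  // brw_unmap_buffer() blits the
 *                                     // temporary BO back at offset 1024
 */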
/**
 * The FlushMappedBufferRange() driver hook.
 *
 * Implements glFlushMappedBufferRange(), which signifies that modifications
 * have been made to a range of a mapped buffer, and it should be flushed.
 *
 * This is only used for buffers mapped with GL_MAP_FLUSH_EXPLICIT_BIT.
 *
 * Ideally we'd use a BO to avoid taking up cache space for the temporary
 * data, but FlushMappedBufferRange may be followed by further writes to
 * the pointer, so we would have to re-map after emitting our blit, which
 * would defeat the point.
 */
static void
brw_flush_mapped_buffer_range(struct gl_context *ctx,
                              GLintptr offset, GLsizeiptr length,
                              struct gl_buffer_object *obj,
                              gl_map_buffer_index index)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(obj->Mappings[index].AccessFlags & GL_MAP_FLUSH_EXPLICIT_BIT);

   /* If we gave a direct mapping of the buffer instead of using a temporary,
    * then there's nothing to do.
    */
   if (intel_obj->range_map_bo[index] == NULL)
      return;

   if (length == 0)
      return;

   /* Note that we're not unmapping our buffer while executing the blit.  We
    * need to have a mapping still at the end of this call, since the user
    * gets to make further modifications and glFlushMappedBufferRange() calls.
    * This is safe, because:
    *
    * - On LLC platforms, we're using a CPU mapping that's coherent with the
    *   GPU (except for the render caches), so the kernel doesn't need to do
    *   any flushing work for us except for what happens at batch exec time
    *   anyway.
    *
    * - On non-LLC platforms, we're using a GTT mapping that writes directly
    *   to system memory (except for the chipset cache that gets flushed at
    *   batch exec time).
    *
    * In both cases we don't need to stall for the previous blit to complete
    * so we can re-map (and we definitely don't want to, since that would be
    * slow): If the user edits a part of their buffer that's previously been
    * blitted, then our lack of synchronization is fine, because either
    * they'll get some too-new data in the first blit and not do another blit
    * of that area (but in that case the results are undefined), or they'll do
    * another blit of that area and the complete newer data will land the
    * second time.
    */
   intel_emit_linear_blit(brw,
                          intel_obj->buffer,
                          obj->Mappings[index].Offset + offset,
                          intel_obj->range_map_bo[index],
                          intel_obj->map_extra[index] + offset,
                          length);
   mark_buffer_gpu_usage(intel_obj,
                         obj->Mappings[index].Offset + offset,
                         length);
   brw_emit_mi_flush(brw);
}
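/* An illustrative usage sketch for this explicit-flush path (standard GL
 * API; sizes and offsets are hypothetical).  Only the flushed subranges get
 * blitted back into the real BO:
 *
 *    void *p = glMapBufferRange(GL_ARRAY_BUFFER, 0, 4096,
 *                               GL_MAP_WRITE_BIT |
 *                               GL_MAP_INVALIDATE_RANGE_BIT |
 *                               GL_MAP_FLUSH_EXPLICIT_BIT);
 *    memcpy(p, src, 256);
 *    glFlushMappedBufferRange(GL_ARRAY_BUFFER, 0, 256);
 *    memcpy((char *)p + 2048, src2, 256);
 *    glFlushMappedBufferRange(GL_ARRAY_BUFFER, 2048, 256);
 *    glUnmapBuffer(GL_ARRAY_BUFFER);
 */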
/**
 * The UnmapBuffer() driver hook.
 *
 * Implements glUnmapBuffer().
 */
static GLboolean
brw_unmap_buffer(struct gl_context *ctx,
                 struct gl_buffer_object *obj,
                 gl_map_buffer_index index)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);
   assert(obj->Mappings[index].Pointer);
   if (intel_obj->range_map_bo[index] != NULL) {
      brw_bo_unmap(intel_obj->range_map_bo[index]);

      if (!(obj->Mappings[index].AccessFlags & GL_MAP_FLUSH_EXPLICIT_BIT)) {
         intel_emit_linear_blit(brw,
                                intel_obj->buffer, obj->Mappings[index].Offset,
                                intel_obj->range_map_bo[index],
                                intel_obj->map_extra[index],
                                obj->Mappings[index].Length);
         mark_buffer_gpu_usage(intel_obj, obj->Mappings[index].Offset,
                               obj->Mappings[index].Length);
      }

      /* Since we've emitted some blits to buffers that will (likely) be used
       * in rendering operations in other cache domains in this batch, emit a
       * flush.  Once again, we wish for a domain tracker in libdrm to cover
       * usage inside of a batchbuffer.
       */
      brw_emit_mi_flush(brw);

      brw_bo_unreference(intel_obj->range_map_bo[index]);
      intel_obj->range_map_bo[index] = NULL;
   } else if (intel_obj->buffer != NULL) {
      brw_bo_unmap(intel_obj->buffer);
   }
   obj->Mappings[index].Pointer = NULL;
   obj->Mappings[index].Offset = 0;
   obj->Mappings[index].Length = 0;

   return true;
}
/**
 * Gets a pointer to the object's BO, and marks the given range as being used
 * on the GPU.
 *
 * Anywhere that uses buffer objects in the pipeline should be using this to
 * mark the range of the buffer that is being accessed by the pipeline.
 */
struct brw_bo *
intel_bufferobj_buffer(struct brw_context *brw,
                       struct intel_buffer_object *intel_obj,
                       uint32_t offset, uint32_t size, bool write)
{
   /* This is needed so that things like transform feedback and texture buffer
    * objects that need a BO but don't want to check that they exist for
    * draw-time validation can just always get a BO from a GL buffer object.
    */
   if (intel_obj->buffer == NULL)
      alloc_buffer_object(brw, intel_obj);

   mark_buffer_gpu_usage(intel_obj, offset, size);

   /* If writing, (conservatively) mark this section as having valid data. */
   if (write)
      mark_buffer_valid_data(intel_obj, offset, size);

   return intel_obj->buffer;
}
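/* A hypothetical draw-time caller (the binding variables are illustrative,
 * not an actual Mesa structure reference):
 *
 *    struct brw_bo *bo =
 *       intel_bufferobj_buffer(brw, intel_obj, binding_offset, binding_size,
 *                              false);
 *    // bo's [binding_offset, binding_offset + binding_size) range is now
 *    // marked as GPU-active for the stall/blit heuristics above.
 */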
/**
 * The CopyBufferSubData() driver hook.
 *
 * Implements glCopyBufferSubData(), which copies a portion of one buffer
 * object's data to another.  Independent source and destination offsets
 * are allowed.
 */
static void
brw_copy_buffer_subdata(struct gl_context *ctx,
                        struct gl_buffer_object *src,
                        struct gl_buffer_object *dst,
                        GLintptr read_offset, GLintptr write_offset,
                        GLsizeiptr size)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_buffer_object *intel_src = intel_buffer_object(src);
   struct intel_buffer_object *intel_dst = intel_buffer_object(dst);
   struct brw_bo *src_bo, *dst_bo;

   if (size == 0)
      return;

   dst_bo = intel_bufferobj_buffer(brw, intel_dst, write_offset, size, true);
   src_bo = intel_bufferobj_buffer(brw, intel_src, read_offset, size, false);

   intel_emit_linear_blit(brw,
                          dst_bo, write_offset,
                          src_bo, read_offset, size);

   /* Since we've emitted some blits to buffers that will (likely) be used
    * in rendering operations in other cache domains in this batch, emit a
    * flush.  Once again, we wish for a domain tracker in libdrm to cover
    * usage inside of a batchbuffer.
    */
   brw_emit_mi_flush(brw);
}
void
intelInitBufferObjectFuncs(struct dd_function_table *functions)
{
   functions->NewBufferObject = brw_new_buffer_object;
   functions->DeleteBuffer = brw_delete_buffer;
   functions->BufferData = brw_buffer_data;
   functions->BufferSubData = brw_buffer_subdata;
   functions->GetBufferSubData = brw_get_buffer_subdata;
   functions->MapBufferRange = brw_map_buffer_range;
   functions->FlushMappedBufferRange = brw_flush_mapped_buffer_range;
   functions->UnmapBuffer = brw_unmap_buffer;
   functions->CopyBufferSubData = brw_copy_buffer_subdata;
}