/*
 * Copyright 2003 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
/**
 * @file intel_buffer_objects.c
 *
 * This provides core GL buffer object functionality.
 */
#include "main/imports.h"
#include "main/mtypes.h"
#include "main/macros.h"
#include "main/bufferobj.h"

#include "brw_context.h"
#include "intel_blit.h"
#include "intel_buffer_objects.h"
#include "intel_batchbuffer.h"
static void
mark_buffer_gpu_usage(struct intel_buffer_object *intel_obj,
                      uint32_t offset, uint32_t size)
{
   intel_obj->gpu_active_start = MIN2(intel_obj->gpu_active_start, offset);
   intel_obj->gpu_active_end = MAX2(intel_obj->gpu_active_end, offset + size);
}

static void
mark_buffer_inactive(struct intel_buffer_object *intel_obj)
{
   intel_obj->gpu_active_start = ~0;
   intel_obj->gpu_active_end = 0;
}
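
/* Example (illustrative, not from the surrounding driver code):
 * gpu_active_start/end conservatively track, as a single interval, the
 * range of the BO the GPU may be using.  For instance,
 *
 *    mark_buffer_gpu_usage(obj, 256, 64);  // GPU touches [256, 320)
 *    mark_buffer_gpu_usage(obj, 64, 32);   // GPU touches [64, 96)
 *
 * leaves the tracked range at [64, 320), one interval covering both uses,
 * so CPU writes entirely outside it can skip synchronization (see
 * brw_buffer_subdata() below).
 */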
/** Allocates a new brw_bo to store the data for the buffer object. */
static void
alloc_buffer_object(struct brw_context *brw,
                    struct intel_buffer_object *intel_obj)
{
   intel_obj->buffer = brw_bo_alloc(brw->bufmgr, "bufferobj",
                                    intel_obj->Base.Size, 64);

   /* The buffer might be bound as a uniform buffer, need to update it.
    */
   if (intel_obj->Base.UsageHistory & USAGE_UNIFORM_BUFFER)
      brw->ctx.NewDriverState |= BRW_NEW_UNIFORM_BUFFER;
   if (intel_obj->Base.UsageHistory & USAGE_SHADER_STORAGE_BUFFER)
      brw->ctx.NewDriverState |= BRW_NEW_UNIFORM_BUFFER;
   if (intel_obj->Base.UsageHistory & USAGE_TEXTURE_BUFFER)
      brw->ctx.NewDriverState |= BRW_NEW_TEXTURE_BUFFER;
   if (intel_obj->Base.UsageHistory & USAGE_ATOMIC_COUNTER_BUFFER)
      brw->ctx.NewDriverState |= BRW_NEW_ATOMIC_BUFFER;

   mark_buffer_inactive(intel_obj);
}
static void
release_buffer(struct intel_buffer_object *intel_obj)
{
   brw_bo_unreference(intel_obj->buffer);
   intel_obj->buffer = NULL;
}
/**
 * The NewBufferObject() driver hook.
 *
 * Allocates a new intel_buffer_object structure and initializes it.
 *
 * There is some duplication between mesa's bufferobjects and our
 * bufmgr buffers.  Both have an integer handle and a hashtable to
 * look up an opaque structure.  It would be nice if the handles and
 * internal structure were somehow shared.
 */
static struct gl_buffer_object *
brw_new_buffer_object(struct gl_context * ctx, GLuint name)
{
   struct intel_buffer_object *obj = CALLOC_STRUCT(intel_buffer_object);
   if (!obj) {
      _mesa_error_no_memory(__func__);
      return NULL;
   }

   _mesa_initialize_buffer_object(ctx, &obj->Base, name);

   obj->buffer = NULL;

   return &obj->Base;
}
/**
 * The DeleteBuffer() driver hook.
 *
 * Deletes a single OpenGL buffer object.  Used by glDeleteBuffers().
 */
static void
brw_delete_buffer(struct gl_context * ctx, struct gl_buffer_object *obj)
{
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);

   /* Buffer objects are automatically unmapped when deleting according
    * to the spec, but Mesa doesn't do UnmapBuffer for us at context destroy
    * (though it does if you call glDeleteBuffers).
    */
   _mesa_buffer_unmap_all_mappings(ctx, obj);

   brw_bo_unreference(intel_obj->buffer);
   _mesa_delete_buffer_object(ctx, obj);
}
/**
 * The BufferData() driver hook.
 *
 * Implements glBufferData(), which recreates a buffer object's data store
 * and populates it with the given data, if present.
 *
 * Any data that was previously stored in the buffer object is lost.
 *
 * \return true for success, false if out of memory
 */
static GLboolean
brw_buffer_data(struct gl_context *ctx,
                GLenum target,
                GLsizeiptrARB size,
                const GLvoid *data,
                GLenum usage,
                GLbitfield storageFlags,
                struct gl_buffer_object *obj)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   /* Part of the ABI, but this function doesn't use it.
    */
   (void) target;

   intel_obj->Base.Size = size;
   intel_obj->Base.Usage = usage;
   intel_obj->Base.StorageFlags = storageFlags;

   assert(!obj->Mappings[MAP_USER].Pointer); /* Mesa should have unmapped it */
   assert(!obj->Mappings[MAP_INTERNAL].Pointer);

   if (intel_obj->buffer != NULL)
      release_buffer(intel_obj);

   if (size != 0) {
      alloc_buffer_object(brw, intel_obj);
      if (!intel_obj->buffer)
         return false;

      if (data != NULL)
         brw_bo_subdata(intel_obj->buffer, 0, size, data);
   }

   return true;
}
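
/* Example (illustrative, hypothetical app-side usage): since BufferData
 * drops the old BO and allocates a fresh one, an application can "orphan"
 * a busy buffer rather than stall on it:
 *
 *    glBufferData(GL_ARRAY_BUFFER, size, NULL, GL_STREAM_DRAW); // fresh BO
 *    glBufferSubData(GL_ARRAY_BUFFER, 0, size, new_data);       // no stall
 *
 * The old BO stays alive for as long as the batch still references it;
 * release_buffer() only drops this buffer object's reference.
 */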
/**
 * The BufferSubData() driver hook.
 *
 * Implements glBufferSubData(), which replaces a portion of the data in a
 * buffer object.
 *
 * If the data range specified by (size + offset) extends beyond the end of
 * the buffer or if data is NULL, no copy is performed.
 */
static void
brw_buffer_subdata(struct gl_context *ctx,
                   GLintptrARB offset,
                   GLsizeiptrARB size,
                   const GLvoid *data,
                   struct gl_buffer_object *obj)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
   bool busy;

   if (size == 0)
      return;

   assert(intel_obj);

   /* See if we can write the data into the user's BO without
    * synchronization.  This avoids GPU stalls in unfortunately common user
    * patterns (uploading sequentially into a BO, with draw calls in between
    * each upload).
    *
    * Once we've hit this path, we mark this GL BO as preferring stalling to
    * blits, so that we can hopefully hit this path again in the future
    * (otherwise, an app that might occasionally stall but mostly not will
    * end up with blitting all the time, at the cost of bandwidth).
    */
   if (offset + size <= intel_obj->gpu_active_start ||
       intel_obj->gpu_active_end <= offset) {
      void *map = brw_bo_map(brw, intel_obj->buffer, MAP_WRITE | MAP_ASYNC);
      memcpy(map + offset, data, size);
      brw_bo_unmap(intel_obj->buffer);

      if (intel_obj->gpu_active_end > intel_obj->gpu_active_start)
         intel_obj->prefer_stall_to_blit = true;
      return;
   }

   busy =
      brw_bo_busy(intel_obj->buffer) ||
      brw_batch_references(&brw->batch, intel_obj->buffer);

   if (busy) {
      if (size == intel_obj->Base.Size) {
         /* Replace the current busy bo so the subdata doesn't stall. */
         brw_bo_unreference(intel_obj->buffer);
         alloc_buffer_object(brw, intel_obj);
      } else if (!intel_obj->prefer_stall_to_blit) {
         perf_debug("Using a blit copy to avoid stalling on "
                    "glBufferSubData(%ld, %ld) (%ldkb) to a busy "
                    "(%d-%d) buffer object.\n",
                    (long)offset, (long)offset + size, (long)(size/1024),
                    intel_obj->gpu_active_start,
                    intel_obj->gpu_active_end);
         struct brw_bo *temp_bo =
            brw_bo_alloc(brw->bufmgr, "subdata temp", size, 64);

         brw_bo_subdata(temp_bo, 0, size, data);

         intel_emit_linear_blit(brw,
                                intel_obj->buffer, offset,
                                temp_bo, 0,
                                size);

         brw_bo_unreference(temp_bo);
         return;
      } else {
         perf_debug("Stalling on glBufferSubData(%ld, %ld) (%ldkb) to a busy "
                    "(%d-%d) buffer object.  Use glMapBufferRange() to "
                    "avoid this.\n",
                    (long)offset, (long)offset + size, (long)(size/1024),
                    intel_obj->gpu_active_start,
                    intel_obj->gpu_active_end);
         intel_batchbuffer_flush(brw);
      }
   }

   brw_bo_subdata(intel_obj->buffer, offset, size, data);
   mark_buffer_inactive(intel_obj);
}
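
/* Example (illustrative, hypothetical app-side usage): the sequential
 * upload pattern the unsynchronized fast path above targets.  Each write
 * lands outside the tracked busy range, so it takes the MAP_ASYNC memcpy
 * path with no stall, blit, or flush:
 *
 *    glBufferSubData(GL_ARRAY_BUFFER, 0,    4096, chunk0);
 *    glDrawArrays(GL_TRIANGLES, 0, n);  // GPU now busy with [0, 4096)
 *    glBufferSubData(GL_ARRAY_BUFFER, 4096, 4096, chunk1); // async write
 *    glDrawArrays(GL_TRIANGLES, n, n);  // busy range grows with each draw
 */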
/**
 * The GetBufferSubData() driver hook.
 *
 * Implements glGetBufferSubData(), which copies a subrange of a buffer
 * object into user memory.
 */
static void
brw_get_buffer_subdata(struct gl_context *ctx,
                       GLintptrARB offset,
                       GLsizeiptrARB size,
                       GLvoid *data,
                       struct gl_buffer_object *obj)
{
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
   struct brw_context *brw = brw_context(ctx);

   assert(intel_obj);
   if (brw_batch_references(&brw->batch, intel_obj->buffer)) {
      intel_batchbuffer_flush(brw);
   }

   void *map = brw_bo_map(brw, intel_obj->buffer, MAP_READ);

   if (unlikely(!map)) {
      _mesa_error_no_memory(__func__);
      return;
   }

   memcpy(data, map + offset, size);
   brw_bo_unmap(intel_obj->buffer);

   mark_buffer_inactive(intel_obj);
}
/**
 * The MapBufferRange() driver hook.
 *
 * This implements both glMapBufferRange() and glMapBuffer().
 *
 * The goal of this extension is to allow apps to accumulate their rendering
 * at the same time as they accumulate their buffer object.  Without it,
 * you'd end up blocking on execution of rendering every time you mapped
 * the buffer to put new data in.
 *
 * We support it in 3 ways: If unsynchronized, then don't bother
 * flushing the batchbuffer before mapping the buffer, which can save blocking
 * in many cases.  If we would still block, and they allow the whole buffer
 * to be invalidated, then just allocate a new buffer to replace the old one.
 * If not, and we'd block, and they allow the subrange of the buffer to be
 * invalidated, then we can make a new little BO, let them write into that,
 * and blit it into the real BO at unmap time.
 */
static void *
brw_map_buffer_range(struct gl_context *ctx,
                     GLintptr offset, GLsizeiptr length,
                     GLbitfield access, struct gl_buffer_object *obj,
                     gl_map_buffer_index index)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);

   STATIC_ASSERT(GL_MAP_UNSYNCHRONIZED_BIT == MAP_ASYNC);
   STATIC_ASSERT(GL_MAP_WRITE_BIT == MAP_WRITE);
   STATIC_ASSERT(GL_MAP_READ_BIT == MAP_READ);
   STATIC_ASSERT(GL_MAP_PERSISTENT_BIT == MAP_PERSISTENT);
   STATIC_ASSERT(GL_MAP_COHERENT_BIT == MAP_COHERENT);
   assert((access & MAP_INTERNAL_MASK) == 0);

   /* _mesa_MapBufferRange (GL entrypoint) sets these, but the vbo module also
    * internally uses our functions directly.
    */
   obj->Mappings[index].Offset = offset;
   obj->Mappings[index].Length = length;
   obj->Mappings[index].AccessFlags = access;

   if (intel_obj->buffer == NULL) {
      obj->Mappings[index].Pointer = NULL;
      return NULL;
   }

   /* If the access is synchronized (like a normal buffer mapping), then get
    * things flushed out so the later mapping syncs appropriately through GEM.
    * If the user doesn't care about existing buffer contents and mapping
    * would cause us to block, then throw out the old buffer.
    *
    * If they set INVALIDATE_BUFFER, we can pitch the current contents to
    * achieve the required synchronization.
    */
   if (!(access & GL_MAP_UNSYNCHRONIZED_BIT)) {
      if (brw_batch_references(&brw->batch, intel_obj->buffer)) {
         if (access & GL_MAP_INVALIDATE_BUFFER_BIT) {
            brw_bo_unreference(intel_obj->buffer);
            alloc_buffer_object(brw, intel_obj);
         } else {
            perf_debug("Stalling on the GPU for mapping a busy buffer "
                       "object\n");
            intel_batchbuffer_flush(brw);
         }
      } else if (brw_bo_busy(intel_obj->buffer) &&
                 (access & GL_MAP_INVALIDATE_BUFFER_BIT)) {
         brw_bo_unreference(intel_obj->buffer);
         alloc_buffer_object(brw, intel_obj);
      }
   }

   /* If the user is mapping a range of an active buffer object but
    * doesn't require the current contents of that range, make a new
    * BO, and we'll copy what they put in there out at unmap or
    * FlushRange time.
    *
    * That is, unless they're looking for a persistent mapping -- we would
    * need to do blits in the MemoryBarrier call, and it's easier to just do
    * a GPU stall and do a mapping.
    */
   if (!(access & (GL_MAP_UNSYNCHRONIZED_BIT | GL_MAP_PERSISTENT_BIT)) &&
       (access & GL_MAP_INVALIDATE_RANGE_BIT) &&
       brw_bo_busy(intel_obj->buffer)) {
      /* Ensure that the base alignment of the allocation meets the alignment
       * guarantees the driver has advertised to the application.
       */
      const unsigned alignment = ctx->Const.MinMapBufferAlignment;

      intel_obj->map_extra[index] = (uintptr_t) offset % alignment;
      intel_obj->range_map_bo[index] = brw_bo_alloc(brw->bufmgr,
                                                    "BO blit temp",
                                                    length +
                                                    intel_obj->map_extra[index],
                                                    alignment);
      void *map = brw_bo_map(brw, intel_obj->range_map_bo[index], access);
      obj->Mappings[index].Pointer = map + intel_obj->map_extra[index];
      return obj->Mappings[index].Pointer;
   }

   void *map = brw_bo_map(brw, intel_obj->buffer, access);
   if (!(access & GL_MAP_UNSYNCHRONIZED_BIT)) {
      mark_buffer_inactive(intel_obj);
   }

   obj->Mappings[index].Pointer = map + offset;
   return obj->Mappings[index].Pointer;
}
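
/* Example (illustrative, hypothetical app-side usage): a write-only map
 * that would take the temporary-BO path above when the buffer is busy,
 * because the caller invalidates only the mapped subrange:
 *
 *    void *p = glMapBufferRange(GL_ARRAY_BUFFER, 4096, 1024,
 *                               GL_MAP_WRITE_BIT |
 *                               GL_MAP_INVALIDATE_RANGE_BIT);
 *    memcpy(p, data, 1024);
 *    glUnmapBuffer(GL_ARRAY_BUFFER); // blits the temp BO into the real BO
 */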
/**
 * The FlushMappedBufferRange() driver hook.
 *
 * Implements glFlushMappedBufferRange(), which signifies that modifications
 * have been made to a range of a mapped buffer, and it should be flushed.
 *
 * This is only used for buffers mapped with GL_MAP_FLUSH_EXPLICIT_BIT.
 *
 * Ideally we'd use a BO to avoid taking up cache space for the temporary
 * data, but FlushMappedBufferRange may be followed by further writes to
 * the pointer, so we would have to re-map after emitting our blit, which
 * would defeat the point.
 */
static void
brw_flush_mapped_buffer_range(struct gl_context *ctx,
                              GLintptr offset, GLsizeiptr length,
                              struct gl_buffer_object *obj,
                              gl_map_buffer_index index)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(obj->Mappings[index].AccessFlags & GL_MAP_FLUSH_EXPLICIT_BIT);

   /* If we gave a direct mapping of the buffer instead of using a temporary,
    * then there's nothing to do.
    */
   if (intel_obj->range_map_bo[index] == NULL)
      return;

   if (length == 0)
      return;

   /* Note that we're not unmapping our buffer while executing the blit.  We
    * need to have a mapping still at the end of this call, since the user
    * gets to make further modifications and glFlushMappedBufferRange() calls.
    * This is safe, because:
    *
    * - On LLC platforms, we're using a CPU mapping that's coherent with the
    *   GPU (except for the render caches), so the kernel doesn't need to do
    *   any flushing work for us except for what happens at batch exec time
    *   anyway.
    *
    * - On non-LLC platforms, we're using a GTT mapping that writes directly
    *   to system memory (except for the chipset cache that gets flushed at
    *   batch exec time).
    *
    * In both cases we don't need to stall for the previous blit to complete
    * so we can re-map (and we definitely don't want to, since that would be
    * slow): If the user edits a part of their buffer that's previously been
    * blitted, then our lack of synchronization is fine, because either
    * they'll get some too-new data in the first blit and not do another blit
    * of that area (but in that case the results are undefined), or they'll do
    * another blit of that area and the complete newer data will land the
    * second time.
    */
   intel_emit_linear_blit(brw,
                          intel_obj->buffer,
                          obj->Mappings[index].Offset + offset,
                          intel_obj->range_map_bo[index],
                          intel_obj->map_extra[index] + offset,
                          length);
   mark_buffer_gpu_usage(intel_obj,
                         obj->Mappings[index].Offset + offset,
                         length);
   brw_emit_mi_flush(brw);
}
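
/* Example (illustrative, hypothetical app-side usage): explicit flushing
 * against a temporary-BO mapping.  Each flush blits only the dirtied
 * subrange back into the real BO, and the final unmap emits no extra blit
 * because GL_MAP_FLUSH_EXPLICIT_BIT was set:
 *
 *    void *p = glMapBufferRange(GL_ARRAY_BUFFER, 0, 8192,
 *                               GL_MAP_WRITE_BIT |
 *                               GL_MAP_INVALIDATE_RANGE_BIT |
 *                               GL_MAP_FLUSH_EXPLICIT_BIT);
 *    memcpy(p, a, 4096);
 *    glFlushMappedBufferRange(GL_ARRAY_BUFFER, 0, 4096);    // blit 1st half
 *    memcpy((char *) p + 4096, b, 4096);
 *    glFlushMappedBufferRange(GL_ARRAY_BUFFER, 4096, 4096); // blit 2nd half
 *    glUnmapBuffer(GL_ARRAY_BUFFER);
 */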
/**
 * The UnmapBuffer() driver hook.
 *
 * Implements glUnmapBuffer().
 */
static GLboolean
brw_unmap_buffer(struct gl_context *ctx,
                 struct gl_buffer_object *obj,
                 gl_map_buffer_index index)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);
   assert(obj->Mappings[index].Pointer);
   if (intel_obj->range_map_bo[index] != NULL) {
      brw_bo_unmap(intel_obj->range_map_bo[index]);

      if (!(obj->Mappings[index].AccessFlags & GL_MAP_FLUSH_EXPLICIT_BIT)) {
         intel_emit_linear_blit(brw,
                                intel_obj->buffer, obj->Mappings[index].Offset,
                                intel_obj->range_map_bo[index],
                                intel_obj->map_extra[index],
                                obj->Mappings[index].Length);
         mark_buffer_gpu_usage(intel_obj, obj->Mappings[index].Offset,
                               obj->Mappings[index].Length);
      }

      /* Since we've emitted some blits to buffers that will (likely) be used
       * in rendering operations in other cache domains in this batch, emit a
       * flush.  Once again, we wish for a domain tracker in libdrm to cover
       * usage inside of a batchbuffer.
       */
      brw_emit_mi_flush(brw);

      brw_bo_unreference(intel_obj->range_map_bo[index]);
      intel_obj->range_map_bo[index] = NULL;
   } else if (intel_obj->buffer != NULL) {
      brw_bo_unmap(intel_obj->buffer);
   }
   obj->Mappings[index].Pointer = NULL;
   obj->Mappings[index].Offset = 0;
   obj->Mappings[index].Length = 0;

   return true;
}
/**
 * Gets a pointer to the object's BO, and marks the given range as being used
 * on the GPU.
 *
 * Anywhere that uses buffer objects in the pipeline should be using this to
 * mark the range of the buffer that is being accessed by the pipeline.
 */
struct brw_bo *
intel_bufferobj_buffer(struct brw_context *brw,
                       struct intel_buffer_object *intel_obj,
                       uint32_t offset, uint32_t size)
{
   /* This is needed so that things like transform feedback and texture
    * buffer objects that need a BO but don't want to check that they exist
    * for draw-time validation can just always get a BO from a GL buffer
    * object.
    */
   if (intel_obj->buffer == NULL)
      alloc_buffer_object(brw, intel_obj);

   mark_buffer_gpu_usage(intel_obj, offset, size);

   return intel_obj->buffer;
}
/**
 * The CopyBufferSubData() driver hook.
 *
 * Implements glCopyBufferSubData(), which copies a portion of one buffer
 * object's data to another.  Independent source and destination offsets
 * are allowed.
 */
static void
brw_copy_buffer_subdata(struct gl_context *ctx,
                        struct gl_buffer_object *src,
                        struct gl_buffer_object *dst,
                        GLintptr read_offset, GLintptr write_offset,
                        GLsizeiptr size)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_buffer_object *intel_src = intel_buffer_object(src);
   struct intel_buffer_object *intel_dst = intel_buffer_object(dst);
   struct brw_bo *src_bo, *dst_bo;

   if (size == 0)
      return;

   dst_bo = intel_bufferobj_buffer(brw, intel_dst, write_offset, size);
   src_bo = intel_bufferobj_buffer(brw, intel_src, read_offset, size);

   intel_emit_linear_blit(brw,
                          dst_bo, write_offset,
                          src_bo, read_offset, size);

   /* Since we've emitted some blits to buffers that will (likely) be used
    * in rendering operations in other cache domains in this batch, emit a
    * flush.  Once again, we wish for a domain tracker in libdrm to cover
    * usage inside of a batchbuffer.
    */
   brw_emit_mi_flush(brw);
}
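
/* Example (illustrative, hypothetical app-side usage): a GPU-side copy
 * between two buffer objects that never maps either one, so the CPU does
 * not stall even if the source is still in flight:
 *
 *    glBindBuffer(GL_COPY_READ_BUFFER, src_name);
 *    glBindBuffer(GL_COPY_WRITE_BUFFER, dst_name);
 *    glCopyBufferSubData(GL_COPY_READ_BUFFER, GL_COPY_WRITE_BUFFER,
 *                        0, 0, size);
 */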
void
intelInitBufferObjectFuncs(struct dd_function_table *functions)
{
   functions->NewBufferObject = brw_new_buffer_object;
   functions->DeleteBuffer = brw_delete_buffer;
   functions->BufferData = brw_buffer_data;
   functions->BufferSubData = brw_buffer_subdata;
   functions->GetBufferSubData = brw_get_buffer_subdata;
   functions->MapBufferRange = brw_map_buffer_range;
   functions->FlushMappedBufferRange = brw_flush_mapped_buffer_range;
   functions->UnmapBuffer = brw_unmap_buffer;
   functions->CopyBufferSubData = brw_copy_buffer_subdata;
}