/**************************************************************************
 *
 * Copyright 2003 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/**
 * @file intel_buffer_objects.c
 *
 * This provides core GL buffer object functionality.
 */

#include "main/imports.h"
#include "main/mtypes.h"
#include "main/macros.h"
#include "main/bufferobj.h"

#include "brw_context.h"
#include "intel_blit.h"
#include "intel_buffer_objects.h"
#include "intel_batchbuffer.h"

/**
 * Map a buffer object; issue performance warnings if mapping causes stalls.
 *
 * This matches the drm_intel_bo_map API, but takes an additional human-readable
 * name for the buffer object to use in the performance debug message.
 */
int
brw_bo_map(struct brw_context *brw,
           drm_intel_bo *bo, int write_enable,
           const char *bo_name)
{
   if (likely(!brw->perf_debug) || !drm_intel_bo_busy(bo))
      return drm_intel_bo_map(bo, write_enable);

   double start_time = get_time();

   int ret = drm_intel_bo_map(bo, write_enable);

   perf_debug("CPU mapping a busy %s BO stalled and took %.03f ms.\n",
              bo_name, (get_time() - start_time) * 1000);

   return ret;
}

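/*
 * Usage sketch (hypothetical caller, not from this file): paths that need
 * CPU access to a possibly-busy BO pass a name so any stall shows up in
 * INTEL_DEBUG=perf output, e.g.:
 *
 *    if (brw_bo_map(brw, bo, false, "query object") == 0) {
 *       read_results(bo->virtual);   // 'read_results' is illustrative only
 *       drm_intel_bo_unmap(bo);
 *    }
 */
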
int
brw_bo_map_gtt(struct brw_context *brw, drm_intel_bo *bo, const char *bo_name)
{
   if (likely(!brw->perf_debug) || !drm_intel_bo_busy(bo))
      return drm_intel_gem_bo_map_gtt(bo);

   double start_time = get_time();

   int ret = drm_intel_gem_bo_map_gtt(bo);

   perf_debug("GTT mapping a busy %s BO stalled and took %.03f ms.\n",
              bo_name, (get_time() - start_time) * 1000);

   return ret;
}

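/*
 * Callers choose between the CPU map above and this GTT map based on
 * coherency: on non-LLC platforms a CPU map isn't coherent with the GPU, so
 * write-only and persistent mappings go through the GTT aperture instead --
 * see brw_map_buffer_range() below.
 */
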
static void
mark_buffer_gpu_usage(struct intel_buffer_object *intel_obj,
                      uint32_t offset, uint32_t size)
{
   intel_obj->gpu_active_start = MIN2(intel_obj->gpu_active_start, offset);
   intel_obj->gpu_active_end = MAX2(intel_obj->gpu_active_end, offset + size);
}

static void
mark_buffer_inactive(struct intel_buffer_object *intel_obj)
{
   intel_obj->gpu_active_start = ~0;
   intel_obj->gpu_active_end = 0;
}

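/*
 * These two helpers track a single conservative [gpu_active_start,
 * gpu_active_end) byte range that the GPU may still be reading or writing.
 * "Inactive" is the empty interval (start = ~0, end = 0), so the first
 * mark_buffer_gpu_usage() snaps both bounds to the accessed range: marking
 * (offset = 256, size = 64) on an inactive buffer yields [256, 320), and a
 * later (0, 16) widens it to [0, 320).
 */
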
/** Allocates a new drm_intel_bo to store the data for the buffer object. */
static void
alloc_buffer_object(struct brw_context *brw,
                    struct intel_buffer_object *intel_obj)
{
   intel_obj->buffer = drm_intel_bo_alloc(brw->bufmgr, "bufferobj",
                                          intel_obj->Base.Size, 64);

   /* The buffer might be bound as a uniform buffer, texture buffer, or
    * atomic counter buffer; flag the dependent state so it gets updated.
    */
   if (intel_obj->Base.UsageHistory & USAGE_UNIFORM_BUFFER)
      brw->ctx.NewDriverState |= BRW_NEW_UNIFORM_BUFFER;
   if (intel_obj->Base.UsageHistory & USAGE_TEXTURE_BUFFER)
      brw->ctx.NewDriverState |= BRW_NEW_TEXTURE_BUFFER;
   if (intel_obj->Base.UsageHistory & USAGE_ATOMIC_COUNTER_BUFFER)
      brw->ctx.NewDriverState |= BRW_NEW_ATOMIC_BUFFER;

   mark_buffer_inactive(intel_obj);
}

static void
release_buffer(struct intel_buffer_object *intel_obj)
{
   drm_intel_bo_unreference(intel_obj->buffer);
   intel_obj->buffer = NULL;
}

/**
 * The NewBufferObject() driver hook.
 *
 * Allocates a new intel_buffer_object structure and initializes it.
 *
 * There is some duplication between mesa's bufferobjects and our
 * bufmgr buffers.  Both have an integer handle and a hashtable to
 * look up an opaque structure.  It would be nice if the handles and
 * internal structures were somehow shared.
 */
static struct gl_buffer_object *
brw_new_buffer_object(struct gl_context * ctx, GLuint name)
{
   struct intel_buffer_object *obj = CALLOC_STRUCT(intel_buffer_object);
   if (!obj) {
      _mesa_error_no_memory(__func__);
      return NULL;
   }

   _mesa_initialize_buffer_object(ctx, &obj->Base, name);

   obj->buffer = NULL;

   return &obj->Base;
}

/**
 * The DeleteBuffer() driver hook.
 *
 * Deletes a single OpenGL buffer object.  Used by glDeleteBuffers().
 */
static void
brw_delete_buffer(struct gl_context * ctx, struct gl_buffer_object *obj)
{
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);

   /* Buffer objects are automatically unmapped when deleting according
    * to the spec, but Mesa doesn't do UnmapBuffer for us at context destroy
    * time (though it does if you call glDeleteBuffers).
    */
   _mesa_buffer_unmap_all_mappings(ctx, obj);

   drm_intel_bo_unreference(intel_obj->buffer);
   free(intel_obj);
}

/**
 * The BufferData() driver hook.
 *
 * Implements glBufferData(), which recreates a buffer object's data store
 * and populates it with the given data, if present.
 *
 * Any data that was previously stored in the buffer object is lost.
 *
 * \return true for success, false if out of memory
 */
static GLboolean
brw_buffer_data(struct gl_context *ctx,
                GLenum target,
                GLsizeiptrARB size,
                const GLvoid *data,
                GLenum usage,
                GLbitfield storageFlags,
                struct gl_buffer_object *obj)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   /* Part of the ABI, but this function doesn't use it. */
   (void) target;

   intel_obj->Base.Size = size;
   intel_obj->Base.Usage = usage;
   intel_obj->Base.StorageFlags = storageFlags;

   assert(!obj->Mappings[MAP_USER].Pointer); /* Mesa should have unmapped it */
   assert(!obj->Mappings[MAP_INTERNAL].Pointer);

   if (intel_obj->buffer != NULL)
      release_buffer(intel_obj);

   if (size != 0) {
      alloc_buffer_object(brw, intel_obj);
      if (!intel_obj->buffer)
         return false;

      if (data != NULL)
         drm_intel_bo_subdata(intel_obj->buffer, 0, size, data);
   }

   return true;
}

/**
 * The BufferSubData() driver hook.
 *
 * Implements glBufferSubData(), which replaces a portion of the data in a
 * buffer object.
 *
 * If the data range specified by (size + offset) extends beyond the end of
 * the buffer or if data is NULL, no copy is performed.
 */
static void
brw_buffer_subdata(struct gl_context *ctx,
                   GLintptrARB offset,
                   GLsizeiptrARB size,
                   const GLvoid *data,
                   struct gl_buffer_object *obj)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
   bool busy;

   if (size == 0)
      return;

   assert(intel_obj);

   /* See if we can write the data into the user's BO without synchronizing.
    * This avoids GPU stalls in unfortunately common usage patterns (uploading
    * sequentially into a BO, with draw calls in between each upload).
    *
    * Once we've hit this path, we mark this GL BO as preferring stalling to
    * blits, so that we can hopefully hit this path again in the future
    * (otherwise, an app that might occasionally stall but mostly not will end
    * up with blitting all the time, at the cost of bandwidth).
    */
   if (offset + size <= intel_obj->gpu_active_start ||
       intel_obj->gpu_active_end <= offset) {
      drm_intel_gem_bo_map_unsynchronized(intel_obj->buffer);
      memcpy(intel_obj->buffer->virtual + offset, data, size);
      drm_intel_bo_unmap(intel_obj->buffer);

      if (intel_obj->gpu_active_end > intel_obj->gpu_active_start)
         intel_obj->prefer_stall_to_blit = true;
      return;
   }

   busy =
      drm_intel_bo_busy(intel_obj->buffer) ||
      drm_intel_bo_references(brw->batch.bo, intel_obj->buffer);

   if (busy) {
      if (size == intel_obj->Base.Size) {
         /* Replace the current busy bo so the subdata doesn't stall. */
         drm_intel_bo_unreference(intel_obj->buffer);
         alloc_buffer_object(brw, intel_obj);
      } else if (!intel_obj->prefer_stall_to_blit) {
         perf_debug("Using a blit copy to avoid stalling on "
                    "glBufferSubData(%ld, %ld) (%ldkb) to a busy "
                    "(%d-%d) buffer object.\n",
                    (long)offset, (long)offset + size, (long)(size/1024),
                    intel_obj->gpu_active_start,
                    intel_obj->gpu_active_end);
         drm_intel_bo *temp_bo =
            drm_intel_bo_alloc(brw->bufmgr, "subdata temp", size, 64);

         drm_intel_bo_subdata(temp_bo, 0, size, data);

         intel_emit_linear_blit(brw,
                                intel_obj->buffer, offset,
                                temp_bo, 0,
                                size);

         drm_intel_bo_unreference(temp_bo);
         return;
      } else {
         perf_debug("Stalling on glBufferSubData(%ld, %ld) (%ldkb) to a busy "
                    "(%d-%d) buffer object.  Use glMapBufferRange() to "
                    "avoid this.\n",
                    (long)offset, (long)offset + size, (long)(size/1024),
                    intel_obj->gpu_active_start,
                    intel_obj->gpu_active_end);
         intel_batchbuffer_flush(brw);
      }
   }

   drm_intel_bo_subdata(intel_obj->buffer, offset, size, data);
   mark_buffer_inactive(intel_obj);
}

/**
 * The GetBufferSubData() driver hook.
 *
 * Implements glGetBufferSubData(), which copies a subrange of a buffer
 * object into user memory.
 */
static void
brw_get_buffer_subdata(struct gl_context *ctx,
                       GLintptrARB offset,
                       GLsizeiptrARB size,
                       GLvoid *data,
                       struct gl_buffer_object *obj)
{
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
   struct brw_context *brw = brw_context(ctx);

   assert(intel_obj);
   if (drm_intel_bo_references(brw->batch.bo, intel_obj->buffer)) {
      intel_batchbuffer_flush(brw);
   }
   drm_intel_bo_get_subdata(intel_obj->buffer, offset, size, data);

   mark_buffer_inactive(intel_obj);
}

/**
 * The MapBufferRange() driver hook.
 *
 * This implements both glMapBufferRange() and glMapBuffer().
 *
 * The goal of this extension is to allow apps to accumulate their rendering
 * at the same time as they accumulate their buffer object.  Without it,
 * you'd end up blocking on execution of rendering every time you mapped
 * the buffer to put new data in.
 *
 * We support it in 3 ways: If unsynchronized, then don't bother flushing
 * the batchbuffer before mapping the buffer, which can save blocking in many
 * cases.  If we would still block, and they allow the whole buffer to be
 * invalidated, then just allocate a new buffer to replace the old one.  If
 * not, and we'd block, and they allow the subrange of the buffer to be
 * invalidated, then we can make a new little BO, let them write into that,
 * and blit it into the real BO at unmap time.
 */
static void *
brw_map_buffer_range(struct gl_context *ctx,
                     GLintptr offset, GLsizeiptr length,
                     GLbitfield access, struct gl_buffer_object *obj,
                     gl_map_buffer_index index)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);

   /* _mesa_MapBufferRange (GL entrypoint) sets these, but the vbo module also
    * internally uses our functions directly.
    */
   obj->Mappings[index].Offset = offset;
   obj->Mappings[index].Length = length;
   obj->Mappings[index].AccessFlags = access;

   if (intel_obj->buffer == NULL) {
      obj->Mappings[index].Pointer = NULL;
      return NULL;
   }

   /* If the access is synchronized (like a normal buffer mapping), then get
    * things flushed out so the later mapping syncs appropriately through GEM.
    * If the user doesn't care about existing buffer contents and mapping would
    * cause us to block, then throw out the old buffer.
    *
    * If they set INVALIDATE_BUFFER, we can pitch the current contents to
    * achieve the required synchronization.
    */
   if (!(access & GL_MAP_UNSYNCHRONIZED_BIT)) {
      if (drm_intel_bo_references(brw->batch.bo, intel_obj->buffer)) {
         if (access & GL_MAP_INVALIDATE_BUFFER_BIT) {
            drm_intel_bo_unreference(intel_obj->buffer);
            alloc_buffer_object(brw, intel_obj);
         } else {
            perf_debug("Stalling on the GPU for mapping a busy buffer "
                       "object\n");
            intel_batchbuffer_flush(brw);
         }
      } else if (drm_intel_bo_busy(intel_obj->buffer) &&
                 (access & GL_MAP_INVALIDATE_BUFFER_BIT)) {
         drm_intel_bo_unreference(intel_obj->buffer);
         alloc_buffer_object(brw, intel_obj);
      }
   }

   /* If the user is mapping a range of an active buffer object but
    * doesn't require the current contents of that range, make a new
    * BO, and we'll copy what they put in there out at unmap or
    * FlushRange time.
    *
    * That is, unless they're looking for a persistent mapping -- we would
    * need to do blits in the MemoryBarrier call, and it's easier to just do a
    * GPU stall and do a mapping.
    */
   if (!(access & (GL_MAP_UNSYNCHRONIZED_BIT | GL_MAP_PERSISTENT_BIT)) &&
       (access & GL_MAP_INVALIDATE_RANGE_BIT) &&
       drm_intel_bo_busy(intel_obj->buffer)) {
      /* Ensure that the base alignment of the allocation meets the alignment
       * guarantees the driver has advertised to the application.
       */
      const unsigned alignment = ctx->Const.MinMapBufferAlignment;

      intel_obj->map_extra[index] = (uintptr_t) offset % alignment;
      intel_obj->range_map_bo[index] =
         drm_intel_bo_alloc(brw->bufmgr, "BO blit temp",
                            length + intel_obj->map_extra[index],
                            alignment);
      if (brw->has_llc) {
         brw_bo_map(brw, intel_obj->range_map_bo[index],
                    (access & GL_MAP_WRITE_BIT) != 0, "range-map");
      } else {
         drm_intel_gem_bo_map_gtt(intel_obj->range_map_bo[index]);
      }
      obj->Mappings[index].Pointer =
         intel_obj->range_map_bo[index]->virtual + intel_obj->map_extra[index];
      return obj->Mappings[index].Pointer;
   }

   if (access & GL_MAP_UNSYNCHRONIZED_BIT)
      drm_intel_gem_bo_map_unsynchronized(intel_obj->buffer);
   else if (!brw->has_llc && (!(access & GL_MAP_READ_BIT) ||
                              (access & GL_MAP_PERSISTENT_BIT))) {
      drm_intel_gem_bo_map_gtt(intel_obj->buffer);
      mark_buffer_inactive(intel_obj);
   } else {
      brw_bo_map(brw, intel_obj->buffer, (access & GL_MAP_WRITE_BIT) != 0,
                 "MapBufferRange");
      mark_buffer_inactive(intel_obj);
   }

   obj->Mappings[index].Pointer = intel_obj->buffer->virtual + offset;
   return obj->Mappings[index].Pointer;
}

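/*
 * Summary of the mapping strategies above, keyed by access flags
 * (illustrative, restating the function's own logic):
 *
 *    GL_MAP_UNSYNCHRONIZED_BIT              -> map without flushing or
 *                                              stalling
 *    GL_MAP_INVALIDATE_BUFFER_BIT (busy)    -> replace the whole BO
 *    GL_MAP_INVALIDATE_RANGE_BIT (busy, not -> temporary BO, blitted back
 *       persistent or unsynchronized)          at unmap/flush time
 *    otherwise (busy)                       -> flush the batch and stall
 */
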
/**
 * The FlushMappedBufferRange() driver hook.
 *
 * Implements glFlushMappedBufferRange(), which signifies that modifications
 * have been made to a range of a mapped buffer, and it should be flushed.
 *
 * This is only used for buffers mapped with GL_MAP_FLUSH_EXPLICIT_BIT.
 *
 * Ideally we'd use a BO to avoid taking up cache space for the temporary
 * data, but FlushMappedBufferRange may be followed by further writes to
 * the pointer, so we would have to re-map after emitting our blit, which
 * would defeat the point.
 */
static void
brw_flush_mapped_buffer_range(struct gl_context *ctx,
                              GLintptr offset, GLsizeiptr length,
                              struct gl_buffer_object *obj,
                              gl_map_buffer_index index)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(obj->Mappings[index].AccessFlags & GL_MAP_FLUSH_EXPLICIT_BIT);

   /* If we gave a direct mapping of the buffer instead of using a temporary,
    * then there's nothing to do.
    */
   if (intel_obj->range_map_bo[index] == NULL)
      return;

   if (length == 0)
      return;

   /* Note that we're not unmapping our buffer while executing the blit.  We
    * need to have a mapping still at the end of this call, since the user
    * gets to make further modifications and glFlushMappedBufferRange() calls.
    * This is safe, because:
    *
    * - On LLC platforms, we're using a CPU mapping that's coherent with the
    *   GPU (except for the render caches), so the kernel doesn't need to do
    *   any flushing work for us except for what happens at batch exec time
    *   anyway.
    *
    * - On non-LLC platforms, we're using a GTT mapping that writes directly
    *   to system memory (except for the chipset cache that gets flushed at
    *   batch exec time).
    *
    * In both cases we don't need to stall for the previous blit to complete
    * so we can re-map (and we definitely don't want to, since that would be
    * slow): If the user edits a part of their buffer that's previously been
    * blitted, then our lack of synchronization is fine, because either
    * they'll get some too-new data in the first blit and not do another blit
    * of that area (but in that case the results are undefined), or they'll do
    * another blit of that area and the complete newer data will land the
    * second time.
    */
   intel_emit_linear_blit(brw,
                          intel_obj->buffer,
                          obj->Mappings[index].Offset + offset,
                          intel_obj->range_map_bo[index],
                          intel_obj->map_extra[index] + offset,
                          length);
   mark_buffer_gpu_usage(intel_obj,
                         obj->Mappings[index].Offset + offset,
                         length);
}

/**
 * The UnmapBuffer() driver hook.
 *
 * Implements glUnmapBuffer().
 */
static GLboolean
brw_unmap_buffer(struct gl_context *ctx,
                 struct gl_buffer_object *obj,
                 gl_map_buffer_index index)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);
   assert(obj->Mappings[index].Pointer);
   if (intel_obj->range_map_bo[index] != NULL) {
      drm_intel_bo_unmap(intel_obj->range_map_bo[index]);

      if (!(obj->Mappings[index].AccessFlags & GL_MAP_FLUSH_EXPLICIT_BIT)) {
         intel_emit_linear_blit(brw,
                                intel_obj->buffer, obj->Mappings[index].Offset,
                                intel_obj->range_map_bo[index],
                                intel_obj->map_extra[index],
                                obj->Mappings[index].Length);
         mark_buffer_gpu_usage(intel_obj, obj->Mappings[index].Offset,
                               obj->Mappings[index].Length);
      }

      /* Since we've emitted some blits to buffers that will (likely) be used
       * in rendering operations in other cache domains in this batch, emit a
       * flush.  Once again, we wish for a domain tracker in libdrm to cover
       * usage inside of a batchbuffer.
       */
      intel_batchbuffer_emit_mi_flush(brw);

      drm_intel_bo_unreference(intel_obj->range_map_bo[index]);
      intel_obj->range_map_bo[index] = NULL;
   } else if (intel_obj->buffer != NULL) {
      drm_intel_bo_unmap(intel_obj->buffer);
   }
   obj->Mappings[index].Pointer = NULL;
   obj->Mappings[index].Offset = 0;
   obj->Mappings[index].Length = 0;

   return true;
}

/**
 * Gets a pointer to the object's BO, and marks the given range as being used
 * on the GPU.
 *
 * Anywhere that uses buffer objects in the pipeline should be using this to
 * mark the range of the buffer that is being accessed by the pipeline.
 */
drm_intel_bo *
intel_bufferobj_buffer(struct brw_context *brw,
                       struct intel_buffer_object *intel_obj,
                       uint32_t offset, uint32_t size)
{
   /* This is needed so that things like transform feedback and texture buffer
    * objects that need a BO but don't want to check that it exists at
    * draw-time validation can always get a BO from a GL buffer object.
    */
   if (intel_obj->buffer == NULL)
      alloc_buffer_object(brw, intel_obj);

   mark_buffer_gpu_usage(intel_obj, offset, size);

   return intel_obj->buffer;
}

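/*
 * Usage sketch (hypothetical caller, e.g. a draw-time binding path): fetch
 * the BO through this helper rather than reading intel_obj->buffer directly,
 * so the accessed range is recorded for the BufferSubData heuristics:
 *
 *    drm_intel_bo *bo =
 *       intel_bufferobj_buffer(brw, intel_obj, offset, size);
 */
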
/**
 * The CopyBufferSubData() driver hook.
 *
 * Implements glCopyBufferSubData(), which copies a portion of one buffer
 * object's data to another.  Independent source and destination offsets
 * are allowed.
 */
static void
brw_copy_buffer_subdata(struct gl_context *ctx,
                        struct gl_buffer_object *src,
                        struct gl_buffer_object *dst,
                        GLintptr read_offset, GLintptr write_offset,
                        GLsizeiptr size)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_buffer_object *intel_src = intel_buffer_object(src);
   struct intel_buffer_object *intel_dst = intel_buffer_object(dst);
   drm_intel_bo *src_bo, *dst_bo;

   if (size == 0)
      return;

   dst_bo = intel_bufferobj_buffer(brw, intel_dst, write_offset, size);
   src_bo = intel_bufferobj_buffer(brw, intel_src, read_offset, size);

   intel_emit_linear_blit(brw,
                          dst_bo, write_offset,
                          src_bo, read_offset, size);

   /* Since we've emitted some blits to buffers that will (likely) be used
    * in rendering operations in other cache domains in this batch, emit a
    * flush.  Once again, we wish for a domain tracker in libdrm to cover
    * usage inside of a batchbuffer.
    */
   intel_batchbuffer_emit_mi_flush(brw);
}

void
intelInitBufferObjectFuncs(struct dd_function_table *functions)
{
   functions->NewBufferObject = brw_new_buffer_object;
   functions->DeleteBuffer = brw_delete_buffer;
   functions->BufferData = brw_buffer_data;
   functions->BufferSubData = brw_buffer_subdata;
   functions->GetBufferSubData = brw_get_buffer_subdata;
   functions->MapBufferRange = brw_map_buffer_range;
   functions->FlushMappedBufferRange = brw_flush_mapped_buffer_range;
   functions->UnmapBuffer = brw_unmap_buffer;
   functions->CopyBufferSubData = brw_copy_buffer_subdata;
}
;