/**************************************************************************
 *
 * Copyright 2003 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/**
 * @file intel_buffer_objects.c
 *
 * This provides core GL buffer object functionality.
 */
#include "main/imports.h"
#include "main/mtypes.h"
#include "main/macros.h"
#include "main/bufferobj.h"

#include "brw_context.h"
#include "intel_blit.h"
#include "intel_buffer_objects.h"
#include "intel_batchbuffer.h"
/**
 * Map a buffer object; issue performance warnings if mapping causes stalls.
 *
 * This matches the drm_intel_bo_map API, but takes an additional human-readable
 * name for the buffer object to use in the performance debug message.
 */
int
brw_bo_map(struct brw_context *brw,
           drm_intel_bo *bo, int write_enable,
           const char *bo_name)
{
   if (likely(!brw->perf_debug) || !drm_intel_bo_busy(bo))
      return drm_intel_bo_map(bo, write_enable);

   double start_time = get_time();

   int ret = drm_intel_bo_map(bo, write_enable);

   perf_debug("CPU mapping a busy %s BO stalled and took %.03f ms.\n",
              bo_name, (get_time() - start_time) * 1000);

   return ret;
}
int
brw_bo_map_gtt(struct brw_context *brw, drm_intel_bo *bo, const char *bo_name)
{
   if (likely(!brw->perf_debug) || !drm_intel_bo_busy(bo))
      return drm_intel_gem_bo_map_gtt(bo);

   double start_time = get_time();

   int ret = drm_intel_gem_bo_map_gtt(bo);

   perf_debug("GTT mapping a busy %s BO stalled and took %.03f ms.\n",
              bo_name, (get_time() - start_time) * 1000);

   return ret;
}
static GLboolean
intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj,
                      gl_map_buffer_index index);
static void
intel_bufferobj_mark_gpu_usage(struct intel_buffer_object *intel_obj,
                               uint32_t offset, uint32_t size)
{
   intel_obj->gpu_active_start = MIN2(intel_obj->gpu_active_start, offset);
   intel_obj->gpu_active_end = MAX2(intel_obj->gpu_active_end, offset + size);
}

static void
intel_bufferobj_mark_inactive(struct intel_buffer_object *intel_obj)
{
   intel_obj->gpu_active_start = ~0;
   intel_obj->gpu_active_end = 0;
}
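
/* Worked example of the range tracking above (values are illustrative):
 * after intel_bufferobj_mark_inactive(), the range is "empty" because
 * gpu_active_start (~0) is greater than gpu_active_end (0).  A call like
 *
 *    intel_bufferobj_mark_gpu_usage(intel_obj, 256, 64);
 *
 * collapses it to [256, 320), and further calls only ever grow the
 * interval.  The tracking is deliberately conservative: one range covering
 * all GPU-used bytes rather than a precise set of subranges.
 */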
/** Allocates a new drm_intel_bo to store the data for the buffer object. */
static void
intel_bufferobj_alloc_buffer(struct brw_context *brw,
                             struct intel_buffer_object *intel_obj)
{
   intel_obj->buffer = drm_intel_bo_alloc(brw->bufmgr, "bufferobj",
                                          intel_obj->Base.Size, 64);

   /* The buffer might be bound as a uniform buffer, so we need to mark that
    * state dirty.
    */
   brw->state.dirty.brw |= BRW_NEW_UNIFORM_BUFFER;

   intel_bufferobj_mark_inactive(intel_obj);
}
static void
release_buffer(struct intel_buffer_object *intel_obj)
{
   drm_intel_bo_unreference(intel_obj->buffer);
   intel_obj->buffer = NULL;
}
/**
 * The NewBufferObject() driver hook.
 *
 * Allocates a new intel_buffer_object structure and initializes it.
 *
 * There is some duplication between mesa's bufferobjects and our
 * bufmgr buffers.  Both have an integer handle and a hashtable to
 * look up an opaque structure.  It would be nice if the handles and
 * internal structure were somehow shared.
 */
static struct gl_buffer_object *
intel_bufferobj_alloc(struct gl_context * ctx, GLuint name, GLenum target)
{
   struct intel_buffer_object *obj = CALLOC_STRUCT(intel_buffer_object);
   if (!obj)
      return NULL;

   _mesa_initialize_buffer_object(ctx, &obj->Base, name, target);

   obj->buffer = NULL;

   return &obj->Base;
}
/**
 * The DeleteBuffer() driver hook.
 *
 * Deletes a single OpenGL buffer object.  Used by glDeleteBuffers().
 */
static void
intel_bufferobj_free(struct gl_context * ctx, struct gl_buffer_object *obj)
{
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);

   /* Buffer objects are automatically unmapped when deleting according
    * to the spec, but Mesa doesn't do UnmapBuffer for us at context destroy
    * (though it does if you call glDeleteBuffers).
    */
   _mesa_buffer_unmap_all_mappings(ctx, obj);

   drm_intel_bo_unreference(intel_obj->buffer);
   free(intel_obj);
}
/**
 * The BufferData() driver hook.
 *
 * Implements glBufferData(), which recreates a buffer object's data store
 * and populates it with the given data, if present.
 *
 * Any data that was previously stored in the buffer object is lost.
 *
 * \return true for success, false if out of memory
 */
static GLboolean
intel_bufferobj_data(struct gl_context * ctx,
                     GLenum target,
                     GLsizeiptrARB size,
                     const GLvoid * data,
                     GLenum usage,
                     GLbitfield storageFlags,
                     struct gl_buffer_object *obj)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   /* Part of the ABI, but this function doesn't use it.
    */
   (void) target;

   intel_obj->Base.Size = size;
   intel_obj->Base.Usage = usage;
   intel_obj->Base.StorageFlags = storageFlags;

   assert(!obj->Mappings[MAP_USER].Pointer); /* Mesa should have unmapped it */
   assert(!obj->Mappings[MAP_INTERNAL].Pointer);

   if (intel_obj->buffer != NULL)
      release_buffer(intel_obj);

   if (size != 0) {
      intel_bufferobj_alloc_buffer(brw, intel_obj);
      if (!intel_obj->buffer)
         return false;

      if (data != NULL)
         drm_intel_bo_subdata(intel_obj->buffer, 0, size, data);
   }

   return true;
}
/**
 * The BufferSubData() driver hook.
 *
 * Implements glBufferSubData(), which replaces a portion of the data in a
 * buffer object.
 *
 * If the data range specified by (size + offset) extends beyond the end of
 * the buffer or if data is NULL, no copy is performed.
 */
static void
intel_bufferobj_subdata(struct gl_context * ctx,
                        GLintptrARB offset,
                        GLsizeiptrARB size,
                        const GLvoid * data, struct gl_buffer_object *obj)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
   bool busy;

   if (size == 0)
      return;

   assert(intel_obj);

   /* See if we can write the data unsynchronized into the user's BO. This
    * avoids GPU stalls in unfortunately common user patterns (uploading
    * sequentially into a BO, with draw calls in between each upload).
    *
    * Once we've hit this path, we mark this GL BO as preferring stalling to
    * blits, so that we can hopefully hit this path again in the future
    * (otherwise, an app that might occasionally stall but mostly not will end
    * up with blitting all the time, at the cost of bandwidth).
    */
   if (offset + size <= intel_obj->gpu_active_start ||
       intel_obj->gpu_active_end <= offset) {
      drm_intel_gem_bo_map_unsynchronized(intel_obj->buffer);
      memcpy(intel_obj->buffer->virtual + offset, data, size);
      drm_intel_bo_unmap(intel_obj->buffer);

      if (intel_obj->gpu_active_end > intel_obj->gpu_active_start)
         intel_obj->prefer_stall_to_blit = true;
      return;
   }

   busy =
      drm_intel_bo_busy(intel_obj->buffer) ||
      drm_intel_bo_references(brw->batch.bo, intel_obj->buffer);

   if (busy) {
      if (size == intel_obj->Base.Size) {
         /* Replace the current busy bo so the subdata doesn't stall. */
         drm_intel_bo_unreference(intel_obj->buffer);
         intel_bufferobj_alloc_buffer(brw, intel_obj);
      } else if (!intel_obj->prefer_stall_to_blit) {
         perf_debug("Using a blit copy to avoid stalling on "
                    "glBufferSubData(%ld, %ld) (%ldkb) to a busy "
                    "(%d-%d) buffer object.\n",
                    (long)offset, (long)offset + size, (long)(size/1024),
                    intel_obj->gpu_active_start,
                    intel_obj->gpu_active_end);
         drm_intel_bo *temp_bo =
            drm_intel_bo_alloc(brw->bufmgr, "subdata temp", size, 64);

         drm_intel_bo_subdata(temp_bo, 0, size, data);

         intel_emit_linear_blit(brw,
                                intel_obj->buffer, offset,
                                temp_bo, 0,
                                size);

         drm_intel_bo_unreference(temp_bo);
         return;
      } else {
         perf_debug("Stalling on glBufferSubData(%ld, %ld) (%ldkb) to a busy "
                    "(%d-%d) buffer object.  Use glMapBufferRange() to "
                    "avoid this.\n",
                    (long)offset, (long)offset + size, (long)(size/1024),
                    intel_obj->gpu_active_start,
                    intel_obj->gpu_active_end);
         intel_batchbuffer_flush(brw);
      }
   }

   drm_intel_bo_subdata(intel_obj->buffer, offset, size, data);
   intel_bufferobj_mark_inactive(intel_obj);
}
/**
 * The GetBufferSubData() driver hook.
 *
 * Implements glGetBufferSubData(), which copies a subrange of a buffer
 * object into user memory.
 */
static void
intel_bufferobj_get_subdata(struct gl_context * ctx,
                            GLintptrARB offset,
                            GLsizeiptrARB size,
                            GLvoid * data, struct gl_buffer_object *obj)
{
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
   struct brw_context *brw = brw_context(ctx);

   assert(intel_obj);
   if (drm_intel_bo_references(brw->batch.bo, intel_obj->buffer)) {
      intel_batchbuffer_flush(brw);
   }
   drm_intel_bo_get_subdata(intel_obj->buffer, offset, size, data);

   intel_bufferobj_mark_inactive(intel_obj);
}
/**
 * The MapBufferRange() driver hook.
 *
 * This implements both glMapBufferRange() and glMapBuffer().
 *
 * The goal of this extension is to allow apps to accumulate their rendering
 * at the same time as they accumulate their buffer object.  Without it,
 * you'd end up blocking on execution of rendering every time you mapped
 * the buffer to put new data in.
 *
 * We support it in 3 ways: If unsynchronized, then don't bother
 * flushing the batchbuffer before mapping the buffer, which can save blocking
 * in many cases.  If we would still block, and they allow the whole buffer
 * to be invalidated, then just allocate a new buffer to replace the old one.
 * If not, and we'd block, and they allow the subrange of the buffer to be
 * invalidated, then we can make a new little BO, let them write into that,
 * and blit it into the real BO at unmap time.
 */
static void *
intel_bufferobj_map_range(struct gl_context * ctx,
                          GLintptr offset, GLsizeiptr length,
                          GLbitfield access, struct gl_buffer_object *obj,
                          gl_map_buffer_index index)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);

   /* _mesa_MapBufferRange (GL entrypoint) sets these, but the vbo module also
    * internally uses our functions directly.
    */
   obj->Mappings[index].Offset = offset;
   obj->Mappings[index].Length = length;
   obj->Mappings[index].AccessFlags = access;

   if (intel_obj->buffer == NULL) {
      obj->Mappings[index].Pointer = NULL;
      return NULL;
   }

   /* If the access is synchronized (like a normal buffer mapping), then get
    * things flushed out so the later mapping syncs appropriately through GEM.
    * If the user doesn't care about existing buffer contents and mapping would
    * cause us to block, then throw out the old buffer.
    *
    * If they set INVALIDATE_BUFFER, we can pitch the current contents to
    * achieve the required synchronization.
    */
   if (!(access & GL_MAP_UNSYNCHRONIZED_BIT)) {
      if (drm_intel_bo_references(brw->batch.bo, intel_obj->buffer)) {
         if (access & GL_MAP_INVALIDATE_BUFFER_BIT) {
            drm_intel_bo_unreference(intel_obj->buffer);
            intel_bufferobj_alloc_buffer(brw, intel_obj);
         } else {
            perf_debug("Stalling on the GPU for mapping a busy buffer "
                       "object\n");
            intel_batchbuffer_flush(brw);
         }
      } else if (drm_intel_bo_busy(intel_obj->buffer) &&
                 (access & GL_MAP_INVALIDATE_BUFFER_BIT)) {
         drm_intel_bo_unreference(intel_obj->buffer);
         intel_bufferobj_alloc_buffer(brw, intel_obj);
      }
   }

   /* If the user is mapping a range of an active buffer object but
    * doesn't require the current contents of that range, make a new
    * BO, and we'll copy what they put in there out at unmap or
    * FlushRange time.
    *
    * That is, unless they're looking for a persistent mapping -- we would
    * need to do blits in the MemoryBarrier call, and it's easier to just do a
    * GPU stall and do a mapping.
    */
   if (!(access & (GL_MAP_UNSYNCHRONIZED_BIT | GL_MAP_PERSISTENT_BIT)) &&
       (access & GL_MAP_INVALIDATE_RANGE_BIT) &&
       drm_intel_bo_busy(intel_obj->buffer)) {
      /* Ensure that the base alignment of the allocation meets the alignment
       * guarantees the driver has advertised to the application.
       */
      const unsigned alignment = ctx->Const.MinMapBufferAlignment;

      intel_obj->map_extra[index] = (uintptr_t) offset % alignment;
      intel_obj->range_map_bo[index] =
         drm_intel_bo_alloc(brw->bufmgr, "BO blit temp",
                            length + intel_obj->map_extra[index], alignment);
      if (brw->has_llc) {
         drm_intel_bo_map(intel_obj->range_map_bo[index],
                          (access & GL_MAP_WRITE_BIT) != 0);
      } else {
         drm_intel_gem_bo_map_gtt(intel_obj->range_map_bo[index]);
      }
      obj->Mappings[index].Pointer =
         intel_obj->range_map_bo[index]->virtual + intel_obj->map_extra[index];
      return obj->Mappings[index].Pointer;
   }

   if (access & GL_MAP_UNSYNCHRONIZED_BIT)
      drm_intel_gem_bo_map_unsynchronized(intel_obj->buffer);
   else if (!brw->has_llc && (!(access & GL_MAP_READ_BIT) ||
                              (access & GL_MAP_PERSISTENT_BIT))) {
      drm_intel_gem_bo_map_gtt(intel_obj->buffer);
      intel_bufferobj_mark_inactive(intel_obj);
   } else {
      drm_intel_bo_map(intel_obj->buffer, (access & GL_MAP_WRITE_BIT) != 0);
      intel_bufferobj_mark_inactive(intel_obj);
   }

   obj->Mappings[index].Pointer = intel_obj->buffer->virtual + offset;
   return obj->Mappings[index].Pointer;
}
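
/* App-side view of the three strategies above (a sketch; the access flags
 * determine which path a mapping takes):
 *
 *    // 1. No flush, no stall -- the app takes responsibility for syncing:
 *    glMapBufferRange(target, off, len, GL_MAP_WRITE_BIT |
 *                                       GL_MAP_UNSYNCHRONIZED_BIT);
 *
 *    // 2. Busy BO + whole-buffer invalidate -- we swap in a fresh BO:
 *    glMapBufferRange(target, 0, size, GL_MAP_WRITE_BIT |
 *                                      GL_MAP_INVALIDATE_BUFFER_BIT);
 *
 *    // 3. Busy BO + range invalidate -- we hand back a temporary BO and
 *    //    blit it into place at unmap/flush time:
 *    glMapBufferRange(target, off, len, GL_MAP_WRITE_BIT |
 *                                       GL_MAP_INVALIDATE_RANGE_BIT);
 */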
/**
 * The FlushMappedBufferRange() driver hook.
 *
 * Implements glFlushMappedBufferRange(), which signifies that modifications
 * have been made to a range of a mapped buffer, and it should be flushed.
 *
 * This is only used for buffers mapped with GL_MAP_FLUSH_EXPLICIT_BIT.
 *
 * Ideally we'd use a BO to avoid taking up cache space for the temporary
 * data, but FlushMappedBufferRange may be followed by further writes to
 * the pointer, so we would have to re-map after emitting our blit, which
 * would defeat the point.
 */
static void
intel_bufferobj_flush_mapped_range(struct gl_context *ctx,
                                   GLintptr offset, GLsizeiptr length,
                                   struct gl_buffer_object *obj,
                                   gl_map_buffer_index index)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
   GLbitfield access = obj->Mappings[index].AccessFlags;

   assert(access & GL_MAP_FLUSH_EXPLICIT_BIT);

   /* If we gave a direct mapping of the buffer instead of using a temporary,
    * then there's nothing to do.
    */
   if (intel_obj->range_map_bo[index] == NULL)
      return;

   if (length == 0)
      return;

   /* Note that we're not unmapping our buffer while executing the blit.  We
    * need to have a mapping still at the end of this call, since the user
    * gets to make further modifications and glFlushMappedBufferRange() calls.
    * This is safe, because:
    *
    * - On LLC platforms, we're using a CPU mapping that's coherent with the
    *   GPU (except for the render caches), so the kernel doesn't need to do
    *   any flushing work for us except for what happens at batch exec time
    *   anyway.
    *
    * - On non-LLC platforms, we're using a GTT mapping that writes directly
    *   to system memory (except for the chipset cache that gets flushed at
    *   batch exec time).
    *
    * In both cases we don't need to stall for the previous blit to complete
    * so we can re-map (and we definitely don't want to, since that would be
    * slow): If the user edits a part of their buffer that's previously been
    * blitted, then our lack of synchronization is fine, because either
    * they'll get some too-new data in the first blit and not do another blit
    * of that area (but in that case the results are undefined), or they'll do
    * another blit of that area and the complete newer data will land the
    * second time.
    */
   intel_emit_linear_blit(brw,
                          intel_obj->buffer,
                          obj->Mappings[index].Offset + offset,
                          intel_obj->range_map_bo[index],
                          intel_obj->map_extra[index] + offset,
                          length);
   intel_bufferobj_mark_gpu_usage(intel_obj,
                                  obj->Mappings[index].Offset + offset,
                                  length);
}
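
/* Illustrative app-side use of the explicit-flush path (a sketch): the
 * buffer stays mapped across several partial flushes, each of which lands
 * here and emits one blit out of the temporary BO:
 *
 *    void *ptr = glMapBufferRange(target, 0, size,
 *                                 GL_MAP_WRITE_BIT |
 *                                 GL_MAP_INVALIDATE_RANGE_BIT |
 *                                 GL_MAP_FLUSH_EXPLICIT_BIT);
 *    memcpy(ptr, data0, len0);
 *    glFlushMappedBufferRange(target, 0, len0);
 *    memcpy((char *)ptr + len0, data1, len1);
 *    glFlushMappedBufferRange(target, len0, len1);
 *    glUnmapBuffer(target);
 */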
/**
 * The UnmapBuffer() driver hook.
 *
 * Implements glUnmapBuffer().
 */
static GLboolean
intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj,
                      gl_map_buffer_index index)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);
   assert(obj->Mappings[index].Pointer);
   if (intel_obj->range_map_bo[index] != NULL) {
      drm_intel_bo_unmap(intel_obj->range_map_bo[index]);

      if (!(obj->Mappings[index].AccessFlags & GL_MAP_FLUSH_EXPLICIT_BIT)) {
         intel_emit_linear_blit(brw,
                                intel_obj->buffer, obj->Mappings[index].Offset,
                                intel_obj->range_map_bo[index],
                                intel_obj->map_extra[index],
                                obj->Mappings[index].Length);
         intel_bufferobj_mark_gpu_usage(intel_obj, obj->Mappings[index].Offset,
                                        obj->Mappings[index].Length);
      }

      /* Since we've emitted some blits to buffers that will (likely) be used
       * in rendering operations in other cache domains in this batch, emit a
       * flush.  Once again, we wish for a domain tracker in libdrm to cover
       * usage inside of a batchbuffer.
       */
      intel_batchbuffer_emit_mi_flush(brw);

      drm_intel_bo_unreference(intel_obj->range_map_bo[index]);
      intel_obj->range_map_bo[index] = NULL;
   } else if (intel_obj->buffer != NULL) {
      drm_intel_bo_unmap(intel_obj->buffer);
   }
   obj->Mappings[index].Pointer = NULL;
   obj->Mappings[index].Offset = 0;
   obj->Mappings[index].Length = 0;

   return true;
}
/**
 * Gets a pointer to the object's BO, and marks the given range as being used
 * on the GPU.
 *
 * Anywhere that uses buffer objects in the pipeline should be using this to
 * mark the range of the buffer that is being accessed by the pipeline.
 */
drm_intel_bo *
intel_bufferobj_buffer(struct brw_context *brw,
                       struct intel_buffer_object *intel_obj,
                       uint32_t offset, uint32_t size)
{
   /* This is needed so that things like transform feedback and texture buffer
    * objects that need a BO but don't want to check that they exist for
    * draw-time validation can just always get a BO from a GL buffer object.
    */
   if (intel_obj->buffer == NULL)
      intel_bufferobj_alloc_buffer(brw, intel_obj);

   intel_bufferobj_mark_gpu_usage(intel_obj, offset, size);

   return intel_obj->buffer;
}
/**
 * The CopyBufferSubData() driver hook.
 *
 * Implements glCopyBufferSubData(), which copies a portion of one buffer
 * object's data to another.  Independent source and destination offsets
 * are allowed.
 */
static void
intel_bufferobj_copy_subdata(struct gl_context *ctx,
                             struct gl_buffer_object *src,
                             struct gl_buffer_object *dst,
                             GLintptr read_offset, GLintptr write_offset,
                             GLsizeiptr size)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_buffer_object *intel_src = intel_buffer_object(src);
   struct intel_buffer_object *intel_dst = intel_buffer_object(dst);
   drm_intel_bo *src_bo, *dst_bo;

   if (size == 0)
      return;

   dst_bo = intel_bufferobj_buffer(brw, intel_dst, write_offset, size);
   src_bo = intel_bufferobj_buffer(brw, intel_src, read_offset, size);

   intel_emit_linear_blit(brw,
                          dst_bo, write_offset,
                          src_bo, read_offset, size);

   /* Since we've emitted some blits to buffers that will (likely) be used
    * in rendering operations in other cache domains in this batch, emit a
    * flush.  Once again, we wish for a domain tracker in libdrm to cover
    * usage inside of a batchbuffer.
    */
   intel_batchbuffer_emit_mi_flush(brw);
}
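
/* App-side view (a sketch): since both BOs stay on the GPU, this path makes
 * GPU-to-GPU copies like the following cheap, with no CPU mapping of either
 * buffer ("src_name" and "dst_name" are illustrative):
 *
 *    glBindBuffer(GL_COPY_READ_BUFFER, src_name);
 *    glBindBuffer(GL_COPY_WRITE_BUFFER, dst_name);
 *    glCopyBufferSubData(GL_COPY_READ_BUFFER, GL_COPY_WRITE_BUFFER,
 *                        read_off, write_off, size);
 */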
void
intelInitBufferObjectFuncs(struct dd_function_table *functions)
{
   functions->NewBufferObject = intel_bufferobj_alloc;
   functions->DeleteBuffer = intel_bufferobj_free;
   functions->BufferData = intel_bufferobj_data;
   functions->BufferSubData = intel_bufferobj_subdata;
   functions->GetBufferSubData = intel_bufferobj_get_subdata;
   functions->MapBufferRange = intel_bufferobj_map_range;
   functions->FlushMappedBufferRange = intel_bufferobj_flush_mapped_range;
   functions->UnmapBuffer = intel_bufferobj_unmap;
   functions->CopyBufferSubData = intel_bufferobj_copy_subdata;
}