/*
 * Copyright 2003 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

/**
 * @file intel_buffer_objects.c
 *
 * This provides core GL buffer object functionality.
 */

#include "main/imports.h"
#include "main/mtypes.h"
#include "main/macros.h"
#include "main/streaming-load-memcpy.h"
#include "main/bufferobj.h"
#include "x86/common_x86_asm.h"

#include "brw_context.h"
#include "intel_blit.h"
#include "intel_buffer_objects.h"
#include "intel_batchbuffer.h"
#include "intel_tiled_memcpy.h"
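
/* The helpers below track two byte ranges per buffer object:
 *
 *  - [gpu_active_start, gpu_active_end): the range that pending GPU
 *    commands may still read or write.
 *  - [valid_data_start, valid_data_end): the range the application has
 *    actually defined with data.
 *
 * An empty range is encoded as start = ~0, end = 0, so the MIN2()/MAX2()
 * updates in the mark_* helpers grow it correctly from either direction.
 */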

static void
mark_buffer_gpu_usage(struct intel_buffer_object *intel_obj,
                      uint32_t offset, uint32_t size)
{
   intel_obj->gpu_active_start = MIN2(intel_obj->gpu_active_start, offset);
   intel_obj->gpu_active_end = MAX2(intel_obj->gpu_active_end, offset + size);
}

static void
mark_buffer_inactive(struct intel_buffer_object *intel_obj)
{
   intel_obj->gpu_active_start = ~0;
   intel_obj->gpu_active_end = 0;
}

static void
mark_buffer_valid_data(struct intel_buffer_object *intel_obj,
                       uint32_t offset, uint32_t size)
{
   intel_obj->valid_data_start = MIN2(intel_obj->valid_data_start, offset);
   intel_obj->valid_data_end = MAX2(intel_obj->valid_data_end, offset + size);
}

static void
mark_buffer_invalid(struct intel_buffer_object *intel_obj)
{
   intel_obj->valid_data_start = ~0;
   intel_obj->valid_data_end = 0;
}

/** Allocates a new brw_bo to store the data for the buffer object. */
static void
alloc_buffer_object(struct brw_context *brw,
                    struct intel_buffer_object *intel_obj)
{
   const struct gl_context *ctx = &brw->ctx;

   uint64_t size = intel_obj->Base.Size;
   if (ctx->Const.RobustAccess) {
      /* Pad out buffer objects with an extra 2kB (half a page).
       *
       * When pushing UBOs, we need to safeguard against 3DSTATE_CONSTANT_*
       * reading out of bounds memory.  The application might bind a UBO
       * that's smaller than what the program expects.  Ideally, we'd bind an
       * extra push buffer containing zeros, but we have a limited number of
       * those, so it's not always viable.  Our only safe option is to pad
       * all buffer objects by the maximum push data length, so that it will
       * never read past the end of a BO.
       *
       * This is unfortunate, but it should result in at most 1 extra page,
       * which probably isn't too terrible.
       */
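      /* 64 units * 32 bytes = 2048 bytes: the 2kB (half of a 4kB page) of
       * padding described above.
       */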
      size += 64 * 32; /* max read length of 64 256-bit units */
   }
   intel_obj->buffer = brw_bo_alloc(brw->bufmgr, "bufferobj", size, 64);

   /* The buffer might be bound as a uniform buffer (or another binding
    * point tracked below); flag the dependent state as dirty so it gets
    * re-emitted with the new BO.
    */
   if (intel_obj->Base.UsageHistory & USAGE_UNIFORM_BUFFER)
      brw->ctx.NewDriverState |= BRW_NEW_UNIFORM_BUFFER;
   if (intel_obj->Base.UsageHistory & USAGE_SHADER_STORAGE_BUFFER)
      brw->ctx.NewDriverState |= BRW_NEW_UNIFORM_BUFFER;
   if (intel_obj->Base.UsageHistory & USAGE_TEXTURE_BUFFER)
      brw->ctx.NewDriverState |= BRW_NEW_TEXTURE_BUFFER;
   if (intel_obj->Base.UsageHistory & USAGE_ATOMIC_COUNTER_BUFFER)
      brw->ctx.NewDriverState |= BRW_NEW_ATOMIC_BUFFER;
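
   /* A freshly allocated BO has no pending GPU work and no defined contents
    * yet, so reset both tracked ranges to empty.
    */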
   mark_buffer_inactive(intel_obj);
   mark_buffer_invalid(intel_obj);
}

static void
release_buffer(struct intel_buffer_object *intel_obj)
{
   brw_bo_unreference(intel_obj->buffer);
   intel_obj->buffer = NULL;
}

/**
 * The NewBufferObject() driver hook.
 *
 * Allocates a new intel_buffer_object structure and initializes it.
 *
 * There is some duplication between mesa's bufferobjects and our
 * bufmgr buffers.  Both have an integer handle and a hashtable to
 * look up an opaque structure.  It would be nice if the handles and
 * internal structure were somehow shared.
 */
static struct gl_buffer_object *
brw_new_buffer_object(struct gl_context *ctx, GLuint name)
{
   struct intel_buffer_object *obj = CALLOC_STRUCT(intel_buffer_object);
   if (!obj) {
      _mesa_error_no_memory(__func__);
      return NULL;
   }

   _mesa_initialize_buffer_object(ctx, &obj->Base, name);

   obj->buffer = NULL;

   return &obj->Base;
}

/**
 * The DeleteBuffer() driver hook.
 *
 * Deletes a single OpenGL buffer object.  Used by glDeleteBuffers().
 */
static void
brw_delete_buffer(struct gl_context *ctx, struct gl_buffer_object *obj)
{
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);

   /* Buffer objects are automatically unmapped when deleting according
    * to the spec, but Mesa doesn't do UnmapBuffer for us at context destroy
    * (though it does if you call glDeleteBuffers).
    */
   _mesa_buffer_unmap_all_mappings(ctx, obj);

   brw_bo_unreference(intel_obj->buffer);
   _mesa_delete_buffer_object(ctx, obj);
}

/**
 * The BufferData() driver hook.
 *
 * Implements glBufferData(), which recreates a buffer object's data store
 * and populates it with the given data, if present.
 *
 * Any data that was previously stored in the buffer object is lost.
 *
 * \return true for success, false if out of memory
 */
static GLboolean
brw_buffer_data(struct gl_context *ctx,
                GLenum target,
                GLsizeiptrARB size,
                const GLvoid *data,
                GLenum usage,
                GLbitfield storageFlags,
                struct gl_buffer_object *obj)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   /* Part of the ABI, but this function doesn't use it.
    */
   (void) target;

   intel_obj->Base.Size = size;
   intel_obj->Base.Usage = usage;
   intel_obj->Base.StorageFlags = storageFlags;

   assert(!obj->Mappings[MAP_USER].Pointer); /* Mesa should have unmapped it */
   assert(!obj->Mappings[MAP_INTERNAL].Pointer);

   if (intel_obj->buffer != NULL)
      release_buffer(intel_obj);

   if (size != 0) {
      alloc_buffer_object(brw, intel_obj);
      if (!intel_obj->buffer)
         return false;

      if (data != NULL) {
         brw_bo_subdata(intel_obj->buffer, 0, size, data);
         mark_buffer_valid_data(intel_obj, 0, size);
      }
   }

   return true;
}

/**
 * The BufferSubData() driver hook.
 *
 * Implements glBufferSubData(), which replaces a portion of the data in a
 * buffer object.
 *
 * If the data range specified by (size + offset) extends beyond the end of
 * the buffer or if data is NULL, no copy is performed.
 */
static void
brw_buffer_subdata(struct gl_context *ctx,
                   GLintptrARB offset,
                   GLsizeiptrARB size,
                   const GLvoid *data,
                   struct gl_buffer_object *obj)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
   bool busy;

   if (size == 0)
      return;

   assert(intel_obj);

   /* See if we can write the data unsynchronized into the user's BO.  This
    * avoids GPU stalls in unfortunately common user patterns (uploading
    * sequentially into a BO, with draw calls in between each upload).
    *
    * Once we've hit this path, we mark this GL BO as preferring stalling to
    * blits, so that we can hopefully hit this path again in the future
    * (otherwise, an app that might occasionally stall but mostly not will
    * end up with blitting all the time, at the cost of bandwidth).
    */
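   /* The write can go in unsynchronized if it touches no bytes the GPU may
    * still be using (the first two checks below) or no bytes that currently
    * hold defined data (the last two): scribbling over undefined bytes of a
    * busy BO is harmless.
    */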
   if (offset + size <= intel_obj->gpu_active_start ||
       intel_obj->gpu_active_end <= offset ||
       offset + size <= intel_obj->valid_data_start ||
       intel_obj->valid_data_end <= offset) {
      void *map = brw_bo_map(brw, intel_obj->buffer, MAP_WRITE | MAP_ASYNC);
      memcpy(map + offset, data, size);
      brw_bo_unmap(intel_obj->buffer);

      if (intel_obj->gpu_active_end > intel_obj->gpu_active_start)
         intel_obj->prefer_stall_to_blit = true;

      mark_buffer_valid_data(intel_obj, offset, size);
      return;
   }

   busy =
      brw_bo_busy(intel_obj->buffer) ||
      brw_batch_references(&brw->batch, intel_obj->buffer);
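
   /* The BO counts as busy if the kernel still has GPU work pending against
    * it, or if the current (not yet submitted) batch references it.
    */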
   if (busy) {
      if (size == intel_obj->Base.Size ||
          (intel_obj->valid_data_start >= offset &&
           intel_obj->valid_data_end <= offset + size)) {
         /* Replace the current busy bo so the subdata doesn't stall. */
         brw_bo_unreference(intel_obj->buffer);
         alloc_buffer_object(brw, intel_obj);
      } else if (!intel_obj->prefer_stall_to_blit) {
284 perf_debug("Using a blit copy to avoid stalling on "
285 "glBufferSubData(%ld, %ld) (%ldkb) to a busy "
286 "(%d-%d) / valid (%d-%d) buffer object.\n",
287 (long)offset
, (long)offset
+ size
, (long)(size
/1024),
288 intel_obj
->gpu_active_start
,
289 intel_obj
->gpu_active_end
,
290 intel_obj
->valid_data_start
,
291 intel_obj
->valid_data_end
);
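
         /* Stage the new data in a fresh, idle BO and let the GPU blit it
          * into place: the copy queues up behind whatever is keeping the
          * destination busy, so the CPU never waits.
          */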
         struct brw_bo *temp_bo =
            brw_bo_alloc(brw->bufmgr, "subdata temp", size, 64);

         brw_bo_subdata(temp_bo, 0, size, data);

         intel_emit_linear_blit(brw,
                                intel_obj->buffer, offset,
                                temp_bo, 0,
                                size);
         brw_emit_mi_flush(brw);

         brw_bo_unreference(temp_bo);
         mark_buffer_valid_data(intel_obj, offset, size);
         return;
307 perf_debug("Stalling on glBufferSubData(%ld, %ld) (%ldkb) to a busy "
308 "(%d-%d) buffer object. Use glMapBufferRange() to "
310 (long)offset
, (long)offset
+ size
, (long)(size
/1024),
311 intel_obj
->gpu_active_start
,
312 intel_obj
->gpu_active_end
);
313 intel_batchbuffer_flush(brw
);

   brw_bo_subdata(intel_obj->buffer, offset, size, data);
   mark_buffer_inactive(intel_obj);
   mark_buffer_valid_data(intel_obj, offset, size);
}

/**
 * The GetBufferSubData() driver hook.
 *
 * Implements glGetBufferSubData(), which copies a subrange of a buffer
 * object into user memory.
 */
static void
brw_get_buffer_subdata(struct gl_context *ctx,
                       GLintptrARB offset,
                       GLsizeiptrARB size,
                       GLvoid *data,
                       struct gl_buffer_object *obj)
{
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
   struct brw_context *brw = brw_context(ctx);

   assert(intel_obj);
   if (brw_batch_references(&brw->batch, intel_obj->buffer)) {
      intel_batchbuffer_flush(brw);
   }

   unsigned int map_flags = MAP_READ;
   mem_copy_fn memcpy_fn = memcpy;
#ifdef USE_SSE41
   if (!intel_obj->buffer->cache_coherent && cpu_has_sse4_1) {
      /* Rather than acquire a new WB mmapping of the buffer object and pull
       * it into the CPU cache, keep using the WC mmap that we have for
       * writes, and use the magic movntd instructions instead.
       */
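      /* movntdqa (SSE4.1) performs streaming, non-temporal loads, which can
       * read from write-combined memory efficiently without pulling the data
       * through the CPU caches.
       */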
      map_flags |= MAP_COHERENT;
      memcpy_fn = (mem_copy_fn) _mesa_streaming_load_memcpy;
   }
#endif

   void *map = brw_bo_map(brw, intel_obj->buffer, map_flags);
   if (unlikely(!map)) {
      _mesa_error_no_memory(__func__);
      return;
   }
   memcpy_fn(data, map + offset, size);
   brw_bo_unmap(intel_obj->buffer);

   mark_buffer_inactive(intel_obj);
}

/**
 * The MapBufferRange() driver hook.
 *
 * This implements both glMapBufferRange() and glMapBuffer().
 *
 * The goal of this extension is to allow apps to accumulate their rendering
 * at the same time as they accumulate their buffer object.  Without it,
 * you'd end up blocking on execution of rendering every time you mapped
 * the buffer to put new data in.
 *
 * We support it in 3 ways: If unsynchronized, then don't bother flushing
 * the batchbuffer before mapping the buffer, which can save blocking in
 * many cases.  If we would still block, and they allow the whole buffer
 * to be invalidated, then just allocate a new buffer to replace the old one.
 * If not, and we'd block, and they allow the subrange of the buffer to be
 * invalidated, then we can make a new little BO, let them write into that,
 * and blit it into the real BO at unmap time.
 */
static void *
brw_map_buffer_range(struct gl_context *ctx,
                     GLintptr offset, GLsizeiptr length,
                     GLbitfield access, struct gl_buffer_object *obj,
                     gl_map_buffer_index index)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);

   STATIC_ASSERT(GL_MAP_UNSYNCHRONIZED_BIT == MAP_ASYNC);
   STATIC_ASSERT(GL_MAP_WRITE_BIT == MAP_WRITE);
   STATIC_ASSERT(GL_MAP_READ_BIT == MAP_READ);
   STATIC_ASSERT(GL_MAP_PERSISTENT_BIT == MAP_PERSISTENT);
   STATIC_ASSERT(GL_MAP_COHERENT_BIT == MAP_COHERENT);
   assert((access & MAP_INTERNAL_MASK) == 0);
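
   /* Because the GL_MAP_* bit values line up with the driver's MAP_* flags
    * (the static asserts above), the GL access mask can be handed straight
    * to brw_bo_map() below without any translation.
    */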

   /* _mesa_MapBufferRange (GL entrypoint) sets these, but the vbo module
    * also internally uses our functions directly.
    */
   obj->Mappings[index].Offset = offset;
   obj->Mappings[index].Length = length;
   obj->Mappings[index].AccessFlags = access;

   if (intel_obj->buffer == NULL) {
      obj->Mappings[index].Pointer = NULL;
      return NULL;
   }

   /* If the access is synchronized (like a normal buffer mapping), then get
    * things flushed out so the later mapping syncs appropriately through
    * GEM.  If the user doesn't care about existing buffer contents and
    * mapping would cause us to block, then throw out the old buffer.
    *
    * If they set INVALIDATE_BUFFER, we can pitch the current contents to
    * achieve the required synchronization.
    */
   if (!(access & GL_MAP_UNSYNCHRONIZED_BIT)) {
      if (brw_batch_references(&brw->batch, intel_obj->buffer)) {
         if (access & GL_MAP_INVALIDATE_BUFFER_BIT) {
            brw_bo_unreference(intel_obj->buffer);
            alloc_buffer_object(brw, intel_obj);
         } else {
            perf_debug("Stalling on the GPU for mapping a busy buffer "
                       "object\n");
            intel_batchbuffer_flush(brw);
         }
      } else if (brw_bo_busy(intel_obj->buffer) &&
                 (access & GL_MAP_INVALIDATE_BUFFER_BIT)) {
         brw_bo_unreference(intel_obj->buffer);
         alloc_buffer_object(brw, intel_obj);
      }
   }

   if (access & MAP_WRITE)
      mark_buffer_valid_data(intel_obj, offset, length);

   /* If the user is mapping a range of an active buffer object but
    * doesn't require the current contents of that range, make a new
    * BO, and we'll copy what they put in there out at unmap or
    * FlushRange time.
    *
    * That is, unless they're looking for a persistent mapping -- we would
    * need to do blits in the MemoryBarrier call, and it's easier to just do
    * a GPU stall and do a mapping.
    */
   if (!(access & (GL_MAP_UNSYNCHRONIZED_BIT | GL_MAP_PERSISTENT_BIT)) &&
       (access & GL_MAP_INVALIDATE_RANGE_BIT) &&
       brw_bo_busy(intel_obj->buffer)) {
      /* Ensure that the base alignment of the allocation meets the alignment
       * guarantees the driver has advertised to the application.
       */
      const unsigned alignment = ctx->Const.MinMapBufferAlignment;
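
      /* Allocating the temporary BO at 'alignment' and then offsetting the
       * returned pointer by offset % alignment reproduces the offset's
       * misalignment inside the temporary, so (pointer - offset) stays a
       * multiple of the advertised alignment.  E.g. with a 64-byte alignment
       * and offset = 70, the app gets map + 6.
       */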
      intel_obj->map_extra[index] = (uintptr_t) offset % alignment;
      intel_obj->range_map_bo[index] =
         brw_bo_alloc(brw->bufmgr, "BO blit temp",
                      length + intel_obj->map_extra[index], alignment);
      void *map = brw_bo_map(brw, intel_obj->range_map_bo[index], access);
      obj->Mappings[index].Pointer = map + intel_obj->map_extra[index];
      return obj->Mappings[index].Pointer;
   }

   void *map = brw_bo_map(brw, intel_obj->buffer, access);
   if (!(access & GL_MAP_UNSYNCHRONIZED_BIT)) {
      mark_buffer_inactive(intel_obj);
   }

   obj->Mappings[index].Pointer = map + offset;
   return obj->Mappings[index].Pointer;
}

/**
 * The FlushMappedBufferRange() driver hook.
 *
 * Implements glFlushMappedBufferRange(), which signifies that modifications
 * have been made to a range of a mapped buffer, and it should be flushed.
 *
 * This is only used for buffers mapped with GL_MAP_FLUSH_EXPLICIT_BIT.
 *
 * Ideally we'd use a BO to avoid taking up cache space for the temporary
 * data, but FlushMappedBufferRange may be followed by further writes to
 * the pointer, so we would have to re-map after emitting our blit, which
 * would defeat the point.
 */
static void
brw_flush_mapped_buffer_range(struct gl_context *ctx,
                              GLintptr offset, GLsizeiptr length,
                              struct gl_buffer_object *obj,
                              gl_map_buffer_index index)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(obj->Mappings[index].AccessFlags & GL_MAP_FLUSH_EXPLICIT_BIT);

   /* If we gave a direct mapping of the buffer instead of using a temporary,
    * then there's nothing to do.
    */
   if (intel_obj->range_map_bo[index] == NULL)
      return;

   if (length == 0)
      return;

   /* Note that we're not unmapping our buffer while executing the blit.  We
    * need to have a mapping still at the end of this call, since the user
    * gets to make further modifications and glFlushMappedBufferRange() calls.
    * This is safe, because:
    *
    * - On LLC platforms, we're using a CPU mapping that's coherent with the
    *   GPU (except for the render caches), so the kernel doesn't need to do
    *   any flushing work for us except for what happens at batch exec time
    *   anyway.
    *
    * - On non-LLC platforms, we're using a GTT mapping that writes directly
    *   to system memory (except for the chipset cache that gets flushed at
    *   batch exec time).
    *
    * In both cases we don't need to stall for the previous blit to complete
    * so we can re-map (and we definitely don't want to, since that would be
    * slow): If the user edits a part of their buffer that's previously been
    * blitted, then our lack of synchronization is fine, because either
    * they'll get some too-new data in the first blit and not do another blit
    * of that area (but in that case the results are undefined), or they'll
    * do another blit of that area and the complete newer data will land the
    * second time.
    */
   intel_emit_linear_blit(brw,
                          intel_obj->buffer,
                          obj->Mappings[index].Offset + offset,
                          intel_obj->range_map_bo[index],
                          intel_obj->map_extra[index] + offset,
                          length);
   mark_buffer_gpu_usage(intel_obj,
                         obj->Mappings[index].Offset + offset,
                         length);
   brw_emit_mi_flush(brw);
}

/**
 * The UnmapBuffer() driver hook.
 *
 * Implements glUnmapBuffer().
 */
static GLboolean
brw_unmap_buffer(struct gl_context *ctx,
                 struct gl_buffer_object *obj,
                 gl_map_buffer_index index)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);
   assert(obj->Mappings[index].Pointer);
   if (intel_obj->range_map_bo[index] != NULL) {
      brw_bo_unmap(intel_obj->range_map_bo[index]);

      if (!(obj->Mappings[index].AccessFlags & GL_MAP_FLUSH_EXPLICIT_BIT)) {
         intel_emit_linear_blit(brw,
                                intel_obj->buffer,
                                obj->Mappings[index].Offset,
                                intel_obj->range_map_bo[index],
                                intel_obj->map_extra[index],
                                obj->Mappings[index].Length);
         mark_buffer_gpu_usage(intel_obj, obj->Mappings[index].Offset,
                               obj->Mappings[index].Length);
         brw_emit_mi_flush(brw);
      }

      /* Since we've emitted some blits to buffers that will (likely) be used
       * in rendering operations in other cache domains in this batch, emit a
       * flush.  Once again, we wish for a domain tracker in libdrm to cover
       * usage inside of a batchbuffer.
       */

      brw_bo_unreference(intel_obj->range_map_bo[index]);
      intel_obj->range_map_bo[index] = NULL;
   } else if (intel_obj->buffer != NULL) {
      brw_bo_unmap(intel_obj->buffer);
   }
   obj->Mappings[index].Pointer = NULL;
   obj->Mappings[index].Offset = 0;
   obj->Mappings[index].Length = 0;

   return true;
}

/**
 * Gets a pointer to the object's BO, and marks the given range as being used
 * on the GPU.
 *
 * Anywhere that uses buffer objects in the pipeline should be using this to
 * mark the range of the buffer that is being accessed by the pipeline.
 */
struct brw_bo *
intel_bufferobj_buffer(struct brw_context *brw,
                       struct intel_buffer_object *intel_obj,
                       uint32_t offset, uint32_t size, bool write)
{
   /* This is needed so that things like transform feedback and texture
    * buffer objects that need a BO but don't want to check that they exist
    * for draw-time validation can just always get a BO from a GL buffer
    * object.
    */
   if (intel_obj->buffer == NULL)
      alloc_buffer_object(brw, intel_obj);

   mark_buffer_gpu_usage(intel_obj, offset, size);

   /* If writing, (conservatively) mark this section as having valid data. */
   if (write)
      mark_buffer_valid_data(intel_obj, offset, size);

   return intel_obj->buffer;
}

/**
 * The CopyBufferSubData() driver hook.
 *
 * Implements glCopyBufferSubData(), which copies a portion of one buffer
 * object's data to another.  Independent source and destination offsets
 * are allowed.
 */
static void
brw_copy_buffer_subdata(struct gl_context *ctx,
                        struct gl_buffer_object *src,
                        struct gl_buffer_object *dst,
                        GLintptr read_offset, GLintptr write_offset,
                        GLsizeiptr size)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_buffer_object *intel_src = intel_buffer_object(src);
   struct intel_buffer_object *intel_dst = intel_buffer_object(dst);
   struct brw_bo *src_bo, *dst_bo;

   if (size == 0)
      return;

   dst_bo = intel_bufferobj_buffer(brw, intel_dst, write_offset, size, true);
   src_bo = intel_bufferobj_buffer(brw, intel_src, read_offset, size, false);

   intel_emit_linear_blit(brw,
                          dst_bo, write_offset,
                          src_bo, read_offset, size);

   /* Since we've emitted some blits to buffers that will (likely) be used
    * in rendering operations in other cache domains in this batch, emit a
    * flush.  Once again, we wish for a domain tracker in libdrm to cover
    * usage inside of a batchbuffer.
    */
   brw_emit_mi_flush(brw);
}

void
intelInitBufferObjectFuncs(struct dd_function_table *functions)
{
   functions->NewBufferObject = brw_new_buffer_object;
   functions->DeleteBuffer = brw_delete_buffer;
   functions->BufferData = brw_buffer_data;
   functions->BufferSubData = brw_buffer_subdata;
   functions->GetBufferSubData = brw_get_buffer_subdata;
   functions->MapBufferRange = brw_map_buffer_range;
   functions->FlushMappedBufferRange = brw_flush_mapped_buffer_range;
   functions->UnmapBuffer = brw_unmap_buffer;
   functions->CopyBufferSubData = brw_copy_buffer_subdata;
}