i965: Issue performance warnings on MapBufferRange stalls.
[mesa.git] / src/mesa/drivers/dri/i965/intel_buffer_objects.c
/**************************************************************************
 *
 * Copyright 2003 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/**
 * @file intel_buffer_objects.c
 *
 * This provides core GL buffer object functionality.
 */

#include "main/imports.h"
#include "main/mtypes.h"
#include "main/macros.h"
#include "main/bufferobj.h"

#include "brw_context.h"
#include "intel_blit.h"
#include "intel_buffer_objects.h"
#include "intel_batchbuffer.h"

/**
 * Map a buffer object; issue performance warnings if mapping causes stalls.
 *
 * This matches the drm_intel_bo_map API, but takes an additional human-readable
 * name for the buffer object to use in the performance debug message.
 */
int
brw_bo_map(struct brw_context *brw,
           drm_intel_bo *bo, int write_enable,
           const char *bo_name)
{
   if (likely(!brw->perf_debug) || !drm_intel_bo_busy(bo))
      return drm_intel_bo_map(bo, write_enable);

   double start_time = get_time();

   int ret = drm_intel_bo_map(bo, write_enable);

   perf_debug("CPU mapping a busy %s BO stalled and took %.03f ms.\n",
              bo_name, (get_time() - start_time) * 1000);

   return ret;
}

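/**
 * Like brw_bo_map(), but maps the BO through the GTT rather than with a CPU
 * mapping, again warning when mapping a busy BO caused a stall.
 */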
int
brw_bo_map_gtt(struct brw_context *brw, drm_intel_bo *bo, const char *bo_name)
{
   if (likely(!brw->perf_debug) || !drm_intel_bo_busy(bo))
      return drm_intel_gem_bo_map_gtt(bo);

   double start_time = get_time();

   int ret = drm_intel_gem_bo_map_gtt(bo);

   perf_debug("GTT mapping a busy %s BO stalled and took %.03f ms.\n",
              bo_name, (get_time() - start_time) * 1000);

   return ret;
}

static GLboolean
intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj,
                      gl_map_buffer_index index);

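/**
 * Grows the tracked [gpu_active_start, gpu_active_end) range to cover the
 * given range, recording that pending GPU work may be accessing it.
 */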
static void
intel_bufferobj_mark_gpu_usage(struct intel_buffer_object *intel_obj,
                               uint32_t offset, uint32_t size)
{
   intel_obj->gpu_active_start = MIN2(intel_obj->gpu_active_start, offset);
   intel_obj->gpu_active_end = MAX2(intel_obj->gpu_active_end, offset + size);
}

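/**
 * Resets the tracked GPU-active range to empty, i.e. no part of the buffer
 * is known to be in use by the GPU.
 */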
static void
intel_bufferobj_mark_inactive(struct intel_buffer_object *intel_obj)
{
   intel_obj->gpu_active_start = ~0;
   intel_obj->gpu_active_end = 0;
}

/** Allocates a new drm_intel_bo to store the data for the buffer object. */
static void
intel_bufferobj_alloc_buffer(struct brw_context *brw,
                             struct intel_buffer_object *intel_obj)
{
   intel_obj->buffer = drm_intel_bo_alloc(brw->bufmgr, "bufferobj",
                                          intel_obj->Base.Size, 64);

   /* The buffer might be bound as a uniform buffer, so mark that state dirty
    * so it gets re-emitted with the new BO.
    */
   brw->state.dirty.brw |= BRW_NEW_UNIFORM_BUFFER;

   intel_bufferobj_mark_inactive(intel_obj);
}

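/** Drops the driver's reference to the backing BO and clears the pointer. */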
static void
release_buffer(struct intel_buffer_object *intel_obj)
{
   drm_intel_bo_unreference(intel_obj->buffer);
   intel_obj->buffer = NULL;
}

/**
 * The NewBufferObject() driver hook.
 *
 * Allocates a new intel_buffer_object structure and initializes it.
 *
 * There is some duplication between Mesa's buffer objects and our
 * bufmgr buffers.  Both have an integer handle and a hashtable to
 * look up an opaque structure.  It would be nice if the handles and
 * internal structures were somehow shared.
 */
static struct gl_buffer_object *
intel_bufferobj_alloc(struct gl_context * ctx, GLuint name, GLenum target)
{
   struct intel_buffer_object *obj = CALLOC_STRUCT(intel_buffer_object);
   if (!obj) {
      _mesa_error_no_memory(__func__);
      return NULL;
   }

   _mesa_initialize_buffer_object(ctx, &obj->Base, name, target);

   obj->buffer = NULL;

   return &obj->Base;
}

/**
 * The DeleteBuffer() driver hook.
 *
 * Deletes a single OpenGL buffer object.  Used by glDeleteBuffers().
 */
static void
intel_bufferobj_free(struct gl_context * ctx, struct gl_buffer_object *obj)
{
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);

   /* Buffer objects are automatically unmapped when they are deleted
    * according to the spec, but Mesa doesn't call UnmapBuffer for us at
    * context destroy time (though it does when you call glDeleteBuffers),
    * so unmap any outstanding mappings here.
    */
   _mesa_buffer_unmap_all_mappings(ctx, obj);

   drm_intel_bo_unreference(intel_obj->buffer);
   free(intel_obj);
}

/**
 * The BufferData() driver hook.
 *
 * Implements glBufferData(), which recreates a buffer object's data store
 * and populates it with the given data, if present.
 *
 * Any data that was previously stored in the buffer object is lost.
 *
 * \return true for success, false if out of memory
 */
static GLboolean
intel_bufferobj_data(struct gl_context * ctx,
                     GLenum target,
                     GLsizeiptrARB size,
                     const GLvoid * data,
                     GLenum usage,
                     GLbitfield storageFlags,
                     struct gl_buffer_object *obj)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   /* Part of the ABI, but this function doesn't use it. */
   (void) target;

   intel_obj->Base.Size = size;
   intel_obj->Base.Usage = usage;
   intel_obj->Base.StorageFlags = storageFlags;

   assert(!obj->Mappings[MAP_USER].Pointer); /* Mesa should have unmapped it */
   assert(!obj->Mappings[MAP_INTERNAL].Pointer);

   if (intel_obj->buffer != NULL)
      release_buffer(intel_obj);

   if (size != 0) {
      intel_bufferobj_alloc_buffer(brw, intel_obj);
      if (!intel_obj->buffer)
         return false;

      if (data != NULL)
         drm_intel_bo_subdata(intel_obj->buffer, 0, size, data);
   }

   return true;
}

/**
 * The BufferSubData() driver hook.
 *
 * Implements glBufferSubData(), which replaces a portion of the data in a
 * buffer object.
 *
 * If the data range specified by (size + offset) extends beyond the end of
 * the buffer or if data is NULL, no copy is performed.
 */
static void
intel_bufferobj_subdata(struct gl_context * ctx,
                        GLintptrARB offset,
                        GLsizeiptrARB size,
                        const GLvoid * data, struct gl_buffer_object *obj)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
   bool busy;

   if (size == 0)
      return;

   assert(intel_obj);

   /* See if we can write the data into the user's BO without synchronizing.
    * This avoids GPU stalls in an unfortunately common usage pattern:
    * uploading sequentially into a BO, with draw calls in between each
    * upload.
    *
    * Once we've hit this path, we mark this GL BO as preferring stalling to
    * blits, so that we can hopefully hit this path again in the future
    * (otherwise, an app that might occasionally stall but mostly not will
    * end up blitting all the time, at the cost of bandwidth).
    */
   if (brw->has_llc) {
      if (offset + size <= intel_obj->gpu_active_start ||
          intel_obj->gpu_active_end <= offset) {
         drm_intel_gem_bo_map_unsynchronized(intel_obj->buffer);
         memcpy(intel_obj->buffer->virtual + offset, data, size);
         drm_intel_bo_unmap(intel_obj->buffer);

         if (intel_obj->gpu_active_end > intel_obj->gpu_active_start)
            intel_obj->prefer_stall_to_blit = true;
         return;
      }
   }

   busy =
      drm_intel_bo_busy(intel_obj->buffer) ||
      drm_intel_bo_references(brw->batch.bo, intel_obj->buffer);

   if (busy) {
      if (size == intel_obj->Base.Size) {
         /* Replace the current busy bo so the subdata doesn't stall. */
         drm_intel_bo_unreference(intel_obj->buffer);
         intel_bufferobj_alloc_buffer(brw, intel_obj);
      } else if (!intel_obj->prefer_stall_to_blit) {
         perf_debug("Using a blit copy to avoid stalling on "
                    "glBufferSubData(%ld, %ld) (%ldkb) to a busy "
                    "(%d-%d) buffer object.\n",
                    (long)offset, (long)offset + size, (long)(size/1024),
                    intel_obj->gpu_active_start,
                    intel_obj->gpu_active_end);
         drm_intel_bo *temp_bo =
            drm_intel_bo_alloc(brw->bufmgr, "subdata temp", size, 64);

         drm_intel_bo_subdata(temp_bo, 0, size, data);

         intel_emit_linear_blit(brw,
                                intel_obj->buffer, offset,
                                temp_bo, 0,
                                size);

         drm_intel_bo_unreference(temp_bo);
         return;
      } else {
         perf_debug("Stalling on glBufferSubData(%ld, %ld) (%ldkb) to a busy "
                    "(%d-%d) buffer object.  Use glMapBufferRange() to "
                    "avoid this.\n",
                    (long)offset, (long)offset + size, (long)(size/1024),
                    intel_obj->gpu_active_start,
                    intel_obj->gpu_active_end);
         intel_batchbuffer_flush(brw);
      }
   }

   drm_intel_bo_subdata(intel_obj->buffer, offset, size, data);
   intel_bufferobj_mark_inactive(intel_obj);
}

/**
 * The GetBufferSubData() driver hook.
 *
 * Implements glGetBufferSubData(), which copies a subrange of a buffer
 * object into user memory.
 */
static void
intel_bufferobj_get_subdata(struct gl_context * ctx,
                            GLintptrARB offset,
                            GLsizeiptrARB size,
                            GLvoid * data, struct gl_buffer_object *obj)
{
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
   struct brw_context *brw = brw_context(ctx);

   assert(intel_obj);
   if (drm_intel_bo_references(brw->batch.bo, intel_obj->buffer)) {
      intel_batchbuffer_flush(brw);
   }
   drm_intel_bo_get_subdata(intel_obj->buffer, offset, size, data);

   intel_bufferobj_mark_inactive(intel_obj);
}

/**
 * The MapBufferRange() driver hook.
 *
 * This implements both glMapBufferRange() and glMapBuffer().
 *
 * The goal of this extension is to allow apps to accumulate their rendering
 * at the same time as they accumulate their buffer object.  Without it,
 * you'd end up blocking on execution of rendering every time you mapped
 * the buffer to put new data in.
 *
 * We support it in 3 ways: If unsynchronized, then don't bother
 * flushing the batchbuffer before mapping the buffer, which can save blocking
 * in many cases.  If we would still block, and they allow the whole buffer
 * to be invalidated, then just allocate a new buffer to replace the old one.
 * If not, and we'd block, and they allow the subrange of the buffer to be
 * invalidated, then we can make a new little BO, let them write into that,
 * and blit it into the real BO at unmap time.
 */
static void *
intel_bufferobj_map_range(struct gl_context * ctx,
                          GLintptr offset, GLsizeiptr length,
                          GLbitfield access, struct gl_buffer_object *obj,
                          gl_map_buffer_index index)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);

   /* _mesa_MapBufferRange (GL entrypoint) sets these, but the vbo module also
    * internally uses our functions directly.
    */
   obj->Mappings[index].Offset = offset;
   obj->Mappings[index].Length = length;
   obj->Mappings[index].AccessFlags = access;

   if (intel_obj->buffer == NULL) {
      obj->Mappings[index].Pointer = NULL;
      return NULL;
   }

   /* If the access is synchronized (like a normal buffer mapping), then get
    * things flushed out so the later mapping syncs appropriately through GEM.
    * If the user doesn't care about existing buffer contents and mapping would
    * cause us to block, then throw out the old buffer.
    *
    * If they set INVALIDATE_BUFFER, we can pitch the current contents to
    * achieve the required synchronization.
    */
   if (!(access & GL_MAP_UNSYNCHRONIZED_BIT)) {
      if (drm_intel_bo_references(brw->batch.bo, intel_obj->buffer)) {
         if (access & GL_MAP_INVALIDATE_BUFFER_BIT) {
            drm_intel_bo_unreference(intel_obj->buffer);
            intel_bufferobj_alloc_buffer(brw, intel_obj);
         } else {
            perf_debug("Stalling on the GPU for mapping a busy buffer "
                       "object\n");
            intel_batchbuffer_flush(brw);
         }
      } else if (drm_intel_bo_busy(intel_obj->buffer) &&
                 (access & GL_MAP_INVALIDATE_BUFFER_BIT)) {
         drm_intel_bo_unreference(intel_obj->buffer);
         intel_bufferobj_alloc_buffer(brw, intel_obj);
      }
   }

   /* If the user is mapping a range of an active buffer object but
    * doesn't require the current contents of that range, make a new
    * BO, and we'll copy what they put in there out at unmap or
    * FlushRange time.
    *
    * That is, unless they're looking for a persistent mapping -- we would
    * need to do blits in the MemoryBarrier call, and it's easier to just do a
    * GPU stall and do a mapping.
    */
   if (!(access & (GL_MAP_UNSYNCHRONIZED_BIT | GL_MAP_PERSISTENT_BIT)) &&
       (access & GL_MAP_INVALIDATE_RANGE_BIT) &&
       drm_intel_bo_busy(intel_obj->buffer)) {
      /* Ensure that the base alignment of the allocation meets the alignment
       * guarantees the driver has advertised to the application.
       */
      const unsigned alignment = ctx->Const.MinMapBufferAlignment;

      intel_obj->map_extra[index] = (uintptr_t) offset % alignment;
      intel_obj->range_map_bo[index] = drm_intel_bo_alloc(brw->bufmgr,
                                                          "BO blit temp",
                                                          length +
                                                          intel_obj->map_extra[index],
                                                          alignment);
      if (brw->has_llc) {
         brw_bo_map(brw, intel_obj->range_map_bo[index],
                    (access & GL_MAP_WRITE_BIT) != 0, "range-map");
      } else {
         drm_intel_gem_bo_map_gtt(intel_obj->range_map_bo[index]);
      }
      obj->Mappings[index].Pointer =
         intel_obj->range_map_bo[index]->virtual + intel_obj->map_extra[index];
      return obj->Mappings[index].Pointer;
   }

   if (access & GL_MAP_UNSYNCHRONIZED_BIT)
      drm_intel_gem_bo_map_unsynchronized(intel_obj->buffer);
   else if (!brw->has_llc && (!(access & GL_MAP_READ_BIT) ||
                              (access & GL_MAP_PERSISTENT_BIT))) {
      drm_intel_gem_bo_map_gtt(intel_obj->buffer);
      intel_bufferobj_mark_inactive(intel_obj);
   } else {
      brw_bo_map(brw, intel_obj->buffer, (access & GL_MAP_WRITE_BIT) != 0,
                 "MapBufferRange");
      intel_bufferobj_mark_inactive(intel_obj);
   }

   obj->Mappings[index].Pointer = intel_obj->buffer->virtual + offset;
   return obj->Mappings[index].Pointer;
}

/**
 * The FlushMappedBufferRange() driver hook.
 *
 * Implements glFlushMappedBufferRange(), which signifies that modifications
 * have been made to a range of a mapped buffer, and it should be flushed.
 *
 * This is only used for buffers mapped with GL_MAP_FLUSH_EXPLICIT_BIT.
 *
 * Ideally we'd use a BO to avoid taking up cache space for the temporary
 * data, but FlushMappedBufferRange may be followed by further writes to
 * the pointer, so we would have to re-map after emitting our blit, which
 * would defeat the point.
 */
static void
intel_bufferobj_flush_mapped_range(struct gl_context *ctx,
                                   GLintptr offset, GLsizeiptr length,
                                   struct gl_buffer_object *obj,
                                   gl_map_buffer_index index)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
   GLbitfield access = obj->Mappings[index].AccessFlags;

   assert(access & GL_MAP_FLUSH_EXPLICIT_BIT);

   /* If we gave a direct mapping of the buffer instead of using a temporary,
    * then there's nothing to do.
    */
   if (intel_obj->range_map_bo[index] == NULL)
      return;

   if (length == 0)
      return;

   /* Note that we're not unmapping our buffer while executing the blit.  We
    * need to have a mapping still at the end of this call, since the user
    * gets to make further modifications and glFlushMappedBufferRange() calls.
    * This is safe, because:
    *
    * - On LLC platforms, we're using a CPU mapping that's coherent with the
    *   GPU (except for the render caches), so the kernel doesn't need to do
    *   any flushing work for us except for what happens at batch exec time
    *   anyway.
    *
    * - On non-LLC platforms, we're using a GTT mapping that writes directly
    *   to system memory (except for the chipset cache that gets flushed at
    *   batch exec time).
    *
    * In both cases we don't need to stall for the previous blit to complete
    * so we can re-map (and we definitely don't want to, since that would be
    * slow): If the user edits a part of their buffer that's previously been
    * blitted, then our lack of synchronization is fine, because either
    * they'll get some too-new data in the first blit and not do another blit
    * of that area (but in that case the results are undefined), or they'll do
    * another blit of that area and the complete newer data will land the
    * second time.
    */
   intel_emit_linear_blit(brw,
                          intel_obj->buffer,
                          obj->Mappings[index].Offset + offset,
                          intel_obj->range_map_bo[index],
                          intel_obj->map_extra[index] + offset,
                          length);
   intel_bufferobj_mark_gpu_usage(intel_obj,
                                  obj->Mappings[index].Offset + offset,
                                  length);
}

/**
 * The UnmapBuffer() driver hook.
 *
 * Implements glUnmapBuffer().
 */
static GLboolean
intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj,
                      gl_map_buffer_index index)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);
   assert(obj->Mappings[index].Pointer);
   if (intel_obj->range_map_bo[index] != NULL) {
      drm_intel_bo_unmap(intel_obj->range_map_bo[index]);

      if (!(obj->Mappings[index].AccessFlags & GL_MAP_FLUSH_EXPLICIT_BIT)) {
         intel_emit_linear_blit(brw,
                                intel_obj->buffer, obj->Mappings[index].Offset,
                                intel_obj->range_map_bo[index],
                                intel_obj->map_extra[index],
                                obj->Mappings[index].Length);
         intel_bufferobj_mark_gpu_usage(intel_obj, obj->Mappings[index].Offset,
                                        obj->Mappings[index].Length);
      }

      /* Since we've emitted some blits to buffers that will (likely) be used
       * in rendering operations in other cache domains in this batch, emit a
       * flush.  Once again, we wish for a domain tracker in libdrm to cover
       * usage inside of a batchbuffer.
       */
      intel_batchbuffer_emit_mi_flush(brw);

      drm_intel_bo_unreference(intel_obj->range_map_bo[index]);
      intel_obj->range_map_bo[index] = NULL;
   } else if (intel_obj->buffer != NULL) {
      drm_intel_bo_unmap(intel_obj->buffer);
   }
   obj->Mappings[index].Pointer = NULL;
   obj->Mappings[index].Offset = 0;
   obj->Mappings[index].Length = 0;

   return true;
}

/**
 * Gets a pointer to the object's BO, and marks the given range as being used
 * on the GPU.
 *
 * Anywhere that uses buffer objects in the pipeline should be using this to
 * mark the range of the buffer that is being accessed by the pipeline.
 */
drm_intel_bo *
intel_bufferobj_buffer(struct brw_context *brw,
                       struct intel_buffer_object *intel_obj,
                       uint32_t offset, uint32_t size)
{
   /* This is needed so that things like transform feedback and texture buffer
    * objects that need a BO but don't want to check that they exist for
    * draw-time validation can just always get a BO from a GL buffer object.
    */
   if (intel_obj->buffer == NULL)
      intel_bufferobj_alloc_buffer(brw, intel_obj);

   intel_bufferobj_mark_gpu_usage(intel_obj, offset, size);

   return intel_obj->buffer;
}

/**
 * The CopyBufferSubData() driver hook.
 *
 * Implements glCopyBufferSubData(), which copies a portion of one buffer
 * object's data to another.  Independent source and destination offsets
 * are allowed.
 */
static void
intel_bufferobj_copy_subdata(struct gl_context *ctx,
                             struct gl_buffer_object *src,
                             struct gl_buffer_object *dst,
                             GLintptr read_offset, GLintptr write_offset,
                             GLsizeiptr size)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_buffer_object *intel_src = intel_buffer_object(src);
   struct intel_buffer_object *intel_dst = intel_buffer_object(dst);
   drm_intel_bo *src_bo, *dst_bo;

   if (size == 0)
      return;

   dst_bo = intel_bufferobj_buffer(brw, intel_dst, write_offset, size);
   src_bo = intel_bufferobj_buffer(brw, intel_src, read_offset, size);

   intel_emit_linear_blit(brw,
                          dst_bo, write_offset,
                          src_bo, read_offset, size);

   /* Since we've emitted some blits to buffers that will (likely) be used
    * in rendering operations in other cache domains in this batch, emit a
    * flush.  Once again, we wish for a domain tracker in libdrm to cover
    * usage inside of a batchbuffer.
    */
   intel_batchbuffer_emit_mi_flush(brw);
}

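/**
 * Plugs the buffer object implementation above into the driver's
 * dd_function_table.
 */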
void
intelInitBufferObjectFuncs(struct dd_function_table *functions)
{
   functions->NewBufferObject = intel_bufferobj_alloc;
   functions->DeleteBuffer = intel_bufferobj_free;
   functions->BufferData = intel_bufferobj_data;
   functions->BufferSubData = intel_bufferobj_subdata;
   functions->GetBufferSubData = intel_bufferobj_get_subdata;
   functions->MapBufferRange = intel_bufferobj_map_range;
   functions->FlushMappedBufferRange = intel_bufferobj_flush_mapped_range;
   functions->UnmapBuffer = intel_bufferobj_unmap;
   functions->CopyBufferSubData = intel_bufferobj_copy_subdata;
}