/**************************************************************************
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "main/imports.h"
#include "main/mtypes.h"
#include "main/macros.h"
#include "main/bufferobj.h"

#include "intel_blit.h"
#include "intel_buffer_objects.h"
#include "intel_batchbuffer.h"
#include "intel_context.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
#include "intel_regions.h"
static GLboolean
intel_bufferobj_unmap(struct gl_context * ctx,
                      GLenum target, struct gl_buffer_object *obj);
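/* Note: the forward declaration above is needed because
 * intel_bufferobj_unmap() is defined near the bottom of this file but is
 * also called earlier, from intel_bufferobj_free() and
 * intel_bufferobj_copy_subdata().
 */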
/** Allocates a new drm_intel_bo to store the data for the buffer object. */
static void
intel_bufferobj_alloc_buffer(struct intel_context *intel,
                             struct intel_buffer_object *intel_obj)
{
   intel_obj->buffer = drm_intel_bo_alloc(intel->bufmgr, "bufferobj",
                                          intel_obj->Base.Size, 64);
}
/**
 * There is some duplication between mesa's bufferobjects and our
 * bufmgr buffers.  Both have an integer handle and a hashtable to
 * lookup an opaque structure.  It would be nice if the handles and
 * internal structure were somehow shared.
 */
static struct gl_buffer_object *
intel_bufferobj_alloc(struct gl_context * ctx, GLuint name, GLenum target)
{
   struct intel_buffer_object *obj = CALLOC_STRUCT(intel_buffer_object);

   _mesa_initialize_buffer_object(&obj->Base, name, target);

   obj->buffer = NULL;

   return &obj->Base;
}
/* Break the COW tie to the region.  The region gets to keep the data.
 */
void
intel_bufferobj_release_region(struct intel_context *intel,
                               struct intel_buffer_object *intel_obj)
{
   assert(intel_obj->region->buffer == intel_obj->buffer);
   intel_obj->region->pbo = NULL;
   intel_obj->region = NULL;

   drm_intel_bo_unreference(intel_obj->buffer);
   intel_obj->buffer = NULL;
}
/* Break the COW tie to the region.  Both the pbo and the region end
 * up with a copy of the data.
 */
void
intel_bufferobj_cow(struct intel_context *intel,
                    struct intel_buffer_object *intel_obj)
{
   assert(intel_obj->region);
   intel_region_cow(intel, intel_obj->region);
}
/**
 * Deallocate/free a vertex/pixel buffer object.
 * Called via glDeleteBuffersARB().
 */
static void
intel_bufferobj_free(struct gl_context * ctx, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);

   /* Buffer objects are automatically unmapped when deleting according
    * to the spec, but Mesa doesn't do UnmapBuffer for us at context destroy
    * (though it does if you call glDeleteBuffers)
    */
   if (obj->Pointer)
      intel_bufferobj_unmap(ctx, 0, obj);

   free(intel_obj->sys_buffer);
   if (intel_obj->region) {
      intel_bufferobj_release_region(intel, intel_obj);
   }
   else if (intel_obj->buffer) {
      drm_intel_bo_unreference(intel_obj->buffer);
   }

   free(intel_obj);
}
/**
 * Allocate space for and store data in a buffer object.  Any data that was
 * previously stored in the buffer object is lost.  If data is NULL,
 * memory will be allocated, but no copy will occur.
 * Called via ctx->Driver.BufferData().
 * \return GL_TRUE for success, GL_FALSE if out of memory
 */
static GLboolean
intel_bufferobj_data(struct gl_context * ctx,
                     GLenum target,
                     GLsizeiptrARB size,
                     const GLvoid * data,
                     GLenum usage, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   intel_obj->Base.Size = size;
   intel_obj->Base.Usage = usage;

   assert(!obj->Pointer); /* Mesa should have unmapped it */

   if (intel_obj->region)
      intel_bufferobj_release_region(intel, intel_obj);

   if (intel_obj->buffer != NULL) {
      drm_intel_bo_unreference(intel_obj->buffer);
      intel_obj->buffer = NULL;
   }
   free(intel_obj->sys_buffer);
   intel_obj->sys_buffer = NULL;

   if (size != 0) {
#ifdef I915
      /* On pre-965, stick VBOs in system memory, as we're always doing swtnl
       * with their contents anyway.
       */
      if (target == GL_ARRAY_BUFFER || target == GL_ELEMENT_ARRAY_BUFFER) {
         intel_obj->sys_buffer = malloc(size);
         if (intel_obj->sys_buffer != NULL) {
            if (data != NULL)
               memcpy(intel_obj->sys_buffer, data, size);
            return GL_TRUE;
         }
      }
#endif
      intel_bufferobj_alloc_buffer(intel, intel_obj);
      if (!intel_obj->buffer)
         return GL_FALSE;

      if (data != NULL)
         drm_intel_bo_subdata(intel_obj->buffer, 0, size, data);
   }

   return GL_TRUE;
}
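/* Note: when BufferData lands in sys_buffer, no GEM buffer exists yet;
 * intel_bufferobj_buffer() further down creates one lazily the first time
 * the hardware actually needs to source from the buffer.
 */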
/**
 * Replace data in a subrange of buffer object.  If the data range
 * specified by size + offset extends beyond the end of the buffer or
 * if data is NULL, no copy is performed.
 * Called via glBufferSubDataARB().
 */
static void
intel_bufferobj_subdata(struct gl_context * ctx,
                        GLenum target,
                        GLintptrARB offset,
                        GLsizeiptrARB size,
                        const GLvoid * data, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   if (size == 0)
      return;

   assert(intel_obj);

   if (intel_obj->region)
      intel_bufferobj_cow(intel, intel_obj);

   if (intel_obj->sys_buffer)
      memcpy((char *)intel_obj->sys_buffer + offset, data, size);
   else {
      /* Flush any existing batchbuffer that might reference this data. */
      if (intel->gen < 6) {
         if (drm_intel_bo_busy(intel_obj->buffer) ||
             drm_intel_bo_references(intel->batch->buf, intel_obj->buffer)) {
            drm_intel_bo *temp_bo;

            temp_bo = drm_intel_bo_alloc(intel->bufmgr, "subdata temp", size, 64);

            drm_intel_bo_subdata(temp_bo, 0, size, data);

            intel_emit_linear_blit(intel,
                                   intel_obj->buffer, offset,
                                   temp_bo, 0,
                                   size);

            drm_intel_bo_unreference(temp_bo);
         } else {
            drm_intel_bo_subdata(intel_obj->buffer, offset, size, data);
         }
      } else {
         if (drm_intel_bo_references(intel->batch->buf, intel_obj->buffer)) {
            intel_batchbuffer_flush(intel->batch);
         }
         drm_intel_bo_subdata(intel_obj->buffer, offset, size, data);
      }
   }
}
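/* The pre-gen6 path above avoids stalling on a busy destination BO: the new
 * data is staged in a throwaway BO and copied over with the blitter, so the
 * CPU never has to wait for the GPU to finish with intel_obj->buffer.
 */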
/**
 * Called via glGetBufferSubDataARB().
 */
static void
intel_bufferobj_get_subdata(struct gl_context * ctx,
                            GLenum target,
                            GLintptrARB offset,
                            GLsizeiptrARB size,
                            GLvoid * data, struct gl_buffer_object *obj)
{
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);
   if (intel_obj->sys_buffer)
      memcpy(data, (char *)intel_obj->sys_buffer + offset, size);
   else
      drm_intel_bo_get_subdata(intel_obj->buffer, offset, size, data);
}
/**
 * Called via glMapBufferARB().
 */
static void *
intel_bufferobj_map(struct gl_context * ctx,
                    GLenum target,
                    GLenum access, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
   GLboolean read_only = (access == GL_READ_ONLY_ARB);
   GLboolean write_only = (access == GL_WRITE_ONLY_ARB);

   assert(intel_obj);

   if (intel_obj->sys_buffer) {
      obj->Pointer = intel_obj->sys_buffer;
      obj->Length = obj->Size;
      obj->Offset = 0;
      return obj->Pointer;
   }

   /* Flush any existing batchbuffer that might reference this data. */
   if (drm_intel_bo_references(intel->batch->buf, intel_obj->buffer))
      intel_flush(ctx);

   if (intel_obj->region)
      intel_bufferobj_cow(intel, intel_obj);

   if (intel_obj->buffer == NULL) {
      obj->Pointer = NULL;
      return NULL;
   }

   if (write_only) {
      drm_intel_gem_bo_map_gtt(intel_obj->buffer);
      intel_obj->mapped_gtt = GL_TRUE;
   }
   else {
      drm_intel_bo_map(intel_obj->buffer, !read_only);
      intel_obj->mapped_gtt = GL_FALSE;
   }

   obj->Pointer = intel_obj->buffer->virtual;
   obj->Length = obj->Size;
   obj->Offset = 0;

   return obj->Pointer;
}
/**
 * Called via glMapBufferRange().
 *
 * The goal of this extension is to allow apps to accumulate their rendering
 * at the same time as they accumulate their buffer object.  Without it,
 * you'd end up blocking on execution of rendering every time you mapped
 * the buffer to put new data in.
 *
 * We support it in 3 ways: If unsynchronized, then don't bother
 * flushing the batchbuffer before mapping the buffer, which can save blocking
 * in many cases.  If we would still block, and they allow the whole buffer
 * to be invalidated, then just allocate a new buffer to replace the old one.
 * If not, and we'd block, and they allow the subrange of the buffer to be
 * invalidated, then we can make a new little BO, let them write into that,
 * and blit it into the real BO at unmap time.
 */
static void *
intel_bufferobj_map_range(struct gl_context * ctx,
                          GLenum target, GLintptr offset, GLsizeiptr length,
                          GLbitfield access, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);

   /* _mesa_MapBufferRange (GL entrypoint) sets these, but the vbo module also
    * internally uses our functions directly.
    */
   obj->Offset = offset;
   obj->Length = length;
   obj->AccessFlags = access;

   if (intel_obj->sys_buffer) {
      obj->Pointer = intel_obj->sys_buffer + offset;
      return obj->Pointer;
   }

   if (intel_obj->region)
      intel_bufferobj_cow(intel, intel_obj);

   /* If the mapping is synchronized with other GL operations, flush
    * the batchbuffer so that GEM knows about the buffer access for later
    * syncing.
    */
   if (!(access & GL_MAP_UNSYNCHRONIZED_BIT) &&
       drm_intel_bo_references(intel->batch->buf, intel_obj->buffer))
      intel_flush(ctx);

   if (intel_obj->buffer == NULL) {
      obj->Pointer = NULL;
      return NULL;
   }

   /* If the user doesn't care about existing buffer contents and mapping
    * would cause us to block, then throw out the old buffer.
    */
   if (!(access & GL_MAP_UNSYNCHRONIZED_BIT) &&
       (access & GL_MAP_INVALIDATE_BUFFER_BIT) &&
       drm_intel_bo_busy(intel_obj->buffer)) {
      drm_intel_bo_unreference(intel_obj->buffer);
      intel_obj->buffer = drm_intel_bo_alloc(intel->bufmgr, "bufferobj",
                                             intel_obj->Base.Size, 64);
   }

   /* If the user is mapping a range of an active buffer object but
    * doesn't require the current contents of that range, make a new
    * BO, and we'll copy what they put in there out at unmap or
    * FlushMappedBufferRange time.
    */
   if ((access & GL_MAP_INVALIDATE_RANGE_BIT) &&
       drm_intel_bo_busy(intel_obj->buffer)) {
      if (access & GL_MAP_FLUSH_EXPLICIT_BIT) {
         intel_obj->range_map_buffer = malloc(length);
         obj->Pointer = intel_obj->range_map_buffer;
      } else {
         intel_obj->range_map_bo = drm_intel_bo_alloc(intel->bufmgr,
                                                      "range map",
                                                      length, 64);
         if (!(access & GL_MAP_READ_BIT)) {
            drm_intel_gem_bo_map_gtt(intel_obj->range_map_bo);
            intel_obj->mapped_gtt = GL_TRUE;
         } else {
            drm_intel_bo_map(intel_obj->range_map_bo,
                             (access & GL_MAP_WRITE_BIT) != 0);
            intel_obj->mapped_gtt = GL_FALSE;
         }
         obj->Pointer = intel_obj->range_map_bo->virtual;
      }
      return obj->Pointer;
   }

   if (!(access & GL_MAP_READ_BIT)) {
      drm_intel_gem_bo_map_gtt(intel_obj->buffer);
      intel_obj->mapped_gtt = GL_TRUE;
   } else {
      drm_intel_bo_map(intel_obj->buffer, (access & GL_MAP_WRITE_BIT) != 0);
      intel_obj->mapped_gtt = GL_FALSE;
   }

   obj->Pointer = intel_obj->buffer->virtual + offset;
   return obj->Pointer;
}
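/* Note on the two map flavors above: a GTT map (drm_intel_gem_bo_map_gtt)
 * gives a view through the aperture (typically write-combined) and is chosen
 * when the caller does not need to read back, while drm_intel_bo_map is used
 * whenever CPU reads of the contents are required.
 */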
/* Ideally we'd use a BO to avoid taking up cache space for the temporary
 * data, but FlushMappedBufferRange may be followed by further writes to
 * the pointer, so we would have to re-map after emitting our blit, which
 * would defeat the point.
 */
static void
intel_bufferobj_flush_mapped_range(struct gl_context *ctx, GLenum target,
                                   GLintptr offset, GLsizeiptr length,
                                   struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
   drm_intel_bo *temp_bo;

   /* Unless we're in the range map using a temporary system buffer,
    * there's no work to do.
    */
   if (intel_obj->range_map_buffer == NULL)
      return;

   if (length == 0)
      return;

   temp_bo = drm_intel_bo_alloc(intel->bufmgr, "range map flush", length, 64);

   drm_intel_bo_subdata(temp_bo, 0, length, intel_obj->range_map_buffer);

   intel_emit_linear_blit(intel,
                          intel_obj->buffer, obj->Offset + offset,
                          temp_bo, 0,
                          length);

   drm_intel_bo_unreference(temp_bo);
}
/**
 * Called via glUnmapBuffer().
 */
static GLboolean
intel_bufferobj_unmap(struct gl_context * ctx,
                      GLenum target, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);
   assert(obj->Pointer);
   if (intel_obj->sys_buffer != NULL) {
      /* always keep the mapping around. */
   } else if (intel_obj->range_map_buffer != NULL) {
      /* Since we've emitted some blits to buffers that will (likely) be used
       * in rendering operations in other cache domains in this batch, emit a
       * flush.  Once again, we wish for a domain tracker in libdrm to cover
       * usage inside of a batchbuffer.
       */
      intel_batchbuffer_emit_mi_flush(intel->batch);
      free(intel_obj->range_map_buffer);
      intel_obj->range_map_buffer = NULL;
   } else if (intel_obj->range_map_bo != NULL) {
      if (intel_obj->mapped_gtt) {
         drm_intel_gem_bo_unmap_gtt(intel_obj->range_map_bo);
      } else {
         drm_intel_bo_unmap(intel_obj->range_map_bo);
      }

      intel_emit_linear_blit(intel,
                             intel_obj->buffer, obj->Offset,
                             intel_obj->range_map_bo, 0,
                             obj->Length);

      /* Since we've emitted some blits to buffers that will (likely) be used
       * in rendering operations in other cache domains in this batch, emit a
       * flush.  Once again, we wish for a domain tracker in libdrm to cover
       * usage inside of a batchbuffer.
       */
      intel_batchbuffer_emit_mi_flush(intel->batch);

      drm_intel_bo_unreference(intel_obj->range_map_bo);
      intel_obj->range_map_bo = NULL;
   } else if (intel_obj->buffer != NULL) {
      if (intel_obj->mapped_gtt) {
         drm_intel_gem_bo_unmap_gtt(intel_obj->buffer);
      } else {
         drm_intel_bo_unmap(intel_obj->buffer);
      }
   }

   obj->Pointer = NULL;
   obj->Offset = 0;
   obj->Length = 0;

   return GL_TRUE;
}
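/* Unmap is also where a range map's staging BO (range_map_bo) gets blitted
 * back into the real buffer; the malloc'd staging path (range_map_buffer)
 * instead relies on FlushMappedBufferRange() above having already emitted
 * its blits, so unmap only frees the memory and flushes the batch.
 */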
drm_intel_bo *
intel_bufferobj_buffer(struct intel_context *intel,
                       struct intel_buffer_object *intel_obj, GLuint flag)
{
   if (intel_obj->region) {
      if (flag == INTEL_WRITE_PART)
         intel_bufferobj_cow(intel, intel_obj);
      else if (flag == INTEL_WRITE_FULL) {
         intel_bufferobj_release_region(intel, intel_obj);
         intel_bufferobj_alloc_buffer(intel, intel_obj);
      }
   }

   if (intel_obj->buffer == NULL) {
      void *sys_buffer = intel_obj->sys_buffer;

      /* only one of buffer and sys_buffer could be non-NULL */
      intel_bufferobj_alloc_buffer(intel, intel_obj);
      intel_obj->sys_buffer = NULL;

      intel_bufferobj_subdata(&intel->ctx,
                              GL_ARRAY_BUFFER_ARB,
                              0,
                              intel_obj->Base.Size,
                              sys_buffer,
                              &intel_obj->Base);
      free(sys_buffer);
      intel_obj->sys_buffer = NULL;
   }

   return intel_obj->buffer;
}
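/* intel_bufferobj_buffer() is where a buffer that has been living in
 * sys_buffer (the pre-965 swtnl path) is migrated into a real GEM BO the
 * first time the GPU actually needs to address it.
 */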
static void
intel_bufferobj_copy_subdata(struct gl_context *ctx,
                             struct gl_buffer_object *src,
                             struct gl_buffer_object *dst,
                             GLintptr read_offset, GLintptr write_offset,
                             GLsizeiptr size)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_src = intel_buffer_object(src);
   struct intel_buffer_object *intel_dst = intel_buffer_object(dst);
   drm_intel_bo *src_bo, *dst_bo;

   if (size == 0)
      return;

   /* If we're in system memory, just map and memcpy. */
   if (intel_src->sys_buffer || intel_dst->sys_buffer || intel->gen >= 6) {
      /* The same buffer may be used, but note that regions copied may
       * not overlap.
       */
      if (src == dst) {
         char *ptr = intel_bufferobj_map(ctx, GL_COPY_WRITE_BUFFER,
                                         GL_READ_WRITE, dst);

         memcpy(ptr + write_offset, ptr + read_offset, size);

         intel_bufferobj_unmap(ctx, GL_COPY_WRITE_BUFFER, dst);
      } else {
         const char *src_ptr;
         char *dst_ptr;

         src_ptr = intel_bufferobj_map(ctx, GL_COPY_READ_BUFFER,
                                       GL_READ_ONLY, src);
         dst_ptr = intel_bufferobj_map(ctx, GL_COPY_WRITE_BUFFER,
                                       GL_WRITE_ONLY, dst);

         memcpy(dst_ptr + write_offset, src_ptr + read_offset, size);

         intel_bufferobj_unmap(ctx, GL_COPY_READ_BUFFER, src);
         intel_bufferobj_unmap(ctx, GL_COPY_WRITE_BUFFER, dst);
      }
      return;
   }

   /* Otherwise, we have real BOs, so blit them. */

   dst_bo = intel_bufferobj_buffer(intel, intel_dst, INTEL_WRITE_PART);
   src_bo = intel_bufferobj_buffer(intel, intel_src, INTEL_READ);

   intel_emit_linear_blit(intel,
                          dst_bo, write_offset,
                          src_bo, read_offset, size);

   /* Since we've emitted some blits to buffers that will (likely) be used
    * in rendering operations in other cache domains in this batch, emit a
    * flush.  Once again, we wish for a domain tracker in libdrm to cover
    * usage inside of a batchbuffer.
    */
   intel_batchbuffer_emit_mi_flush(intel->batch);
}
#if FEATURE_APPLE_object_purgeable
static GLenum
intel_buffer_purgeable(struct gl_context * ctx,
                       drm_intel_bo *buffer,
                       GLenum option)
{
   int retained = 0;

   if (buffer != NULL)
      retained = drm_intel_bo_madvise (buffer, I915_MADV_DONTNEED);

   return retained ? GL_VOLATILE_APPLE : GL_RELEASED_APPLE;
}
static GLenum
intel_buffer_object_purgeable(struct gl_context * ctx,
                              struct gl_buffer_object *obj,
                              GLenum option)
{
   struct intel_buffer_object *intel;

   intel = intel_buffer_object (obj);
   if (intel->buffer != NULL)
      return intel_buffer_purgeable (ctx, intel->buffer, option);

   if (option == GL_RELEASED_APPLE) {
      if (intel->sys_buffer != NULL) {
         free(intel->sys_buffer);
         intel->sys_buffer = NULL;
      }

      return GL_RELEASED_APPLE;
   } else {
      /* XXX Create the buffer and madvise(MADV_DONTNEED)? */
      return intel_buffer_purgeable (ctx,
                                     intel_bufferobj_buffer(intel_context(ctx),
                                                            intel, INTEL_READ),
                                     option);
   }
}
static GLenum
intel_texture_object_purgeable(struct gl_context * ctx,
                               struct gl_texture_object *obj,
                               GLenum option)
{
   struct intel_texture_object *intel;

   intel = intel_texture_object(obj);
   if (intel->mt == NULL || intel->mt->region == NULL)
      return GL_RELEASED_APPLE;

   return intel_buffer_purgeable (ctx, intel->mt->region->buffer, option);
}
static GLenum
intel_render_object_purgeable(struct gl_context * ctx,
                              struct gl_renderbuffer *obj,
                              GLenum option)
{
   struct intel_renderbuffer *intel;

   intel = intel_renderbuffer(obj);
   if (intel->region == NULL)
      return GL_RELEASED_APPLE;

   return intel_buffer_purgeable (ctx, intel->region->buffer, option);
}
static GLenum
intel_buffer_unpurgeable(struct gl_context * ctx,
                         drm_intel_bo *buffer,
                         GLenum option)
{
   int retained;

   retained = 0;
   if (buffer != NULL)
      retained = drm_intel_bo_madvise (buffer, I915_MADV_WILLNEED);

   return retained ? GL_RETAINED_APPLE : GL_UNDEFINED_APPLE;
}
static GLenum
intel_buffer_object_unpurgeable(struct gl_context * ctx,
                                struct gl_buffer_object *obj,
                                GLenum option)
{
   return intel_buffer_unpurgeable (ctx, intel_buffer_object (obj)->buffer, option);
}
static GLenum
intel_texture_object_unpurgeable(struct gl_context * ctx,
                                 struct gl_texture_object *obj,
                                 GLenum option)
{
   struct intel_texture_object *intel;

   intel = intel_texture_object(obj);
   if (intel->mt == NULL || intel->mt->region == NULL)
      return GL_UNDEFINED_APPLE;

   return intel_buffer_unpurgeable (ctx, intel->mt->region->buffer, option);
}
static GLenum
intel_render_object_unpurgeable(struct gl_context * ctx,
                                struct gl_renderbuffer *obj,
                                GLenum option)
{
   struct intel_renderbuffer *intel;

   intel = intel_renderbuffer(obj);
   if (intel->region == NULL)
      return GL_UNDEFINED_APPLE;

   return intel_buffer_unpurgeable (ctx, intel->region->buffer, option);
}
#endif /* FEATURE_APPLE_object_purgeable */
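/* Hook the buffer-object entry points into the device driver function table.
 * Presumably (the call site is not part of this file) this runs while the
 * context's dd_function_table is being filled in at context creation time.
 */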
void
intelInitBufferObjectFuncs(struct dd_function_table *functions)
{
   functions->NewBufferObject = intel_bufferobj_alloc;
   functions->DeleteBuffer = intel_bufferobj_free;
   functions->BufferData = intel_bufferobj_data;
   functions->BufferSubData = intel_bufferobj_subdata;
   functions->GetBufferSubData = intel_bufferobj_get_subdata;
   functions->MapBuffer = intel_bufferobj_map;
   functions->MapBufferRange = intel_bufferobj_map_range;
   functions->FlushMappedBufferRange = intel_bufferobj_flush_mapped_range;
   functions->UnmapBuffer = intel_bufferobj_unmap;
   functions->CopyBufferSubData = intel_bufferobj_copy_subdata;

#if FEATURE_APPLE_object_purgeable
   functions->BufferObjectPurgeable = intel_buffer_object_purgeable;
   functions->TextureObjectPurgeable = intel_texture_object_purgeable;
   functions->RenderObjectPurgeable = intel_render_object_purgeable;

   functions->BufferObjectUnpurgeable = intel_buffer_object_unpurgeable;
   functions->TextureObjectUnpurgeable = intel_texture_object_unpurgeable;
   functions->RenderObjectUnpurgeable = intel_render_object_unpurgeable;
#endif
}