/**************************************************************************
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "main/imports.h"
#include "main/mfeatures.h"
#include "main/mtypes.h"
#include "main/macros.h"
#include "main/bufferobj.h"

#include "intel_blit.h"
#include "intel_buffer_objects.h"
#include "intel_batchbuffer.h"
#include "intel_context.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
#include "intel_regions.h"
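/* Forward declaration: intel_bufferobj_unmap() is used by
 * intel_bufferobj_free() and intel_bufferobj_copy_subdata() before its
 * definition below.
 */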
static GLboolean
intel_bufferobj_unmap(struct gl_context * ctx,
                      GLenum target, struct gl_buffer_object *obj);
/** Allocates a new drm_intel_bo to store the data for the buffer object. */
static void
intel_bufferobj_alloc_buffer(struct intel_context *intel,
                             struct intel_buffer_object *intel_obj)
{
   intel_obj->buffer = drm_intel_bo_alloc(intel->bufmgr, "bufferobj",
                                          intel_obj->Base.Size, 64);
}
/**
 * There is some duplication between mesa's bufferobjects and our
 * bufmgr buffers.  Both have an integer handle and a hashtable to
 * lookup an opaque structure.  It would be nice if the handles and
 * internal structure were somehow shared.
 */
static struct gl_buffer_object *
intel_bufferobj_alloc(struct gl_context * ctx, GLuint name, GLenum target)
{
   struct intel_buffer_object *obj = CALLOC_STRUCT(intel_buffer_object);

   _mesa_initialize_buffer_object(&obj->Base, name, target);

   obj->buffer = NULL;

   return &obj->Base;
}
/* Break the COW tie to the region.  The region gets to keep the data.
 */
void
intel_bufferobj_release_region(struct intel_context *intel,
                               struct intel_buffer_object *intel_obj)
{
   assert(intel_obj->region->buffer == intel_obj->buffer);
   intel_obj->region->pbo = NULL;
   intel_obj->region = NULL;

   drm_intel_bo_unreference(intel_obj->buffer);
   intel_obj->buffer = NULL;
}
/* Break the COW tie to the region.  Both the pbo and the region end
 * up with a copy of the data.
 */
void
intel_bufferobj_cow(struct intel_context *intel,
                    struct intel_buffer_object *intel_obj)
{
   assert(intel_obj->region);
   intel_region_cow(intel, intel_obj->region);
}
/**
 * Deallocate/free a vertex/pixel buffer object.
 * Called via glDeleteBuffersARB().
 */
static void
intel_bufferobj_free(struct gl_context * ctx, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);

   /* Buffer objects are automatically unmapped when deleting according
    * to the spec, but Mesa doesn't do UnmapBuffer for us at context destroy
    * (though it does if you call glDeleteBuffers)
    */
   if (obj->Pointer)
      intel_bufferobj_unmap(ctx, 0, obj);

   free(intel_obj->sys_buffer);
   if (intel_obj->region) {
      intel_bufferobj_release_region(intel, intel_obj);
   }
   else if (intel_obj->buffer) {
      drm_intel_bo_unreference(intel_obj->buffer);
   }

   free(intel_obj);
}
/**
 * Allocate space for and store data in a buffer object.  Any data that was
 * previously stored in the buffer object is lost.  If data is NULL,
 * memory will be allocated, but no copy will occur.
 * Called via ctx->Driver.BufferData().
 * \return GL_TRUE for success, GL_FALSE if out of memory
 */
static GLboolean
intel_bufferobj_data(struct gl_context * ctx,
                     GLenum target,
                     GLsizeiptrARB size,
                     const GLvoid * data,
                     GLenum usage, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   intel_obj->Base.Size = size;
   intel_obj->Base.Usage = usage;

   assert(!obj->Pointer); /* Mesa should have unmapped it */

   if (intel_obj->region)
      intel_bufferobj_release_region(intel, intel_obj);

   if (intel_obj->buffer != NULL) {
      drm_intel_bo_unreference(intel_obj->buffer);
      intel_obj->buffer = NULL;
   }
   free(intel_obj->sys_buffer);
   intel_obj->sys_buffer = NULL;

   if (size != 0) {
#ifdef I915
      /* On pre-965, stick VBOs in system memory, as we're always doing swtnl
       * with their contents anyway.
       */
      if (target == GL_ARRAY_BUFFER || target == GL_ELEMENT_ARRAY_BUFFER) {
         intel_obj->sys_buffer = malloc(size);
         if (intel_obj->sys_buffer != NULL) {
            if (data != NULL)
               memcpy(intel_obj->sys_buffer, data, size);
            return GL_TRUE;
         }
      }
#endif
      intel_bufferobj_alloc_buffer(intel, intel_obj);
      if (!intel_obj->buffer)
         return GL_FALSE;

      if (data != NULL)
         drm_intel_bo_subdata(intel_obj->buffer, 0, size, data);
   }

   return GL_TRUE;
}
/**
 * Replace data in a subrange of buffer object.  If the data range
 * specified by size + offset extends beyond the end of the buffer or
 * if data is NULL, no copy is performed.
 * Called via glBufferSubDataARB().
 */
static void
intel_bufferobj_subdata(struct gl_context * ctx,
                        GLenum target,
                        GLintptrARB offset,
                        GLsizeiptrARB size,
                        const GLvoid * data, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   if (size == 0)
      return;

   assert(intel_obj);

   if (intel_obj->region)
      intel_bufferobj_cow(intel, intel_obj);

   if (intel_obj->sys_buffer)
      memcpy((char *)intel_obj->sys_buffer + offset, data, size);
   else {
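      /* For the GPU-backed case below: rather than stalling when the BO is
       * still busy (or is referenced by the not-yet-flushed batch), pre-gen6
       * stages the data in a temporary BO and copies it into place with a
       * linear blit, so the CPU upload doesn't have to wait on rendering.
       */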
      /* Flush any existing batchbuffer that might reference this data. */
      if (intel->gen < 6) {
         if (drm_intel_bo_busy(intel_obj->buffer) ||
             drm_intel_bo_references(intel->batch->buf, intel_obj->buffer)) {
            drm_intel_bo *temp_bo;

            temp_bo = drm_intel_bo_alloc(intel->bufmgr, "subdata temp", size, 64);

            drm_intel_bo_subdata(temp_bo, 0, size, data);

            intel_emit_linear_blit(intel,
                                   intel_obj->buffer, offset,
                                   temp_bo, 0,
                                   size);

            drm_intel_bo_unreference(temp_bo);
         } else {
            drm_intel_bo_subdata(intel_obj->buffer, offset, size, data);
         }
      } else {
         if (drm_intel_bo_references(intel->batch->buf, intel_obj->buffer)) {
            intel_batchbuffer_flush(intel->batch);
         }
         drm_intel_bo_subdata(intel_obj->buffer, offset, size, data);
      }
   }
}
/**
 * Called via glGetBufferSubDataARB().
 */
static void
intel_bufferobj_get_subdata(struct gl_context * ctx,
                            GLenum target,
                            GLintptrARB offset,
                            GLsizeiptrARB size,
                            GLvoid * data, struct gl_buffer_object *obj)
{
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);
   if (intel_obj->sys_buffer)
      memcpy(data, (char *)intel_obj->sys_buffer + offset, size);
   else
      drm_intel_bo_get_subdata(intel_obj->buffer, offset, size, data);
}
/**
 * Called via glMapBufferARB().
 */
static void *
intel_bufferobj_map(struct gl_context * ctx,
                    GLenum target,
                    GLenum access, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
   GLboolean read_only = (access == GL_READ_ONLY_ARB);
   GLboolean write_only = (access == GL_WRITE_ONLY_ARB);

   assert(intel_obj);

   if (intel_obj->sys_buffer) {
      obj->Pointer = intel_obj->sys_buffer;
      obj->Length = obj->Size;
      obj->Offset = 0;
      return obj->Pointer;
   }

   /* Flush any existing batchbuffer that might reference this data. */
   if (drm_intel_bo_references(intel->batch->buf, intel_obj->buffer))
      intel_batchbuffer_flush(intel->batch);

   if (intel_obj->region)
      intel_bufferobj_cow(intel, intel_obj);

   if (intel_obj->buffer == NULL) {
      obj->Pointer = NULL;
      return NULL;
   }
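   /* Write-only maps go through the GTT (a write-combined mapping), which
    * avoids CPU cache flushes; maps that may be read back use a regular
    * CPU map, since reads through the write-combined window are slow.
    */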
   if (write_only) {
      drm_intel_gem_bo_map_gtt(intel_obj->buffer);
      intel_obj->mapped_gtt = GL_TRUE;
   }
   else {
      drm_intel_bo_map(intel_obj->buffer, !read_only);
      intel_obj->mapped_gtt = GL_FALSE;
   }

   obj->Pointer = intel_obj->buffer->virtual;
   obj->Length = obj->Size;
   obj->Offset = 0;

   return obj->Pointer;
}
/**
 * Called via glMapBufferRange().
 *
 * The goal of this extension is to allow apps to accumulate their rendering
 * at the same time as they accumulate their buffer object.  Without it,
 * you'd end up blocking on execution of rendering every time you mapped
 * the buffer to put new data in.
 *
 * We support it in 3 ways: If unsynchronized, then don't bother
 * flushing the batchbuffer before mapping the buffer, which can save blocking
 * in many cases.  If we would still block, and they allow the whole buffer
 * to be invalidated, then just allocate a new buffer to replace the old one.
 * If not, and we'd block, and they allow the subrange of the buffer to be
 * invalidated, then we can make a new little BO, let them write into that,
 * and blit it into the real BO at unmap time.
 */
static void *
intel_bufferobj_map_range(struct gl_context * ctx,
                          GLenum target, GLintptr offset, GLsizeiptr length,
                          GLbitfield access, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);

   /* _mesa_MapBufferRange (GL entrypoint) sets these, but the vbo module also
    * internally uses our functions directly.
    */
   obj->Offset = offset;
   obj->Length = length;
   obj->AccessFlags = access;

   if (intel_obj->sys_buffer) {
      obj->Pointer = intel_obj->sys_buffer + offset;
      return obj->Pointer;
   }

   if (intel_obj->region)
      intel_bufferobj_cow(intel, intel_obj);

   /* If the mapping is synchronized with other GL operations, flush
    * the batchbuffer so that GEM knows about the buffer access for later
    * syncing.
    */
   if (!(access & GL_MAP_UNSYNCHRONIZED_BIT) &&
       drm_intel_bo_references(intel->batch->buf, intel_obj->buffer))
      intel_batchbuffer_flush(intel->batch);

   if (intel_obj->buffer == NULL) {
      obj->Pointer = NULL;
      return NULL;
   }

   /* If the user doesn't care about existing buffer contents and mapping
    * would cause us to block, then throw out the old buffer.
    */
   if (!(access & GL_MAP_UNSYNCHRONIZED_BIT) &&
       (access & GL_MAP_INVALIDATE_BUFFER_BIT) &&
       drm_intel_bo_busy(intel_obj->buffer)) {
      drm_intel_bo_unreference(intel_obj->buffer);
      intel_obj->buffer = drm_intel_bo_alloc(intel->bufmgr, "bufferobj",
                                             intel_obj->Base.Size, 64);
   }

   /* If the user is mapping a range of an active buffer object but
    * doesn't require the current contents of that range, make a new
    * BO, and we'll copy what they put in there out at unmap or
    * FlushRange time.
    */
   if ((access & GL_MAP_INVALIDATE_RANGE_BIT) &&
       drm_intel_bo_busy(intel_obj->buffer)) {
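      /* With GL_MAP_FLUSH_EXPLICIT_BIT, hand back a malloc'd shadow of the
       * range so each explicitly flushed subrange can be uploaded from it
       * (see intel_bufferobj_flush_mapped_range); otherwise use a temporary
       * BO that gets blitted back into place at unmap time.
       */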
      if (access & GL_MAP_FLUSH_EXPLICIT_BIT) {
         intel_obj->range_map_buffer = malloc(length);
         obj->Pointer = intel_obj->range_map_buffer;
      } else {
         intel_obj->range_map_bo = drm_intel_bo_alloc(intel->bufmgr,
                                                      "range map",
                                                      length, 64);
         if (!(access & GL_MAP_READ_BIT)) {
            drm_intel_gem_bo_map_gtt(intel_obj->range_map_bo);
            intel_obj->mapped_gtt = GL_TRUE;
         } else {
            drm_intel_bo_map(intel_obj->range_map_bo,
                             (access & GL_MAP_WRITE_BIT) != 0);
            intel_obj->mapped_gtt = GL_FALSE;
         }
         obj->Pointer = intel_obj->range_map_bo->virtual;
      }
      return obj->Pointer;
   }

   if (!(access & GL_MAP_READ_BIT)) {
      drm_intel_gem_bo_map_gtt(intel_obj->buffer);
      intel_obj->mapped_gtt = GL_TRUE;
   } else {
      drm_intel_bo_map(intel_obj->buffer, (access & GL_MAP_WRITE_BIT) != 0);
      intel_obj->mapped_gtt = GL_FALSE;
   }

   obj->Pointer = intel_obj->buffer->virtual + offset;
   return obj->Pointer;
}
/* Ideally we'd use a BO to avoid taking up cache space for the temporary
 * data, but FlushMappedBufferRange may be followed by further writes to
 * the pointer, so we would have to re-map after emitting our blit, which
 * would defeat the point.
 */
static void
intel_bufferobj_flush_mapped_range(struct gl_context *ctx, GLenum target,
                                   GLintptr offset, GLsizeiptr length,
                                   struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
   drm_intel_bo *temp_bo;

   /* Unless we're in the range map using a temporary system buffer,
    * there's no work to do.
    */
   if (intel_obj->range_map_buffer == NULL)
      return;

   if (length == 0)
      return;

   temp_bo = drm_intel_bo_alloc(intel->bufmgr, "range map flush", length, 64);

   drm_intel_bo_subdata(temp_bo, 0, length,
                        intel_obj->range_map_buffer + offset);

   intel_emit_linear_blit(intel,
                          intel_obj->buffer, obj->Offset + offset,
                          temp_bo, 0,
                          length);

   drm_intel_bo_unreference(temp_bo);
}
/**
 * Called via glUnmapBuffer().
 */
static GLboolean
intel_bufferobj_unmap(struct gl_context * ctx,
                      GLenum target, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);
   assert(obj->Pointer);
   if (intel_obj->sys_buffer != NULL) {
      /* always keep the mapping around. */
   } else if (intel_obj->range_map_buffer != NULL) {
      /* Since we've emitted some blits to buffers that will (likely) be used
       * in rendering operations in other cache domains in this batch, emit a
       * flush.  Once again, we wish for a domain tracker in libdrm to cover
       * usage inside of a batchbuffer.
       */
      intel_batchbuffer_emit_mi_flush(intel->batch);
      free(intel_obj->range_map_buffer);
      intel_obj->range_map_buffer = NULL;
   } else if (intel_obj->range_map_bo != NULL) {
      if (intel_obj->mapped_gtt) {
         drm_intel_gem_bo_unmap_gtt(intel_obj->range_map_bo);
      } else {
         drm_intel_bo_unmap(intel_obj->range_map_bo);
      }

      intel_emit_linear_blit(intel,
                             intel_obj->buffer, obj->Offset,
                             intel_obj->range_map_bo, 0,
                             obj->Length);

      /* Since we've emitted some blits to buffers that will (likely) be used
       * in rendering operations in other cache domains in this batch, emit a
       * flush.  Once again, we wish for a domain tracker in libdrm to cover
       * usage inside of a batchbuffer.
       */
      intel_batchbuffer_emit_mi_flush(intel->batch);

      drm_intel_bo_unreference(intel_obj->range_map_bo);
      intel_obj->range_map_bo = NULL;
   } else if (intel_obj->buffer != NULL) {
      if (intel_obj->mapped_gtt) {
         drm_intel_gem_bo_unmap_gtt(intel_obj->buffer);
      } else {
         drm_intel_bo_unmap(intel_obj->buffer);
      }
   }

   obj->Pointer = NULL;
   obj->Offset = 0;
   obj->Length = 0;

   return GL_TRUE;
}
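/**
 * Return the drm_intel_bo backing a buffer object, allocating one if needed.
 *
 * If the object currently lives in malloc'd system memory, its contents are
 * migrated into a freshly allocated BO.  The flag describes the intended
 * access so that any PBO region sharing (COW) can be broken appropriately.
 */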
drm_intel_bo *
intel_bufferobj_buffer(struct intel_context *intel,
                       struct intel_buffer_object *intel_obj, GLuint flag)
{
   if (intel_obj->region) {
      if (flag == INTEL_WRITE_PART)
         intel_bufferobj_cow(intel, intel_obj);
      else if (flag == INTEL_WRITE_FULL) {
         intel_bufferobj_release_region(intel, intel_obj);
         intel_bufferobj_alloc_buffer(intel, intel_obj);
      }
   }

   if (intel_obj->buffer == NULL) {
      void *sys_buffer = intel_obj->sys_buffer;

      /* only one of buffer and sys_buffer could be non-NULL */
      intel_bufferobj_alloc_buffer(intel, intel_obj);
      intel_obj->sys_buffer = NULL;

      intel_bufferobj_subdata(&intel->ctx,
                              GL_ARRAY_BUFFER_ARB,
                              0,
                              intel_obj->Base.Size,
                              sys_buffer,
                              &intel_obj->Base);
      free(sys_buffer);
      intel_obj->sys_buffer = NULL;
   }

   return intel_obj->buffer;
}
static void
intel_bufferobj_copy_subdata(struct gl_context *ctx,
                             struct gl_buffer_object *src,
                             struct gl_buffer_object *dst,
                             GLintptr read_offset, GLintptr write_offset,
                             GLsizeiptr size)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_src = intel_buffer_object(src);
   struct intel_buffer_object *intel_dst = intel_buffer_object(dst);
   drm_intel_bo *src_bo, *dst_bo;

   if (size == 0)
      return;

   /* If we're in system memory, just map and memcpy. */
   if (intel_src->sys_buffer || intel_dst->sys_buffer || intel->gen >= 6) {
      /* The same buffer may be used, but note that regions copied may
       * not overlap.
       */
      if (src == dst) {
         char *ptr = intel_bufferobj_map(ctx, GL_COPY_WRITE_BUFFER,
                                         GL_READ_WRITE, dst);

         memcpy(ptr + write_offset, ptr + read_offset, size);

         intel_bufferobj_unmap(ctx, GL_COPY_WRITE_BUFFER, dst);
      } else {
         const char *src_ptr;
         char *dst_ptr;

         src_ptr = intel_bufferobj_map(ctx, GL_COPY_READ_BUFFER,
                                       GL_READ_ONLY, src);
         dst_ptr = intel_bufferobj_map(ctx, GL_COPY_WRITE_BUFFER,
                                       GL_WRITE_ONLY, dst);

         memcpy(dst_ptr + write_offset, src_ptr + read_offset, size);

         intel_bufferobj_unmap(ctx, GL_COPY_READ_BUFFER, src);
         intel_bufferobj_unmap(ctx, GL_COPY_WRITE_BUFFER, dst);
      }
      return;
   }

   /* Otherwise, we have real BOs, so blit them. */

   dst_bo = intel_bufferobj_buffer(intel, intel_dst, INTEL_WRITE_PART);
   src_bo = intel_bufferobj_buffer(intel, intel_src, INTEL_READ);

   intel_emit_linear_blit(intel,
                          dst_bo, write_offset,
                          src_bo, read_offset, size);

   /* Since we've emitted some blits to buffers that will (likely) be used
    * in rendering operations in other cache domains in this batch, emit a
    * flush.  Once again, we wish for a domain tracker in libdrm to cover
    * usage inside of a batchbuffer.
    */
   intel_batchbuffer_emit_mi_flush(intel->batch);
}
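/* GL_APPLE_object_purgeable support: purgeable/unpurgeable state is mapped
 * onto the kernel's madvise mechanism (I915_MADV_DONTNEED / I915_MADV_WILLNEED)
 * for the underlying BO, where one exists.
 */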
#if FEATURE_APPLE_object_purgeable
static GLenum
intel_buffer_purgeable(struct gl_context * ctx,
                       drm_intel_bo *buffer,
                       GLenum option)
{
   int retained = 0;

   if (buffer != NULL)
      retained = drm_intel_bo_madvise (buffer, I915_MADV_DONTNEED);

   return retained ? GL_VOLATILE_APPLE : GL_RELEASED_APPLE;
}
static GLenum
intel_buffer_object_purgeable(struct gl_context * ctx,
                              struct gl_buffer_object *obj,
                              GLenum option)
{
   struct intel_buffer_object *intel;

   intel = intel_buffer_object (obj);
   if (intel->buffer != NULL)
      return intel_buffer_purgeable (ctx, intel->buffer, option);

   if (option == GL_RELEASED_APPLE) {
      if (intel->sys_buffer != NULL) {
         free(intel->sys_buffer);
         intel->sys_buffer = NULL;
      }

      return GL_RELEASED_APPLE;
   } else {
      /* XXX Create the buffer and madvise(MADV_DONTNEED)? */
      return intel_buffer_purgeable (ctx,
                                     intel_bufferobj_buffer(intel_context(ctx),
                                                            intel, INTEL_READ),
                                     option);
   }
}
static GLenum
intel_texture_object_purgeable(struct gl_context * ctx,
                               struct gl_texture_object *obj,
                               GLenum option)
{
   struct intel_texture_object *intel;

   intel = intel_texture_object(obj);
   if (intel->mt == NULL || intel->mt->region == NULL)
      return GL_RELEASED_APPLE;

   return intel_buffer_purgeable (ctx, intel->mt->region->buffer, option);
}
static GLenum
intel_render_object_purgeable(struct gl_context * ctx,
                              struct gl_renderbuffer *obj,
                              GLenum option)
{
   struct intel_renderbuffer *intel;

   intel = intel_renderbuffer(obj);
   if (intel->region == NULL)
      return GL_RELEASED_APPLE;

   return intel_buffer_purgeable (ctx, intel->region->buffer, option);
}
static GLenum
intel_buffer_unpurgeable(struct gl_context * ctx,
                         drm_intel_bo *buffer,
                         GLenum option)
{
   int retained;

   retained = 0;
   if (buffer != NULL)
      retained = drm_intel_bo_madvise (buffer, I915_MADV_WILLNEED);

   return retained ? GL_RETAINED_APPLE : GL_UNDEFINED_APPLE;
}
static GLenum
intel_buffer_object_unpurgeable(struct gl_context * ctx,
                                struct gl_buffer_object *obj,
                                GLenum option)
{
   return intel_buffer_unpurgeable (ctx, intel_buffer_object (obj)->buffer, option);
}
static GLenum
intel_texture_object_unpurgeable(struct gl_context * ctx,
                                 struct gl_texture_object *obj,
                                 GLenum option)
{
   struct intel_texture_object *intel;

   intel = intel_texture_object(obj);
   if (intel->mt == NULL || intel->mt->region == NULL)
      return GL_UNDEFINED_APPLE;

   return intel_buffer_unpurgeable (ctx, intel->mt->region->buffer, option);
}
static GLenum
intel_render_object_unpurgeable(struct gl_context * ctx,
                                struct gl_renderbuffer *obj,
                                GLenum option)
{
   struct intel_renderbuffer *intel;

   intel = intel_renderbuffer(obj);
   if (intel->region == NULL)
      return GL_UNDEFINED_APPLE;

   return intel_buffer_unpurgeable (ctx, intel->region->buffer, option);
}
#endif
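/**
 * Plug the buffer-object functions above into the driver function table.
 */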
void
intelInitBufferObjectFuncs(struct dd_function_table *functions)
{
   functions->NewBufferObject = intel_bufferobj_alloc;
   functions->DeleteBuffer = intel_bufferobj_free;
   functions->BufferData = intel_bufferobj_data;
   functions->BufferSubData = intel_bufferobj_subdata;
   functions->GetBufferSubData = intel_bufferobj_get_subdata;
   functions->MapBuffer = intel_bufferobj_map;
   functions->MapBufferRange = intel_bufferobj_map_range;
   functions->FlushMappedBufferRange = intel_bufferobj_flush_mapped_range;
   functions->UnmapBuffer = intel_bufferobj_unmap;
   functions->CopyBufferSubData = intel_bufferobj_copy_subdata;

#if FEATURE_APPLE_object_purgeable
   functions->BufferObjectPurgeable = intel_buffer_object_purgeable;
   functions->TextureObjectPurgeable = intel_texture_object_purgeable;
   functions->RenderObjectPurgeable = intel_render_object_purgeable;

   functions->BufferObjectUnpurgeable = intel_buffer_object_unpurgeable;
   functions->TextureObjectUnpurgeable = intel_texture_object_unpurgeable;
   functions->RenderObjectUnpurgeable = intel_render_object_unpurgeable;
#endif
}