/**************************************************************************
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "main/imports.h"
#include "main/mtypes.h"
#include "main/macros.h"
#include "main/bufferobj.h"

#include "intel_blit.h"
#include "intel_buffer_objects.h"
#include "intel_batchbuffer.h"
#include "intel_context.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
#include "intel_regions.h"

static GLboolean
intel_bufferobj_unmap(GLcontext * ctx,
                      GLenum target, struct gl_buffer_object *obj);

/** Allocates a new dri_bo to store the data for the buffer object. */
static void
intel_bufferobj_alloc_buffer(struct intel_context *intel,
                             struct intel_buffer_object *intel_obj)
{
   intel_obj->buffer = dri_bo_alloc(intel->bufmgr, "bufferobj",
                                    intel_obj->Base.Size, 64);
}

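/* Note: the final argument to dri_bo_alloc() is the requested alignment in
 * bytes; the 64 above is a conservative choice, presumably picked to satisfy
 * every engine that might read the buffer.
 */
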
/**
 * There is some duplication between mesa's bufferobjects and our
 * bufmgr buffers.  Both have an integer handle and a hashtable to
 * look up an opaque structure.  It would be nice if the handles and
 * internal structures were somehow shared.
 */
static struct gl_buffer_object *
intel_bufferobj_alloc(GLcontext * ctx, GLuint name, GLenum target)
{
   struct intel_buffer_object *obj = CALLOC_STRUCT(intel_buffer_object);

   _mesa_initialize_buffer_object(&obj->Base, name, target);

   obj->buffer = NULL;

   return &obj->Base;
}

/* Break the COW tie to the region.  The region gets to keep the data.
 */
void
intel_bufferobj_release_region(struct intel_context *intel,
                               struct intel_buffer_object *intel_obj)
{
   assert(intel_obj->region->buffer == intel_obj->buffer);
   intel_obj->region->pbo = NULL;
   intel_obj->region = NULL;

   dri_bo_unreference(intel_obj->buffer);
   intel_obj->buffer = NULL;
}

/* Break the COW tie to the region.  Both the pbo and the region end
 * up with a copy of the data.
 */
void
intel_bufferobj_cow(struct intel_context *intel,
                    struct intel_buffer_object *intel_obj)
{
   assert(intel_obj->region);
   intel_region_cow(intel, intel_obj->region);
}

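/* Summary of the two ways the PBO/region tie is broken: release_region()
 * hands the storage to the region and leaves the buffer object empty, while
 * cow() duplicates the data so both sides keep a copy.  Which one is used
 * depends on whether the buffer object's contents are about to be replaced
 * entirely (see intel_bufferobj_buffer() below).
 */
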
/**
 * Deallocate/free a vertex/pixel buffer object.
 * Called via glDeleteBuffersARB().
 */
static void
intel_bufferobj_free(GLcontext * ctx, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);

   /* Buffer objects are automatically unmapped when deleting according
    * to the spec, but Mesa doesn't do UnmapBuffer for us at context destroy
    * (though it does if you call glDeleteBuffers).
    */
   if (obj->Pointer)
      intel_bufferobj_unmap(ctx, 0, obj);

   free(intel_obj->sys_buffer);
   if (intel_obj->region) {
      intel_bufferobj_release_region(intel, intel_obj);
   }
   else if (intel_obj->buffer) {
      dri_bo_unreference(intel_obj->buffer);
   }

   free(intel_obj);
}

/**
 * Allocate space for and store data in a buffer object.  Any data that was
 * previously stored in the buffer object is lost.  If data is NULL,
 * memory will be allocated, but no copy will occur.
 * Called via ctx->Driver.BufferData().
 * \return GL_TRUE for success, GL_FALSE if out of memory
 */
static GLboolean
intel_bufferobj_data(GLcontext * ctx,
                     GLenum target,
                     GLsizeiptrARB size,
                     const GLvoid * data,
                     GLenum usage, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   intel_obj->Base.Size = size;
   intel_obj->Base.Usage = usage;

   assert(!obj->Pointer); /* Mesa should have unmapped it */

   if (intel_obj->region)
      intel_bufferobj_release_region(intel, intel_obj);

   if (intel_obj->buffer != NULL) {
      dri_bo_unreference(intel_obj->buffer);
      intel_obj->buffer = NULL;
   }
   free(intel_obj->sys_buffer);
   intel_obj->sys_buffer = NULL;

   if (size != 0) {
#ifdef I915
      /* On pre-965, stick VBOs in system memory, as we're always doing swtnl
       * with their contents anyway.
       */
      if (target == GL_ARRAY_BUFFER || target == GL_ELEMENT_ARRAY_BUFFER) {
         intel_obj->sys_buffer = malloc(size);
         if (intel_obj->sys_buffer != NULL) {
            if (data != NULL)
               memcpy(intel_obj->sys_buffer, data, size);
            return GL_TRUE;
         }
      }
#endif
      intel_bufferobj_alloc_buffer(intel, intel_obj);
      if (!intel_obj->buffer)
         return GL_FALSE;

      if (data != NULL)
         dri_bo_subdata(intel_obj->buffer, 0, size, data);
   }

   return GL_TRUE;
}

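/* Net effect of BufferData: at most one of intel_obj->sys_buffer (malloc'd
 * storage, the pre-965 VBO path) and intel_obj->buffer (a GEM BO) is
 * non-NULL afterwards; a zero-size call leaves both NULL.
 */
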
/**
 * Replace data in a subrange of buffer object.  If the data range
 * specified by size + offset extends beyond the end of the buffer or
 * if data is NULL, no copy is performed.
 * Called via glBufferSubDataARB().
 */
static void
intel_bufferobj_subdata(GLcontext * ctx,
                        GLenum target,
                        GLintptrARB offset,
                        GLsizeiptrARB size,
                        const GLvoid * data, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);

   if (intel_obj->region)
      intel_bufferobj_cow(intel, intel_obj);

   if (intel_obj->sys_buffer)
      memcpy((char *)intel_obj->sys_buffer + offset, data, size);
   else {
      /* Flush any existing batchbuffer that might reference this data. */
      if (drm_intel_bo_busy(intel_obj->buffer) ||
          drm_intel_bo_references(intel->batch->buf, intel_obj->buffer)) {
         drm_intel_bo *temp_bo;

         temp_bo = drm_intel_bo_alloc(intel->bufmgr, "subdata temp", size, 64);

         drm_intel_bo_subdata(temp_bo, 0, size, data);

         intel_emit_linear_blit(intel,
                                intel_obj->buffer, offset,
                                temp_bo, 0,
                                size);

         drm_intel_bo_unreference(temp_bo);
      } else {
         dri_bo_subdata(intel_obj->buffer, offset, size, data);
      }
   }
}

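/* The temp-BO path above exists to avoid a CPU stall: dri_bo_subdata() on a
 * buffer the GPU still references would block until rendering completes, so
 * instead we upload into a fresh BO and queue a blit into place, letting the
 * hardware order the copy after the outstanding work.
 */
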
/**
 * Called via glGetBufferSubDataARB().
 */
static void
intel_bufferobj_get_subdata(GLcontext * ctx,
                            GLenum target,
                            GLintptrARB offset,
                            GLsizeiptrARB size,
                            GLvoid * data, struct gl_buffer_object *obj)
{
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);
   if (intel_obj->sys_buffer)
      memcpy(data, (char *)intel_obj->sys_buffer + offset, size);
   else
      dri_bo_get_subdata(intel_obj->buffer, offset, size, data);
}

/**
 * Called via glMapBufferARB().
 */
static void *
intel_bufferobj_map(GLcontext * ctx,
                    GLenum target,
                    GLenum access, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
   GLboolean read_only = (access == GL_READ_ONLY_ARB);
   GLboolean write_only = (access == GL_WRITE_ONLY_ARB);

   assert(intel_obj);

   if (intel_obj->sys_buffer) {
      obj->Pointer = intel_obj->sys_buffer;
      obj->Length = obj->Size;
      obj->Offset = 0;
      return obj->Pointer;
   }

   /* Flush any existing batchbuffer that might reference this data. */
   if (drm_intel_bo_references(intel->batch->buf, intel_obj->buffer))
      intel_flush(ctx);

   if (intel_obj->region)
      intel_bufferobj_cow(intel, intel_obj);

   if (intel_obj->buffer == NULL) {
      obj->Pointer = NULL;
      return NULL;
   }

   if (write_only) {
      drm_intel_gem_bo_map_gtt(intel_obj->buffer);
      intel_obj->mapped_gtt = GL_TRUE;
   } else {
      drm_intel_bo_map(intel_obj->buffer, !read_only);
      intel_obj->mapped_gtt = GL_FALSE;
   }

   obj->Pointer = intel_obj->buffer->virtual;
   obj->Length = obj->Size;
   obj->Offset = 0;

   return obj->Pointer;
}

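/* Map-type note: write-only maps go through the GTT (write-combined, so CPU
 * writes stream out without polluting the cache), while any map that might
 * be read uses a regular CPU map, since reads through a GTT mapping are
 * uncached and very slow.  mapped_gtt records which unmap call is needed.
 */
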
/**
 * Called via glMapBufferRange().
 *
 * The goal of this extension is to allow apps to accumulate their rendering
 * at the same time as they accumulate their buffer object.  Without it,
 * you'd end up blocking on execution of rendering every time you mapped
 * the buffer to put new data in.
 *
 * We support it in 3 ways: If unsynchronized, then don't bother flushing
 * the batchbuffer before mapping the buffer, which can save blocking in many
 * cases.  If we would still block, and they allow the whole buffer to be
 * invalidated, then just allocate a new buffer to replace the old one.  If
 * not, and we'd block, and they allow the subrange of the buffer to be
 * invalidated, then we can make a new little BO, let them write into that,
 * and blit it into the real BO at unmap time.
 */
static void *
intel_bufferobj_map_range(GLcontext * ctx,
                          GLenum target, GLintptr offset, GLsizeiptr length,
                          GLbitfield access, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);

   /* _mesa_MapBufferRange (GL entrypoint) sets these, but the vbo module also
    * internally uses our functions directly.
    */
   obj->Offset = offset;
   obj->Length = length;
   obj->AccessFlags = access;

   if (intel_obj->sys_buffer) {
      obj->Pointer = (char *) intel_obj->sys_buffer + offset;
      return obj->Pointer;
   }

   if (intel_obj->region)
      intel_bufferobj_cow(intel, intel_obj);

   /* If the mapping is synchronized with other GL operations, flush
    * the batchbuffer so that GEM knows about the buffer access for later
    * syncing.
    */
   if (!(access & GL_MAP_UNSYNCHRONIZED_BIT) &&
       drm_intel_bo_references(intel->batch->buf, intel_obj->buffer))
      intel_flush(ctx);

   if (intel_obj->buffer == NULL) {
      obj->Pointer = NULL;
      return NULL;
   }

   /* If the user doesn't care about existing buffer contents and mapping
    * would cause us to block, then throw out the old buffer.
    */
   if (!(access & GL_MAP_UNSYNCHRONIZED_BIT) &&
       (access & GL_MAP_INVALIDATE_BUFFER_BIT) &&
       drm_intel_bo_busy(intel_obj->buffer)) {
      drm_intel_bo_unreference(intel_obj->buffer);
      intel_obj->buffer = dri_bo_alloc(intel->bufmgr, "bufferobj",
                                       intel_obj->Base.Size, 64);
   }

   /* If the user is mapping a range of an active buffer object but
    * doesn't require the current contents of that range, make a new
    * BO, and we'll copy what they put in there out at unmap or
    * FlushRange time.
    */
   if ((access & GL_MAP_INVALIDATE_RANGE_BIT) &&
       drm_intel_bo_busy(intel_obj->buffer)) {
      if (access & GL_MAP_FLUSH_EXPLICIT_BIT) {
         intel_obj->range_map_buffer = malloc(length);
         obj->Pointer = intel_obj->range_map_buffer;
      } else {
         intel_obj->range_map_bo = drm_intel_bo_alloc(intel->bufmgr,
                                                      "range map",
                                                      length, 64);
         if (!(access & GL_MAP_READ_BIT)) {
            drm_intel_gem_bo_map_gtt(intel_obj->range_map_bo);
            intel_obj->mapped_gtt = GL_TRUE;
         } else {
            drm_intel_bo_map(intel_obj->range_map_bo,
                             (access & GL_MAP_WRITE_BIT) != 0);
            intel_obj->mapped_gtt = GL_FALSE;
         }
         obj->Pointer = intel_obj->range_map_bo->virtual;
      }
      return obj->Pointer;
   }

   if (!(access & GL_MAP_READ_BIT)) {
      drm_intel_gem_bo_map_gtt(intel_obj->buffer);
      intel_obj->mapped_gtt = GL_TRUE;
   } else {
      drm_intel_bo_map(intel_obj->buffer, (access & GL_MAP_WRITE_BIT) != 0);
      intel_obj->mapped_gtt = GL_FALSE;
   }

   obj->Pointer = (char *) intel_obj->buffer->virtual + offset;
   return obj->Pointer;
}

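/* Illustrative app-side pattern the strategy above serves (not driver code):
 *
 *    ptr = glMapBufferRange(GL_ARRAY_BUFFER, offset, len,
 *                           GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_RANGE_BIT);
 *    memcpy(ptr, vertices, len);       // may land in the temporary BO
 *    glUnmapBuffer(GL_ARRAY_BUFFER);   // blit into the real BO happens here
 *
 * The app keeps streaming vertex data without ever stalling on the GPU.
 */
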
/* Ideally we'd use a BO to avoid taking up cache space for the temporary
 * data, but FlushMappedBufferRange may be followed by further writes to
 * the pointer, so we would have to re-map after emitting our blit, which
 * would defeat the point.
 */
static void
intel_bufferobj_flush_mapped_range(GLcontext *ctx, GLenum target,
                                   GLintptr offset, GLsizeiptr length,
                                   struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
   drm_intel_bo *temp_bo;

   /* Unless we're in the range map using a temporary system buffer,
    * there's no work to do.
    */
   if (intel_obj->range_map_buffer == NULL)
      return;

   temp_bo = drm_intel_bo_alloc(intel->bufmgr, "range map flush", length, 64);

   drm_intel_bo_subdata(temp_bo, 0, length, intel_obj->range_map_buffer);

   intel_emit_linear_blit(intel,
                          intel_obj->buffer, obj->Offset + offset,
                          temp_bo, 0,
                          length);

   drm_intel_bo_unreference(temp_bo);
}

/**
 * Called via glUnmapBuffer().
 */
static GLboolean
intel_bufferobj_unmap(GLcontext * ctx,
                      GLenum target, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);
   assert(obj->Pointer);
   if (intel_obj->sys_buffer != NULL) {
      /* always keep the mapping around. */
   } else if (intel_obj->range_map_buffer != NULL) {
      /* Since we've emitted some blits to buffers that will (likely) be used
       * in rendering operations in other cache domains in this batch, emit a
       * flush.  Once again, we wish for a domain tracker in libdrm to cover
       * usage inside of a batchbuffer.
       */
      intel_batchbuffer_emit_mi_flush(intel->batch);
      free(intel_obj->range_map_buffer);
      intel_obj->range_map_buffer = NULL;
   } else if (intel_obj->range_map_bo != NULL) {
      if (intel_obj->mapped_gtt) {
         drm_intel_gem_bo_unmap_gtt(intel_obj->range_map_bo);
      } else {
         drm_intel_bo_unmap(intel_obj->range_map_bo);
      }

      intel_emit_linear_blit(intel,
                             intel_obj->buffer, obj->Offset,
                             intel_obj->range_map_bo, 0,
                             obj->Length);

      /* Since we've emitted some blits to buffers that will (likely) be used
       * in rendering operations in other cache domains in this batch, emit a
       * flush.  Once again, we wish for a domain tracker in libdrm to cover
       * usage inside of a batchbuffer.
       */
      intel_batchbuffer_emit_mi_flush(intel->batch);

      drm_intel_bo_unreference(intel_obj->range_map_bo);
      intel_obj->range_map_bo = NULL;
   } else if (intel_obj->buffer != NULL) {
      if (intel_obj->mapped_gtt) {
         drm_intel_gem_bo_unmap_gtt(intel_obj->buffer);
      } else {
         drm_intel_bo_unmap(intel_obj->buffer);
      }
   }

   obj->Pointer = NULL;
   obj->Offset = 0;
   obj->Length = 0;

   return GL_TRUE;
}

drm_intel_bo *
intel_bufferobj_buffer(struct intel_context *intel,
                       struct intel_buffer_object *intel_obj, GLuint flag)
{
   if (intel_obj->region) {
      if (flag == INTEL_WRITE_PART)
         intel_bufferobj_cow(intel, intel_obj);
      else if (flag == INTEL_WRITE_FULL) {
         intel_bufferobj_release_region(intel, intel_obj);
         intel_bufferobj_alloc_buffer(intel, intel_obj);
      }
   }

   if (intel_obj->buffer == NULL) {
      void *sys_buffer = intel_obj->sys_buffer;

      /* only one of buffer and sys_buffer could be non-NULL */
      intel_bufferobj_alloc_buffer(intel, intel_obj);
      intel_obj->sys_buffer = NULL;

      intel_bufferobj_subdata(&intel->ctx,
                              GL_ARRAY_BUFFER_ARB,
                              0,
                              intel_obj->Base.Size,
                              sys_buffer,
                              &intel_obj->Base);
      free(sys_buffer);
      intel_obj->sys_buffer = NULL;
   }

   return intel_obj->buffer;
}

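/* Validation helper semantics: callers pass INTEL_READ, INTEL_WRITE_PART or
 * INTEL_WRITE_FULL to describe the access they need.  A full write lets us
 * drop the region's old contents instead of copying them, and any caller
 * that reaches here forces a sys_buffer-backed VBO up into a real BO first.
 */
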
static void
intel_bufferobj_copy_subdata(GLcontext *ctx,
                             struct gl_buffer_object *src,
                             struct gl_buffer_object *dst,
                             GLintptr read_offset, GLintptr write_offset,
                             GLsizeiptr size)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_src = intel_buffer_object(src);
   struct intel_buffer_object *intel_dst = intel_buffer_object(dst);
   drm_intel_bo *src_bo, *dst_bo;

   if (size == 0)
      return;

   /* If we're in system memory, just map and memcpy. */
   if (intel_src->sys_buffer || intel_dst->sys_buffer) {
      /* The same buffer may be used, but note that regions copied may
       * not overlap.
       */
      if (src == dst) {
         char *ptr = intel_bufferobj_map(ctx, GL_COPY_WRITE_BUFFER,
                                         GL_READ_WRITE, dst);

         memcpy(ptr + write_offset, ptr + read_offset, size);

         intel_bufferobj_unmap(ctx, GL_COPY_WRITE_BUFFER, dst);
      } else {
         const char *src_ptr;
         char *dst_ptr;

         src_ptr = intel_bufferobj_map(ctx, GL_COPY_READ_BUFFER,
                                       GL_READ_ONLY, src);
         dst_ptr = intel_bufferobj_map(ctx, GL_COPY_WRITE_BUFFER,
                                       GL_WRITE_ONLY, dst);

         memcpy(dst_ptr + write_offset, src_ptr + read_offset, size);

         intel_bufferobj_unmap(ctx, GL_COPY_READ_BUFFER, src);
         intel_bufferobj_unmap(ctx, GL_COPY_WRITE_BUFFER, dst);
      }
      return;
   }

   /* Otherwise, we have real BOs, so blit them. */

   dst_bo = intel_bufferobj_buffer(intel, intel_dst, INTEL_WRITE_PART);
   src_bo = intel_bufferobj_buffer(intel, intel_src, INTEL_READ);

   intel_emit_linear_blit(intel,
                          dst_bo, write_offset,
                          src_bo, read_offset, size);

   /* Since we've emitted some blits to buffers that will (likely) be used
    * in rendering operations in other cache domains in this batch, emit a
    * flush.  Once again, we wish for a domain tracker in libdrm to cover
    * usage inside of a batchbuffer.
    */
   intel_batchbuffer_emit_mi_flush(intel->batch);
}

#if FEATURE_APPLE_object_purgeable
static GLenum
intel_buffer_purgeable(GLcontext * ctx,
                       drm_intel_bo *buffer,
                       GLenum option)
{
   int retained = 0;

   if (buffer != NULL)
      retained = drm_intel_bo_madvise (buffer, I915_MADV_DONTNEED);

   return retained ? GL_VOLATILE_APPLE : GL_RELEASED_APPLE;
}

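/* GL_APPLE_object_purgeable maps almost directly onto GEM madvise:
 * I915_MADV_DONTNEED tells the kernel it may reclaim the BO's backing pages
 * under memory pressure.  drm_intel_bo_madvise() reports whether the pages
 * are still resident, which is what distinguishes GL_VOLATILE_APPLE (still
 * there, merely reclaimable) from GL_RELEASED_APPLE (already gone).
 */
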
static GLenum
intel_buffer_object_purgeable(GLcontext * ctx,
                              struct gl_buffer_object *obj,
                              GLenum option)
{
   struct intel_buffer_object *intel;

   intel = intel_buffer_object (obj);
   if (intel->buffer != NULL)
      return intel_buffer_purgeable (ctx, intel->buffer, option);

   if (option == GL_RELEASED_APPLE) {
      if (intel->sys_buffer != NULL) {
         free(intel->sys_buffer);
         intel->sys_buffer = NULL;
      }

      return GL_RELEASED_APPLE;
   } else {
      /* XXX Create the buffer and madvise(MADV_DONTNEED)? */
      return intel_buffer_purgeable (ctx,
                                     intel_bufferobj_buffer(intel_context(ctx),
                                                            intel, INTEL_READ),
                                     option);
   }
}

static GLenum
intel_texture_object_purgeable(GLcontext * ctx,
                               struct gl_texture_object *obj,
                               GLenum option)
{
   struct intel_texture_object *intel;

   intel = intel_texture_object(obj);
   if (intel->mt == NULL || intel->mt->region == NULL)
      return GL_RELEASED_APPLE;

   return intel_buffer_purgeable (ctx, intel->mt->region->buffer, option);
}

static GLenum
intel_render_object_purgeable(GLcontext * ctx,
                              struct gl_renderbuffer *obj,
                              GLenum option)
{
   struct intel_renderbuffer *intel;

   intel = intel_renderbuffer(obj);
   if (intel->region == NULL)
      return GL_RELEASED_APPLE;

   return intel_buffer_purgeable (ctx, intel->region->buffer, option);
}

static GLenum
intel_buffer_unpurgeable(GLcontext * ctx,
                         drm_intel_bo *buffer,
                         GLenum option)
{
   int retained;

   retained = 0;
   if (buffer != NULL)
      retained = drm_intel_bo_madvise (buffer, I915_MADV_WILLNEED);

   return retained ? GL_RETAINED_APPLE : GL_UNDEFINED_APPLE;
}

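/* The inverse operation: I915_MADV_WILLNEED marks the pages as needed again.
 * If the kernel already discarded them while the BO was purgeable, madvise
 * reports that, and we must tell the app the contents are now undefined.
 */
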
static GLenum
intel_buffer_object_unpurgeable(GLcontext * ctx,
                                struct gl_buffer_object *obj,
                                GLenum option)
{
   return intel_buffer_unpurgeable (ctx, intel_buffer_object (obj)->buffer, option);
}

static GLenum
intel_texture_object_unpurgeable(GLcontext * ctx,
                                 struct gl_texture_object *obj,
                                 GLenum option)
{
   struct intel_texture_object *intel;

   intel = intel_texture_object(obj);
   if (intel->mt == NULL || intel->mt->region == NULL)
      return GL_UNDEFINED_APPLE;

   return intel_buffer_unpurgeable (ctx, intel->mt->region->buffer, option);
}

static GLenum
intel_render_object_unpurgeable(GLcontext * ctx,
                                struct gl_renderbuffer *obj,
                                GLenum option)
{
   struct intel_renderbuffer *intel;

   intel = intel_renderbuffer(obj);
   if (intel->region == NULL)
      return GL_UNDEFINED_APPLE;

   return intel_buffer_unpurgeable (ctx, intel->region->buffer, option);
}
#endif /* FEATURE_APPLE_object_purgeable */

void
intelInitBufferObjectFuncs(struct dd_function_table *functions)
{
   functions->NewBufferObject = intel_bufferobj_alloc;
   functions->DeleteBuffer = intel_bufferobj_free;
   functions->BufferData = intel_bufferobj_data;
   functions->BufferSubData = intel_bufferobj_subdata;
   functions->GetBufferSubData = intel_bufferobj_get_subdata;
   functions->MapBuffer = intel_bufferobj_map;
   functions->MapBufferRange = intel_bufferobj_map_range;
   functions->FlushMappedBufferRange = intel_bufferobj_flush_mapped_range;
   functions->UnmapBuffer = intel_bufferobj_unmap;
   functions->CopyBufferSubData = intel_bufferobj_copy_subdata;

#if FEATURE_APPLE_object_purgeable
   functions->BufferObjectPurgeable = intel_buffer_object_purgeable;
   functions->TextureObjectPurgeable = intel_texture_object_purgeable;
   functions->RenderObjectPurgeable = intel_render_object_purgeable;

   functions->BufferObjectUnpurgeable = intel_buffer_object_unpurgeable;
   functions->TextureObjectUnpurgeable = intel_texture_object_unpurgeable;
   functions->RenderObjectUnpurgeable = intel_render_object_unpurgeable;
#endif
}