/**************************************************************************
 *
 * Copyright 2003 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "main/imports.h"
#include "main/mtypes.h"
#include "main/macros.h"
#include "main/bufferobj.h"

#include "intel_blit.h"
#include "intel_buffer_objects.h"
#include "intel_batchbuffer.h"
#include "intel_context.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
#include "intel_regions.h"

static GLboolean
intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj);

/** Allocates a new drm_intel_bo to store the data for the buffer object. */
static void
intel_bufferobj_alloc_buffer(struct intel_context *intel,
                             struct intel_buffer_object *intel_obj)
{
   intel_obj->buffer = drm_intel_bo_alloc(intel->bufmgr, "bufferobj",
                                          intel_obj->Base.Size, 64);
}

static void
release_buffer(struct intel_buffer_object *intel_obj)
{
   drm_intel_bo_unreference(intel_obj->buffer);
   intel_obj->buffer = NULL;
   intel_obj->offset = 0;
   intel_obj->source = 0;
}

/**
 * There is some duplication between mesa's bufferobjects and our
 * bufmgr buffers.  Both have an integer handle and a hashtable to
 * lookup an opaque structure.  It would be nice if the handles and
 * internal structure were somehow shared.
 */
static struct gl_buffer_object *
intel_bufferobj_alloc(struct gl_context * ctx, GLuint name, GLenum target)
{
   struct intel_buffer_object *obj = CALLOC_STRUCT(intel_buffer_object);

   _mesa_initialize_buffer_object(ctx, &obj->Base, name, target);

   return &obj->Base;
}

/**
 * Deallocate/free a vertex/pixel buffer object.
 * Called via glDeleteBuffersARB().
 */
static void
intel_bufferobj_free(struct gl_context * ctx, struct gl_buffer_object *obj)
{
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);

   /* Buffer objects are automatically unmapped when deleting according
    * to the spec, but Mesa doesn't do UnmapBuffer for us at context destroy
    * (though it does if you call glDeleteBuffers).
    */
   if (obj->Pointer)
      intel_bufferobj_unmap(ctx, obj);

   free(intel_obj->sys_buffer);

   drm_intel_bo_unreference(intel_obj->buffer);
   free(intel_obj);
}

/**
 * Allocate space for and store data in a buffer object.  Any data that was
 * previously stored in the buffer object is lost.  If data is NULL,
 * memory will be allocated, but no copy will occur.
 * Called via ctx->Driver.BufferData().
 * \return true for success, false if out of memory
 */
static GLboolean
intel_bufferobj_data(struct gl_context * ctx,
                     GLenum target,
                     GLsizeiptrARB size,
                     const GLvoid * data,
                     GLenum usage,
                     GLbitfield storageFlags,
                     struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   intel_obj->Base.Size = size;
   intel_obj->Base.Usage = usage;
   intel_obj->Base.StorageFlags = storageFlags;

   assert(!obj->Pointer); /* Mesa should have unmapped it */

   if (intel_obj->buffer != NULL)
      release_buffer(intel_obj);

   free(intel_obj->sys_buffer);
   intel_obj->sys_buffer = NULL;

   /* Stick VBOs in system memory, as we're always doing swtnl with their
    * contents anyway.
    */
   if (target == GL_ARRAY_BUFFER || target == GL_ELEMENT_ARRAY_BUFFER) {
      intel_obj->sys_buffer = malloc(size);
      if (intel_obj->sys_buffer != NULL) {
         if (data != NULL)
            memcpy(intel_obj->sys_buffer, data, size);
         return true;
      }
   }

   if (size != 0) {
      intel_bufferobj_alloc_buffer(intel, intel_obj);
      if (!intel_obj->buffer)
         return false;

      if (data != NULL)
         drm_intel_bo_subdata(intel_obj->buffer, 0, size, data);
   }

   return true;
}

/**
 * Replace data in a subrange of buffer object.  If the data range
 * specified by size + offset extends beyond the end of the buffer or
 * if data is NULL, no copy is performed.
 * Called via glBufferSubDataARB().
 */
static void
intel_bufferobj_subdata(struct gl_context * ctx,
                        GLintptrARB offset,
                        GLsizeiptrARB size,
                        const GLvoid * data, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
   bool busy;

   if (size == 0)
      return;

   assert(intel_obj);

   /* If we have a single copy in system memory, update that */
   if (intel_obj->sys_buffer) {
      if (intel_obj->source)
         release_buffer(intel_obj);

      if (intel_obj->buffer == NULL) {
         memcpy((char *)intel_obj->sys_buffer + offset, data, size);
         return;
      }

      free(intel_obj->sys_buffer);
      intel_obj->sys_buffer = NULL;
   }

   /* Otherwise we need to update the copy in video memory. */
   busy =
      drm_intel_bo_busy(intel_obj->buffer) ||
      drm_intel_bo_references(intel->batch.bo, intel_obj->buffer);

   if (busy) {
      if (size == intel_obj->Base.Size) {
         /* Replace the current busy bo with fresh data. */
         drm_intel_bo_unreference(intel_obj->buffer);
         intel_bufferobj_alloc_buffer(intel, intel_obj);
         drm_intel_bo_subdata(intel_obj->buffer, 0, size, data);
      } else {
         perf_debug("Using a blit copy to avoid stalling on %ldb "
                    "glBufferSubData() to a busy buffer object.\n",
                    (long)size);
         drm_intel_bo *temp_bo =
            drm_intel_bo_alloc(intel->bufmgr, "subdata temp", size, 64);

         drm_intel_bo_subdata(temp_bo, 0, size, data);

         intel_emit_linear_blit(intel,
                                intel_obj->buffer, offset,
                                temp_bo, 0,
                                size);

         drm_intel_bo_unreference(temp_bo);
      }
   } else {
      drm_intel_bo_subdata(intel_obj->buffer, offset, size, data);
   }
}
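
/* Illustrative sketch (added commentary, not from the original source): the
 * decision tree in intel_bufferobj_subdata() from the client's point of
 * view.  A glBufferSubData() call lands on one of three paths:
 *
 *    BO busy, size == Base.Size  ->  orphan the BO and refill a fresh one
 *    BO busy, size <  Base.Size  ->  upload to a temp BO, blit into place
 *    BO idle                     ->  drm_intel_bo_subdata() writes directly
 */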

/**
 * Called via glGetBufferSubDataARB().
 */
static void
intel_bufferobj_get_subdata(struct gl_context * ctx,
                            GLintptrARB offset,
                            GLsizeiptrARB size,
                            GLvoid * data, struct gl_buffer_object *obj)
{
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
   struct intel_context *intel = intel_context(ctx);

   assert(intel_obj);
   if (intel_obj->sys_buffer) {
      memcpy(data, (char *)intel_obj->sys_buffer + offset, size);
   } else {
      if (drm_intel_bo_references(intel->batch.bo, intel_obj->buffer)) {
         intel_batchbuffer_flush(intel);
      }
      drm_intel_bo_get_subdata(intel_obj->buffer, offset, size, data);
   }
}

/**
 * Called via glMapBufferRange and glMapBuffer
 *
 * The goal of this extension is to allow apps to accumulate their rendering
 * at the same time as they accumulate their buffer object.  Without it,
 * you'd end up blocking on execution of rendering every time you mapped
 * the buffer to put new data in.
 *
 * We support it in 3 ways: If unsynchronized, then don't bother
 * flushing the batchbuffer before mapping the buffer, which can save blocking
 * in many cases.  If we would still block, and they allow the whole buffer
 * to be invalidated, then just allocate a new buffer to replace the old one.
 * If not, and we'd block, and they allow the subrange of the buffer to be
 * invalidated, then we can make a new little BO, let them write into that,
 * and blit it into the real BO at unmap time.
 */
static void *
intel_bufferobj_map_range(struct gl_context * ctx,
                          GLintptr offset, GLsizeiptr length,
                          GLbitfield access, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);

   /* _mesa_MapBufferRange (GL entrypoint) sets these, but the vbo module also
    * internally uses our functions directly.
    */
   obj->Offset = offset;
   obj->Length = length;
   obj->AccessFlags = access;

   if (intel_obj->sys_buffer) {
      const bool read_only =
         (access & (GL_MAP_READ_BIT | GL_MAP_WRITE_BIT)) == GL_MAP_READ_BIT;

      if (!read_only && intel_obj->source)
         release_buffer(intel_obj);

      if (!intel_obj->buffer || intel_obj->source) {
         obj->Pointer = intel_obj->sys_buffer + offset;
         return obj->Pointer;
      }

      free(intel_obj->sys_buffer);
      intel_obj->sys_buffer = NULL;
   }

   if (intel_obj->buffer == NULL) {
      obj->Pointer = NULL;
      return NULL;
   }

   /* If the access is synchronized (like a normal buffer mapping), then get
    * things flushed out so the later mapping syncs appropriately through GEM.
    * If the user doesn't care about existing buffer contents and mapping would
    * cause us to block, then throw out the old buffer.
    *
    * If they set INVALIDATE_BUFFER, we can pitch the current contents to
    * achieve the required synchronization.
    */
   if (!(access & GL_MAP_UNSYNCHRONIZED_BIT)) {
      if (drm_intel_bo_references(intel->batch.bo, intel_obj->buffer)) {
         if (access & GL_MAP_INVALIDATE_BUFFER_BIT) {
            drm_intel_bo_unreference(intel_obj->buffer);
            intel_bufferobj_alloc_buffer(intel, intel_obj);
         } else {
            perf_debug("Stalling on the GPU for mapping a busy buffer "
                       "object\n");
            intel_flush(ctx);
         }
      } else if (drm_intel_bo_busy(intel_obj->buffer) &&
                 (access & GL_MAP_INVALIDATE_BUFFER_BIT)) {
         drm_intel_bo_unreference(intel_obj->buffer);
         intel_bufferobj_alloc_buffer(intel, intel_obj);
      }
   }

   /* If the user is mapping a range of an active buffer object but
    * doesn't require the current contents of that range, make a new
    * BO, and we'll copy what they put in there out at unmap or
    * FlushMappedBufferRange time.
    */
   if ((access & GL_MAP_INVALIDATE_RANGE_BIT) &&
       drm_intel_bo_busy(intel_obj->buffer)) {
      /* Ensure that the base alignment of the allocation meets the alignment
       * guarantees the driver has advertised to the application.
       */
      const unsigned alignment = ctx->Const.MinMapBufferAlignment;
      const unsigned extra = (uintptr_t) offset % alignment;
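      /* Worked example (hypothetical numbers, not from the original source):
       * with MinMapBufferAlignment = 64 and offset = 100, extra = 100 % 64 =
       * 36, so the allocation is length + 36 bytes and the pointer handed to
       * the app is 36 bytes into it.  That keeps (pointer - offset) 64-byte
       * aligned, which is the GL_MIN_MAP_BUFFER_ALIGNMENT guarantee.
       */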

      if (access & GL_MAP_FLUSH_EXPLICIT_BIT) {
         intel_obj->range_map_buffer = _mesa_align_malloc(length + extra,
                                                          alignment);
         obj->Pointer = intel_obj->range_map_buffer + extra;
      } else {
         intel_obj->range_map_bo = drm_intel_bo_alloc(intel->bufmgr,
                                                      "range map",
                                                      length + extra,
                                                      alignment);
         if (!(access & GL_MAP_READ_BIT)) {
            drm_intel_gem_bo_map_gtt(intel_obj->range_map_bo);
         } else {
            drm_intel_bo_map(intel_obj->range_map_bo,
                             (access & GL_MAP_WRITE_BIT) != 0);
         }
         obj->Pointer = intel_obj->range_map_bo->virtual + extra;
      }
      return obj->Pointer;
   }

   if (access & GL_MAP_UNSYNCHRONIZED_BIT)
      drm_intel_gem_bo_map_unsynchronized(intel_obj->buffer);
   else if (!(access & GL_MAP_READ_BIT)) {
      drm_intel_gem_bo_map_gtt(intel_obj->buffer);
   } else {
      drm_intel_bo_map(intel_obj->buffer, (access & GL_MAP_WRITE_BIT) != 0);
   }

   obj->Pointer = intel_obj->buffer->virtual + offset;
   return obj->Pointer;
}
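
/* Illustrative sketch (added commentary, not from the original source): the
 * three mapping strategies above, as a client would trigger them.  The
 * buffer binding, offset, and sizes here are hypothetical.
 *
 *    // 1. Unsynchronized: no batch flush, no stall.
 *    glMapBufferRange(GL_ARRAY_BUFFER, 0, size,
 *                     GL_MAP_WRITE_BIT | GL_MAP_UNSYNCHRONIZED_BIT);
 *
 *    // 2. Whole-buffer invalidate on a busy BO: swap in a fresh BO.
 *    glMapBufferRange(GL_ARRAY_BUFFER, 0, size,
 *                     GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_BUFFER_BIT);
 *
 *    // 3. Range invalidate on a busy BO: temporary BO, blitted at unmap.
 *    glMapBufferRange(GL_ARRAY_BUFFER, offset, length,
 *                     GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_RANGE_BIT);
 */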

/* Ideally we'd use a BO to avoid taking up cache space for the temporary
 * data, but FlushMappedBufferRange may be followed by further writes to
 * the pointer, so we would have to re-map after emitting our blit, which
 * would defeat the point.
 */
static void
intel_bufferobj_flush_mapped_range(struct gl_context *ctx,
                                   GLintptr offset, GLsizeiptr length,
                                   struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
   drm_intel_bo *temp_bo;

   /* Unless we're in the range map using a temporary system buffer,
    * there's no work to do.
    */
   if (intel_obj->range_map_buffer == NULL)
      return;

   if (length == 0)
      return;

   temp_bo = drm_intel_bo_alloc(intel->bufmgr, "range map flush", length, 64);

   /* Use obj->Pointer instead of intel_obj->range_map_buffer because the
    * former points to the actual mapping while the latter may be offset to
    * meet alignment guarantees.
    */
   drm_intel_bo_subdata(temp_bo, 0, length, obj->Pointer);

   intel_emit_linear_blit(intel,
                          intel_obj->buffer, obj->Offset + offset,
                          temp_bo, 0,
                          length);

   drm_intel_bo_unreference(temp_bo);
}

/**
 * Called via glUnmapBuffer().
 */
static GLboolean
intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);
   assert(obj->Pointer);
   if (intel_obj->sys_buffer != NULL) {
      /* always keep the mapping around. */
   } else if (intel_obj->range_map_buffer != NULL) {
      /* Since we've emitted some blits to buffers that will (likely) be used
       * in rendering operations in other cache domains in this batch, emit a
       * flush.  Once again, we wish for a domain tracker in libdrm to cover
       * usage inside of a batchbuffer.
       */
      intel_batchbuffer_emit_mi_flush(intel);
      _mesa_align_free(intel_obj->range_map_buffer);
      intel_obj->range_map_buffer = NULL;
   } else if (intel_obj->range_map_bo != NULL) {
      const unsigned extra = obj->Pointer - intel_obj->range_map_bo->virtual;

      drm_intel_bo_unmap(intel_obj->range_map_bo);

      intel_emit_linear_blit(intel,
                             intel_obj->buffer, obj->Offset,
                             intel_obj->range_map_bo, extra,
                             obj->Length);

      /* Since we've emitted some blits to buffers that will (likely) be used
       * in rendering operations in other cache domains in this batch, emit a
       * flush.  Once again, we wish for a domain tracker in libdrm to cover
       * usage inside of a batchbuffer.
       */
      intel_batchbuffer_emit_mi_flush(intel);

      drm_intel_bo_unreference(intel_obj->range_map_bo);
      intel_obj->range_map_bo = NULL;
   } else if (intel_obj->buffer != NULL) {
      drm_intel_bo_unmap(intel_obj->buffer);
   }

   obj->Pointer = NULL;
   obj->Offset = 0;
   obj->Length = 0;

   return true;
}

drm_intel_bo *
intel_bufferobj_buffer(struct intel_context *intel,
                       struct intel_buffer_object *intel_obj)
{
   if (intel_obj->source)
      release_buffer(intel_obj);

   if (intel_obj->buffer == NULL) {
      intel_bufferobj_alloc_buffer(intel, intel_obj);
      drm_intel_bo_subdata(intel_obj->buffer,
                           0, intel_obj->Base.Size,
                           intel_obj->sys_buffer);

      free(intel_obj->sys_buffer);
      intel_obj->sys_buffer = NULL;
      intel_obj->offset = 0;
   }

   return intel_obj->buffer;
}

#define INTEL_UPLOAD_SIZE (64*1024)

void
intel_upload_finish(struct intel_context *intel)
{
   if (!intel->upload.bo)
      return;

   if (intel->upload.buffer_len) {
      drm_intel_bo_subdata(intel->upload.bo,
                           intel->upload.buffer_offset,
                           intel->upload.buffer_len,
                           intel->upload.buffer);
      intel->upload.buffer_len = 0;
   }

   drm_intel_bo_unreference(intel->upload.bo);
   intel->upload.bo = NULL;
}

static void wrap_buffers(struct intel_context *intel, GLuint size)
{
   intel_upload_finish(intel);

   if (size < INTEL_UPLOAD_SIZE)
      size = INTEL_UPLOAD_SIZE;

   intel->upload.bo = drm_intel_bo_alloc(intel->bufmgr, "upload", size, 0);
   intel->upload.offset = 0;
}

void intel_upload_data(struct intel_context *intel,
                       const void *ptr, GLuint size, GLuint align,
                       drm_intel_bo **return_bo,
                       GLuint *return_offset)
{
   GLuint base, delta;

   base = (intel->upload.offset + align - 1) / align * align;
   if (intel->upload.bo == NULL || base + size > intel->upload.bo->size) {
      wrap_buffers(intel, size);
      base = 0;
   }

   drm_intel_bo_reference(intel->upload.bo);
   *return_bo = intel->upload.bo;
   *return_offset = base;

   delta = base - intel->upload.offset;
   if (intel->upload.buffer_len &&
       intel->upload.buffer_len + delta + size > sizeof(intel->upload.buffer)) {
      drm_intel_bo_subdata(intel->upload.bo,
                           intel->upload.buffer_offset,
                           intel->upload.buffer_len,
                           intel->upload.buffer);
      intel->upload.buffer_len = 0;
   }

   if (size < sizeof(intel->upload.buffer)) {
      if (intel->upload.buffer_len == 0)
         intel->upload.buffer_offset = base;
      else
         intel->upload.buffer_len += delta;

      memcpy(intel->upload.buffer + intel->upload.buffer_len, ptr, size);
      intel->upload.buffer_len += size;
   } else {
      drm_intel_bo_subdata(intel->upload.bo, base, size, ptr);
   }

   intel->upload.offset = base + size;
}
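
/* Illustrative sketch (added commentary, not from the original source): a
 * typical caller streams a small piece of data through the shared upload BO
 * and gets back a (bo, offset) pair to program into hardware state.  The
 * variable names here are hypothetical.
 *
 *    drm_intel_bo *bo;
 *    GLuint offset;
 *
 *    intel_upload_data(intel, vertices, vertex_bytes, 64, &bo, &offset);
 *    // ... emit state that points at (bo, offset) ...
 *    drm_intel_bo_unreference(bo);  // caller owns the returned reference
 */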

drm_intel_bo *
intel_bufferobj_source(struct intel_context *intel,
                       struct intel_buffer_object *intel_obj,
                       GLuint align, GLuint *offset)
{
   if (intel_obj->buffer == NULL) {
      intel_upload_data(intel,
                        intel_obj->sys_buffer, intel_obj->Base.Size, align,
                        &intel_obj->buffer, &intel_obj->offset);
      intel_obj->source = 1;
   }

   *offset = intel_obj->offset;
   return intel_obj->buffer;
}

static void
intel_bufferobj_copy_subdata(struct gl_context *ctx,
                             struct gl_buffer_object *src,
                             struct gl_buffer_object *dst,
                             GLintptr read_offset, GLintptr write_offset,
                             GLsizeiptr size)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_src = intel_buffer_object(src);
   struct intel_buffer_object *intel_dst = intel_buffer_object(dst);
   drm_intel_bo *src_bo, *dst_bo;
   GLuint src_offset;

   if (size == 0)
      return;

   /* If we're in system memory, just map and memcpy. */
   if (intel_src->sys_buffer || intel_dst->sys_buffer) {
      /* The same buffer may be used, but note that regions copied may
       * not overlap.
       */
      if (src == dst) {
         char *ptr = intel_bufferobj_map_range(ctx, 0, dst->Size,
                                               GL_MAP_READ_BIT |
                                               GL_MAP_WRITE_BIT, dst);
         memmove(ptr + write_offset, ptr + read_offset, size);
         intel_bufferobj_unmap(ctx, dst);
      } else {
         const char *src_ptr;
         char *dst_ptr;

         src_ptr = intel_bufferobj_map_range(ctx, 0, src->Size,
                                             GL_MAP_READ_BIT, src);
         dst_ptr = intel_bufferobj_map_range(ctx, 0, dst->Size,
                                             GL_MAP_WRITE_BIT, dst);

         memcpy(dst_ptr + write_offset, src_ptr + read_offset, size);

         intel_bufferobj_unmap(ctx, src);
         intel_bufferobj_unmap(ctx, dst);
      }
      return;
   }

   /* Otherwise, we have real BOs, so blit them. */

   dst_bo = intel_bufferobj_buffer(intel, intel_dst);
   src_bo = intel_bufferobj_source(intel, intel_src, 64, &src_offset);

   intel_emit_linear_blit(intel,
                          dst_bo, write_offset,
                          src_bo, read_offset + src_offset, size);

   /* Since we've emitted some blits to buffers that will (likely) be used
    * in rendering operations in other cache domains in this batch, emit a
    * flush.  Once again, we wish for a domain tracker in libdrm to cover
    * usage inside of a batchbuffer.
    */
   intel_batchbuffer_emit_mi_flush(intel);
}

static GLenum
intel_buffer_purgeable(drm_intel_bo *buffer)
{
   int retained = 0;

   if (buffer != NULL)
      retained = drm_intel_bo_madvise(buffer, I915_MADV_DONTNEED);

   return retained ? GL_VOLATILE_APPLE : GL_RELEASED_APPLE;
}

static GLenum
intel_buffer_object_purgeable(struct gl_context * ctx,
                              struct gl_buffer_object *obj,
                              GLenum option)
{
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   if (intel_obj->buffer != NULL)
      return intel_buffer_purgeable(intel_obj->buffer);

   if (option == GL_RELEASED_APPLE) {
      free(intel_obj->sys_buffer);
      intel_obj->sys_buffer = NULL;

      return GL_RELEASED_APPLE;
   } else {
      /* XXX Create the buffer and madvise(MADV_DONTNEED)? */
      struct intel_context *intel = intel_context(ctx);
      drm_intel_bo *bo = intel_bufferobj_buffer(intel, intel_obj);

      return intel_buffer_purgeable(bo);
   }
}

static GLenum
intel_texture_object_purgeable(struct gl_context * ctx,
                               struct gl_texture_object *obj,
                               GLenum option)
{
   struct intel_texture_object *intel;

   (void) ctx;
   (void) option;

   intel = intel_texture_object(obj);
   if (intel->mt == NULL || intel->mt->region == NULL)
      return GL_RELEASED_APPLE;

   return intel_buffer_purgeable(intel->mt->region->bo);
}

static GLenum
intel_render_object_purgeable(struct gl_context * ctx,
                              struct gl_renderbuffer *obj,
                              GLenum option)
{
   struct intel_renderbuffer *intel;

   (void) ctx;
   (void) option;

   intel = intel_renderbuffer(obj);
   if (intel->mt == NULL)
      return GL_RELEASED_APPLE;

   return intel_buffer_purgeable(intel->mt->region->bo);
}

static GLenum
intel_buffer_unpurgeable(drm_intel_bo *buffer)
{
   int retained = 0;

   if (buffer != NULL)
      retained = drm_intel_bo_madvise(buffer, I915_MADV_WILLNEED);

   return retained ? GL_RETAINED_APPLE : GL_UNDEFINED_APPLE;
}

static GLenum
intel_buffer_object_unpurgeable(struct gl_context * ctx,
                                struct gl_buffer_object *obj,
                                GLenum option)
{
   (void) ctx;
   (void) option;

   return intel_buffer_unpurgeable(intel_buffer_object(obj)->buffer);
}

static GLenum
intel_texture_object_unpurgeable(struct gl_context * ctx,
                                 struct gl_texture_object *obj,
                                 GLenum option)
{
   struct intel_texture_object *intel;

   (void) ctx;
   (void) option;

   intel = intel_texture_object(obj);
   if (intel->mt == NULL || intel->mt->region == NULL)
      return GL_UNDEFINED_APPLE;

   return intel_buffer_unpurgeable(intel->mt->region->bo);
}

static GLenum
intel_render_object_unpurgeable(struct gl_context * ctx,
                                struct gl_renderbuffer *obj,
                                GLenum option)
{
   struct intel_renderbuffer *intel;

   (void) ctx;
   (void) option;

   intel = intel_renderbuffer(obj);
   if (intel->mt == NULL)
      return GL_UNDEFINED_APPLE;

   return intel_buffer_unpurgeable(intel->mt->region->bo);
}

void
intelInitBufferObjectFuncs(struct dd_function_table *functions)
{
   functions->NewBufferObject = intel_bufferobj_alloc;
   functions->DeleteBuffer = intel_bufferobj_free;
   functions->BufferData = intel_bufferobj_data;
   functions->BufferSubData = intel_bufferobj_subdata;
   functions->GetBufferSubData = intel_bufferobj_get_subdata;
   functions->MapBufferRange = intel_bufferobj_map_range;
   functions->FlushMappedBufferRange = intel_bufferobj_flush_mapped_range;
   functions->UnmapBuffer = intel_bufferobj_unmap;
   functions->CopyBufferSubData = intel_bufferobj_copy_subdata;

   functions->BufferObjectPurgeable = intel_buffer_object_purgeable;
   functions->TextureObjectPurgeable = intel_texture_object_purgeable;
   functions->RenderObjectPurgeable = intel_render_object_purgeable;

   functions->BufferObjectUnpurgeable = intel_buffer_object_unpurgeable;
   functions->TextureObjectUnpurgeable = intel_texture_object_unpurgeable;
   functions->RenderObjectUnpurgeable = intel_render_object_unpurgeable;
}