i965: Move intel_context::upload to brw_context.
[mesa.git] / src / mesa / drivers / dri / i965 / intel_buffer_objects.c
/**************************************************************************
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/


#include "main/imports.h"
#include "main/mtypes.h"
#include "main/macros.h"
#include "main/bufferobj.h"

#include "brw_context.h"
#include "intel_blit.h"
#include "intel_buffer_objects.h"
#include "intel_batchbuffer.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
#include "intel_regions.h"

static GLboolean
intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj);

/** Allocates a new drm_intel_bo to store the data for the buffer object. */
static void
intel_bufferobj_alloc_buffer(struct brw_context *brw,
                             struct intel_buffer_object *intel_obj)
{
   intel_obj->buffer = drm_intel_bo_alloc(brw->bufmgr, "bufferobj",
                                          intel_obj->Base.Size, 64);

   /* the buffer might be bound as a uniform buffer, need to update it
    */
   brw->state.dirty.brw |= BRW_NEW_UNIFORM_BUFFER;
}

static void
release_buffer(struct intel_buffer_object *intel_obj)
{
   drm_intel_bo_unreference(intel_obj->buffer);
   intel_obj->buffer = NULL;
   intel_obj->offset = 0;
}

/**
 * There is some duplication between mesa's bufferobjects and our
 * bufmgr buffers.  Both have an integer handle and a hashtable to
 * look up an opaque structure.  It would be nice if the handles and
 * internal structure were somehow shared.
 */
static struct gl_buffer_object *
intel_bufferobj_alloc(struct gl_context * ctx, GLuint name, GLenum target)
{
   struct intel_buffer_object *obj = CALLOC_STRUCT(intel_buffer_object);

   _mesa_initialize_buffer_object(ctx, &obj->Base, name, target);

   obj->buffer = NULL;

   return &obj->Base;
}

/**
 * Deallocate/free a vertex/pixel buffer object.
 * Called via glDeleteBuffersARB().
 */
static void
intel_bufferobj_free(struct gl_context * ctx, struct gl_buffer_object *obj)
{
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);

   /* Buffer objects are automatically unmapped when they are deleted,
    * according to the spec, but Mesa doesn't call UnmapBuffer for us at
    * context destroy time (though it does if you call glDeleteBuffers).
    */
   if (obj->Pointer)
      intel_bufferobj_unmap(ctx, obj);

   drm_intel_bo_unreference(intel_obj->buffer);
   free(intel_obj);
}


/**
 * Allocate space for and store data in a buffer object.  Any data that was
 * previously stored in the buffer object is lost.  If data is NULL,
 * memory will be allocated, but no copy will occur.
 * Called via ctx->Driver.BufferData().
 * \return true for success, false if out of memory
 */
static GLboolean
intel_bufferobj_data(struct gl_context * ctx,
                     GLenum target,
                     GLsizeiptrARB size,
                     const GLvoid * data,
                     GLenum usage, struct gl_buffer_object *obj)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   /* Part of the ABI, but this function doesn't use it.
    */
   (void) target;

   intel_obj->Base.Size = size;
   intel_obj->Base.Usage = usage;

   assert(!obj->Pointer); /* Mesa should have unmapped it */

   if (intel_obj->buffer != NULL)
      release_buffer(intel_obj);

   if (size != 0) {
      intel_bufferobj_alloc_buffer(brw, intel_obj);
      if (!intel_obj->buffer)
         return false;

      if (data != NULL)
         drm_intel_bo_subdata(intel_obj->buffer, 0, size, data);
   }

   return true;
}

/**
 * Replace data in a subrange of a buffer object.  If the data range
 * specified by size + offset extends beyond the end of the buffer or
 * if data is NULL, no copy is performed.
 * Called via glBufferSubDataARB().
 */
static void
intel_bufferobj_subdata(struct gl_context * ctx,
                        GLintptrARB offset,
                        GLsizeiptrARB size,
                        const GLvoid * data, struct gl_buffer_object *obj)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
   bool busy;

   if (size == 0)
      return;

   assert(intel_obj);

   busy =
      drm_intel_bo_busy(intel_obj->buffer) ||
      drm_intel_bo_references(brw->batch.bo, intel_obj->buffer);

   if (busy) {
      if (size == intel_obj->Base.Size) {
         /* Replace the current busy bo with fresh data. */
         drm_intel_bo_unreference(intel_obj->buffer);
         intel_bufferobj_alloc_buffer(brw, intel_obj);
         drm_intel_bo_subdata(intel_obj->buffer, 0, size, data);
      } else {
         perf_debug("Using a blit copy to avoid stalling on %ldb "
                    "glBufferSubData() to a busy buffer object.\n",
                    (long)size);
         drm_intel_bo *temp_bo =
            drm_intel_bo_alloc(brw->bufmgr, "subdata temp", size, 64);

         drm_intel_bo_subdata(temp_bo, 0, size, data);

         intel_emit_linear_blit(brw,
                                intel_obj->buffer, offset,
                                temp_bo, 0,
                                size);

         drm_intel_bo_unreference(temp_bo);
      }
   } else {
      drm_intel_bo_subdata(intel_obj->buffer, offset, size, data);
   }
}

/**
 * Called via glGetBufferSubDataARB().
 */
static void
intel_bufferobj_get_subdata(struct gl_context * ctx,
                            GLintptrARB offset,
                            GLsizeiptrARB size,
                            GLvoid * data, struct gl_buffer_object *obj)
{
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
   struct brw_context *brw = brw_context(ctx);

   assert(intel_obj);
   if (drm_intel_bo_references(brw->batch.bo, intel_obj->buffer)) {
      intel_batchbuffer_flush(brw);
   }
   drm_intel_bo_get_subdata(intel_obj->buffer, offset, size, data);
}


/**
 * Called via glMapBufferRange and glMapBuffer
 *
 * The goal of this extension is to allow apps to accumulate their rendering
 * at the same time as they accumulate their buffer object.  Without it,
 * you'd end up blocking on execution of rendering every time you mapped
 * the buffer to put new data in.
 *
 * We support it in 3 ways: If unsynchronized, then don't bother
 * flushing the batchbuffer before mapping the buffer, which can save blocking
 * in many cases.  If we would still block, and they allow the whole buffer
 * to be invalidated, then just allocate a new buffer to replace the old one.
 * If not, and we'd block, and they allow the subrange of the buffer to be
 * invalidated, then we can make a new little BO, let them write into that,
 * and blit it into the real BO at unmap time.
 */
static void *
intel_bufferobj_map_range(struct gl_context * ctx,
                          GLintptr offset, GLsizeiptr length,
                          GLbitfield access, struct gl_buffer_object *obj)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);

   /* _mesa_MapBufferRange (GL entrypoint) sets these, but the vbo module also
    * internally uses our functions directly.
    */
   obj->Offset = offset;
   obj->Length = length;
   obj->AccessFlags = access;

   if (intel_obj->buffer == NULL) {
      obj->Pointer = NULL;
      return NULL;
   }

   /* If the access is synchronized (like a normal buffer mapping), then get
    * things flushed out so the later mapping syncs appropriately through GEM.
    * If the user doesn't care about existing buffer contents and mapping would
    * cause us to block, then throw out the old buffer.
    *
    * If they set INVALIDATE_BUFFER, we can pitch the current contents to
    * achieve the required synchronization.
    */
   if (!(access & GL_MAP_UNSYNCHRONIZED_BIT)) {
      if (drm_intel_bo_references(brw->batch.bo, intel_obj->buffer)) {
         if (access & GL_MAP_INVALIDATE_BUFFER_BIT) {
            drm_intel_bo_unreference(intel_obj->buffer);
            intel_bufferobj_alloc_buffer(brw, intel_obj);
         } else {
            perf_debug("Stalling on the GPU for mapping a busy buffer "
                       "object\n");
            intel_flush(ctx);
         }
      } else if (drm_intel_bo_busy(intel_obj->buffer) &&
                 (access & GL_MAP_INVALIDATE_BUFFER_BIT)) {
         drm_intel_bo_unreference(intel_obj->buffer);
         intel_bufferobj_alloc_buffer(brw, intel_obj);
      }
   }

   /* If the user is mapping a range of an active buffer object but
    * doesn't require the current contents of that range, make a new
    * BO, and we'll copy what they put in there out at unmap or
    * FlushRange time.
    */
   if ((access & GL_MAP_INVALIDATE_RANGE_BIT) &&
       drm_intel_bo_busy(intel_obj->buffer)) {
      if (access & GL_MAP_FLUSH_EXPLICIT_BIT) {
         intel_obj->range_map_buffer = malloc(length);
         obj->Pointer = intel_obj->range_map_buffer;
      } else {
         intel_obj->range_map_bo = drm_intel_bo_alloc(brw->bufmgr,
                                                      "range map",
                                                      length, 64);
         if (!(access & GL_MAP_READ_BIT)) {
            drm_intel_gem_bo_map_gtt(intel_obj->range_map_bo);
         } else {
            drm_intel_bo_map(intel_obj->range_map_bo,
                             (access & GL_MAP_WRITE_BIT) != 0);
         }
         obj->Pointer = intel_obj->range_map_bo->virtual;
      }
      return obj->Pointer;
   }

   if (access & GL_MAP_UNSYNCHRONIZED_BIT)
      drm_intel_gem_bo_map_unsynchronized(intel_obj->buffer);
   else if (!(access & GL_MAP_READ_BIT)) {
      drm_intel_gem_bo_map_gtt(intel_obj->buffer);
   } else {
      drm_intel_bo_map(intel_obj->buffer, (access & GL_MAP_WRITE_BIT) != 0);
   }

   obj->Pointer = intel_obj->buffer->virtual + offset;
   return obj->Pointer;
}

/* Ideally we'd use a BO to avoid taking up cache space for the temporary
 * data, but FlushMappedBufferRange may be followed by further writes to
 * the pointer, so we would have to re-map after emitting our blit, which
 * would defeat the point.
 */
static void
intel_bufferobj_flush_mapped_range(struct gl_context *ctx,
                                   GLintptr offset, GLsizeiptr length,
                                   struct gl_buffer_object *obj)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
   drm_intel_bo *temp_bo;

   /* Unless we're in the range map using a temporary system buffer,
    * there's no work to do.
    */
   if (intel_obj->range_map_buffer == NULL)
      return;

   if (length == 0)
      return;

   temp_bo = drm_intel_bo_alloc(brw->bufmgr, "range map flush", length, 64);

   drm_intel_bo_subdata(temp_bo, 0, length, intel_obj->range_map_buffer);

   intel_emit_linear_blit(brw,
                          intel_obj->buffer, obj->Offset + offset,
                          temp_bo, 0,
                          length);

   drm_intel_bo_unreference(temp_bo);
}


/**
 * Called via glUnmapBuffer().
 */
static GLboolean
intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);
   assert(obj->Pointer);
   if (intel_obj->range_map_buffer != NULL) {
      /* Since we've emitted some blits to buffers that will (likely) be used
       * in rendering operations in other cache domains in this batch, emit a
       * flush.  Once again, we wish for a domain tracker in libdrm to cover
       * usage inside of a batchbuffer.
       */
      intel_batchbuffer_emit_mi_flush(brw);
      free(intel_obj->range_map_buffer);
      intel_obj->range_map_buffer = NULL;
   } else if (intel_obj->range_map_bo != NULL) {
      drm_intel_bo_unmap(intel_obj->range_map_bo);

      intel_emit_linear_blit(brw,
                             intel_obj->buffer, obj->Offset,
                             intel_obj->range_map_bo, 0,
                             obj->Length);

      /* Since we've emitted some blits to buffers that will (likely) be used
       * in rendering operations in other cache domains in this batch, emit a
       * flush.  Once again, we wish for a domain tracker in libdrm to cover
       * usage inside of a batchbuffer.
       */
      intel_batchbuffer_emit_mi_flush(brw);

      drm_intel_bo_unreference(intel_obj->range_map_bo);
      intel_obj->range_map_bo = NULL;
   } else if (intel_obj->buffer != NULL) {
      drm_intel_bo_unmap(intel_obj->buffer);
   }
   obj->Pointer = NULL;
   obj->Offset = 0;
   obj->Length = 0;

   return true;
}

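/**
 * Return the drm_intel_bo backing this buffer object, allocating one on
 * demand if none exists yet.  The access flag hint is currently unused here.
 */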
409 drm_intel_bo *
410 intel_bufferobj_buffer(struct brw_context *brw,
411 struct intel_buffer_object *intel_obj,
412 GLuint flag)
413 {
414 if (intel_obj->buffer == NULL)
415 intel_bufferobj_alloc_buffer(brw, intel_obj);
416
417 return intel_obj->buffer;
418 }
419
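/*
 * Streaming upload helpers: callers hand small pieces of data to
 * intel_upload_data() or intel_upload_map()/intel_upload_unmap().  Data that
 * fits is staged in the brw->upload.buffer array and flushed into the
 * current upload BO (brw->upload.bo) with drm_intel_bo_subdata();
 * wrap_buffers() below replaces that BO with a fresh one of at least
 * INTEL_UPLOAD_SIZE bytes whenever it fills up.
 */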
#define INTEL_UPLOAD_SIZE (64*1024)

void
intel_upload_finish(struct brw_context *brw)
{
   if (!brw->upload.bo)
      return;

   if (brw->upload.buffer_len) {
      drm_intel_bo_subdata(brw->upload.bo,
                           brw->upload.buffer_offset,
                           brw->upload.buffer_len,
                           brw->upload.buffer);
      brw->upload.buffer_len = 0;
   }

   drm_intel_bo_unreference(brw->upload.bo);
   brw->upload.bo = NULL;
}

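/**
 * Flush any staged bytes and release the current upload BO, then allocate a
 * fresh one big enough for the requested size and never smaller than
 * INTEL_UPLOAD_SIZE.
 */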
static void wrap_buffers(struct brw_context *brw, GLuint size)
{
   intel_upload_finish(brw);

   if (size < INTEL_UPLOAD_SIZE)
      size = INTEL_UPLOAD_SIZE;

   brw->upload.bo = drm_intel_bo_alloc(brw->bufmgr, "upload", size, 0);
   brw->upload.offset = 0;
}

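/**
 * Copy "size" bytes from "ptr" into the upload stream at "align" byte
 * alignment.  Returns the upload BO (with a reference the caller must
 * release) in *return_bo and the data's offset within it in *return_offset.
 * Small copies are staged in brw->upload.buffer and written out later;
 * larger ones go straight to the BO with drm_intel_bo_subdata().
 */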
void intel_upload_data(struct brw_context *brw,
                       const void *ptr, GLuint size, GLuint align,
                       drm_intel_bo **return_bo,
                       GLuint *return_offset)
{
   GLuint base, delta;

   base = (brw->upload.offset + align - 1) / align * align;
   if (brw->upload.bo == NULL || base + size > brw->upload.bo->size) {
      wrap_buffers(brw, size);
      base = 0;
   }

   drm_intel_bo_reference(brw->upload.bo);
   *return_bo = brw->upload.bo;
   *return_offset = base;

   delta = base - brw->upload.offset;
   if (brw->upload.buffer_len &&
       brw->upload.buffer_len + delta + size > sizeof(brw->upload.buffer))
   {
      drm_intel_bo_subdata(brw->upload.bo,
                           brw->upload.buffer_offset,
                           brw->upload.buffer_len,
                           brw->upload.buffer);
      brw->upload.buffer_len = 0;
   }

   if (size < sizeof(brw->upload.buffer))
   {
      if (brw->upload.buffer_len == 0)
         brw->upload.buffer_offset = base;
      else
         brw->upload.buffer_len += delta;

      memcpy(brw->upload.buffer + brw->upload.buffer_len, ptr, size);
      brw->upload.buffer_len += size;
   }
   else
   {
      drm_intel_bo_subdata(brw->upload.bo, base, size, ptr);
   }

   brw->upload.offset = base + size;
}

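/**
 * Reserve "size" bytes in the upload stream at "align" byte alignment and
 * return a CPU pointer the caller can write into: either a slot in the
 * brw->upload.buffer staging array, or a malloc'ed temporary for requests
 * larger than the staging array.  Must be paired with intel_upload_unmap()
 * using the same size and alignment.
 */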
void *intel_upload_map(struct brw_context *brw, GLuint size, GLuint align)
{
   GLuint base, delta;
   char *ptr;

   base = (brw->upload.offset + align - 1) / align * align;
   if (brw->upload.bo == NULL || base + size > brw->upload.bo->size) {
      wrap_buffers(brw, size);
      base = 0;
   }

   delta = base - brw->upload.offset;
   if (brw->upload.buffer_len &&
       brw->upload.buffer_len + delta + size > sizeof(brw->upload.buffer))
   {
      drm_intel_bo_subdata(brw->upload.bo,
                           brw->upload.buffer_offset,
                           brw->upload.buffer_len,
                           brw->upload.buffer);
      brw->upload.buffer_len = 0;
   }

   if (size <= sizeof(brw->upload.buffer)) {
      if (brw->upload.buffer_len == 0)
         brw->upload.buffer_offset = base;
      else
         brw->upload.buffer_len += delta;

      ptr = brw->upload.buffer + brw->upload.buffer_len;
      brw->upload.buffer_len += size;
   } else
      ptr = malloc(size);

   return ptr;
}

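/**
 * Finish a pending intel_upload_map(): data written through a malloc'ed
 * temporary is copied into the upload BO and the temporary is freed, while
 * data staged in brw->upload.buffer is left for a later flush.  Returns the
 * upload BO (referenced) and the data's offset, like intel_upload_data().
 */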
void intel_upload_unmap(struct brw_context *brw,
                        const void *ptr, GLuint size, GLuint align,
                        drm_intel_bo **return_bo,
                        GLuint *return_offset)
{
   GLuint base;

   base = (brw->upload.offset + align - 1) / align * align;
   if (size > sizeof(brw->upload.buffer)) {
      drm_intel_bo_subdata(brw->upload.bo, base, size, ptr);
      free((void*)ptr);
   }

   drm_intel_bo_reference(brw->upload.bo);
   *return_bo = brw->upload.bo;
   *return_offset = base;

   brw->upload.offset = base + size;
}

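/**
 * Return the BO backing a buffer object for use as a read source, along
 * with the object's base offset.  The alignment argument is ignored here.
 */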
drm_intel_bo *
intel_bufferobj_source(struct brw_context *brw,
                       struct intel_buffer_object *intel_obj,
                       GLuint align, GLuint *offset)
{
   *offset = intel_obj->offset;
   return intel_obj->buffer;
}

static void
intel_bufferobj_copy_subdata(struct gl_context *ctx,
                             struct gl_buffer_object *src,
                             struct gl_buffer_object *dst,
                             GLintptr read_offset, GLintptr write_offset,
                             GLsizeiptr size)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_buffer_object *intel_src = intel_buffer_object(src);
   struct intel_buffer_object *intel_dst = intel_buffer_object(dst);
   drm_intel_bo *src_bo, *dst_bo;
   GLuint src_offset;

   if (size == 0)
      return;

   dst_bo = intel_bufferobj_buffer(brw, intel_dst, INTEL_WRITE_PART);
   src_bo = intel_bufferobj_source(brw, intel_src, 64, &src_offset);

   intel_emit_linear_blit(brw,
                          dst_bo, write_offset,
                          src_bo, read_offset + src_offset, size);

   /* Since we've emitted some blits to buffers that will (likely) be used
    * in rendering operations in other cache domains in this batch, emit a
    * flush.  Once again, we wish for a domain tracker in libdrm to cover
    * usage inside of a batchbuffer.
    */
   intel_batchbuffer_emit_mi_flush(brw);
}

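/*
 * GL_APPLE_object_purgeable support: the hooks below use
 * drm_intel_bo_madvise() to tell the kernel whether a BO's contents may be
 * discarded under memory pressure, and translate the kernel's "retained"
 * answer back into the extension's status enums.
 */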
static GLenum
intel_buffer_purgeable(drm_intel_bo *buffer)
{
   int retained = 0;

   if (buffer != NULL)
      retained = drm_intel_bo_madvise (buffer, I915_MADV_DONTNEED);

   return retained ? GL_VOLATILE_APPLE : GL_RELEASED_APPLE;
}

static GLenum
intel_buffer_object_purgeable(struct gl_context * ctx,
                              struct gl_buffer_object *obj,
                              GLenum option)
{
   struct intel_buffer_object *intel_obj = intel_buffer_object (obj);

   if (intel_obj->buffer != NULL)
      return intel_buffer_purgeable(intel_obj->buffer);

   if (option == GL_RELEASED_APPLE) {
      return GL_RELEASED_APPLE;
   } else {
      /* XXX Create the buffer and madvise(MADV_DONTNEED)? */
      struct brw_context *brw = brw_context(ctx);
      drm_intel_bo *bo = intel_bufferobj_buffer(brw, intel_obj, INTEL_READ);

      return intel_buffer_purgeable(bo);
   }
}

static GLenum
intel_texture_object_purgeable(struct gl_context * ctx,
                               struct gl_texture_object *obj,
                               GLenum option)
{
   struct intel_texture_object *intel;

   (void) ctx;
   (void) option;

   intel = intel_texture_object(obj);
   if (intel->mt == NULL || intel->mt->region == NULL)
      return GL_RELEASED_APPLE;

   return intel_buffer_purgeable(intel->mt->region->bo);
}

static GLenum
intel_render_object_purgeable(struct gl_context * ctx,
                              struct gl_renderbuffer *obj,
                              GLenum option)
{
   struct intel_renderbuffer *intel;

   (void) ctx;
   (void) option;

   intel = intel_renderbuffer(obj);
   if (intel->mt == NULL)
      return GL_RELEASED_APPLE;

   return intel_buffer_purgeable(intel->mt->region->bo);
}

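/**
 * Mark a BO as needed again (I915_MADV_WILLNEED).  Returns
 * GL_RETAINED_APPLE if the kernel still holds the old contents, or
 * GL_UNDEFINED_APPLE if they were discarded in the meantime.
 */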
static GLenum
intel_buffer_unpurgeable(drm_intel_bo *buffer)
{
   int retained;

   retained = 0;
   if (buffer != NULL)
      retained = drm_intel_bo_madvise (buffer, I915_MADV_WILLNEED);

   return retained ? GL_RETAINED_APPLE : GL_UNDEFINED_APPLE;
}

static GLenum
intel_buffer_object_unpurgeable(struct gl_context * ctx,
                                struct gl_buffer_object *obj,
                                GLenum option)
{
   (void) ctx;
   (void) option;

   return intel_buffer_unpurgeable(intel_buffer_object (obj)->buffer);
}

static GLenum
intel_texture_object_unpurgeable(struct gl_context * ctx,
                                 struct gl_texture_object *obj,
                                 GLenum option)
{
   struct intel_texture_object *intel;

   (void) ctx;
   (void) option;

   intel = intel_texture_object(obj);
   if (intel->mt == NULL || intel->mt->region == NULL)
      return GL_UNDEFINED_APPLE;

   return intel_buffer_unpurgeable(intel->mt->region->bo);
}

static GLenum
intel_render_object_unpurgeable(struct gl_context * ctx,
                                struct gl_renderbuffer *obj,
                                GLenum option)
{
   struct intel_renderbuffer *intel;

   (void) ctx;
   (void) option;

   intel = intel_renderbuffer(obj);
   if (intel->mt == NULL)
      return GL_UNDEFINED_APPLE;

   return intel_buffer_unpurgeable(intel->mt->region->bo);
}

void
intelInitBufferObjectFuncs(struct dd_function_table *functions)
{
   functions->NewBufferObject = intel_bufferobj_alloc;
   functions->DeleteBuffer = intel_bufferobj_free;
   functions->BufferData = intel_bufferobj_data;
   functions->BufferSubData = intel_bufferobj_subdata;
   functions->GetBufferSubData = intel_bufferobj_get_subdata;
   functions->MapBufferRange = intel_bufferobj_map_range;
   functions->FlushMappedBufferRange = intel_bufferobj_flush_mapped_range;
   functions->UnmapBuffer = intel_bufferobj_unmap;
   functions->CopyBufferSubData = intel_bufferobj_copy_subdata;

   functions->BufferObjectPurgeable = intel_buffer_object_purgeable;
   functions->TextureObjectPurgeable = intel_texture_object_purgeable;
   functions->RenderObjectPurgeable = intel_render_object_purgeable;

   functions->BufferObjectUnpurgeable = intel_buffer_object_unpurgeable;
   functions->TextureObjectUnpurgeable = intel_texture_object_unpurgeable;
   functions->RenderObjectUnpurgeable = intel_render_object_unpurgeable;
}