/**************************************************************************
 *
 * Copyright 2003 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/


#include "main/imports.h"
#include "main/mtypes.h"
#include "main/macros.h"
#include "main/bufferobj.h"

#include "intel_blit.h"
#include "intel_buffer_objects.h"
#include "intel_batchbuffer.h"
#include "intel_context.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
#include "intel_regions.h"

static GLboolean
intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj);

/** Allocates a new drm_intel_bo to store the data for the buffer object. */
static void
intel_bufferobj_alloc_buffer(struct intel_context *intel,
                             struct intel_buffer_object *intel_obj)
{
   intel_obj->buffer = drm_intel_bo_alloc(intel->bufmgr, "bufferobj",
                                          intel_obj->Base.Size, 64);
}

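/**
 * Releases the object's reference to its backing BO and clears the
 * bookkeeping used when the data is sourced from the shared upload buffer.
 */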
static void
release_buffer(struct intel_buffer_object *intel_obj)
{
   drm_intel_bo_unreference(intel_obj->buffer);
   intel_obj->buffer = NULL;
   intel_obj->offset = 0;
   intel_obj->source = 0;
}

/**
 * There is some duplication between Mesa's buffer objects and our
 * bufmgr buffers.  Both have an integer handle and a hashtable to
 * look up an opaque structure.  It would be nice if the handles and
 * internal structures were somehow shared.
 */
static struct gl_buffer_object *
intel_bufferobj_alloc(struct gl_context * ctx, GLuint name, GLenum target)
{
   struct intel_buffer_object *obj = CALLOC_STRUCT(intel_buffer_object);
   if (!obj)
      return NULL;

   _mesa_initialize_buffer_object(ctx, &obj->Base, name, target);

   obj->buffer = NULL;

   return &obj->Base;
}

/**
 * Deallocate/free a vertex/pixel buffer object.
 * Called via glDeleteBuffersARB().
 */
static void
intel_bufferobj_free(struct gl_context * ctx, struct gl_buffer_object *obj)
{
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);

   /* Buffer objects are automatically unmapped when deleted according
    * to the spec, but Mesa doesn't do UnmapBuffer for us at context destroy
    * (though it does if you call glDeleteBuffers).
    */
   if (obj->Pointer)
      intel_bufferobj_unmap(ctx, obj);

   free(intel_obj->sys_buffer);

   drm_intel_bo_unreference(intel_obj->buffer);
   free(intel_obj);
}


/**
 * Allocate space for and store data in a buffer object.  Any data that was
 * previously stored in the buffer object is lost.  If data is NULL,
 * memory will be allocated, but no copy will occur.
 * Called via ctx->Driver.BufferData().
 * \return true for success, false if out of memory
 */
static GLboolean
intel_bufferobj_data(struct gl_context * ctx,
                     GLenum target,
                     GLsizeiptrARB size,
                     const GLvoid * data,
                     GLenum usage, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   intel_obj->Base.Size = size;
   intel_obj->Base.Usage = usage;

   assert(!obj->Pointer); /* Mesa should have unmapped it */

   if (intel_obj->buffer != NULL)
      release_buffer(intel_obj);

   free(intel_obj->sys_buffer);
   intel_obj->sys_buffer = NULL;

   if (size != 0) {
      /* Stick VBOs in system memory, as we're always doing swtnl with their
       * contents anyway.
       */
      if (target == GL_ARRAY_BUFFER || target == GL_ELEMENT_ARRAY_BUFFER) {
         intel_obj->sys_buffer = malloc(size);
         if (intel_obj->sys_buffer != NULL) {
            if (data != NULL)
               memcpy(intel_obj->sys_buffer, data, size);
            return true;
         }
      }

      intel_bufferobj_alloc_buffer(intel, intel_obj);
      if (!intel_obj->buffer)
         return false;

      if (data != NULL)
         drm_intel_bo_subdata(intel_obj->buffer, 0, size, data);
   }

   return true;
}


/**
 * Replace data in a subrange of a buffer object.  If the data range
 * specified by size + offset extends beyond the end of the buffer or
 * if data is NULL, no copy is performed.
 * Called via glBufferSubDataARB().
 */
static void
intel_bufferobj_subdata(struct gl_context * ctx,
                        GLintptrARB offset,
                        GLsizeiptrARB size,
                        const GLvoid * data, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
   bool busy;

   if (size == 0)
      return;

   assert(intel_obj);

   /* If we have a single copy in system memory, update that */
   if (intel_obj->sys_buffer) {
      if (intel_obj->source)
         release_buffer(intel_obj);

      if (intel_obj->buffer == NULL) {
         memcpy((char *)intel_obj->sys_buffer + offset, data, size);
         return;
      }

      free(intel_obj->sys_buffer);
      intel_obj->sys_buffer = NULL;
   }

   /* Otherwise we need to update the copy in video memory. */
   busy =
      drm_intel_bo_busy(intel_obj->buffer) ||
      drm_intel_bo_references(intel->batch.bo, intel_obj->buffer);

   if (busy) {
      if (size == intel_obj->Base.Size) {
         /* Replace the current busy bo with fresh data. */
         drm_intel_bo_unreference(intel_obj->buffer);
         intel_bufferobj_alloc_buffer(intel, intel_obj);
         drm_intel_bo_subdata(intel_obj->buffer, 0, size, data);
      } else {
         perf_debug("Using a blit copy to avoid stalling on %ldb "
                    "glBufferSubData() to a busy buffer object.\n",
                    (long)size);
         drm_intel_bo *temp_bo =
            drm_intel_bo_alloc(intel->bufmgr, "subdata temp", size, 64);

         drm_intel_bo_subdata(temp_bo, 0, size, data);

         intel_emit_linear_blit(intel,
                                intel_obj->buffer, offset,
                                temp_bo, 0,
                                size);

         drm_intel_bo_unreference(temp_bo);
      }
   } else {
      drm_intel_bo_subdata(intel_obj->buffer, offset, size, data);
   }
}


/**
 * Called via glGetBufferSubDataARB().
 */
static void
intel_bufferobj_get_subdata(struct gl_context * ctx,
                            GLintptrARB offset,
                            GLsizeiptrARB size,
                            GLvoid * data, struct gl_buffer_object *obj)
{
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
   struct intel_context *intel = intel_context(ctx);

   assert(intel_obj);
   if (intel_obj->sys_buffer)
      memcpy(data, (char *)intel_obj->sys_buffer + offset, size);
   else {
      if (drm_intel_bo_references(intel->batch.bo, intel_obj->buffer)) {
         intel_batchbuffer_flush(intel);
      }
      drm_intel_bo_get_subdata(intel_obj->buffer, offset, size, data);
   }
}


/**
 * Called via glMapBufferRange() and glMapBuffer().
 *
 * The goal of this extension is to allow apps to accumulate their rendering
 * at the same time as they accumulate new contents for their buffer objects.
 * Without it, you'd end up blocking on execution of rendering every time
 * you mapped the buffer to put new data in.
 *
 * We support it in three ways: If the map is unsynchronized, don't bother
 * flushing the batchbuffer before mapping the buffer, which can save blocking
 * in many cases.  If we would still block, and the whole buffer may be
 * invalidated, just allocate a new buffer to replace the old one.  If not,
 * and we'd block, and the mapped subrange of the buffer may be invalidated,
 * make a new little BO, let the app write into that, and blit it into the
 * real BO at unmap time.
 */
static void *
intel_bufferobj_map_range(struct gl_context * ctx,
                          GLintptr offset, GLsizeiptr length,
                          GLbitfield access, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);

   /* _mesa_MapBufferRange (GL entrypoint) sets these, but the vbo module also
    * internally uses our functions directly.
    */
   obj->Offset = offset;
   obj->Length = length;
   obj->AccessFlags = access;

   if (intel_obj->sys_buffer) {
      const bool read_only =
         (access & (GL_MAP_READ_BIT | GL_MAP_WRITE_BIT)) == GL_MAP_READ_BIT;

      if (!read_only && intel_obj->source)
         release_buffer(intel_obj);

      if (!intel_obj->buffer || intel_obj->source) {
         obj->Pointer = intel_obj->sys_buffer + offset;
         return obj->Pointer;
      }

      free(intel_obj->sys_buffer);
      intel_obj->sys_buffer = NULL;
   }

   if (intel_obj->buffer == NULL) {
      obj->Pointer = NULL;
      return NULL;
   }

   /* If the access is synchronized (like a normal buffer mapping), then get
    * things flushed out so the later mapping syncs appropriately through GEM.
    * If the user doesn't care about existing buffer contents and mapping would
    * cause us to block, then throw out the old buffer.
    *
    * If they set INVALIDATE_BUFFER, we can pitch the current contents to
    * achieve the required synchronization.
    */
   if (!(access & GL_MAP_UNSYNCHRONIZED_BIT)) {
      if (drm_intel_bo_references(intel->batch.bo, intel_obj->buffer)) {
         if (access & GL_MAP_INVALIDATE_BUFFER_BIT) {
            drm_intel_bo_unreference(intel_obj->buffer);
            intel_bufferobj_alloc_buffer(intel, intel_obj);
         } else {
            perf_debug("Stalling on the GPU for mapping a busy buffer "
                       "object\n");
            intel_flush(ctx);
         }
      } else if (drm_intel_bo_busy(intel_obj->buffer) &&
                 (access & GL_MAP_INVALIDATE_BUFFER_BIT)) {
         drm_intel_bo_unreference(intel_obj->buffer);
         intel_bufferobj_alloc_buffer(intel, intel_obj);
      }
   }

   /* If the user is mapping a range of an active buffer object but
    * doesn't require the current contents of that range, make a new
    * BO, and we'll copy what they put in there out at unmap or
    * FlushRange time.
    */
   if ((access & GL_MAP_INVALIDATE_RANGE_BIT) &&
       drm_intel_bo_busy(intel_obj->buffer)) {
      if (access & GL_MAP_FLUSH_EXPLICIT_BIT) {
         intel_obj->range_map_buffer = malloc(length);
         obj->Pointer = intel_obj->range_map_buffer;
      } else {
         intel_obj->range_map_bo = drm_intel_bo_alloc(intel->bufmgr,
                                                      "range map",
                                                      length, 64);
         if (!(access & GL_MAP_READ_BIT)) {
            drm_intel_gem_bo_map_gtt(intel_obj->range_map_bo);
         } else {
            drm_intel_bo_map(intel_obj->range_map_bo,
                             (access & GL_MAP_WRITE_BIT) != 0);
         }
         obj->Pointer = intel_obj->range_map_bo->virtual;
      }
      return obj->Pointer;
   }

   if (access & GL_MAP_UNSYNCHRONIZED_BIT)
      drm_intel_gem_bo_map_unsynchronized(intel_obj->buffer);
   else if (!(access & GL_MAP_READ_BIT)) {
      drm_intel_gem_bo_map_gtt(intel_obj->buffer);
   } else {
      drm_intel_bo_map(intel_obj->buffer, (access & GL_MAP_WRITE_BIT) != 0);
   }

   obj->Pointer = intel_obj->buffer->virtual + offset;
   return obj->Pointer;
}

/* Ideally we'd use a BO to avoid taking up cache space for the temporary
 * data, but FlushMappedBufferRange may be followed by further writes to
 * the pointer, so we would have to re-map after emitting our blit, which
 * would defeat the point.
 */
static void
intel_bufferobj_flush_mapped_range(struct gl_context *ctx,
                                   GLintptr offset, GLsizeiptr length,
                                   struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
   drm_intel_bo *temp_bo;

   /* Unless we're in the range map using a temporary system buffer,
    * there's no work to do.
    */
   if (intel_obj->range_map_buffer == NULL)
      return;

   if (length == 0)
      return;

   temp_bo = drm_intel_bo_alloc(intel->bufmgr, "range map flush", length, 64);

   /* "offset" is relative to the start of the mapped range, which begins at
    * the start of range_map_buffer.
    */
   drm_intel_bo_subdata(temp_bo, 0, length,
                        (char *)intel_obj->range_map_buffer + offset);

   intel_emit_linear_blit(intel,
                          intel_obj->buffer, obj->Offset + offset,
                          temp_bo, 0,
                          length);

   drm_intel_bo_unreference(temp_bo);
}


/**
 * Called via glUnmapBuffer().
 */
static GLboolean
intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);
   assert(obj->Pointer);
   if (intel_obj->sys_buffer != NULL) {
      /* always keep the mapping around. */
   } else if (intel_obj->range_map_buffer != NULL) {
      /* Since we've emitted some blits to buffers that will (likely) be used
       * in rendering operations in other cache domains in this batch, emit a
       * flush.  Once again, we wish for a domain tracker in libdrm to cover
       * usage inside of a batchbuffer.
       */
      intel_batchbuffer_emit_mi_flush(intel);
      free(intel_obj->range_map_buffer);
      intel_obj->range_map_buffer = NULL;
   } else if (intel_obj->range_map_bo != NULL) {
      drm_intel_bo_unmap(intel_obj->range_map_bo);

      intel_emit_linear_blit(intel,
                             intel_obj->buffer, obj->Offset,
                             intel_obj->range_map_bo, 0,
                             obj->Length);

      /* Since we've emitted some blits to buffers that will (likely) be used
       * in rendering operations in other cache domains in this batch, emit a
       * flush.  Once again, we wish for a domain tracker in libdrm to cover
       * usage inside of a batchbuffer.
       */
      intel_batchbuffer_emit_mi_flush(intel);

      drm_intel_bo_unreference(intel_obj->range_map_bo);
      intel_obj->range_map_bo = NULL;
   } else if (intel_obj->buffer != NULL) {
      drm_intel_bo_unmap(intel_obj->buffer);
   }
   obj->Pointer = NULL;
   obj->Offset = 0;
   obj->Length = 0;

   return true;
}

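/**
 * Returns a BO holding the buffer object's full contents, allocating one
 * and migrating any system-memory copy into it first.  Any association
 * with the shared upload buffer is dropped before doing so.
 */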
drm_intel_bo *
intel_bufferobj_buffer(struct intel_context *intel,
                       struct intel_buffer_object *intel_obj,
                       GLuint flag)
{
   if (intel_obj->source)
      release_buffer(intel_obj);

   if (intel_obj->buffer == NULL) {
      intel_bufferobj_alloc_buffer(intel, intel_obj);
      drm_intel_bo_subdata(intel_obj->buffer,
                           0, intel_obj->Base.Size,
                           intel_obj->sys_buffer);

      free(intel_obj->sys_buffer);
      intel_obj->sys_buffer = NULL;
      intel_obj->offset = 0;
   }

   return intel_obj->buffer;
}

#define INTEL_UPLOAD_SIZE (64*1024)

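/**
 * Flushes any data still staged in the CPU-side upload buffer to the
 * current upload BO, then drops our reference to that BO.
 */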
void
intel_upload_finish(struct intel_context *intel)
{
   if (!intel->upload.bo)
      return;

   if (intel->upload.buffer_len) {
      drm_intel_bo_subdata(intel->upload.bo,
                           intel->upload.buffer_offset,
                           intel->upload.buffer_len,
                           intel->upload.buffer);
      intel->upload.buffer_len = 0;
   }

   drm_intel_bo_unreference(intel->upload.bo);
   intel->upload.bo = NULL;
}

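/**
 * Retires the current upload BO and allocates a fresh one of at least
 * INTEL_UPLOAD_SIZE bytes to stream further uploads into.
 */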
static void wrap_buffers(struct intel_context *intel, GLuint size)
{
   intel_upload_finish(intel);

   if (size < INTEL_UPLOAD_SIZE)
      size = INTEL_UPLOAD_SIZE;

   intel->upload.bo = drm_intel_bo_alloc(intel->bufmgr, "upload", size, 0);
   intel->upload.offset = 0;
}

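/**
 * Copies "size" bytes from "ptr" into the shared streaming upload BO at an
 * offset aligned to "align", returning a new reference to that BO and the
 * offset of the data within it.
 *
 * Small uploads are coalesced in the intel->upload.buffer staging area and
 * written out lazily; larger ones go straight to drm_intel_bo_subdata().
 * When the current upload BO fills up, a new one is allocated.
 */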
void intel_upload_data(struct intel_context *intel,
                       const void *ptr, GLuint size, GLuint align,
                       drm_intel_bo **return_bo,
                       GLuint *return_offset)
{
   GLuint base, delta;

   base = (intel->upload.offset + align - 1) / align * align;
   if (intel->upload.bo == NULL || base + size > intel->upload.bo->size) {
      wrap_buffers(intel, size);
      base = 0;
   }

   drm_intel_bo_reference(intel->upload.bo);
   *return_bo = intel->upload.bo;
   *return_offset = base;

   delta = base - intel->upload.offset;
   if (intel->upload.buffer_len &&
       intel->upload.buffer_len + delta + size > sizeof(intel->upload.buffer))
   {
      drm_intel_bo_subdata(intel->upload.bo,
                           intel->upload.buffer_offset,
                           intel->upload.buffer_len,
                           intel->upload.buffer);
      intel->upload.buffer_len = 0;
   }

   if (size < sizeof(intel->upload.buffer))
   {
      if (intel->upload.buffer_len == 0)
         intel->upload.buffer_offset = base;
      else
         intel->upload.buffer_len += delta;

      memcpy(intel->upload.buffer + intel->upload.buffer_len, ptr, size);
      intel->upload.buffer_len += size;
   }
   else
   {
      drm_intel_bo_subdata(intel->upload.bo, base, size, ptr);
   }

   intel->upload.offset = base + size;
}

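/**
 * Returns a BO containing the buffer object's data for use as a read-only
 * source (e.g. for blits).  If the data only lives in system memory, it is
 * streamed into the shared upload BO and the object is marked as sourced
 * from it; *offset receives the data's offset within the returned BO.
 */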
drm_intel_bo *
intel_bufferobj_source(struct intel_context *intel,
                       struct intel_buffer_object *intel_obj,
                       GLuint align, GLuint *offset)
{
   if (intel_obj->buffer == NULL) {
      intel_upload_data(intel,
                        intel_obj->sys_buffer, intel_obj->Base.Size, align,
                        &intel_obj->buffer, &intel_obj->offset);
      intel_obj->source = 1;
   }

   *offset = intel_obj->offset;
   return intel_obj->buffer;
}

static void
intel_bufferobj_copy_subdata(struct gl_context *ctx,
                             struct gl_buffer_object *src,
                             struct gl_buffer_object *dst,
                             GLintptr read_offset, GLintptr write_offset,
                             GLsizeiptr size)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_src = intel_buffer_object(src);
   struct intel_buffer_object *intel_dst = intel_buffer_object(dst);
   drm_intel_bo *src_bo, *dst_bo;
   GLuint src_offset;

   if (size == 0)
      return;

   /* If we're in system memory, just map and memcpy. */
   if (intel_src->sys_buffer || intel_dst->sys_buffer) {
      /* The same buffer may be used, but note that regions copied may
       * not overlap.
       */
      if (src == dst) {
         char *ptr = intel_bufferobj_map_range(ctx, 0, dst->Size,
                                               GL_MAP_READ_BIT |
                                               GL_MAP_WRITE_BIT,
                                               dst);
         memmove(ptr + write_offset, ptr + read_offset, size);
         intel_bufferobj_unmap(ctx, dst);
      } else {
         const char *src_ptr;
         char *dst_ptr;

         src_ptr = intel_bufferobj_map_range(ctx, 0, src->Size,
                                             GL_MAP_READ_BIT, src);
         dst_ptr = intel_bufferobj_map_range(ctx, 0, dst->Size,
                                             GL_MAP_WRITE_BIT, dst);

         memcpy(dst_ptr + write_offset, src_ptr + read_offset, size);

         intel_bufferobj_unmap(ctx, src);
         intel_bufferobj_unmap(ctx, dst);
      }
      return;
   }

   /* Otherwise, we have real BOs, so blit them. */

   dst_bo = intel_bufferobj_buffer(intel, intel_dst, INTEL_WRITE_PART);
   src_bo = intel_bufferobj_source(intel, intel_src, 64, &src_offset);

   intel_emit_linear_blit(intel,
                          dst_bo, write_offset,
                          src_bo, read_offset + src_offset, size);

   /* Since we've emitted some blits to buffers that will (likely) be used
    * in rendering operations in other cache domains in this batch, emit a
    * flush.  Once again, we wish for a domain tracker in libdrm to cover
    * usage inside of a batchbuffer.
    */
   intel_batchbuffer_emit_mi_flush(intel);
}

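/**
 * Tells the kernel the BO's contents may be discarded under memory pressure
 * (I915_MADV_DONTNEED) and reports, in GL_APPLE_object_purgeable terms,
 * whether they are still resident.
 */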
static GLenum
intel_buffer_purgeable(drm_intel_bo *buffer)
{
   int retained = 0;

   if (buffer != NULL)
      retained = drm_intel_bo_madvise(buffer, I915_MADV_DONTNEED);

   return retained ? GL_VOLATILE_APPLE : GL_RELEASED_APPLE;
}

static GLenum
intel_buffer_object_purgeable(struct gl_context * ctx,
                              struct gl_buffer_object *obj,
                              GLenum option)
{
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   if (intel_obj->buffer != NULL)
      return intel_buffer_purgeable(intel_obj->buffer);

   if (option == GL_RELEASED_APPLE) {
      free(intel_obj->sys_buffer);
      intel_obj->sys_buffer = NULL;

      return GL_RELEASED_APPLE;
   } else {
      /* XXX Create the buffer and madvise(MADV_DONTNEED)? */
      struct intel_context *intel = intel_context(ctx);
      drm_intel_bo *bo = intel_bufferobj_buffer(intel, intel_obj, INTEL_READ);

      return intel_buffer_purgeable(bo);
   }
}

static GLenum
intel_texture_object_purgeable(struct gl_context * ctx,
                               struct gl_texture_object *obj,
                               GLenum option)
{
   struct intel_texture_object *intel;

   (void) ctx;
   (void) option;

   intel = intel_texture_object(obj);
   if (intel->mt == NULL || intel->mt->region == NULL)
      return GL_RELEASED_APPLE;

   return intel_buffer_purgeable(intel->mt->region->bo);
}

static GLenum
intel_render_object_purgeable(struct gl_context * ctx,
                              struct gl_renderbuffer *obj,
                              GLenum option)
{
   struct intel_renderbuffer *intel;

   (void) ctx;
   (void) option;

   intel = intel_renderbuffer(obj);
   if (intel->mt == NULL)
      return GL_RELEASED_APPLE;

   return intel_buffer_purgeable(intel->mt->region->bo);
}

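/**
 * Tells the kernel the BO's contents are needed again (I915_MADV_WILLNEED)
 * and reports whether they survived: GL_RETAINED_APPLE if so,
 * GL_UNDEFINED_APPLE if they were discarded in the meantime.
 */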
static GLenum
intel_buffer_unpurgeable(drm_intel_bo *buffer)
{
   int retained;

   retained = 0;
   if (buffer != NULL)
      retained = drm_intel_bo_madvise(buffer, I915_MADV_WILLNEED);

   return retained ? GL_RETAINED_APPLE : GL_UNDEFINED_APPLE;
}

static GLenum
intel_buffer_object_unpurgeable(struct gl_context * ctx,
                                struct gl_buffer_object *obj,
                                GLenum option)
{
   (void) ctx;
   (void) option;

   return intel_buffer_unpurgeable(intel_buffer_object(obj)->buffer);
}

static GLenum
intel_texture_object_unpurgeable(struct gl_context * ctx,
                                 struct gl_texture_object *obj,
                                 GLenum option)
{
   struct intel_texture_object *intel;

   (void) ctx;
   (void) option;

   intel = intel_texture_object(obj);
   if (intel->mt == NULL || intel->mt->region == NULL)
      return GL_UNDEFINED_APPLE;

   return intel_buffer_unpurgeable(intel->mt->region->bo);
}

static GLenum
intel_render_object_unpurgeable(struct gl_context * ctx,
                                struct gl_renderbuffer *obj,
                                GLenum option)
{
   struct intel_renderbuffer *intel;

   (void) ctx;
   (void) option;

   intel = intel_renderbuffer(obj);
   if (intel->mt == NULL)
      return GL_UNDEFINED_APPLE;

   return intel_buffer_unpurgeable(intel->mt->region->bo);
}

void
intelInitBufferObjectFuncs(struct dd_function_table *functions)
{
   functions->NewBufferObject = intel_bufferobj_alloc;
   functions->DeleteBuffer = intel_bufferobj_free;
   functions->BufferData = intel_bufferobj_data;
   functions->BufferSubData = intel_bufferobj_subdata;
   functions->GetBufferSubData = intel_bufferobj_get_subdata;
   functions->MapBufferRange = intel_bufferobj_map_range;
   functions->FlushMappedBufferRange = intel_bufferobj_flush_mapped_range;
   functions->UnmapBuffer = intel_bufferobj_unmap;
   functions->CopyBufferSubData = intel_bufferobj_copy_subdata;

   functions->BufferObjectPurgeable = intel_buffer_object_purgeable;
   functions->TextureObjectPurgeable = intel_texture_object_purgeable;
   functions->RenderObjectPurgeable = intel_render_object_purgeable;

   functions->BufferObjectUnpurgeable = intel_buffer_object_unpurgeable;
   functions->TextureObjectUnpurgeable = intel_texture_object_unpurgeable;
   functions->RenderObjectUnpurgeable = intel_render_object_unpurgeable;
}