/**************************************************************************
 *
 * Copyright 2003 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/


#include "main/imports.h"
#include "main/mtypes.h"
#include "main/macros.h"
#include "main/bufferobj.h"

#include "intel_blit.h"
#include "intel_buffer_objects.h"
#include "intel_batchbuffer.h"
#include "intel_context.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
#include "intel_regions.h"

static GLboolean
intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj);

/** Allocates a new drm_intel_bo to store the data for the buffer object. */
static void
intel_bufferobj_alloc_buffer(struct intel_context *intel,
                             struct intel_buffer_object *intel_obj)
{
   intel_obj->buffer = drm_intel_bo_alloc(intel->bufmgr, "bufferobj",
                                          intel_obj->Base.Size, 64);
}

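/**
 * Drops our reference on the backing drm_intel_bo and clears the
 * upload-offset/source state, leaving any system-memory copy untouched.
 */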
static void
release_buffer(struct intel_buffer_object *intel_obj)
{
   drm_intel_bo_unreference(intel_obj->buffer);
   intel_obj->buffer = NULL;
   intel_obj->offset = 0;
   intel_obj->source = 0;
}

/**
 * There is some duplication between Mesa's bufferobjects and our
 * bufmgr buffers.  Both have an integer handle and a hashtable to
 * lookup an opaque structure.  It would be nice if the handles and
 * internal structures were somehow shared.
 */
static struct gl_buffer_object *
intel_bufferobj_alloc(struct gl_context * ctx, GLuint name, GLenum target)
{
   struct intel_buffer_object *obj = CALLOC_STRUCT(intel_buffer_object);

   _mesa_initialize_buffer_object(ctx, &obj->Base, name, target);

   obj->buffer = NULL;

   return &obj->Base;
}

/**
 * Deallocate/free a vertex/pixel buffer object.
 * Called via glDeleteBuffersARB().
 */
static void
intel_bufferobj_free(struct gl_context * ctx, struct gl_buffer_object *obj)
{
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);

   /* Buffer objects are automatically unmapped when deleted, according
    * to the spec, but Mesa doesn't do UnmapBuffer for us at context destroy
    * (though it does if you call glDeleteBuffers).
    */
   if (obj->Pointer)
      intel_bufferobj_unmap(ctx, obj);

   free(intel_obj->sys_buffer);

   drm_intel_bo_unreference(intel_obj->buffer);
   free(intel_obj);
}


/**
 * Allocate space for and store data in a buffer object.  Any data that was
 * previously stored in the buffer object is lost.  If data is NULL,
 * memory will be allocated, but no copy will occur.
 * Called via ctx->Driver.BufferData().
 * \return true for success, false if out of memory
 */
static GLboolean
intel_bufferobj_data(struct gl_context * ctx,
                     GLenum target,
                     GLsizeiptrARB size,
                     const GLvoid * data,
                     GLenum usage, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   intel_obj->Base.Size = size;
   intel_obj->Base.Usage = usage;

   assert(!obj->Pointer); /* Mesa should have unmapped it */

   if (intel_obj->buffer != NULL)
      release_buffer(intel_obj);

   free(intel_obj->sys_buffer);
   intel_obj->sys_buffer = NULL;

   if (size != 0) {
      /* Stick VBOs in system memory, as we're always doing swtnl with their
       * contents anyway.
       */
      if (target == GL_ARRAY_BUFFER || target == GL_ELEMENT_ARRAY_BUFFER) {
         intel_obj->sys_buffer = malloc(size);
         if (intel_obj->sys_buffer != NULL) {
            if (data != NULL)
               memcpy(intel_obj->sys_buffer, data, size);
            return true;
         }
      }

      intel_bufferobj_alloc_buffer(intel, intel_obj);
      if (!intel_obj->buffer)
         return false;

      if (data != NULL)
         drm_intel_bo_subdata(intel_obj->buffer, 0, size, data);
   }

   return true;
}


/**
 * Replace data in a subrange of buffer object.  If the data range
 * specified by size + offset extends beyond the end of the buffer or
 * if data is NULL, no copy is performed.
 * Called via glBufferSubDataARB().
 */
static void
intel_bufferobj_subdata(struct gl_context * ctx,
                        GLintptrARB offset,
                        GLsizeiptrARB size,
                        const GLvoid * data, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
   bool busy;

   if (size == 0)
      return;

   assert(intel_obj);

   /* If we have a single copy in system memory, update that */
   if (intel_obj->sys_buffer) {
      if (intel_obj->source)
         release_buffer(intel_obj);

      if (intel_obj->buffer == NULL) {
         memcpy((char *)intel_obj->sys_buffer + offset, data, size);
         return;
      }

      free(intel_obj->sys_buffer);
      intel_obj->sys_buffer = NULL;
   }

   /* Otherwise we need to update the copy in video memory. */
   busy =
      drm_intel_bo_busy(intel_obj->buffer) ||
      drm_intel_bo_references(intel->batch.bo, intel_obj->buffer);

   if (busy) {
      if (size == intel_obj->Base.Size) {
         /* Replace the current busy bo with fresh data. */
         drm_intel_bo_unreference(intel_obj->buffer);
         intel_bufferobj_alloc_buffer(intel, intel_obj);
         drm_intel_bo_subdata(intel_obj->buffer, 0, size, data);
      } else {
         perf_debug("Using a blit copy to avoid stalling on %ldb "
                    "glBufferSubData() to a busy buffer object.\n",
                    (long)size);
         drm_intel_bo *temp_bo =
            drm_intel_bo_alloc(intel->bufmgr, "subdata temp", size, 64);

         drm_intel_bo_subdata(temp_bo, 0, size, data);

         intel_emit_linear_blit(intel,
                                intel_obj->buffer, offset,
                                temp_bo, 0,
                                size);

         drm_intel_bo_unreference(temp_bo);
      }
   } else {
      drm_intel_bo_subdata(intel_obj->buffer, offset, size, data);
   }
}


/**
 * Called via glGetBufferSubDataARB().
 */
static void
intel_bufferobj_get_subdata(struct gl_context * ctx,
                            GLintptrARB offset,
                            GLsizeiptrARB size,
                            GLvoid * data, struct gl_buffer_object *obj)
{
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
   struct intel_context *intel = intel_context(ctx);

   assert(intel_obj);
   if (intel_obj->sys_buffer)
      memcpy(data, (char *)intel_obj->sys_buffer + offset, size);
   else {
      if (drm_intel_bo_references(intel->batch.bo, intel_obj->buffer)) {
         intel_batchbuffer_flush(intel);
      }
      drm_intel_bo_get_subdata(intel_obj->buffer, offset, size, data);
   }
}


/**
 * Called via glMapBufferRange and glMapBuffer
 *
 * The goal of this extension is to let apps write new data into a buffer
 * object while rendering that uses its previous contents is still queued or
 * executing.  Without it, you'd end up blocking on execution of rendering
 * every time you mapped the buffer to put new data in.
 *
 * We support it in three ways: If unsynchronized, then don't bother
 * flushing the batchbuffer before mapping the buffer, which can save blocking
 * in many cases.  If we would still block, and they allow the whole buffer
 * to be invalidated, then just allocate a new buffer to replace the old one.
 * If not, and we'd block, and they allow the subrange of the buffer to be
 * invalidated, then we can make a new little BO, let them write into that,
 * and blit it into the real BO at unmap time.
 */
static void *
intel_bufferobj_map_range(struct gl_context * ctx,
                          GLintptr offset, GLsizeiptr length,
                          GLbitfield access, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);

   /* _mesa_MapBufferRange (GL entrypoint) sets these, but the vbo module also
    * internally uses our functions directly.
    */
   obj->Offset = offset;
   obj->Length = length;
   obj->AccessFlags = access;

   if (intel_obj->sys_buffer) {
      const bool read_only =
         (access & (GL_MAP_READ_BIT | GL_MAP_WRITE_BIT)) == GL_MAP_READ_BIT;

      if (!read_only && intel_obj->source)
         release_buffer(intel_obj);

      if (!intel_obj->buffer || intel_obj->source) {
         obj->Pointer = intel_obj->sys_buffer + offset;
         return obj->Pointer;
      }

      free(intel_obj->sys_buffer);
      intel_obj->sys_buffer = NULL;
   }

   if (intel_obj->buffer == NULL) {
      obj->Pointer = NULL;
      return NULL;
   }

   /* If the access is synchronized (like a normal buffer mapping), then get
    * things flushed out so the later mapping syncs appropriately through GEM.
    * If the user doesn't care about existing buffer contents and mapping would
    * cause us to block, then throw out the old buffer.
    *
    * If they set INVALIDATE_BUFFER, we can pitch the current contents to
    * achieve the required synchronization.
    */
   if (!(access & GL_MAP_UNSYNCHRONIZED_BIT)) {
      if (drm_intel_bo_references(intel->batch.bo, intel_obj->buffer)) {
         if (access & GL_MAP_INVALIDATE_BUFFER_BIT) {
            drm_intel_bo_unreference(intel_obj->buffer);
            intel_bufferobj_alloc_buffer(intel, intel_obj);
         } else {
            perf_debug("Stalling on the GPU for mapping a busy buffer "
                       "object\n");
            intel_flush(ctx);
         }
      } else if (drm_intel_bo_busy(intel_obj->buffer) &&
                 (access & GL_MAP_INVALIDATE_BUFFER_BIT)) {
         drm_intel_bo_unreference(intel_obj->buffer);
         intel_bufferobj_alloc_buffer(intel, intel_obj);
      }
   }

   /* If the user is mapping a range of an active buffer object but
    * doesn't require the current contents of that range, make a new
    * BO, and we'll copy what they put in there out at unmap or
    * FlushRange time.
    */
   if ((access & GL_MAP_INVALIDATE_RANGE_BIT) &&
       drm_intel_bo_busy(intel_obj->buffer)) {
      /* Ensure that the base alignment of the allocation meets the alignment
       * guarantees the driver has advertised to the application.
       */
      const unsigned alignment = ctx->Const.MinMapBufferAlignment;
      const unsigned extra = (uintptr_t) offset % alignment;

      if (access & GL_MAP_FLUSH_EXPLICIT_BIT) {
         intel_obj->range_map_buffer = _mesa_align_malloc(length + extra,
                                                          alignment);
         obj->Pointer = intel_obj->range_map_buffer + extra;
      } else {
         intel_obj->range_map_bo = drm_intel_bo_alloc(intel->bufmgr,
                                                      "range map",
                                                      length + extra,
                                                      alignment);
         if (!(access & GL_MAP_READ_BIT)) {
            drm_intel_gem_bo_map_gtt(intel_obj->range_map_bo);
         } else {
            drm_intel_bo_map(intel_obj->range_map_bo,
                             (access & GL_MAP_WRITE_BIT) != 0);
         }
         obj->Pointer = intel_obj->range_map_bo->virtual + extra;
      }
      return obj->Pointer;
   }

   if (access & GL_MAP_UNSYNCHRONIZED_BIT)
      drm_intel_gem_bo_map_unsynchronized(intel_obj->buffer);
   else if (!(access & GL_MAP_READ_BIT)) {
      drm_intel_gem_bo_map_gtt(intel_obj->buffer);
   } else {
      drm_intel_bo_map(intel_obj->buffer, (access & GL_MAP_WRITE_BIT) != 0);
   }

   obj->Pointer = intel_obj->buffer->virtual + offset;
   return obj->Pointer;
}

/* Ideally we'd use a BO to avoid taking up cache space for the temporary
 * data, but FlushMappedBufferRange may be followed by further writes to
 * the pointer, so we would have to re-map after emitting our blit, which
 * would defeat the point.
 */
static void
intel_bufferobj_flush_mapped_range(struct gl_context *ctx,
                                   GLintptr offset, GLsizeiptr length,
                                   struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
   drm_intel_bo *temp_bo;

   /* Unless we're in the range map using a temporary system buffer,
    * there's no work to do.
    */
   if (intel_obj->range_map_buffer == NULL)
      return;

   if (length == 0)
      return;

   temp_bo = drm_intel_bo_alloc(intel->bufmgr, "range map flush", length, 64);

   /* Use obj->Pointer instead of intel_obj->range_map_buffer because the
    * former points to the actual mapping while the latter may be offset to
    * meet alignment guarantees.
    */
   drm_intel_bo_subdata(temp_bo, 0, length, obj->Pointer);

   intel_emit_linear_blit(intel,
                          intel_obj->buffer, obj->Offset + offset,
                          temp_bo, 0,
                          length);

   drm_intel_bo_unreference(temp_bo);
}


/**
 * Called via glUnmapBuffer().
 */
static GLboolean
intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);
   assert(obj->Pointer);
   if (intel_obj->sys_buffer != NULL) {
      /* Always keep the mapping around. */
   } else if (intel_obj->range_map_buffer != NULL) {
      /* Since we've emitted some blits to buffers that will (likely) be used
       * in rendering operations in other cache domains in this batch, emit a
       * flush.  Once again, we wish for a domain tracker in libdrm to cover
       * usage inside of a batchbuffer.
       */
      intel_batchbuffer_emit_mi_flush(intel);
      _mesa_align_free(intel_obj->range_map_buffer);
      intel_obj->range_map_buffer = NULL;
   } else if (intel_obj->range_map_bo != NULL) {
      const unsigned extra = obj->Pointer - intel_obj->range_map_bo->virtual;

      drm_intel_bo_unmap(intel_obj->range_map_bo);

      intel_emit_linear_blit(intel,
                             intel_obj->buffer, obj->Offset,
                             intel_obj->range_map_bo, extra,
                             obj->Length);

      /* Since we've emitted some blits to buffers that will (likely) be used
       * in rendering operations in other cache domains in this batch, emit a
       * flush.  Once again, we wish for a domain tracker in libdrm to cover
       * usage inside of a batchbuffer.
       */
      intel_batchbuffer_emit_mi_flush(intel);

      drm_intel_bo_unreference(intel_obj->range_map_bo);
      intel_obj->range_map_bo = NULL;
   } else if (intel_obj->buffer != NULL) {
      drm_intel_bo_unmap(intel_obj->buffer);
   }
   obj->Pointer = NULL;
   obj->Offset = 0;
   obj->Length = 0;

   return true;
}

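/**
 * Returns the drm_intel_bo backing the buffer object, allocating one (and
 * migrating any system-memory copy into it) if none exists yet.
 */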
drm_intel_bo *
intel_bufferobj_buffer(struct intel_context *intel,
                       struct intel_buffer_object *intel_obj)
{
   if (intel_obj->source)
      release_buffer(intel_obj);

   if (intel_obj->buffer == NULL) {
      intel_bufferobj_alloc_buffer(intel, intel_obj);
      drm_intel_bo_subdata(intel_obj->buffer,
                           0, intel_obj->Base.Size,
                           intel_obj->sys_buffer);

      free(intel_obj->sys_buffer);
      intel_obj->sys_buffer = NULL;
      intel_obj->offset = 0;
   }

   return intel_obj->buffer;
}

#define INTEL_UPLOAD_SIZE (64*1024)

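/**
 * Flushes any CPU-side data still staged for the streamed-upload BO and
 * drops our reference on it.
 */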
void
intel_upload_finish(struct intel_context *intel)
{
   if (!intel->upload.bo)
      return;

   if (intel->upload.buffer_len) {
      drm_intel_bo_subdata(intel->upload.bo,
                           intel->upload.buffer_offset,
                           intel->upload.buffer_len,
                           intel->upload.buffer);
      intel->upload.buffer_len = 0;
   }

   drm_intel_bo_unreference(intel->upload.bo);
   intel->upload.bo = NULL;
}

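/**
 * Retires the current upload BO and allocates a fresh one of at least
 * INTEL_UPLOAD_SIZE bytes, so the requested allocation is sure to fit.
 */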
static void wrap_buffers(struct intel_context *intel, GLuint size)
{
   intel_upload_finish(intel);

   if (size < INTEL_UPLOAD_SIZE)
      size = INTEL_UPLOAD_SIZE;

   intel->upload.bo = drm_intel_bo_alloc(intel->bufmgr, "upload", size, 0);
   intel->upload.offset = 0;
}

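/**
 * Copies \p size bytes at \p ptr into the shared streamed-upload BO,
 * returning a referenced BO and the offset of the data within it.
 *
 * The start offset is rounded up to \p align with integer arithmetic:
 * base = (offset + align - 1) / align * align.  For example, with
 * upload.offset == 100 and align == 64, base becomes (163 / 64) * 64 == 128.
 * Small writes are coalesced in upload.buffer and flushed with a single
 * drm_intel_bo_subdata() call; writes that don't fit go straight to the BO.
 */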
void intel_upload_data(struct intel_context *intel,
                       const void *ptr, GLuint size, GLuint align,
                       drm_intel_bo **return_bo,
                       GLuint *return_offset)
{
   GLuint base, delta;

   base = (intel->upload.offset + align - 1) / align * align;
   if (intel->upload.bo == NULL || base + size > intel->upload.bo->size) {
      wrap_buffers(intel, size);
      base = 0;
   }

   drm_intel_bo_reference(intel->upload.bo);
   *return_bo = intel->upload.bo;
   *return_offset = base;

   delta = base - intel->upload.offset;
   if (intel->upload.buffer_len &&
       intel->upload.buffer_len + delta + size > sizeof(intel->upload.buffer))
   {
      drm_intel_bo_subdata(intel->upload.bo,
                           intel->upload.buffer_offset,
                           intel->upload.buffer_len,
                           intel->upload.buffer);
      intel->upload.buffer_len = 0;
   }

   if (size < sizeof(intel->upload.buffer))
   {
      if (intel->upload.buffer_len == 0)
         intel->upload.buffer_offset = base;
      else
         intel->upload.buffer_len += delta;

      memcpy(intel->upload.buffer + intel->upload.buffer_len, ptr, size);
      intel->upload.buffer_len += size;
   }
   else
   {
      drm_intel_bo_subdata(intel->upload.bo, base, size, ptr);
   }

   intel->upload.offset = base + size;
}

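/**
 * Returns a BO usable as a blit source for the buffer object's contents,
 * streaming a system-memory copy into the shared upload BO (and marking
 * the object as \c source) if no BO exists yet.
 */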
drm_intel_bo *
intel_bufferobj_source(struct intel_context *intel,
                       struct intel_buffer_object *intel_obj,
                       GLuint align, GLuint *offset)
{
   if (intel_obj->buffer == NULL) {
      intel_upload_data(intel,
                        intel_obj->sys_buffer, intel_obj->Base.Size, align,
                        &intel_obj->buffer, &intel_obj->offset);
      intel_obj->source = 1;
   }

   *offset = intel_obj->offset;
   return intel_obj->buffer;
}

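/**
 * Called via glCopyBufferSubData().
 */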
static void
intel_bufferobj_copy_subdata(struct gl_context *ctx,
                             struct gl_buffer_object *src,
                             struct gl_buffer_object *dst,
                             GLintptr read_offset, GLintptr write_offset,
                             GLsizeiptr size)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_src = intel_buffer_object(src);
   struct intel_buffer_object *intel_dst = intel_buffer_object(dst);
   drm_intel_bo *src_bo, *dst_bo;
   GLuint src_offset;

   if (size == 0)
      return;

   /* If we're in system memory, just map and memcpy. */
   if (intel_src->sys_buffer || intel_dst->sys_buffer) {
      /* The same buffer may be used, but note that regions copied may
       * not overlap.
       */
      if (src == dst) {
         char *ptr = intel_bufferobj_map_range(ctx, 0, dst->Size,
                                               GL_MAP_READ_BIT |
                                               GL_MAP_WRITE_BIT,
                                               dst);
         memmove(ptr + write_offset, ptr + read_offset, size);
         intel_bufferobj_unmap(ctx, dst);
      } else {
         const char *src_ptr;
         char *dst_ptr;

         src_ptr = intel_bufferobj_map_range(ctx, 0, src->Size,
                                             GL_MAP_READ_BIT, src);
         dst_ptr = intel_bufferobj_map_range(ctx, 0, dst->Size,
                                             GL_MAP_WRITE_BIT, dst);

         memcpy(dst_ptr + write_offset, src_ptr + read_offset, size);

         intel_bufferobj_unmap(ctx, src);
         intel_bufferobj_unmap(ctx, dst);
      }
      return;
   }

   /* Otherwise, we have real BOs, so blit them. */

   dst_bo = intel_bufferobj_buffer(intel, intel_dst);
   src_bo = intel_bufferobj_source(intel, intel_src, 64, &src_offset);

   intel_emit_linear_blit(intel,
                          dst_bo, write_offset,
                          src_bo, read_offset + src_offset, size);

   /* Since we've emitted some blits to buffers that will (likely) be used
    * in rendering operations in other cache domains in this batch, emit a
    * flush.  Once again, we wish for a domain tracker in libdrm to cover
    * usage inside of a batchbuffer.
    */
   intel_batchbuffer_emit_mi_flush(intel);
}

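/**
 * Marks the BO's backing pages as discardable via I915_MADV_DONTNEED and
 * reports whether the kernel still retains them, using the
 * GL_APPLE_object_purgeable return values.
 */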
static GLenum
intel_buffer_purgeable(drm_intel_bo *buffer)
{
   int retained = 0;

   if (buffer != NULL)
      retained = drm_intel_bo_madvise(buffer, I915_MADV_DONTNEED);

   return retained ? GL_VOLATILE_APPLE : GL_RELEASED_APPLE;
}

static GLenum
intel_buffer_object_purgeable(struct gl_context * ctx,
                              struct gl_buffer_object *obj,
                              GLenum option)
{
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   if (intel_obj->buffer != NULL)
      return intel_buffer_purgeable(intel_obj->buffer);

   if (option == GL_RELEASED_APPLE) {
      free(intel_obj->sys_buffer);
      intel_obj->sys_buffer = NULL;

      return GL_RELEASED_APPLE;
   } else {
      /* XXX Create the buffer and madvise(MADV_DONTNEED)? */
      struct intel_context *intel = intel_context(ctx);
      drm_intel_bo *bo = intel_bufferobj_buffer(intel, intel_obj);

      return intel_buffer_purgeable(bo);
   }
}

static GLenum
intel_texture_object_purgeable(struct gl_context * ctx,
                               struct gl_texture_object *obj,
                               GLenum option)
{
   struct intel_texture_object *intel;

   (void) ctx;
   (void) option;

   intel = intel_texture_object(obj);
   if (intel->mt == NULL || intel->mt->region == NULL)
      return GL_RELEASED_APPLE;

   return intel_buffer_purgeable(intel->mt->region->bo);
}

static GLenum
intel_render_object_purgeable(struct gl_context * ctx,
                              struct gl_renderbuffer *obj,
                              GLenum option)
{
   struct intel_renderbuffer *intel;

   (void) ctx;
   (void) option;

   intel = intel_renderbuffer(obj);
   if (intel->mt == NULL)
      return GL_RELEASED_APPLE;

   return intel_buffer_purgeable(intel->mt->region->bo);
}

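/**
 * Counterpart to intel_buffer_purgeable(): marks the pages as needed again
 * via I915_MADV_WILLNEED and reports whether their contents survived.
 */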
static GLenum
intel_buffer_unpurgeable(drm_intel_bo *buffer)
{
   int retained;

   retained = 0;
   if (buffer != NULL)
      retained = drm_intel_bo_madvise(buffer, I915_MADV_WILLNEED);

   return retained ? GL_RETAINED_APPLE : GL_UNDEFINED_APPLE;
}

static GLenum
intel_buffer_object_unpurgeable(struct gl_context * ctx,
                                struct gl_buffer_object *obj,
                                GLenum option)
{
   (void) ctx;
   (void) option;

   return intel_buffer_unpurgeable(intel_buffer_object(obj)->buffer);
}

static GLenum
intel_texture_object_unpurgeable(struct gl_context * ctx,
                                 struct gl_texture_object *obj,
                                 GLenum option)
{
   struct intel_texture_object *intel;

   (void) ctx;
   (void) option;

   intel = intel_texture_object(obj);
   if (intel->mt == NULL || intel->mt->region == NULL)
      return GL_UNDEFINED_APPLE;

   return intel_buffer_unpurgeable(intel->mt->region->bo);
}

static GLenum
intel_render_object_unpurgeable(struct gl_context * ctx,
                                struct gl_renderbuffer *obj,
                                GLenum option)
{
   struct intel_renderbuffer *intel;

   (void) ctx;
   (void) option;

   intel = intel_renderbuffer(obj);
   if (intel->mt == NULL)
      return GL_UNDEFINED_APPLE;

   return intel_buffer_unpurgeable(intel->mt->region->bo);
}

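/**
 * Plugs our buffer object implementation into the device driver
 * function table.
 */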
void
intelInitBufferObjectFuncs(struct dd_function_table *functions)
{
   functions->NewBufferObject = intel_bufferobj_alloc;
   functions->DeleteBuffer = intel_bufferobj_free;
   functions->BufferData = intel_bufferobj_data;
   functions->BufferSubData = intel_bufferobj_subdata;
   functions->GetBufferSubData = intel_bufferobj_get_subdata;
   functions->MapBufferRange = intel_bufferobj_map_range;
   functions->FlushMappedBufferRange = intel_bufferobj_flush_mapped_range;
   functions->UnmapBuffer = intel_bufferobj_unmap;
   functions->CopyBufferSubData = intel_bufferobj_copy_subdata;

   functions->BufferObjectPurgeable = intel_buffer_object_purgeable;
   functions->TextureObjectPurgeable = intel_texture_object_purgeable;
   functions->RenderObjectPurgeable = intel_render_object_purgeable;

   functions->BufferObjectUnpurgeable = intel_buffer_object_unpurgeable;
   functions->TextureObjectUnpurgeable = intel_texture_object_unpurgeable;
   functions->RenderObjectUnpurgeable = intel_render_object_unpurgeable;
}