/**************************************************************************
 *
 * Copyright 2003 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/


#include "main/imports.h"
#include "main/mtypes.h"
#include "main/macros.h"
#include "main/bufferobj.h"

#include "intel_blit.h"
#include "intel_buffer_objects.h"
#include "intel_batchbuffer.h"
#include "intel_context.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
#include "intel_regions.h"

static GLboolean
intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj);

/** Allocates a new drm_intel_bo to store the data for the buffer object. */
static void
intel_bufferobj_alloc_buffer(struct intel_context *intel,
                             struct intel_buffer_object *intel_obj)
{
   intel_obj->buffer = drm_intel_bo_alloc(intel->bufmgr, "bufferobj",
                                          intel_obj->Base.Size, 64);
}

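/* Drop our reference to the backing BO.  If the BO was only a view into the
 * shared upload buffer (intel_obj->source), this also forgets that
 * association so a fresh allocation happens on the next use.
 */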
static void
release_buffer(struct intel_buffer_object *intel_obj)
{
   drm_intel_bo_unreference(intel_obj->buffer);
   intel_obj->buffer = NULL;
   intel_obj->offset = 0;
   intel_obj->source = 0;
}

/**
 * There is some duplication between mesa's bufferobjects and our
 * bufmgr buffers.  Both have an integer handle and a hashtable to
 * look up an opaque structure.  It would be nice if the handles and
 * internal structure were somehow shared.
 */
static struct gl_buffer_object *
intel_bufferobj_alloc(struct gl_context * ctx, GLuint name, GLenum target)
{
   struct intel_buffer_object *obj = CALLOC_STRUCT(intel_buffer_object);

   _mesa_initialize_buffer_object(ctx, &obj->Base, name, target);

   obj->buffer = NULL;

   return &obj->Base;
}

/**
 * Deallocate/free a vertex/pixel buffer object.
 * Called via glDeleteBuffersARB().
 */
static void
intel_bufferobj_free(struct gl_context * ctx, struct gl_buffer_object *obj)
{
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);

   /* Buffer objects are automatically unmapped when they are deleted,
    * according to the spec, but Mesa doesn't do UnmapBuffer for us at context
    * destroy (though it does if you call glDeleteBuffers).
    */
   if (obj->Pointer)
      intel_bufferobj_unmap(ctx, obj);

   free(intel_obj->sys_buffer);

   drm_intel_bo_unreference(intel_obj->buffer);
   free(intel_obj);
}



/**
 * Allocate space for and store data in a buffer object.  Any data that was
 * previously stored in the buffer object is lost.  If data is NULL,
 * memory will be allocated, but no copy will occur.
 * Called via ctx->Driver.BufferData().
 * \return true for success, false if out of memory
 */
static GLboolean
intel_bufferobj_data(struct gl_context * ctx,
                     GLenum target,
                     GLsizeiptrARB size,
                     const GLvoid * data,
                     GLenum usage,
                     GLbitfield storageFlags,
                     struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   intel_obj->Base.Size = size;
   intel_obj->Base.Usage = usage;
   intel_obj->Base.StorageFlags = storageFlags;

   assert(!obj->Pointer); /* Mesa should have unmapped it */

   if (intel_obj->buffer != NULL)
      release_buffer(intel_obj);

   free(intel_obj->sys_buffer);
   intel_obj->sys_buffer = NULL;

   if (size != 0) {
      /* Stick VBOs in system memory, as we're always doing swtnl with their
       * contents anyway.
       */
      if (target == GL_ARRAY_BUFFER || target == GL_ELEMENT_ARRAY_BUFFER) {
         intel_obj->sys_buffer = malloc(size);
         if (intel_obj->sys_buffer != NULL) {
            if (data != NULL)
               memcpy(intel_obj->sys_buffer, data, size);
            return true;
         }
      }

      intel_bufferobj_alloc_buffer(intel, intel_obj);
      if (!intel_obj->buffer)
         return false;

      if (data != NULL)
         drm_intel_bo_subdata(intel_obj->buffer, 0, size, data);
   }

   return true;
}


/**
 * Replace data in a subrange of a buffer object.  If the data range
 * specified by size + offset extends beyond the end of the buffer or
 * if data is NULL, no copy is performed.
 * Called via glBufferSubDataARB().
 */
static void
intel_bufferobj_subdata(struct gl_context * ctx,
                        GLintptrARB offset,
                        GLsizeiptrARB size,
                        const GLvoid * data, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
   bool busy;

   if (size == 0)
      return;

   assert(intel_obj);

   /* If we have a single copy in system memory, update that */
   if (intel_obj->sys_buffer) {
      if (intel_obj->source)
         release_buffer(intel_obj);

      if (intel_obj->buffer == NULL) {
         memcpy((char *)intel_obj->sys_buffer + offset, data, size);
         return;
      }

      free(intel_obj->sys_buffer);
      intel_obj->sys_buffer = NULL;
   }

   /* Otherwise we need to update the copy in video memory. */
   busy =
      drm_intel_bo_busy(intel_obj->buffer) ||
      drm_intel_bo_references(intel->batch.bo, intel_obj->buffer);

   if (busy) {
      if (size == intel_obj->Base.Size) {
         /* Replace the current busy bo with fresh data. */
         drm_intel_bo_unreference(intel_obj->buffer);
         intel_bufferobj_alloc_buffer(intel, intel_obj);
         drm_intel_bo_subdata(intel_obj->buffer, 0, size, data);
      } else {
         perf_debug("Using a blit copy to avoid stalling on %ldb "
                    "glBufferSubData() to a busy buffer object.\n",
                    (long)size);
         drm_intel_bo *temp_bo =
            drm_intel_bo_alloc(intel->bufmgr, "subdata temp", size, 64);

         drm_intel_bo_subdata(temp_bo, 0, size, data);

         intel_emit_linear_blit(intel,
                                intel_obj->buffer, offset,
                                temp_bo, 0,
                                size);

         drm_intel_bo_unreference(temp_bo);
      }
   } else {
      drm_intel_bo_subdata(intel_obj->buffer, offset, size, data);
   }
}


/**
 * Called via glGetBufferSubDataARB().
 */
static void
intel_bufferobj_get_subdata(struct gl_context * ctx,
                            GLintptrARB offset,
                            GLsizeiptrARB size,
                            GLvoid * data, struct gl_buffer_object *obj)
{
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
   struct intel_context *intel = intel_context(ctx);

   assert(intel_obj);
   if (intel_obj->sys_buffer)
      memcpy(data, (char *)intel_obj->sys_buffer + offset, size);
   else {
      if (drm_intel_bo_references(intel->batch.bo, intel_obj->buffer)) {
         intel_batchbuffer_flush(intel);
      }
      drm_intel_bo_get_subdata(intel_obj->buffer, offset, size, data);
   }
}



/**
 * Called via glMapBufferRange and glMapBuffer
 *
 * The goal of this extension is to allow apps to keep accumulating rendering
 * commands at the same time as they accumulate new data in their buffer
 * object.  Without it, you'd end up blocking on execution of rendering every
 * time you mapped the buffer to put new data in.
 *
 * We support it in 3 ways: If unsynchronized, then don't bother
 * flushing the batchbuffer before mapping the buffer, which can save blocking
 * in many cases.  If we would still block, and they allow the whole buffer
 * to be invalidated, then just allocate a new buffer to replace the old one.
 * If not, and we'd block, and they allow the subrange of the buffer to be
 * invalidated, then we can make a new little BO, let them write into that,
 * and blit it into the real BO at unmap time.
 */
static void *
intel_bufferobj_map_range(struct gl_context * ctx,
                          GLintptr offset, GLsizeiptr length,
                          GLbitfield access, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);

   /* _mesa_MapBufferRange (GL entrypoint) sets these, but the vbo module also
    * internally uses our functions directly.
    */
   obj->Offset = offset;
   obj->Length = length;
   obj->AccessFlags = access;

   if (intel_obj->sys_buffer) {
      const bool read_only =
         (access & (GL_MAP_READ_BIT | GL_MAP_WRITE_BIT)) == GL_MAP_READ_BIT;

      if (!read_only && intel_obj->source)
         release_buffer(intel_obj);

      if (!intel_obj->buffer || intel_obj->source) {
         obj->Pointer = intel_obj->sys_buffer + offset;
         return obj->Pointer;
      }

      free(intel_obj->sys_buffer);
      intel_obj->sys_buffer = NULL;
   }

   if (intel_obj->buffer == NULL) {
      obj->Pointer = NULL;
      return NULL;
   }

   /* If the access is synchronized (like a normal buffer mapping), then get
    * things flushed out so the later mapping syncs appropriately through GEM.
    * If the user doesn't care about existing buffer contents and mapping would
    * cause us to block, then throw out the old buffer.
    *
    * If they set INVALIDATE_BUFFER, we can pitch the current contents to
    * achieve the required synchronization.
    */
   if (!(access & GL_MAP_UNSYNCHRONIZED_BIT)) {
      if (drm_intel_bo_references(intel->batch.bo, intel_obj->buffer)) {
         if (access & GL_MAP_INVALIDATE_BUFFER_BIT) {
            drm_intel_bo_unreference(intel_obj->buffer);
            intel_bufferobj_alloc_buffer(intel, intel_obj);
         } else {
            perf_debug("Stalling on the GPU for mapping a busy buffer "
                       "object\n");
            intel_flush(ctx);
         }
      } else if (drm_intel_bo_busy(intel_obj->buffer) &&
                 (access & GL_MAP_INVALIDATE_BUFFER_BIT)) {
         drm_intel_bo_unreference(intel_obj->buffer);
         intel_bufferobj_alloc_buffer(intel, intel_obj);
      }
   }

   /* If the user is mapping a range of an active buffer object but
    * doesn't require the current contents of that range, make a new
    * BO, and we'll copy what they put in there out at unmap or
    * FlushRange time.
    */
   if ((access & GL_MAP_INVALIDATE_RANGE_BIT) &&
       drm_intel_bo_busy(intel_obj->buffer)) {
      /* Ensure that the base alignment of the allocation meets the alignment
       * guarantees the driver has advertised to the application.
       */
      const unsigned alignment = ctx->Const.MinMapBufferAlignment;
      const unsigned extra = (uintptr_t) offset % alignment;

      if (access & GL_MAP_FLUSH_EXPLICIT_BIT) {
         intel_obj->range_map_buffer = _mesa_align_malloc(length + extra,
                                                          alignment);
         obj->Pointer = intel_obj->range_map_buffer + extra;
      } else {
         intel_obj->range_map_bo = drm_intel_bo_alloc(intel->bufmgr,
                                                      "range map",
                                                      length + extra,
                                                      alignment);
         if (!(access & GL_MAP_READ_BIT)) {
            drm_intel_gem_bo_map_gtt(intel_obj->range_map_bo);
         } else {
            drm_intel_bo_map(intel_obj->range_map_bo,
                             (access & GL_MAP_WRITE_BIT) != 0);
         }
         obj->Pointer = intel_obj->range_map_bo->virtual + extra;
      }
      return obj->Pointer;
   }

   if (access & GL_MAP_UNSYNCHRONIZED_BIT)
      drm_intel_gem_bo_map_unsynchronized(intel_obj->buffer);
   else if (!(access & GL_MAP_READ_BIT)) {
      drm_intel_gem_bo_map_gtt(intel_obj->buffer);
   } else {
      drm_intel_bo_map(intel_obj->buffer, (access & GL_MAP_WRITE_BIT) != 0);
   }

   obj->Pointer = intel_obj->buffer->virtual + offset;
   return obj->Pointer;
}

/* Ideally we'd use a BO to avoid taking up cache space for the temporary
 * data, but FlushMappedBufferRange may be followed by further writes to
 * the pointer, so we would have to re-map after emitting our blit, which
 * would defeat the point.
 */
static void
intel_bufferobj_flush_mapped_range(struct gl_context *ctx,
                                   GLintptr offset, GLsizeiptr length,
                                   struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
   drm_intel_bo *temp_bo;

   /* Unless we're in the range map using a temporary system buffer,
    * there's no work to do.
    */
   if (intel_obj->range_map_buffer == NULL)
      return;

   if (length == 0)
      return;

   temp_bo = drm_intel_bo_alloc(intel->bufmgr, "range map flush", length, 64);

   /* Use obj->Pointer instead of intel_obj->range_map_buffer because the
    * former points to the actual mapping while the latter may be offset to
    * meet alignment guarantees.
    */
   drm_intel_bo_subdata(temp_bo, 0, length, obj->Pointer);

   intel_emit_linear_blit(intel,
                          intel_obj->buffer, obj->Offset + offset,
                          temp_bo, 0,
                          length);

   drm_intel_bo_unreference(temp_bo);
}


/**
 * Called via glUnmapBuffer().
 */
static GLboolean
intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);
   assert(obj->Pointer);
   if (intel_obj->sys_buffer != NULL) {
      /* always keep the mapping around. */
   } else if (intel_obj->range_map_buffer != NULL) {
      /* Since we've emitted some blits to buffers that will (likely) be used
       * in rendering operations in other cache domains in this batch, emit a
       * flush.  Once again, we wish for a domain tracker in libdrm to cover
       * usage inside of a batchbuffer.
       */
      intel_batchbuffer_emit_mi_flush(intel);
      _mesa_align_free(intel_obj->range_map_buffer);
      intel_obj->range_map_buffer = NULL;
   } else if (intel_obj->range_map_bo != NULL) {
      const unsigned extra = obj->Pointer - intel_obj->range_map_bo->virtual;

      drm_intel_bo_unmap(intel_obj->range_map_bo);

      intel_emit_linear_blit(intel,
                             intel_obj->buffer, obj->Offset,
                             intel_obj->range_map_bo, extra,
                             obj->Length);

      /* Since we've emitted some blits to buffers that will (likely) be used
       * in rendering operations in other cache domains in this batch, emit a
       * flush.  Once again, we wish for a domain tracker in libdrm to cover
       * usage inside of a batchbuffer.
       */
      intel_batchbuffer_emit_mi_flush(intel);

      drm_intel_bo_unreference(intel_obj->range_map_bo);
      intel_obj->range_map_bo = NULL;
   } else if (intel_obj->buffer != NULL) {
      drm_intel_bo_unmap(intel_obj->buffer);
   }
   obj->Pointer = NULL;
   obj->Offset = 0;
   obj->Length = 0;

   return true;
}

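/**
 * Return a stand-alone BO backing the buffer object, creating one and
 * uploading the system-memory copy first if the object currently lives only
 * in malloc'd memory.
 */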
drm_intel_bo *
intel_bufferobj_buffer(struct intel_context *intel,
                       struct intel_buffer_object *intel_obj)
{
   if (intel_obj->source)
      release_buffer(intel_obj);

   if (intel_obj->buffer == NULL) {
      intel_bufferobj_alloc_buffer(intel, intel_obj);
      drm_intel_bo_subdata(intel_obj->buffer,
                           0, intel_obj->Base.Size,
                           intel_obj->sys_buffer);

      free(intel_obj->sys_buffer);
      intel_obj->sys_buffer = NULL;
      intel_obj->offset = 0;
   }

   return intel_obj->buffer;
}

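/* Streaming upload buffer: small pieces of data headed for the GPU are packed
 * into a shared "upload" BO instead of each getting its own allocation.
 * Writes are staged in intel->upload.buffer and flushed into the BO with
 * drm_intel_bo_subdata() when the staging buffer fills up or when
 * intel_upload_finish() is called.
 */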
#define INTEL_UPLOAD_SIZE (64*1024)

void
intel_upload_finish(struct intel_context *intel)
{
   if (!intel->upload.bo)
      return;

   if (intel->upload.buffer_len) {
      drm_intel_bo_subdata(intel->upload.bo,
                           intel->upload.buffer_offset,
                           intel->upload.buffer_len,
                           intel->upload.buffer);
      intel->upload.buffer_len = 0;
   }

   drm_intel_bo_unreference(intel->upload.bo);
   intel->upload.bo = NULL;
}

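/* Flush any pending staged data and start a fresh upload BO, sized to hold at
 * least the next piece of data.
 */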
static void wrap_buffers(struct intel_context *intel, GLuint size)
{
   intel_upload_finish(intel);

   if (size < INTEL_UPLOAD_SIZE)
      size = INTEL_UPLOAD_SIZE;

   intel->upload.bo = drm_intel_bo_alloc(intel->bufmgr, "upload", size, 0);
   intel->upload.offset = 0;
}

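/* Copy `size` bytes from `ptr` into the upload BO at the next offset aligned
 * to `align`, returning a new reference to that BO and the offset where the
 * data landed.  Small copies are coalesced in the staging buffer; anything
 * that wouldn't fit there goes straight to the BO via drm_intel_bo_subdata().
 */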
void intel_upload_data(struct intel_context *intel,
                       const void *ptr, GLuint size, GLuint align,
                       drm_intel_bo **return_bo,
                       GLuint *return_offset)
{
   GLuint base, delta;

   base = (intel->upload.offset + align - 1) / align * align;
   if (intel->upload.bo == NULL || base + size > intel->upload.bo->size) {
      wrap_buffers(intel, size);
      base = 0;
   }

   drm_intel_bo_reference(intel->upload.bo);
   *return_bo = intel->upload.bo;
   *return_offset = base;

   delta = base - intel->upload.offset;
   if (intel->upload.buffer_len &&
       intel->upload.buffer_len + delta + size > sizeof(intel->upload.buffer))
   {
      drm_intel_bo_subdata(intel->upload.bo,
                           intel->upload.buffer_offset,
                           intel->upload.buffer_len,
                           intel->upload.buffer);
      intel->upload.buffer_len = 0;
   }

   if (size < sizeof(intel->upload.buffer))
   {
      if (intel->upload.buffer_len == 0)
         intel->upload.buffer_offset = base;
      else
         intel->upload.buffer_len += delta;

      memcpy(intel->upload.buffer + intel->upload.buffer_len, ptr, size);
      intel->upload.buffer_len += size;
   }
   else
   {
      drm_intel_bo_subdata(intel->upload.bo, base, size, ptr);
   }

   intel->upload.offset = base + size;
}

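/* Return a BO usable as a read-only source for the buffer object's contents.
 * If the object only exists in system memory, its data is copied into the
 * shared upload BO and the object is flagged as "source" so that reference is
 * dropped again before any write to the buffer object.
 */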
drm_intel_bo *
intel_bufferobj_source(struct intel_context *intel,
                       struct intel_buffer_object *intel_obj,
                       GLuint align, GLuint *offset)
{
   if (intel_obj->buffer == NULL) {
      intel_upload_data(intel,
                        intel_obj->sys_buffer, intel_obj->Base.Size, align,
                        &intel_obj->buffer, &intel_obj->offset);
      intel_obj->source = 1;
   }

   *offset = intel_obj->offset;
   return intel_obj->buffer;
}

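/**
 * Called via glCopyBufferSubData().
 *
 * Buffers still living in system memory are copied with memcpy/memmove;
 * otherwise the copy is performed on the GPU with a linear blit.
 */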
static void
intel_bufferobj_copy_subdata(struct gl_context *ctx,
                             struct gl_buffer_object *src,
                             struct gl_buffer_object *dst,
                             GLintptr read_offset, GLintptr write_offset,
                             GLsizeiptr size)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_src = intel_buffer_object(src);
   struct intel_buffer_object *intel_dst = intel_buffer_object(dst);
   drm_intel_bo *src_bo, *dst_bo;
   GLuint src_offset;

   if (size == 0)
      return;

   /* If we're in system memory, just map and memcpy. */
   if (intel_src->sys_buffer || intel_dst->sys_buffer) {
      /* The same buffer may be used, but note that regions copied may
       * not overlap.
       */
      if (src == dst) {
         char *ptr = intel_bufferobj_map_range(ctx, 0, dst->Size,
                                               GL_MAP_READ_BIT |
                                               GL_MAP_WRITE_BIT,
                                               dst);
         memmove(ptr + write_offset, ptr + read_offset, size);
         intel_bufferobj_unmap(ctx, dst);
      } else {
         const char *src_ptr;
         char *dst_ptr;

         src_ptr = intel_bufferobj_map_range(ctx, 0, src->Size,
                                             GL_MAP_READ_BIT, src);
         dst_ptr = intel_bufferobj_map_range(ctx, 0, dst->Size,
                                             GL_MAP_WRITE_BIT, dst);

         memcpy(dst_ptr + write_offset, src_ptr + read_offset, size);

         intel_bufferobj_unmap(ctx, src);
         intel_bufferobj_unmap(ctx, dst);
      }
      return;
   }

   /* Otherwise, we have real BOs, so blit them. */

   dst_bo = intel_bufferobj_buffer(intel, intel_dst);
   src_bo = intel_bufferobj_source(intel, intel_src, 64, &src_offset);

   intel_emit_linear_blit(intel,
                          dst_bo, write_offset,
                          src_bo, read_offset + src_offset, size);

   /* Since we've emitted some blits to buffers that will (likely) be used
    * in rendering operations in other cache domains in this batch, emit a
    * flush.  Once again, we wish for a domain tracker in libdrm to cover
    * usage inside of a batchbuffer.
    */
   intel_batchbuffer_emit_mi_flush(intel);
}

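/* GL_APPLE_object_purgeable: mark a BO's backing pages as discardable via
 * I915_MADV_DONTNEED.  If the kernel has already reclaimed the pages, madvise
 * reports the buffer as not retained and we return GL_RELEASED_APPLE.
 */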
static GLenum
intel_buffer_purgeable(drm_intel_bo *buffer)
{
   int retained = 0;

   if (buffer != NULL)
      retained = drm_intel_bo_madvise (buffer, I915_MADV_DONTNEED);

   return retained ? GL_VOLATILE_APPLE : GL_RELEASED_APPLE;
}

static GLenum
intel_buffer_object_purgeable(struct gl_context * ctx,
                              struct gl_buffer_object *obj,
                              GLenum option)
{
   struct intel_buffer_object *intel_obj = intel_buffer_object (obj);

   if (intel_obj->buffer != NULL)
      return intel_buffer_purgeable(intel_obj->buffer);

   if (option == GL_RELEASED_APPLE) {
      free(intel_obj->sys_buffer);
      intel_obj->sys_buffer = NULL;

      return GL_RELEASED_APPLE;
   } else {
      /* XXX Create the buffer and madvise(MADV_DONTNEED)? */
      struct intel_context *intel = intel_context(ctx);
      drm_intel_bo *bo = intel_bufferobj_buffer(intel, intel_obj);

      return intel_buffer_purgeable(bo);
   }
}

static GLenum
intel_texture_object_purgeable(struct gl_context * ctx,
                               struct gl_texture_object *obj,
                               GLenum option)
{
   struct intel_texture_object *intel;

   (void) ctx;
   (void) option;

   intel = intel_texture_object(obj);
   if (intel->mt == NULL || intel->mt->region == NULL)
      return GL_RELEASED_APPLE;

   return intel_buffer_purgeable(intel->mt->region->bo);
}

static GLenum
intel_render_object_purgeable(struct gl_context * ctx,
                              struct gl_renderbuffer *obj,
                              GLenum option)
{
   struct intel_renderbuffer *intel;

   (void) ctx;
   (void) option;

   intel = intel_renderbuffer(obj);
   if (intel->mt == NULL)
      return GL_RELEASED_APPLE;

   return intel_buffer_purgeable(intel->mt->region->bo);
}

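/* Undo a purgeable marking with I915_MADV_WILLNEED.  If the pages were
 * discarded in the meantime, the contents are reported as GL_UNDEFINED_APPLE.
 */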
static GLenum
intel_buffer_unpurgeable(drm_intel_bo *buffer)
{
   int retained;

   retained = 0;
   if (buffer != NULL)
      retained = drm_intel_bo_madvise (buffer, I915_MADV_WILLNEED);

   return retained ? GL_RETAINED_APPLE : GL_UNDEFINED_APPLE;
}

static GLenum
intel_buffer_object_unpurgeable(struct gl_context * ctx,
                                struct gl_buffer_object *obj,
                                GLenum option)
{
   (void) ctx;
   (void) option;

   return intel_buffer_unpurgeable(intel_buffer_object (obj)->buffer);
}

static GLenum
intel_texture_object_unpurgeable(struct gl_context * ctx,
                                 struct gl_texture_object *obj,
                                 GLenum option)
{
   struct intel_texture_object *intel;

   (void) ctx;
   (void) option;

   intel = intel_texture_object(obj);
   if (intel->mt == NULL || intel->mt->region == NULL)
      return GL_UNDEFINED_APPLE;

   return intel_buffer_unpurgeable(intel->mt->region->bo);
}

static GLenum
intel_render_object_unpurgeable(struct gl_context * ctx,
                                struct gl_renderbuffer *obj,
                                GLenum option)
{
   struct intel_renderbuffer *intel;

   (void) ctx;
   (void) option;

   intel = intel_renderbuffer(obj);
   if (intel->mt == NULL)
      return GL_UNDEFINED_APPLE;

   return intel_buffer_unpurgeable(intel->mt->region->bo);
}

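/** Plug our buffer-object functions into the device driver function table. */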
void
intelInitBufferObjectFuncs(struct dd_function_table *functions)
{
   functions->NewBufferObject = intel_bufferobj_alloc;
   functions->DeleteBuffer = intel_bufferobj_free;
   functions->BufferData = intel_bufferobj_data;
   functions->BufferSubData = intel_bufferobj_subdata;
   functions->GetBufferSubData = intel_bufferobj_get_subdata;
   functions->MapBufferRange = intel_bufferobj_map_range;
   functions->FlushMappedBufferRange = intel_bufferobj_flush_mapped_range;
   functions->UnmapBuffer = intel_bufferobj_unmap;
   functions->CopyBufferSubData = intel_bufferobj_copy_subdata;

   functions->BufferObjectPurgeable = intel_buffer_object_purgeable;
   functions->TextureObjectPurgeable = intel_texture_object_purgeable;
   functions->RenderObjectPurgeable = intel_render_object_purgeable;

   functions->BufferObjectUnpurgeable = intel_buffer_object_unpurgeable;
   functions->TextureObjectUnpurgeable = intel_texture_object_unpurgeable;
   functions->RenderObjectUnpurgeable = intel_render_object_unpurgeable;
}