src/mesa/drivers/dri/intel/intel_buffer_objects.c
/**************************************************************************
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/


#include "main/imports.h"
#include "main/mfeatures.h"
#include "main/mtypes.h"
#include "main/macros.h"
#include "main/bufferobj.h"

#include "intel_blit.h"
#include "intel_buffer_objects.h"
#include "intel_batchbuffer.h"
#include "intel_context.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
#include "intel_regions.h"

static GLboolean
intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj);

/** Allocates a new drm_intel_bo to store the data for the buffer object. */
static void
intel_bufferobj_alloc_buffer(struct intel_context *intel,
                             struct intel_buffer_object *intel_obj)
{
   intel_obj->buffer = drm_intel_bo_alloc(intel->bufmgr, "bufferobj",
                                          intel_obj->Base.Size, 64);
}

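/* Drop our reference to the buffer object's drm BO and clear the offset and
 * source bookkeeping.  Any system-memory copy is left untouched.
 */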
static void
release_buffer(struct intel_buffer_object *intel_obj)
{
   drm_intel_bo_unreference(intel_obj->buffer);
   intel_obj->buffer = NULL;
   intel_obj->offset = 0;
   intel_obj->source = 0;
}

/**
 * There is some duplication between mesa's bufferobjects and our
 * bufmgr buffers.  Both have an integer handle and a hashtable to
 * lookup an opaque structure.  It would be nice if the handles and
 * internal structure were somehow shared.
 */
static struct gl_buffer_object *
intel_bufferobj_alloc(struct gl_context * ctx, GLuint name, GLenum target)
{
   struct intel_buffer_object *obj = CALLOC_STRUCT(intel_buffer_object);

   _mesa_initialize_buffer_object(ctx, &obj->Base, name, target);

   obj->buffer = NULL;

   return &obj->Base;
}

/**
 * Deallocate/free a vertex/pixel buffer object.
 * Called via glDeleteBuffersARB().
 */
static void
intel_bufferobj_free(struct gl_context * ctx, struct gl_buffer_object *obj)
{
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);

   /* Buffer objects are automatically unmapped when deleted according
    * to the spec, but Mesa doesn't do UnmapBuffer for us at context destroy
    * (though it does if you call glDeleteBuffers).
    */
   if (obj->Pointer)
      intel_bufferobj_unmap(ctx, obj);

   free(intel_obj->sys_buffer);

   drm_intel_bo_unreference(intel_obj->buffer);
   free(intel_obj);
}


/**
 * Allocate space for and store data in a buffer object.  Any data that was
 * previously stored in the buffer object is lost.  If data is NULL,
 * memory will be allocated, but no copy will occur.
 * Called via ctx->Driver.BufferData().
 * \return true for success, false if out of memory
 */
static GLboolean
intel_bufferobj_data(struct gl_context * ctx,
                     GLenum target,
                     GLsizeiptrARB size,
                     const GLvoid * data,
                     GLenum usage, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   /* Part of the ABI, but this function doesn't use it.
    */
#ifndef I915
   (void) target;
#endif

   intel_obj->Base.Size = size;
   intel_obj->Base.Usage = usage;

   assert(!obj->Pointer); /* Mesa should have unmapped it */

   if (intel_obj->buffer != NULL)
      release_buffer(intel_obj);

   free(intel_obj->sys_buffer);
   intel_obj->sys_buffer = NULL;

   if (size != 0) {
#ifdef I915
      /* On pre-965, stick VBOs in system memory, as we're always doing
       * swtnl with their contents anyway.
       */
      if (target == GL_ARRAY_BUFFER || target == GL_ELEMENT_ARRAY_BUFFER) {
         intel_obj->sys_buffer = malloc(size);
         if (intel_obj->sys_buffer != NULL) {
            if (data != NULL)
               memcpy(intel_obj->sys_buffer, data, size);
            return true;
         }
      }
#endif
      intel_bufferobj_alloc_buffer(intel, intel_obj);
      if (!intel_obj->buffer)
         return false;

      if (data != NULL)
         drm_intel_bo_subdata(intel_obj->buffer, 0, size, data);
   }

   return true;
}

/**
 * Replace data in a subrange of a buffer object.  If the data range
 * specified by size + offset extends beyond the end of the buffer or
 * if data is NULL, no copy is performed.
 * Called via glBufferSubDataARB().
 */
static void
intel_bufferobj_subdata(struct gl_context * ctx,
                        GLintptrARB offset,
                        GLsizeiptrARB size,
                        const GLvoid * data, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
   bool busy;

   if (size == 0)
      return;

   assert(intel_obj);

   /* If we have a single copy in system memory, update that */
   if (intel_obj->sys_buffer) {
      if (intel_obj->source)
         release_buffer(intel_obj);

      if (intel_obj->buffer == NULL) {
         memcpy((char *)intel_obj->sys_buffer + offset, data, size);
         return;
      }

      free(intel_obj->sys_buffer);
      intel_obj->sys_buffer = NULL;
   }

   /* Otherwise we need to update the copy in video memory. */
   busy =
      drm_intel_bo_busy(intel_obj->buffer) ||
      drm_intel_bo_references(intel->batch.bo, intel_obj->buffer);

   if (busy) {
      if (size == intel_obj->Base.Size) {
         /* Replace the current busy bo with fresh data. */
         drm_intel_bo_unreference(intel_obj->buffer);
         intel_bufferobj_alloc_buffer(intel, intel_obj);
         drm_intel_bo_subdata(intel_obj->buffer, 0, size, data);
      } else {
         /* Use the blitter to upload the new data. */
         drm_intel_bo *temp_bo =
            drm_intel_bo_alloc(intel->bufmgr, "subdata temp", size, 64);

         drm_intel_bo_subdata(temp_bo, 0, size, data);

         intel_emit_linear_blit(intel,
                                intel_obj->buffer, offset,
                                temp_bo, 0,
                                size);

         drm_intel_bo_unreference(temp_bo);
      }
   } else {
      drm_intel_bo_subdata(intel_obj->buffer, offset, size, data);
   }
}

/**
 * Called via glGetBufferSubDataARB().
 */
static void
intel_bufferobj_get_subdata(struct gl_context * ctx,
                            GLintptrARB offset,
                            GLsizeiptrARB size,
                            GLvoid * data, struct gl_buffer_object *obj)
{
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
   struct intel_context *intel = intel_context(ctx);

   assert(intel_obj);
   if (intel_obj->sys_buffer)
      memcpy(data, (char *)intel_obj->sys_buffer + offset, size);
   else {
      if (drm_intel_bo_references(intel->batch.bo, intel_obj->buffer)) {
         intel_batchbuffer_flush(intel);
      }
      drm_intel_bo_get_subdata(intel_obj->buffer, offset, size, data);
   }
}


/**
 * Called via glMapBufferRange and glMapBuffer
 *
 * The goal of this extension is to allow apps to accumulate their rendering
 * at the same time as they accumulate their buffer object.  Without it,
 * you'd end up blocking on execution of rendering every time you mapped
 * the buffer to put new data in.
 *
 * We support it in 3 ways: If unsynchronized, then don't bother
 * flushing the batchbuffer before mapping the buffer, which can save blocking
 * in many cases.  If we would still block, and they allow the whole buffer
 * to be invalidated, then just allocate a new buffer to replace the old one.
 * If not, and we'd block, and they allow the subrange of the buffer to be
 * invalidated, then we can make a new little BO, let them write into that,
 * and blit it into the real BO at unmap time.
 */
static void *
intel_bufferobj_map_range(struct gl_context * ctx,
                          GLintptr offset, GLsizeiptr length,
                          GLbitfield access, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);

   /* _mesa_MapBufferRange (GL entrypoint) sets these, but the vbo module also
    * internally uses our functions directly.
    */
   obj->Offset = offset;
   obj->Length = length;
   obj->AccessFlags = access;

   if (intel_obj->sys_buffer) {
      const bool read_only =
         (access & (GL_MAP_READ_BIT | GL_MAP_WRITE_BIT)) == GL_MAP_READ_BIT;

      if (!read_only && intel_obj->source)
         release_buffer(intel_obj);

      if (!intel_obj->buffer || intel_obj->source) {
         obj->Pointer = intel_obj->sys_buffer + offset;
         return obj->Pointer;
      }

      free(intel_obj->sys_buffer);
      intel_obj->sys_buffer = NULL;
   }

   if (intel_obj->buffer == NULL) {
      obj->Pointer = NULL;
      return NULL;
   }

   /* If the access is synchronized (like a normal buffer mapping), then get
    * things flushed out so the later mapping syncs appropriately through GEM.
    * If the user doesn't care about existing buffer contents and mapping would
    * cause us to block, then throw out the old buffer.
    *
    * If they set INVALIDATE_BUFFER, we can pitch the current contents to
    * achieve the required synchronization.
    */
   if (!(access & GL_MAP_UNSYNCHRONIZED_BIT)) {
      if (drm_intel_bo_references(intel->batch.bo, intel_obj->buffer)) {
         if (access & GL_MAP_INVALIDATE_BUFFER_BIT) {
            drm_intel_bo_unreference(intel_obj->buffer);
            intel_bufferobj_alloc_buffer(intel, intel_obj);
         } else {
            intel_flush(ctx);
         }
      } else if (drm_intel_bo_busy(intel_obj->buffer) &&
                 (access & GL_MAP_INVALIDATE_BUFFER_BIT)) {
         drm_intel_bo_unreference(intel_obj->buffer);
         intel_bufferobj_alloc_buffer(intel, intel_obj);
      }
   }

   /* If the user is mapping a range of an active buffer object but
    * doesn't require the current contents of that range, make a new
    * BO, and we'll copy what they put in there out at unmap or
    * FlushRange time.
    */
   if ((access & GL_MAP_INVALIDATE_RANGE_BIT) &&
       drm_intel_bo_busy(intel_obj->buffer)) {
      if (access & GL_MAP_FLUSH_EXPLICIT_BIT) {
         intel_obj->range_map_buffer = malloc(length);
         obj->Pointer = intel_obj->range_map_buffer;
      } else {
         intel_obj->range_map_bo = drm_intel_bo_alloc(intel->bufmgr,
                                                      "range map",
                                                      length, 64);
         if (!(access & GL_MAP_READ_BIT)) {
            drm_intel_gem_bo_map_gtt(intel_obj->range_map_bo);
         } else {
            drm_intel_bo_map(intel_obj->range_map_bo,
                             (access & GL_MAP_WRITE_BIT) != 0);
         }
         obj->Pointer = intel_obj->range_map_bo->virtual;
      }
      return obj->Pointer;
   }

   if (access & GL_MAP_UNSYNCHRONIZED_BIT)
      drm_intel_gem_bo_map_unsynchronized(intel_obj->buffer);
   else if (!(access & GL_MAP_READ_BIT)) {
      drm_intel_gem_bo_map_gtt(intel_obj->buffer);
   } else {
      drm_intel_bo_map(intel_obj->buffer, (access & GL_MAP_WRITE_BIT) != 0);
   }

   obj->Pointer = intel_obj->buffer->virtual + offset;
   return obj->Pointer;
}

/* Ideally we'd use a BO to avoid taking up cache space for the temporary
 * data, but FlushMappedBufferRange may be followed by further writes to
 * the pointer, so we would have to re-map after emitting our blit, which
 * would defeat the point.
 */
static void
intel_bufferobj_flush_mapped_range(struct gl_context *ctx,
                                   GLintptr offset, GLsizeiptr length,
                                   struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
   drm_intel_bo *temp_bo;

   /* Unless we're in the range map using a temporary system buffer,
    * there's no work to do.
    */
   if (intel_obj->range_map_buffer == NULL)
      return;

   if (length == 0)
      return;

   temp_bo = drm_intel_bo_alloc(intel->bufmgr, "range map flush", length, 64);

   drm_intel_bo_subdata(temp_bo, 0, length, intel_obj->range_map_buffer);

   intel_emit_linear_blit(intel,
                          intel_obj->buffer, obj->Offset + offset,
                          temp_bo, 0,
                          length);

   drm_intel_bo_unreference(temp_bo);
}


/**
 * Called via glUnmapBuffer().
 */
static GLboolean
intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);
   assert(obj->Pointer);
   if (intel_obj->sys_buffer != NULL) {
      /* always keep the mapping around. */
   } else if (intel_obj->range_map_buffer != NULL) {
      /* Since we've emitted some blits to buffers that will (likely) be used
       * in rendering operations in other cache domains in this batch, emit a
       * flush.  Once again, we wish for a domain tracker in libdrm to cover
       * usage inside of a batchbuffer.
       */
      intel_batchbuffer_emit_mi_flush(intel);
      free(intel_obj->range_map_buffer);
      intel_obj->range_map_buffer = NULL;
   } else if (intel_obj->range_map_bo != NULL) {
      drm_intel_bo_unmap(intel_obj->range_map_bo);

      intel_emit_linear_blit(intel,
                             intel_obj->buffer, obj->Offset,
                             intel_obj->range_map_bo, 0,
                             obj->Length);

      /* Since we've emitted some blits to buffers that will (likely) be used
       * in rendering operations in other cache domains in this batch, emit a
       * flush.  Once again, we wish for a domain tracker in libdrm to cover
       * usage inside of a batchbuffer.
       */
      intel_batchbuffer_emit_mi_flush(intel);

      drm_intel_bo_unreference(intel_obj->range_map_bo);
      intel_obj->range_map_bo = NULL;
   } else if (intel_obj->buffer != NULL) {
      drm_intel_bo_unmap(intel_obj->buffer);
   }
   obj->Pointer = NULL;
   obj->Offset = 0;
   obj->Length = 0;

   return true;
}

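/* Get (allocating if necessary) the drm BO backing this buffer object,
 * migrating any system-memory copy into it first.  The "flag" argument is
 * not examined here.
 */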
drm_intel_bo *
intel_bufferobj_buffer(struct intel_context *intel,
                       struct intel_buffer_object *intel_obj,
                       GLuint flag)
{
   if (intel_obj->source)
      release_buffer(intel_obj);

   if (intel_obj->buffer == NULL) {
      intel_bufferobj_alloc_buffer(intel, intel_obj);
      drm_intel_bo_subdata(intel_obj->buffer,
                           0, intel_obj->Base.Size,
                           intel_obj->sys_buffer);

      free(intel_obj->sys_buffer);
      intel_obj->sys_buffer = NULL;
      intel_obj->offset = 0;
   }

   return intel_obj->buffer;
}

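/* Streaming upload helpers: small pieces of CPU-generated data are packed
 * into a shared "upload" BO, with a small system-memory staging buffer used
 * to batch consecutive copies before flushing them with
 * drm_intel_bo_subdata().  INTEL_UPLOAD_SIZE is the default size of that
 * shared BO.
 */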
#define INTEL_UPLOAD_SIZE (64*1024)

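/* Flush any data still sitting in the staging buffer into the current upload
 * BO, then drop our reference to that BO.
 */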
void
intel_upload_finish(struct intel_context *intel)
{
   if (!intel->upload.bo)
      return;

   if (intel->upload.buffer_len) {
      drm_intel_bo_subdata(intel->upload.bo,
                           intel->upload.buffer_offset,
                           intel->upload.buffer_len,
                           intel->upload.buffer);
      intel->upload.buffer_len = 0;
   }

   drm_intel_bo_unreference(intel->upload.bo);
   intel->upload.bo = NULL;
}

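/* Retire the current upload BO and start a fresh one of at least
 * INTEL_UPLOAD_SIZE bytes (or larger, if a single allocation needs more).
 */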
static void wrap_buffers(struct intel_context *intel, GLuint size)
{
   intel_upload_finish(intel);

   if (size < INTEL_UPLOAD_SIZE)
      size = INTEL_UPLOAD_SIZE;

   intel->upload.bo = drm_intel_bo_alloc(intel->bufmgr, "upload", size, 0);
   intel->upload.offset = 0;
}

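/* Copy "size" bytes from "ptr" into the shared upload BO at the next
 * "align"-aligned offset.  Small copies are staged in the system-memory
 * buffer and flushed lazily; large copies go straight to the BO with
 * drm_intel_bo_subdata().  On return, *return_bo holds a new reference to
 * the upload BO (the caller must unreference it) and *return_offset is the
 * byte offset of the data within it.  A typical caller would do, roughly:
 *
 *    drm_intel_bo *bo;
 *    GLuint offset;
 *    intel_upload_data(intel, vertices, vertex_size, 64, &bo, &offset);
 *    ...emit a relocation to (bo, offset)...
 *    drm_intel_bo_unreference(bo);
 *
 * (Illustrative sketch only; "vertices" and "vertex_size" are hypothetical.)
 */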
void intel_upload_data(struct intel_context *intel,
                       const void *ptr, GLuint size, GLuint align,
                       drm_intel_bo **return_bo,
                       GLuint *return_offset)
{
   GLuint base, delta;

   base = (intel->upload.offset + align - 1) / align * align;
   if (intel->upload.bo == NULL || base + size > intel->upload.bo->size) {
      wrap_buffers(intel, size);
      base = 0;
   }

   drm_intel_bo_reference(intel->upload.bo);
   *return_bo = intel->upload.bo;
   *return_offset = base;

   delta = base - intel->upload.offset;
   if (intel->upload.buffer_len &&
       intel->upload.buffer_len + delta + size > sizeof(intel->upload.buffer))
   {
      drm_intel_bo_subdata(intel->upload.bo,
                           intel->upload.buffer_offset,
                           intel->upload.buffer_len,
                           intel->upload.buffer);
      intel->upload.buffer_len = 0;
   }

   if (size < sizeof(intel->upload.buffer))
   {
      if (intel->upload.buffer_len == 0)
         intel->upload.buffer_offset = base;
      else
         intel->upload.buffer_len += delta;

      memcpy(intel->upload.buffer + intel->upload.buffer_len, ptr, size);
      intel->upload.buffer_len += size;
   }
   else
   {
      drm_intel_bo_subdata(intel->upload.bo, base, size, ptr);
   }

   intel->upload.offset = base + size;
}

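/* Reserve "size" bytes (aligned to "align") in the upload stream and return
 * a CPU pointer to write into: either a window into the system-memory
 * staging buffer (for small sizes) or a temporary malloc'ed block for larger
 * ones.  Must be paired with intel_upload_unmap() using the same size and
 * alignment so the data actually lands in the upload BO.
 */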
void *intel_upload_map(struct intel_context *intel, GLuint size, GLuint align)
{
   GLuint base, delta;
   char *ptr;

   base = (intel->upload.offset + align - 1) / align * align;
   if (intel->upload.bo == NULL || base + size > intel->upload.bo->size) {
      wrap_buffers(intel, size);
      base = 0;
   }

   delta = base - intel->upload.offset;
   if (intel->upload.buffer_len &&
       intel->upload.buffer_len + delta + size > sizeof(intel->upload.buffer))
   {
      drm_intel_bo_subdata(intel->upload.bo,
                           intel->upload.buffer_offset,
                           intel->upload.buffer_len,
                           intel->upload.buffer);
      intel->upload.buffer_len = 0;
   }

   if (size <= sizeof(intel->upload.buffer)) {
      if (intel->upload.buffer_len == 0)
         intel->upload.buffer_offset = base;
      else
         intel->upload.buffer_len += delta;

      ptr = intel->upload.buffer + intel->upload.buffer_len;
      intel->upload.buffer_len += size;
   } else
      ptr = malloc(size);

   return ptr;
}

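/* Finish a map started with intel_upload_map(): for large (malloc'ed) maps,
 * copy the data into the upload BO and free the temporary block.  Returns a
 * referenced upload BO and the offset of the data, just like
 * intel_upload_data().
 */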
void intel_upload_unmap(struct intel_context *intel,
                        const void *ptr, GLuint size, GLuint align,
                        drm_intel_bo **return_bo,
                        GLuint *return_offset)
{
   GLuint base;

   base = (intel->upload.offset + align - 1) / align * align;
   if (size > sizeof(intel->upload.buffer)) {
      drm_intel_bo_subdata(intel->upload.bo, base, size, ptr);
      free((void*)ptr);
   }

   drm_intel_bo_reference(intel->upload.bo);
   *return_bo = intel->upload.bo;
   *return_offset = base;

   intel->upload.offset = base + size;
}

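/* Return a BO and offset that can be used as the source of a read for this
 * buffer object.  If the object currently lives only in system memory, its
 * contents are streamed into the shared upload BO and the object is marked
 * as sourced from it; writers drop that source again via release_buffer()
 * before modifying the object.
 */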
drm_intel_bo *
intel_bufferobj_source(struct intel_context *intel,
                       struct intel_buffer_object *intel_obj,
                       GLuint align, GLuint *offset)
{
   if (intel_obj->buffer == NULL) {
      intel_upload_data(intel,
                        intel_obj->sys_buffer, intel_obj->Base.Size, align,
                        &intel_obj->buffer, &intel_obj->offset);
      intel_obj->source = 1;
   }

   *offset = intel_obj->offset;
   return intel_obj->buffer;
}

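/**
 * Called via glCopyBufferSubData().
 *
 * Copies involving system-memory buffers are done with map + memcpy/memmove;
 * copies between real BOs are done with the blitter.
 */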
static void
intel_bufferobj_copy_subdata(struct gl_context *ctx,
                             struct gl_buffer_object *src,
                             struct gl_buffer_object *dst,
                             GLintptr read_offset, GLintptr write_offset,
                             GLsizeiptr size)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_src = intel_buffer_object(src);
   struct intel_buffer_object *intel_dst = intel_buffer_object(dst);
   drm_intel_bo *src_bo, *dst_bo;
   GLuint src_offset;

   if (size == 0)
      return;

   /* If we're in system memory, just map and memcpy. */
   if (intel_src->sys_buffer || intel_dst->sys_buffer) {
      /* The same buffer may be used, but note that regions copied may
       * not overlap.
       */
      if (src == dst) {
         char *ptr = intel_bufferobj_map_range(ctx, 0, dst->Size,
                                               GL_MAP_READ_BIT |
                                               GL_MAP_WRITE_BIT,
                                               dst);
         memmove(ptr + write_offset, ptr + read_offset, size);
         intel_bufferobj_unmap(ctx, dst);
      } else {
         const char *src_ptr;
         char *dst_ptr;

         src_ptr = intel_bufferobj_map_range(ctx, 0, src->Size,
                                             GL_MAP_READ_BIT, src);
         dst_ptr = intel_bufferobj_map_range(ctx, 0, dst->Size,
                                             GL_MAP_WRITE_BIT, dst);

         memcpy(dst_ptr + write_offset, src_ptr + read_offset, size);

         intel_bufferobj_unmap(ctx, src);
         intel_bufferobj_unmap(ctx, dst);
      }
      return;
   }

   /* Otherwise, we have real BOs, so blit them. */

   dst_bo = intel_bufferobj_buffer(intel, intel_dst, INTEL_WRITE_PART);
   src_bo = intel_bufferobj_source(intel, intel_src, 64, &src_offset);

   intel_emit_linear_blit(intel,
                          dst_bo, write_offset,
                          src_bo, read_offset + src_offset, size);

   /* Since we've emitted some blits to buffers that will (likely) be used
    * in rendering operations in other cache domains in this batch, emit a
    * flush.  Once again, we wish for a domain tracker in libdrm to cover
    * usage inside of a batchbuffer.
    */
   intel_batchbuffer_emit_mi_flush(intel);
}

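/* GL_APPLE_object_purgeable support: these hooks translate the purgeable /
 * unpurgeable requests into drm_intel_bo_madvise() calls on the underlying
 * BOs, letting the kernel discard their backing pages under memory pressure.
 */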
#if FEATURE_APPLE_object_purgeable
static GLenum
intel_buffer_purgeable(drm_intel_bo *buffer)
{
   int retained = 0;

   if (buffer != NULL)
      retained = drm_intel_bo_madvise (buffer, I915_MADV_DONTNEED);

   return retained ? GL_VOLATILE_APPLE : GL_RELEASED_APPLE;
}

static GLenum
intel_buffer_object_purgeable(struct gl_context * ctx,
                              struct gl_buffer_object *obj,
                              GLenum option)
{
   struct intel_buffer_object *intel_obj = intel_buffer_object (obj);

   if (intel_obj->buffer != NULL)
      return intel_buffer_purgeable(intel_obj->buffer);

   if (option == GL_RELEASED_APPLE) {
      if (intel_obj->sys_buffer != NULL) {
         free(intel_obj->sys_buffer);
         intel_obj->sys_buffer = NULL;
      }

      return GL_RELEASED_APPLE;
   } else {
      /* XXX Create the buffer and madvise(MADV_DONTNEED)? */
      struct intel_context *intel = intel_context(ctx);
      drm_intel_bo *bo = intel_bufferobj_buffer(intel, intel_obj, INTEL_READ);

      return intel_buffer_purgeable(bo);
   }
}

static GLenum
intel_texture_object_purgeable(struct gl_context * ctx,
                               struct gl_texture_object *obj,
                               GLenum option)
{
   struct intel_texture_object *intel;

   (void) ctx;
   (void) option;

   intel = intel_texture_object(obj);
   if (intel->mt == NULL || intel->mt->region == NULL)
      return GL_RELEASED_APPLE;

   return intel_buffer_purgeable(intel->mt->region->bo);
}

static GLenum
intel_render_object_purgeable(struct gl_context * ctx,
                              struct gl_renderbuffer *obj,
                              GLenum option)
{
   struct intel_renderbuffer *intel;

   (void) ctx;
   (void) option;

   intel = intel_renderbuffer(obj);
   if (intel->mt == NULL)
      return GL_RELEASED_APPLE;

   return intel_buffer_purgeable(intel->mt->region->bo);
}

static GLenum
intel_buffer_unpurgeable(drm_intel_bo *buffer)
{
   int retained;

   retained = 0;
   if (buffer != NULL)
      retained = drm_intel_bo_madvise (buffer, I915_MADV_WILLNEED);

   return retained ? GL_RETAINED_APPLE : GL_UNDEFINED_APPLE;
}

static GLenum
intel_buffer_object_unpurgeable(struct gl_context * ctx,
                                struct gl_buffer_object *obj,
                                GLenum option)
{
   (void) ctx;
   (void) option;

   return intel_buffer_unpurgeable(intel_buffer_object (obj)->buffer);
}

static GLenum
intel_texture_object_unpurgeable(struct gl_context * ctx,
                                 struct gl_texture_object *obj,
                                 GLenum option)
{
   struct intel_texture_object *intel;

   (void) ctx;
   (void) option;

   intel = intel_texture_object(obj);
   if (intel->mt == NULL || intel->mt->region == NULL)
      return GL_UNDEFINED_APPLE;

   return intel_buffer_unpurgeable(intel->mt->region->bo);
}

static GLenum
intel_render_object_unpurgeable(struct gl_context * ctx,
                                struct gl_renderbuffer *obj,
                                GLenum option)
{
   struct intel_renderbuffer *intel;

   (void) ctx;
   (void) option;

   intel = intel_renderbuffer(obj);
   if (intel->mt == NULL)
      return GL_UNDEFINED_APPLE;

   return intel_buffer_unpurgeable(intel->mt->region->bo);
}
#endif

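/**
 * Plug our buffer object functions into the driver function table.
 */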
void
intelInitBufferObjectFuncs(struct dd_function_table *functions)
{
   functions->NewBufferObject = intel_bufferobj_alloc;
   functions->DeleteBuffer = intel_bufferobj_free;
   functions->BufferData = intel_bufferobj_data;
   functions->BufferSubData = intel_bufferobj_subdata;
   functions->GetBufferSubData = intel_bufferobj_get_subdata;
   functions->MapBufferRange = intel_bufferobj_map_range;
   functions->FlushMappedBufferRange = intel_bufferobj_flush_mapped_range;
   functions->UnmapBuffer = intel_bufferobj_unmap;
   functions->CopyBufferSubData = intel_bufferobj_copy_subdata;

#if FEATURE_APPLE_object_purgeable
   functions->BufferObjectPurgeable = intel_buffer_object_purgeable;
   functions->TextureObjectPurgeable = intel_texture_object_purgeable;
   functions->RenderObjectPurgeable = intel_render_object_purgeable;

   functions->BufferObjectUnpurgeable = intel_buffer_object_unpurgeable;
   functions->TextureObjectUnpurgeable = intel_texture_object_unpurgeable;
   functions->RenderObjectUnpurgeable = intel_render_object_unpurgeable;
#endif
}