1 /**************************************************************************
3 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
29 #include "main/imports.h"
30 #include "main/mfeatures.h"
31 #include "main/mtypes.h"
32 #include "main/macros.h"
33 #include "main/bufferobj.h"
35 #include "intel_blit.h"
36 #include "intel_buffer_objects.h"
37 #include "intel_batchbuffer.h"
38 #include "intel_context.h"
39 #include "intel_fbo.h"
40 #include "intel_mipmap_tree.h"
41 #include "intel_regions.h"
/* Forward declaration: intel_bufferobj_free() must be able to unmap a
 * still-mapped buffer before releasing it.
 * NOTE(review): the storage-class/return-type line of this declaration
 * ("static GLboolean") appears to have been lost in an earlier edit —
 * confirm against the definition further down in this file.
 */
intel_bufferobj_unmap(struct gl_context * ctx,
                      GLenum target, struct gl_buffer_object *obj);
47 /** Allocates a new drm_intel_bo to store the data for the buffer object. */
49 intel_bufferobj_alloc_buffer(struct intel_context
*intel
,
50 struct intel_buffer_object
*intel_obj
)
52 intel_obj
->buffer
= drm_intel_bo_alloc(intel
->bufmgr
, "bufferobj",
53 intel_obj
->Base
.Size
, 64);
57 * There is some duplication between mesa's bufferobjects and our
58 * bufmgr buffers. Both have an integer handle and a hashtable to
59 * lookup an opaque structure. It would be nice if the handles and
60 * internal structure where somehow shared.
62 static struct gl_buffer_object
*
63 intel_bufferobj_alloc(struct gl_context
* ctx
, GLuint name
, GLenum target
)
65 struct intel_buffer_object
*obj
= CALLOC_STRUCT(intel_buffer_object
);
67 _mesa_initialize_buffer_object(&obj
->Base
, name
, target
);
74 /* Break the COW tie to the region. The region gets to keep the data.
77 intel_bufferobj_release_region(struct intel_context
*intel
,
78 struct intel_buffer_object
*intel_obj
)
80 assert(intel_obj
->region
->buffer
== intel_obj
->buffer
);
81 intel_obj
->region
->pbo
= NULL
;
82 intel_obj
->region
= NULL
;
84 drm_intel_bo_unreference(intel_obj
->buffer
);
85 intel_obj
->buffer
= NULL
;
88 /* Break the COW tie to the region. Both the pbo and the region end
89 * up with a copy of the data.
92 intel_bufferobj_cow(struct intel_context
*intel
,
93 struct intel_buffer_object
*intel_obj
)
95 assert(intel_obj
->region
);
96 intel_region_cow(intel
, intel_obj
->region
);
101 * Deallocate/free a vertex/pixel buffer object.
102 * Called via glDeleteBuffersARB().
105 intel_bufferobj_free(struct gl_context
* ctx
, struct gl_buffer_object
*obj
)
107 struct intel_context
*intel
= intel_context(ctx
);
108 struct intel_buffer_object
*intel_obj
= intel_buffer_object(obj
);
112 /* Buffer objects are automatically unmapped when deleting according
113 * to the spec, but Mesa doesn't do UnmapBuffer for us at context destroy
114 * (though it does if you call glDeleteBuffers)
117 intel_bufferobj_unmap(ctx
, 0, obj
);
119 free(intel_obj
->sys_buffer
);
120 if (intel_obj
->region
) {
121 intel_bufferobj_release_region(intel
, intel_obj
);
123 else if (intel_obj
->buffer
) {
124 drm_intel_bo_unreference(intel_obj
->buffer
);
133 * Allocate space for and store data in a buffer object. Any data that was
134 * previously stored in the buffer object is lost. If data is NULL,
135 * memory will be allocated, but no copy will occur.
136 * Called via ctx->Driver.BufferData().
137 * \return GL_TRUE for success, GL_FALSE if out of memory
140 intel_bufferobj_data(struct gl_context
* ctx
,
144 GLenum usage
, struct gl_buffer_object
*obj
)
146 struct intel_context
*intel
= intel_context(ctx
);
147 struct intel_buffer_object
*intel_obj
= intel_buffer_object(obj
);
149 intel_obj
->Base
.Size
= size
;
150 intel_obj
->Base
.Usage
= usage
;
152 assert(!obj
->Pointer
); /* Mesa should have unmapped it */
154 if (intel_obj
->region
)
155 intel_bufferobj_release_region(intel
, intel_obj
);
157 if (intel_obj
->buffer
!= NULL
) {
158 drm_intel_bo_unreference(intel_obj
->buffer
);
159 intel_obj
->buffer
= NULL
;
160 intel_obj
->source
= 0;
162 free(intel_obj
->sys_buffer
);
163 intel_obj
->sys_buffer
= NULL
;
166 if (usage
== GL_DYNAMIC_DRAW
168 /* On pre-965, stick VBOs in system memory, as we're always doing
169 * swtnl with their contents anyway.
171 || target
== GL_ARRAY_BUFFER
|| target
== GL_ELEMENT_ARRAY_BUFFER
175 intel_obj
->sys_buffer
= malloc(size
);
176 if (intel_obj
->sys_buffer
!= NULL
) {
178 memcpy(intel_obj
->sys_buffer
, data
, size
);
182 intel_bufferobj_alloc_buffer(intel
, intel_obj
);
183 if (!intel_obj
->buffer
)
187 drm_intel_bo_subdata(intel_obj
->buffer
, 0, size
, data
);
195 * Replace data in a subrange of buffer object. If the data range
196 * specified by size + offset extends beyond the end of the buffer or
197 * if data is NULL, no copy is performed.
198 * Called via glBufferSubDataARB().
201 intel_bufferobj_subdata(struct gl_context
* ctx
,
205 const GLvoid
* data
, struct gl_buffer_object
*obj
)
207 struct intel_context
*intel
= intel_context(ctx
);
208 struct intel_buffer_object
*intel_obj
= intel_buffer_object(obj
);
215 if (intel_obj
->region
)
216 intel_bufferobj_cow(intel
, intel_obj
);
218 if (intel_obj
->sys_buffer
) {
219 if (intel_obj
->buffer
) {
220 drm_intel_bo_unreference(intel_obj
->buffer
);
221 intel_obj
->buffer
= NULL
;
222 intel_obj
->source
= 0;
224 memcpy((char *)intel_obj
->sys_buffer
+ offset
, data
, size
);
227 drm_intel_bo_busy(intel_obj
->buffer
) ||
228 drm_intel_bo_references(intel
->batch
.bo
, intel_obj
->buffer
);
230 /* replace the current busy bo with fresh data */
231 if (busy
&& size
== intel_obj
->Base
.Size
) {
232 drm_intel_bo_unreference(intel_obj
->buffer
);
233 intel_bufferobj_alloc_buffer(intel
, intel_obj
);
234 drm_intel_bo_subdata(intel_obj
->buffer
, 0, size
, data
);
235 } else if (intel
->gen
< 6) {
237 drm_intel_bo
*temp_bo
;
239 temp_bo
= drm_intel_bo_alloc(intel
->bufmgr
, "subdata temp", size
, 64);
241 drm_intel_bo_subdata(temp_bo
, 0, size
, data
);
243 intel_emit_linear_blit(intel
,
244 intel_obj
->buffer
, offset
,
248 drm_intel_bo_unreference(temp_bo
);
250 drm_intel_bo_subdata(intel_obj
->buffer
, offset
, size
, data
);
253 /* Can't use the blit to modify the buffer in the middle of batch. */
254 if (drm_intel_bo_references(intel
->batch
.bo
, intel_obj
->buffer
)) {
255 intel_batchbuffer_flush(intel
);
257 drm_intel_bo_subdata(intel_obj
->buffer
, offset
, size
, data
);
264 * Called via glGetBufferSubDataARB().
267 intel_bufferobj_get_subdata(struct gl_context
* ctx
,
271 GLvoid
* data
, struct gl_buffer_object
*obj
)
273 struct intel_buffer_object
*intel_obj
= intel_buffer_object(obj
);
276 if (intel_obj
->sys_buffer
)
277 memcpy(data
, (char *)intel_obj
->sys_buffer
+ offset
, size
);
279 drm_intel_bo_get_subdata(intel_obj
->buffer
, offset
, size
, data
);
285 * Called via glMapBufferARB().
288 intel_bufferobj_map(struct gl_context
* ctx
,
290 GLenum access
, struct gl_buffer_object
*obj
)
292 struct intel_context
*intel
= intel_context(ctx
);
293 struct intel_buffer_object
*intel_obj
= intel_buffer_object(obj
);
294 GLboolean read_only
= (access
== GL_READ_ONLY_ARB
);
295 GLboolean write_only
= (access
== GL_WRITE_ONLY_ARB
);
299 if (intel_obj
->sys_buffer
) {
300 if (!read_only
&& intel_obj
->buffer
) {
301 drm_intel_bo_unreference(intel_obj
->buffer
);
302 intel_obj
->buffer
= NULL
;
303 intel_obj
->source
= 0;
305 obj
->Pointer
= intel_obj
->sys_buffer
;
306 obj
->Length
= obj
->Size
;
311 /* Flush any existing batchbuffer that might reference this data. */
312 if (drm_intel_bo_references(intel
->batch
.bo
, intel_obj
->buffer
))
315 if (intel_obj
->region
)
316 intel_bufferobj_cow(intel
, intel_obj
);
318 if (intel_obj
->buffer
== NULL
) {
324 drm_intel_gem_bo_map_gtt(intel_obj
->buffer
);
325 intel_obj
->mapped_gtt
= GL_TRUE
;
327 drm_intel_bo_map(intel_obj
->buffer
, !read_only
);
328 intel_obj
->mapped_gtt
= GL_FALSE
;
331 obj
->Pointer
= intel_obj
->buffer
->virtual;
332 obj
->Length
= obj
->Size
;
339 * Called via glMapBufferRange().
341 * The goal of this extension is to allow apps to accumulate their rendering
342 * at the same time as they accumulate their buffer object. Without it,
343 * you'd end up blocking on execution of rendering every time you mapped
344 * the buffer to put new data in.
346 * We support it in 3 ways: If unsynchronized, then don't bother
347 * flushing the batchbuffer before mapping the buffer, which can save blocking
348 * in many cases. If we would still block, and they allow the whole buffer
349 * to be invalidated, then just allocate a new buffer to replace the old one.
350 * If not, and we'd block, and they allow the subrange of the buffer to be
351 * invalidated, then we can make a new little BO, let them write into that,
352 * and blit it into the real BO at unmap time.
355 intel_bufferobj_map_range(struct gl_context
* ctx
,
356 GLenum target
, GLintptr offset
, GLsizeiptr length
,
357 GLbitfield access
, struct gl_buffer_object
*obj
)
359 struct intel_context
*intel
= intel_context(ctx
);
360 struct intel_buffer_object
*intel_obj
= intel_buffer_object(obj
);
364 /* _mesa_MapBufferRange (GL entrypoint) sets these, but the vbo module also
365 * internally uses our functions directly.
367 obj
->Offset
= offset
;
368 obj
->Length
= length
;
369 obj
->AccessFlags
= access
;
371 if (intel_obj
->sys_buffer
) {
372 if (access
!= GL_READ_ONLY_ARB
&& intel_obj
->buffer
) {
373 drm_intel_bo_unreference(intel_obj
->buffer
);
374 intel_obj
->buffer
= NULL
;
375 intel_obj
->source
= 0;
377 obj
->Pointer
= intel_obj
->sys_buffer
+ offset
;
381 if (intel_obj
->region
)
382 intel_bufferobj_cow(intel
, intel_obj
);
384 /* If the mapping is synchronized with other GL operations, flush
385 * the batchbuffer so that GEM knows about the buffer access for later
388 if (!(access
& GL_MAP_UNSYNCHRONIZED_BIT
) &&
389 drm_intel_bo_references(intel
->batch
.bo
, intel_obj
->buffer
))
392 if (intel_obj
->buffer
== NULL
) {
397 /* If the user doesn't care about existing buffer contents and mapping
398 * would cause us to block, then throw out the old buffer.
400 if (!(access
& GL_MAP_UNSYNCHRONIZED_BIT
) &&
401 (access
& GL_MAP_INVALIDATE_BUFFER_BIT
) &&
402 drm_intel_bo_busy(intel_obj
->buffer
)) {
403 drm_intel_bo_unreference(intel_obj
->buffer
);
404 intel_bufferobj_alloc_buffer(intel
, intel_obj
);
407 /* If the user is mapping a range of an active buffer object but
408 * doesn't require the current contents of that range, make a new
409 * BO, and we'll copy what they put in there out at unmap or
412 if ((access
& GL_MAP_INVALIDATE_RANGE_BIT
) &&
413 drm_intel_bo_busy(intel_obj
->buffer
)) {
414 if (access
& GL_MAP_FLUSH_EXPLICIT_BIT
) {
415 intel_obj
->range_map_buffer
= malloc(length
);
416 obj
->Pointer
= intel_obj
->range_map_buffer
;
418 intel_obj
->range_map_bo
= drm_intel_bo_alloc(intel
->bufmgr
,
421 if (!(access
& GL_MAP_READ_BIT
)) {
422 drm_intel_gem_bo_map_gtt(intel_obj
->range_map_bo
);
423 intel_obj
->mapped_gtt
= GL_TRUE
;
425 drm_intel_bo_map(intel_obj
->range_map_bo
,
426 (access
& GL_MAP_WRITE_BIT
) != 0);
427 intel_obj
->mapped_gtt
= GL_FALSE
;
429 obj
->Pointer
= intel_obj
->range_map_bo
->virtual;
434 if (!(access
& GL_MAP_READ_BIT
)) {
435 drm_intel_gem_bo_map_gtt(intel_obj
->buffer
);
436 intel_obj
->mapped_gtt
= GL_TRUE
;
438 drm_intel_bo_map(intel_obj
->buffer
, (access
& GL_MAP_WRITE_BIT
) != 0);
439 intel_obj
->mapped_gtt
= GL_FALSE
;
442 obj
->Pointer
= intel_obj
->buffer
->virtual + offset
;
446 /* Ideally we'd use a BO to avoid taking up cache space for the temporary
447 * data, but FlushMappedBufferRange may be followed by further writes to
448 * the pointer, so we would have to re-map after emitting our blit, which
449 * would defeat the point.
452 intel_bufferobj_flush_mapped_range(struct gl_context
*ctx
, GLenum target
,
453 GLintptr offset
, GLsizeiptr length
,
454 struct gl_buffer_object
*obj
)
456 struct intel_context
*intel
= intel_context(ctx
);
457 struct intel_buffer_object
*intel_obj
= intel_buffer_object(obj
);
458 drm_intel_bo
*temp_bo
;
460 /* Unless we're in the range map using a temporary system buffer,
461 * there's no work to do.
463 if (intel_obj
->range_map_buffer
== NULL
)
469 temp_bo
= drm_intel_bo_alloc(intel
->bufmgr
, "range map flush", length
, 64);
471 drm_intel_bo_subdata(temp_bo
, 0, length
, intel_obj
->range_map_buffer
);
473 intel_emit_linear_blit(intel
,
474 intel_obj
->buffer
, obj
->Offset
+ offset
,
478 drm_intel_bo_unreference(temp_bo
);
483 * Called via glUnmapBuffer().
486 intel_bufferobj_unmap(struct gl_context
* ctx
,
487 GLenum target
, struct gl_buffer_object
*obj
)
489 struct intel_context
*intel
= intel_context(ctx
);
490 struct intel_buffer_object
*intel_obj
= intel_buffer_object(obj
);
493 assert(obj
->Pointer
);
494 if (intel_obj
->sys_buffer
!= NULL
) {
495 /* always keep the mapping around. */
496 } else if (intel_obj
->range_map_buffer
!= NULL
) {
497 /* Since we've emitted some blits to buffers that will (likely) be used
498 * in rendering operations in other cache domains in this batch, emit a
499 * flush. Once again, we wish for a domain tracker in libdrm to cover
500 * usage inside of a batchbuffer.
502 intel_batchbuffer_emit_mi_flush(intel
);
503 free(intel_obj
->range_map_buffer
);
504 intel_obj
->range_map_buffer
= NULL
;
505 } else if (intel_obj
->range_map_bo
!= NULL
) {
506 if (intel_obj
->mapped_gtt
) {
507 drm_intel_gem_bo_unmap_gtt(intel_obj
->range_map_bo
);
509 drm_intel_bo_unmap(intel_obj
->range_map_bo
);
512 intel_emit_linear_blit(intel
,
513 intel_obj
->buffer
, obj
->Offset
,
514 intel_obj
->range_map_bo
, 0,
517 /* Since we've emitted some blits to buffers that will (likely) be used
518 * in rendering operations in other cache domains in this batch, emit a
519 * flush. Once again, we wish for a domain tracker in libdrm to cover
520 * usage inside of a batchbuffer.
522 intel_batchbuffer_emit_mi_flush(intel
);
524 drm_intel_bo_unreference(intel_obj
->range_map_bo
);
525 intel_obj
->range_map_bo
= NULL
;
526 } else if (intel_obj
->buffer
!= NULL
) {
527 if (intel_obj
->mapped_gtt
) {
528 drm_intel_gem_bo_unmap_gtt(intel_obj
->buffer
);
530 drm_intel_bo_unmap(intel_obj
->buffer
);
541 intel_bufferobj_buffer(struct intel_context
*intel
,
542 struct intel_buffer_object
*intel_obj
,
545 if (intel_obj
->region
) {
546 if (flag
== INTEL_WRITE_PART
)
547 intel_bufferobj_cow(intel
, intel_obj
);
548 else if (flag
== INTEL_WRITE_FULL
) {
549 intel_bufferobj_release_region(intel
, intel_obj
);
550 intel_bufferobj_alloc_buffer(intel
, intel_obj
);
554 if (intel_obj
->source
) {
555 drm_intel_bo_unreference(intel_obj
->buffer
);
556 intel_obj
->buffer
= NULL
;
557 intel_obj
->source
= 0;
560 if (intel_obj
->buffer
== NULL
) {
561 intel_bufferobj_alloc_buffer(intel
, intel_obj
);
562 drm_intel_bo_subdata(intel_obj
->buffer
,
563 0, intel_obj
->Base
.Size
,
564 intel_obj
->sys_buffer
);
566 free(intel_obj
->sys_buffer
);
567 intel_obj
->sys_buffer
= NULL
;
568 intel_obj
->offset
= 0;
571 return intel_obj
->buffer
;
/* Minimum size of the streaming upload BO allocated by wrap_buffers(). */
#define INTEL_UPLOAD_SIZE (64*1024)
577 intel_upload_finish(struct intel_context
*intel
)
579 if (!intel
->upload
.bo
)
582 if (intel
->upload
.buffer_len
) {
583 drm_intel_bo_subdata(intel
->upload
.bo
,
584 intel
->upload
.buffer_offset
,
585 intel
->upload
.buffer_len
,
586 intel
->upload
.buffer
);
587 intel
->upload
.buffer_len
= 0;
590 drm_intel_bo_unreference(intel
->upload
.bo
);
591 intel
->upload
.bo
= NULL
;
594 static void wrap_buffers(struct intel_context
*intel
, GLuint size
)
596 intel_upload_finish(intel
);
598 if (size
< INTEL_UPLOAD_SIZE
)
599 size
= INTEL_UPLOAD_SIZE
;
601 intel
->upload
.bo
= drm_intel_bo_alloc(intel
->bufmgr
, "upload", size
, 0);
602 intel
->upload
.offset
= 0;
605 void intel_upload_data(struct intel_context
*intel
,
606 const void *ptr
, GLuint size
, GLuint align
,
607 drm_intel_bo
**return_bo
,
608 GLuint
*return_offset
)
612 base
= (intel
->upload
.offset
+ align
- 1) / align
* align
;
613 if (intel
->upload
.bo
== NULL
|| base
+ size
> intel
->upload
.bo
->size
) {
614 wrap_buffers(intel
, size
);
618 drm_intel_bo_reference(intel
->upload
.bo
);
619 *return_bo
= intel
->upload
.bo
;
620 *return_offset
= base
;
622 delta
= base
- intel
->upload
.offset
;
623 if (intel
->upload
.buffer_len
&&
624 intel
->upload
.buffer_len
+ delta
+ size
> sizeof(intel
->upload
.buffer
))
626 drm_intel_bo_subdata(intel
->upload
.bo
,
627 intel
->upload
.buffer_offset
,
628 intel
->upload
.buffer_len
,
629 intel
->upload
.buffer
);
630 intel
->upload
.buffer_len
= 0;
633 if (size
< sizeof(intel
->upload
.buffer
))
635 if (intel
->upload
.buffer_len
== 0)
636 intel
->upload
.buffer_offset
= base
;
638 intel
->upload
.buffer_len
+= delta
;
640 memcpy(intel
->upload
.buffer
+ intel
->upload
.buffer_len
, ptr
, size
);
641 intel
->upload
.buffer_len
+= size
;
645 drm_intel_bo_subdata(intel
->upload
.bo
, base
, size
, ptr
);
648 intel
->upload
.offset
= base
+ size
;
651 void *intel_upload_map(struct intel_context
*intel
, GLuint size
, GLuint align
)
656 base
= (intel
->upload
.offset
+ align
- 1) / align
* align
;
657 if (intel
->upload
.bo
== NULL
|| base
+ size
> intel
->upload
.bo
->size
) {
658 wrap_buffers(intel
, size
);
662 delta
= base
- intel
->upload
.offset
;
663 if (intel
->upload
.buffer_len
&&
664 intel
->upload
.buffer_len
+ delta
+ size
> sizeof(intel
->upload
.buffer
))
666 drm_intel_bo_subdata(intel
->upload
.bo
,
667 intel
->upload
.buffer_offset
,
668 intel
->upload
.buffer_len
,
669 intel
->upload
.buffer
);
670 intel
->upload
.buffer_len
= 0;
673 if (size
<= sizeof(intel
->upload
.buffer
)) {
674 if (intel
->upload
.buffer_len
== 0)
675 intel
->upload
.buffer_offset
= base
;
677 intel
->upload
.buffer_len
+= delta
;
679 ptr
= intel
->upload
.buffer
+ intel
->upload
.buffer_len
;
680 intel
->upload
.buffer_len
+= size
;
687 void intel_upload_unmap(struct intel_context
*intel
,
688 const void *ptr
, GLuint size
, GLuint align
,
689 drm_intel_bo
**return_bo
,
690 GLuint
*return_offset
)
694 base
= (intel
->upload
.offset
+ align
- 1) / align
* align
;
695 if (size
> sizeof(intel
->upload
.buffer
)) {
696 drm_intel_bo_subdata(intel
->upload
.bo
, base
, size
, ptr
);
700 drm_intel_bo_reference(intel
->upload
.bo
);
701 *return_bo
= intel
->upload
.bo
;
702 *return_offset
= base
;
704 intel
->upload
.offset
= base
+ size
;
/* Get a BO usable as a read source for this buffer object.  A
 * system-memory shadow is pushed through the shared upload stream rather
 * than getting a private BO; *offset receives the data's offset within
 * the returned BO.
 */
drm_intel_bo *
intel_bufferobj_source(struct intel_context *intel,
                       struct intel_buffer_object *intel_obj,
                       GLuint *offset)
{
   if (intel_obj->buffer == NULL) {
      intel_upload_data(intel,
                        intel_obj->sys_buffer, intel_obj->Base.Size, 64,
                        &intel_obj->buffer, &intel_obj->offset);
      /* Mark the BO as upload-sourced so writers know to replace it. */
      intel_obj->source = 1;
   }

   *offset = intel_obj->offset;
   return intel_obj->buffer;
}
724 intel_bufferobj_copy_subdata(struct gl_context
*ctx
,
725 struct gl_buffer_object
*src
,
726 struct gl_buffer_object
*dst
,
727 GLintptr read_offset
, GLintptr write_offset
,
730 struct intel_context
*intel
= intel_context(ctx
);
731 struct intel_buffer_object
*intel_src
= intel_buffer_object(src
);
732 struct intel_buffer_object
*intel_dst
= intel_buffer_object(dst
);
733 drm_intel_bo
*src_bo
, *dst_bo
;
739 /* If we're in system memory, just map and memcpy. */
740 if (intel_src
->sys_buffer
|| intel_dst
->sys_buffer
|| intel
->gen
>= 6) {
741 /* The same buffer may be used, but note that regions copied may
745 char *ptr
= intel_bufferobj_map(ctx
, GL_COPY_WRITE_BUFFER
,
747 memcpy(ptr
+ write_offset
, ptr
+ read_offset
, size
);
748 intel_bufferobj_unmap(ctx
, GL_COPY_WRITE_BUFFER
, dst
);
753 src_ptr
= intel_bufferobj_map(ctx
, GL_COPY_READ_BUFFER
,
755 dst_ptr
= intel_bufferobj_map(ctx
, GL_COPY_WRITE_BUFFER
,
758 memcpy(dst_ptr
+ write_offset
, src_ptr
+ read_offset
, size
);
760 intel_bufferobj_unmap(ctx
, GL_COPY_READ_BUFFER
, src
);
761 intel_bufferobj_unmap(ctx
, GL_COPY_WRITE_BUFFER
, dst
);
766 /* Otherwise, we have real BOs, so blit them. */
768 dst_bo
= intel_bufferobj_buffer(intel
, intel_dst
, INTEL_WRITE_PART
);
769 src_bo
= intel_bufferobj_source(intel
, intel_src
, &src_offset
);
771 intel_emit_linear_blit(intel
,
772 dst_bo
, write_offset
,
773 src_bo
, read_offset
+ src_offset
, size
);
775 /* Since we've emitted some blits to buffers that will (likely) be used
776 * in rendering operations in other cache domains in this batch, emit a
777 * flush. Once again, we wish for a domain tracker in libdrm to cover
778 * usage inside of a batchbuffer.
780 intel_batchbuffer_emit_mi_flush(intel
);
783 #if FEATURE_APPLE_object_purgeable
785 intel_buffer_purgeable(struct gl_context
* ctx
,
786 drm_intel_bo
*buffer
,
792 retained
= drm_intel_bo_madvise (buffer
, I915_MADV_DONTNEED
);
794 return retained
? GL_VOLATILE_APPLE
: GL_RELEASED_APPLE
;
798 intel_buffer_object_purgeable(struct gl_context
* ctx
,
799 struct gl_buffer_object
*obj
,
802 struct intel_buffer_object
*intel
;
804 intel
= intel_buffer_object (obj
);
805 if (intel
->buffer
!= NULL
)
806 return intel_buffer_purgeable (ctx
, intel
->buffer
, option
);
808 if (option
== GL_RELEASED_APPLE
) {
809 if (intel
->sys_buffer
!= NULL
) {
810 free(intel
->sys_buffer
);
811 intel
->sys_buffer
= NULL
;
814 return GL_RELEASED_APPLE
;
816 /* XXX Create the buffer and madvise(MADV_DONTNEED)? */
817 return intel_buffer_purgeable (ctx
,
818 intel_bufferobj_buffer(intel_context(ctx
),
825 intel_texture_object_purgeable(struct gl_context
* ctx
,
826 struct gl_texture_object
*obj
,
829 struct intel_texture_object
*intel
;
831 intel
= intel_texture_object(obj
);
832 if (intel
->mt
== NULL
|| intel
->mt
->region
== NULL
)
833 return GL_RELEASED_APPLE
;
835 return intel_buffer_purgeable (ctx
, intel
->mt
->region
->buffer
, option
);
839 intel_render_object_purgeable(struct gl_context
* ctx
,
840 struct gl_renderbuffer
*obj
,
843 struct intel_renderbuffer
*intel
;
845 intel
= intel_renderbuffer(obj
);
846 if (intel
->region
== NULL
)
847 return GL_RELEASED_APPLE
;
849 return intel_buffer_purgeable (ctx
, intel
->region
->buffer
, option
);
853 intel_buffer_unpurgeable(struct gl_context
* ctx
,
854 drm_intel_bo
*buffer
,
861 retained
= drm_intel_bo_madvise (buffer
, I915_MADV_WILLNEED
);
863 return retained
? GL_RETAINED_APPLE
: GL_UNDEFINED_APPLE
;
867 intel_buffer_object_unpurgeable(struct gl_context
* ctx
,
868 struct gl_buffer_object
*obj
,
871 return intel_buffer_unpurgeable (ctx
, intel_buffer_object (obj
)->buffer
, option
);
875 intel_texture_object_unpurgeable(struct gl_context
* ctx
,
876 struct gl_texture_object
*obj
,
879 struct intel_texture_object
*intel
;
881 intel
= intel_texture_object(obj
);
882 if (intel
->mt
== NULL
|| intel
->mt
->region
== NULL
)
883 return GL_UNDEFINED_APPLE
;
885 return intel_buffer_unpurgeable (ctx
, intel
->mt
->region
->buffer
, option
);
889 intel_render_object_unpurgeable(struct gl_context
* ctx
,
890 struct gl_renderbuffer
*obj
,
893 struct intel_renderbuffer
*intel
;
895 intel
= intel_renderbuffer(obj
);
896 if (intel
->region
== NULL
)
897 return GL_UNDEFINED_APPLE
;
899 return intel_buffer_unpurgeable (ctx
, intel
->region
->buffer
, option
);
904 intelInitBufferObjectFuncs(struct dd_function_table
*functions
)
906 functions
->NewBufferObject
= intel_bufferobj_alloc
;
907 functions
->DeleteBuffer
= intel_bufferobj_free
;
908 functions
->BufferData
= intel_bufferobj_data
;
909 functions
->BufferSubData
= intel_bufferobj_subdata
;
910 functions
->GetBufferSubData
= intel_bufferobj_get_subdata
;
911 functions
->MapBuffer
= intel_bufferobj_map
;
912 functions
->MapBufferRange
= intel_bufferobj_map_range
;
913 functions
->FlushMappedBufferRange
= intel_bufferobj_flush_mapped_range
;
914 functions
->UnmapBuffer
= intel_bufferobj_unmap
;
915 functions
->CopyBufferSubData
= intel_bufferobj_copy_subdata
;
917 #if FEATURE_APPLE_object_purgeable
918 functions
->BufferObjectPurgeable
= intel_buffer_object_purgeable
;
919 functions
->TextureObjectPurgeable
= intel_texture_object_purgeable
;
920 functions
->RenderObjectPurgeable
= intel_render_object_purgeable
;
922 functions
->BufferObjectUnpurgeable
= intel_buffer_object_unpurgeable
;
923 functions
->TextureObjectUnpurgeable
= intel_texture_object_unpurgeable
;
924 functions
->RenderObjectUnpurgeable
= intel_render_object_unpurgeable
;