/**************************************************************************
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "main/imports.h"
#include "main/mfeatures.h"
#include "main/mtypes.h"
#include "main/macros.h"
#include "main/bufferobj.h"

#include "intel_blit.h"
#include "intel_buffer_objects.h"
#include "intel_batchbuffer.h"
#include "intel_context.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
#include "intel_regions.h"
static GLboolean
intel_bufferobj_unmap(struct gl_context * ctx,
                      GLenum target, struct gl_buffer_object *obj);
/** Allocates a new drm_intel_bo to store the data for the buffer object. */
static void
intel_bufferobj_alloc_buffer(struct intel_context *intel,
                             struct intel_buffer_object *intel_obj)
{
   intel_obj->buffer = drm_intel_bo_alloc(intel->bufmgr, "bufferobj",
                                          intel_obj->Base.Size, 64);
}
static void
release_buffer(struct intel_buffer_object *intel_obj)
{
   drm_intel_bo_unreference(intel_obj->buffer);
   intel_obj->buffer = NULL;
   intel_obj->offset = 0;
   intel_obj->source = 0;
}
/**
 * There is some duplication between mesa's bufferobjects and our
 * bufmgr buffers.  Both have an integer handle and a hashtable to
 * look up an opaque structure.  It would be nice if the handles and
 * internal structures were somehow shared.
 */
static struct gl_buffer_object *
intel_bufferobj_alloc(struct gl_context * ctx, GLuint name, GLenum target)
{
   struct intel_buffer_object *obj = CALLOC_STRUCT(intel_buffer_object);

   _mesa_initialize_buffer_object(&obj->Base, name, target);

   obj->buffer = NULL;

   return &obj->Base;
}
/* Break the COW tie to the region.  The region gets to keep the data.
 */
void
intel_bufferobj_release_region(struct intel_context *intel,
                               struct intel_buffer_object *intel_obj)
{
   assert(intel_obj->region->buffer == intel_obj->buffer);
   intel_obj->region->pbo = NULL;
   intel_obj->region = NULL;

   release_buffer(intel_obj);
}
/* Break the COW tie to the region.  Both the pbo and the region end
 * up with a copy of the data.
 */
void
intel_bufferobj_cow(struct intel_context *intel,
                    struct intel_buffer_object *intel_obj)
{
   assert(intel_obj->region);
   intel_region_cow(intel, intel_obj->region);
}
/**
 * Deallocate/free a vertex/pixel buffer object.
 * Called via glDeleteBuffersARB().
 */
static void
intel_bufferobj_free(struct gl_context * ctx, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);

   /* Buffer objects are automatically unmapped when deleting according
    * to the spec, but Mesa doesn't do UnmapBuffer for us at context destroy
    * (though it does if you call glDeleteBuffers).
    */
   if (obj->Pointer)
      intel_bufferobj_unmap(ctx, 0, obj);

   free(intel_obj->sys_buffer);
   if (intel_obj->region) {
      intel_bufferobj_release_region(intel, intel_obj);
   }
   else if (intel_obj->buffer) {
      drm_intel_bo_unreference(intel_obj->buffer);
   }

   free(intel_obj);
}
/**
 * Allocate space for and store data in a buffer object.  Any data that was
 * previously stored in the buffer object is lost.  If data is NULL,
 * memory will be allocated, but no copy will occur.
 * Called via ctx->Driver.BufferData().
 * \return GL_TRUE for success, GL_FALSE if out of memory
 */
static GLboolean
intel_bufferobj_data(struct gl_context * ctx,
                     GLenum target,
                     GLsizeiptrARB size,
                     const GLvoid * data,
                     GLenum usage, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   intel_obj->Base.Size = size;
   intel_obj->Base.Usage = usage;

   assert(!obj->Pointer); /* Mesa should have unmapped it */

   if (intel_obj->region)
      intel_bufferobj_release_region(intel, intel_obj);

   if (intel_obj->buffer != NULL)
      release_buffer(intel_obj);

   free(intel_obj->sys_buffer);
   intel_obj->sys_buffer = NULL;

   if (size != 0) {
      if (usage == GL_DYNAMIC_DRAW
#ifdef I915
          /* On pre-965, stick VBOs in system memory, as we're always doing
           * swtnl with their contents anyway.
           */
          || target == GL_ARRAY_BUFFER || target == GL_ELEMENT_ARRAY_BUFFER
#endif
         ) {
         intel_obj->sys_buffer = malloc(size);
         if (intel_obj->sys_buffer != NULL) {
            if (data != NULL)
               memcpy(intel_obj->sys_buffer, data, size);
            return GL_TRUE;
         }
      }

      intel_bufferobj_alloc_buffer(intel, intel_obj);
      if (!intel_obj->buffer)
         return GL_FALSE;

      if (data != NULL)
         drm_intel_bo_subdata(intel_obj->buffer, 0, size, data);
   }

   return GL_TRUE;
}
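/* Illustrative client-side sketch (not part of this file; vbo and vertices
 * are hypothetical names): a GL_DYNAMIC_DRAW allocation takes the sys_buffer
 * path above, so no BO is created until the GPU actually needs the data:
 *
 *    glBindBuffer(GL_ARRAY_BUFFER, vbo);
 *    glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices,
 *                 GL_DYNAMIC_DRAW);
 *
 * Passing NULL as the data pointer only reserves storage, per the comment
 * above.
 */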
/**
 * Replace data in a subrange of buffer object.  If the data range
 * specified by size + offset extends beyond the end of the buffer or
 * if data is NULL, no copy is performed.
 * Called via glBufferSubDataARB().
 */
static void
intel_bufferobj_subdata(struct gl_context * ctx,
                        GLenum target,
                        GLintptrARB offset,
                        GLsizeiptrARB size,
                        const GLvoid * data, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
   GLboolean busy;

   if (size == 0)
      return;

   assert(intel_obj);

   if (intel_obj->region)
      intel_bufferobj_cow(intel, intel_obj);

   /* If we have a single copy in system memory, update that */
   if (intel_obj->sys_buffer) {
      if (intel_obj->source)
         release_buffer(intel_obj);

      if (intel_obj->buffer == NULL) {
         memcpy((char *)intel_obj->sys_buffer + offset, data, size);
         return;
      }

      free(intel_obj->sys_buffer);
      intel_obj->sys_buffer = NULL;
   }

   /* Otherwise we need to update the copy in video memory. */
   busy =
      drm_intel_bo_busy(intel_obj->buffer) ||
      drm_intel_bo_references(intel->batch.bo, intel_obj->buffer);

   /* replace the current busy bo with fresh data */
   if (busy && size == intel_obj->Base.Size) {
      drm_intel_bo_unreference(intel_obj->buffer);
      intel_bufferobj_alloc_buffer(intel, intel_obj);
      drm_intel_bo_subdata(intel_obj->buffer, 0, size, data);
   } else if (intel->gen < 6) {
      if (busy) {
         drm_intel_bo *temp_bo;

         temp_bo = drm_intel_bo_alloc(intel->bufmgr, "subdata temp", size, 64);

         drm_intel_bo_subdata(temp_bo, 0, size, data);

         intel_emit_linear_blit(intel,
                                intel_obj->buffer, offset,
                                temp_bo, 0,
                                size);

         drm_intel_bo_unreference(temp_bo);
      } else {
         drm_intel_bo_subdata(intel_obj->buffer, offset, size, data);
      }
   } else {
      /* Can't use the blit to modify the buffer in the middle of batch. */
      if (drm_intel_bo_references(intel->batch.bo, intel_obj->buffer)) {
         intel_batchbuffer_flush(intel);
      }
      drm_intel_bo_subdata(intel_obj->buffer, offset, size, data);
   }
}
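/* Illustrative client-side sketch (hypothetical names): rewriting the full
 * size of a busy buffer lets the code above orphan the old BO and allocate
 * a fresh one instead of stalling on the GPU:
 *
 *    glBufferSubData(GL_ARRAY_BUFFER, 0, buf_size, new_data);
 *
 * A partial update of a busy buffer instead takes the blit path (gen < 6)
 * or the flush-and-write path (gen >= 6).
 */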
/**
 * Called via glGetBufferSubDataARB().
 */
static void
intel_bufferobj_get_subdata(struct gl_context * ctx,
                            GLenum target,
                            GLintptrARB offset,
                            GLsizeiptrARB size,
                            GLvoid * data, struct gl_buffer_object *obj)
{
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
   struct intel_context *intel = intel_context(ctx);

   assert(intel_obj);
   if (intel_obj->sys_buffer)
      memcpy(data, (char *)intel_obj->sys_buffer + offset, size);
   else {
      if (drm_intel_bo_references(intel->batch.bo, intel_obj->buffer)) {
         intel_batchbuffer_flush(intel);
      }
      drm_intel_bo_get_subdata(intel_obj->buffer, offset, size, data);
   }
}
/**
 * Called via glMapBufferARB().
 */
static void *
intel_bufferobj_map(struct gl_context * ctx,
                    GLenum target,
                    GLenum access, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
   GLboolean read_only = (access == GL_READ_ONLY_ARB);
   GLboolean write_only = (access == GL_WRITE_ONLY_ARB);

   assert(intel_obj);

   if (intel_obj->sys_buffer) {
      if (!read_only && intel_obj->source) {
         release_buffer(intel_obj);
      }

      if (!intel_obj->buffer || intel_obj->source) {
         obj->Pointer = intel_obj->sys_buffer;
         obj->Length = obj->Size;
         obj->Offset = 0;
         return obj->Pointer;
      }

      free(intel_obj->sys_buffer);
      intel_obj->sys_buffer = NULL;
   }

   /* Flush any existing batchbuffer that might reference this data. */
   if (drm_intel_bo_references(intel->batch.bo, intel_obj->buffer))
      intel_batchbuffer_flush(intel);

   if (intel_obj->region)
      intel_bufferobj_cow(intel, intel_obj);

   if (intel_obj->buffer == NULL) {
      obj->Pointer = NULL;
      return NULL;
   }

   if (write_only) {
      drm_intel_gem_bo_map_gtt(intel_obj->buffer);
      intel_obj->mapped_gtt = GL_TRUE;
   } else {
      drm_intel_bo_map(intel_obj->buffer, !read_only);
      intel_obj->mapped_gtt = GL_FALSE;
   }

   obj->Pointer = intel_obj->buffer->virtual;
   obj->Length = obj->Size;
   obj->Offset = 0;

   return obj->Pointer;
}
/**
 * Called via glMapBufferRange().
 *
 * The goal of this extension is to allow apps to accumulate their rendering
 * at the same time as they accumulate their buffer object.  Without it,
 * you'd end up blocking on execution of rendering every time you mapped
 * the buffer to put new data in.
 *
 * We support it in 3 ways: If unsynchronized, then don't bother flushing
 * the batchbuffer before mapping the buffer, which can save blocking
 * in many cases.  If we would still block, and they allow the whole buffer
 * to be invalidated, then just allocate a new buffer to replace the old one.
 * If not, and we'd block, and they allow the subrange of the buffer to be
 * invalidated, then we can make a new little BO, let them write into that,
 * and blit it into the real BO at unmap time.
 */
static void *
intel_bufferobj_map_range(struct gl_context * ctx,
                          GLenum target, GLintptr offset, GLsizeiptr length,
                          GLbitfield access, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
   GLboolean read_only = (access == GL_READ_ONLY_ARB);

   assert(intel_obj);

   /* _mesa_MapBufferRange (GL entrypoint) sets these, but the vbo module also
    * internally uses our functions directly.
    */
   obj->Offset = offset;
   obj->Length = length;
   obj->AccessFlags = access;

   if (intel_obj->sys_buffer) {
      if (!read_only && intel_obj->source)
         release_buffer(intel_obj);

      if (!intel_obj->buffer || intel_obj->source) {
         obj->Pointer = intel_obj->sys_buffer + offset;
         return obj->Pointer;
      }

      free(intel_obj->sys_buffer);
      intel_obj->sys_buffer = NULL;
   }

   if (intel_obj->region)
      intel_bufferobj_cow(intel, intel_obj);

   /* If the mapping is synchronized with other GL operations, flush
    * the batchbuffer so that GEM knows about the buffer access for later
    * syncing.
    */
   if (!(access & GL_MAP_UNSYNCHRONIZED_BIT) &&
       drm_intel_bo_references(intel->batch.bo, intel_obj->buffer))
      intel_batchbuffer_flush(intel);

   if (intel_obj->buffer == NULL) {
      obj->Pointer = NULL;
      return NULL;
   }

   /* If the user doesn't care about existing buffer contents and mapping
    * would cause us to block, then throw out the old buffer.
    */
   if (!(access & GL_MAP_UNSYNCHRONIZED_BIT) &&
       (access & GL_MAP_INVALIDATE_BUFFER_BIT) &&
       drm_intel_bo_busy(intel_obj->buffer)) {
      drm_intel_bo_unreference(intel_obj->buffer);
      intel_bufferobj_alloc_buffer(intel, intel_obj);
   }

   /* If the user is mapping a range of an active buffer object but
    * doesn't require the current contents of that range, make a new
    * BO, and we'll copy what they put in there out at unmap or
    * FlushRange time.
    */
   if ((access & GL_MAP_INVALIDATE_RANGE_BIT) &&
       drm_intel_bo_busy(intel_obj->buffer)) {
      if (access & GL_MAP_FLUSH_EXPLICIT_BIT) {
         intel_obj->range_map_buffer = malloc(length);
         obj->Pointer = intel_obj->range_map_buffer;
      } else {
         intel_obj->range_map_bo = drm_intel_bo_alloc(intel->bufmgr,
                                                      "range map",
                                                      length, 64);
         if (!(access & GL_MAP_READ_BIT)) {
            drm_intel_gem_bo_map_gtt(intel_obj->range_map_bo);
            intel_obj->mapped_gtt = GL_TRUE;
         } else {
            drm_intel_bo_map(intel_obj->range_map_bo,
                             (access & GL_MAP_WRITE_BIT) != 0);
            intel_obj->mapped_gtt = GL_FALSE;
         }
         obj->Pointer = intel_obj->range_map_bo->virtual;
      }
      return obj->Pointer;
   }

   if (!(access & GL_MAP_READ_BIT)) {
      drm_intel_gem_bo_map_gtt(intel_obj->buffer);
      intel_obj->mapped_gtt = GL_TRUE;
   } else {
      drm_intel_bo_map(intel_obj->buffer, (access & GL_MAP_WRITE_BIT) != 0);
      intel_obj->mapped_gtt = GL_FALSE;
   }

   obj->Pointer = intel_obj->buffer->virtual + offset;
   return obj->Pointer;
}
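/* Illustrative client-side sketch (hypothetical names) of the strategies
 * described in the comment above: mapping a subrange with its contents
 * invalidated lands in the range_map_bo path when the buffer is busy, so
 * the write does not stall on the GPU:
 *
 *    void *p = glMapBufferRange(GL_ARRAY_BUFFER, offset, length,
 *                               GL_MAP_WRITE_BIT |
 *                               GL_MAP_INVALIDATE_RANGE_BIT);
 *    memcpy(p, data, length);
 *    glUnmapBuffer(GL_ARRAY_BUFFER);
 */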
/* Ideally we'd use a BO to avoid taking up cache space for the temporary
 * data, but FlushMappedBufferRange may be followed by further writes to
 * the pointer, so we would have to re-map after emitting our blit, which
 * would defeat the point.
 */
static void
intel_bufferobj_flush_mapped_range(struct gl_context *ctx, GLenum target,
                                   GLintptr offset, GLsizeiptr length,
                                   struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
   drm_intel_bo *temp_bo;

   /* Unless we're in the range map using a temporary system buffer,
    * there's no work to do.
    */
   if (intel_obj->range_map_buffer == NULL)
      return;

   if (length == 0)
      return;

   temp_bo = drm_intel_bo_alloc(intel->bufmgr, "range map flush", length, 64);

   drm_intel_bo_subdata(temp_bo, 0, length, intel_obj->range_map_buffer);

   intel_emit_linear_blit(intel,
                          intel_obj->buffer, obj->Offset + offset,
                          temp_bo, 0,
                          length);

   drm_intel_bo_unreference(temp_bo);
}
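/* Illustrative client-side sketch (hypothetical names): an explicit-flush
 * mapping of a busy buffer uses the malloc'd range_map_buffer above, and
 * each flush blits just the dirtied subrange into the real BO:
 *
 *    void *p = glMapBufferRange(GL_ARRAY_BUFFER, 0, len,
 *                               GL_MAP_WRITE_BIT |
 *                               GL_MAP_INVALIDATE_RANGE_BIT |
 *                               GL_MAP_FLUSH_EXPLICIT_BIT);
 *    ...write the first chunk...
 *    glFlushMappedBufferRange(GL_ARRAY_BUFFER, 0, chunk_len);
 */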
/**
 * Called via glUnmapBuffer().
 */
static GLboolean
intel_bufferobj_unmap(struct gl_context * ctx,
                      GLenum target, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);
   assert(obj->Pointer);
   if (intel_obj->sys_buffer != NULL) {
      /* always keep the mapping around. */
   } else if (intel_obj->range_map_buffer != NULL) {
      /* Since we've emitted some blits to buffers that will (likely) be used
       * in rendering operations in other cache domains in this batch, emit a
       * flush.  Once again, we wish for a domain tracker in libdrm to cover
       * usage inside of a batchbuffer.
       */
      intel_batchbuffer_emit_mi_flush(intel);
      free(intel_obj->range_map_buffer);
      intel_obj->range_map_buffer = NULL;
   } else if (intel_obj->range_map_bo != NULL) {
      if (intel_obj->mapped_gtt) {
         drm_intel_gem_bo_unmap_gtt(intel_obj->range_map_bo);
      } else {
         drm_intel_bo_unmap(intel_obj->range_map_bo);
      }

      intel_emit_linear_blit(intel,
                             intel_obj->buffer, obj->Offset,
                             intel_obj->range_map_bo, 0,
                             obj->Length);

      /* Since we've emitted some blits to buffers that will (likely) be used
       * in rendering operations in other cache domains in this batch, emit a
       * flush.  Once again, we wish for a domain tracker in libdrm to cover
       * usage inside of a batchbuffer.
       */
      intel_batchbuffer_emit_mi_flush(intel);

      drm_intel_bo_unreference(intel_obj->range_map_bo);
      intel_obj->range_map_bo = NULL;
   } else if (intel_obj->buffer != NULL) {
      if (intel_obj->mapped_gtt) {
         drm_intel_gem_bo_unmap_gtt(intel_obj->buffer);
      } else {
         drm_intel_bo_unmap(intel_obj->buffer);
      }
   }

   obj->Pointer = NULL;
   obj->Offset = 0;
   obj->Length = 0;

   return GL_TRUE;
}
drm_intel_bo *
intel_bufferobj_buffer(struct intel_context *intel,
                       struct intel_buffer_object *intel_obj,
                       GLuint flag)
{
   if (intel_obj->region) {
      if (flag == INTEL_WRITE_PART)
         intel_bufferobj_cow(intel, intel_obj);
      else if (flag == INTEL_WRITE_FULL) {
         intel_bufferobj_release_region(intel, intel_obj);
         intel_bufferobj_alloc_buffer(intel, intel_obj);
      }
   }

   if (intel_obj->source)
      release_buffer(intel_obj);

   if (intel_obj->buffer == NULL) {
      intel_bufferobj_alloc_buffer(intel, intel_obj);
      drm_intel_bo_subdata(intel_obj->buffer,
                           0, intel_obj->Base.Size,
                           intel_obj->sys_buffer);

      free(intel_obj->sys_buffer);
      intel_obj->sys_buffer = NULL;
      intel_obj->offset = 0;
   }

   return intel_obj->buffer;
}
#define INTEL_UPLOAD_SIZE (64*1024)

void
intel_upload_finish(struct intel_context *intel)
{
   if (!intel->upload.bo)
      return;

   if (intel->upload.buffer_len) {
      drm_intel_bo_subdata(intel->upload.bo,
                           intel->upload.buffer_offset,
                           intel->upload.buffer_len,
                           intel->upload.buffer);
      intel->upload.buffer_len = 0;
   }

   drm_intel_bo_unreference(intel->upload.bo);
   intel->upload.bo = NULL;
}
static void wrap_buffers(struct intel_context *intel, GLuint size)
{
   intel_upload_finish(intel);

   if (size < INTEL_UPLOAD_SIZE)
      size = INTEL_UPLOAD_SIZE;

   intel->upload.bo = drm_intel_bo_alloc(intel->bufmgr, "upload", size, 0);
   intel->upload.offset = 0;
}
void intel_upload_data(struct intel_context *intel,
                       const void *ptr, GLuint size, GLuint align,
                       drm_intel_bo **return_bo,
                       GLuint *return_offset)
{
   GLuint base, delta;

   base = (intel->upload.offset + align - 1) / align * align;
   if (intel->upload.bo == NULL || base + size > intel->upload.bo->size) {
      wrap_buffers(intel, size);
      base = 0;
   }

   drm_intel_bo_reference(intel->upload.bo);
   *return_bo = intel->upload.bo;
   *return_offset = base;

   delta = base - intel->upload.offset;
   if (intel->upload.buffer_len &&
       intel->upload.buffer_len + delta + size > sizeof(intel->upload.buffer)) {
      drm_intel_bo_subdata(intel->upload.bo,
                           intel->upload.buffer_offset,
                           intel->upload.buffer_len,
                           intel->upload.buffer);
      intel->upload.buffer_len = 0;
   }

   if (size < sizeof(intel->upload.buffer)) {
      if (intel->upload.buffer_len == 0)
         intel->upload.buffer_offset = base;
      else
         intel->upload.buffer_len += delta;

      memcpy(intel->upload.buffer + intel->upload.buffer_len, ptr, size);
      intel->upload.buffer_len += size;
   } else {
      drm_intel_bo_subdata(intel->upload.bo, base, size, ptr);
   }

   intel->upload.offset = base + size;
}
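/* Illustrative in-driver sketch (hypothetical variables): callers stream
 * small pieces of data through the shared upload BO and receive a
 * referenced BO plus the offset their data landed at, e.g.:
 *
 *    drm_intel_bo *bo;
 *    GLuint offset;
 *    intel_upload_data(intel, data, size, 64, &bo, &offset);
 *    ...emit commands or relocations pointing at bo + offset...
 *    drm_intel_bo_unreference(bo);
 */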
void *intel_upload_map(struct intel_context *intel, GLuint size, GLuint align)
{
   GLuint base, delta;
   char *ptr;

   base = (intel->upload.offset + align - 1) / align * align;
   if (intel->upload.bo == NULL || base + size > intel->upload.bo->size) {
      wrap_buffers(intel, size);
      base = 0;
   }

   delta = base - intel->upload.offset;
   if (intel->upload.buffer_len &&
       intel->upload.buffer_len + delta + size > sizeof(intel->upload.buffer)) {
      drm_intel_bo_subdata(intel->upload.bo,
                           intel->upload.buffer_offset,
                           intel->upload.buffer_len,
                           intel->upload.buffer);
      intel->upload.buffer_len = 0;
   }

   if (size <= sizeof(intel->upload.buffer)) {
      if (intel->upload.buffer_len == 0)
         intel->upload.buffer_offset = base;
      else
         intel->upload.buffer_len += delta;

      ptr = intel->upload.buffer + intel->upload.buffer_len;
      intel->upload.buffer_len += size;
   } else
      ptr = malloc(size);

   return ptr;
}
void intel_upload_unmap(struct intel_context *intel,
                        const void *ptr, GLuint size, GLuint align,
                        drm_intel_bo **return_bo,
                        GLuint *return_offset)
{
   GLuint base;

   base = (intel->upload.offset + align - 1) / align * align;
   if (size > sizeof(intel->upload.buffer)) {
      drm_intel_bo_subdata(intel->upload.bo, base, size, ptr);
      free((void *) ptr);
   }

   drm_intel_bo_reference(intel->upload.bo);
   *return_bo = intel->upload.bo;
   *return_offset = base;

   intel->upload.offset = base + size;
}
drm_intel_bo *
intel_bufferobj_source(struct intel_context *intel,
                       struct intel_buffer_object *intel_obj,
                       GLuint align, GLuint *offset)
{
   if (intel_obj->buffer == NULL) {
      intel_upload_data(intel,
                        intel_obj->sys_buffer, intel_obj->Base.Size, align,
                        &intel_obj->buffer, &intel_obj->offset);
      intel_obj->source = 1;
   }

   *offset = intel_obj->offset;
   return intel_obj->buffer;
}
static void
intel_bufferobj_copy_subdata(struct gl_context *ctx,
                             struct gl_buffer_object *src,
                             struct gl_buffer_object *dst,
                             GLintptr read_offset, GLintptr write_offset,
                             GLsizeiptr size)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_src = intel_buffer_object(src);
   struct intel_buffer_object *intel_dst = intel_buffer_object(dst);
   drm_intel_bo *src_bo, *dst_bo;
   GLuint src_offset;

   if (size == 0)
      return;

   /* If we're in system memory, just map and memcpy. */
   if (intel_src->sys_buffer || intel_dst->sys_buffer || intel->gen >= 6) {
      /* The same buffer may be used, but note that regions copied may
       * not overlap.
       */
      if (src == dst) {
         char *ptr = intel_bufferobj_map(ctx, GL_COPY_WRITE_BUFFER,
                                         GL_READ_WRITE, dst);

         memmove(ptr + write_offset, ptr + read_offset, size);
         intel_bufferobj_unmap(ctx, GL_COPY_WRITE_BUFFER, dst);
      } else {
         const char *src_ptr;
         char *dst_ptr;

         src_ptr = intel_bufferobj_map(ctx, GL_COPY_READ_BUFFER,
                                       GL_READ_ONLY, src);
         dst_ptr = intel_bufferobj_map(ctx, GL_COPY_WRITE_BUFFER,
                                       GL_WRITE_ONLY, dst);

         memcpy(dst_ptr + write_offset, src_ptr + read_offset, size);

         intel_bufferobj_unmap(ctx, GL_COPY_READ_BUFFER, src);
         intel_bufferobj_unmap(ctx, GL_COPY_WRITE_BUFFER, dst);
      }
      return;
   }

   /* Otherwise, we have real BOs, so blit them. */

   dst_bo = intel_bufferobj_buffer(intel, intel_dst, INTEL_WRITE_PART);
   src_bo = intel_bufferobj_source(intel, intel_src, 64, &src_offset);

   intel_emit_linear_blit(intel,
                          dst_bo, write_offset,
                          src_bo, read_offset + src_offset, size);

   /* Since we've emitted some blits to buffers that will (likely) be used
    * in rendering operations in other cache domains in this batch, emit a
    * flush.  Once again, we wish for a domain tracker in libdrm to cover
    * usage inside of a batchbuffer.
    */
   intel_batchbuffer_emit_mi_flush(intel);
}
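/* Illustrative client-side sketch (hypothetical names): on gen >= 6, or
 * when either buffer lives in system memory, the copy above is a
 * map-and-memcpy; otherwise it becomes a blit between the two BOs:
 *
 *    glBindBuffer(GL_COPY_READ_BUFFER, src_vbo);
 *    glBindBuffer(GL_COPY_WRITE_BUFFER, dst_vbo);
 *    glCopyBufferSubData(GL_COPY_READ_BUFFER, GL_COPY_WRITE_BUFFER,
 *                        read_off, write_off, nbytes);
 */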
#if FEATURE_APPLE_object_purgeable
static GLenum
intel_buffer_purgeable(struct gl_context * ctx,
                       drm_intel_bo *buffer,
                       GLenum option)
{
   int retained = 0;

   if (buffer != NULL)
      retained = drm_intel_bo_madvise (buffer, I915_MADV_DONTNEED);

   return retained ? GL_VOLATILE_APPLE : GL_RELEASED_APPLE;
}

static GLenum
intel_buffer_object_purgeable(struct gl_context * ctx,
                              struct gl_buffer_object *obj,
                              GLenum option)
{
   struct intel_buffer_object *intel;

   intel = intel_buffer_object (obj);
   if (intel->buffer != NULL)
      return intel_buffer_purgeable (ctx, intel->buffer, option);

   if (option == GL_RELEASED_APPLE) {
      if (intel->sys_buffer != NULL) {
         free(intel->sys_buffer);
         intel->sys_buffer = NULL;
      }

      return GL_RELEASED_APPLE;
   } else {
      /* XXX Create the buffer and madvise(MADV_DONTNEED)? */
      return intel_buffer_purgeable (ctx,
                                     intel_bufferobj_buffer(intel_context(ctx),
                                                            intel, INTEL_READ),
                                     option);
   }
}
static GLenum
intel_texture_object_purgeable(struct gl_context * ctx,
                               struct gl_texture_object *obj,
                               GLenum option)
{
   struct intel_texture_object *intel;

   intel = intel_texture_object(obj);
   if (intel->mt == NULL || intel->mt->region == NULL)
      return GL_RELEASED_APPLE;

   return intel_buffer_purgeable (ctx, intel->mt->region->buffer, option);
}
static GLenum
intel_render_object_purgeable(struct gl_context * ctx,
                              struct gl_renderbuffer *obj,
                              GLenum option)
{
   struct intel_renderbuffer *intel;

   intel = intel_renderbuffer(obj);
   if (intel->region == NULL)
      return GL_RELEASED_APPLE;

   return intel_buffer_purgeable (ctx, intel->region->buffer, option);
}
static GLenum
intel_buffer_unpurgeable(struct gl_context * ctx,
                         drm_intel_bo *buffer,
                         GLenum option)
{
   int retained;

   retained = 0;
   if (buffer != NULL)
      retained = drm_intel_bo_madvise (buffer, I915_MADV_WILLNEED);

   return retained ? GL_RETAINED_APPLE : GL_UNDEFINED_APPLE;
}

static GLenum
intel_buffer_object_unpurgeable(struct gl_context * ctx,
                                struct gl_buffer_object *obj,
                                GLenum option)
{
   return intel_buffer_unpurgeable (ctx, intel_buffer_object (obj)->buffer,
                                    option);
}
static GLenum
intel_texture_object_unpurgeable(struct gl_context * ctx,
                                 struct gl_texture_object *obj,
                                 GLenum option)
{
   struct intel_texture_object *intel;

   intel = intel_texture_object(obj);
   if (intel->mt == NULL || intel->mt->region == NULL)
      return GL_UNDEFINED_APPLE;

   return intel_buffer_unpurgeable (ctx, intel->mt->region->buffer, option);
}
static GLenum
intel_render_object_unpurgeable(struct gl_context * ctx,
                                struct gl_renderbuffer *obj,
                                GLenum option)
{
   struct intel_renderbuffer *intel;

   intel = intel_renderbuffer(obj);
   if (intel->region == NULL)
      return GL_UNDEFINED_APPLE;

   return intel_buffer_unpurgeable (ctx, intel->region->buffer, option);
}
#endif /* FEATURE_APPLE_object_purgeable */
void
intelInitBufferObjectFuncs(struct dd_function_table *functions)
{
   functions->NewBufferObject = intel_bufferobj_alloc;
   functions->DeleteBuffer = intel_bufferobj_free;
   functions->BufferData = intel_bufferobj_data;
   functions->BufferSubData = intel_bufferobj_subdata;
   functions->GetBufferSubData = intel_bufferobj_get_subdata;
   functions->MapBuffer = intel_bufferobj_map;
   functions->MapBufferRange = intel_bufferobj_map_range;
   functions->FlushMappedBufferRange = intel_bufferobj_flush_mapped_range;
   functions->UnmapBuffer = intel_bufferobj_unmap;
   functions->CopyBufferSubData = intel_bufferobj_copy_subdata;

#if FEATURE_APPLE_object_purgeable
   functions->BufferObjectPurgeable = intel_buffer_object_purgeable;
   functions->TextureObjectPurgeable = intel_texture_object_purgeable;
   functions->RenderObjectPurgeable = intel_render_object_purgeable;

   functions->BufferObjectUnpurgeable = intel_buffer_object_unpurgeable;
   functions->TextureObjectUnpurgeable = intel_texture_object_unpurgeable;
   functions->RenderObjectUnpurgeable = intel_render_object_unpurgeable;
#endif
}