/**************************************************************************
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "main/imports.h"
#include "main/mfeatures.h"
#include "main/mtypes.h"
#include "main/macros.h"
#include "main/bufferobj.h"

#include "intel_blit.h"
#include "intel_buffer_objects.h"
#include "intel_batchbuffer.h"
#include "intel_context.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
#include "intel_regions.h"
static GLboolean
intel_bufferobj_unmap(struct gl_context * ctx,
                      GLenum target, struct gl_buffer_object *obj);
/** Allocates a new drm_intel_bo to store the data for the buffer object. */
static void
intel_bufferobj_alloc_buffer(struct intel_context *intel,
                             struct intel_buffer_object *intel_obj)
{
   intel_obj->buffer = drm_intel_bo_alloc(intel->bufmgr, "bufferobj",
                                          intel_obj->Base.Size, 64);
}
static void
release_buffer(struct intel_buffer_object *intel_obj)
{
   drm_intel_bo_unreference(intel_obj->buffer);
   intel_obj->buffer = NULL;
   intel_obj->offset = 0;
   intel_obj->source = 0;
}
/**
 * There is some duplication between Mesa's bufferobjects and our
 * bufmgr buffers.  Both have an integer handle and a hashtable to
 * look up an opaque structure.  It would be nice if the handles and
 * internal structure were somehow shared.
 */
static struct gl_buffer_object *
intel_bufferobj_alloc(struct gl_context * ctx, GLuint name, GLenum target)
{
   struct intel_buffer_object *obj = CALLOC_STRUCT(intel_buffer_object);

   _mesa_initialize_buffer_object(&obj->Base, name, target);

   obj->buffer = NULL;

   return &obj->Base;
}
/* Break the COW tie to the region.  The region gets to keep the data.
 */
static void
intel_bufferobj_release_region(struct intel_context *intel,
                               struct intel_buffer_object *intel_obj)
{
   assert(intel_obj->region->buffer == intel_obj->buffer);
   intel_obj->region->pbo = NULL;
   intel_obj->region = NULL;

   release_buffer(intel_obj);
}
/* Break the COW tie to the region.  Both the pbo and the region end
 * up with a copy of the data.
 */
static void
intel_bufferobj_cow(struct intel_context *intel,
                    struct intel_buffer_object *intel_obj)
{
   assert(intel_obj->region);
   intel_region_cow(intel, intel_obj->region);
}
/**
 * Deallocate/free a vertex/pixel buffer object.
 * Called via glDeleteBuffersARB().
 */
static void
intel_bufferobj_free(struct gl_context * ctx, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);

   /* Buffer objects are automatically unmapped when deleting according
    * to the spec, but Mesa doesn't do UnmapBuffer for us at context destroy
    * (though it does if you call glDeleteBuffers).
    */
   if (obj->Pointer)
      intel_bufferobj_unmap(ctx, 0, obj);

   free(intel_obj->sys_buffer);
   if (intel_obj->region) {
      intel_bufferobj_release_region(intel, intel_obj);
   }
   else if (intel_obj->buffer) {
      drm_intel_bo_unreference(intel_obj->buffer);
   }

   free(intel_obj);
}
/**
 * Allocate space for and store data in a buffer object.  Any data that was
 * previously stored in the buffer object is lost.  If data is NULL,
 * memory will be allocated, but no copy will occur.
 * Called via ctx->Driver.BufferData().
 * \return GL_TRUE for success, GL_FALSE if out of memory
 */
static GLboolean
intel_bufferobj_data(struct gl_context * ctx,
                     GLenum target,
                     GLsizeiptrARB size,
                     const GLvoid * data,
                     GLenum usage, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   intel_obj->Base.Size = size;
   intel_obj->Base.Usage = usage;

   assert(!obj->Pointer); /* Mesa should have unmapped it */

   if (intel_obj->region)
      intel_bufferobj_release_region(intel, intel_obj);

   if (intel_obj->buffer != NULL)
      release_buffer(intel_obj);

   free(intel_obj->sys_buffer);
   intel_obj->sys_buffer = NULL;

   if (size != 0) {
      if (usage == GL_DYNAMIC_DRAW
#ifdef I915
          /* On pre-965, stick VBOs in system memory, as we're always doing
           * swtnl with their contents anyway.
           */
          || target == GL_ARRAY_BUFFER || target == GL_ELEMENT_ARRAY_BUFFER
#endif
         ) {
         intel_obj->sys_buffer = malloc(size);
         if (intel_obj->sys_buffer != NULL) {
            if (data != NULL)
               memcpy(intel_obj->sys_buffer, data, size);
            return GL_TRUE;
         }
      }

      intel_bufferobj_alloc_buffer(intel, intel_obj);
      if (!intel_obj->buffer)
         return GL_FALSE;

      if (data != NULL)
         drm_intel_bo_subdata(intel_obj->buffer, 0, size, data);
   }

   return GL_TRUE;
}
/**
 * Replace data in a subrange of buffer object.  If the data range
 * specified by size + offset extends beyond the end of the buffer or
 * if data is NULL, no copy is performed.
 * Called via glBufferSubDataARB().
 */
static void
intel_bufferobj_subdata(struct gl_context * ctx,
                        GLenum target,
                        GLintptrARB offset,
                        GLsizeiptrARB size,
                        const GLvoid * data, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
   GLboolean busy;

   if (size == 0)
      return;

   assert(intel_obj);

   if (intel_obj->region)
      intel_bufferobj_cow(intel, intel_obj);

   /* If we have a single copy in system memory, update that */
   if (intel_obj->sys_buffer) {
      if (intel_obj->source)
         release_buffer(intel_obj);

      if (intel_obj->buffer == NULL) {
         memcpy((char *)intel_obj->sys_buffer + offset, data, size);
         return;
      }

      free(intel_obj->sys_buffer);
      intel_obj->sys_buffer = NULL;
   }

   /* Otherwise we need to update the copy in video memory. */
   busy =
      drm_intel_bo_busy(intel_obj->buffer) ||
      drm_intel_bo_references(intel->batch.bo, intel_obj->buffer);

   /* replace the current busy bo with fresh data */
   if (busy && size == intel_obj->Base.Size) {
      drm_intel_bo_unreference(intel_obj->buffer);
      intel_bufferobj_alloc_buffer(intel, intel_obj);
      drm_intel_bo_subdata(intel_obj->buffer, 0, size, data);
   } else if (intel->gen < 6) {
      if (busy) {
         drm_intel_bo *temp_bo;

         temp_bo = drm_intel_bo_alloc(intel->bufmgr, "subdata temp", size, 64);

         drm_intel_bo_subdata(temp_bo, 0, size, data);

         intel_emit_linear_blit(intel,
                                intel_obj->buffer, offset,
                                temp_bo, 0,
                                size);

         drm_intel_bo_unreference(temp_bo);
      } else {
         drm_intel_bo_subdata(intel_obj->buffer, offset, size, data);
      }
   } else {
      /* Can't use the blit to modify the buffer in the middle of batch. */
      if (drm_intel_bo_references(intel->batch.bo, intel_obj->buffer)) {
         intel_batchbuffer_flush(intel);
      }
      drm_intel_bo_subdata(intel_obj->buffer, offset, size, data);
   }
}
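/* Summary of the update strategy above (a restatement, mirroring the code):
 *
 *   - busy BO, full-size write:  orphan the old BO, upload into a fresh one
 *   - busy BO, partial, gen < 6: upload to a temp BO, blit it into place
 *   - gen >= 6 otherwise:        flush the batch if it references the BO,
 *                                then drm_intel_bo_subdata()
 *   - idle BO on gen < 6:        drm_intel_bo_subdata() directly
 *
 * E.g. rewriting the whole of a VBO the GPU is still reading never stalls:
 * the old BO is unreferenced and a fresh one takes its place.
 */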
/**
 * Called via glGetBufferSubDataARB().
 */
static void
intel_bufferobj_get_subdata(struct gl_context * ctx,
                            GLenum target,
                            GLintptrARB offset,
                            GLsizeiptrARB size,
                            GLvoid * data, struct gl_buffer_object *obj)
{
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);
   if (intel_obj->sys_buffer)
      memcpy(data, (char *)intel_obj->sys_buffer + offset, size);
   else
      drm_intel_bo_get_subdata(intel_obj->buffer, offset, size, data);
}
/**
 * Called via glMapBufferARB().
 */
static void *
intel_bufferobj_map(struct gl_context * ctx,
                    GLenum target,
                    GLenum access, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
   GLboolean read_only = (access == GL_READ_ONLY_ARB);
   GLboolean write_only = (access == GL_WRITE_ONLY_ARB);

   assert(intel_obj);

   if (intel_obj->sys_buffer) {
      if (!read_only && intel_obj->source) {
         release_buffer(intel_obj);
      }

      if (!intel_obj->buffer || intel_obj->source) {
         obj->Pointer = intel_obj->sys_buffer;
         obj->Length = obj->Size;
         obj->Offset = 0;
         return obj->Pointer;
      }

      free(intel_obj->sys_buffer);
      intel_obj->sys_buffer = NULL;
   }

   /* Flush any existing batchbuffer that might reference this data. */
   if (drm_intel_bo_references(intel->batch.bo, intel_obj->buffer))
      intel_flush(ctx);

   if (intel_obj->region)
      intel_bufferobj_cow(intel, intel_obj);

   if (intel_obj->buffer == NULL) {
      obj->Pointer = NULL;
      return NULL;
   }

   /* A write-only mapping can go through the GTT (write-combined), which
    * avoids cache flushes; reads through it would be slow, so readable
    * mappings use a regular CPU map instead.
    */
   if (write_only) {
      drm_intel_gem_bo_map_gtt(intel_obj->buffer);
      intel_obj->mapped_gtt = GL_TRUE;
   } else {
      drm_intel_bo_map(intel_obj->buffer, !read_only);
      intel_obj->mapped_gtt = GL_FALSE;
   }

   obj->Pointer = intel_obj->buffer->virtual;
   obj->Length = obj->Size;
   obj->Offset = 0;

   return obj->Pointer;
}
/**
 * Called via glMapBufferRange().
 *
 * The goal of this extension is to allow apps to accumulate their rendering
 * at the same time as they accumulate their buffer object.  Without it,
 * you'd end up blocking on execution of rendering every time you mapped
 * the buffer to put new data in.
 *
 * We support it in 3 ways: If unsynchronized, then don't bother
 * flushing the batchbuffer before mapping the buffer, which can save blocking
 * in many cases.  If we would still block, and they allow the whole buffer
 * to be invalidated, then just allocate a new buffer to replace the old one.
 * If not, and we'd block, and they allow the subrange of the buffer to be
 * invalidated, then we can make a new little BO, let them write into that,
 * and blit it into the real BO at unmap time.
 */
static void *
intel_bufferobj_map_range(struct gl_context * ctx,
                          GLenum target, GLintptr offset, GLsizeiptr length,
                          GLbitfield access, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
   GLboolean read_only = (access == GL_READ_ONLY_ARB);

   assert(intel_obj);

   /* _mesa_MapBufferRange (GL entrypoint) sets these, but the vbo module also
    * internally uses our functions directly.
    */
   obj->Offset = offset;
   obj->Length = length;
   obj->AccessFlags = access;

   if (intel_obj->sys_buffer) {
      if (!read_only && intel_obj->source)
         release_buffer(intel_obj);

      if (!intel_obj->buffer || intel_obj->source) {
         obj->Pointer = intel_obj->sys_buffer + offset;
         return obj->Pointer;
      }

      free(intel_obj->sys_buffer);
      intel_obj->sys_buffer = NULL;
   }

   if (intel_obj->region)
      intel_bufferobj_cow(intel, intel_obj);

   /* If the mapping is synchronized with other GL operations, flush
    * the batchbuffer so that GEM knows about the buffer access for later
    * syncing.
    */
   if (!(access & GL_MAP_UNSYNCHRONIZED_BIT) &&
       drm_intel_bo_references(intel->batch.bo, intel_obj->buffer))
      intel_flush(ctx);

   if (intel_obj->buffer == NULL) {
      obj->Pointer = NULL;
      return NULL;
   }

   /* If the user doesn't care about existing buffer contents and mapping
    * would cause us to block, then throw out the old buffer.
    */
   if (!(access & GL_MAP_UNSYNCHRONIZED_BIT) &&
       (access & GL_MAP_INVALIDATE_BUFFER_BIT) &&
       drm_intel_bo_busy(intel_obj->buffer)) {
      drm_intel_bo_unreference(intel_obj->buffer);
      intel_bufferobj_alloc_buffer(intel, intel_obj);
   }

   /* If the user is mapping a range of an active buffer object but
    * doesn't require the current contents of that range, make a new
    * BO, and we'll copy what they put in there out at unmap or
    * FlushRange time.
    */
   if ((access & GL_MAP_INVALIDATE_RANGE_BIT) &&
       drm_intel_bo_busy(intel_obj->buffer)) {
      if (access & GL_MAP_FLUSH_EXPLICIT_BIT) {
         intel_obj->range_map_buffer = malloc(length);
         obj->Pointer = intel_obj->range_map_buffer;
      } else {
         intel_obj->range_map_bo = drm_intel_bo_alloc(intel->bufmgr,
                                                      "range map",
                                                      length, 64);
         if (!(access & GL_MAP_READ_BIT)) {
            drm_intel_gem_bo_map_gtt(intel_obj->range_map_bo);
            intel_obj->mapped_gtt = GL_TRUE;
         } else {
            drm_intel_bo_map(intel_obj->range_map_bo,
                             (access & GL_MAP_WRITE_BIT) != 0);
            intel_obj->mapped_gtt = GL_FALSE;
         }
         obj->Pointer = intel_obj->range_map_bo->virtual;
      }
      return obj->Pointer;
   }

   if (!(access & GL_MAP_READ_BIT)) {
      drm_intel_gem_bo_map_gtt(intel_obj->buffer);
      intel_obj->mapped_gtt = GL_TRUE;
   } else {
      drm_intel_bo_map(intel_obj->buffer, (access & GL_MAP_WRITE_BIT) != 0);
      intel_obj->mapped_gtt = GL_FALSE;
   }

   obj->Pointer = intel_obj->buffer->virtual + offset;
   return obj->Pointer;
}
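/* Illustrative sketch (app-side GL, not driver code) of the three
 * strategies described above:
 *
 *    // 1. Unsynchronized: no batch flush; the caller promises not to race.
 *    glMapBufferRange(GL_ARRAY_BUFFER, off, len,
 *                     GL_MAP_WRITE_BIT | GL_MAP_UNSYNCHRONIZED_BIT);
 *
 *    // 2. Whole-buffer invalidate: we may swap in a fresh BO.
 *    glMapBufferRange(GL_ARRAY_BUFFER, 0, size,
 *                     GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_BUFFER_BIT);
 *
 *    // 3. Range invalidate on a busy BO: writes go to a little temporary
 *    //    BO (or a malloc'ed buffer with GL_MAP_FLUSH_EXPLICIT_BIT) that
 *    //    is blitted into the real BO at unmap/flush time.
 *    glMapBufferRange(GL_ARRAY_BUFFER, off, len,
 *                     GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_RANGE_BIT);
 */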
/* Ideally we'd use a BO to avoid taking up cache space for the temporary
 * data, but FlushMappedBufferRange may be followed by further writes to
 * the pointer, so we would have to re-map after emitting our blit, which
 * would defeat the point.
 */
static void
intel_bufferobj_flush_mapped_range(struct gl_context *ctx, GLenum target,
                                   GLintptr offset, GLsizeiptr length,
                                   struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
   drm_intel_bo *temp_bo;

   /* Unless we're in the range map using a temporary system buffer,
    * there's no work to do.
    */
   if (intel_obj->range_map_buffer == NULL)
      return;

   if (length == 0)
      return;

   temp_bo = drm_intel_bo_alloc(intel->bufmgr, "range map flush", length, 64);

   drm_intel_bo_subdata(temp_bo, 0, length, intel_obj->range_map_buffer);

   intel_emit_linear_blit(intel,
                          intel_obj->buffer, obj->Offset + offset,
                          temp_bo, 0,
                          length);

   drm_intel_bo_unreference(temp_bo);
}
/**
 * Called via glUnmapBuffer().
 */
static GLboolean
intel_bufferobj_unmap(struct gl_context * ctx,
                      GLenum target, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);
   assert(obj->Pointer);
   if (intel_obj->sys_buffer != NULL) {
      /* always keep the mapping around. */
   } else if (intel_obj->range_map_buffer != NULL) {
      /* Since we've emitted some blits to buffers that will (likely) be used
       * in rendering operations in other cache domains in this batch, emit a
       * flush.  Once again, we wish for a domain tracker in libdrm to cover
       * usage inside of a batchbuffer.
       */
      intel_batchbuffer_emit_mi_flush(intel);
      free(intel_obj->range_map_buffer);
      intel_obj->range_map_buffer = NULL;
   } else if (intel_obj->range_map_bo != NULL) {
      if (intel_obj->mapped_gtt) {
         drm_intel_gem_bo_unmap_gtt(intel_obj->range_map_bo);
      } else {
         drm_intel_bo_unmap(intel_obj->range_map_bo);
      }

      intel_emit_linear_blit(intel,
                             intel_obj->buffer, obj->Offset,
                             intel_obj->range_map_bo, 0,
                             obj->Length);

      /* Since we've emitted some blits to buffers that will (likely) be used
       * in rendering operations in other cache domains in this batch, emit a
       * flush.  Once again, we wish for a domain tracker in libdrm to cover
       * usage inside of a batchbuffer.
       */
      intel_batchbuffer_emit_mi_flush(intel);

      drm_intel_bo_unreference(intel_obj->range_map_bo);
      intel_obj->range_map_bo = NULL;
   } else if (intel_obj->buffer != NULL) {
      if (intel_obj->mapped_gtt) {
         drm_intel_gem_bo_unmap_gtt(intel_obj->buffer);
      } else {
         drm_intel_bo_unmap(intel_obj->buffer);
      }
   }

   obj->Pointer = NULL;
   obj->Offset = 0;
   obj->Length = 0;

   return GL_TRUE;
}
drm_intel_bo *
intel_bufferobj_buffer(struct intel_context *intel,
                       struct intel_buffer_object *intel_obj,
                       GLuint flag)
{
   if (intel_obj->region) {
      if (flag == INTEL_WRITE_PART)
         intel_bufferobj_cow(intel, intel_obj);
      else if (flag == INTEL_WRITE_FULL) {
         intel_bufferobj_release_region(intel, intel_obj);
         intel_bufferobj_alloc_buffer(intel, intel_obj);
      }
   }

   if (intel_obj->source)
      release_buffer(intel_obj);

   if (intel_obj->buffer == NULL) {
      intel_bufferobj_alloc_buffer(intel, intel_obj);
      drm_intel_bo_subdata(intel_obj->buffer,
                           0, intel_obj->Base.Size,
                           intel_obj->sys_buffer);

      free(intel_obj->sys_buffer);
      intel_obj->sys_buffer = NULL;
      intel_obj->offset = 0;
   }

   return intel_obj->buffer;
}
#define INTEL_UPLOAD_SIZE (64*1024)
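/* The upload machinery below streams small pieces of dynamic data into a
 * shared, append-only "upload" BO of at least INTEL_UPLOAD_SIZE bytes.
 * Consecutive small copies are batched through intel->upload.buffer so
 * that each drm_intel_bo_subdata() call covers a reasonable chunk rather
 * than a few bytes at a time.
 */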
void
intel_upload_finish(struct intel_context *intel)
{
   if (!intel->upload.bo)
      return;

   if (intel->upload.buffer_len) {
      drm_intel_bo_subdata(intel->upload.bo,
                           intel->upload.buffer_offset,
                           intel->upload.buffer_len,
                           intel->upload.buffer);
      intel->upload.buffer_len = 0;
   }

   drm_intel_bo_unreference(intel->upload.bo);
   intel->upload.bo = NULL;
}
static void wrap_buffers(struct intel_context *intel, GLuint size)
{
   intel_upload_finish(intel);

   if (size < INTEL_UPLOAD_SIZE)
      size = INTEL_UPLOAD_SIZE;

   intel->upload.bo = drm_intel_bo_alloc(intel->bufmgr, "upload", size, 0);
   intel->upload.offset = 0;
}
void intel_upload_data(struct intel_context *intel,
                       const void *ptr, GLuint size, GLuint align,
                       drm_intel_bo **return_bo,
                       GLuint *return_offset)
{
   GLuint base, delta;

   base = (intel->upload.offset + align - 1) / align * align;
   if (intel->upload.bo == NULL || base + size > intel->upload.bo->size) {
      wrap_buffers(intel, size);
      base = 0;
   }

   drm_intel_bo_reference(intel->upload.bo);
   *return_bo = intel->upload.bo;
   *return_offset = base;

   delta = base - intel->upload.offset;
   if (intel->upload.buffer_len &&
       intel->upload.buffer_len + delta + size > sizeof(intel->upload.buffer)) {
      drm_intel_bo_subdata(intel->upload.bo,
                           intel->upload.buffer_offset,
                           intel->upload.buffer_len,
                           intel->upload.buffer);
      intel->upload.buffer_len = 0;
   }

   if (size < sizeof(intel->upload.buffer)) {
      if (intel->upload.buffer_len == 0)
         intel->upload.buffer_offset = base;
      else
         intel->upload.buffer_len += delta;

      memcpy(intel->upload.buffer + intel->upload.buffer_len, ptr, size);
      intel->upload.buffer_len += size;
   } else {
      drm_intel_bo_subdata(intel->upload.bo, base, size, ptr);
   }

   intel->upload.offset = base + size;
}
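/* Worked example of the align-up arithmetic above: with
 * intel->upload.offset == 100 and align == 64,
 * base = (100 + 63) / 64 * 64 = 128 in integer arithmetic, so 28 bytes of
 * padding are skipped and the data lands 64-byte aligned at offset 128.
 */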
void *intel_upload_map(struct intel_context *intel, GLuint size, GLuint align)
{
   GLuint base, delta;
   char *ptr;

   base = (intel->upload.offset + align - 1) / align * align;
   if (intel->upload.bo == NULL || base + size > intel->upload.bo->size) {
      wrap_buffers(intel, size);
      base = 0;
   }

   delta = base - intel->upload.offset;
   if (intel->upload.buffer_len &&
       intel->upload.buffer_len + delta + size > sizeof(intel->upload.buffer)) {
      drm_intel_bo_subdata(intel->upload.bo,
                           intel->upload.buffer_offset,
                           intel->upload.buffer_len,
                           intel->upload.buffer);
      intel->upload.buffer_len = 0;
   }

   if (size <= sizeof(intel->upload.buffer)) {
      if (intel->upload.buffer_len == 0)
         intel->upload.buffer_offset = base;
      else
         intel->upload.buffer_len += delta;

      ptr = intel->upload.buffer + intel->upload.buffer_len;
      intel->upload.buffer_len += size;
   } else
      ptr = malloc(size);

   return ptr;
}
void intel_upload_unmap(struct intel_context *intel,
                        const void *ptr, GLuint size, GLuint align,
                        drm_intel_bo **return_bo,
                        GLuint *return_offset)
{
   GLuint base;

   base = (intel->upload.offset + align - 1) / align * align;
   if (size > sizeof(intel->upload.buffer)) {
      drm_intel_bo_subdata(intel->upload.bo, base, size, ptr);
      free((void *)ptr);
   }

   drm_intel_bo_reference(intel->upload.bo);
   *return_bo = intel->upload.bo;
   *return_offset = base;

   intel->upload.offset = base + size;
}
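/** Returns a BO usable as a blit source for this buffer object's contents,
 * uploading any system-memory copy on demand via the shared upload BO.
 * *offset receives the byte offset of the data within the returned BO.
 */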
drm_intel_bo *
intel_bufferobj_source(struct intel_context *intel,
                       struct intel_buffer_object *intel_obj,
                       GLuint align, GLuint *offset)
{
   if (intel_obj->buffer == NULL) {
      intel_upload_data(intel,
                        intel_obj->sys_buffer, intel_obj->Base.Size, align,
                        &intel_obj->buffer, &intel_obj->offset);
      intel_obj->source = 1;
   }

   *offset = intel_obj->offset;
   return intel_obj->buffer;
}
static void
intel_bufferobj_copy_subdata(struct gl_context *ctx,
                             struct gl_buffer_object *src,
                             struct gl_buffer_object *dst,
                             GLintptr read_offset, GLintptr write_offset,
                             GLsizeiptr size)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_src = intel_buffer_object(src);
   struct intel_buffer_object *intel_dst = intel_buffer_object(dst);
   drm_intel_bo *src_bo, *dst_bo;
   GLuint src_offset;

   if (size == 0)
      return;

   /* If we're in system memory, just map and memcpy. */
   if (intel_src->sys_buffer || intel_dst->sys_buffer || intel->gen >= 6) {
      /* The same buffer may be used, but note that regions copied may
       * not overlap.
       */
      if (src == dst) {
         char *ptr = intel_bufferobj_map(ctx, GL_COPY_WRITE_BUFFER,
                                         GL_READ_WRITE, dst);

         memmove(ptr + write_offset, ptr + read_offset, size);
         intel_bufferobj_unmap(ctx, GL_COPY_WRITE_BUFFER, dst);
      } else {
         const char *src_ptr;
         char *dst_ptr;

         src_ptr = intel_bufferobj_map(ctx, GL_COPY_READ_BUFFER,
                                       GL_READ_ONLY, src);
         dst_ptr = intel_bufferobj_map(ctx, GL_COPY_WRITE_BUFFER,
                                       GL_WRITE_ONLY, dst);

         memcpy(dst_ptr + write_offset, src_ptr + read_offset, size);

         intel_bufferobj_unmap(ctx, GL_COPY_READ_BUFFER, src);
         intel_bufferobj_unmap(ctx, GL_COPY_WRITE_BUFFER, dst);
      }
      return;
   }

   /* Otherwise, we have real BOs, so blit them. */

   dst_bo = intel_bufferobj_buffer(intel, intel_dst, INTEL_WRITE_PART);
   src_bo = intel_bufferobj_source(intel, intel_src, 64, &src_offset);

   intel_emit_linear_blit(intel,
                          dst_bo, write_offset,
                          src_bo, read_offset + src_offset, size);

   /* Since we've emitted some blits to buffers that will (likely) be used
    * in rendering operations in other cache domains in this batch, emit a
    * flush.  Once again, we wish for a domain tracker in libdrm to cover
    * usage inside of a batchbuffer.
    */
   intel_batchbuffer_emit_mi_flush(intel);
}
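/* Illustrative sketch (app-side GL, not driver code; names are made up):
 * the hook above services glCopyBufferSubData(), e.g.
 *
 *    glBindBuffer(GL_COPY_READ_BUFFER, src_vbo);
 *    glBindBuffer(GL_COPY_WRITE_BUFFER, dst_vbo);
 *    glCopyBufferSubData(GL_COPY_READ_BUFFER, GL_COPY_WRITE_BUFFER,
 *                        read_off, write_off, size);
 *
 * With system-memory storage or on gen6+ this becomes a map and memcpy;
 * otherwise the blitter does the copy via intel_emit_linear_blit().
 */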
#if FEATURE_APPLE_object_purgeable
static GLenum
intel_buffer_purgeable(struct gl_context * ctx,
                       drm_intel_bo *buffer,
                       GLenum option)
{
   int retained = 0;

   if (buffer != NULL)
      retained = drm_intel_bo_madvise (buffer, I915_MADV_DONTNEED);

   return retained ? GL_VOLATILE_APPLE : GL_RELEASED_APPLE;
}
static GLenum
intel_buffer_object_purgeable(struct gl_context * ctx,
                              struct gl_buffer_object *obj,
                              GLenum option)
{
   struct intel_buffer_object *intel;

   intel = intel_buffer_object (obj);
   if (intel->buffer != NULL)
      return intel_buffer_purgeable (ctx, intel->buffer, option);

   if (option == GL_RELEASED_APPLE) {
      if (intel->sys_buffer != NULL) {
         free(intel->sys_buffer);
         intel->sys_buffer = NULL;
      }

      return GL_RELEASED_APPLE;
   } else {
      /* XXX Create the buffer and madvise(MADV_DONTNEED)? */
      return intel_buffer_purgeable (ctx,
                                     intel_bufferobj_buffer(intel_context(ctx),
                                                            intel, INTEL_READ),
                                     option);
   }
}
static GLenum
intel_texture_object_purgeable(struct gl_context * ctx,
                               struct gl_texture_object *obj,
                               GLenum option)
{
   struct intel_texture_object *intel;

   intel = intel_texture_object(obj);
   if (intel->mt == NULL || intel->mt->region == NULL)
      return GL_RELEASED_APPLE;

   return intel_buffer_purgeable (ctx, intel->mt->region->buffer, option);
}
static GLenum
intel_render_object_purgeable(struct gl_context * ctx,
                              struct gl_renderbuffer *obj,
                              GLenum option)
{
   struct intel_renderbuffer *intel;

   intel = intel_renderbuffer(obj);
   if (intel->region == NULL)
      return GL_RELEASED_APPLE;

   return intel_buffer_purgeable (ctx, intel->region->buffer, option);
}
static GLenum
intel_buffer_unpurgeable(struct gl_context * ctx,
                         drm_intel_bo *buffer,
                         GLenum option)
{
   int retained;

   retained = 0;
   if (buffer != NULL)
      retained = drm_intel_bo_madvise (buffer, I915_MADV_WILLNEED);

   return retained ? GL_RETAINED_APPLE : GL_UNDEFINED_APPLE;
}
static GLenum
intel_buffer_object_unpurgeable(struct gl_context * ctx,
                                struct gl_buffer_object *obj,
                                GLenum option)
{
   return intel_buffer_unpurgeable (ctx, intel_buffer_object (obj)->buffer, option);
}
static GLenum
intel_texture_object_unpurgeable(struct gl_context * ctx,
                                 struct gl_texture_object *obj,
                                 GLenum option)
{
   struct intel_texture_object *intel;

   intel = intel_texture_object(obj);
   if (intel->mt == NULL || intel->mt->region == NULL)
      return GL_UNDEFINED_APPLE;

   return intel_buffer_unpurgeable (ctx, intel->mt->region->buffer, option);
}
static GLenum
intel_render_object_unpurgeable(struct gl_context * ctx,
                                struct gl_renderbuffer *obj,
                                GLenum option)
{
   struct intel_renderbuffer *intel;

   intel = intel_renderbuffer(obj);
   if (intel->region == NULL)
      return GL_UNDEFINED_APPLE;

   return intel_buffer_unpurgeable (ctx, intel->region->buffer, option);
}
#endif /* FEATURE_APPLE_object_purgeable */
void
intelInitBufferObjectFuncs(struct dd_function_table *functions)
{
   functions->NewBufferObject = intel_bufferobj_alloc;
   functions->DeleteBuffer = intel_bufferobj_free;
   functions->BufferData = intel_bufferobj_data;
   functions->BufferSubData = intel_bufferobj_subdata;
   functions->GetBufferSubData = intel_bufferobj_get_subdata;
   functions->MapBuffer = intel_bufferobj_map;
   functions->MapBufferRange = intel_bufferobj_map_range;
   functions->FlushMappedBufferRange = intel_bufferobj_flush_mapped_range;
   functions->UnmapBuffer = intel_bufferobj_unmap;
   functions->CopyBufferSubData = intel_bufferobj_copy_subdata;

#if FEATURE_APPLE_object_purgeable
   functions->BufferObjectPurgeable = intel_buffer_object_purgeable;
   functions->TextureObjectPurgeable = intel_texture_object_purgeable;
   functions->RenderObjectPurgeable = intel_render_object_purgeable;

   functions->BufferObjectUnpurgeable = intel_buffer_object_unpurgeable;
   functions->TextureObjectUnpurgeable = intel_texture_object_unpurgeable;
   functions->RenderObjectUnpurgeable = intel_render_object_unpurgeable;
#endif
}