i965: Move the remaining intel code to the i965 directory.
[mesa.git] / src / mesa / drivers / dri / i965 / intel_buffer_objects.c
/**************************************************************************
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "main/imports.h"
#include "main/mtypes.h"
#include "main/macros.h"
#include "main/bufferobj.h"

#include "intel_blit.h"
#include "intel_buffer_objects.h"
#include "intel_batchbuffer.h"
#include "intel_context.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
#include "intel_regions.h"

#ifndef I915
#include "brw_context.h"
#endif

static GLboolean
intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj);

/** Allocates a new drm_intel_bo to store the data for the buffer object. */
static void
intel_bufferobj_alloc_buffer(struct intel_context *intel,
                             struct intel_buffer_object *intel_obj)
{
   intel_obj->buffer = drm_intel_bo_alloc(intel->bufmgr, "bufferobj",
                                          intel_obj->Base.Size, 64);

#ifndef I915
   /* the buffer might be bound as a uniform buffer, need to update it
    */
   {
      struct brw_context *brw = brw_context(&intel->ctx);
      brw->state.dirty.brw |= BRW_NEW_UNIFORM_BUFFER;
   }
#endif
}

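/* Drop the drm_intel_bo backing the buffer object and forget any offset
 * into a shared upload buffer it may have been sourced from.
 */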
static void
release_buffer(struct intel_buffer_object *intel_obj)
{
   drm_intel_bo_unreference(intel_obj->buffer);
   intel_obj->buffer = NULL;
   intel_obj->offset = 0;
   intel_obj->source = 0;
}

/**
 * There is some duplication between mesa's bufferobjects and our
 * bufmgr buffers.  Both have an integer handle and a hashtable to
 * look up an opaque structure.  It would be nice if the handles and
 * internal structure were somehow shared.
 */
static struct gl_buffer_object *
intel_bufferobj_alloc(struct gl_context * ctx, GLuint name, GLenum target)
{
   struct intel_buffer_object *obj = CALLOC_STRUCT(intel_buffer_object);

   _mesa_initialize_buffer_object(ctx, &obj->Base, name, target);

   obj->buffer = NULL;

   return &obj->Base;
}

/**
 * Deallocate/free a vertex/pixel buffer object.
 * Called via glDeleteBuffersARB().
 */
static void
intel_bufferobj_free(struct gl_context * ctx, struct gl_buffer_object *obj)
{
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);

   /* Buffer objects are automatically unmapped when deleting according
    * to the spec, but Mesa doesn't do UnmapBuffer for us at context destroy
    * (though it does if you call glDeleteBuffers)
    */
   if (obj->Pointer)
      intel_bufferobj_unmap(ctx, obj);

   free(intel_obj->sys_buffer);

   drm_intel_bo_unreference(intel_obj->buffer);
   free(intel_obj);
}


/**
 * Allocate space for and store data in a buffer object.  Any data that was
 * previously stored in the buffer object is lost.  If data is NULL,
 * memory will be allocated, but no copy will occur.
 * Called via ctx->Driver.BufferData().
 * \return true for success, false if out of memory
 */
static GLboolean
intel_bufferobj_data(struct gl_context * ctx,
                     GLenum target,
                     GLsizeiptrARB size,
                     const GLvoid * data,
                     GLenum usage, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   /* Part of the ABI, but this function doesn't use it.
    */
#ifndef I915
   (void) target;
#endif

   intel_obj->Base.Size = size;
   intel_obj->Base.Usage = usage;

   assert(!obj->Pointer); /* Mesa should have unmapped it */

   if (intel_obj->buffer != NULL)
      release_buffer(intel_obj);

   free(intel_obj->sys_buffer);
   intel_obj->sys_buffer = NULL;

   if (size != 0) {
#ifdef I915
      /* On pre-965, stick VBOs in system memory, as we're always doing
       * swtnl with their contents anyway.
       */
      if (target == GL_ARRAY_BUFFER || target == GL_ELEMENT_ARRAY_BUFFER) {
         intel_obj->sys_buffer = malloc(size);
         if (intel_obj->sys_buffer != NULL) {
            if (data != NULL)
               memcpy(intel_obj->sys_buffer, data, size);
            return true;
         }
      }
#endif
      intel_bufferobj_alloc_buffer(intel, intel_obj);
      if (!intel_obj->buffer)
         return false;

      if (data != NULL)
         drm_intel_bo_subdata(intel_obj->buffer, 0, size, data);
   }

   return true;
}

/**
 * Replace data in a subrange of a buffer object.  If the data range
 * specified by size + offset extends beyond the end of the buffer or
 * if data is NULL, no copy is performed.
 * Called via glBufferSubDataARB().
 */
static void
intel_bufferobj_subdata(struct gl_context * ctx,
                        GLintptrARB offset,
                        GLsizeiptrARB size,
                        const GLvoid * data, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
   bool busy;

   if (size == 0)
      return;

   assert(intel_obj);

   /* If we have a single copy in system memory, update that */
   if (intel_obj->sys_buffer) {
      if (intel_obj->source)
         release_buffer(intel_obj);

      if (intel_obj->buffer == NULL) {
         memcpy((char *)intel_obj->sys_buffer + offset, data, size);
         return;
      }

      free(intel_obj->sys_buffer);
      intel_obj->sys_buffer = NULL;
   }

   /* Otherwise we need to update the copy in video memory. */
   busy =
      drm_intel_bo_busy(intel_obj->buffer) ||
      drm_intel_bo_references(intel->batch.bo, intel_obj->buffer);

   if (busy) {
      if (size == intel_obj->Base.Size) {
         /* Replace the current busy bo with fresh data. */
         drm_intel_bo_unreference(intel_obj->buffer);
         intel_bufferobj_alloc_buffer(intel, intel_obj);
         drm_intel_bo_subdata(intel_obj->buffer, 0, size, data);
      } else {
         perf_debug("Using a blit copy to avoid stalling on %ldb "
                    "glBufferSubData() to a busy buffer object.\n",
                    (long)size);
         drm_intel_bo *temp_bo =
            drm_intel_bo_alloc(intel->bufmgr, "subdata temp", size, 64);

         drm_intel_bo_subdata(temp_bo, 0, size, data);

         intel_emit_linear_blit(intel,
                                intel_obj->buffer, offset,
                                temp_bo, 0,
                                size);

         drm_intel_bo_unreference(temp_bo);
      }
   } else {
      drm_intel_bo_subdata(intel_obj->buffer, offset, size, data);
   }
}

/**
 * Called via glGetBufferSubDataARB().
 */
static void
intel_bufferobj_get_subdata(struct gl_context * ctx,
                            GLintptrARB offset,
                            GLsizeiptrARB size,
                            GLvoid * data, struct gl_buffer_object *obj)
{
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
   struct intel_context *intel = intel_context(ctx);

   assert(intel_obj);
   if (intel_obj->sys_buffer)
      memcpy(data, (char *)intel_obj->sys_buffer + offset, size);
   else {
      if (drm_intel_bo_references(intel->batch.bo, intel_obj->buffer)) {
         intel_batchbuffer_flush(intel);
      }
      drm_intel_bo_get_subdata(intel_obj->buffer, offset, size, data);
   }
}


/**
 * Called via glMapBufferRange and glMapBuffer
 *
 * The goal of this extension is to allow apps to accumulate their rendering
 * at the same time as they accumulate new data in their buffer objects.
 * Without it, you'd end up blocking on execution of rendering every time
 * you mapped the buffer to put new data in.
 *
 * We support it in 3 ways: If unsynchronized, then don't bother
 * flushing the batchbuffer before mapping the buffer, which can save blocking
 * in many cases.  If we would still block, and they allow the whole buffer
 * to be invalidated, then just allocate a new buffer to replace the old one.
 * If not, and we'd block, and they allow the subrange of the buffer to be
 * invalidated, then we can make a new little BO, let them write into that,
 * and blit it into the real BO at unmap time.
 */
static void *
intel_bufferobj_map_range(struct gl_context * ctx,
                          GLintptr offset, GLsizeiptr length,
                          GLbitfield access, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);

   /* _mesa_MapBufferRange (GL entrypoint) sets these, but the vbo module also
    * internally uses our functions directly.
    */
   obj->Offset = offset;
   obj->Length = length;
   obj->AccessFlags = access;

   if (intel_obj->sys_buffer) {
      const bool read_only =
         (access & (GL_MAP_READ_BIT | GL_MAP_WRITE_BIT)) == GL_MAP_READ_BIT;

      if (!read_only && intel_obj->source)
         release_buffer(intel_obj);

      if (!intel_obj->buffer || intel_obj->source) {
         obj->Pointer = intel_obj->sys_buffer + offset;
         return obj->Pointer;
      }

      free(intel_obj->sys_buffer);
      intel_obj->sys_buffer = NULL;
   }

   if (intel_obj->buffer == NULL) {
      obj->Pointer = NULL;
      return NULL;
   }

   /* If the access is synchronized (like a normal buffer mapping), then get
    * things flushed out so the later mapping syncs appropriately through GEM.
    * If the user doesn't care about existing buffer contents and mapping would
    * cause us to block, then throw out the old buffer.
    *
    * If they set INVALIDATE_BUFFER, we can pitch the current contents to
    * achieve the required synchronization.
    */
   if (!(access & GL_MAP_UNSYNCHRONIZED_BIT)) {
      if (drm_intel_bo_references(intel->batch.bo, intel_obj->buffer)) {
         if (access & GL_MAP_INVALIDATE_BUFFER_BIT) {
            drm_intel_bo_unreference(intel_obj->buffer);
            intel_bufferobj_alloc_buffer(intel, intel_obj);
         } else {
            perf_debug("Stalling on the GPU for mapping a busy buffer "
                       "object\n");
            intel_flush(ctx);
         }
      } else if (drm_intel_bo_busy(intel_obj->buffer) &&
                 (access & GL_MAP_INVALIDATE_BUFFER_BIT)) {
         drm_intel_bo_unreference(intel_obj->buffer);
         intel_bufferobj_alloc_buffer(intel, intel_obj);
      }
   }

   /* If the user is mapping a range of an active buffer object but
    * doesn't require the current contents of that range, make a new
    * BO, and we'll copy what they put in there out at unmap or
    * FlushRange time.
    */
   if ((access & GL_MAP_INVALIDATE_RANGE_BIT) &&
       drm_intel_bo_busy(intel_obj->buffer)) {
      if (access & GL_MAP_FLUSH_EXPLICIT_BIT) {
         intel_obj->range_map_buffer = malloc(length);
         obj->Pointer = intel_obj->range_map_buffer;
      } else {
         intel_obj->range_map_bo = drm_intel_bo_alloc(intel->bufmgr,
                                                      "range map",
                                                      length, 64);
         if (!(access & GL_MAP_READ_BIT)) {
            drm_intel_gem_bo_map_gtt(intel_obj->range_map_bo);
         } else {
            drm_intel_bo_map(intel_obj->range_map_bo,
                             (access & GL_MAP_WRITE_BIT) != 0);
         }
         obj->Pointer = intel_obj->range_map_bo->virtual;
      }
      return obj->Pointer;
   }

   if (access & GL_MAP_UNSYNCHRONIZED_BIT)
      drm_intel_gem_bo_map_unsynchronized(intel_obj->buffer);
   else if (!(access & GL_MAP_READ_BIT)) {
      drm_intel_gem_bo_map_gtt(intel_obj->buffer);
   } else {
      drm_intel_bo_map(intel_obj->buffer, (access & GL_MAP_WRITE_BIT) != 0);
   }

   obj->Pointer = intel_obj->buffer->virtual + offset;
   return obj->Pointer;
}

/* Ideally we'd use a BO to avoid taking up cache space for the temporary
 * data, but FlushMappedBufferRange may be followed by further writes to
 * the pointer, so we would have to re-map after emitting our blit, which
 * would defeat the point.
 */
static void
intel_bufferobj_flush_mapped_range(struct gl_context *ctx,
                                   GLintptr offset, GLsizeiptr length,
                                   struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
   drm_intel_bo *temp_bo;

   /* Unless we're in the range map using a temporary system buffer,
    * there's no work to do.
    */
   if (intel_obj->range_map_buffer == NULL)
      return;

   if (length == 0)
      return;

   temp_bo = drm_intel_bo_alloc(intel->bufmgr, "range map flush", length, 64);

   drm_intel_bo_subdata(temp_bo, 0, length, intel_obj->range_map_buffer);

   intel_emit_linear_blit(intel,
                          intel_obj->buffer, obj->Offset + offset,
                          temp_bo, 0,
                          length);

   drm_intel_bo_unreference(temp_bo);
}

/**
 * Called via glUnmapBuffer().
 */
static GLboolean
intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);
   assert(obj->Pointer);
   if (intel_obj->sys_buffer != NULL) {
      /* always keep the mapping around. */
   } else if (intel_obj->range_map_buffer != NULL) {
      /* Since we've emitted some blits to buffers that will (likely) be used
       * in rendering operations in other cache domains in this batch, emit a
       * flush.  Once again, we wish for a domain tracker in libdrm to cover
       * usage inside of a batchbuffer.
       */
      intel_batchbuffer_emit_mi_flush(intel);
      free(intel_obj->range_map_buffer);
      intel_obj->range_map_buffer = NULL;
   } else if (intel_obj->range_map_bo != NULL) {
      drm_intel_bo_unmap(intel_obj->range_map_bo);

      intel_emit_linear_blit(intel,
                             intel_obj->buffer, obj->Offset,
                             intel_obj->range_map_bo, 0,
                             obj->Length);

      /* Since we've emitted some blits to buffers that will (likely) be used
       * in rendering operations in other cache domains in this batch, emit a
       * flush.  Once again, we wish for a domain tracker in libdrm to cover
       * usage inside of a batchbuffer.
       */
      intel_batchbuffer_emit_mi_flush(intel);

      drm_intel_bo_unreference(intel_obj->range_map_bo);
      intel_obj->range_map_bo = NULL;
   } else if (intel_obj->buffer != NULL) {
      drm_intel_bo_unmap(intel_obj->buffer);
   }
   obj->Pointer = NULL;
   obj->Offset = 0;
   obj->Length = 0;

   return true;
}

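/* Return the drm_intel_bo backing this buffer object, allocating one and
 * uploading any system-memory copy into it first if necessary.
 */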
drm_intel_bo *
intel_bufferobj_buffer(struct intel_context *intel,
                       struct intel_buffer_object *intel_obj,
                       GLuint flag)
{
   if (intel_obj->source)
      release_buffer(intel_obj);

   if (intel_obj->buffer == NULL) {
      intel_bufferobj_alloc_buffer(intel, intel_obj);
      drm_intel_bo_subdata(intel_obj->buffer,
                           0, intel_obj->Base.Size,
                           intel_obj->sys_buffer);

      free(intel_obj->sys_buffer);
      intel_obj->sys_buffer = NULL;
      intel_obj->offset = 0;
   }

   return intel_obj->buffer;
}

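/* Size of the shared streaming upload BO.  Small uploads are coalesced in
 * intel->upload.buffer and flushed to the BO with drm_intel_bo_subdata();
 * when the BO fills up, wrap_buffers() replaces it with a fresh one.
 */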
#define INTEL_UPLOAD_SIZE (64*1024)

void
intel_upload_finish(struct intel_context *intel)
{
   if (!intel->upload.bo)
      return;

   if (intel->upload.buffer_len) {
      drm_intel_bo_subdata(intel->upload.bo,
                           intel->upload.buffer_offset,
                           intel->upload.buffer_len,
                           intel->upload.buffer);
      intel->upload.buffer_len = 0;
   }

   drm_intel_bo_unreference(intel->upload.bo);
   intel->upload.bo = NULL;
}

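/* Flush any pending staged data, then replace the current upload BO with a
 * fresh one of at least INTEL_UPLOAD_SIZE (or 'size') bytes.
 */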
static void wrap_buffers(struct intel_context *intel, GLuint size)
{
   intel_upload_finish(intel);

   if (size < INTEL_UPLOAD_SIZE)
      size = INTEL_UPLOAD_SIZE;

   intel->upload.bo = drm_intel_bo_alloc(intel->bufmgr, "upload", size, 0);
   intel->upload.offset = 0;
}

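/* Copy 'size' bytes from 'ptr' into the shared upload BO at the next
 * 'align'-aligned offset.  Small copies are coalesced through the CPU-side
 * staging buffer; the caller gets back a referenced BO and the offset of
 * the data within it.
 */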
void intel_upload_data(struct intel_context *intel,
                       const void *ptr, GLuint size, GLuint align,
                       drm_intel_bo **return_bo,
                       GLuint *return_offset)
{
   GLuint base, delta;

   base = (intel->upload.offset + align - 1) / align * align;
   if (intel->upload.bo == NULL || base + size > intel->upload.bo->size) {
      wrap_buffers(intel, size);
      base = 0;
   }

   drm_intel_bo_reference(intel->upload.bo);
   *return_bo = intel->upload.bo;
   *return_offset = base;

   delta = base - intel->upload.offset;
   if (intel->upload.buffer_len &&
       intel->upload.buffer_len + delta + size > sizeof(intel->upload.buffer))
   {
      drm_intel_bo_subdata(intel->upload.bo,
                           intel->upload.buffer_offset,
                           intel->upload.buffer_len,
                           intel->upload.buffer);
      intel->upload.buffer_len = 0;
   }

   if (size < sizeof(intel->upload.buffer))
   {
      if (intel->upload.buffer_len == 0)
         intel->upload.buffer_offset = base;
      else
         intel->upload.buffer_len += delta;

      memcpy(intel->upload.buffer + intel->upload.buffer_len, ptr, size);
      intel->upload.buffer_len += size;
   }
   else
   {
      drm_intel_bo_subdata(intel->upload.bo, base, size, ptr);
   }

   intel->upload.offset = base + size;
}

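/* Reserve 'size' bytes at the next 'align'-aligned offset of the upload BO
 * and return a CPU pointer to write them through (either the staging buffer
 * or, for large requests, a temporary malloc).  Must be paired with
 * intel_upload_unmap() using the same size and alignment.
 */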
void *intel_upload_map(struct intel_context *intel, GLuint size, GLuint align)
{
   GLuint base, delta;
   char *ptr;

   base = (intel->upload.offset + align - 1) / align * align;
   if (intel->upload.bo == NULL || base + size > intel->upload.bo->size) {
      wrap_buffers(intel, size);
      base = 0;
   }

   delta = base - intel->upload.offset;
   if (intel->upload.buffer_len &&
       intel->upload.buffer_len + delta + size > sizeof(intel->upload.buffer))
   {
      drm_intel_bo_subdata(intel->upload.bo,
                           intel->upload.buffer_offset,
                           intel->upload.buffer_len,
                           intel->upload.buffer);
      intel->upload.buffer_len = 0;
   }

   if (size <= sizeof(intel->upload.buffer)) {
      if (intel->upload.buffer_len == 0)
         intel->upload.buffer_offset = base;
      else
         intel->upload.buffer_len += delta;

      ptr = intel->upload.buffer + intel->upload.buffer_len;
      intel->upload.buffer_len += size;
   } else
      ptr = malloc(size);

   return ptr;
}

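/* Finish a write started with intel_upload_map(): flush a large temporary
 * allocation into the upload BO, and hand back a referenced BO plus the
 * offset of the data, just like intel_upload_data().
 */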
void intel_upload_unmap(struct intel_context *intel,
                        const void *ptr, GLuint size, GLuint align,
                        drm_intel_bo **return_bo,
                        GLuint *return_offset)
{
   GLuint base;

   base = (intel->upload.offset + align - 1) / align * align;
   if (size > sizeof(intel->upload.buffer)) {
      drm_intel_bo_subdata(intel->upload.bo, base, size, ptr);
      free((void*)ptr);
   }

   drm_intel_bo_reference(intel->upload.bo);
   *return_bo = intel->upload.bo;
   *return_offset = base;

   intel->upload.offset = base + size;
}

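/* Return a BO that can be used as a read-only source for this buffer
 * object's data.  If the data only lives in system memory, it is streamed
 * into the shared upload BO and the object is marked as 'source' so the
 * reference is dropped again on the next write.
 */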
drm_intel_bo *
intel_bufferobj_source(struct intel_context *intel,
                       struct intel_buffer_object *intel_obj,
                       GLuint align, GLuint *offset)
{
   if (intel_obj->buffer == NULL) {
      intel_upload_data(intel,
                        intel_obj->sys_buffer, intel_obj->Base.Size, align,
                        &intel_obj->buffer, &intel_obj->offset);
      intel_obj->source = 1;
   }

   *offset = intel_obj->offset;
   return intel_obj->buffer;
}

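/**
 * Called via glCopyBufferSubData().
 */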
static void
intel_bufferobj_copy_subdata(struct gl_context *ctx,
                             struct gl_buffer_object *src,
                             struct gl_buffer_object *dst,
                             GLintptr read_offset, GLintptr write_offset,
                             GLsizeiptr size)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_src = intel_buffer_object(src);
   struct intel_buffer_object *intel_dst = intel_buffer_object(dst);
   drm_intel_bo *src_bo, *dst_bo;
   GLuint src_offset;

   if (size == 0)
      return;

   /* If we're in system memory, just map and memcpy. */
   if (intel_src->sys_buffer || intel_dst->sys_buffer) {
      /* The same buffer may be used, but note that regions copied may
       * not overlap.
       */
      if (src == dst) {
         char *ptr = intel_bufferobj_map_range(ctx, 0, dst->Size,
                                               GL_MAP_READ_BIT |
                                               GL_MAP_WRITE_BIT,
                                               dst);
         memmove(ptr + write_offset, ptr + read_offset, size);
         intel_bufferobj_unmap(ctx, dst);
      } else {
         const char *src_ptr;
         char *dst_ptr;

         src_ptr = intel_bufferobj_map_range(ctx, 0, src->Size,
                                             GL_MAP_READ_BIT, src);
         dst_ptr = intel_bufferobj_map_range(ctx, 0, dst->Size,
                                             GL_MAP_WRITE_BIT, dst);

         memcpy(dst_ptr + write_offset, src_ptr + read_offset, size);

         intel_bufferobj_unmap(ctx, src);
         intel_bufferobj_unmap(ctx, dst);
      }
      return;
   }

   /* Otherwise, we have real BOs, so blit them. */

   dst_bo = intel_bufferobj_buffer(intel, intel_dst, INTEL_WRITE_PART);
   src_bo = intel_bufferobj_source(intel, intel_src, 64, &src_offset);

   intel_emit_linear_blit(intel,
                          dst_bo, write_offset,
                          src_bo, read_offset + src_offset, size);

   /* Since we've emitted some blits to buffers that will (likely) be used
    * in rendering operations in other cache domains in this batch, emit a
    * flush.  Once again, we wish for a domain tracker in libdrm to cover
    * usage inside of a batchbuffer.
    */
   intel_batchbuffer_emit_mi_flush(intel);
}

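/* Helper for the GL_APPLE_object_purgeable hooks below: tell the kernel the
 * BO's pages may be discarded, and report whether they were retained.
 */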
static GLenum
intel_buffer_purgeable(drm_intel_bo *buffer)
{
   int retained = 0;

   if (buffer != NULL)
      retained = drm_intel_bo_madvise (buffer, I915_MADV_DONTNEED);

   return retained ? GL_VOLATILE_APPLE : GL_RELEASED_APPLE;
}

static GLenum
intel_buffer_object_purgeable(struct gl_context * ctx,
                              struct gl_buffer_object *obj,
                              GLenum option)
{
   struct intel_buffer_object *intel_obj = intel_buffer_object (obj);

   if (intel_obj->buffer != NULL)
      return intel_buffer_purgeable(intel_obj->buffer);

   if (option == GL_RELEASED_APPLE) {
      free(intel_obj->sys_buffer);
      intel_obj->sys_buffer = NULL;

      return GL_RELEASED_APPLE;
   } else {
      /* XXX Create the buffer and madvise(MADV_DONTNEED)? */
      struct intel_context *intel = intel_context(ctx);
      drm_intel_bo *bo = intel_bufferobj_buffer(intel, intel_obj, INTEL_READ);

      return intel_buffer_purgeable(bo);
   }
}

static GLenum
intel_texture_object_purgeable(struct gl_context * ctx,
                               struct gl_texture_object *obj,
                               GLenum option)
{
   struct intel_texture_object *intel;

   (void) ctx;
   (void) option;

   intel = intel_texture_object(obj);
   if (intel->mt == NULL || intel->mt->region == NULL)
      return GL_RELEASED_APPLE;

   return intel_buffer_purgeable(intel->mt->region->bo);
}

static GLenum
intel_render_object_purgeable(struct gl_context * ctx,
                              struct gl_renderbuffer *obj,
                              GLenum option)
{
   struct intel_renderbuffer *intel;

   (void) ctx;
   (void) option;

   intel = intel_renderbuffer(obj);
   if (intel->mt == NULL)
      return GL_RELEASED_APPLE;

   return intel_buffer_purgeable(intel->mt->region->bo);
}

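/* Counterpart to intel_buffer_purgeable(): ask the kernel to keep the BO's
 * pages again, and report whether the old contents are still there.
 */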
static GLenum
intel_buffer_unpurgeable(drm_intel_bo *buffer)
{
   int retained;

   retained = 0;
   if (buffer != NULL)
      retained = drm_intel_bo_madvise (buffer, I915_MADV_WILLNEED);

   return retained ? GL_RETAINED_APPLE : GL_UNDEFINED_APPLE;
}

static GLenum
intel_buffer_object_unpurgeable(struct gl_context * ctx,
                                struct gl_buffer_object *obj,
                                GLenum option)
{
   (void) ctx;
   (void) option;

   return intel_buffer_unpurgeable(intel_buffer_object (obj)->buffer);
}

static GLenum
intel_texture_object_unpurgeable(struct gl_context * ctx,
                                 struct gl_texture_object *obj,
                                 GLenum option)
{
   struct intel_texture_object *intel;

   (void) ctx;
   (void) option;

   intel = intel_texture_object(obj);
   if (intel->mt == NULL || intel->mt->region == NULL)
      return GL_UNDEFINED_APPLE;

   return intel_buffer_unpurgeable(intel->mt->region->bo);
}

static GLenum
intel_render_object_unpurgeable(struct gl_context * ctx,
                                struct gl_renderbuffer *obj,
                                GLenum option)
{
   struct intel_renderbuffer *intel;

   (void) ctx;
   (void) option;

   intel = intel_renderbuffer(obj);
   if (intel->mt == NULL)
      return GL_UNDEFINED_APPLE;

   return intel_buffer_unpurgeable(intel->mt->region->bo);
}

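/**
 * Plug the buffer object functions into the driver's dd_function_table.
 */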
void
intelInitBufferObjectFuncs(struct dd_function_table *functions)
{
   functions->NewBufferObject = intel_bufferobj_alloc;
   functions->DeleteBuffer = intel_bufferobj_free;
   functions->BufferData = intel_bufferobj_data;
   functions->BufferSubData = intel_bufferobj_subdata;
   functions->GetBufferSubData = intel_bufferobj_get_subdata;
   functions->MapBufferRange = intel_bufferobj_map_range;
   functions->FlushMappedBufferRange = intel_bufferobj_flush_mapped_range;
   functions->UnmapBuffer = intel_bufferobj_unmap;
   functions->CopyBufferSubData = intel_bufferobj_copy_subdata;

   functions->BufferObjectPurgeable = intel_buffer_object_purgeable;
   functions->TextureObjectPurgeable = intel_texture_object_purgeable;
   functions->RenderObjectPurgeable = intel_render_object_purgeable;

   functions->BufferObjectUnpurgeable = intel_buffer_object_unpurgeable;
   functions->TextureObjectUnpurgeable = intel_texture_object_unpurgeable;
   functions->RenderObjectUnpurgeable = intel_render_object_unpurgeable;
}