mesa: Remove target parameter from dd_function_table::UnmapBuffer
src/mesa/drivers/dri/intel/intel_buffer_objects.c
/**************************************************************************
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/


#include "main/imports.h"
#include "main/mfeatures.h"
#include "main/mtypes.h"
#include "main/macros.h"
#include "main/bufferobj.h"

#include "intel_blit.h"
#include "intel_buffer_objects.h"
#include "intel_batchbuffer.h"
#include "intel_context.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
#include "intel_regions.h"

static GLboolean
intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj);

/** Allocates a new drm_intel_bo to store the data for the buffer object. */
static void
intel_bufferobj_alloc_buffer(struct intel_context *intel,
                             struct intel_buffer_object *intel_obj)
{
   intel_obj->buffer = drm_intel_bo_alloc(intel->bufmgr, "bufferobj",
                                          intel_obj->Base.Size, 64);
}

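/* Drops our reference to the backing bo. Note that intel_obj->source is
 * set when the bo actually lives inside the shared upload buffer (see
 * intel_bufferobj_source()), so clearing it here also detaches us from
 * that storage.
 */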
static void
release_buffer(struct intel_buffer_object *intel_obj)
{
   drm_intel_bo_unreference(intel_obj->buffer);
   intel_obj->buffer = NULL;
   intel_obj->offset = 0;
   intel_obj->source = 0;
}

/**
 * There is some duplication between mesa's bufferobjects and our
 * bufmgr buffers. Both have an integer handle and a hashtable to
 * lookup an opaque structure. It would be nice if the handles and
 * internal structure were somehow shared.
 */
static struct gl_buffer_object *
intel_bufferobj_alloc(struct gl_context * ctx, GLuint name, GLenum target)
{
   struct intel_buffer_object *obj = CALLOC_STRUCT(intel_buffer_object);

   _mesa_initialize_buffer_object(&obj->Base, name, target);

   obj->buffer = NULL;

   return &obj->Base;
}

/* Break the COW tie to the region. The region gets to keep the data.
 */
void
intel_bufferobj_release_region(struct intel_context *intel,
                               struct intel_buffer_object *intel_obj)
{
   assert(intel_obj->region->buffer == intel_obj->buffer);
   intel_obj->region->pbo = NULL;
   intel_obj->region = NULL;

   release_buffer(intel_obj);
}

/* Break the COW tie to the region. Both the pbo and the region end
 * up with a copy of the data.
 */
void
intel_bufferobj_cow(struct intel_context *intel,
                    struct intel_buffer_object *intel_obj)
{
   assert(intel_obj->region);
   intel_region_cow(intel, intel_obj->region);
}


/**
 * Deallocate/free a vertex/pixel buffer object.
 * Called via glDeleteBuffersARB().
 */
static void
intel_bufferobj_free(struct gl_context * ctx, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);

   /* Buffer objects are automatically unmapped when deleted according to
    * the spec, but Mesa doesn't call UnmapBuffer for us at context destroy
    * time (it does when you call glDeleteBuffers).
    */
   if (obj->Pointer)
      intel_bufferobj_unmap(ctx, obj);

   free(intel_obj->sys_buffer);
   if (intel_obj->region) {
      intel_bufferobj_release_region(intel, intel_obj);
   }

   drm_intel_bo_unreference(intel_obj->buffer);
   free(intel_obj);
}




/**
 * Allocate space for and store data in a buffer object. Any data that was
 * previously stored in the buffer object is lost. If data is NULL,
 * memory will be allocated, but no copy will occur.
 * Called via ctx->Driver.BufferData().
 * \return GL_TRUE for success, GL_FALSE if out of memory
 */
static GLboolean
intel_bufferobj_data(struct gl_context * ctx,
                     GLenum target,
                     GLsizeiptrARB size,
                     const GLvoid * data,
                     GLenum usage, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   intel_obj->Base.Size = size;
   intel_obj->Base.Usage = usage;

   assert(!obj->Pointer); /* Mesa should have unmapped it */

   if (intel_obj->region)
      intel_bufferobj_release_region(intel, intel_obj);

   if (intel_obj->buffer != NULL)
      release_buffer(intel_obj);

   free(intel_obj->sys_buffer);
   intel_obj->sys_buffer = NULL;

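   /* Keep GL_DYNAMIC_DRAW data in a malloc'ed shadow copy: it is uploaded
    * into a bo on demand (see intel_bufferobj_source()), which avoids
    * stalling on a busy bo when the app rewrites the buffer every frame.
    */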
   if (size != 0) {
      if (usage == GL_DYNAMIC_DRAW
#ifdef I915
          /* On pre-965, stick VBOs in system memory, as we're always doing
           * swtnl with their contents anyway.
           */
          || target == GL_ARRAY_BUFFER || target == GL_ELEMENT_ARRAY_BUFFER
#endif
         )
      {
         intel_obj->sys_buffer = malloc(size);
         if (intel_obj->sys_buffer != NULL) {
            if (data != NULL)
               memcpy(intel_obj->sys_buffer, data, size);
            return GL_TRUE;
         }
      }
      intel_bufferobj_alloc_buffer(intel, intel_obj);
      if (!intel_obj->buffer)
         return GL_FALSE;

      if (data != NULL)
         drm_intel_bo_subdata(intel_obj->buffer, 0, size, data);
   }

   return GL_TRUE;
}


/**
 * Replace data in a subrange of a buffer object. If the data range
 * specified by size + offset extends beyond the end of the buffer or
 * if data is NULL, no copy is performed.
 * Called via glBufferSubDataARB().
 */
static void
intel_bufferobj_subdata(struct gl_context * ctx,
                        GLenum target,
                        GLintptrARB offset,
                        GLsizeiptrARB size,
                        const GLvoid * data, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
   bool busy;

   if (size == 0)
      return;

   assert(intel_obj);

   if (intel_obj->region)
      intel_bufferobj_cow(intel, intel_obj);

   /* If we have a single copy in system memory, update that */
   if (intel_obj->sys_buffer) {
      if (intel_obj->source)
         release_buffer(intel_obj);

      if (intel_obj->buffer == NULL) {
         memcpy((char *)intel_obj->sys_buffer + offset, data, size);
         return;
      }

      free(intel_obj->sys_buffer);
      intel_obj->sys_buffer = NULL;
   }

   /* Otherwise we need to update the copy in video memory. */
   busy =
      drm_intel_bo_busy(intel_obj->buffer) ||
      drm_intel_bo_references(intel->batch.bo, intel_obj->buffer);

   /* If we're replacing the entire contents of a busy bo, just swap in a
    * fresh one rather than stalling on the old one.
    */
   if (busy && size == intel_obj->Base.Size) {
      drm_intel_bo_unreference(intel_obj->buffer);
      intel_bufferobj_alloc_buffer(intel, intel_obj);
      drm_intel_bo_subdata(intel_obj->buffer, 0, size, data);
   } else if (intel->gen < 6) {
      if (busy) {
         drm_intel_bo *temp_bo;

         temp_bo = drm_intel_bo_alloc(intel->bufmgr, "subdata temp", size, 64);

         drm_intel_bo_subdata(temp_bo, 0, size, data);

         intel_emit_linear_blit(intel,
                                intel_obj->buffer, offset,
                                temp_bo, 0,
                                size);

         drm_intel_bo_unreference(temp_bo);
      } else {
         drm_intel_bo_subdata(intel_obj->buffer, offset, size, data);
      }
   } else {
      /* Can't use the blit to modify the buffer in the middle of a batch. */
      if (drm_intel_bo_references(intel->batch.bo, intel_obj->buffer)) {
         intel_batchbuffer_flush(intel);
      }
      drm_intel_bo_subdata(intel_obj->buffer, offset, size, data);
   }
}


/**
 * Called via glGetBufferSubDataARB().
 */
static void
intel_bufferobj_get_subdata(struct gl_context * ctx,
                            GLenum target,
                            GLintptrARB offset,
                            GLsizeiptrARB size,
                            GLvoid * data, struct gl_buffer_object *obj)
{
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
   struct intel_context *intel = intel_context(ctx);

   assert(intel_obj);
   if (intel_obj->sys_buffer)
      memcpy(data, (char *)intel_obj->sys_buffer + offset, size);
   else {
      if (drm_intel_bo_references(intel->batch.bo, intel_obj->buffer)) {
         intel_batchbuffer_flush(intel);
      }
      drm_intel_bo_get_subdata(intel_obj->buffer, offset, size, data);
   }
}



/**
 * Called via glMapBufferARB().
 */
static void *
intel_bufferobj_map(struct gl_context * ctx,
                    GLenum target,
                    GLenum access, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
   GLboolean read_only = (access == GL_READ_ONLY_ARB);
   GLboolean write_only = (access == GL_WRITE_ONLY_ARB);

   assert(intel_obj);

   if (intel_obj->sys_buffer) {
      if (!read_only && intel_obj->source) {
         release_buffer(intel_obj);
      }

      if (!intel_obj->buffer || intel_obj->source) {
         obj->Pointer = intel_obj->sys_buffer;
         obj->Length = obj->Size;
         obj->Offset = 0;
         return obj->Pointer;
      }

      free(intel_obj->sys_buffer);
      intel_obj->sys_buffer = NULL;
   }

   /* Flush any existing batchbuffer that might reference this data. */
   if (drm_intel_bo_references(intel->batch.bo, intel_obj->buffer))
      intel_flush(ctx);

   if (intel_obj->region)
      intel_bufferobj_cow(intel, intel_obj);

   if (intel_obj->buffer == NULL) {
      obj->Pointer = NULL;
      return NULL;
   }

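   /* A write-only map can use the (write-combined) GTT mapping, which is
    * cheap to write but very slow to read back; anything that might be
    * read gets a regular CPU mapping instead.
    */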
   if (write_only) {
      drm_intel_gem_bo_map_gtt(intel_obj->buffer);
      intel_obj->mapped_gtt = GL_TRUE;
   } else {
      drm_intel_bo_map(intel_obj->buffer, !read_only);
      intel_obj->mapped_gtt = GL_FALSE;
   }

   obj->Pointer = intel_obj->buffer->virtual;
   obj->Length = obj->Size;
   obj->Offset = 0;

   return obj->Pointer;
}

/**
 * Called via glMapBufferRange().
 *
 * The goal of this extension is to allow apps to accumulate their rendering
 * at the same time as they accumulate new data in their buffer object.
 * Without it, you'd end up blocking on execution of rendering every time
 * you mapped the buffer to put new data in.
 *
 * We support it in 3 ways: If unsynchronized, then don't bother
 * flushing the batchbuffer before mapping the buffer, which can save blocking
 * in many cases. If we would still block, and they allow the whole buffer
 * to be invalidated, then just allocate a new buffer to replace the old one.
 * If not, and we'd block, and they allow the subrange of the buffer to be
 * invalidated, then we can make a new little BO, let them write into that,
 * and blit it into the real BO at unmap time.
 */
static void *
intel_bufferobj_map_range(struct gl_context * ctx,
                          GLenum target, GLintptr offset, GLsizeiptr length,
                          GLbitfield access, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
   GLboolean read_only = (access == GL_READ_ONLY_ARB);

   assert(intel_obj);

   /* _mesa_MapBufferRange (GL entrypoint) sets these, but the vbo module also
    * internally uses our functions directly.
    */
   obj->Offset = offset;
   obj->Length = length;
   obj->AccessFlags = access;

   if (intel_obj->sys_buffer) {
      if (!read_only && intel_obj->source)
         release_buffer(intel_obj);

      if (!intel_obj->buffer || intel_obj->source) {
         obj->Pointer = intel_obj->sys_buffer + offset;
         return obj->Pointer;
      }

      free(intel_obj->sys_buffer);
      intel_obj->sys_buffer = NULL;
   }

   if (intel_obj->region)
      intel_bufferobj_cow(intel, intel_obj);

   /* If the mapping is synchronized with other GL operations, flush
    * the batchbuffer so that GEM knows about the buffer access for later
    * syncing.
    */
   if (!(access & GL_MAP_UNSYNCHRONIZED_BIT) &&
       drm_intel_bo_references(intel->batch.bo, intel_obj->buffer))
      intel_flush(ctx);

   if (intel_obj->buffer == NULL) {
      obj->Pointer = NULL;
      return NULL;
   }

   /* If the user doesn't care about existing buffer contents and mapping
    * would cause us to block, then throw out the old buffer.
    */
   if (!(access & GL_MAP_UNSYNCHRONIZED_BIT) &&
       (access & GL_MAP_INVALIDATE_BUFFER_BIT) &&
       drm_intel_bo_busy(intel_obj->buffer)) {
      drm_intel_bo_unreference(intel_obj->buffer);
      intel_bufferobj_alloc_buffer(intel, intel_obj);
   }

   /* If the user is mapping a range of an active buffer object but
    * doesn't require the current contents of that range, make a new
    * BO, and we'll copy what they put in there out at unmap or
    * FlushRange time.
    */
   if ((access & GL_MAP_INVALIDATE_RANGE_BIT) &&
       drm_intel_bo_busy(intel_obj->buffer)) {
      if (access & GL_MAP_FLUSH_EXPLICIT_BIT) {
         intel_obj->range_map_buffer = malloc(length);
         obj->Pointer = intel_obj->range_map_buffer;
      } else {
         intel_obj->range_map_bo = drm_intel_bo_alloc(intel->bufmgr,
                                                      "range map",
                                                      length, 64);
         if (!(access & GL_MAP_READ_BIT)) {
            drm_intel_gem_bo_map_gtt(intel_obj->range_map_bo);
            intel_obj->mapped_gtt = GL_TRUE;
         } else {
            drm_intel_bo_map(intel_obj->range_map_bo,
                             (access & GL_MAP_WRITE_BIT) != 0);
            intel_obj->mapped_gtt = GL_FALSE;
         }
         obj->Pointer = intel_obj->range_map_bo->virtual;
      }
      return obj->Pointer;
   }

   if (!(access & GL_MAP_READ_BIT)) {
      drm_intel_gem_bo_map_gtt(intel_obj->buffer);
      intel_obj->mapped_gtt = GL_TRUE;
   } else {
      drm_intel_bo_map(intel_obj->buffer, (access & GL_MAP_WRITE_BIT) != 0);
      intel_obj->mapped_gtt = GL_FALSE;
   }

   obj->Pointer = intel_obj->buffer->virtual + offset;
   return obj->Pointer;
}

/* Ideally we'd use a BO to avoid taking up cache space for the temporary
 * data, but FlushMappedBufferRange may be followed by further writes to
 * the pointer, so we would have to re-map after emitting our blit, which
 * would defeat the point.
 */
static void
intel_bufferobj_flush_mapped_range(struct gl_context *ctx, GLenum target,
                                   GLintptr offset, GLsizeiptr length,
                                   struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
   drm_intel_bo *temp_bo;

   /* Unless this is a range mapping through a temporary system buffer,
    * there's no work to do.
    */
   if (intel_obj->range_map_buffer == NULL)
      return;

   if (length == 0)
      return;

   temp_bo = drm_intel_bo_alloc(intel->bufmgr, "range map flush", length, 64);

   drm_intel_bo_subdata(temp_bo, 0, length, intel_obj->range_map_buffer);

   intel_emit_linear_blit(intel,
                          intel_obj->buffer, obj->Offset + offset,
                          temp_bo, 0,
                          length);

   drm_intel_bo_unreference(temp_bo);
}


/**
 * Called via glUnmapBuffer().
 */
static GLboolean
intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);
   assert(obj->Pointer);
   if (intel_obj->sys_buffer != NULL) {
      /* always keep the mapping around. */
   } else if (intel_obj->range_map_buffer != NULL) {
      /* Since we've emitted some blits to buffers that will (likely) be used
       * in rendering operations in other cache domains in this batch, emit a
       * flush. Once again, we wish for a domain tracker in libdrm to cover
       * usage inside of a batchbuffer.
       */
      intel_batchbuffer_emit_mi_flush(intel);
      free(intel_obj->range_map_buffer);
      intel_obj->range_map_buffer = NULL;
   } else if (intel_obj->range_map_bo != NULL) {
      if (intel_obj->mapped_gtt) {
         drm_intel_gem_bo_unmap_gtt(intel_obj->range_map_bo);
      } else {
         drm_intel_bo_unmap(intel_obj->range_map_bo);
      }

      intel_emit_linear_blit(intel,
                             intel_obj->buffer, obj->Offset,
                             intel_obj->range_map_bo, 0,
                             obj->Length);

      /* Since we've emitted some blits to buffers that will (likely) be used
       * in rendering operations in other cache domains in this batch, emit a
       * flush. Once again, we wish for a domain tracker in libdrm to cover
       * usage inside of a batchbuffer.
       */
      intel_batchbuffer_emit_mi_flush(intel);

      drm_intel_bo_unreference(intel_obj->range_map_bo);
      intel_obj->range_map_bo = NULL;
   } else if (intel_obj->buffer != NULL) {
      if (intel_obj->mapped_gtt) {
         drm_intel_gem_bo_unmap_gtt(intel_obj->buffer);
      } else {
         drm_intel_bo_unmap(intel_obj->buffer);
      }
   }
   obj->Pointer = NULL;
   obj->Offset = 0;
   obj->Length = 0;

   return GL_TRUE;
}

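/**
 * Returns the bo backing the buffer object, allocating it (and migrating
 * any system-memory shadow copy into it) if necessary. The flag describes
 * the intended write access so that any COW tie to a region can be broken
 * appropriately first.
 */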
drm_intel_bo *
intel_bufferobj_buffer(struct intel_context *intel,
                       struct intel_buffer_object *intel_obj,
                       GLuint flag)
{
   if (intel_obj->region) {
      if (flag == INTEL_WRITE_PART)
         intel_bufferobj_cow(intel, intel_obj);
      else if (flag == INTEL_WRITE_FULL) {
         intel_bufferobj_release_region(intel, intel_obj);
         intel_bufferobj_alloc_buffer(intel, intel_obj);
      }
   }

   if (intel_obj->source)
      release_buffer(intel_obj);

   if (intel_obj->buffer == NULL) {
      intel_bufferobj_alloc_buffer(intel, intel_obj);
      drm_intel_bo_subdata(intel_obj->buffer,
                           0, intel_obj->Base.Size,
                           intel_obj->sys_buffer);

      free(intel_obj->sys_buffer);
      intel_obj->sys_buffer = NULL;
      intel_obj->offset = 0;
   }

   return intel_obj->buffer;
}

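/* Streamed-upload helpers: small pieces of dynamic data are packed into a
 * shared "upload" bo so each one doesn't need its own allocation, and the
 * writes are batched through a small staging buffer (intel->upload.buffer)
 * to avoid one drm_intel_bo_subdata() call per tiny upload.
 */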
#define INTEL_UPLOAD_SIZE (64*1024)

void
intel_upload_finish(struct intel_context *intel)
{
   if (!intel->upload.bo)
      return;

   if (intel->upload.buffer_len) {
      drm_intel_bo_subdata(intel->upload.bo,
                           intel->upload.buffer_offset,
                           intel->upload.buffer_len,
                           intel->upload.buffer);
      intel->upload.buffer_len = 0;
   }

   drm_intel_bo_unreference(intel->upload.bo);
   intel->upload.bo = NULL;
}

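/* Flushes any staged data and replaces the current upload bo with a fresh
 * one large enough for the pending request.
 */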
static void wrap_buffers(struct intel_context *intel, GLuint size)
{
   intel_upload_finish(intel);

   if (size < INTEL_UPLOAD_SIZE)
      size = INTEL_UPLOAD_SIZE;

   intel->upload.bo = drm_intel_bo_alloc(intel->bufmgr, "upload", size, 0);
   intel->upload.offset = 0;
}

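/**
 * Copies size bytes from ptr into the upload bo at the next align-aligned
 * offset, returning a new reference to the bo in *return_bo and the offset
 * at which the data landed in *return_offset.
 */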
void intel_upload_data(struct intel_context *intel,
                       const void *ptr, GLuint size, GLuint align,
                       drm_intel_bo **return_bo,
                       GLuint *return_offset)
{
   GLuint base, delta;

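   /* Round the current offset up to the next multiple of align. */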
   base = (intel->upload.offset + align - 1) / align * align;
   if (intel->upload.bo == NULL || base + size > intel->upload.bo->size) {
      wrap_buffers(intel, size);
      base = 0;
   }

   drm_intel_bo_reference(intel->upload.bo);
   *return_bo = intel->upload.bo;
   *return_offset = base;

   delta = base - intel->upload.offset;
   if (intel->upload.buffer_len &&
       intel->upload.buffer_len + delta + size > sizeof(intel->upload.buffer))
   {
      drm_intel_bo_subdata(intel->upload.bo,
                           intel->upload.buffer_offset,
                           intel->upload.buffer_len,
                           intel->upload.buffer);
      intel->upload.buffer_len = 0;
   }

   if (size < sizeof(intel->upload.buffer))
   {
      if (intel->upload.buffer_len == 0)
         intel->upload.buffer_offset = base;
      else
         intel->upload.buffer_len += delta;

      memcpy(intel->upload.buffer + intel->upload.buffer_len, ptr, size);
      intel->upload.buffer_len += size;
   }
   else
   {
      drm_intel_bo_subdata(intel->upload.bo, base, size, ptr);
   }

   intel->upload.offset = base + size;
}

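/**
 * Returns a pointer for the caller to write size bytes into; the data is
 * committed to the upload bo by the matching intel_upload_unmap() call,
 * which must be passed the same size and align values.
 */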
void *intel_upload_map(struct intel_context *intel, GLuint size, GLuint align)
{
   GLuint base, delta;
   char *ptr;

   base = (intel->upload.offset + align - 1) / align * align;
   if (intel->upload.bo == NULL || base + size > intel->upload.bo->size) {
      wrap_buffers(intel, size);
      base = 0;
   }

   delta = base - intel->upload.offset;
   if (intel->upload.buffer_len &&
       intel->upload.buffer_len + delta + size > sizeof(intel->upload.buffer))
   {
      drm_intel_bo_subdata(intel->upload.bo,
                           intel->upload.buffer_offset,
                           intel->upload.buffer_len,
                           intel->upload.buffer);
      intel->upload.buffer_len = 0;
   }

   if (size <= sizeof(intel->upload.buffer)) {
      if (intel->upload.buffer_len == 0)
         intel->upload.buffer_offset = base;
      else
         intel->upload.buffer_len += delta;

      ptr = intel->upload.buffer + intel->upload.buffer_len;
      intel->upload.buffer_len += size;
   } else
      ptr = malloc(size);

   return ptr;
}

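/**
 * Commits data written through intel_upload_map(): a large allocation that
 * was malloc'ed gets copied into the bo and freed here, while a small one
 * is already sitting in the staging buffer.
 */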
void intel_upload_unmap(struct intel_context *intel,
                        const void *ptr, GLuint size, GLuint align,
                        drm_intel_bo **return_bo,
                        GLuint *return_offset)
{
   GLuint base;

   base = (intel->upload.offset + align - 1) / align * align;
   if (size > sizeof(intel->upload.buffer)) {
      drm_intel_bo_subdata(intel->upload.bo, base, size, ptr);
      free((void*)ptr);
   }

   drm_intel_bo_reference(intel->upload.bo);
   *return_bo = intel->upload.bo;
   *return_offset = base;

   intel->upload.offset = base + size;
}

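/**
 * Returns a bo (plus the offset of the data within it) suitable for
 * reading the buffer object's contents. A system-memory shadow copy is
 * streamed through the shared upload bo rather than getting a bo of its
 * own, and intel_obj->source is set so later writes know to detach from
 * that shared storage first.
 */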
drm_intel_bo *
intel_bufferobj_source(struct intel_context *intel,
                       struct intel_buffer_object *intel_obj,
                       GLuint align, GLuint *offset)
{
   if (intel_obj->buffer == NULL) {
      intel_upload_data(intel,
                        intel_obj->sys_buffer, intel_obj->Base.Size, align,
                        &intel_obj->buffer, &intel_obj->offset);
      intel_obj->source = 1;
   }

   *offset = intel_obj->offset;
   return intel_obj->buffer;
}

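/**
 * Called via glCopyBufferSubData().
 */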
static void
intel_bufferobj_copy_subdata(struct gl_context *ctx,
                             struct gl_buffer_object *src,
                             struct gl_buffer_object *dst,
                             GLintptr read_offset, GLintptr write_offset,
                             GLsizeiptr size)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_src = intel_buffer_object(src);
   struct intel_buffer_object *intel_dst = intel_buffer_object(dst);
   drm_intel_bo *src_bo, *dst_bo;
   GLuint src_offset;

   if (size == 0)
      return;

   /* If either side lives in system memory, just map and memcpy; we take
    * the same path on gen6+, preferring a CPU copy over a blit.
    */
   if (intel_src->sys_buffer || intel_dst->sys_buffer || intel->gen >= 6) {
      /* The same buffer may be used, but note that regions copied may
       * not overlap.
       */
      if (src == dst) {
         char *ptr = intel_bufferobj_map(ctx, GL_COPY_WRITE_BUFFER,
                                         GL_READ_WRITE, dst);
         memmove(ptr + write_offset, ptr + read_offset, size);
         intel_bufferobj_unmap(ctx, dst);
      } else {
         const char *src_ptr;
         char *dst_ptr;

         src_ptr = intel_bufferobj_map(ctx, GL_COPY_READ_BUFFER,
                                       GL_READ_ONLY, src);
         dst_ptr = intel_bufferobj_map(ctx, GL_COPY_WRITE_BUFFER,
                                       GL_WRITE_ONLY, dst);

         memcpy(dst_ptr + write_offset, src_ptr + read_offset, size);

         intel_bufferobj_unmap(ctx, src);
         intel_bufferobj_unmap(ctx, dst);
      }
      return;
   }

   /* Otherwise, we have real BOs, so blit them. */

   dst_bo = intel_bufferobj_buffer(intel, intel_dst, INTEL_WRITE_PART);
   src_bo = intel_bufferobj_source(intel, intel_src, 64, &src_offset);

   intel_emit_linear_blit(intel,
                          dst_bo, write_offset,
                          src_bo, read_offset + src_offset, size);

   /* Since we've emitted some blits to buffers that will (likely) be used
    * in rendering operations in other cache domains in this batch, emit a
    * flush. Once again, we wish for a domain tracker in libdrm to cover
    * usage inside of a batchbuffer.
    */
   intel_batchbuffer_emit_mi_flush(intel);
}

#if FEATURE_APPLE_object_purgeable
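/* GL_APPLE_object_purgeable is implemented on top of the kernel's madvise
 * interface: I915_MADV_DONTNEED lets the kernel discard the backing pages
 * under memory pressure, I915_MADV_WILLNEED asks for them back, and the
 * return value tells us whether the contents were retained.
 */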
static GLenum
intel_buffer_purgeable(struct gl_context * ctx,
                       drm_intel_bo *buffer,
                       GLenum option)
{
   int retained = 0;

   if (buffer != NULL)
      retained = drm_intel_bo_madvise (buffer, I915_MADV_DONTNEED);

   return retained ? GL_VOLATILE_APPLE : GL_RELEASED_APPLE;
}

static GLenum
intel_buffer_object_purgeable(struct gl_context * ctx,
                              struct gl_buffer_object *obj,
                              GLenum option)
{
   struct intel_buffer_object *intel;

   intel = intel_buffer_object (obj);
   if (intel->buffer != NULL)
      return intel_buffer_purgeable (ctx, intel->buffer, option);

   if (option == GL_RELEASED_APPLE) {
      if (intel->sys_buffer != NULL) {
         free(intel->sys_buffer);
         intel->sys_buffer = NULL;
      }

      return GL_RELEASED_APPLE;
   } else {
      /* XXX Create the buffer and madvise(MADV_DONTNEED)? */
      return intel_buffer_purgeable (ctx,
                                     intel_bufferobj_buffer(intel_context(ctx),
                                                            intel, INTEL_READ),
                                     option);
   }
}

static GLenum
intel_texture_object_purgeable(struct gl_context * ctx,
                               struct gl_texture_object *obj,
                               GLenum option)
{
   struct intel_texture_object *intel;

   intel = intel_texture_object(obj);
   if (intel->mt == NULL || intel->mt->region == NULL)
      return GL_RELEASED_APPLE;

   return intel_buffer_purgeable (ctx, intel->mt->region->buffer, option);
}

static GLenum
intel_render_object_purgeable(struct gl_context * ctx,
                              struct gl_renderbuffer *obj,
                              GLenum option)
{
   struct intel_renderbuffer *intel;

   intel = intel_renderbuffer(obj);
   if (intel->region == NULL)
      return GL_RELEASED_APPLE;

   return intel_buffer_purgeable (ctx, intel->region->buffer, option);
}

static GLenum
intel_buffer_unpurgeable(struct gl_context * ctx,
                         drm_intel_bo *buffer,
                         GLenum option)
{
   int retained;

   retained = 0;
   if (buffer != NULL)
      retained = drm_intel_bo_madvise (buffer, I915_MADV_WILLNEED);

   return retained ? GL_RETAINED_APPLE : GL_UNDEFINED_APPLE;
}

static GLenum
intel_buffer_object_unpurgeable(struct gl_context * ctx,
                                struct gl_buffer_object *obj,
                                GLenum option)
{
   return intel_buffer_unpurgeable (ctx, intel_buffer_object (obj)->buffer, option);
}

static GLenum
intel_texture_object_unpurgeable(struct gl_context * ctx,
                                 struct gl_texture_object *obj,
                                 GLenum option)
{
   struct intel_texture_object *intel;

   intel = intel_texture_object(obj);
   if (intel->mt == NULL || intel->mt->region == NULL)
      return GL_UNDEFINED_APPLE;

   return intel_buffer_unpurgeable (ctx, intel->mt->region->buffer, option);
}

static GLenum
intel_render_object_unpurgeable(struct gl_context * ctx,
                                struct gl_renderbuffer *obj,
                                GLenum option)
{
   struct intel_renderbuffer *intel;

   intel = intel_renderbuffer(obj);
   if (intel->region == NULL)
      return GL_UNDEFINED_APPLE;

   return intel_buffer_unpurgeable (ctx, intel->region->buffer, option);
}
#endif

void
intelInitBufferObjectFuncs(struct dd_function_table *functions)
{
   functions->NewBufferObject = intel_bufferobj_alloc;
   functions->DeleteBuffer = intel_bufferobj_free;
   functions->BufferData = intel_bufferobj_data;
   functions->BufferSubData = intel_bufferobj_subdata;
   functions->GetBufferSubData = intel_bufferobj_get_subdata;
   functions->MapBuffer = intel_bufferobj_map;
   functions->MapBufferRange = intel_bufferobj_map_range;
   functions->FlushMappedBufferRange = intel_bufferobj_flush_mapped_range;
   functions->UnmapBuffer = intel_bufferobj_unmap;
   functions->CopyBufferSubData = intel_bufferobj_copy_subdata;

#if FEATURE_APPLE_object_purgeable
   functions->BufferObjectPurgeable = intel_buffer_object_purgeable;
   functions->TextureObjectPurgeable = intel_texture_object_purgeable;
   functions->RenderObjectPurgeable = intel_render_object_purgeable;

   functions->BufferObjectUnpurgeable = intel_buffer_object_unpurgeable;
   functions->TextureObjectUnpurgeable = intel_texture_object_unpurgeable;
   functions->RenderObjectUnpurgeable = intel_render_object_unpurgeable;
#endif
}