src/mesa/drivers/dri/intel/intel_buffer_objects.c

/**************************************************************************
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/


#include "main/imports.h"
#include "main/mfeatures.h"
#include "main/mtypes.h"
#include "main/macros.h"
#include "main/bufferobj.h"

#include "intel_blit.h"
#include "intel_buffer_objects.h"
#include "intel_batchbuffer.h"
#include "intel_context.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
#include "intel_regions.h"

static GLboolean
intel_bufferobj_unmap(struct gl_context * ctx,
                      GLenum target, struct gl_buffer_object *obj);

/** Allocates a new drm_intel_bo to store the data for the buffer object. */
static void
intel_bufferobj_alloc_buffer(struct intel_context *intel,
                             struct intel_buffer_object *intel_obj)
{
   intel_obj->buffer = drm_intel_bo_alloc(intel->bufmgr, "bufferobj",
                                          intel_obj->Base.Size, 64);
}

/**
 * There is some duplication between mesa's bufferobjects and our
 * bufmgr buffers.  Both have an integer handle and a hashtable to
 * lookup an opaque structure.  It would be nice if the handles and
 * internal structure were somehow shared.
 */
static struct gl_buffer_object *
intel_bufferobj_alloc(struct gl_context * ctx, GLuint name, GLenum target)
{
   struct intel_buffer_object *obj = CALLOC_STRUCT(intel_buffer_object);

   _mesa_initialize_buffer_object(&obj->Base, name, target);

   obj->buffer = NULL;

   return &obj->Base;
}

/* Break the COW tie to the region.  The region gets to keep the data.
 */
void
intel_bufferobj_release_region(struct intel_context *intel,
                               struct intel_buffer_object *intel_obj)
{
   assert(intel_obj->region->buffer == intel_obj->buffer);
   intel_obj->region->pbo = NULL;
   intel_obj->region = NULL;

   drm_intel_bo_unreference(intel_obj->buffer);
   intel_obj->buffer = NULL;
}

/* Break the COW tie to the region.  Both the pbo and the region end
 * up with a copy of the data.
 */
void
intel_bufferobj_cow(struct intel_context *intel,
                    struct intel_buffer_object *intel_obj)
{
   assert(intel_obj->region);
   intel_region_cow(intel, intel_obj->region);
}


/**
 * Deallocate/free a vertex/pixel buffer object.
 * Called via glDeleteBuffersARB().
 */
static void
intel_bufferobj_free(struct gl_context * ctx, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);

   /* According to the spec, buffer objects are automatically unmapped when
    * they are deleted, but Mesa doesn't call UnmapBuffer for us at context
    * destroy time (though it does when you call glDeleteBuffers).
    */
   if (obj->Pointer)
      intel_bufferobj_unmap(ctx, 0, obj);

   free(intel_obj->sys_buffer);
   if (intel_obj->region) {
      intel_bufferobj_release_region(intel, intel_obj);
   }

   drm_intel_bo_unreference(intel_obj->buffer);
   free(intel_obj);
}


/**
 * Allocate space for and store data in a buffer object.  Any data that was
 * previously stored in the buffer object is lost.  If data is NULL,
 * memory will be allocated, but no copy will occur.
 * Called via ctx->Driver.BufferData().
 * \return GL_TRUE for success, GL_FALSE if out of memory
 */
static GLboolean
intel_bufferobj_data(struct gl_context * ctx,
                     GLenum target,
                     GLsizeiptrARB size,
                     const GLvoid * data,
                     GLenum usage, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   intel_obj->Base.Size = size;
   intel_obj->Base.Usage = usage;

   assert(!obj->Pointer); /* Mesa should have unmapped it */

   if (intel_obj->region)
      intel_bufferobj_release_region(intel, intel_obj);

   if (intel_obj->buffer != NULL) {
      drm_intel_bo_unreference(intel_obj->buffer);
      intel_obj->buffer = NULL;
      intel_obj->source = 0;
   }
   free(intel_obj->sys_buffer);
   intel_obj->sys_buffer = NULL;

   if (size != 0) {
      if (usage == GL_DYNAMIC_DRAW
#ifdef I915
          /* On pre-965, stick VBOs in system memory, as we're always doing
           * swtnl with their contents anyway.
           */
          || target == GL_ARRAY_BUFFER || target == GL_ELEMENT_ARRAY_BUFFER
#endif
         )
      {
         intel_obj->sys_buffer = malloc(size);
         if (intel_obj->sys_buffer != NULL) {
            if (data != NULL)
               memcpy(intel_obj->sys_buffer, data, size);
            return GL_TRUE;
         }
      }
      intel_bufferobj_alloc_buffer(intel, intel_obj);
      if (!intel_obj->buffer)
         return GL_FALSE;

      if (data != NULL)
         drm_intel_bo_subdata(intel_obj->buffer, 0, size, data);
   }

   return GL_TRUE;
}
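
/* A hypothetical GL-level sketch of how the sys_buffer path above gets hit:
 * on I915 builds, a vertex buffer lands in malloc'ed system memory
 * regardless of usage, since swtnl reads it on the CPU anyway.  The buffer
 * name "vbo" and the "verts" array are illustrative only.
 *
 *    GLuint vbo;
 *    glGenBuffers(1, &vbo);
 *    glBindBuffer(GL_ARRAY_BUFFER, vbo);
 *    glBufferData(GL_ARRAY_BUFFER, sizeof(verts), verts, GL_STATIC_DRAW);
 */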

/**
 * Replace data in a subrange of a buffer object.  If the data range
 * specified by size + offset extends beyond the end of the buffer or
 * if data is NULL, no copy is performed.
 * Called via glBufferSubDataARB().
 */
static void
intel_bufferobj_subdata(struct gl_context * ctx,
                        GLenum target,
                        GLintptrARB offset,
                        GLsizeiptrARB size,
                        const GLvoid * data, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   if (size == 0)
      return;

   assert(intel_obj);

   if (intel_obj->region)
      intel_bufferobj_cow(intel, intel_obj);

   if (intel_obj->sys_buffer) {
      if (intel_obj->buffer) {
         drm_intel_bo_unreference(intel_obj->buffer);
         intel_obj->buffer = NULL;
         intel_obj->source = 0;
      }
      memcpy((char *)intel_obj->sys_buffer + offset, data, size);
   } else {
      bool busy =
         drm_intel_bo_busy(intel_obj->buffer) ||
         drm_intel_bo_references(intel->batch.bo, intel_obj->buffer);

      /* replace the current busy bo with fresh data */
      if (busy && size == intel_obj->Base.Size) {
         drm_intel_bo_unreference(intel_obj->buffer);
         intel_bufferobj_alloc_buffer(intel, intel_obj);
         drm_intel_bo_subdata(intel_obj->buffer, 0, size, data);
      } else if (intel->gen < 6) {
         if (busy) {
            drm_intel_bo *temp_bo;

            temp_bo = drm_intel_bo_alloc(intel->bufmgr, "subdata temp", size, 64);

            drm_intel_bo_subdata(temp_bo, 0, size, data);

            intel_emit_linear_blit(intel,
                                   intel_obj->buffer, offset,
                                   temp_bo, 0,
                                   size);

            drm_intel_bo_unreference(temp_bo);
         } else {
            drm_intel_bo_subdata(intel_obj->buffer, offset, size, data);
         }
      } else {
         /* Can't use the blit to modify the buffer in the middle of a batch. */
         if (drm_intel_bo_references(intel->batch.bo, intel_obj->buffer)) {
            intel_batchbuffer_flush(intel);
         }
         drm_intel_bo_subdata(intel_obj->buffer, offset, size, data);
      }
   }
}


/**
 * Called via glGetBufferSubDataARB().
 */
static void
intel_bufferobj_get_subdata(struct gl_context * ctx,
                            GLenum target,
                            GLintptrARB offset,
                            GLsizeiptrARB size,
                            GLvoid * data, struct gl_buffer_object *obj)
{
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);
   if (intel_obj->sys_buffer)
      memcpy(data, (char *)intel_obj->sys_buffer + offset, size);
   else
      drm_intel_bo_get_subdata(intel_obj->buffer, offset, size, data);
}


/**
 * Called via glMapBufferARB().
 */
static void *
intel_bufferobj_map(struct gl_context * ctx,
                    GLenum target,
                    GLenum access, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
   GLboolean read_only = (access == GL_READ_ONLY_ARB);
   GLboolean write_only = (access == GL_WRITE_ONLY_ARB);

   assert(intel_obj);

   if (intel_obj->sys_buffer) {
      if (!read_only && intel_obj->buffer) {
         drm_intel_bo_unreference(intel_obj->buffer);
         intel_obj->buffer = NULL;
         intel_obj->source = 0;
      }
      obj->Pointer = intel_obj->sys_buffer;
      obj->Length = obj->Size;
      obj->Offset = 0;
      return obj->Pointer;
   }

   /* Flush any existing batchbuffer that might reference this data. */
   if (drm_intel_bo_references(intel->batch.bo, intel_obj->buffer))
      intel_flush(ctx);

   if (intel_obj->region)
      intel_bufferobj_cow(intel, intel_obj);

   if (intel_obj->buffer == NULL) {
      obj->Pointer = NULL;
      return NULL;
   }

   if (write_only) {
      drm_intel_gem_bo_map_gtt(intel_obj->buffer);
      intel_obj->mapped_gtt = GL_TRUE;
   } else {
      drm_intel_bo_map(intel_obj->buffer, !read_only);
      intel_obj->mapped_gtt = GL_FALSE;
   }

   obj->Pointer = intel_obj->buffer->virtual;
   obj->Length = obj->Size;
   obj->Offset = 0;

   return obj->Pointer;
}
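
/* Hypothetical GL-level sketch: a write-only map takes the GTT path above,
 * which avoids pulling the buffer's contents into the CPU cache.  "verts"
 * is illustrative only.
 *
 *    void *p = glMapBuffer(GL_ARRAY_BUFFER, GL_WRITE_ONLY_ARB);
 *    memcpy(p, verts, sizeof(verts));
 *    glUnmapBuffer(GL_ARRAY_BUFFER);
 */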

/**
 * Called via glMapBufferRange().
 *
 * The goal of this extension is to allow apps to accumulate their rendering
 * at the same time as they accumulate their buffer object.  Without it,
 * you'd end up blocking on execution of rendering every time you mapped
 * the buffer to put new data in.
 *
 * We support it in 3 ways: If unsynchronized, then don't bother
 * flushing the batchbuffer before mapping the buffer, which can save blocking
 * in many cases.  If we would still block, and they allow the whole buffer
 * to be invalidated, then just allocate a new buffer to replace the old one.
 * If not, and we'd block, and they allow the subrange of the buffer to be
 * invalidated, then we can make a new little BO, let them write into that,
 * and blit it into the real BO at unmap time.
 */
static void *
intel_bufferobj_map_range(struct gl_context * ctx,
                          GLenum target, GLintptr offset, GLsizeiptr length,
                          GLbitfield access, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);

   /* _mesa_MapBufferRange (GL entrypoint) sets these, but the vbo module also
    * internally uses our functions directly.
    */
   obj->Offset = offset;
   obj->Length = length;
   obj->AccessFlags = access;

   if (intel_obj->sys_buffer) {
      if (access != GL_READ_ONLY_ARB && intel_obj->buffer) {
         drm_intel_bo_unreference(intel_obj->buffer);
         intel_obj->buffer = NULL;
         intel_obj->source = 0;
      }
      obj->Pointer = (char *)intel_obj->sys_buffer + offset;
      return obj->Pointer;
   }

   if (intel_obj->region)
      intel_bufferobj_cow(intel, intel_obj);

   /* If the mapping is synchronized with other GL operations, flush
    * the batchbuffer so that GEM knows about the buffer access for later
    * syncing.
    */
   if (!(access & GL_MAP_UNSYNCHRONIZED_BIT) &&
       drm_intel_bo_references(intel->batch.bo, intel_obj->buffer))
      intel_flush(ctx);

   if (intel_obj->buffer == NULL) {
      obj->Pointer = NULL;
      return NULL;
   }

   /* If the user doesn't care about existing buffer contents and mapping
    * would cause us to block, then throw out the old buffer.
    */
   if (!(access & GL_MAP_UNSYNCHRONIZED_BIT) &&
       (access & GL_MAP_INVALIDATE_BUFFER_BIT) &&
       drm_intel_bo_busy(intel_obj->buffer)) {
      drm_intel_bo_unreference(intel_obj->buffer);
      intel_bufferobj_alloc_buffer(intel, intel_obj);
   }

   /* If the user is mapping a range of an active buffer object but
    * doesn't require the current contents of that range, make a new
    * BO, and we'll copy what they put in there out at unmap or
    * FlushRange time.
    */
   if ((access & GL_MAP_INVALIDATE_RANGE_BIT) &&
       drm_intel_bo_busy(intel_obj->buffer)) {
      if (access & GL_MAP_FLUSH_EXPLICIT_BIT) {
         intel_obj->range_map_buffer = malloc(length);
         obj->Pointer = intel_obj->range_map_buffer;
      } else {
         intel_obj->range_map_bo = drm_intel_bo_alloc(intel->bufmgr,
                                                      "range map",
                                                      length, 64);
         if (!(access & GL_MAP_READ_BIT)) {
            drm_intel_gem_bo_map_gtt(intel_obj->range_map_bo);
            intel_obj->mapped_gtt = GL_TRUE;
         } else {
            drm_intel_bo_map(intel_obj->range_map_bo,
                             (access & GL_MAP_WRITE_BIT) != 0);
            intel_obj->mapped_gtt = GL_FALSE;
         }
         obj->Pointer = intel_obj->range_map_bo->virtual;
      }
      return obj->Pointer;
   }

   if (!(access & GL_MAP_READ_BIT)) {
      drm_intel_gem_bo_map_gtt(intel_obj->buffer);
      intel_obj->mapped_gtt = GL_TRUE;
   } else {
      drm_intel_bo_map(intel_obj->buffer, (access & GL_MAP_WRITE_BIT) != 0);
      intel_obj->mapped_gtt = GL_FALSE;
   }

   obj->Pointer = (char *)intel_obj->buffer->virtual + offset;
   return obj->Pointer;
}
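
/* Hypothetical application-side sketch of the pattern the range-map path
 * above is built for: streaming new data into a busy buffer without
 * stalling, with explicit flushes.  "verts" and "len" are illustrative.
 * Note that the glFlushMappedBufferRange offset is relative to the start
 * of the mapped range.
 *
 *    void *p = glMapBufferRange(GL_ARRAY_BUFFER, offset, len,
 *                               GL_MAP_WRITE_BIT |
 *                               GL_MAP_INVALIDATE_RANGE_BIT |
 *                               GL_MAP_FLUSH_EXPLICIT_BIT);
 *    memcpy(p, verts, len);
 *    glFlushMappedBufferRange(GL_ARRAY_BUFFER, 0, len);
 *    glUnmapBuffer(GL_ARRAY_BUFFER);
 */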

/* Ideally we'd use a BO to avoid taking up cache space for the temporary
 * data, but FlushMappedBufferRange may be followed by further writes to
 * the pointer, so we would have to re-map after emitting our blit, which
 * would defeat the point.
 */
static void
intel_bufferobj_flush_mapped_range(struct gl_context *ctx, GLenum target,
                                   GLintptr offset, GLsizeiptr length,
                                   struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
   drm_intel_bo *temp_bo;

   /* Unless we're in the range map using a temporary system buffer,
    * there's no work to do.
    */
   if (intel_obj->range_map_buffer == NULL)
      return;

   if (length == 0)
      return;

   temp_bo = drm_intel_bo_alloc(intel->bufmgr, "range map flush", length, 64);

   drm_intel_bo_subdata(temp_bo, 0, length, intel_obj->range_map_buffer);

   intel_emit_linear_blit(intel,
                          intel_obj->buffer, obj->Offset + offset,
                          temp_bo, 0,
                          length);

   drm_intel_bo_unreference(temp_bo);
}


/**
 * Called via glUnmapBuffer().
 */
static GLboolean
intel_bufferobj_unmap(struct gl_context * ctx,
                      GLenum target, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);
   assert(obj->Pointer);
   if (intel_obj->sys_buffer != NULL) {
      /* always keep the mapping around. */
   } else if (intel_obj->range_map_buffer != NULL) {
      /* Since we've emitted some blits to buffers that will (likely) be used
       * in rendering operations in other cache domains in this batch, emit a
       * flush.  Once again, we wish for a domain tracker in libdrm to cover
       * usage inside of a batchbuffer.
       */
      intel_batchbuffer_emit_mi_flush(intel);
      free(intel_obj->range_map_buffer);
      intel_obj->range_map_buffer = NULL;
   } else if (intel_obj->range_map_bo != NULL) {
      if (intel_obj->mapped_gtt) {
         drm_intel_gem_bo_unmap_gtt(intel_obj->range_map_bo);
      } else {
         drm_intel_bo_unmap(intel_obj->range_map_bo);
      }

      intel_emit_linear_blit(intel,
                             intel_obj->buffer, obj->Offset,
                             intel_obj->range_map_bo, 0,
                             obj->Length);

      /* Since we've emitted some blits to buffers that will (likely) be used
       * in rendering operations in other cache domains in this batch, emit a
       * flush.  Once again, we wish for a domain tracker in libdrm to cover
       * usage inside of a batchbuffer.
       */
      intel_batchbuffer_emit_mi_flush(intel);

      drm_intel_bo_unreference(intel_obj->range_map_bo);
      intel_obj->range_map_bo = NULL;
   } else if (intel_obj->buffer != NULL) {
      if (intel_obj->mapped_gtt) {
         drm_intel_gem_bo_unmap_gtt(intel_obj->buffer);
      } else {
         drm_intel_bo_unmap(intel_obj->buffer);
      }
   }
   obj->Pointer = NULL;
   obj->Offset = 0;
   obj->Length = 0;

   return GL_TRUE;
}

drm_intel_bo *
intel_bufferobj_buffer(struct intel_context *intel,
                       struct intel_buffer_object *intel_obj,
                       GLuint flag)
{
   if (intel_obj->region) {
      if (flag == INTEL_WRITE_PART)
         intel_bufferobj_cow(intel, intel_obj);
      else if (flag == INTEL_WRITE_FULL) {
         intel_bufferobj_release_region(intel, intel_obj);
         intel_bufferobj_alloc_buffer(intel, intel_obj);
      }
   }

   if (intel_obj->source) {
      drm_intel_bo_unreference(intel_obj->buffer);
      intel_obj->buffer = NULL;
      intel_obj->source = 0;
   }

   if (intel_obj->buffer == NULL) {
      intel_bufferobj_alloc_buffer(intel, intel_obj);
      drm_intel_bo_subdata(intel_obj->buffer,
                           0, intel_obj->Base.Size,
                           intel_obj->sys_buffer);

      free(intel_obj->sys_buffer);
      intel_obj->sys_buffer = NULL;
      intel_obj->offset = 0;
   }

   return intel_obj->buffer;
}

#define INTEL_UPLOAD_SIZE (64*1024)

void
intel_upload_finish(struct intel_context *intel)
{
   if (!intel->upload.bo)
      return;

   if (intel->upload.buffer_len) {
      drm_intel_bo_subdata(intel->upload.bo,
                           intel->upload.buffer_offset,
                           intel->upload.buffer_len,
                           intel->upload.buffer);
      intel->upload.buffer_len = 0;
   }

   drm_intel_bo_unreference(intel->upload.bo);
   intel->upload.bo = NULL;
}

static void wrap_buffers(struct intel_context *intel, GLuint size)
{
   intel_upload_finish(intel);

   if (size < INTEL_UPLOAD_SIZE)
      size = INTEL_UPLOAD_SIZE;

   intel->upload.bo = drm_intel_bo_alloc(intel->bufmgr, "upload", size, 0);
   intel->upload.offset = 0;
}

void intel_upload_data(struct intel_context *intel,
                       const void *ptr, GLuint size, GLuint align,
                       drm_intel_bo **return_bo,
                       GLuint *return_offset)
{
   GLuint base, delta;

   base = (intel->upload.offset + align - 1) / align * align;
   if (intel->upload.bo == NULL || base + size > intel->upload.bo->size) {
      wrap_buffers(intel, size);
      base = 0;
   }

   drm_intel_bo_reference(intel->upload.bo);
   *return_bo = intel->upload.bo;
   *return_offset = base;

   delta = base - intel->upload.offset;
   if (intel->upload.buffer_len &&
       intel->upload.buffer_len + delta + size > sizeof(intel->upload.buffer))
   {
      drm_intel_bo_subdata(intel->upload.bo,
                           intel->upload.buffer_offset,
                           intel->upload.buffer_len,
                           intel->upload.buffer);
      intel->upload.buffer_len = 0;
   }

   if (size < sizeof(intel->upload.buffer))
   {
      if (intel->upload.buffer_len == 0)
         intel->upload.buffer_offset = base;
      else
         intel->upload.buffer_len += delta;

      memcpy(intel->upload.buffer + intel->upload.buffer_len, ptr, size);
      intel->upload.buffer_len += size;
   }
   else
   {
      drm_intel_bo_subdata(intel->upload.bo, base, size, ptr);
   }

   intel->upload.offset = base + size;
}
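
/* Hypothetical caller sketch: stream a small block of vertex data through
 * the shared upload BO and get back the BO and offset to use in a
 * relocation.  "verts" is illustrative; the function takes a reference on
 * the returned BO, so the caller must unreference it when done.
 *
 *    drm_intel_bo *bo;
 *    GLuint offset;
 *    intel_upload_data(intel, verts, sizeof(verts), 64, &bo, &offset);
 *    // ... emit state/relocations pointing at bo + offset ...
 *    drm_intel_bo_unreference(bo);
 */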

void *intel_upload_map(struct intel_context *intel, GLuint size, GLuint align)
{
   GLuint base, delta;
   char *ptr;

   base = (intel->upload.offset + align - 1) / align * align;
   if (intel->upload.bo == NULL || base + size > intel->upload.bo->size) {
      wrap_buffers(intel, size);
      base = 0;
   }

   delta = base - intel->upload.offset;
   if (intel->upload.buffer_len &&
       intel->upload.buffer_len + delta + size > sizeof(intel->upload.buffer))
   {
      drm_intel_bo_subdata(intel->upload.bo,
                           intel->upload.buffer_offset,
                           intel->upload.buffer_len,
                           intel->upload.buffer);
      intel->upload.buffer_len = 0;
   }

   if (size <= sizeof(intel->upload.buffer)) {
      if (intel->upload.buffer_len == 0)
         intel->upload.buffer_offset = base;
      else
         intel->upload.buffer_len += delta;

      ptr = intel->upload.buffer + intel->upload.buffer_len;
      intel->upload.buffer_len += size;
   } else
      ptr = malloc(size);

   return ptr;
}

void intel_upload_unmap(struct intel_context *intel,
                        const void *ptr, GLuint size, GLuint align,
                        drm_intel_bo **return_bo,
                        GLuint *return_offset)
{
   GLuint base;

   base = (intel->upload.offset + align - 1) / align * align;
   if (size > sizeof(intel->upload.buffer)) {
      drm_intel_bo_subdata(intel->upload.bo, base, size, ptr);
      free((void*)ptr);
   }

   drm_intel_bo_reference(intel->upload.bo);
   *return_bo = intel->upload.bo;
   *return_offset = base;

   intel->upload.offset = base + size;
}
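
/* Hypothetical sketch of the map/unmap pairing above: the same size and
 * align must be passed to both calls, since intel->upload.offset is only
 * advanced at unmap time and both sides recompute the same base from it.
 *
 *    void *dst = intel_upload_map(intel, size, 64);
 *    memcpy(dst, src, size);  // or generate the data in place
 *    intel_upload_unmap(intel, dst, size, 64, &bo, &offset);
 */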

drm_intel_bo *
intel_bufferobj_source(struct intel_context *intel,
                       struct intel_buffer_object *intel_obj,
                       GLuint *offset)
{
   if (intel_obj->buffer == NULL) {
      intel_upload_data(intel,
                        intel_obj->sys_buffer, intel_obj->Base.Size, 64,
                        &intel_obj->buffer, &intel_obj->offset);
      intel_obj->source = 1;
   }

   *offset = intel_obj->offset;
   return intel_obj->buffer;
}

static void
intel_bufferobj_copy_subdata(struct gl_context *ctx,
                             struct gl_buffer_object *src,
                             struct gl_buffer_object *dst,
                             GLintptr read_offset, GLintptr write_offset,
                             GLsizeiptr size)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_src = intel_buffer_object(src);
   struct intel_buffer_object *intel_dst = intel_buffer_object(dst);
   drm_intel_bo *src_bo, *dst_bo;
   GLuint src_offset;

   if (size == 0)
      return;

   /* If we're in system memory, just map and memcpy. */
   if (intel_src->sys_buffer || intel_dst->sys_buffer || intel->gen >= 6) {
      /* The same buffer may be used, but note that regions copied may
       * not overlap.
       */
      if (src == dst) {
         char *ptr = intel_bufferobj_map(ctx, GL_COPY_WRITE_BUFFER,
                                         GL_READ_WRITE, dst);
         memcpy(ptr + write_offset, ptr + read_offset, size);
         intel_bufferobj_unmap(ctx, GL_COPY_WRITE_BUFFER, dst);
      } else {
         const char *src_ptr;
         char *dst_ptr;

         src_ptr = intel_bufferobj_map(ctx, GL_COPY_READ_BUFFER,
                                       GL_READ_ONLY, src);
         dst_ptr = intel_bufferobj_map(ctx, GL_COPY_WRITE_BUFFER,
                                       GL_WRITE_ONLY, dst);

         memcpy(dst_ptr + write_offset, src_ptr + read_offset, size);

         intel_bufferobj_unmap(ctx, GL_COPY_READ_BUFFER, src);
         intel_bufferobj_unmap(ctx, GL_COPY_WRITE_BUFFER, dst);
      }
      return;
   }

   /* Otherwise, we have real BOs, so blit them. */

   dst_bo = intel_bufferobj_buffer(intel, intel_dst, INTEL_WRITE_PART);
   src_bo = intel_bufferobj_source(intel, intel_src, &src_offset);

   intel_emit_linear_blit(intel,
                          dst_bo, write_offset,
                          src_bo, read_offset + src_offset, size);

   /* Since we've emitted some blits to buffers that will (likely) be used
    * in rendering operations in other cache domains in this batch, emit a
    * flush.  Once again, we wish for a domain tracker in libdrm to cover
    * usage inside of a batchbuffer.
    */
   intel_batchbuffer_emit_mi_flush(intel);
}
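
/* Hypothetical GL-level sketch of the call that routes into the copy path
 * above; "src", "dst", and "size" are illustrative.
 *
 *    glBindBuffer(GL_COPY_READ_BUFFER, src);
 *    glBindBuffer(GL_COPY_WRITE_BUFFER, dst);
 *    glCopyBufferSubData(GL_COPY_READ_BUFFER, GL_COPY_WRITE_BUFFER,
 *                        0, 0, size);
 */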

#if FEATURE_APPLE_object_purgeable
static GLenum
intel_buffer_purgeable(struct gl_context * ctx,
                       drm_intel_bo *buffer,
                       GLenum option)
{
   int retained = 0;

   if (buffer != NULL)
      retained = drm_intel_bo_madvise (buffer, I915_MADV_DONTNEED);

   return retained ? GL_VOLATILE_APPLE : GL_RELEASED_APPLE;
}

static GLenum
intel_buffer_object_purgeable(struct gl_context * ctx,
                              struct gl_buffer_object *obj,
                              GLenum option)
{
   struct intel_buffer_object *intel;

   intel = intel_buffer_object (obj);
   if (intel->buffer != NULL)
      return intel_buffer_purgeable (ctx, intel->buffer, option);

   if (option == GL_RELEASED_APPLE) {
      if (intel->sys_buffer != NULL) {
         free(intel->sys_buffer);
         intel->sys_buffer = NULL;
      }

      return GL_RELEASED_APPLE;
   } else {
      /* XXX Create the buffer and madvise(MADV_DONTNEED)? */
      return intel_buffer_purgeable (ctx,
                                     intel_bufferobj_buffer(intel_context(ctx),
                                                            intel, INTEL_READ),
                                     option);
   }
}

static GLenum
intel_texture_object_purgeable(struct gl_context * ctx,
                               struct gl_texture_object *obj,
                               GLenum option)
{
   struct intel_texture_object *intel;

   intel = intel_texture_object(obj);
   if (intel->mt == NULL || intel->mt->region == NULL)
      return GL_RELEASED_APPLE;

   return intel_buffer_purgeable (ctx, intel->mt->region->buffer, option);
}

static GLenum
intel_render_object_purgeable(struct gl_context * ctx,
                              struct gl_renderbuffer *obj,
                              GLenum option)
{
   struct intel_renderbuffer *intel;

   intel = intel_renderbuffer(obj);
   if (intel->region == NULL)
      return GL_RELEASED_APPLE;

   return intel_buffer_purgeable (ctx, intel->region->buffer, option);
}

static GLenum
intel_buffer_unpurgeable(struct gl_context * ctx,
                         drm_intel_bo *buffer,
                         GLenum option)
{
   int retained;

   retained = 0;
   if (buffer != NULL)
      retained = drm_intel_bo_madvise (buffer, I915_MADV_WILLNEED);

   return retained ? GL_RETAINED_APPLE : GL_UNDEFINED_APPLE;
}

static GLenum
intel_buffer_object_unpurgeable(struct gl_context * ctx,
                                struct gl_buffer_object *obj,
                                GLenum option)
{
   return intel_buffer_unpurgeable (ctx, intel_buffer_object (obj)->buffer, option);
}

static GLenum
intel_texture_object_unpurgeable(struct gl_context * ctx,
                                 struct gl_texture_object *obj,
                                 GLenum option)
{
   struct intel_texture_object *intel;

   intel = intel_texture_object(obj);
   if (intel->mt == NULL || intel->mt->region == NULL)
      return GL_UNDEFINED_APPLE;

   return intel_buffer_unpurgeable (ctx, intel->mt->region->buffer, option);
}

static GLenum
intel_render_object_unpurgeable(struct gl_context * ctx,
                                struct gl_renderbuffer *obj,
                                GLenum option)
{
   struct intel_renderbuffer *intel;

   intel = intel_renderbuffer(obj);
   if (intel->region == NULL)
      return GL_UNDEFINED_APPLE;

   return intel_buffer_unpurgeable (ctx, intel->region->buffer, option);
}
#endif

void
intelInitBufferObjectFuncs(struct dd_function_table *functions)
{
   functions->NewBufferObject = intel_bufferobj_alloc;
   functions->DeleteBuffer = intel_bufferobj_free;
   functions->BufferData = intel_bufferobj_data;
   functions->BufferSubData = intel_bufferobj_subdata;
   functions->GetBufferSubData = intel_bufferobj_get_subdata;
   functions->MapBuffer = intel_bufferobj_map;
   functions->MapBufferRange = intel_bufferobj_map_range;
   functions->FlushMappedBufferRange = intel_bufferobj_flush_mapped_range;
   functions->UnmapBuffer = intel_bufferobj_unmap;
   functions->CopyBufferSubData = intel_bufferobj_copy_subdata;

#if FEATURE_APPLE_object_purgeable
   functions->BufferObjectPurgeable = intel_buffer_object_purgeable;
   functions->TextureObjectPurgeable = intel_texture_object_purgeable;
   functions->RenderObjectPurgeable = intel_render_object_purgeable;

   functions->BufferObjectUnpurgeable = intel_buffer_object_unpurgeable;
   functions->TextureObjectUnpurgeable = intel_texture_object_unpurgeable;
   functions->RenderObjectUnpurgeable = intel_render_object_unpurgeable;
#endif
}