i965: Assert array index on access to vec4_visitor's arrays.
[mesa.git] src/mesa/drivers/dri/i915/intel_buffer_objects.c
/**************************************************************************
 *
 * Copyright 2003 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/


#include "main/imports.h"
#include "main/mtypes.h"
#include "main/macros.h"
#include "main/bufferobj.h"

#include "intel_blit.h"
#include "intel_buffer_objects.h"
#include "intel_batchbuffer.h"
#include "intel_context.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
#include "intel_regions.h"

static GLboolean
intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj,
                      gl_map_buffer_index index);

/** Allocates a new drm_intel_bo to store the data for the buffer object. */
static void
intel_bufferobj_alloc_buffer(struct intel_context *intel,
                             struct intel_buffer_object *intel_obj)
{
   intel_obj->buffer = drm_intel_bo_alloc(intel->bufmgr, "bufferobj",
                                          intel_obj->Base.Size, 64);
}

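/**
 * Drops our reference on the backing drm_intel_bo (if any) and clears the
 * bookkeeping for the upload-buffer "source" optimization, so the next use
 * allocates or re-uploads fresh storage.
 */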
static void
release_buffer(struct intel_buffer_object *intel_obj)
{
   drm_intel_bo_unreference(intel_obj->buffer);
   intel_obj->buffer = NULL;
   intel_obj->offset = 0;
   intel_obj->source = 0;
}

/**
 * There is some duplication between Mesa's bufferobjects and our
 * bufmgr buffers.  Both have an integer handle and a hashtable to
 * look up an opaque structure.  It would be nice if the handles and
 * internal structure were somehow shared.
 */
static struct gl_buffer_object *
intel_bufferobj_alloc(struct gl_context * ctx, GLuint name, GLenum target)
{
   struct intel_buffer_object *obj = CALLOC_STRUCT(intel_buffer_object);

   _mesa_initialize_buffer_object(ctx, &obj->Base, name, target);

   obj->buffer = NULL;

   return &obj->Base;
}

/**
 * Deallocate/free a vertex/pixel buffer object.
 * Called via glDeleteBuffersARB().
 */
static void
intel_bufferobj_free(struct gl_context * ctx, struct gl_buffer_object *obj)
{
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);

   /* Buffer objects are automatically unmapped when deleted according to
    * the spec, but Mesa doesn't call UnmapBuffer for us at context destroy
    * time (though it does when you call glDeleteBuffers).
    */
   _mesa_buffer_unmap_all_mappings(ctx, obj);

   free(intel_obj->sys_buffer);

   drm_intel_bo_unreference(intel_obj->buffer);
   free(intel_obj);
}


/**
 * Allocate space for and store data in a buffer object.  Any data that was
 * previously stored in the buffer object is lost.  If data is NULL,
 * memory will be allocated, but no copy will occur.
 * Called via ctx->Driver.BufferData().
 * \return true for success, false if out of memory
 */
static GLboolean
intel_bufferobj_data(struct gl_context * ctx,
                     GLenum target,
                     GLsizeiptrARB size,
                     const GLvoid * data,
                     GLenum usage,
                     GLbitfield storageFlags,
                     struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   intel_obj->Base.Size = size;
   intel_obj->Base.Usage = usage;
   intel_obj->Base.StorageFlags = storageFlags;

   assert(!obj->Mappings[MAP_USER].Pointer); /* Mesa should have unmapped it */
   assert(!obj->Mappings[MAP_INTERNAL].Pointer);

   if (intel_obj->buffer != NULL)
      release_buffer(intel_obj);

   free(intel_obj->sys_buffer);
   intel_obj->sys_buffer = NULL;

   if (size != 0) {
      /* Stick VBOs in system memory, as we're always doing swtnl with their
       * contents anyway.
       */
      if (target == GL_ARRAY_BUFFER || target == GL_ELEMENT_ARRAY_BUFFER) {
         intel_obj->sys_buffer = malloc(size);
         if (intel_obj->sys_buffer != NULL) {
            if (data != NULL)
               memcpy(intel_obj->sys_buffer, data, size);
            return true;
         }
      }

      intel_bufferobj_alloc_buffer(intel, intel_obj);
      if (!intel_obj->buffer)
         return false;

      if (data != NULL)
         drm_intel_bo_subdata(intel_obj->buffer, 0, size, data);
   }

   return true;
}

/**
 * Replace data in a subrange of buffer object.  If the data range
 * specified by size + offset extends beyond the end of the buffer or
 * if data is NULL, no copy is performed.
 * Called via glBufferSubDataARB().
 */
static void
intel_bufferobj_subdata(struct gl_context * ctx,
                        GLintptrARB offset,
                        GLsizeiptrARB size,
                        const GLvoid * data, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
   bool busy;

   if (size == 0)
      return;

   assert(intel_obj);

   /* If we have a single copy in system memory, update that */
   if (intel_obj->sys_buffer) {
      if (intel_obj->source)
         release_buffer(intel_obj);

      if (intel_obj->buffer == NULL) {
         memcpy((char *)intel_obj->sys_buffer + offset, data, size);
         return;
      }

      free(intel_obj->sys_buffer);
      intel_obj->sys_buffer = NULL;
   }

   /* Otherwise we need to update the copy in video memory. */
   busy =
      drm_intel_bo_busy(intel_obj->buffer) ||
      drm_intel_bo_references(intel->batch.bo, intel_obj->buffer);

   if (busy) {
      if (size == intel_obj->Base.Size) {
         /* Replace the current busy bo with fresh data. */
         drm_intel_bo_unreference(intel_obj->buffer);
         intel_bufferobj_alloc_buffer(intel, intel_obj);
         drm_intel_bo_subdata(intel_obj->buffer, 0, size, data);
      } else {
         perf_debug("Using a blit copy to avoid stalling on %ldb "
                    "glBufferSubData() to a busy buffer object.\n",
                    (long)size);
         drm_intel_bo *temp_bo =
            drm_intel_bo_alloc(intel->bufmgr, "subdata temp", size, 64);

         drm_intel_bo_subdata(temp_bo, 0, size, data);

         intel_emit_linear_blit(intel,
                                intel_obj->buffer, offset,
                                temp_bo, 0,
                                size);

         drm_intel_bo_unreference(temp_bo);
      }
   } else {
      drm_intel_bo_subdata(intel_obj->buffer, offset, size, data);
   }
}


/**
 * Called via glGetBufferSubDataARB().
 */
static void
intel_bufferobj_get_subdata(struct gl_context * ctx,
                            GLintptrARB offset,
                            GLsizeiptrARB size,
                            GLvoid * data, struct gl_buffer_object *obj)
{
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
   struct intel_context *intel = intel_context(ctx);

   assert(intel_obj);
   if (intel_obj->sys_buffer)
      memcpy(data, (char *)intel_obj->sys_buffer + offset, size);
   else {
      if (drm_intel_bo_references(intel->batch.bo, intel_obj->buffer)) {
         intel_batchbuffer_flush(intel);
      }
      drm_intel_bo_get_subdata(intel_obj->buffer, offset, size, data);
   }
}


/**
 * Called via glMapBufferRange and glMapBuffer
 *
 * The goal of this extension is to allow apps to accumulate their rendering
 * at the same time as they accumulate their buffer object.  Without it,
 * you'd end up blocking on execution of rendering every time you mapped
 * the buffer to put new data in.
 *
 * We support it in 3 ways: If unsynchronized, then don't bother
 * flushing the batchbuffer before mapping the buffer, which can save blocking
 * in many cases.  If we would still block, and they allow the whole buffer
 * to be invalidated, then just allocate a new buffer to replace the old one.
 * If not, and we'd block, and they allow the subrange of the buffer to be
 * invalidated, then we can make a new little BO, let them write into that,
 * and blit it into the real BO at unmap time.
 */
static void *
intel_bufferobj_map_range(struct gl_context * ctx,
                          GLintptr offset, GLsizeiptr length,
                          GLbitfield access, struct gl_buffer_object *obj,
                          gl_map_buffer_index index)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);

   /* _mesa_MapBufferRange (GL entrypoint) sets these, but the vbo module also
    * internally uses our functions directly.
    */
   obj->Mappings[index].Offset = offset;
   obj->Mappings[index].Length = length;
   obj->Mappings[index].AccessFlags = access;

   if (intel_obj->sys_buffer) {
      const bool read_only =
         (access & (GL_MAP_READ_BIT | GL_MAP_WRITE_BIT)) == GL_MAP_READ_BIT;

      if (!read_only && intel_obj->source)
         release_buffer(intel_obj);

      if (!intel_obj->buffer || intel_obj->source) {
         obj->Mappings[index].Pointer = intel_obj->sys_buffer + offset;
         return obj->Mappings[index].Pointer;
      }

      free(intel_obj->sys_buffer);
      intel_obj->sys_buffer = NULL;
   }

   if (intel_obj->buffer == NULL) {
      obj->Mappings[index].Pointer = NULL;
      return NULL;
   }

   /* If the access is synchronized (like a normal buffer mapping), then get
    * things flushed out so the later mapping syncs appropriately through GEM.
    * If the user doesn't care about existing buffer contents and mapping would
    * cause us to block, then throw out the old buffer.
    *
    * If they set INVALIDATE_BUFFER, we can pitch the current contents to
    * achieve the required synchronization.
    */
   if (!(access & GL_MAP_UNSYNCHRONIZED_BIT)) {
      if (drm_intel_bo_references(intel->batch.bo, intel_obj->buffer)) {
         if (access & GL_MAP_INVALIDATE_BUFFER_BIT) {
            drm_intel_bo_unreference(intel_obj->buffer);
            intel_bufferobj_alloc_buffer(intel, intel_obj);
         } else {
            perf_debug("Stalling on the GPU for mapping a busy buffer "
                       "object\n");
            intel_flush(ctx);
         }
      } else if (drm_intel_bo_busy(intel_obj->buffer) &&
                 (access & GL_MAP_INVALIDATE_BUFFER_BIT)) {
         drm_intel_bo_unreference(intel_obj->buffer);
         intel_bufferobj_alloc_buffer(intel, intel_obj);
      }
   }

   /* If the user is mapping a range of an active buffer object but
    * doesn't require the current contents of that range, make a new
    * BO, and we'll copy what they put in there out at unmap or
    * FlushRange time.
    */
   if ((access & GL_MAP_INVALIDATE_RANGE_BIT) &&
       drm_intel_bo_busy(intel_obj->buffer)) {
      /* Ensure that the base alignment of the allocation meets the alignment
       * guarantees the driver has advertised to the application.
       */
      const unsigned alignment = ctx->Const.MinMapBufferAlignment;
      const unsigned extra = (uintptr_t) offset % alignment;

      if (access & GL_MAP_FLUSH_EXPLICIT_BIT) {
         intel_obj->range_map_buffer[index] =
            _mesa_align_malloc(length + extra, alignment);
         obj->Mappings[index].Pointer =
            intel_obj->range_map_buffer[index] + extra;
      } else {
         intel_obj->range_map_bo[index] = drm_intel_bo_alloc(intel->bufmgr,
                                                             "range map",
                                                             length + extra,
                                                             alignment);
         if (!(access & GL_MAP_READ_BIT)) {
            drm_intel_gem_bo_map_gtt(intel_obj->range_map_bo[index]);
         } else {
            drm_intel_bo_map(intel_obj->range_map_bo[index],
                             (access & GL_MAP_WRITE_BIT) != 0);
         }
         obj->Mappings[index].Pointer =
            intel_obj->range_map_bo[index]->virtual + extra;
      }
      return obj->Mappings[index].Pointer;
   }

   if (access & GL_MAP_UNSYNCHRONIZED_BIT)
      drm_intel_gem_bo_map_unsynchronized(intel_obj->buffer);
   else if (!(access & GL_MAP_READ_BIT)) {
      drm_intel_gem_bo_map_gtt(intel_obj->buffer);
   } else {
      drm_intel_bo_map(intel_obj->buffer, (access & GL_MAP_WRITE_BIT) != 0);
   }

   obj->Mappings[index].Pointer = intel_obj->buffer->virtual + offset;
   return obj->Mappings[index].Pointer;
}

/* Ideally we'd use a BO to avoid taking up cache space for the temporary
 * data, but FlushMappedBufferRange may be followed by further writes to
 * the pointer, so we would have to re-map after emitting our blit, which
 * would defeat the point.
 */
static void
intel_bufferobj_flush_mapped_range(struct gl_context *ctx,
                                   GLintptr offset, GLsizeiptr length,
                                   struct gl_buffer_object *obj,
                                   gl_map_buffer_index index)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
   drm_intel_bo *temp_bo;

   /* Unless we're in the range map using a temporary system buffer,
    * there's no work to do.
    */
   if (intel_obj->range_map_buffer[index] == NULL)
      return;

   if (length == 0)
      return;

   temp_bo = drm_intel_bo_alloc(intel->bufmgr, "range map flush", length, 64);

   /* Use obj->Mappings[index].Pointer instead of intel_obj->range_map_buffer
    * because the former points to the start of the user's mapping, while the
    * latter is the start of the temporary allocation, which may be padded to
    * meet alignment guarantees.
    */
   drm_intel_bo_subdata(temp_bo, 0, length, obj->Mappings[index].Pointer);

   intel_emit_linear_blit(intel,
                          intel_obj->buffer,
                          obj->Mappings[index].Offset + offset,
                          temp_bo, 0,
                          length);

   drm_intel_bo_unreference(temp_bo);
}


/**
 * Called via glUnmapBuffer().
 */
static GLboolean
intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj,
                      gl_map_buffer_index index)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);
   assert(obj->Mappings[index].Pointer);
   if (intel_obj->sys_buffer != NULL) {
      /* always keep the mapping around. */
   } else if (intel_obj->range_map_buffer[index] != NULL) {
      /* Since we've emitted some blits to buffers that will (likely) be used
       * in rendering operations in other cache domains in this batch, emit a
       * flush.  Once again, we wish for a domain tracker in libdrm to cover
       * usage inside of a batchbuffer.
       */
      intel_batchbuffer_emit_mi_flush(intel);
      _mesa_align_free(intel_obj->range_map_buffer[index]);
      intel_obj->range_map_buffer[index] = NULL;
   } else if (intel_obj->range_map_bo[index] != NULL) {
      const unsigned extra = obj->Mappings[index].Pointer -
                             intel_obj->range_map_bo[index]->virtual;

      drm_intel_bo_unmap(intel_obj->range_map_bo[index]);

      intel_emit_linear_blit(intel,
                             intel_obj->buffer, obj->Mappings[index].Offset,
                             intel_obj->range_map_bo[index], extra,
                             obj->Mappings[index].Length);

      /* Since we've emitted some blits to buffers that will (likely) be used
       * in rendering operations in other cache domains in this batch, emit a
       * flush.  Once again, we wish for a domain tracker in libdrm to cover
       * usage inside of a batchbuffer.
       */
      intel_batchbuffer_emit_mi_flush(intel);

      drm_intel_bo_unreference(intel_obj->range_map_bo[index]);
      intel_obj->range_map_bo[index] = NULL;
   } else if (intel_obj->buffer != NULL) {
      drm_intel_bo_unmap(intel_obj->buffer);
   }
   obj->Mappings[index].Pointer = NULL;
   obj->Mappings[index].Offset = 0;
   obj->Mappings[index].Length = 0;

   return true;
}

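/**
 * Returns the drm_intel_bo backing this buffer object, first allocating one
 * and uploading any system-memory shadow copy if necessary.  After this call
 * the BO (not sys_buffer) holds the authoritative copy of the data.
 */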
drm_intel_bo *
intel_bufferobj_buffer(struct intel_context *intel,
                       struct intel_buffer_object *intel_obj)
{
   if (intel_obj->source)
      release_buffer(intel_obj);

   if (intel_obj->buffer == NULL) {
      intel_bufferobj_alloc_buffer(intel, intel_obj);
      drm_intel_bo_subdata(intel_obj->buffer,
                           0, intel_obj->Base.Size,
                           intel_obj->sys_buffer);

      free(intel_obj->sys_buffer);
      intel_obj->sys_buffer = NULL;
      intel_obj->offset = 0;
   }

   return intel_obj->buffer;
}

#define INTEL_UPLOAD_SIZE (64*1024)

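/**
 * Flushes any data still sitting in the CPU-side staging buffer out to the
 * current upload BO and drops our reference on that BO, so the next
 * intel_upload_data() call starts with a fresh upload buffer.
 */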
void
intel_upload_finish(struct intel_context *intel)
{
   if (!intel->upload.bo)
      return;

   if (intel->upload.buffer_len) {
      drm_intel_bo_subdata(intel->upload.bo,
                           intel->upload.buffer_offset,
                           intel->upload.buffer_len,
                           intel->upload.buffer);
      intel->upload.buffer_len = 0;
   }

   drm_intel_bo_unreference(intel->upload.bo);
   intel->upload.bo = NULL;
}

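/**
 * Flushes the current upload BO and replaces it with a new one of at least
 * INTEL_UPLOAD_SIZE bytes (or "size" bytes, if larger), resetting the write
 * offset to the start of the new buffer.
 */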
static void wrap_buffers(struct intel_context *intel, GLuint size)
{
   intel_upload_finish(intel);

   if (size < INTEL_UPLOAD_SIZE)
      size = INTEL_UPLOAD_SIZE;

   intel->upload.bo = drm_intel_bo_alloc(intel->bufmgr, "upload", size, 0);
   intel->upload.offset = 0;
}

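/**
 * Copies "size" bytes from "ptr" into the shared streaming upload BO at the
 * next offset aligned to "align", wrapping to a fresh BO when the current
 * one is full.  Small copies are batched through the CPU-side staging buffer
 * (intel->upload.buffer) and flushed with drm_intel_bo_subdata(); larger
 * ones go straight to the BO.  Returns a new reference to the upload BO in
 * *return_bo and the data's offset within it in *return_offset; the caller
 * owns that reference.
 */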
void intel_upload_data(struct intel_context *intel,
                       const void *ptr, GLuint size, GLuint align,
                       drm_intel_bo **return_bo,
                       GLuint *return_offset)
{
   GLuint base, delta;

   base = (intel->upload.offset + align - 1) / align * align;
   if (intel->upload.bo == NULL || base + size > intel->upload.bo->size) {
      wrap_buffers(intel, size);
      base = 0;
   }

   drm_intel_bo_reference(intel->upload.bo);
   *return_bo = intel->upload.bo;
   *return_offset = base;

   delta = base - intel->upload.offset;
   if (intel->upload.buffer_len &&
       intel->upload.buffer_len + delta + size > sizeof(intel->upload.buffer))
   {
      drm_intel_bo_subdata(intel->upload.bo,
                           intel->upload.buffer_offset,
                           intel->upload.buffer_len,
                           intel->upload.buffer);
      intel->upload.buffer_len = 0;
   }

   if (size < sizeof(intel->upload.buffer))
   {
      if (intel->upload.buffer_len == 0)
         intel->upload.buffer_offset = base;
      else
         intel->upload.buffer_len += delta;

      memcpy(intel->upload.buffer + intel->upload.buffer_len, ptr, size);
      intel->upload.buffer_len += size;
   }
   else
   {
      drm_intel_bo_subdata(intel->upload.bo, base, size, ptr);
   }

   intel->upload.offset = base + size;
}

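/**
 * Returns a BO suitable for use as a blit/read source for this buffer
 * object.  If the object only has a system-memory copy, the data is streamed
 * into the shared upload BO via intel_upload_data() and the object is marked
 * as "source", meaning the BO is a read-only snapshot at some offset rather
 * than dedicated, writable storage.  *offset receives the data's offset
 * within the returned BO.
 */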
drm_intel_bo *
intel_bufferobj_source(struct intel_context *intel,
                       struct intel_buffer_object *intel_obj,
                       GLuint align, GLuint *offset)
{
   if (intel_obj->buffer == NULL) {
      intel_upload_data(intel,
                        intel_obj->sys_buffer, intel_obj->Base.Size, align,
                        &intel_obj->buffer, &intel_obj->offset);
      intel_obj->source = 1;
   }

   *offset = intel_obj->offset;
   return intel_obj->buffer;
}

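/**
 * Called via glCopyBufferSubData().
 *
 * If either buffer still lives in system memory, the copy is done with a
 * plain map and memcpy/memmove; otherwise both sides are promoted to BOs
 * and copied with a linear blit on the GPU.
 */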
static void
intel_bufferobj_copy_subdata(struct gl_context *ctx,
                             struct gl_buffer_object *src,
                             struct gl_buffer_object *dst,
                             GLintptr read_offset, GLintptr write_offset,
                             GLsizeiptr size)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_src = intel_buffer_object(src);
   struct intel_buffer_object *intel_dst = intel_buffer_object(dst);
   drm_intel_bo *src_bo, *dst_bo;
   GLuint src_offset;

   if (size == 0)
      return;

   /* If we're in system memory, just map and memcpy. */
   if (intel_src->sys_buffer || intel_dst->sys_buffer) {
      /* The same buffer may be used, but note that regions copied may
       * not overlap.
       */
      if (src == dst) {
         char *ptr = intel_bufferobj_map_range(ctx, 0, dst->Size,
                                               GL_MAP_READ_BIT |
                                               GL_MAP_WRITE_BIT,
                                               dst, MAP_INTERNAL);
         memmove(ptr + write_offset, ptr + read_offset, size);
         intel_bufferobj_unmap(ctx, dst, MAP_INTERNAL);
      } else {
         const char *src_ptr;
         char *dst_ptr;

         src_ptr = intel_bufferobj_map_range(ctx, 0, src->Size,
                                             GL_MAP_READ_BIT, src,
                                             MAP_INTERNAL);
         dst_ptr = intel_bufferobj_map_range(ctx, 0, dst->Size,
                                             GL_MAP_WRITE_BIT, dst,
                                             MAP_INTERNAL);

         memcpy(dst_ptr + write_offset, src_ptr + read_offset, size);

         intel_bufferobj_unmap(ctx, src, MAP_INTERNAL);
         intel_bufferobj_unmap(ctx, dst, MAP_INTERNAL);
      }
      return;
   }

   /* Otherwise, we have real BOs, so blit them. */

   dst_bo = intel_bufferobj_buffer(intel, intel_dst);
   src_bo = intel_bufferobj_source(intel, intel_src, 64, &src_offset);

   intel_emit_linear_blit(intel,
                          dst_bo, write_offset,
                          src_bo, read_offset + src_offset, size);

   /* Since we've emitted some blits to buffers that will (likely) be used
    * in rendering operations in other cache domains in this batch, emit a
    * flush.  Once again, we wish for a domain tracker in libdrm to cover
    * usage inside of a batchbuffer.
    */
   intel_batchbuffer_emit_mi_flush(intel);
}

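/**
 * The *_purgeable()/*_unpurgeable() helpers below back the
 * GL_APPLE_object_purgeable entry points.  Marking a BO purgeable with
 * I915_MADV_DONTNEED lets the kernel discard its backing pages under memory
 * pressure; drm_intel_bo_madvise() reports whether the pages are still
 * retained.
 */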
static GLenum
intel_buffer_purgeable(drm_intel_bo *buffer)
{
   int retained = 0;

   if (buffer != NULL)
      retained = drm_intel_bo_madvise (buffer, I915_MADV_DONTNEED);

   return retained ? GL_VOLATILE_APPLE : GL_RELEASED_APPLE;
}

static GLenum
intel_buffer_object_purgeable(struct gl_context * ctx,
                              struct gl_buffer_object *obj,
                              GLenum option)
{
   struct intel_buffer_object *intel_obj = intel_buffer_object (obj);

   if (intel_obj->buffer != NULL)
      return intel_buffer_purgeable(intel_obj->buffer);

   if (option == GL_RELEASED_APPLE) {
      free(intel_obj->sys_buffer);
      intel_obj->sys_buffer = NULL;

      return GL_RELEASED_APPLE;
   } else {
      /* XXX Create the buffer and madvise(MADV_DONTNEED)? */
      struct intel_context *intel = intel_context(ctx);
      drm_intel_bo *bo = intel_bufferobj_buffer(intel, intel_obj);

      return intel_buffer_purgeable(bo);
   }
}

static GLenum
intel_texture_object_purgeable(struct gl_context * ctx,
                               struct gl_texture_object *obj,
                               GLenum option)
{
   struct intel_texture_object *intel;

   (void) ctx;
   (void) option;

   intel = intel_texture_object(obj);
   if (intel->mt == NULL || intel->mt->region == NULL)
      return GL_RELEASED_APPLE;

   return intel_buffer_purgeable(intel->mt->region->bo);
}

static GLenum
intel_render_object_purgeable(struct gl_context * ctx,
                              struct gl_renderbuffer *obj,
                              GLenum option)
{
   struct intel_renderbuffer *intel;

   (void) ctx;
   (void) option;

   intel = intel_renderbuffer(obj);
   if (intel->mt == NULL)
      return GL_RELEASED_APPLE;

   return intel_buffer_purgeable(intel->mt->region->bo);
}

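/**
 * Re-pinning path: I915_MADV_WILLNEED asks the kernel to keep the pages
 * again.  If they were already discarded while purgeable, the contents are
 * gone and we must report GL_UNDEFINED_APPLE.
 */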
static GLenum
intel_buffer_unpurgeable(drm_intel_bo *buffer)
{
   int retained;

   retained = 0;
   if (buffer != NULL)
      retained = drm_intel_bo_madvise (buffer, I915_MADV_WILLNEED);

   return retained ? GL_RETAINED_APPLE : GL_UNDEFINED_APPLE;
}

static GLenum
intel_buffer_object_unpurgeable(struct gl_context * ctx,
                                struct gl_buffer_object *obj,
                                GLenum option)
{
   (void) ctx;
   (void) option;

   return intel_buffer_unpurgeable(intel_buffer_object (obj)->buffer);
}

static GLenum
intel_texture_object_unpurgeable(struct gl_context * ctx,
                                 struct gl_texture_object *obj,
                                 GLenum option)
{
   struct intel_texture_object *intel;

   (void) ctx;
   (void) option;

   intel = intel_texture_object(obj);
   if (intel->mt == NULL || intel->mt->region == NULL)
      return GL_UNDEFINED_APPLE;

   return intel_buffer_unpurgeable(intel->mt->region->bo);
}

static GLenum
intel_render_object_unpurgeable(struct gl_context * ctx,
                                struct gl_renderbuffer *obj,
                                GLenum option)
{
   struct intel_renderbuffer *intel;

   (void) ctx;
   (void) option;

   intel = intel_renderbuffer(obj);
   if (intel->mt == NULL)
      return GL_UNDEFINED_APPLE;

   return intel_buffer_unpurgeable(intel->mt->region->bo);
}

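/**
 * Plugs the buffer-object hooks above into the device driver function table
 * so core Mesa dispatches glBufferData(), glMapBufferRange(), and friends to
 * this driver.
 */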
void
intelInitBufferObjectFuncs(struct dd_function_table *functions)
{
   functions->NewBufferObject = intel_bufferobj_alloc;
   functions->DeleteBuffer = intel_bufferobj_free;
   functions->BufferData = intel_bufferobj_data;
   functions->BufferSubData = intel_bufferobj_subdata;
   functions->GetBufferSubData = intel_bufferobj_get_subdata;
   functions->MapBufferRange = intel_bufferobj_map_range;
   functions->FlushMappedBufferRange = intel_bufferobj_flush_mapped_range;
   functions->UnmapBuffer = intel_bufferobj_unmap;
   functions->CopyBufferSubData = intel_bufferobj_copy_subdata;

   functions->BufferObjectPurgeable = intel_buffer_object_purgeable;
   functions->TextureObjectPurgeable = intel_texture_object_purgeable;
   functions->RenderObjectPurgeable = intel_render_object_purgeable;

   functions->BufferObjectUnpurgeable = intel_buffer_object_unpurgeable;
   functions->TextureObjectUnpurgeable = intel_texture_object_unpurgeable;
   functions->RenderObjectUnpurgeable = intel_render_object_unpurgeable;
}