mesa.git: src/mesa/drivers/dri/i965/intel_buffer_objects.c (at commit 452e6d33c070c75a99c6acc3f7be425e85410b96)
1 /*
2 * Copyright 2003 VMware, Inc.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial portions
15 * of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
18 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
21 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
22 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
23 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 */
25
26 /**
27 * @file intel_buffer_objects.c
28 *
29 * This provides core GL buffer object functionality.
30 */
31
32 #include "main/imports.h"
33 #include "main/mtypes.h"
34 #include "main/macros.h"
35 #include "main/streaming-load-memcpy.h"
36 #include "main/bufferobj.h"
37 #include "x86/common_x86_asm.h"
38
39 #include "brw_context.h"
40 #include "brw_blorp.h"
41 #include "intel_buffer_objects.h"
42 #include "intel_batchbuffer.h"
43 #include "intel_tiled_memcpy.h"
44
45 static void
46 mark_buffer_gpu_usage(struct intel_buffer_object *intel_obj,
47 uint32_t offset, uint32_t size)
48 {
49 intel_obj->gpu_active_start = MIN2(intel_obj->gpu_active_start, offset);
50 intel_obj->gpu_active_end = MAX2(intel_obj->gpu_active_end, offset + size);
51 }
52
53 static void
54 mark_buffer_inactive(struct intel_buffer_object *intel_obj)
55 {
56 intel_obj->gpu_active_start = ~0;
57 intel_obj->gpu_active_end = 0;
58 }
59
60 static void
61 mark_buffer_valid_data(struct intel_buffer_object *intel_obj,
62 uint32_t offset, uint32_t size)
63 {
64 intel_obj->valid_data_start = MIN2(intel_obj->valid_data_start, offset);
65 intel_obj->valid_data_end = MAX2(intel_obj->valid_data_end, offset + size);
66 }
67
68 static void
69 mark_buffer_invalid(struct intel_buffer_object *intel_obj)
70 {
71 intel_obj->valid_data_start = ~0;
72 intel_obj->valid_data_end = 0;
73 }
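/* Illustrative sketch (hypothetical, kept out of the build with #if 0): how
 * the single [start, end) intervals tracked above behave.  Disjoint ranges
 * are merged into one conservative span by the MIN2/MAX2 updates, and the
 * "empty" state is encoded as start > end.  The offsets are made up for the
 * example.
 */
#if 0
static void
example_range_tracking(struct intel_buffer_object *intel_obj)
{
   mark_buffer_inactive(intel_obj);            /* empty: start = ~0, end = 0 */
   mark_buffer_gpu_usage(intel_obj, 0, 64);    /* active range is now [0, 64) */
   mark_buffer_gpu_usage(intel_obj, 4096, 64); /* merged to one span: [0, 4160) */
   assert(intel_obj->gpu_active_start == 0);
   assert(intel_obj->gpu_active_end == 4096 + 64);
}
#endif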
74
75 /** Allocates a new brw_bo to store the data for the buffer object. */
76 static void
77 alloc_buffer_object(struct brw_context *brw,
78 struct intel_buffer_object *intel_obj)
79 {
80 const struct gl_context *ctx = &brw->ctx;
81
82 uint64_t size = intel_obj->Base.Size;
83 if (ctx->Const.RobustAccess) {
84 /* Pad out buffer objects with an extra 2kB (half a page).
85 *
86 * When pushing UBOs, we need to safeguard against 3DSTATE_CONSTANT_*
87 * reading out of bounds memory. The application might bind a UBO that's
88 * smaller than what the program expects. Ideally, we'd bind an extra
89 * push buffer containing zeros, but we have a limited number of those,
90 * so it's not always viable. Our only safe option is to pad all buffer
91 * objects by the maximum push data length, so that it will never read
92 * past the end of a BO.
93 *
94 * This is unfortunate, but it should result in at most 1 extra page,
95 * which probably isn't too terrible.
96 */
97 size += 64 * 32; /* max read length of 64 256-bit units */
98 }
99 intel_obj->buffer =
100 brw_bo_alloc(brw->bufmgr, "bufferobj", size, BRW_MEMZONE_OTHER);
101
102 /* The buffer might be bound as a uniform, storage, texture, or atomic
103 * buffer; if so, flag the corresponding driver state so the new BO is used. */
104 if (intel_obj->Base.UsageHistory & USAGE_UNIFORM_BUFFER)
105 brw->ctx.NewDriverState |= BRW_NEW_UNIFORM_BUFFER;
106 if (intel_obj->Base.UsageHistory & USAGE_SHADER_STORAGE_BUFFER)
107 brw->ctx.NewDriverState |= BRW_NEW_UNIFORM_BUFFER;
108 if (intel_obj->Base.UsageHistory & USAGE_TEXTURE_BUFFER)
109 brw->ctx.NewDriverState |= BRW_NEW_TEXTURE_BUFFER;
110 if (intel_obj->Base.UsageHistory & USAGE_ATOMIC_COUNTER_BUFFER)
111 brw->ctx.NewDriverState |= BRW_NEW_UNIFORM_BUFFER;
112
113 mark_buffer_inactive(intel_obj);
114 mark_buffer_invalid(intel_obj);
115 }
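/* Illustrative client-side sketch (hypothetical app code, not part of the
 * driver; kept out of the build with #if 0): the robust-access padding above
 * guards against this kind of pattern, where the bound UBO range is smaller
 * than the block the shader declares, so pushed-constant reads could
 * otherwise run past the end of the BO.  Assumes a context with
 * ARB_uniform_buffer_object and <GL/gl.h>; names and sizes are made up.
 */
#if 0
static void
example_bind_small_ubo(void)
{
   /* GLSL: layout(std140) uniform Block { vec4 data[128]; };  (2048 bytes) */
   GLuint ubo;
   glGenBuffers(1, &ubo);
   glBindBuffer(GL_UNIFORM_BUFFER, ubo);
   glBufferData(GL_UNIFORM_BUFFER, 256, NULL, GL_STATIC_DRAW); /* only 256 B */
   glBindBufferRange(GL_UNIFORM_BUFFER, 0, ubo, 0, 256);
   /* Drawing with the shader above is legal; the extra 64 * 32 = 2048 bytes
    * of padding keeps the hardware's out-of-bounds push reads inside the BO.
    */
}
#endif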
116
117 static void
118 release_buffer(struct intel_buffer_object *intel_obj)
119 {
120 brw_bo_unreference(intel_obj->buffer);
121 intel_obj->buffer = NULL;
122 }
123
124 /**
125 * The NewBufferObject() driver hook.
126 *
127 * Allocates a new intel_buffer_object structure and initializes it.
128 *
129 * There is some duplication between mesa's bufferobjects and our
130 * bufmgr buffers. Both have an integer handle and a hashtable to
131 * look up an opaque structure. It would be nice if the handles and
132 * internal structure were somehow shared.
133 */
134 static struct gl_buffer_object *
135 brw_new_buffer_object(struct gl_context * ctx, GLuint name)
136 {
137 struct intel_buffer_object *obj = CALLOC_STRUCT(intel_buffer_object);
138 if (!obj) {
139 _mesa_error_no_memory(__func__);
140 return NULL;
141 }
142
143 _mesa_initialize_buffer_object(ctx, &obj->Base, name);
144
145 obj->buffer = NULL;
146
147 return &obj->Base;
148 }
149
150 /**
151 * The DeleteBuffer() driver hook.
152 *
153 * Deletes a single OpenGL buffer object. Used by glDeleteBuffers().
154 */
155 static void
156 brw_delete_buffer(struct gl_context * ctx, struct gl_buffer_object *obj)
157 {
158 struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
159
160 assert(intel_obj);
161
162 /* According to the spec, buffer objects are automatically unmapped when
163 * they are deleted, but Mesa doesn't call UnmapBuffer for us at context
164 * destruction time (though it does when glDeleteBuffers is called).
165 */
166 _mesa_buffer_unmap_all_mappings(ctx, obj);
167
168 brw_bo_unreference(intel_obj->buffer);
169 _mesa_delete_buffer_object(ctx, obj);
170 }
171
172
173 /**
174 * The BufferData() driver hook.
175 *
176 * Implements glBufferData(), which recreates a buffer object's data store
177 * and populates it with the given data, if present.
178 *
179 * Any data that was previously stored in the buffer object is lost.
180 *
181 * \return true for success, false if out of memory
182 */
183 static GLboolean
184 brw_buffer_data(struct gl_context *ctx,
185 GLenum target,
186 GLsizeiptrARB size,
187 const GLvoid *data,
188 GLenum usage,
189 GLbitfield storageFlags,
190 struct gl_buffer_object *obj)
191 {
192 struct brw_context *brw = brw_context(ctx);
193 struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
194
195 /* Part of the ABI, but this function doesn't use it.
196 */
197 (void) target;
198
199 intel_obj->Base.Size = size;
200 intel_obj->Base.Usage = usage;
201 intel_obj->Base.StorageFlags = storageFlags;
202
203 assert(!obj->Mappings[MAP_USER].Pointer); /* Mesa should have unmapped it */
204 assert(!obj->Mappings[MAP_INTERNAL].Pointer);
205
206 if (intel_obj->buffer != NULL)
207 release_buffer(intel_obj);
208
209 if (size != 0) {
210 alloc_buffer_object(brw, intel_obj);
211 if (!intel_obj->buffer)
212 return false;
213
214 if (data != NULL) {
215 brw_bo_subdata(intel_obj->buffer, 0, size, data);
216 mark_buffer_valid_data(intel_obj, 0, size);
217 }
218 }
219
220 return true;
221 }
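/* Illustrative client-side sketch (hypothetical app code, not part of the
 * driver; kept out of the build with #if 0): the classic "orphaning" idiom
 * that this hook supports.  Re-specifying the store with a NULL data pointer
 * drops the old BO (release_buffer) and allocates a fresh one
 * (alloc_buffer_object), so later writes need not wait for frames the GPU is
 * still reading.  Names and sizes are made up for the example.
 */
#if 0
static void
example_orphan_vbo(GLuint vbo, const void *vertices, GLsizeiptr size)
{
   glBindBuffer(GL_ARRAY_BUFFER, vbo);
   /* Orphan: same size and usage, NULL data -> new brw_bo behind the name. */
   glBufferData(GL_ARRAY_BUFFER, size, NULL, GL_STREAM_DRAW);
   glBufferSubData(GL_ARRAY_BUFFER, 0, size, vertices);
}
#endif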
222
223
224 /**
225 * The BufferSubData() driver hook.
226 *
227 * Implements glBufferSubData(), which replaces a portion of the data in a
228 * buffer object.
229 *
230 * If the data range specified by (size + offset) extends beyond the end of
231 * the buffer or if data is NULL, no copy is performed.
232 */
233 static void
234 brw_buffer_subdata(struct gl_context *ctx,
235 GLintptrARB offset,
236 GLsizeiptrARB size,
237 const GLvoid *data,
238 struct gl_buffer_object *obj)
239 {
240 struct brw_context *brw = brw_context(ctx);
241 struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
242 bool busy;
243
244 if (size == 0)
245 return;
246
247 assert(intel_obj);
248
249 /* See if we can write the data into the user's BO without synchronizing. This
250 * avoids GPU stalls in unfortunately common user patterns (uploading
251 * sequentially into a BO, with draw calls in between each upload).
252 *
253 * Once we've hit this path, we mark this GL BO as preferring stalling to
254 * blits, so that we can hopefully hit this path again in the future
255 * (otherwise, an app that might occasionally stall but mostly not will end
256 * up with blitting all the time, at the cost of bandwidth)
257 */
258 if (offset + size <= intel_obj->gpu_active_start ||
259 intel_obj->gpu_active_end <= offset ||
260 offset + size <= intel_obj->valid_data_start ||
261 intel_obj->valid_data_end <= offset) {
262 void *map = brw_bo_map(brw, intel_obj->buffer, MAP_WRITE | MAP_ASYNC);
263 memcpy(map + offset, data, size);
264 brw_bo_unmap(intel_obj->buffer);
265
266 if (intel_obj->gpu_active_end > intel_obj->gpu_active_start)
267 intel_obj->prefer_stall_to_blit = true;
268
269 mark_buffer_valid_data(intel_obj, offset, size);
270 return;
271 }
272
273 busy =
274 brw_bo_busy(intel_obj->buffer) ||
275 brw_batch_references(&brw->batch, intel_obj->buffer);
276
277 if (busy) {
278 if (size == intel_obj->Base.Size ||
279 (intel_obj->valid_data_start >= offset &&
280 intel_obj->valid_data_end <= offset + size)) {
281 /* Replace the current busy bo so the subdata doesn't stall. */
282 brw_bo_unreference(intel_obj->buffer);
283 alloc_buffer_object(brw, intel_obj);
284 } else if (!intel_obj->prefer_stall_to_blit) {
285 perf_debug("Using a blit copy to avoid stalling on "
286 "glBufferSubData(%ld, %ld) (%ldkb) to a busy "
287 "(%d-%d) / valid (%d-%d) buffer object.\n",
288 (long)offset, (long)offset + size, (long)(size/1024),
289 intel_obj->gpu_active_start,
290 intel_obj->gpu_active_end,
291 intel_obj->valid_data_start,
292 intel_obj->valid_data_end);
293 struct brw_bo *temp_bo =
294 brw_bo_alloc(brw->bufmgr, "subdata temp", size, BRW_MEMZONE_OTHER);
295
296 brw_bo_subdata(temp_bo, 0, size, data);
297
298 brw_blorp_copy_buffers(brw,
299 temp_bo, 0,
300 intel_obj->buffer, offset,
301 size);
302 brw_emit_mi_flush(brw);
303
304 brw_bo_unreference(temp_bo);
305 mark_buffer_valid_data(intel_obj, offset, size);
306 return;
307 } else {
308 perf_debug("Stalling on glBufferSubData(%ld, %ld) (%ldkb) to a busy "
309 "(%d-%d) buffer object. Use glMapBufferRange() to "
310 "avoid this.\n",
311 (long)offset, (long)offset + size, (long)(size/1024),
312 intel_obj->gpu_active_start,
313 intel_obj->gpu_active_end);
314 intel_batchbuffer_flush(brw);
315 }
316 }
317
318 brw_bo_subdata(intel_obj->buffer, offset, size, data);
319 mark_buffer_inactive(intel_obj);
320 mark_buffer_valid_data(intel_obj, offset, size);
321 }
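/* Illustrative client-side sketch (hypothetical app code, not part of the
 * driver; kept out of the build with #if 0): the "append, draw, append"
 * pattern that the unsynchronized fast path above is aimed at.  Each upload
 * lands past the range the GPU has been asked to read, so it can be
 * memcpy'd through an async mapping instead of stalling or blitting.
 * Assumes vertex arrays pointing at this VBO are already set up; sizes and
 * counts are made up for the example.
 */
#if 0
static void
example_streamed_uploads(GLuint vbo, const void *chunk, GLsizeiptr chunk_size)
{
   glBindBuffer(GL_ARRAY_BUFFER, vbo);
   for (int i = 0; i < 16; i++) {
      glBufferSubData(GL_ARRAY_BUFFER, i * chunk_size, chunk_size, chunk);
      glDrawArrays(GL_TRIANGLES, i * 3, 3); /* 3 vertices per chunk */
   }
}
#endif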
322
323
324 /**
325 * The GetBufferSubData() driver hook.
326 *
327 * Implements glGetBufferSubData(), which copies a subrange of a buffer
328 * object into user memory.
329 */
330 static void
331 brw_get_buffer_subdata(struct gl_context *ctx,
332 GLintptrARB offset,
333 GLsizeiptrARB size,
334 GLvoid *data,
335 struct gl_buffer_object *obj)
336 {
337 struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
338 struct brw_context *brw = brw_context(ctx);
339
340 assert(intel_obj);
341 if (brw_batch_references(&brw->batch, intel_obj->buffer)) {
342 intel_batchbuffer_flush(brw);
343 }
344
345 unsigned int map_flags = MAP_READ;
346 mem_copy_fn memcpy_fn = memcpy;
347 #ifdef USE_SSE41
348 if (!intel_obj->buffer->cache_coherent && cpu_has_sse4_1) {
349 /* Rather than acquire a new WB mmapping of the buffer object and pull
350 * it into the CPU cache, keep using the WC mmapping that we have for
351 * writes, and use the streaming-load (movntdqa) instructions instead.
352 */
353 map_flags |= MAP_COHERENT;
354 memcpy_fn = (mem_copy_fn) _mesa_streaming_load_memcpy;
355 }
356 #endif
357
358 void *map = brw_bo_map(brw, intel_obj->buffer, map_flags);
359 if (unlikely(!map)) {
360 _mesa_error_no_memory(__func__);
361 return;
362 }
363 memcpy_fn(data, map + offset, size);
364 brw_bo_unmap(intel_obj->buffer);
365
366 mark_buffer_inactive(intel_obj);
367 }
368
369
370 /**
371 * The MapBufferRange() driver hook.
372 *
373 * This implements both glMapBufferRange() and glMapBuffer().
374 *
375 * The goal of this extension is to allow apps to accumulate their rendering
376 * at the same time as they accumulate new data in their buffer object.
377 * Without it, you'd end up blocking on execution of the rendering every
378 * time you mapped the buffer to put new data in.
379 *
380 * We support it in 3 ways: If unsynchronized, then don't bother
381 * flushing the batchbuffer before mapping the buffer, which can save blocking
382 * in many cases. If we would still block, and they allow the whole buffer
383 * to be invalidated, then just allocate a new buffer to replace the old one.
384 * If not, and we'd block, and they allow the subrange of the buffer to be
385 * invalidated, then we can make a new little BO, let them write into that,
386 * and blit it into the real BO at unmap time.
387 */
388 static void *
389 brw_map_buffer_range(struct gl_context *ctx,
390 GLintptr offset, GLsizeiptr length,
391 GLbitfield access, struct gl_buffer_object *obj,
392 gl_map_buffer_index index)
393 {
394 struct brw_context *brw = brw_context(ctx);
395 struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
396
397 assert(intel_obj);
398
399 STATIC_ASSERT(GL_MAP_UNSYNCHRONIZED_BIT == MAP_ASYNC);
400 STATIC_ASSERT(GL_MAP_WRITE_BIT == MAP_WRITE);
401 STATIC_ASSERT(GL_MAP_READ_BIT == MAP_READ);
402 STATIC_ASSERT(GL_MAP_PERSISTENT_BIT == MAP_PERSISTENT);
403 STATIC_ASSERT(GL_MAP_COHERENT_BIT == MAP_COHERENT);
404 assert((access & MAP_INTERNAL_MASK) == 0);
405
406 /* _mesa_MapBufferRange (GL entrypoint) sets these, but the vbo module also
407 * internally uses our functions directly.
408 */
409 obj->Mappings[index].Offset = offset;
410 obj->Mappings[index].Length = length;
411 obj->Mappings[index].AccessFlags = access;
412
413 if (intel_obj->buffer == NULL) {
414 obj->Mappings[index].Pointer = NULL;
415 return NULL;
416 }
417
418 /* If the access is synchronized (like a normal buffer mapping), then get
419 * things flushed out so the later mapping syncs appropriately through GEM.
420 * If the user doesn't care about existing buffer contents and mapping would
421 * cause us to block, then throw out the old buffer.
422 *
423 * If they set INVALIDATE_BUFFER, we can pitch the current contents to
424 * achieve the required synchronization.
425 */
426 if (!(access & GL_MAP_UNSYNCHRONIZED_BIT)) {
427 if (brw_batch_references(&brw->batch, intel_obj->buffer)) {
428 if (access & GL_MAP_INVALIDATE_BUFFER_BIT) {
429 brw_bo_unreference(intel_obj->buffer);
430 alloc_buffer_object(brw, intel_obj);
431 } else {
432 perf_debug("Stalling on the GPU for mapping a busy buffer "
433 "object\n");
434 intel_batchbuffer_flush(brw);
435 }
436 } else if (brw_bo_busy(intel_obj->buffer) &&
437 (access & GL_MAP_INVALIDATE_BUFFER_BIT)) {
438 brw_bo_unreference(intel_obj->buffer);
439 alloc_buffer_object(brw, intel_obj);
440 }
441 }
442
443 if (access & MAP_WRITE)
444 mark_buffer_valid_data(intel_obj, offset, length);
445
446 /* If the user is mapping a range of an active buffer object but
447 * doesn't require the current contents of that range, make a new
448 * BO, and we'll copy what they put in there out at unmap or
449 * FlushRange time.
450 *
451 * That is, unless they're looking for a persistent mapping -- we would
452 * need to do blits in the MemoryBarrier call, and it's easier to just do a
453 * GPU stall and do a mapping.
454 */
455 if (!(access & (GL_MAP_UNSYNCHRONIZED_BIT | GL_MAP_PERSISTENT_BIT)) &&
456 (access & GL_MAP_INVALIDATE_RANGE_BIT) &&
457 brw_bo_busy(intel_obj->buffer)) {
458 /* Ensure that the base alignment of the allocation meets the alignment
459 * guarantees the driver has advertised to the application.
460 */
461 const unsigned alignment = ctx->Const.MinMapBufferAlignment;
462
463 intel_obj->map_extra[index] = (uintptr_t) offset % alignment;
464 intel_obj->range_map_bo[index] =
465 brw_bo_alloc(brw->bufmgr, "BO blit temp",
466 length + intel_obj->map_extra[index],
467 BRW_MEMZONE_OTHER);
468 void *map = brw_bo_map(brw, intel_obj->range_map_bo[index], access);
469 obj->Mappings[index].Pointer = map + intel_obj->map_extra[index];
470 return obj->Mappings[index].Pointer;
471 }
472
473 void *map = brw_bo_map(brw, intel_obj->buffer, access);
474 if (!(access & GL_MAP_UNSYNCHRONIZED_BIT)) {
475 mark_buffer_inactive(intel_obj);
476 }
477
478 obj->Mappings[index].Pointer = map + offset;
479 return obj->Mappings[index].Pointer;
480 }
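/* Illustrative client-side sketch (hypothetical app code, not part of the
 * driver; kept out of the build with #if 0): a mapping that hits the third
 * path described above.  The buffer is busy, the caller allows the range to
 * be invalidated and does not ask for a persistent mapping, so the driver
 * hands back a temporary BO and blits it into place at unmap time.  Sizes
 * are made up for the example.
 */
#if 0
static void
example_map_invalidate_range(GLuint vbo, const void *data, GLsizeiptr size)
{
   glBindBuffer(GL_ARRAY_BUFFER, vbo);
   void *ptr = glMapBufferRange(GL_ARRAY_BUFFER, 0, size,
                                GL_MAP_WRITE_BIT |
                                GL_MAP_INVALIDATE_RANGE_BIT);
   memcpy(ptr, data, size);
   glUnmapBuffer(GL_ARRAY_BUFFER); /* blit from the temporary BO happens here */
}
#endif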
481
482 /**
483 * The FlushMappedBufferRange() driver hook.
484 *
485 * Implements glFlushMappedBufferRange(), which signifies that modifications
486 * have been made to a range of a mapped buffer, and it should be flushed.
487 *
488 * This is only used for buffers mapped with GL_MAP_FLUSH_EXPLICIT_BIT.
489 *
490 * Ideally we'd use a BO to avoid taking up cache space for the temporary
491 * data, but FlushMappedBufferRange may be followed by further writes to
492 * the pointer, so we would have to re-map after emitting our blit, which
493 * would defeat the point.
494 */
495 static void
496 brw_flush_mapped_buffer_range(struct gl_context *ctx,
497 GLintptr offset, GLsizeiptr length,
498 struct gl_buffer_object *obj,
499 gl_map_buffer_index index)
500 {
501 struct brw_context *brw = brw_context(ctx);
502 struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
503
504 assert(obj->Mappings[index].AccessFlags & GL_MAP_FLUSH_EXPLICIT_BIT);
505
506 /* If we gave a direct mapping of the buffer instead of using a temporary,
507 * then there's nothing to do.
508 */
509 if (intel_obj->range_map_bo[index] == NULL)
510 return;
511
512 if (length == 0)
513 return;
514
515 /* Note that we're not unmapping our buffer while executing the blit. We
516 * need to have a mapping still at the end of this call, since the user
517 * gets to make further modifications and glFlushMappedBufferRange() calls.
518 * This is safe, because:
519 *
520 * - On LLC platforms, we're using a CPU mapping that's coherent with the
521 * GPU (except for the render caches), so the kernel doesn't need to do
522 * any flushing work for us except for what happens at batch exec time
523 * anyway.
524 *
525 * - On non-LLC platforms, we're using a GTT mapping that writes directly
526 * to system memory (except for the chipset cache that gets flushed at
527 * batch exec time).
528 *
529 * In both cases we don't need to stall for the previous blit to complete
530 * so we can re-map (and we definitely don't want to, since that would be
531 * slow): If the user edits a part of their buffer that's previously been
532 * blitted, then our lack of synchronization is fine, because either
533 * they'll get some too-new data in the first blit and not do another blit
534 * of that area (but in that case the results are undefined), or they'll do
535 * another blit of that area and the complete newer data will land the
536 * second time.
537 */
538 brw_blorp_copy_buffers(brw,
539 intel_obj->range_map_bo[index],
540 intel_obj->map_extra[index] + offset,
541 intel_obj->buffer,
542 obj->Mappings[index].Offset + offset,
543 length);
544 mark_buffer_gpu_usage(intel_obj,
545 obj->Mappings[index].Offset + offset,
546 length);
547 brw_emit_mi_flush(brw);
548 }
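/* Illustrative client-side sketch (hypothetical app code, not part of the
 * driver; kept out of the build with #if 0): the GL_MAP_FLUSH_EXPLICIT_BIT
 * pattern this hook serves.  When the temporary-BO path was taken, each
 * glFlushMappedBufferRange() call triggers one of the blits above while the
 * mapping stays valid for further writes.  Offsets and sizes are made up.
 */
#if 0
static void
example_explicit_flush(GLuint vbo, const void *a, const void *b, GLsizeiptr half)
{
   glBindBuffer(GL_ARRAY_BUFFER, vbo);
   char *ptr = glMapBufferRange(GL_ARRAY_BUFFER, 0, 2 * half,
                                GL_MAP_WRITE_BIT |
                                GL_MAP_INVALIDATE_RANGE_BIT |
                                GL_MAP_FLUSH_EXPLICIT_BIT);
   memcpy(ptr, a, half);
   glFlushMappedBufferRange(GL_ARRAY_BUFFER, 0, half);    /* first blit */
   memcpy(ptr + half, b, half);
   glFlushMappedBufferRange(GL_ARRAY_BUFFER, half, half); /* second blit */
   glUnmapBuffer(GL_ARRAY_BUFFER);
}
#endif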
549
550
551 /**
552 * The UnmapBuffer() driver hook.
553 *
554 * Implements glUnmapBuffer().
555 */
556 static GLboolean
557 brw_unmap_buffer(struct gl_context *ctx,
558 struct gl_buffer_object *obj,
559 gl_map_buffer_index index)
560 {
561 struct brw_context *brw = brw_context(ctx);
562 struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
563
564 assert(intel_obj);
565 assert(obj->Mappings[index].Pointer);
566 if (intel_obj->range_map_bo[index] != NULL) {
567 brw_bo_unmap(intel_obj->range_map_bo[index]);
568
569 if (!(obj->Mappings[index].AccessFlags & GL_MAP_FLUSH_EXPLICIT_BIT)) {
570 brw_blorp_copy_buffers(brw,
571 intel_obj->range_map_bo[index],
572 intel_obj->map_extra[index],
573 intel_obj->buffer, obj->Mappings[index].Offset,
574 obj->Mappings[index].Length);
575 mark_buffer_gpu_usage(intel_obj, obj->Mappings[index].Offset,
576 obj->Mappings[index].Length);
577 brw_emit_mi_flush(brw);
578 }
579
580 /* The flush emitted above (or in brw_flush_mapped_buffer_range() for
581 * explicitly flushed ranges) is needed because we've emitted blits to
582 * buffers that will (likely) be used in rendering operations in other
583 * cache domains in this batch. Once again, we wish for a domain tracker
584 * in libdrm to cover usage inside of a batchbuffer. */
585
586 brw_bo_unreference(intel_obj->range_map_bo[index]);
587 intel_obj->range_map_bo[index] = NULL;
588 } else if (intel_obj->buffer != NULL) {
589 brw_bo_unmap(intel_obj->buffer);
590 }
591 obj->Mappings[index].Pointer = NULL;
592 obj->Mappings[index].Offset = 0;
593 obj->Mappings[index].Length = 0;
594
595 return true;
596 }
597
598 /**
599 * Gets a pointer to the object's BO, and marks the given range as being used
600 * on the GPU.
601 *
602 * Anywhere that uses buffer objects in the pipeline should be using this to
603 * mark the range of the buffer that is being accessed by the pipeline.
604 */
605 struct brw_bo *
606 intel_bufferobj_buffer(struct brw_context *brw,
607 struct intel_buffer_object *intel_obj,
608 uint32_t offset, uint32_t size, bool write)
609 {
610 /* This is needed so that consumers like transform feedback and texture
611 * buffer objects, which need a BO but don't want to check at draw-time
612 * validation whether one exists, can always get a BO from a GL buffer
613 * object. */
614 if (intel_obj->buffer == NULL)
615 alloc_buffer_object(brw, intel_obj);
616
617 mark_buffer_gpu_usage(intel_obj, offset, size);
618
619 /* If writing, (conservatively) mark this section as having valid data. */
620 if (write)
621 mark_buffer_valid_data(intel_obj, offset, size);
622
623 return intel_obj->buffer;
624 }
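/* Illustrative sketch (hypothetical driver-internal caller, not part of the
 * build; guarded by #if 0): anything that binds a buffer object to the
 * pipeline is expected to go through intel_bufferobj_buffer() so that the
 * accessed range feeds the stall/blit heuristics above.  The helper name is
 * made up for the example.
 */
#if 0
static void
example_bind_tbo_surface(struct brw_context *brw,
                         struct intel_buffer_object *intel_obj,
                         uint32_t offset, uint32_t size)
{
   /* Read-only texture-buffer access: marks [offset, offset + size) as
    * GPU-active, but not as freshly written.
    */
   struct brw_bo *bo =
      intel_bufferobj_buffer(brw, intel_obj, offset, size, false);
   (void) bo; /* ...emit surface state pointing at bo + offset here... */
}
#endif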
625
626 /**
627 * The CopyBufferSubData() driver hook.
628 *
629 * Implements glCopyBufferSubData(), which copies a portion of one buffer
630 * object's data to another. Independent source and destination offsets
631 * are allowed.
632 */
633 static void
634 brw_copy_buffer_subdata(struct gl_context *ctx,
635 struct gl_buffer_object *src,
636 struct gl_buffer_object *dst,
637 GLintptr read_offset, GLintptr write_offset,
638 GLsizeiptr size)
639 {
640 struct brw_context *brw = brw_context(ctx);
641 struct intel_buffer_object *intel_src = intel_buffer_object(src);
642 struct intel_buffer_object *intel_dst = intel_buffer_object(dst);
643 struct brw_bo *src_bo, *dst_bo;
644
645 if (size == 0)
646 return;
647
648 dst_bo = intel_bufferobj_buffer(brw, intel_dst, write_offset, size, true);
649 src_bo = intel_bufferobj_buffer(brw, intel_src, read_offset, size, false);
650
651 brw_blorp_copy_buffers(brw,
652 src_bo, read_offset,
653 dst_bo, write_offset, size);
654
655 /* Since we've emitted some blits to buffers that will (likely) be used
656 * in rendering operations in other cache domains in this batch, emit a
657 * flush. Once again, we wish for a domain tracker in libdrm to cover
658 * usage inside of a batchbuffer.
659 */
660 brw_emit_mi_flush(brw);
661 }
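/* Illustrative client-side sketch (hypothetical app code, not part of the
 * driver; kept out of the build with #if 0): a GPU-side copy through the
 * hook above, using the dedicated copy targets so neither buffer has to be
 * mapped.  Names and sizes are made up for the example.
 */
#if 0
static void
example_copy_buffers(GLuint src, GLuint dst, GLsizeiptr size)
{
   glBindBuffer(GL_COPY_READ_BUFFER, src);
   glBindBuffer(GL_COPY_WRITE_BUFFER, dst);
   glCopyBufferSubData(GL_COPY_READ_BUFFER, GL_COPY_WRITE_BUFFER, 0, 0, size);
}
#endif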
662
663 void
664 intelInitBufferObjectFuncs(struct dd_function_table *functions)
665 {
666 functions->NewBufferObject = brw_new_buffer_object;
667 functions->DeleteBuffer = brw_delete_buffer;
668 functions->BufferData = brw_buffer_data;
669 functions->BufferSubData = brw_buffer_subdata;
670 functions->GetBufferSubData = brw_get_buffer_subdata;
671 functions->MapBufferRange = brw_map_buffer_range;
672 functions->FlushMappedBufferRange = brw_flush_mapped_buffer_range;
673 functions->UnmapBuffer = brw_unmap_buffer;
674 functions->CopyBufferSubData = brw_copy_buffer_subdata;
675 }