replace malloc macros in imports.h with u_memory.h versions
mesa.git: src/mesa/drivers/dri/i965/intel_buffer_objects.c
/*
 * Copyright 2003 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

/**
 * @file intel_buffer_objects.c
 *
 * This provides core GL buffer object functionality.
 */

#include "util/imports.h"
#include "main/mtypes.h"
#include "main/macros.h"
#include "main/streaming-load-memcpy.h"
#include "main/bufferobj.h"
#include "x86/common_x86_asm.h"
#include "util/u_memory.h"

#include "brw_context.h"
#include "brw_blorp.h"
#include "intel_buffer_objects.h"
#include "intel_batchbuffer.h"

static void
mark_buffer_gpu_usage(struct intel_buffer_object *intel_obj,
                      uint32_t offset, uint32_t size)
{
   intel_obj->gpu_active_start = MIN2(intel_obj->gpu_active_start, offset);
   intel_obj->gpu_active_end = MAX2(intel_obj->gpu_active_end, offset + size);
}

static void
mark_buffer_inactive(struct intel_buffer_object *intel_obj)
{
   intel_obj->gpu_active_start = ~0;
   intel_obj->gpu_active_end = 0;
}

static void
mark_buffer_valid_data(struct intel_buffer_object *intel_obj,
                       uint32_t offset, uint32_t size)
{
   intel_obj->valid_data_start = MIN2(intel_obj->valid_data_start, offset);
   intel_obj->valid_data_end = MAX2(intel_obj->valid_data_end, offset + size);
}

static void
mark_buffer_invalid(struct intel_buffer_object *intel_obj)
{
   intel_obj->valid_data_start = ~0;
   intel_obj->valid_data_end = 0;
}

/** Allocates a new brw_bo to store the data for the buffer object. */
static void
alloc_buffer_object(struct brw_context *brw,
                    struct intel_buffer_object *intel_obj)
{
   const struct gl_context *ctx = &brw->ctx;

   uint64_t size = intel_obj->Base.Size;
   if (ctx->Const.RobustAccess) {
      /* Pad out buffer objects with an extra 2kB (half a page).
       *
       * When pushing UBOs, we need to safeguard against 3DSTATE_CONSTANT_*
       * reading out of bounds memory. The application might bind a UBO that's
       * smaller than what the program expects. Ideally, we'd bind an extra
       * push buffer containing zeros, but we have a limited number of those,
       * so it's not always viable. Our only safe option is to pad all buffer
       * objects by the maximum push data length, so that it will never read
       * past the end of a BO.
       *
       * This is unfortunate, but it should result in at most 1 extra page,
       * which probably isn't too terrible.
       */
      size += 64 * 32; /* max read length of 64 256-bit units */
   }
   intel_obj->buffer =
      brw_bo_alloc(brw->bufmgr, "bufferobj", size, BRW_MEMZONE_OTHER);

   /* The buffer might be bound as a uniform, shader storage, texture, or
    * atomic counter buffer; flag the corresponding state as dirty so it is
    * re-emitted against the new BO.
    */
   if (intel_obj->Base.UsageHistory & USAGE_UNIFORM_BUFFER)
      brw->ctx.NewDriverState |= BRW_NEW_UNIFORM_BUFFER;
   if (intel_obj->Base.UsageHistory & USAGE_SHADER_STORAGE_BUFFER)
      brw->ctx.NewDriverState |= BRW_NEW_UNIFORM_BUFFER;
   if (intel_obj->Base.UsageHistory & USAGE_TEXTURE_BUFFER)
      brw->ctx.NewDriverState |= BRW_NEW_TEXTURE_BUFFER;
   if (intel_obj->Base.UsageHistory & USAGE_ATOMIC_COUNTER_BUFFER)
      brw->ctx.NewDriverState |= BRW_NEW_UNIFORM_BUFFER;

   mark_buffer_inactive(intel_obj);
   mark_buffer_invalid(intel_obj);
}

static void
release_buffer(struct intel_buffer_object *intel_obj)
{
   brw_bo_unreference(intel_obj->buffer);
   intel_obj->buffer = NULL;
}

/**
 * The NewBufferObject() driver hook.
 *
 * Allocates a new intel_buffer_object structure and initializes it.
 *
 * There is some duplication between mesa's bufferobjects and our
 * bufmgr buffers.  Both have an integer handle and a hashtable to
 * look up an opaque structure.  It would be nice if the handles and
 * internal structure were somehow shared.
 */
static struct gl_buffer_object *
brw_new_buffer_object(struct gl_context * ctx, GLuint name)
{
   struct intel_buffer_object *obj = CALLOC_STRUCT(intel_buffer_object);
   if (!obj) {
      _mesa_error_no_memory(__func__);
      return NULL;
   }

   _mesa_initialize_buffer_object(ctx, &obj->Base, name);

   obj->buffer = NULL;

   return &obj->Base;
}

/**
 * The DeleteBuffer() driver hook.
 *
 * Deletes a single OpenGL buffer object.  Used by glDeleteBuffers().
 */
static void
brw_delete_buffer(struct gl_context * ctx, struct gl_buffer_object *obj)
{
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);

   /* According to the spec, buffer objects are automatically unmapped when
    * they are deleted, but Mesa doesn't call UnmapBuffer for us at context
    * destruction (though it does when you call glDeleteBuffers).
    */
   _mesa_buffer_unmap_all_mappings(ctx, obj);

   brw_bo_unreference(intel_obj->buffer);
   _mesa_delete_buffer_object(ctx, obj);
}


/**
 * The BufferData() driver hook.
 *
 * Implements glBufferData(), which recreates a buffer object's data store
 * and populates it with the given data, if present.
 *
 * Any data that was previously stored in the buffer object is lost.
 *
 * \return true for success, false if out of memory
 */
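/* Illustrative sketch (not part of the original driver code): the GL-side
 * calls expected to land in this hook look roughly like the following.
 * Re-specifying the store of a buffer that is still in flight ("orphaning")
 * simply allocates a fresh brw_bo below.
 *
 *    glBindBuffer(GL_ARRAY_BUFFER, vbo);
 *    glBufferData(GL_ARRAY_BUFFER, size, NULL, GL_STREAM_DRAW);  // orphan
 *    glBufferData(GL_ARRAY_BUFFER, size, vertices, GL_STATIC_DRAW);
 *
 * The buffer name and data above are hypothetical; the storageFlags
 * parameter is presumably how immutable stores created with
 * glBufferStorage() reach this same hook.
 */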
static GLboolean
brw_buffer_data(struct gl_context *ctx,
                GLenum target,
                GLsizeiptrARB size,
                const GLvoid *data,
                GLenum usage,
                GLbitfield storageFlags,
                struct gl_buffer_object *obj)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   /* Part of the ABI, but this function doesn't use it.
    */
   (void) target;

   intel_obj->Base.Size = size;
   intel_obj->Base.Usage = usage;
   intel_obj->Base.StorageFlags = storageFlags;

   assert(!obj->Mappings[MAP_USER].Pointer); /* Mesa should have unmapped it */
   assert(!obj->Mappings[MAP_INTERNAL].Pointer);

   if (intel_obj->buffer != NULL)
      release_buffer(intel_obj);

   if (size != 0) {
      alloc_buffer_object(brw, intel_obj);
      if (!intel_obj->buffer)
         return false;

      if (data != NULL) {
         brw_bo_subdata(intel_obj->buffer, 0, size, data);
         mark_buffer_valid_data(intel_obj, 0, size);
      }
   }

   return true;
}


/**
 * The BufferSubData() driver hook.
 *
 * Implements glBufferSubData(), which replaces a portion of the data in a
 * buffer object.
 *
 * If the data range specified by (size + offset) extends beyond the end of
 * the buffer or if data is NULL, no copy is performed.
 */
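/* Illustrative sketch (not part of the original driver code): the
 * unsynchronized-write fast path below targets the common pattern of
 * uploading sequentially into a buffer with draw calls in between, e.g.:
 *
 *    for (int i = 0; i < num_batches; i++) {
 *       glBufferSubData(GL_ARRAY_BUFFER, i * chunk_size, chunk_size,
 *                       cpu_data[i]);
 *       glDrawArrays(GL_TRIANGLES, i * verts_per_chunk, verts_per_chunk);
 *    }
 *
 * Each upload touches a range the GPU has not read yet, so it can be
 * memcpy'd into the BO without stalling.  The names above are hypothetical
 * and only meant to show the shape of the pattern.
 */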
static void
brw_buffer_subdata(struct gl_context *ctx,
                   GLintptrARB offset,
                   GLsizeiptrARB size,
                   const GLvoid *data,
                   struct gl_buffer_object *obj)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
   bool busy;

   if (size == 0)
      return;

   assert(intel_obj);

   /* See if we can write the data into the user's BO without
    * synchronization.  This avoids GPU stalls in unfortunately common user
    * patterns (uploading sequentially into a BO, with draw calls in between
    * each upload).
    *
    * Once we've hit this path, we mark this GL BO as preferring stalling to
    * blits, so that we can hopefully hit this path again in the future
    * (otherwise, an app that might occasionally stall but mostly not will
    * end up blitting all the time, at the cost of bandwidth).
    */
   if (offset + size <= intel_obj->gpu_active_start ||
       intel_obj->gpu_active_end <= offset ||
       offset + size <= intel_obj->valid_data_start ||
       intel_obj->valid_data_end <= offset) {
      void *map = brw_bo_map(brw, intel_obj->buffer, MAP_WRITE | MAP_ASYNC);
      memcpy(map + offset, data, size);
      brw_bo_unmap(intel_obj->buffer);

      if (intel_obj->gpu_active_end > intel_obj->gpu_active_start)
         intel_obj->prefer_stall_to_blit = true;

      mark_buffer_valid_data(intel_obj, offset, size);
      return;
   }

   busy =
      brw_bo_busy(intel_obj->buffer) ||
      brw_batch_references(&brw->batch, intel_obj->buffer);

   if (busy) {
      if (size == intel_obj->Base.Size ||
          (intel_obj->valid_data_start >= offset &&
           intel_obj->valid_data_end <= offset + size)) {
         /* Replace the current busy bo so the subdata doesn't stall. */
         brw_bo_unreference(intel_obj->buffer);
         alloc_buffer_object(brw, intel_obj);
      } else if (!intel_obj->prefer_stall_to_blit) {
         perf_debug("Using a blit copy to avoid stalling on "
                    "glBufferSubData(%ld, %ld) (%ldkb) to a busy "
                    "(%d-%d) / valid (%d-%d) buffer object.\n",
                    (long)offset, (long)offset + size, (long)(size/1024),
                    intel_obj->gpu_active_start,
                    intel_obj->gpu_active_end,
                    intel_obj->valid_data_start,
                    intel_obj->valid_data_end);
         struct brw_bo *temp_bo =
            brw_bo_alloc(brw->bufmgr, "subdata temp", size, BRW_MEMZONE_OTHER);

         brw_bo_subdata(temp_bo, 0, size, data);

         brw_blorp_copy_buffers(brw,
                                temp_bo, 0,
                                intel_obj->buffer, offset,
                                size);
         brw_emit_mi_flush(brw);

         brw_bo_unreference(temp_bo);
         mark_buffer_valid_data(intel_obj, offset, size);
         return;
      } else {
         perf_debug("Stalling on glBufferSubData(%ld, %ld) (%ldkb) to a busy "
                    "(%d-%d) buffer object.  Use glMapBufferRange() to "
                    "avoid this.\n",
                    (long)offset, (long)offset + size, (long)(size/1024),
                    intel_obj->gpu_active_start,
                    intel_obj->gpu_active_end);
         intel_batchbuffer_flush(brw);
      }
   }

   brw_bo_subdata(intel_obj->buffer, offset, size, data);
   mark_buffer_inactive(intel_obj);
   mark_buffer_valid_data(intel_obj, offset, size);
}

/* Typedef for memcpy function (used in brw_get_buffer_subdata below). */
typedef void *(*mem_copy_fn)(void *dest, const void *src, size_t n);

/**
 * The GetBufferSubData() driver hook.
 *
 * Implements glGetBufferSubData(), which copies a subrange of a buffer
 * object into user memory.
 */
static void
brw_get_buffer_subdata(struct gl_context *ctx,
                       GLintptrARB offset,
                       GLsizeiptrARB size,
                       GLvoid *data,
                       struct gl_buffer_object *obj)
{
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
   struct brw_context *brw = brw_context(ctx);

   assert(intel_obj);
   if (brw_batch_references(&brw->batch, intel_obj->buffer)) {
      intel_batchbuffer_flush(brw);
   }

   unsigned int map_flags = MAP_READ;
   mem_copy_fn memcpy_fn = memcpy;
#ifdef USE_SSE41
   if (!intel_obj->buffer->cache_coherent && cpu_has_sse4_1) {
      /* Rather than acquire a new WB mmap of the buffer object and pull
       * it into the CPU cache, keep using the WC mmap that we have for
       * writes, and read from it with streaming loads (movntdqa) instead.
       */
      map_flags |= MAP_COHERENT;
      memcpy_fn = (mem_copy_fn) _mesa_streaming_load_memcpy;
   }
#endif

   void *map = brw_bo_map(brw, intel_obj->buffer, map_flags);
   if (unlikely(!map)) {
      _mesa_error_no_memory(__func__);
      return;
   }
   memcpy_fn(data, map + offset, size);
   brw_bo_unmap(intel_obj->buffer);

   mark_buffer_inactive(intel_obj);
}


/**
 * The MapBufferRange() driver hook.
 *
 * This implements both glMapBufferRange() and glMapBuffer().
 *
 * The goal of this extension is to allow apps to accumulate their rendering
 * at the same time as they accumulate new data in their buffer object.
 * Without it, you'd end up blocking on execution of rendering every time
 * you mapped the buffer to put new data in.
 *
 * We support it in 3 ways: If unsynchronized, then don't bother flushing
 * the batchbuffer before mapping the buffer, which can save blocking in
 * many cases.  If we would still block, and they allow the whole buffer
 * to be invalidated, then just allocate a new buffer to replace the old
 * one.  If not, and we'd block, and they allow the subrange of the buffer
 * to be invalidated, then we can make a new little BO, let them write into
 * that, and blit it into the real BO at unmap time.
 */
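/* Illustrative sketch (not part of the original driver code): the three
 * strategies above correspond roughly to the following application-side
 * mapping patterns (buffer targets, offsets, and sizes are hypothetical):
 *
 *    // 1. Unsynchronized: no flush, no stall -- the app promises not to
 *    //    touch ranges the GPU is still reading.
 *    ptr = glMapBufferRange(GL_ARRAY_BUFFER, offset, len,
 *                           GL_MAP_WRITE_BIT | GL_MAP_UNSYNCHRONIZED_BIT);
 *
 *    // 2. Whole-buffer invalidate on a busy BO: the old brw_bo is dropped
 *    //    and a fresh one is allocated instead of stalling.
 *    ptr = glMapBufferRange(GL_ARRAY_BUFFER, 0, size,
 *                           GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_BUFFER_BIT);
 *
 *    // 3. Range invalidate on a busy BO: writes go to a temporary BO that
 *    //    is blitted into place at unmap/flush time.
 *    ptr = glMapBufferRange(GL_ARRAY_BUFFER, offset, len,
 *                           GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_RANGE_BIT);
 */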
static void *
brw_map_buffer_range(struct gl_context *ctx,
                     GLintptr offset, GLsizeiptr length,
                     GLbitfield access, struct gl_buffer_object *obj,
                     gl_map_buffer_index index)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);

   STATIC_ASSERT(GL_MAP_UNSYNCHRONIZED_BIT == MAP_ASYNC);
   STATIC_ASSERT(GL_MAP_WRITE_BIT == MAP_WRITE);
   STATIC_ASSERT(GL_MAP_READ_BIT == MAP_READ);
   STATIC_ASSERT(GL_MAP_PERSISTENT_BIT == MAP_PERSISTENT);
   STATIC_ASSERT(GL_MAP_COHERENT_BIT == MAP_COHERENT);
   assert((access & MAP_INTERNAL_MASK) == 0);

   /* _mesa_MapBufferRange (GL entrypoint) sets these, but the vbo module also
    * internally uses our functions directly.
    */
   obj->Mappings[index].Offset = offset;
   obj->Mappings[index].Length = length;
   obj->Mappings[index].AccessFlags = access;

   if (intel_obj->buffer == NULL) {
      obj->Mappings[index].Pointer = NULL;
      return NULL;
   }

   /* If the access is synchronized (like a normal buffer mapping), then get
    * things flushed out so the later mapping syncs appropriately through GEM.
    * If the user doesn't care about existing buffer contents and mapping would
    * cause us to block, then throw out the old buffer.
    *
    * If they set INVALIDATE_BUFFER, we can pitch the current contents to
    * achieve the required synchronization.
    */
   if (!(access & GL_MAP_UNSYNCHRONIZED_BIT)) {
      if (brw_batch_references(&brw->batch, intel_obj->buffer)) {
         if (access & GL_MAP_INVALIDATE_BUFFER_BIT) {
            brw_bo_unreference(intel_obj->buffer);
            alloc_buffer_object(brw, intel_obj);
         } else {
            perf_debug("Stalling on the GPU for mapping a busy buffer "
                       "object\n");
            intel_batchbuffer_flush(brw);
         }
      } else if (brw_bo_busy(intel_obj->buffer) &&
                 (access & GL_MAP_INVALIDATE_BUFFER_BIT)) {
         brw_bo_unreference(intel_obj->buffer);
         alloc_buffer_object(brw, intel_obj);
      }
   }

   if (access & MAP_WRITE)
      mark_buffer_valid_data(intel_obj, offset, length);

   /* If the user is mapping a range of an active buffer object but
    * doesn't require the current contents of that range, make a new
    * BO, and we'll copy what they put in there out at unmap or
    * FlushRange time.
    *
    * That is, unless they're looking for a persistent mapping -- we would
    * need to do blits in the MemoryBarrier call, and it's easier to just do a
    * GPU stall and do a mapping.
    */
   if (!(access & (GL_MAP_UNSYNCHRONIZED_BIT | GL_MAP_PERSISTENT_BIT)) &&
       (access & GL_MAP_INVALIDATE_RANGE_BIT) &&
       brw_bo_busy(intel_obj->buffer)) {
      /* Ensure that the base alignment of the allocation meets the alignment
       * guarantees the driver has advertised to the application.
       */
      const unsigned alignment = ctx->Const.MinMapBufferAlignment;

      intel_obj->map_extra[index] = (uintptr_t) offset % alignment;
      intel_obj->range_map_bo[index] =
         brw_bo_alloc(brw->bufmgr, "BO blit temp",
                      length + intel_obj->map_extra[index],
                      BRW_MEMZONE_OTHER);
      void *map = brw_bo_map(brw, intel_obj->range_map_bo[index], access);
      obj->Mappings[index].Pointer = map + intel_obj->map_extra[index];
      return obj->Mappings[index].Pointer;
   }

   void *map = brw_bo_map(brw, intel_obj->buffer, access);
   if (!(access & GL_MAP_UNSYNCHRONIZED_BIT)) {
      mark_buffer_inactive(intel_obj);
   }

   obj->Mappings[index].Pointer = map + offset;
   return obj->Mappings[index].Pointer;
}

/**
 * The FlushMappedBufferRange() driver hook.
 *
 * Implements glFlushMappedBufferRange(), which signifies that modifications
 * have been made to a range of a mapped buffer, and it should be flushed.
 *
 * This is only used for buffers mapped with GL_MAP_FLUSH_EXPLICIT_BIT.
 *
 * Ideally we'd use a BO to avoid taking up cache space for the temporary
 * data, but FlushMappedBufferRange may be followed by further writes to
 * the pointer, so we would have to re-map after emitting our blit, which
 * would defeat the point.
 */
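/* Illustrative sketch (not part of the original driver code): the app-side
 * pattern this hook serves, with hypothetical names, is roughly:
 *
 *    ptr = glMapBufferRange(GL_ARRAY_BUFFER, 0, size,
 *                           GL_MAP_WRITE_BIT |
 *                           GL_MAP_INVALIDATE_RANGE_BIT |
 *                           GL_MAP_FLUSH_EXPLICIT_BIT);
 *    memcpy(ptr + chunk_offset, chunk_data, chunk_len);
 *    glFlushMappedBufferRange(GL_ARRAY_BUFFER, chunk_offset, chunk_len);
 *    // ... possibly more writes and flushes before glUnmapBuffer() ...
 *
 * When the mapping went through the temporary range_map_bo path above, each
 * glFlushMappedBufferRange() call triggers the blit below into the real
 * buffer; for direct mappings this hook returns early and does nothing.
 */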
static void
brw_flush_mapped_buffer_range(struct gl_context *ctx,
                              GLintptr offset, GLsizeiptr length,
                              struct gl_buffer_object *obj,
                              gl_map_buffer_index index)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(obj->Mappings[index].AccessFlags & GL_MAP_FLUSH_EXPLICIT_BIT);

   /* If we gave a direct mapping of the buffer instead of using a temporary,
    * then there's nothing to do.
    */
   if (intel_obj->range_map_bo[index] == NULL)
      return;

   if (length == 0)
      return;

   /* Note that we're not unmapping our buffer while executing the blit.  We
    * need to have a mapping still at the end of this call, since the user
    * gets to make further modifications and glFlushMappedBufferRange() calls.
    * This is safe, because:
    *
    * - On LLC platforms, we're using a CPU mapping that's coherent with the
    *   GPU (except for the render caches), so the kernel doesn't need to do
    *   any flushing work for us except for what happens at batch exec time
    *   anyway.
    *
    * - On non-LLC platforms, we're using a GTT mapping that writes directly
    *   to system memory (except for the chipset cache that gets flushed at
    *   batch exec time).
    *
    * In both cases we don't need to stall for the previous blit to complete
    * so we can re-map (and we definitely don't want to, since that would be
    * slow): If the user edits a part of their buffer that's previously been
    * blitted, then our lack of synchronization is fine, because either
    * they'll get some too-new data in the first blit and not do another blit
    * of that area (but in that case the results are undefined), or they'll
    * do another blit of that area and the complete newer data will land the
    * second time.
    */
   brw_blorp_copy_buffers(brw,
                          intel_obj->range_map_bo[index],
                          intel_obj->map_extra[index] + offset,
                          intel_obj->buffer,
                          obj->Mappings[index].Offset + offset,
                          length);
   mark_buffer_gpu_usage(intel_obj,
                         obj->Mappings[index].Offset + offset,
                         length);
   brw_emit_mi_flush(brw);
}


/**
 * The UnmapBuffer() driver hook.
 *
 * Implements glUnmapBuffer().
 */
static GLboolean
brw_unmap_buffer(struct gl_context *ctx,
                 struct gl_buffer_object *obj,
                 gl_map_buffer_index index)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);
   assert(obj->Mappings[index].Pointer);
   if (intel_obj->range_map_bo[index] != NULL) {
      brw_bo_unmap(intel_obj->range_map_bo[index]);

      if (!(obj->Mappings[index].AccessFlags & GL_MAP_FLUSH_EXPLICIT_BIT)) {
         brw_blorp_copy_buffers(brw,
                                intel_obj->range_map_bo[index],
                                intel_obj->map_extra[index],
                                intel_obj->buffer, obj->Mappings[index].Offset,
                                obj->Mappings[index].Length);
         mark_buffer_gpu_usage(intel_obj, obj->Mappings[index].Offset,
                               obj->Mappings[index].Length);
         brw_emit_mi_flush(brw);
      }

      /* Since we've emitted some blits to buffers that will (likely) be used
       * in rendering operations in other cache domains in this batch, emit a
       * flush.  Once again, we wish for a domain tracker in libdrm to cover
       * usage inside of a batchbuffer.
       */

      brw_bo_unreference(intel_obj->range_map_bo[index]);
      intel_obj->range_map_bo[index] = NULL;
   } else if (intel_obj->buffer != NULL) {
      brw_bo_unmap(intel_obj->buffer);
   }
   obj->Mappings[index].Pointer = NULL;
   obj->Mappings[index].Offset = 0;
   obj->Mappings[index].Length = 0;

   return true;
}

/**
 * Gets a pointer to the object's BO, and marks the given range as being used
 * on the GPU.
 *
 * Anywhere that uses buffer objects in the pipeline should be using this to
 * mark the range of the buffer that is being accessed by the pipeline.
 */
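/* Illustrative sketch (not part of the original driver code): a state-upload
 * path that sources data from a GL buffer object would be expected to fetch
 * its BO roughly like this (the "binding" variable is hypothetical):
 *
 *    struct brw_bo *bo =
 *       intel_bufferobj_buffer(brw, intel_buffer_object(binding->BufferObj),
 *                              binding->Offset, binding->Size, false);
 *
 * so that the accessed range is tracked via mark_buffer_gpu_usage() and the
 * unsynchronized glBufferSubData() path above can avoid overwriting it.
 */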
struct brw_bo *
intel_bufferobj_buffer(struct brw_context *brw,
                       struct intel_buffer_object *intel_obj,
                       uint32_t offset, uint32_t size, bool write)
{
   /* This is needed so that things like transform feedback and texture buffer
    * objects that need a BO but don't want to check that they exist for
    * draw-time validation can just always get a BO from a GL buffer object.
    */
   if (intel_obj->buffer == NULL)
      alloc_buffer_object(brw, intel_obj);

   mark_buffer_gpu_usage(intel_obj, offset, size);

   /* If writing, (conservatively) mark this section as having valid data. */
   if (write)
      mark_buffer_valid_data(intel_obj, offset, size);

   return intel_obj->buffer;
}

/**
 * The CopyBufferSubData() driver hook.
 *
 * Implements glCopyBufferSubData(), which copies a portion of one buffer
 * object's data to another.  Independent source and destination offsets
 * are allowed.
 */
static void
brw_copy_buffer_subdata(struct gl_context *ctx,
                        struct gl_buffer_object *src,
                        struct gl_buffer_object *dst,
                        GLintptr read_offset, GLintptr write_offset,
                        GLsizeiptr size)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_buffer_object *intel_src = intel_buffer_object(src);
   struct intel_buffer_object *intel_dst = intel_buffer_object(dst);
   struct brw_bo *src_bo, *dst_bo;

   if (size == 0)
      return;

   dst_bo = intel_bufferobj_buffer(brw, intel_dst, write_offset, size, true);
   src_bo = intel_bufferobj_buffer(brw, intel_src, read_offset, size, false);

   brw_blorp_copy_buffers(brw,
                          src_bo, read_offset,
                          dst_bo, write_offset, size);

   /* Since we've emitted some blits to buffers that will (likely) be used
    * in rendering operations in other cache domains in this batch, emit a
    * flush.  Once again, we wish for a domain tracker in libdrm to cover
    * usage inside of a batchbuffer.
    */
   brw_emit_mi_flush(brw);
}

void
intelInitBufferObjectFuncs(struct dd_function_table *functions)
{
   functions->NewBufferObject = brw_new_buffer_object;
   functions->DeleteBuffer = brw_delete_buffer;
   functions->BufferData = brw_buffer_data;
   functions->BufferSubData = brw_buffer_subdata;
   functions->GetBufferSubData = brw_get_buffer_subdata;
   functions->MapBufferRange = brw_map_buffer_range;
   functions->FlushMappedBufferRange = brw_flush_mapped_buffer_range;
   functions->UnmapBuffer = brw_unmap_buffer;
   functions->CopyBufferSubData = brw_copy_buffer_subdata;
}