5ebe94f4545549d130805d0389001320b416ccc5
[mesa.git] / src / mesa / state_tracker / st_cb_bufferobjects.c
1 /**************************************************************************
2 *
3 * Copyright 2007 VMware, Inc.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28
29 /**
30 * Functions for pixel buffer objects and vertex/element buffer objects.
31 */
32
33
34 #include <inttypes.h> /* for PRId64 macro */
35
36 #include "main/errors.h"
37 #include "main/imports.h"
38 #include "main/mtypes.h"
39 #include "main/arrayobj.h"
40 #include "main/bufferobj.h"
41
42 #include "st_context.h"
43 #include "st_cb_bufferobjects.h"
44 #include "st_cb_memoryobjects.h"
45 #include "st_debug.h"
46
47 #include "pipe/p_context.h"
48 #include "pipe/p_defines.h"
49 #include "util/u_inlines.h"
50
51
52 /**
53 * There is some duplication between mesa's bufferobjects and our
54 * bufmgr buffers. Both have an integer handle and a hashtable to
55 * lookup an opaque structure. It would be nice if the handles and
56 * internal structure where somehow shared.
57 */
58 static struct gl_buffer_object *
59 st_bufferobj_alloc(struct gl_context *ctx, GLuint name)
60 {
61 struct st_buffer_object *st_obj = ST_CALLOC_STRUCT(st_buffer_object);
62
63 if (!st_obj)
64 return NULL;
65
66 _mesa_initialize_buffer_object(ctx, &st_obj->Base, name);
67
68 return &st_obj->Base;
69 }
70
71
72
73 /**
74 * Deallocate/free a vertex/pixel buffer object.
75 * Called via glDeleteBuffersARB().
76 */
77 static void
78 st_bufferobj_free(struct gl_context *ctx, struct gl_buffer_object *obj)
79 {
80 struct st_buffer_object *st_obj = st_buffer_object(obj);
81
82 assert(obj->RefCount == 0);
83 _mesa_buffer_unmap_all_mappings(ctx, obj);
84
85 if (st_obj->buffer)
86 pipe_resource_reference(&st_obj->buffer, NULL);
87
88 _mesa_delete_buffer_object(ctx, obj);
89 }
90
91
92
93 /**
94 * Replace data in a subrange of buffer object. If the data range
95 * specified by size + offset extends beyond the end of the buffer or
96 * if data is NULL, no copy is performed.
97 * Called via glBufferSubDataARB().
98 */
99 static void
100 st_bufferobj_subdata(struct gl_context *ctx,
101 GLintptrARB offset,
102 GLsizeiptrARB size,
103 const void * data, struct gl_buffer_object *obj)
104 {
105 struct st_buffer_object *st_obj = st_buffer_object(obj);
106
107 /* we may be called from VBO code, so double-check params here */
108 assert(offset >= 0);
109 assert(size >= 0);
110 assert(offset + size <= obj->Size);
111
112 if (!size)
113 return;
114
115 /*
116 * According to ARB_vertex_buffer_object specification, if data is null,
117 * then the contents of the buffer object's data store is undefined. We just
118 * ignore, and leave it unchanged.
119 */
120 if (!data)
121 return;
122
123 if (!st_obj->buffer) {
124 /* we probably ran out of memory during buffer allocation */
125 return;
126 }
127
128 /* Now that transfers are per-context, we don't have to figure out
129 * flushing here. Usually drivers won't need to flush in this case
130 * even if the buffer is currently referenced by hardware - they
131 * just queue the upload as dma rather than mapping the underlying
132 * buffer directly.
133 */
134 pipe_buffer_write(st_context(ctx)->pipe,
135 st_obj->buffer,
136 offset, size, data);
137 }
138
139
140 /**
141 * Called via glGetBufferSubDataARB().
142 */
143 static void
144 st_bufferobj_get_subdata(struct gl_context *ctx,
145 GLintptrARB offset,
146 GLsizeiptrARB size,
147 void * data, struct gl_buffer_object *obj)
148 {
149 struct st_buffer_object *st_obj = st_buffer_object(obj);
150
151 /* we may be called from VBO code, so double-check params here */
152 assert(offset >= 0);
153 assert(size >= 0);
154 assert(offset + size <= obj->Size);
155
156 if (!size)
157 return;
158
159 if (!st_obj->buffer) {
160 /* we probably ran out of memory during buffer allocation */
161 return;
162 }
163
164 pipe_buffer_read(st_context(ctx)->pipe, st_obj->buffer,
165 offset, size, data);
166 }
167
168
169 /**
170 * Return bitmask of PIPE_BIND_x flags corresponding a GL buffer target.
171 */
172 static unsigned
173 buffer_target_to_bind_flags(GLenum target)
174 {
175 switch (target) {
176 case GL_PIXEL_PACK_BUFFER_ARB:
177 case GL_PIXEL_UNPACK_BUFFER_ARB:
178 return PIPE_BIND_RENDER_TARGET | PIPE_BIND_SAMPLER_VIEW;
179 case GL_ARRAY_BUFFER_ARB:
180 return PIPE_BIND_VERTEX_BUFFER;
181 case GL_ELEMENT_ARRAY_BUFFER_ARB:
182 return PIPE_BIND_INDEX_BUFFER;
183 case GL_TEXTURE_BUFFER:
184 return PIPE_BIND_SAMPLER_VIEW;
185 case GL_TRANSFORM_FEEDBACK_BUFFER:
186 return PIPE_BIND_STREAM_OUTPUT;
187 case GL_UNIFORM_BUFFER:
188 return PIPE_BIND_CONSTANT_BUFFER;
189 case GL_DRAW_INDIRECT_BUFFER:
190 case GL_PARAMETER_BUFFER_ARB:
191 return PIPE_BIND_COMMAND_ARGS_BUFFER;
192 case GL_ATOMIC_COUNTER_BUFFER:
193 case GL_SHADER_STORAGE_BUFFER:
194 return PIPE_BIND_SHADER_BUFFER;
195 case GL_QUERY_BUFFER:
196 return PIPE_BIND_QUERY_BUFFER;
197 default:
198 return 0;
199 }
200 }
201
202
203 /**
204 * Return bitmask of PIPE_RESOURCE_x flags corresponding to GL_MAP_x flags.
205 */
206 static unsigned
207 storage_flags_to_buffer_flags(GLbitfield storageFlags)
208 {
209 unsigned flags = 0;
210 if (storageFlags & GL_MAP_PERSISTENT_BIT)
211 flags |= PIPE_RESOURCE_FLAG_MAP_PERSISTENT;
212 if (storageFlags & GL_MAP_COHERENT_BIT)
213 flags |= PIPE_RESOURCE_FLAG_MAP_COHERENT;
214 if (storageFlags & GL_SPARSE_STORAGE_BIT_ARB)
215 flags |= PIPE_RESOURCE_FLAG_SPARSE;
216 return flags;
217 }
218
219
220 /**
221 * From a buffer object's target, immutability flag, storage flags and
222 * usage hint, return a pipe_resource_usage value (PIPE_USAGE_DYNAMIC,
223 * STREAM, etc).
224 */
225 static const enum pipe_resource_usage
226 buffer_usage(GLenum target, GLboolean immutable,
227 GLbitfield storageFlags, GLenum usage)
228 {
229 if (immutable) {
230 /* BufferStorage */
231 if (storageFlags & GL_CLIENT_STORAGE_BIT) {
232 if (storageFlags & GL_MAP_READ_BIT)
233 return PIPE_USAGE_STAGING;
234 else
235 return PIPE_USAGE_STREAM;
236 } else {
237 return PIPE_USAGE_DEFAULT;
238 }
239 }
240 else {
241 /* BufferData */
242 switch (usage) {
243 case GL_DYNAMIC_DRAW:
244 case GL_DYNAMIC_COPY:
245 return PIPE_USAGE_DYNAMIC;
246 case GL_STREAM_DRAW:
247 case GL_STREAM_COPY:
248 /* XXX: Remove this test and fall-through when we have PBO unpacking
249 * acceleration. Right now, PBO unpacking is done by the CPU, so we
250 * have to make sure CPU reads are fast.
251 */
252 if (target != GL_PIXEL_UNPACK_BUFFER_ARB) {
253 return PIPE_USAGE_STREAM;
254 }
255 /* fall through */
256 case GL_STATIC_READ:
257 case GL_DYNAMIC_READ:
258 case GL_STREAM_READ:
259 return PIPE_USAGE_STAGING;
260 case GL_STATIC_DRAW:
261 case GL_STATIC_COPY:
262 default:
263 return PIPE_USAGE_DEFAULT;
264 }
265 }
266 }
267
268
/**
 * Shared implementation behind glBufferData / glBufferStorage /
 * glBufferStorageMemEXT: (re)allocate the buffer's backing pipe resource
 * and optionally upload initial data.
 *
 * \param memObj  external memory object to import from, or NULL for a
 *                normal device allocation.
 * \param offset  byte offset into \p memObj (only used when memObj != NULL).
 * \return GL_TRUE on success, GL_FALSE on out-of-memory.
 */
static ALWAYS_INLINE GLboolean
bufferobj_data(struct gl_context *ctx,
               GLenum target,
               GLsizeiptrARB size,
               const void *data,
               struct gl_memory_object *memObj,
               GLuint64 offset,
               GLenum usage,
               GLbitfield storageFlags,
               struct gl_buffer_object *obj)
{
   struct st_context *st = st_context(ctx);
   struct pipe_context *pipe = st->pipe;
   struct pipe_screen *screen = pipe->screen;
   struct st_buffer_object *st_obj = st_buffer_object(obj);
   struct st_memory_object *st_mem_obj = st_memory_object(memObj);

   /* Fast path: if the existing resource already matches the requested
    * size/usage/storage flags, reuse it instead of reallocating.
    * User-memory buffers are excluded since their storage is external.
    */
   if (target != GL_EXTERNAL_VIRTUAL_MEMORY_BUFFER_AMD &&
       size && st_obj->buffer &&
       st_obj->Base.Size == size &&
       st_obj->Base.Usage == usage &&
       st_obj->Base.StorageFlags == storageFlags) {
      if (data) {
         /* Just discard the old contents and write new data.
          * This should be the same as creating a new buffer, but we avoid
          * a lot of validation in Mesa.
          */
         pipe->buffer_subdata(pipe, st_obj->buffer,
                              PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE,
                              0, size, data);
         return GL_TRUE;
      } else if (screen->get_param(screen, PIPE_CAP_INVALIDATE_BUFFER)) {
         pipe->invalidate_resource(pipe, st_obj->buffer);
         return GL_TRUE;
      }
   }

   st_obj->Base.Size = size;
   st_obj->Base.Usage = usage;
   st_obj->Base.StorageFlags = storageFlags;

   /* Drop the old resource before creating a new one. */
   pipe_resource_reference( &st_obj->buffer, NULL );

   const unsigned bindings = buffer_target_to_bind_flags(target);

   if (ST_DEBUG & DEBUG_BUFFER) {
      debug_printf("Create buffer size %" PRId64 " bind 0x%x\n",
                   (int64_t) size, bindings);
   }

   if (size != 0) {
      struct pipe_resource buffer;

      memset(&buffer, 0, sizeof buffer);
      buffer.target = PIPE_BUFFER;
      buffer.format = PIPE_FORMAT_R8_UNORM; /* want TYPELESS or similar */
      buffer.bind = bindings;
      buffer.usage =
         buffer_usage(target, st_obj->Base.Immutable, storageFlags, usage);
      buffer.flags = storage_flags_to_buffer_flags(storageFlags);
      buffer.width0 = size;
      buffer.height0 = 1;
      buffer.depth0 = 1;
      buffer.array_size = 1;

      /* Three allocation paths: import from an external memory object,
       * wrap user memory (AMD_pinned_memory), or a normal device alloc.
       */
      if (st_mem_obj) {
         st_obj->buffer = screen->resource_from_memobj(screen, &buffer,
                                                       st_mem_obj->memory,
                                                       offset);
      }
      else if (target == GL_EXTERNAL_VIRTUAL_MEMORY_BUFFER_AMD) {
         st_obj->buffer =
            screen->resource_from_user_memory(screen, &buffer, (void*)data);
      }
      else {
         st_obj->buffer = screen->resource_create(screen, &buffer);

         if (st_obj->buffer && data)
            pipe_buffer_write(pipe, st_obj->buffer, 0, size, data);
      }

      if (!st_obj->buffer) {
         /* out of memory */
         st_obj->Base.Size = 0;
         return GL_FALSE;
      }
   }

   /* The current buffer may be bound, so we have to revalidate all atoms that
    * might be using it.
    */
   /* TODO: Add arrays to usage history */
   ctx->NewDriverState |= ST_NEW_VERTEX_ARRAYS;
   if (st_obj->Base.UsageHistory & USAGE_UNIFORM_BUFFER)
      ctx->NewDriverState |= ST_NEW_UNIFORM_BUFFER;
   if (st_obj->Base.UsageHistory & USAGE_SHADER_STORAGE_BUFFER)
      ctx->NewDriverState |= ST_NEW_STORAGE_BUFFER;
   if (st_obj->Base.UsageHistory & USAGE_TEXTURE_BUFFER)
      ctx->NewDriverState |= ST_NEW_SAMPLER_VIEWS | ST_NEW_IMAGE_UNITS;
   if (st_obj->Base.UsageHistory & USAGE_ATOMIC_COUNTER_BUFFER)
      ctx->NewDriverState |= ctx->DriverFlags.NewAtomicBuffer;

   return GL_TRUE;
}
373
374 /**
375 * Allocate space for and store data in a buffer object. Any data that was
376 * previously stored in the buffer object is lost. If data is NULL,
377 * memory will be allocated, but no copy will occur.
378 * Called via ctx->Driver.BufferData().
379 * \return GL_TRUE for success, GL_FALSE if out of memory
380 */
381 static GLboolean
382 st_bufferobj_data(struct gl_context *ctx,
383 GLenum target,
384 GLsizeiptrARB size,
385 const void *data,
386 GLenum usage,
387 GLbitfield storageFlags,
388 struct gl_buffer_object *obj)
389 {
390 return bufferobj_data(ctx, target, size, data, NULL, 0, usage, storageFlags, obj);
391 }
392
393 static GLboolean
394 st_bufferobj_data_mem(struct gl_context *ctx,
395 GLenum target,
396 GLsizeiptrARB size,
397 struct gl_memory_object *memObj,
398 GLuint64 offset,
399 GLenum usage,
400 struct gl_buffer_object *bufObj)
401 {
402 return bufferobj_data(ctx, target, size, NULL, memObj, offset, usage, 0, bufObj);
403 }
404
405 /**
406 * Called via glInvalidateBuffer(Sub)Data.
407 */
408 static void
409 st_bufferobj_invalidate(struct gl_context *ctx,
410 struct gl_buffer_object *obj,
411 GLintptr offset,
412 GLsizeiptr size)
413 {
414 struct st_context *st = st_context(ctx);
415 struct pipe_context *pipe = st->pipe;
416 struct st_buffer_object *st_obj = st_buffer_object(obj);
417
418 /* We ignore partial invalidates. */
419 if (offset != 0 || size != obj->Size)
420 return;
421
422 /* Nothing to invalidate. */
423 if (!st_obj->buffer)
424 return;
425
426 pipe->invalidate_resource(pipe, st_obj->buffer);
427 }
428
429
430 /**
431 * Convert GLbitfield of GL_MAP_x flags to gallium pipe_transfer_usage flags.
432 * \param wholeBuffer is the whole buffer being mapped?
433 */
434 enum pipe_transfer_usage
435 st_access_flags_to_transfer_flags(GLbitfield access, bool wholeBuffer)
436 {
437 enum pipe_transfer_usage flags = 0;
438
439 if (access & GL_MAP_WRITE_BIT)
440 flags |= PIPE_TRANSFER_WRITE;
441
442 if (access & GL_MAP_READ_BIT)
443 flags |= PIPE_TRANSFER_READ;
444
445 if (access & GL_MAP_FLUSH_EXPLICIT_BIT)
446 flags |= PIPE_TRANSFER_FLUSH_EXPLICIT;
447
448 if (access & GL_MAP_INVALIDATE_BUFFER_BIT) {
449 flags |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
450 }
451 else if (access & GL_MAP_INVALIDATE_RANGE_BIT) {
452 if (wholeBuffer)
453 flags |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
454 else
455 flags |= PIPE_TRANSFER_DISCARD_RANGE;
456 }
457
458 if (access & GL_MAP_UNSYNCHRONIZED_BIT)
459 flags |= PIPE_TRANSFER_UNSYNCHRONIZED;
460
461 if (access & GL_MAP_PERSISTENT_BIT)
462 flags |= PIPE_TRANSFER_PERSISTENT;
463
464 if (access & GL_MAP_COHERENT_BIT)
465 flags |= PIPE_TRANSFER_COHERENT;
466
467 /* ... other flags ...
468 */
469
470 if (access & MESA_MAP_NOWAIT_BIT)
471 flags |= PIPE_TRANSFER_DONTBLOCK;
472
473 return flags;
474 }
475
476
477 /**
478 * Called via glMapBufferRange().
479 */
480 static void *
481 st_bufferobj_map_range(struct gl_context *ctx,
482 GLintptr offset, GLsizeiptr length, GLbitfield access,
483 struct gl_buffer_object *obj,
484 gl_map_buffer_index index)
485 {
486 struct pipe_context *pipe = st_context(ctx)->pipe;
487 struct st_buffer_object *st_obj = st_buffer_object(obj);
488
489 assert(offset >= 0);
490 assert(length >= 0);
491 assert(offset < obj->Size);
492 assert(offset + length <= obj->Size);
493
494 const enum pipe_transfer_usage transfer_flags =
495 st_access_flags_to_transfer_flags(access,
496 offset == 0 && length == obj->Size);
497
498 obj->Mappings[index].Pointer = pipe_buffer_map_range(pipe,
499 st_obj->buffer,
500 offset, length,
501 transfer_flags,
502 &st_obj->transfer[index]);
503 if (obj->Mappings[index].Pointer) {
504 obj->Mappings[index].Offset = offset;
505 obj->Mappings[index].Length = length;
506 obj->Mappings[index].AccessFlags = access;
507 }
508 else {
509 st_obj->transfer[index] = NULL;
510 }
511
512 return obj->Mappings[index].Pointer;
513 }
514
515
516 static void
517 st_bufferobj_flush_mapped_range(struct gl_context *ctx,
518 GLintptr offset, GLsizeiptr length,
519 struct gl_buffer_object *obj,
520 gl_map_buffer_index index)
521 {
522 struct pipe_context *pipe = st_context(ctx)->pipe;
523 struct st_buffer_object *st_obj = st_buffer_object(obj);
524
525 /* Subrange is relative to mapped range */
526 assert(offset >= 0);
527 assert(length >= 0);
528 assert(offset + length <= obj->Mappings[index].Length);
529 assert(obj->Mappings[index].Pointer);
530
531 if (!length)
532 return;
533
534 pipe_buffer_flush_mapped_range(pipe, st_obj->transfer[index],
535 obj->Mappings[index].Offset + offset,
536 length);
537 }
538
539
540 /**
541 * Called via glUnmapBufferARB().
542 */
543 static GLboolean
544 st_bufferobj_unmap(struct gl_context *ctx, struct gl_buffer_object *obj,
545 gl_map_buffer_index index)
546 {
547 struct pipe_context *pipe = st_context(ctx)->pipe;
548 struct st_buffer_object *st_obj = st_buffer_object(obj);
549
550 if (obj->Mappings[index].Length)
551 pipe_buffer_unmap(pipe, st_obj->transfer[index]);
552
553 st_obj->transfer[index] = NULL;
554 obj->Mappings[index].Pointer = NULL;
555 obj->Mappings[index].Offset = 0;
556 obj->Mappings[index].Length = 0;
557 return GL_TRUE;
558 }
559
560
561 /**
562 * Called via glCopyBufferSubData().
563 */
564 static void
565 st_copy_buffer_subdata(struct gl_context *ctx,
566 struct gl_buffer_object *src,
567 struct gl_buffer_object *dst,
568 GLintptr readOffset, GLintptr writeOffset,
569 GLsizeiptr size)
570 {
571 struct pipe_context *pipe = st_context(ctx)->pipe;
572 struct st_buffer_object *srcObj = st_buffer_object(src);
573 struct st_buffer_object *dstObj = st_buffer_object(dst);
574 struct pipe_box box;
575
576 if (!size)
577 return;
578
579 /* buffer should not already be mapped */
580 assert(!_mesa_check_disallowed_mapping(src));
581 assert(!_mesa_check_disallowed_mapping(dst));
582
583 u_box_1d(readOffset, size, &box);
584
585 pipe->resource_copy_region(pipe, dstObj->buffer, 0, writeOffset, 0, 0,
586 srcObj->buffer, 0, &box);
587 }
588
589 /**
590 * Called via glClearBufferSubData().
591 */
592 static void
593 st_clear_buffer_subdata(struct gl_context *ctx,
594 GLintptr offset, GLsizeiptr size,
595 const void *clearValue,
596 GLsizeiptr clearValueSize,
597 struct gl_buffer_object *bufObj)
598 {
599 struct pipe_context *pipe = st_context(ctx)->pipe;
600 struct st_buffer_object *buf = st_buffer_object(bufObj);
601 static const char zeros[16] = {0};
602
603 if (!pipe->clear_buffer) {
604 _mesa_ClearBufferSubData_sw(ctx, offset, size,
605 clearValue, clearValueSize, bufObj);
606 return;
607 }
608
609 if (!clearValue)
610 clearValue = zeros;
611
612 pipe->clear_buffer(pipe, buf->buffer, offset, size,
613 clearValue, clearValueSize);
614 }
615
616 static void
617 st_bufferobj_page_commitment(struct gl_context *ctx,
618 struct gl_buffer_object *bufferObj,
619 GLintptr offset, GLsizeiptr size,
620 GLboolean commit)
621 {
622 struct pipe_context *pipe = st_context(ctx)->pipe;
623 struct st_buffer_object *buf = st_buffer_object(bufferObj);
624 struct pipe_box box;
625
626 u_box_1d(offset, size, &box);
627
628 if (!pipe->resource_commit(pipe, buf->buffer, 0, &box, commit)) {
629 _mesa_error(ctx, GL_OUT_OF_MEMORY, "glBufferPageCommitmentARB(out of memory)");
630 return;
631 }
632 }
633
634 void
635 st_init_bufferobject_functions(struct pipe_screen *screen,
636 struct dd_function_table *functions)
637 {
638 functions->NewBufferObject = st_bufferobj_alloc;
639 functions->DeleteBuffer = st_bufferobj_free;
640 functions->BufferData = st_bufferobj_data;
641 functions->BufferDataMem = st_bufferobj_data_mem;
642 functions->BufferSubData = st_bufferobj_subdata;
643 functions->GetBufferSubData = st_bufferobj_get_subdata;
644 functions->MapBufferRange = st_bufferobj_map_range;
645 functions->FlushMappedBufferRange = st_bufferobj_flush_mapped_range;
646 functions->UnmapBuffer = st_bufferobj_unmap;
647 functions->CopyBufferSubData = st_copy_buffer_subdata;
648 functions->ClearBufferSubData = st_clear_buffer_subdata;
649 functions->BufferPageCommitment = st_bufferobj_page_commitment;
650
651 if (screen->get_param(screen, PIPE_CAP_INVALIDATE_BUFFER))
652 functions->InvalidateBufferSubData = st_bufferobj_invalidate;
653 }