/**************************************************************************
 *
 * Copyright 2007 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/


/**
 * Functions for pixel buffer objects and vertex/element buffer objects.
 */


#include <inttypes.h> /* for PRId64 macro */

#include "main/imports.h"
#include "main/mtypes.h"
#include "main/arrayobj.h"
#include "main/bufferobj.h"

#include "st_context.h"
#include "st_cb_bufferobjects.h"
#include "st_cb_memoryobjects.h"
#include "st_debug.h"

#include "pipe/p_context.h"
#include "pipe/p_defines.h"
#include "util/u_inlines.h"


/**
 * There is some duplication between Mesa's buffer objects and our
 * bufmgr buffers.  Both have an integer handle and a hashtable to
 * look up an opaque structure.  It would be nice if the handles and
 * internal structures were somehow shared.
 */
static struct gl_buffer_object *
st_bufferobj_alloc(struct gl_context *ctx, GLuint name)
{
   struct st_buffer_object *st_obj = ST_CALLOC_STRUCT(st_buffer_object);

   if (!st_obj)
      return NULL;

   _mesa_initialize_buffer_object(ctx, &st_obj->Base, name);

   return &st_obj->Base;
}



/**
 * Deallocate/free a vertex/pixel buffer object.
 * Called via glDeleteBuffersARB().
 */
static void
st_bufferobj_free(struct gl_context *ctx, struct gl_buffer_object *obj)
{
   struct st_buffer_object *st_obj = st_buffer_object(obj);

   assert(obj->RefCount == 0);
   _mesa_buffer_unmap_all_mappings(ctx, obj);

   if (st_obj->buffer)
      pipe_resource_reference(&st_obj->buffer, NULL);

   _mesa_delete_buffer_object(ctx, obj);
}



/**
 * Replace data in a subrange of a buffer object.  If the data range
 * specified by size + offset extends beyond the end of the buffer, or
 * if data is NULL, no copy is performed.
 * Called via glBufferSubDataARB().
 */
static void
st_bufferobj_subdata(struct gl_context *ctx,
                     GLintptrARB offset,
                     GLsizeiptrARB size,
                     const void *data, struct gl_buffer_object *obj)
{
   struct st_buffer_object *st_obj = st_buffer_object(obj);

   /* we may be called from VBO code, so double-check params here */
   assert(offset >= 0);
   assert(size >= 0);
   assert(offset + size <= obj->Size);

   if (!size)
      return;

   /*
    * According to the ARB_vertex_buffer_object specification, if data is
    * NULL, then the contents of the buffer object's data store become
    * undefined.  We simply ignore the call and leave the store unchanged.
    */
   if (!data)
      return;

   if (!st_obj->buffer) {
      /* we probably ran out of memory during buffer allocation */
      return;
   }

   /* Now that transfers are per-context, we don't have to figure out
    * flushing here. Usually drivers won't need to flush in this case
    * even if the buffer is currently referenced by hardware - they
    * just queue the upload as dma rather than mapping the underlying
    * buffer directly.
    */
   pipe_buffer_write(st_context(ctx)->pipe,
                     st_obj->buffer,
                     offset, size, data);
}


/**
 * Called via glGetBufferSubDataARB().
 */
static void
st_bufferobj_get_subdata(struct gl_context *ctx,
                         GLintptrARB offset,
                         GLsizeiptrARB size,
                         void *data, struct gl_buffer_object *obj)
{
   struct st_buffer_object *st_obj = st_buffer_object(obj);

   /* we may be called from VBO code, so double-check params here */
   assert(offset >= 0);
   assert(size >= 0);
   assert(offset + size <= obj->Size);

   if (!size)
      return;

   if (!st_obj->buffer) {
      /* we probably ran out of memory during buffer allocation */
      return;
   }

   pipe_buffer_read(st_context(ctx)->pipe, st_obj->buffer,
                    offset, size, data);
}


/**
 * Return a bitmask of PIPE_BIND_x flags corresponding to a GL buffer target.
 */
static unsigned
buffer_target_to_bind_flags(GLenum target)
{
   switch (target) {
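   /* Note: pixel pack/unpack buffers get render-target and sampler-view
    * bindings because the state tracker may implement PBO transfers by
    * rendering to, or sampling from, the buffer rather than with CPU
    * copies.
    */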
   case GL_PIXEL_PACK_BUFFER_ARB:
   case GL_PIXEL_UNPACK_BUFFER_ARB:
      return PIPE_BIND_RENDER_TARGET | PIPE_BIND_SAMPLER_VIEW;
   case GL_ARRAY_BUFFER_ARB:
      return PIPE_BIND_VERTEX_BUFFER;
   case GL_ELEMENT_ARRAY_BUFFER_ARB:
      return PIPE_BIND_INDEX_BUFFER;
   case GL_TEXTURE_BUFFER:
      return PIPE_BIND_SAMPLER_VIEW;
   case GL_TRANSFORM_FEEDBACK_BUFFER:
      return PIPE_BIND_STREAM_OUTPUT;
   case GL_UNIFORM_BUFFER:
      return PIPE_BIND_CONSTANT_BUFFER;
   case GL_DRAW_INDIRECT_BUFFER:
   case GL_PARAMETER_BUFFER_ARB:
      return PIPE_BIND_COMMAND_ARGS_BUFFER;
   case GL_ATOMIC_COUNTER_BUFFER:
   case GL_SHADER_STORAGE_BUFFER:
      return PIPE_BIND_SHADER_BUFFER;
   case GL_QUERY_BUFFER:
      return PIPE_BIND_QUERY_BUFFER;
   default:
      return 0;
   }
}


/**
 * Return a bitmask of PIPE_RESOURCE_FLAG_x values corresponding to
 * GL buffer storage flags (GL_MAP_PERSISTENT_BIT etc).
 */
static unsigned
storage_flags_to_buffer_flags(GLbitfield storageFlags)
{
   unsigned flags = 0;
   if (storageFlags & GL_MAP_PERSISTENT_BIT)
      flags |= PIPE_RESOURCE_FLAG_MAP_PERSISTENT;
   if (storageFlags & GL_MAP_COHERENT_BIT)
      flags |= PIPE_RESOURCE_FLAG_MAP_COHERENT;
   if (storageFlags & GL_SPARSE_STORAGE_BIT_ARB)
      flags |= PIPE_RESOURCE_FLAG_SPARSE;
   return flags;
}


/**
 * From a buffer object's target, immutability flag, storage flags and
 * usage hint, return a pipe_resource_usage value (PIPE_USAGE_DYNAMIC,
 * STREAM, etc).
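 *
 * A couple of illustrative mappings, per the logic below: a mutable
 * glBufferData(..., GL_STREAM_DRAW) vertex buffer maps to
 * PIPE_USAGE_STREAM, while any *_READ usage maps to PIPE_USAGE_STAGING
 * so that CPU reads of the buffer stay fast.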
 */
static enum pipe_resource_usage
buffer_usage(GLenum target, GLboolean immutable,
             GLbitfield storageFlags, GLenum usage)
{
   if (immutable) {
      /* BufferStorage */
      if (storageFlags & GL_CLIENT_STORAGE_BIT) {
         if (storageFlags & GL_MAP_READ_BIT)
            return PIPE_USAGE_STAGING;
         else
            return PIPE_USAGE_STREAM;
      } else {
         return PIPE_USAGE_DEFAULT;
      }
   }
   else {
      /* BufferData */
      switch (usage) {
      case GL_DYNAMIC_DRAW:
      case GL_DYNAMIC_COPY:
         return PIPE_USAGE_DYNAMIC;
      case GL_STREAM_DRAW:
      case GL_STREAM_COPY:
         /* XXX: Remove this test and fall-through when we have PBO unpacking
          * acceleration. Right now, PBO unpacking is done by the CPU, so we
          * have to make sure CPU reads are fast.
          */
         if (target != GL_PIXEL_UNPACK_BUFFER_ARB) {
            return PIPE_USAGE_STREAM;
         }
         /* fall through */
      case GL_STATIC_READ:
      case GL_DYNAMIC_READ:
      case GL_STREAM_READ:
         return PIPE_USAGE_STAGING;
      case GL_STATIC_DRAW:
      case GL_STATIC_COPY:
      default:
         return PIPE_USAGE_DEFAULT;
      }
   }
}


static ALWAYS_INLINE GLboolean
bufferobj_data(struct gl_context *ctx,
               GLenum target,
               GLsizeiptrARB size,
               const void *data,
               struct gl_memory_object *memObj,
               GLuint64 offset,
               GLenum usage,
               GLbitfield storageFlags,
               struct gl_buffer_object *obj)
{
   struct st_context *st = st_context(ctx);
   struct pipe_context *pipe = st->pipe;
   struct pipe_screen *screen = pipe->screen;
   struct st_buffer_object *st_obj = st_buffer_object(obj);
   struct st_memory_object *st_mem_obj = st_memory_object(memObj);

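   /* If an application re-specifies a buffer with the same size, usage and
    * storage flags (the classic glBufferData "orphaning" pattern), reuse
    * the existing pipe resource instead of allocating a new one and
    * revalidating all of its bindings.
    */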
   if (target != GL_EXTERNAL_VIRTUAL_MEMORY_BUFFER_AMD &&
       size && st_obj->buffer &&
       st_obj->Base.Size == size &&
       st_obj->Base.Usage == usage &&
       st_obj->Base.StorageFlags == storageFlags) {
      if (data) {
         /* Just discard the old contents and write new data.
          * This should be the same as creating a new buffer, but we avoid
          * a lot of validation in Mesa.
          */
         pipe->buffer_subdata(pipe, st_obj->buffer,
                              PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE,
                              0, size, data);
         return GL_TRUE;
      } else if (screen->get_param(screen, PIPE_CAP_INVALIDATE_BUFFER)) {
         pipe->invalidate_resource(pipe, st_obj->buffer);
         return GL_TRUE;
      }
   }

   st_obj->Base.Size = size;
   st_obj->Base.Usage = usage;
   st_obj->Base.StorageFlags = storageFlags;

   pipe_resource_reference(&st_obj->buffer, NULL);

   const unsigned bindings = buffer_target_to_bind_flags(target);

   if (ST_DEBUG & DEBUG_BUFFER) {
      debug_printf("Create buffer size %" PRId64 " bind 0x%x\n",
                   (int64_t) size, bindings);
   }

   if (size != 0) {
      struct pipe_resource buffer;

      memset(&buffer, 0, sizeof buffer);
      buffer.target = PIPE_BUFFER;
      buffer.format = PIPE_FORMAT_R8_UNORM; /* want TYPELESS or similar */
      buffer.bind = bindings;
      buffer.usage =
         buffer_usage(target, st_obj->Base.Immutable, storageFlags, usage);
      buffer.flags = storage_flags_to_buffer_flags(storageFlags);
      buffer.width0 = size;
      buffer.height0 = 1;
      buffer.depth0 = 1;
      buffer.array_size = 1;

      if (st_mem_obj) {
         st_obj->buffer = screen->resource_from_memobj(screen, &buffer,
                                                       st_mem_obj->memory,
                                                       offset);
      }
      else if (target == GL_EXTERNAL_VIRTUAL_MEMORY_BUFFER_AMD) {
         st_obj->buffer =
            screen->resource_from_user_memory(screen, &buffer, (void*)data);
      }
      else {
         st_obj->buffer = screen->resource_create(screen, &buffer);

         if (st_obj->buffer && data)
            pipe_buffer_write(pipe, st_obj->buffer, 0, size, data);
      }

      if (!st_obj->buffer) {
         /* out of memory */
         st_obj->Base.Size = 0;
         return GL_FALSE;
      }
   }

   /* The current buffer may be bound, so we have to revalidate all atoms
    * that might be using it.
    */
   /* TODO: Add arrays to usage history */
   ctx->NewDriverState |= ST_NEW_VERTEX_ARRAYS;
   if (st_obj->Base.UsageHistory & USAGE_UNIFORM_BUFFER)
      ctx->NewDriverState |= ST_NEW_UNIFORM_BUFFER;
   if (st_obj->Base.UsageHistory & USAGE_SHADER_STORAGE_BUFFER)
      ctx->NewDriverState |= ST_NEW_STORAGE_BUFFER;
   if (st_obj->Base.UsageHistory & USAGE_TEXTURE_BUFFER)
      ctx->NewDriverState |= ST_NEW_SAMPLER_VIEWS | ST_NEW_IMAGE_UNITS;
   if (st_obj->Base.UsageHistory & USAGE_ATOMIC_COUNTER_BUFFER)
      ctx->NewDriverState |= ctx->DriverFlags.NewAtomicBuffer;

   return GL_TRUE;
}

/**
 * Allocate space for and store data in a buffer object.  Any data that was
 * previously stored in the buffer object is lost.  If data is NULL,
 * memory will be allocated, but no copy will occur.
 * Called via ctx->Driver.BufferData().
 * \return GL_TRUE for success, GL_FALSE if out of memory
 */
static GLboolean
st_bufferobj_data(struct gl_context *ctx,
                  GLenum target,
                  GLsizeiptrARB size,
                  const void *data,
                  GLenum usage,
                  GLbitfield storageFlags,
                  struct gl_buffer_object *obj)
{
   return bufferobj_data(ctx, target, size, data, NULL, 0, usage,
                         storageFlags, obj);
}

static GLboolean
st_bufferobj_data_mem(struct gl_context *ctx,
                      GLenum target,
                      GLsizeiptrARB size,
                      struct gl_memory_object *memObj,
                      GLuint64 offset,
                      GLenum usage,
                      struct gl_buffer_object *bufObj)
{
   return bufferobj_data(ctx, target, size, NULL, memObj, offset, usage, 0,
                         bufObj);
}

/**
 * Called via glInvalidateBuffer(Sub)Data.
 */
static void
st_bufferobj_invalidate(struct gl_context *ctx,
                        struct gl_buffer_object *obj,
                        GLintptr offset,
                        GLsizeiptr size)
{
   struct st_context *st = st_context(ctx);
   struct pipe_context *pipe = st->pipe;
   struct st_buffer_object *st_obj = st_buffer_object(obj);

   /* We ignore partial invalidates. */
   if (offset != 0 || size != obj->Size)
      return;

   /* Nothing to invalidate. */
   if (!st_obj->buffer)
      return;

   pipe->invalidate_resource(pipe, st_obj->buffer);
}


/**
 * Convert a GLbitfield of GL_MAP_x flags to gallium pipe_transfer_usage
 * flags.
 * \param wholeBuffer  whether the whole buffer is being mapped
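 *
 * For example, GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_RANGE_BIT on a partial
 * range becomes PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD_RANGE, while
 * the same access flags with wholeBuffer=true are promoted to
 * PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE.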
 */
enum pipe_transfer_usage
st_access_flags_to_transfer_flags(GLbitfield access, bool wholeBuffer)
{
   enum pipe_transfer_usage flags = 0;

   if (access & GL_MAP_WRITE_BIT)
      flags |= PIPE_TRANSFER_WRITE;

   if (access & GL_MAP_READ_BIT)
      flags |= PIPE_TRANSFER_READ;

   if (access & GL_MAP_FLUSH_EXPLICIT_BIT)
      flags |= PIPE_TRANSFER_FLUSH_EXPLICIT;

   if (access & GL_MAP_INVALIDATE_BUFFER_BIT) {
      flags |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
   }
   else if (access & GL_MAP_INVALIDATE_RANGE_BIT) {
      if (wholeBuffer)
         flags |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
      else
         flags |= PIPE_TRANSFER_DISCARD_RANGE;
   }

   if (access & GL_MAP_UNSYNCHRONIZED_BIT)
      flags |= PIPE_TRANSFER_UNSYNCHRONIZED;

   if (access & GL_MAP_PERSISTENT_BIT)
      flags |= PIPE_TRANSFER_PERSISTENT;

   if (access & GL_MAP_COHERENT_BIT)
      flags |= PIPE_TRANSFER_COHERENT;

   /* ... other flags ...
    */

   if (access & MESA_MAP_NOWAIT_BIT)
      flags |= PIPE_TRANSFER_DONTBLOCK;

   return flags;
}


/**
 * Called via glMapBufferRange().
 */
static void *
st_bufferobj_map_range(struct gl_context *ctx,
                       GLintptr offset, GLsizeiptr length, GLbitfield access,
                       struct gl_buffer_object *obj,
                       gl_map_buffer_index index)
{
   struct pipe_context *pipe = st_context(ctx)->pipe;
   struct st_buffer_object *st_obj = st_buffer_object(obj);

   assert(offset >= 0);
   assert(length >= 0);
   assert(offset < obj->Size);
   assert(offset + length <= obj->Size);

   const enum pipe_transfer_usage transfer_flags =
      st_access_flags_to_transfer_flags(access,
                                        offset == 0 && length == obj->Size);

   obj->Mappings[index].Pointer = pipe_buffer_map_range(pipe,
                                                        st_obj->buffer,
                                                        offset, length,
                                                        transfer_flags,
                                                        &st_obj->transfer[index]);
   if (obj->Mappings[index].Pointer) {
      obj->Mappings[index].Offset = offset;
      obj->Mappings[index].Length = length;
      obj->Mappings[index].AccessFlags = access;
   }
   else {
      st_obj->transfer[index] = NULL;
   }

   return obj->Mappings[index].Pointer;
}


static void
st_bufferobj_flush_mapped_range(struct gl_context *ctx,
                                GLintptr offset, GLsizeiptr length,
                                struct gl_buffer_object *obj,
                                gl_map_buffer_index index)
{
   struct pipe_context *pipe = st_context(ctx)->pipe;
   struct st_buffer_object *st_obj = st_buffer_object(obj);

   /* Subrange is relative to mapped range */
   assert(offset >= 0);
   assert(length >= 0);
   assert(offset + length <= obj->Mappings[index].Length);
   assert(obj->Mappings[index].Pointer);

   if (!length)
      return;

   pipe_buffer_flush_mapped_range(pipe, st_obj->transfer[index],
                                  obj->Mappings[index].Offset + offset,
                                  length);
}


/**
 * Called via glUnmapBufferARB().
 */
static GLboolean
st_bufferobj_unmap(struct gl_context *ctx, struct gl_buffer_object *obj,
                   gl_map_buffer_index index)
{
   struct pipe_context *pipe = st_context(ctx)->pipe;
   struct st_buffer_object *st_obj = st_buffer_object(obj);

   if (obj->Mappings[index].Length)
      pipe_buffer_unmap(pipe, st_obj->transfer[index]);

   st_obj->transfer[index] = NULL;
   obj->Mappings[index].Pointer = NULL;
   obj->Mappings[index].Offset = 0;
   obj->Mappings[index].Length = 0;
   return GL_TRUE;
}


/**
 * Called via glCopyBufferSubData().
 */
static void
st_copy_buffer_subdata(struct gl_context *ctx,
                       struct gl_buffer_object *src,
                       struct gl_buffer_object *dst,
                       GLintptr readOffset, GLintptr writeOffset,
                       GLsizeiptr size)
{
   struct pipe_context *pipe = st_context(ctx)->pipe;
   struct st_buffer_object *srcObj = st_buffer_object(src);
   struct st_buffer_object *dstObj = st_buffer_object(dst);
   struct pipe_box box;

   if (!size)
      return;

   /* buffer should not already be mapped */
   assert(!_mesa_check_disallowed_mapping(src));
   assert(!_mesa_check_disallowed_mapping(dst));

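   /* Buffers are one-dimensional resources, so the region copy takes a 1-D
    * box (x = byte offset, width = byte count) at mipmap level 0.
    */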
   u_box_1d(readOffset, size, &box);

   pipe->resource_copy_region(pipe, dstObj->buffer, 0, writeOffset, 0, 0,
                              srcObj->buffer, 0, &box);
}

/**
 * Called via glClearBufferSubData().
 */
static void
st_clear_buffer_subdata(struct gl_context *ctx,
                        GLintptr offset, GLsizeiptr size,
                        const void *clearValue,
                        GLsizeiptr clearValueSize,
                        struct gl_buffer_object *bufObj)
{
   struct pipe_context *pipe = st_context(ctx)->pipe;
   struct st_buffer_object *buf = st_buffer_object(bufObj);
   static const char zeros[16] = {0};

   if (!pipe->clear_buffer) {
      _mesa_ClearBufferSubData_sw(ctx, offset, size,
                                  clearValue, clearValueSize, bufObj);
      return;
   }

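   /* Per ARB_clear_buffer_object, a NULL data pointer means "fill with
    * zeros", so substitute a zero pattern of the requested element size
    * (clear values are at most 16 bytes).
    */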
   if (!clearValue)
      clearValue = zeros;

   pipe->clear_buffer(pipe, buf->buffer, offset, size,
                      clearValue, clearValueSize);
}

static void
st_bufferobj_page_commitment(struct gl_context *ctx,
                             struct gl_buffer_object *bufferObj,
                             GLintptr offset, GLsizeiptr size,
                             GLboolean commit)
{
   struct pipe_context *pipe = st_context(ctx)->pipe;
   struct st_buffer_object *buf = st_buffer_object(bufferObj);
   struct pipe_box box;

   u_box_1d(offset, size, &box);

   if (!pipe->resource_commit(pipe, buf->buffer, 0, &box, commit)) {
      _mesa_error(ctx, GL_OUT_OF_MEMORY,
                  "glBufferPageCommitmentARB(out of memory)");
      return;
   }
}

void
st_init_bufferobject_functions(struct pipe_screen *screen,
                               struct dd_function_table *functions)
{
   functions->NewBufferObject = st_bufferobj_alloc;
   functions->DeleteBuffer = st_bufferobj_free;
   functions->BufferData = st_bufferobj_data;
   functions->BufferDataMem = st_bufferobj_data_mem;
   functions->BufferSubData = st_bufferobj_subdata;
   functions->GetBufferSubData = st_bufferobj_get_subdata;
   functions->MapBufferRange = st_bufferobj_map_range;
   functions->FlushMappedBufferRange = st_bufferobj_flush_mapped_range;
   functions->UnmapBuffer = st_bufferobj_unmap;
   functions->CopyBufferSubData = st_copy_buffer_subdata;
   functions->ClearBufferSubData = st_clear_buffer_subdata;
   functions->BufferPageCommitment = st_bufferobj_page_commitment;

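   /* Buffer invalidation is only a hint, so the hook is installed only when
    * the driver can actually take advantage of it; otherwise core Mesa is
    * expected to treat glInvalidateBufferData() as a no-op.
    */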
   if (screen->get_param(screen, PIPE_CAP_INVALIDATE_BUFFER))
      functions->InvalidateBufferSubData = st_bufferobj_invalidate;
}