1 /**************************************************************************
3 * Copyright 2007 VMware, Inc.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
30 * Functions for pixel buffer objects and vertex/element buffer objects.
34 #include <inttypes.h> /* for PRId64 macro */
36 #include "main/errors.h"
37 #include "util/imports.h"
38 #include "main/mtypes.h"
39 #include "main/arrayobj.h"
40 #include "main/bufferobj.h"
42 #include "st_context.h"
43 #include "st_cb_bufferobjects.h"
44 #include "st_cb_memoryobjects.h"
48 #include "pipe/p_context.h"
49 #include "pipe/p_defines.h"
50 #include "util/u_inlines.h"
54 * There is some duplication between mesa's bufferobjects and our
55 * bufmgr buffers. Both have an integer handle and a hashtable to
56 * lookup an opaque structure. It would be nice if the handles and
57 * internal structure were somehow shared.
59 static struct gl_buffer_object
*
60 st_bufferobj_alloc(struct gl_context
*ctx
, GLuint name
)
62 struct st_buffer_object
*st_obj
= ST_CALLOC_STRUCT(st_buffer_object
);
67 _mesa_initialize_buffer_object(ctx
, &st_obj
->Base
, name
);
75 * Deallocate/free a vertex/pixel buffer object.
76 * Called via glDeleteBuffersARB().
79 st_bufferobj_free(struct gl_context
*ctx
, struct gl_buffer_object
*obj
)
81 struct st_buffer_object
*st_obj
= st_buffer_object(obj
);
83 assert(obj
->RefCount
== 0);
84 _mesa_buffer_unmap_all_mappings(ctx
, obj
);
87 pipe_resource_reference(&st_obj
->buffer
, NULL
);
89 _mesa_delete_buffer_object(ctx
, obj
);
95 * Replace data in a subrange of buffer object. If the data range
96 * specified by size + offset extends beyond the end of the buffer or
97 * if data is NULL, no copy is performed.
98 * Called via glBufferSubDataARB().
101 st_bufferobj_subdata(struct gl_context
*ctx
,
104 const void * data
, struct gl_buffer_object
*obj
)
106 struct st_buffer_object
*st_obj
= st_buffer_object(obj
);
108 /* we may be called from VBO code, so double-check params here */
111 assert(offset
+ size
<= obj
->Size
);
117 * According to ARB_vertex_buffer_object specification, if data is null,
118 * then the contents of the buffer object's data store is undefined. We just
119 * ignore, and leave it unchanged.
124 if (!st_obj
->buffer
) {
125 /* we probably ran out of memory during buffer allocation */
129 /* Now that transfers are per-context, we don't have to figure out
130 * flushing here. Usually drivers won't need to flush in this case
131 * even if the buffer is currently referenced by hardware - they
132 * just queue the upload as dma rather than mapping the underlying
135 * If the buffer is mapped, suppress implicit buffer range invalidation
136 * by using PIPE_TRANSFER_MAP_DIRECTLY.
138 struct pipe_context
*pipe
= st_context(ctx
)->pipe
;
140 pipe
->buffer_subdata(pipe
, st_obj
->buffer
,
141 _mesa_bufferobj_mapped(obj
, MAP_USER
) ?
142 PIPE_TRANSFER_MAP_DIRECTLY
: 0,
148 * Called via glGetBufferSubDataARB().
151 st_bufferobj_get_subdata(struct gl_context
*ctx
,
154 void * data
, struct gl_buffer_object
*obj
)
156 struct st_buffer_object
*st_obj
= st_buffer_object(obj
);
158 /* we may be called from VBO code, so double-check params here */
161 assert(offset
+ size
<= obj
->Size
);
166 if (!st_obj
->buffer
) {
167 /* we probably ran out of memory during buffer allocation */
171 pipe_buffer_read(st_context(ctx
)->pipe
, st_obj
->buffer
,
177 * Return bitmask of PIPE_BIND_x flags corresponding a GL buffer target.
180 buffer_target_to_bind_flags(GLenum target
)
183 case GL_PIXEL_PACK_BUFFER_ARB
:
184 case GL_PIXEL_UNPACK_BUFFER_ARB
:
185 return PIPE_BIND_RENDER_TARGET
| PIPE_BIND_SAMPLER_VIEW
;
186 case GL_ARRAY_BUFFER_ARB
:
187 return PIPE_BIND_VERTEX_BUFFER
;
188 case GL_ELEMENT_ARRAY_BUFFER_ARB
:
189 return PIPE_BIND_INDEX_BUFFER
;
190 case GL_TEXTURE_BUFFER
:
191 return PIPE_BIND_SAMPLER_VIEW
;
192 case GL_TRANSFORM_FEEDBACK_BUFFER
:
193 return PIPE_BIND_STREAM_OUTPUT
;
194 case GL_UNIFORM_BUFFER
:
195 return PIPE_BIND_CONSTANT_BUFFER
;
196 case GL_DRAW_INDIRECT_BUFFER
:
197 case GL_PARAMETER_BUFFER_ARB
:
198 return PIPE_BIND_COMMAND_ARGS_BUFFER
;
199 case GL_ATOMIC_COUNTER_BUFFER
:
200 case GL_SHADER_STORAGE_BUFFER
:
201 return PIPE_BIND_SHADER_BUFFER
;
202 case GL_QUERY_BUFFER
:
203 return PIPE_BIND_QUERY_BUFFER
;
211 * Return bitmask of PIPE_RESOURCE_x flags corresponding to GL_MAP_x flags.
214 storage_flags_to_buffer_flags(GLbitfield storageFlags
)
217 if (storageFlags
& GL_MAP_PERSISTENT_BIT
)
218 flags
|= PIPE_RESOURCE_FLAG_MAP_PERSISTENT
;
219 if (storageFlags
& GL_MAP_COHERENT_BIT
)
220 flags
|= PIPE_RESOURCE_FLAG_MAP_COHERENT
;
221 if (storageFlags
& GL_SPARSE_STORAGE_BIT_ARB
)
222 flags
|= PIPE_RESOURCE_FLAG_SPARSE
;
228 * From a buffer object's target, immutability flag, storage flags and
229 * usage hint, return a pipe_resource_usage value (PIPE_USAGE_DYNAMIC,
232 static enum pipe_resource_usage
233 buffer_usage(GLenum target
, GLboolean immutable
,
234 GLbitfield storageFlags
, GLenum usage
)
238 if (storageFlags
& GL_CLIENT_STORAGE_BIT
) {
239 if (storageFlags
& GL_MAP_READ_BIT
)
240 return PIPE_USAGE_STAGING
;
242 return PIPE_USAGE_STREAM
;
244 return PIPE_USAGE_DEFAULT
;
248 /* These are often read by the CPU, so enable CPU caches. */
249 if (target
== GL_PIXEL_PACK_BUFFER
||
250 target
== GL_PIXEL_UNPACK_BUFFER
)
251 return PIPE_USAGE_STAGING
;
255 case GL_DYNAMIC_DRAW
:
256 case GL_DYNAMIC_COPY
:
257 return PIPE_USAGE_DYNAMIC
;
260 return PIPE_USAGE_STREAM
;
262 case GL_DYNAMIC_READ
:
264 return PIPE_USAGE_STAGING
;
268 return PIPE_USAGE_DEFAULT
;
274 static ALWAYS_INLINE GLboolean
275 bufferobj_data(struct gl_context
*ctx
,
279 struct gl_memory_object
*memObj
,
282 GLbitfield storageFlags
,
283 struct gl_buffer_object
*obj
)
285 struct st_context
*st
= st_context(ctx
);
286 struct pipe_context
*pipe
= st
->pipe
;
287 struct pipe_screen
*screen
= pipe
->screen
;
288 struct st_buffer_object
*st_obj
= st_buffer_object(obj
);
289 struct st_memory_object
*st_mem_obj
= st_memory_object(memObj
);
290 bool is_mapped
= _mesa_bufferobj_mapped(obj
, MAP_USER
);
292 if (size
> UINT32_MAX
|| offset
> UINT32_MAX
) {
293 /* pipe_resource.width0 is 32 bits only and increasing it
294 * to 64 bits doesn't make much sense since hw support
295 * for > 4GB resources is limited.
297 st_obj
->Base
.Size
= 0;
301 if (target
!= GL_EXTERNAL_VIRTUAL_MEMORY_BUFFER_AMD
&&
302 size
&& st_obj
->buffer
&&
303 st_obj
->Base
.Size
== size
&&
304 st_obj
->Base
.Usage
== usage
&&
305 st_obj
->Base
.StorageFlags
== storageFlags
) {
307 /* Just discard the old contents and write new data.
308 * This should be the same as creating a new buffer, but we avoid
309 * a lot of validation in Mesa.
311 * If the buffer is mapped, we can't discard it.
313 * PIPE_TRANSFER_MAP_DIRECTLY supresses implicit buffer range
316 pipe
->buffer_subdata(pipe
, st_obj
->buffer
,
317 is_mapped
? PIPE_TRANSFER_MAP_DIRECTLY
:
318 PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE
,
321 } else if (is_mapped
) {
322 return GL_TRUE
; /* can't reallocate, nothing to do */
323 } else if (screen
->get_param(screen
, PIPE_CAP_INVALIDATE_BUFFER
)) {
324 pipe
->invalidate_resource(pipe
, st_obj
->buffer
);
329 st_obj
->Base
.Size
= size
;
330 st_obj
->Base
.Usage
= usage
;
331 st_obj
->Base
.StorageFlags
= storageFlags
;
333 pipe_resource_reference( &st_obj
->buffer
, NULL
);
335 const unsigned bindings
= buffer_target_to_bind_flags(target
);
337 if (ST_DEBUG
& DEBUG_BUFFER
) {
338 debug_printf("Create buffer size %" PRId64
" bind 0x%x\n",
339 (int64_t) size
, bindings
);
343 struct pipe_resource buffer
;
345 memset(&buffer
, 0, sizeof buffer
);
346 buffer
.target
= PIPE_BUFFER
;
347 buffer
.format
= PIPE_FORMAT_R8_UNORM
; /* want TYPELESS or similar */
348 buffer
.bind
= bindings
;
350 buffer_usage(target
, st_obj
->Base
.Immutable
, storageFlags
, usage
);
351 buffer
.flags
= storage_flags_to_buffer_flags(storageFlags
);
352 buffer
.width0
= size
;
355 buffer
.array_size
= 1;
358 st_obj
->buffer
= screen
->resource_from_memobj(screen
, &buffer
,
362 else if (target
== GL_EXTERNAL_VIRTUAL_MEMORY_BUFFER_AMD
) {
364 screen
->resource_from_user_memory(screen
, &buffer
, (void*)data
);
367 st_obj
->buffer
= screen
->resource_create(screen
, &buffer
);
369 if (st_obj
->buffer
&& data
)
370 pipe_buffer_write(pipe
, st_obj
->buffer
, 0, size
, data
);
373 if (!st_obj
->buffer
) {
375 st_obj
->Base
.Size
= 0;
380 /* The current buffer may be bound, so we have to revalidate all atoms that
383 if (st_obj
->Base
.UsageHistory
& USAGE_ARRAY_BUFFER
)
384 ctx
->NewDriverState
|= ST_NEW_VERTEX_ARRAYS
;
385 /* if (st_obj->Base.UsageHistory & USAGE_ELEMENT_ARRAY_BUFFER) */
386 /* ctx->NewDriverState |= TODO: Handle indices as gallium state; */
387 if (st_obj
->Base
.UsageHistory
& USAGE_UNIFORM_BUFFER
)
388 ctx
->NewDriverState
|= ST_NEW_UNIFORM_BUFFER
;
389 if (st_obj
->Base
.UsageHistory
& USAGE_SHADER_STORAGE_BUFFER
)
390 ctx
->NewDriverState
|= ST_NEW_STORAGE_BUFFER
;
391 if (st_obj
->Base
.UsageHistory
& USAGE_TEXTURE_BUFFER
)
392 ctx
->NewDriverState
|= ST_NEW_SAMPLER_VIEWS
| ST_NEW_IMAGE_UNITS
;
393 if (st_obj
->Base
.UsageHistory
& USAGE_ATOMIC_COUNTER_BUFFER
)
394 ctx
->NewDriverState
|= ctx
->DriverFlags
.NewAtomicBuffer
;
400 * Allocate space for and store data in a buffer object. Any data that was
401 * previously stored in the buffer object is lost. If data is NULL,
402 * memory will be allocated, but no copy will occur.
403 * Called via ctx->Driver.BufferData().
404 * \return GL_TRUE for success, GL_FALSE if out of memory
407 st_bufferobj_data(struct gl_context
*ctx
,
412 GLbitfield storageFlags
,
413 struct gl_buffer_object
*obj
)
415 return bufferobj_data(ctx
, target
, size
, data
, NULL
, 0, usage
, storageFlags
, obj
);
419 st_bufferobj_data_mem(struct gl_context
*ctx
,
422 struct gl_memory_object
*memObj
,
425 struct gl_buffer_object
*bufObj
)
427 return bufferobj_data(ctx
, target
, size
, NULL
, memObj
, offset
, usage
, 0, bufObj
);
431 * Called via glInvalidateBuffer(Sub)Data.
434 st_bufferobj_invalidate(struct gl_context
*ctx
,
435 struct gl_buffer_object
*obj
,
439 struct st_context
*st
= st_context(ctx
);
440 struct pipe_context
*pipe
= st
->pipe
;
441 struct st_buffer_object
*st_obj
= st_buffer_object(obj
);
443 /* We ignore partial invalidates. */
444 if (offset
!= 0 || size
!= obj
->Size
)
447 /* If the buffer is mapped, we can't invalidate it. */
448 if (!st_obj
->buffer
|| _mesa_bufferobj_mapped(obj
, MAP_USER
))
451 pipe
->invalidate_resource(pipe
, st_obj
->buffer
);
456 * Convert GLbitfield of GL_MAP_x flags to gallium pipe_transfer_usage flags.
457 * \param wholeBuffer is the whole buffer being mapped?
459 enum pipe_transfer_usage
460 st_access_flags_to_transfer_flags(GLbitfield access
, bool wholeBuffer
)
462 enum pipe_transfer_usage flags
= 0;
464 if (access
& GL_MAP_WRITE_BIT
)
465 flags
|= PIPE_TRANSFER_WRITE
;
467 if (access
& GL_MAP_READ_BIT
)
468 flags
|= PIPE_TRANSFER_READ
;
470 if (access
& GL_MAP_FLUSH_EXPLICIT_BIT
)
471 flags
|= PIPE_TRANSFER_FLUSH_EXPLICIT
;
473 if (access
& GL_MAP_INVALIDATE_BUFFER_BIT
) {
474 flags
|= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE
;
476 else if (access
& GL_MAP_INVALIDATE_RANGE_BIT
) {
478 flags
|= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE
;
480 flags
|= PIPE_TRANSFER_DISCARD_RANGE
;
483 if (access
& GL_MAP_UNSYNCHRONIZED_BIT
)
484 flags
|= PIPE_TRANSFER_UNSYNCHRONIZED
;
486 if (access
& GL_MAP_PERSISTENT_BIT
)
487 flags
|= PIPE_TRANSFER_PERSISTENT
;
489 if (access
& GL_MAP_COHERENT_BIT
)
490 flags
|= PIPE_TRANSFER_COHERENT
;
492 /* ... other flags ...
495 if (access
& MESA_MAP_NOWAIT_BIT
)
496 flags
|= PIPE_TRANSFER_DONTBLOCK
;
503 * Called via glMapBufferRange().
506 st_bufferobj_map_range(struct gl_context
*ctx
,
507 GLintptr offset
, GLsizeiptr length
, GLbitfield access
,
508 struct gl_buffer_object
*obj
,
509 gl_map_buffer_index index
)
511 struct pipe_context
*pipe
= st_context(ctx
)->pipe
;
512 struct st_buffer_object
*st_obj
= st_buffer_object(obj
);
516 assert(offset
< obj
->Size
);
517 assert(offset
+ length
<= obj
->Size
);
519 const enum pipe_transfer_usage transfer_flags
=
520 st_access_flags_to_transfer_flags(access
,
521 offset
== 0 && length
== obj
->Size
);
523 obj
->Mappings
[index
].Pointer
= pipe_buffer_map_range(pipe
,
527 &st_obj
->transfer
[index
]);
528 if (obj
->Mappings
[index
].Pointer
) {
529 obj
->Mappings
[index
].Offset
= offset
;
530 obj
->Mappings
[index
].Length
= length
;
531 obj
->Mappings
[index
].AccessFlags
= access
;
534 st_obj
->transfer
[index
] = NULL
;
537 return obj
->Mappings
[index
].Pointer
;
542 st_bufferobj_flush_mapped_range(struct gl_context
*ctx
,
543 GLintptr offset
, GLsizeiptr length
,
544 struct gl_buffer_object
*obj
,
545 gl_map_buffer_index index
)
547 struct pipe_context
*pipe
= st_context(ctx
)->pipe
;
548 struct st_buffer_object
*st_obj
= st_buffer_object(obj
);
550 /* Subrange is relative to mapped range */
553 assert(offset
+ length
<= obj
->Mappings
[index
].Length
);
554 assert(obj
->Mappings
[index
].Pointer
);
559 pipe_buffer_flush_mapped_range(pipe
, st_obj
->transfer
[index
],
560 obj
->Mappings
[index
].Offset
+ offset
,
566 * Called via glUnmapBufferARB().
569 st_bufferobj_unmap(struct gl_context
*ctx
, struct gl_buffer_object
*obj
,
570 gl_map_buffer_index index
)
572 struct pipe_context
*pipe
= st_context(ctx
)->pipe
;
573 struct st_buffer_object
*st_obj
= st_buffer_object(obj
);
575 if (obj
->Mappings
[index
].Length
)
576 pipe_buffer_unmap(pipe
, st_obj
->transfer
[index
]);
578 st_obj
->transfer
[index
] = NULL
;
579 obj
->Mappings
[index
].Pointer
= NULL
;
580 obj
->Mappings
[index
].Offset
= 0;
581 obj
->Mappings
[index
].Length
= 0;
587 * Called via glCopyBufferSubData().
590 st_copy_buffer_subdata(struct gl_context
*ctx
,
591 struct gl_buffer_object
*src
,
592 struct gl_buffer_object
*dst
,
593 GLintptr readOffset
, GLintptr writeOffset
,
596 struct pipe_context
*pipe
= st_context(ctx
)->pipe
;
597 struct st_buffer_object
*srcObj
= st_buffer_object(src
);
598 struct st_buffer_object
*dstObj
= st_buffer_object(dst
);
604 /* buffer should not already be mapped */
605 assert(!_mesa_check_disallowed_mapping(src
));
606 assert(!_mesa_check_disallowed_mapping(dst
));
608 u_box_1d(readOffset
, size
, &box
);
610 pipe
->resource_copy_region(pipe
, dstObj
->buffer
, 0, writeOffset
, 0, 0,
611 srcObj
->buffer
, 0, &box
);
615 * Called via glClearBufferSubData().
618 st_clear_buffer_subdata(struct gl_context
*ctx
,
619 GLintptr offset
, GLsizeiptr size
,
620 const void *clearValue
,
621 GLsizeiptr clearValueSize
,
622 struct gl_buffer_object
*bufObj
)
624 struct pipe_context
*pipe
= st_context(ctx
)->pipe
;
625 struct st_buffer_object
*buf
= st_buffer_object(bufObj
);
626 static const char zeros
[16] = {0};
628 if (!pipe
->clear_buffer
) {
629 _mesa_ClearBufferSubData_sw(ctx
, offset
, size
,
630 clearValue
, clearValueSize
, bufObj
);
637 pipe
->clear_buffer(pipe
, buf
->buffer
, offset
, size
,
638 clearValue
, clearValueSize
);
642 st_bufferobj_page_commitment(struct gl_context
*ctx
,
643 struct gl_buffer_object
*bufferObj
,
644 GLintptr offset
, GLsizeiptr size
,
647 struct pipe_context
*pipe
= st_context(ctx
)->pipe
;
648 struct st_buffer_object
*buf
= st_buffer_object(bufferObj
);
651 u_box_1d(offset
, size
, &box
);
653 if (!pipe
->resource_commit(pipe
, buf
->buffer
, 0, &box
, commit
)) {
654 _mesa_error(ctx
, GL_OUT_OF_MEMORY
, "glBufferPageCommitmentARB(out of memory)");
660 st_init_bufferobject_functions(struct pipe_screen
*screen
,
661 struct dd_function_table
*functions
)
663 functions
->NewBufferObject
= st_bufferobj_alloc
;
664 functions
->DeleteBuffer
= st_bufferobj_free
;
665 functions
->BufferData
= st_bufferobj_data
;
666 functions
->BufferDataMem
= st_bufferobj_data_mem
;
667 functions
->BufferSubData
= st_bufferobj_subdata
;
668 functions
->GetBufferSubData
= st_bufferobj_get_subdata
;
669 functions
->MapBufferRange
= st_bufferobj_map_range
;
670 functions
->FlushMappedBufferRange
= st_bufferobj_flush_mapped_range
;
671 functions
->UnmapBuffer
= st_bufferobj_unmap
;
672 functions
->CopyBufferSubData
= st_copy_buffer_subdata
;
673 functions
->ClearBufferSubData
= st_clear_buffer_subdata
;
674 functions
->BufferPageCommitment
= st_bufferobj_page_commitment
;
676 if (screen
->get_param(screen
, PIPE_CAP_INVALIDATE_BUFFER
))
677 functions
->InvalidateBufferSubData
= st_bufferobj_invalidate
;