struct pb_validate;
struct pipe_fence_handle;
+enum pb_usage_flags {
+ PB_USAGE_CPU_READ = (1 << 0),
+ PB_USAGE_CPU_WRITE = (1 << 1),
+ PB_USAGE_GPU_READ = (1 << 2),
+ PB_USAGE_GPU_WRITE = (1 << 3),
+ PB_USAGE_DONTBLOCK = (1 << 9),
+ PB_USAGE_UNSYNCHRONIZED = (1 << 10),
+};
-#define PB_USAGE_CPU_READ (1 << 0)
-#define PB_USAGE_CPU_WRITE (1 << 1)
-#define PB_USAGE_GPU_READ (1 << 2)
-#define PB_USAGE_GPU_WRITE (1 << 3)
-#define PB_USAGE_UNSYNCHRONIZED (1 << 10)
-#define PB_USAGE_DONTBLOCK (1 << 9)
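For context, callers combine these values as a bitmask. A minimal usage sketch against the pb_map() wrapper shown further down (buf and flush_ctx are assumed to come from the caller):

    /* Map for CPU writes, but fail instead of stalling on a busy buffer. */
    void *ptr = pb_map(buf, PB_USAGE_CPU_WRITE | PB_USAGE_DONTBLOCK, flush_ctx);
    if (!ptr) {
       /* Buffer still in use; retry later or wait on its fence. */
    }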
+/* For error checking elsewhere */
+#define PB_USAGE_ALL (PB_USAGE_CPU_READ | \
+ PB_USAGE_CPU_WRITE | \
+ PB_USAGE_GPU_READ | \
+ PB_USAGE_GPU_WRITE | \
+ PB_USAGE_DONTBLOCK | \
+ PB_USAGE_UNSYNCHRONIZED)
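The mask exists purely for error checking: an implementation can assert that no bits outside the enum were passed in, as the debug buffer manager does below:

    assert((flags & ~PB_USAGE_ALL) == 0);  /* reject unknown usage bits */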
#define PB_USAGE_CPU_READ_WRITE \
( PB_USAGE_CPU_READ | PB_USAGE_CPU_WRITE )
struct pb_desc
{
unsigned alignment;
- unsigned usage;
+ enum pb_usage_flags usage;
};
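A minimal sketch of filling in a pb_desc for an upload-style buffer (the alignment and usage values here are illustrative assumptions, not taken from this patch):

    struct pb_desc desc;
    desc.alignment = 4096;                                /* page alignment */
    desc.usage = PB_USAGE_CPU_WRITE | PB_USAGE_GPU_READ;  /* CPU fills, GPU reads */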
struct pipe_reference reference;
unsigned alignment;
pb_size size;
- unsigned usage;
+ enum pb_usage_flags usage;
/**
* Pointer to the virtual function table.
* flags is a bitmask of PB_USAGE_CPU_READ/WRITE.
*/
void *(*map)( struct pb_buffer *buf,
- unsigned flags, void *flush_ctx );
+ enum pb_usage_flags flags, void *flush_ctx );
void (*unmap)( struct pb_buffer *buf );
enum pipe_error (*validate)( struct pb_buffer *buf,
struct pb_validate *vl,
- unsigned flags );
+ enum pb_usage_flags flags );
void (*fence)( struct pb_buffer *buf,
struct pipe_fence_handle *fence );
*/
static inline void *
pb_map(struct pb_buffer *buf,
- unsigned flags, void *flush_ctx)
+ enum pb_usage_flags flags, void *flush_ctx)
{
assert(buf);
if (!buf)
static inline enum pipe_error
-pb_validate(struct pb_buffer *buf, struct pb_validate *vl, unsigned flags)
+pb_validate(struct pb_buffer *buf, struct pb_validate *vl,
+ enum pb_usage_flags flags)
{
assert(buf);
if (!buf)
* A bitmask of PB_USAGE_CPU/GPU_READ/WRITE describing the current
* buffer usage.
*/
- unsigned flags;
+ enum pb_usage_flags flags;
unsigned mapcount;
static void *
fenced_buffer_map(struct pb_buffer *buf,
- unsigned flags, void *flush_ctx)
+ enum pb_usage_flags flags, void *flush_ctx)
{
struct fenced_buffer *fenced_buf = fenced_buffer(buf);
struct fenced_manager *fenced_mgr = fenced_buf->mgr;
static enum pipe_error
fenced_buffer_validate(struct pb_buffer *buf,
struct pb_validate *vl,
- unsigned flags)
+ enum pb_usage_flags flags)
{
struct fenced_buffer *fenced_buf = fenced_buffer(buf);
struct fenced_manager *fenced_mgr = fenced_buf->mgr;
static void *
malloc_buffer_map(struct pb_buffer *buf,
- unsigned flags,
+ enum pb_usage_flags flags,
void *flush_ctx)
{
return malloc_buffer(buf)->data;
static enum pipe_error
malloc_buffer_validate(struct pb_buffer *buf,
struct pb_validate *vl,
- unsigned flags)
+ enum pb_usage_flags flags)
{
assert(0);
return PIPE_ERROR;
static void *
pb_cache_buffer_map(struct pb_buffer *_buf,
- unsigned flags, void *flush_ctx)
+ enum pb_usage_flags flags, void *flush_ctx)
{
struct pb_cache_buffer *buf = pb_cache_buffer(_buf);
return pb_map(buf->buffer, flags, flush_ctx);
static enum pipe_error
pb_cache_buffer_validate(struct pb_buffer *_buf,
struct pb_validate *vl,
- unsigned flags)
+ enum pb_usage_flags flags)
{
struct pb_cache_buffer *buf = pb_cache_buffer(_buf);
return pb_validate(buf->buffer, vl, flags);
static void *
pb_debug_buffer_map(struct pb_buffer *_buf,
- unsigned flags, void *flush_ctx)
+ enum pb_usage_flags flags, void *flush_ctx)
{
struct pb_debug_buffer *buf = pb_debug_buffer(_buf);
void *map;
static enum pipe_error
pb_debug_buffer_validate(struct pb_buffer *_buf,
struct pb_validate *vl,
- unsigned flags)
+ enum pb_usage_flags flags)
{
struct pb_debug_buffer *buf = pb_debug_buffer(_buf);
+ assert((flags & ~PB_USAGE_ALL) == 0);
+
mtx_lock(&buf->mutex);
if(buf->map_count) {
debug_printf("%s: attempting to validate a mapped buffer\n", __FUNCTION__);
static void *
mm_buffer_map(struct pb_buffer *buf,
- unsigned flags,
+ enum pb_usage_flags flags,
void *flush_ctx)
{
struct mm_buffer *mm_buf = mm_buffer(buf);
static enum pipe_error
mm_buffer_validate(struct pb_buffer *buf,
struct pb_validate *vl,
- unsigned flags)
+ enum pb_usage_flags flags)
{
struct mm_buffer *mm_buf = mm_buffer(buf);
struct mm_pb_manager *mm = mm_buf->mgr;
static void *
pb_ondemand_buffer_map(struct pb_buffer *_buf,
- unsigned flags, void *flush_ctx)
+ enum pb_usage_flags flags, void *flush_ctx)
{
struct pb_ondemand_buffer *buf = pb_ondemand_buffer(_buf);
static enum pipe_error
pb_ondemand_buffer_validate(struct pb_buffer *_buf,
struct pb_validate *vl,
- unsigned flags)
+ enum pb_usage_flags flags)
{
struct pb_ondemand_buffer *buf = pb_ondemand_buffer(_buf);
enum pipe_error ret;
static void *
-pool_buffer_map(struct pb_buffer *buf, unsigned flags, void *flush_ctx)
+pool_buffer_map(struct pb_buffer *buf, enum pb_usage_flags flags,
+ void *flush_ctx)
{
struct pool_buffer *pool_buf = pool_buffer(buf);
struct pool_pb_manager *pool = pool_buf->mgr;
static enum pipe_error
pool_buffer_validate(struct pb_buffer *buf,
struct pb_validate *vl,
- unsigned flags)
+ enum pb_usage_flags flags)
{
struct pool_buffer *pool_buf = pool_buffer(buf);
struct pool_pb_manager *pool = pool_buf->mgr;
static void *
pb_slab_buffer_map(struct pb_buffer *_buf,
- unsigned flags,
+ enum pb_usage_flags flags,
void *flush_ctx)
{
struct pb_slab_buffer *buf = pb_slab_buffer(_buf);
static enum pipe_error
pb_slab_buffer_validate(struct pb_buffer *_buf,
struct pb_validate *vl,
- unsigned flags)
+ enum pb_usage_flags flags)
{
struct pb_slab_buffer *buf = pb_slab_buffer(_buf);
return pb_validate(buf->slab->bo, vl, flags);
struct pb_slab_range_manager *mgr = pb_slab_range_manager(_mgr);
pb_size bufSize;
pb_size reqSize = size;
unsigned i;
if(desc->alignment > reqSize)
reqSize = desc->alignment;
enum pipe_error
pb_validate_add_buffer(struct pb_validate *vl,
struct pb_buffer *buf,
- unsigned flags)
+ enum pb_usage_flags flags)
{
assert(buf);
if (!buf)
enum pipe_error
pb_validate_add_buffer(struct pb_validate *vl,
struct pb_buffer *buf,
- unsigned flags);
+ enum pb_usage_flags flags);
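A hedged sketch of calling the retyped entry point (vl and buf are assumed to already exist; how errors are handled is up to the winsys):

    enum pipe_error ret =
       pb_validate_add_buffer(vl, buf,
                              PB_USAGE_GPU_READ | PB_USAGE_GPU_WRITE);
    if (ret != PIPE_OK) {
       /* e.g. validation list full: flush and try again */
    }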
enum pipe_error
pb_validate_foreach(struct pb_validate *vl,