replace _mesa_logbase2 with util_logbase2
diff --git a/src/mesa/state_tracker/st_cb_bufferobjects.c b/src/mesa/state_tracker/st_cb_bufferobjects.c
index a9104a90095d65b8a365b4bb0750885ecee0ea8f..ea0861d340cba8b29c8218058542ce3aaf81f902 100644
--- a/src/mesa/state_tracker/st_cb_bufferobjects.c
+++ b/src/mesa/state_tracker/st_cb_bufferobjects.c
@@ -33,7 +33,8 @@
 
 #include <inttypes.h>  /* for PRId64 macro */
 
-#include "main/imports.h"
+#include "main/errors.h"
+#include "util/imports.h"
 #include "main/mtypes.h"
 #include "main/arrayobj.h"
 #include "main/bufferobj.h"
@@ -42,6 +43,7 @@
 #include "st_cb_bufferobjects.h"
 #include "st_cb_memoryobjects.h"
 #include "st_debug.h"
+#include "st_util.h"
 
 #include "pipe/p_context.h"
 #include "pipe/p_defines.h"
@@ -129,10 +131,16 @@ st_bufferobj_subdata(struct gl_context *ctx,
     * even if the buffer is currently referenced by hardware - they
     * just queue the upload as dma rather than mapping the underlying
     * buffer directly.
+    *
+    * If the buffer is mapped, suppress implicit buffer range invalidation
+    * by using PIPE_TRANSFER_MAP_DIRECTLY.
     */
-   pipe_buffer_write(st_context(ctx)->pipe,
-                     st_obj->buffer,
-                     offset, size, data);
+   struct pipe_context *pipe = st_context(ctx)->pipe;
+
+   pipe->buffer_subdata(pipe, st_obj->buffer,
+                        _mesa_bufferobj_mapped(obj, MAP_USER) ?
+                           PIPE_TRANSFER_MAP_DIRECTLY : 0,
+                        offset, size, data);
 }
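
For readers following the new path: pipe_buffer_write is a convenience wrapper over buffer_subdata, and calling buffer_subdata directly lets the state tracker choose transfer flags per call. A minimal sketch of the flag choice, using the Mesa/gallium symbols from the hunk above (the helper name subdata_flags is hypothetical):

   /* Hypothetical condensation of the selection above: an active user
    * mapping must suppress implicit range invalidation, so request a
    * direct map instead of letting the driver discard the range. */
   static unsigned
   subdata_flags(const struct gl_buffer_object *obj)
   {
      return _mesa_bufferobj_mapped(obj, MAP_USER) ? PIPE_TRANSFER_MAP_DIRECTLY
                                                   : 0;
   }
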
 
 
@@ -164,6 +172,105 @@ st_bufferobj_get_subdata(struct gl_context *ctx,
                     offset, size, data);
 }
 
+
+/**
+ * Return a bitmask of PIPE_BIND_x flags corresponding to a GL buffer target.
+ */
+static unsigned
+buffer_target_to_bind_flags(GLenum target)
+{
+   switch (target) {
+   case GL_PIXEL_PACK_BUFFER_ARB:
+   case GL_PIXEL_UNPACK_BUFFER_ARB:
+      return PIPE_BIND_RENDER_TARGET | PIPE_BIND_SAMPLER_VIEW;
+   case GL_ARRAY_BUFFER_ARB:
+      return PIPE_BIND_VERTEX_BUFFER;
+   case GL_ELEMENT_ARRAY_BUFFER_ARB:
+      return PIPE_BIND_INDEX_BUFFER;
+   case GL_TEXTURE_BUFFER:
+      return PIPE_BIND_SAMPLER_VIEW;
+   case GL_TRANSFORM_FEEDBACK_BUFFER:
+      return PIPE_BIND_STREAM_OUTPUT;
+   case GL_UNIFORM_BUFFER:
+      return PIPE_BIND_CONSTANT_BUFFER;
+   case GL_DRAW_INDIRECT_BUFFER:
+   case GL_PARAMETER_BUFFER_ARB:
+      return PIPE_BIND_COMMAND_ARGS_BUFFER;
+   case GL_ATOMIC_COUNTER_BUFFER:
+   case GL_SHADER_STORAGE_BUFFER:
+      return PIPE_BIND_SHADER_BUFFER;
+   case GL_QUERY_BUFFER:
+      return PIPE_BIND_QUERY_BUFFER;
+   default:
+      return 0;
+   }
+}
+
+
+/**
+ * Return a bitmask of PIPE_RESOURCE_x flags corresponding to GL_MAP_x flags.
+ */
+static unsigned
+storage_flags_to_buffer_flags(GLbitfield storageFlags)
+{
+   unsigned flags = 0;
+   if (storageFlags & GL_MAP_PERSISTENT_BIT)
+      flags |= PIPE_RESOURCE_FLAG_MAP_PERSISTENT;
+   if (storageFlags & GL_MAP_COHERENT_BIT)
+      flags |= PIPE_RESOURCE_FLAG_MAP_COHERENT;
+   if (storageFlags & GL_SPARSE_STORAGE_BIT_ARB)
+      flags |= PIPE_RESOURCE_FLAG_SPARSE;
+   return flags;
+}
+
+
+/**
+ * From a buffer object's target, immutability flag, storage flags and
+ * usage hint, return a pipe_resource_usage value (PIPE_USAGE_DYNAMIC,
+ * STREAM, etc.).
+ */
+static enum pipe_resource_usage
+buffer_usage(GLenum target, GLboolean immutable,
+             GLbitfield storageFlags, GLenum usage)
+{
+   if (immutable) {
+      /* BufferStorage */
+      if (storageFlags & GL_CLIENT_STORAGE_BIT) {
+         if (storageFlags & GL_MAP_READ_BIT)
+            return PIPE_USAGE_STAGING;
+         else
+            return PIPE_USAGE_STREAM;
+      } else {
+         return PIPE_USAGE_DEFAULT;
+      }
+   }
+   else {
+      /* These are often read by the CPU, so enable CPU caches. */
+      if (target == GL_PIXEL_PACK_BUFFER ||
+          target == GL_PIXEL_UNPACK_BUFFER)
+         return PIPE_USAGE_STAGING;
+
+      /* BufferData */
+      switch (usage) {
+      case GL_DYNAMIC_DRAW:
+      case GL_DYNAMIC_COPY:
+         return PIPE_USAGE_DYNAMIC;
+      case GL_STREAM_DRAW:
+      case GL_STREAM_COPY:
+         return PIPE_USAGE_STREAM;
+      case GL_STATIC_READ:
+      case GL_DYNAMIC_READ:
+      case GL_STREAM_READ:
+         return PIPE_USAGE_STAGING;
+      case GL_STATIC_DRAW:
+      case GL_STATIC_COPY:
+      default:
+         return PIPE_USAGE_DEFAULT;
+      }
+   }
+}
+
+
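
Taken together, the three helpers above centralize what the deleted switch/if chains (in the hunks further down) computed inline at the creation site. Note the one behavioral tweak folded in: buffer_usage now returns PIPE_USAGE_STAGING for all non-immutable PIXEL_PACK/PIXEL_UNPACK buffers, replacing the removed XXX fall-through that only staged GL_STREAM_* unpack buffers. A hedged sketch of how the helpers compose when filling the pipe_resource template, mirroring the later hunk (tmpl is an illustrative local):

   struct pipe_resource tmpl = {0};
   tmpl.target  = PIPE_BUFFER;
   tmpl.format  = PIPE_FORMAT_R8_UNORM;   /* typeless stand-in */
   tmpl.bind    = buffer_target_to_bind_flags(target);
   tmpl.usage   = buffer_usage(target, st_obj->Base.Immutable,
                               storageFlags, usage);
   tmpl.flags   = storage_flags_to_buffer_flags(storageFlags);
   tmpl.width0  = size;                   /* buffers are 1-D */
   tmpl.height0 = tmpl.depth0 = 1;
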
 static ALWAYS_INLINE GLboolean
 bufferobj_data(struct gl_context *ctx,
                GLenum target,
@@ -180,7 +287,16 @@ bufferobj_data(struct gl_context *ctx,
    struct pipe_screen *screen = pipe->screen;
    struct st_buffer_object *st_obj = st_buffer_object(obj);
    struct st_memory_object *st_mem_obj = st_memory_object(memObj);
-   unsigned bind, pipe_usage, pipe_flags = 0;
+   bool is_mapped = _mesa_bufferobj_mapped(obj, MAP_USER);
+
+   if (size > UINT32_MAX || offset > UINT32_MAX) {
+      /* pipe_resource.width0 is 32 bits only and increasing it
+       * to 64 bits doesn't make much sense since hw support
+       * for > 4GB resources is limited.
+       */
+      st_obj->Base.Size = 0;
+      return GL_FALSE;
+   }
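
The early return exists because pipe_resource.width0 is a 32-bit field; without the check, an oversized request would silently truncate on assignment. A tiny illustration (the values are invented):

   GLsizeiptr req = (GLsizeiptr)5 << 30;   /* 5 GiB request */
   uint32_t w0 = (uint32_t)req;            /* would wrap to 1 GiB */
   assert(req > UINT32_MAX);               /* hence the GL_FALSE path */
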
 
    if (target != GL_EXTERNAL_VIRTUAL_MEMORY_BUFFER_AMD &&
        size && st_obj->buffer &&
@@ -191,11 +307,19 @@ bufferobj_data(struct gl_context *ctx,
          /* Just discard the old contents and write new data.
           * This should be the same as creating a new buffer, but we avoid
           * a lot of validation in Mesa.
+          *
+          * If the buffer is mapped, we can't discard it.
+          *
+          * PIPE_TRANSFER_MAP_DIRECTLY suppresses implicit buffer range
+          * invalidation.
           */
          pipe->buffer_subdata(pipe, st_obj->buffer,
-                              PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE,
+                              is_mapped ? PIPE_TRANSFER_MAP_DIRECTLY :
+                                          PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE,
                               0, size, data);
          return GL_TRUE;
+      } else if (is_mapped) {
+         return GL_TRUE; /* can't reallocate, nothing to do */
       } else if (screen->get_param(screen, PIPE_CAP_INVALIDATE_BUFFER)) {
          pipe->invalidate_resource(pipe, st_obj->buffer);
          return GL_TRUE;
@@ -206,97 +330,13 @@ bufferobj_data(struct gl_context *ctx,
    st_obj->Base.Usage = usage;
    st_obj->Base.StorageFlags = storageFlags;
 
-   switch (target) {
-   case GL_PIXEL_PACK_BUFFER_ARB:
-   case GL_PIXEL_UNPACK_BUFFER_ARB:
-      bind = PIPE_BIND_RENDER_TARGET | PIPE_BIND_SAMPLER_VIEW;
-      break;
-   case GL_ARRAY_BUFFER_ARB:
-      bind = PIPE_BIND_VERTEX_BUFFER;
-      break;
-   case GL_ELEMENT_ARRAY_BUFFER_ARB:
-      bind = PIPE_BIND_INDEX_BUFFER;
-      break;
-   case GL_TEXTURE_BUFFER:
-      bind = PIPE_BIND_SAMPLER_VIEW;
-      break;
-   case GL_TRANSFORM_FEEDBACK_BUFFER:
-      bind = PIPE_BIND_STREAM_OUTPUT;
-      break;
-   case GL_UNIFORM_BUFFER:
-      bind = PIPE_BIND_CONSTANT_BUFFER;
-      break;
-   case GL_DRAW_INDIRECT_BUFFER:
-   case GL_PARAMETER_BUFFER_ARB:
-      bind = PIPE_BIND_COMMAND_ARGS_BUFFER;
-      break;
-   case GL_ATOMIC_COUNTER_BUFFER:
-   case GL_SHADER_STORAGE_BUFFER:
-      bind = PIPE_BIND_SHADER_BUFFER;
-      break;
-   case GL_QUERY_BUFFER:
-      bind = PIPE_BIND_QUERY_BUFFER;
-      break;
-   default:
-      bind = 0;
-   }
-
-   /* Set usage. */
-   if (st_obj->Base.Immutable) {
-      /* BufferStorage */
-      if (storageFlags & GL_CLIENT_STORAGE_BIT) {
-         if (storageFlags & GL_MAP_READ_BIT)
-            pipe_usage = PIPE_USAGE_STAGING;
-         else
-            pipe_usage = PIPE_USAGE_STREAM;
-      } else {
-         pipe_usage = PIPE_USAGE_DEFAULT;
-      }
-   }
-   else {
-      /* BufferData */
-      switch (usage) {
-      case GL_STATIC_DRAW:
-      case GL_STATIC_COPY:
-      default:
-         pipe_usage = PIPE_USAGE_DEFAULT;
-         break;
-      case GL_DYNAMIC_DRAW:
-      case GL_DYNAMIC_COPY:
-         pipe_usage = PIPE_USAGE_DYNAMIC;
-         break;
-      case GL_STREAM_DRAW:
-      case GL_STREAM_COPY:
-         /* XXX: Remove this test and fall-through when we have PBO unpacking
-          * acceleration. Right now, PBO unpacking is done by the CPU, so we
-          * have to make sure CPU reads are fast.
-          */
-         if (target != GL_PIXEL_UNPACK_BUFFER_ARB) {
-            pipe_usage = PIPE_USAGE_STREAM;
-            break;
-         }
-         /* fall through */
-      case GL_STATIC_READ:
-      case GL_DYNAMIC_READ:
-      case GL_STREAM_READ:
-         pipe_usage = PIPE_USAGE_STAGING;
-         break;
-      }
-   }
-
-   /* Set flags. */
-   if (storageFlags & GL_MAP_PERSISTENT_BIT)
-      pipe_flags |= PIPE_RESOURCE_FLAG_MAP_PERSISTENT;
-   if (storageFlags & GL_MAP_COHERENT_BIT)
-      pipe_flags |= PIPE_RESOURCE_FLAG_MAP_COHERENT;
-   if (storageFlags & GL_SPARSE_STORAGE_BIT_ARB)
-      pipe_flags |= PIPE_RESOURCE_FLAG_SPARSE;
-
    pipe_resource_reference( &st_obj->buffer, NULL );
 
+   const unsigned bindings = buffer_target_to_bind_flags(target);
+
    if (ST_DEBUG & DEBUG_BUFFER) {
       debug_printf("Create buffer size %" PRId64 " bind 0x%x\n",
-                   (int64_t) size, bind);
+                   (int64_t) size, bindings);
    }
 
    if (size != 0) {
@@ -305,9 +345,10 @@ bufferobj_data(struct gl_context *ctx,
       memset(&buffer, 0, sizeof buffer);
       buffer.target = PIPE_BUFFER;
       buffer.format = PIPE_FORMAT_R8_UNORM; /* want TYPELESS or similar */
-      buffer.bind = bind;
-      buffer.usage = pipe_usage;
-      buffer.flags = pipe_flags;
+      buffer.bind = bindings;
+      buffer.usage =
+         buffer_usage(target, st_obj->Base.Immutable, storageFlags, usage);
+      buffer.flags = storage_flags_to_buffer_flags(storageFlags);
       buffer.width0 = size;
       buffer.height0 = 1;
       buffer.depth0 = 1;
@@ -339,8 +380,10 @@ bufferobj_data(struct gl_context *ctx,
    /* The current buffer may be bound, so we have to revalidate all atoms that
     * might be using it.
     */
-   /* TODO: Add arrays to usage history */
-   ctx->NewDriverState |= ST_NEW_VERTEX_ARRAYS;
+   if (st_obj->Base.UsageHistory & USAGE_ARRAY_BUFFER)
+      ctx->NewDriverState |= ST_NEW_VERTEX_ARRAYS;
+   /* if (st_obj->Base.UsageHistory & USAGE_ELEMENT_ARRAY_BUFFER) */
+   /*    ctx->NewDriverState |= TODO: Handle indices as gallium state; */
    if (st_obj->Base.UsageHistory & USAGE_UNIFORM_BUFFER)
       ctx->NewDriverState |= ST_NEW_UNIFORM_BUFFER;
    if (st_obj->Base.UsageHistory & USAGE_SHADER_STORAGE_BUFFER)
@@ -401,8 +444,8 @@ st_bufferobj_invalidate(struct gl_context *ctx,
    if (offset != 0 || size != obj->Size)
       return;
 
-   /* Nothing to invalidate. */
-   if (!st_obj->buffer)
+   /* If the buffer is mapped, we can't invalidate it. */
+   if (!st_obj->buffer || _mesa_bufferobj_mapped(obj, MAP_USER))
       return;
 
    pipe->invalidate_resource(pipe, st_obj->buffer);
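
The new guard matters because GL permits invalidating a persistently mapped buffer, while invalidate_resource may reallocate the backing storage out from under a live pointer. A hedged GL-side illustration (buf, verts and size are placeholders):

   void *ptr = glMapBufferRange(GL_ARRAY_BUFFER, 0, size,
                                GL_MAP_WRITE_BIT | GL_MAP_PERSISTENT_BIT);
   glInvalidateBufferData(buf);   /* legal on a persistent mapping...    */
   memcpy(ptr, verts, size);      /* ...so ptr must stay valid; the
                                   * state tracker now skips the pipe
                                   * invalidation while mapped */
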
@@ -410,17 +453,13 @@ st_bufferobj_invalidate(struct gl_context *ctx,
 
 
 /**
- * Called via glMapBufferRange().
+ * Convert GLbitfield of GL_MAP_x flags to gallium pipe_transfer_usage flags.
+ * \param wholeBuffer  is the whole buffer being mapped?
  */
-static void *
-st_bufferobj_map_range(struct gl_context *ctx,
-                       GLintptr offset, GLsizeiptr length, GLbitfield access,
-                       struct gl_buffer_object *obj,
-                       gl_map_buffer_index index)
+enum pipe_transfer_usage
+st_access_flags_to_transfer_flags(GLbitfield access, bool wholeBuffer)
 {
-   struct pipe_context *pipe = st_context(ctx)->pipe;
-   struct st_buffer_object *st_obj = st_buffer_object(obj);
-   enum pipe_transfer_usage flags = 0x0;
+   enum pipe_transfer_usage flags = 0;
 
    if (access & GL_MAP_WRITE_BIT)
       flags |= PIPE_TRANSFER_WRITE;
@@ -435,7 +474,7 @@ st_bufferobj_map_range(struct gl_context *ctx,
       flags |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
    }
    else if (access & GL_MAP_INVALIDATE_RANGE_BIT) {
-      if (offset == 0 && length == obj->Size)
+      if (wholeBuffer)
          flags |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
       else
          flags |= PIPE_TRANSFER_DISCARD_RANGE;
@@ -456,15 +495,35 @@ st_bufferobj_map_range(struct gl_context *ctx,
    if (access & MESA_MAP_NOWAIT_BIT)
       flags |= PIPE_TRANSFER_DONTBLOCK;
 
+   return flags;
+}
+
+
+/**
+ * Called via glMapBufferRange().
+ */
+static void *
+st_bufferobj_map_range(struct gl_context *ctx,
+                       GLintptr offset, GLsizeiptr length, GLbitfield access,
+                       struct gl_buffer_object *obj,
+                       gl_map_buffer_index index)
+{
+   struct pipe_context *pipe = st_context(ctx)->pipe;
+   struct st_buffer_object *st_obj = st_buffer_object(obj);
+
    assert(offset >= 0);
    assert(length >= 0);
    assert(offset < obj->Size);
    assert(offset + length <= obj->Size);
 
+   const enum pipe_transfer_usage transfer_flags =
+      st_access_flags_to_transfer_flags(access,
+                                        offset == 0 && length == obj->Size);
+
    obj->Mappings[index].Pointer = pipe_buffer_map_range(pipe,
                                                         st_obj->buffer,
                                                         offset, length,
-                                                        flags,
+                                                        transfer_flags,
                                                         &st_obj->transfer[index]);
    if (obj->Mappings[index].Pointer) {
       obj->Mappings[index].Offset = offset;
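
Since st_access_flags_to_transfer_flags is now a non-static helper (presumably exported via the new st_util.h include above), other map paths can share the conversion. A hedged example of what it yields, per the switch logic shown earlier:

   /* A write-only map that invalidates part of the buffer: */
   enum pipe_transfer_usage f =
      st_access_flags_to_transfer_flags(GL_MAP_WRITE_BIT |
                                        GL_MAP_INVALIDATE_RANGE_BIT,
                                        false /* wholeBuffer */);
   /* f == PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD_RANGE */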