+}
+
+
+/**
+ * Return bitmask of PIPE_RESOURCE_FLAG_x flags corresponding to
+ * GL_MAP_x flags.
+ */
+static unsigned
+storage_flags_to_buffer_flags(GLbitfield storageFlags)
+{
+   unsigned flags = 0;
+   if (storageFlags & GL_MAP_PERSISTENT_BIT)
+      flags |= PIPE_RESOURCE_FLAG_MAP_PERSISTENT;
+   if (storageFlags & GL_MAP_COHERENT_BIT)
+      flags |= PIPE_RESOURCE_FLAG_MAP_COHERENT;
+   if (storageFlags & GL_SPARSE_STORAGE_BIT_ARB)
+      flags |= PIPE_RESOURCE_FLAG_SPARSE;
+   return flags;
+}
+
+
+/**
+ * From a buffer object's target, immutability flag, storage flags and
+ * usage hint, return a pipe_resource_usage value (PIPE_USAGE_DYNAMIC,
+ * STREAM, etc).
+ */
+static enum pipe_resource_usage
+buffer_usage(GLenum target, GLboolean immutable,
+             GLbitfield storageFlags, GLenum usage)
+{
+   if (immutable) {
+      /* BufferStorage */
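+      /* GL_CLIENT_STORAGE_BIT requests CPU-accessible storage.  Readable
+       * buffers want fast CPU access (STAGING); write-only buffers are
+       * treated as upload-style data (STREAM).
+       */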
+      if (storageFlags & GL_CLIENT_STORAGE_BIT) {
+         if (storageFlags & GL_MAP_READ_BIT)
+            return PIPE_USAGE_STAGING;
+         else
+            return PIPE_USAGE_STREAM;
+      } else {
+         return PIPE_USAGE_DEFAULT;
+      }
+   }
+   else {
+      /* These are often read by the CPU, so enable CPU caches. */
+      if (target == GL_PIXEL_PACK_BUFFER ||
+          target == GL_PIXEL_UNPACK_BUFFER)
+         return PIPE_USAGE_STAGING;
+
+      /* BufferData */
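+      /* Map the GL usage hints by access pattern: the _READ hints imply
+       * CPU readback (STAGING), the DYNAMIC and STREAM hints map to
+       * their pipe counterparts, and STATIC data favors GPU-resident
+       * memory (DEFAULT).
+       */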
+      switch (usage) {
+      case GL_DYNAMIC_DRAW:
+      case GL_DYNAMIC_COPY:
+         return PIPE_USAGE_DYNAMIC;
+      case GL_STREAM_DRAW:
+      case GL_STREAM_COPY:
+         return PIPE_USAGE_STREAM;
+      case GL_STATIC_READ:
+      case GL_DYNAMIC_READ:
+      case GL_STREAM_READ:
+         return PIPE_USAGE_STAGING;
+      case GL_STATIC_DRAW:
+      case GL_STATIC_COPY:
+      default:
+         return PIPE_USAGE_DEFAULT;
+      }
+   }
+}
+
+
+static ALWAYS_INLINE GLboolean
+bufferobj_data(struct gl_context *ctx,
+               GLenum target,
+               GLsizeiptrARB size,
+               const void *data,
+               struct gl_memory_object *memObj,
+               GLuint64 offset,
+               GLenum usage,
+               GLbitfield storageFlags,
+               struct gl_buffer_object *obj)
+{
+   struct st_context *st = st_context(ctx);
+   struct pipe_context *pipe = st->pipe;
+   struct pipe_screen *screen = pipe->screen;
+   struct st_buffer_object *st_obj = st_buffer_object(obj);
+   struct st_memory_object *st_mem_obj = st_memory_object(memObj);
+   bool is_mapped = _mesa_bufferobj_mapped(obj, MAP_USER);
+
+   if (size > UINT32_MAX || offset > UINT32_MAX) {
+      /* pipe_resource.width0 is 32 bits only and increasing it
+       * to 64 bits doesn't make much sense since hw support
+       * for > 4GB resources is limited.
+       */
+      st_obj->Base.Size = 0;
+      return GL_FALSE;
+   }
+
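+   /* Try to reuse the existing pipe buffer: if the size, usage and
+    * storage flags are unchanged, there is no need to reallocate it.
+    */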
+   if (target != GL_EXTERNAL_VIRTUAL_MEMORY_BUFFER_AMD &&
+       size && st_obj->buffer &&
+       st_obj->Base.Size == size &&
+       st_obj->Base.Usage == usage &&
+       st_obj->Base.StorageFlags == storageFlags) {
+      if (data) {
+         /* Just discard the old contents and write new data.
+          * This should be the same as creating a new buffer, but we avoid
+          * a lot of validation in Mesa.
+          *
+          * If the buffer is mapped, we can't discard it.
+          *
+          * PIPE_TRANSFER_MAP_DIRECTLY suppresses implicit buffer range
+          * invalidation.
+          */
+         pipe->buffer_subdata(pipe, st_obj->buffer,
+                              is_mapped ? PIPE_TRANSFER_MAP_DIRECTLY :
+                                          PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE,
+                              0, size, data);
+         return GL_TRUE;
+      } else if (is_mapped) {
+         return GL_TRUE; /* can't reallocate, nothing to do */
+      } else if (screen->get_param(screen, PIPE_CAP_INVALIDATE_BUFFER)) {
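+         /* No new data to write; ask the driver to discard the old
+          * contents so it can rename the buffer storage instead of
+          * reallocating here.
+          */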
+         pipe->invalidate_resource(pipe, st_obj->buffer);
+         return GL_TRUE;
+      }
+   }
+
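+   /* Reallocation path: record the new buffer parameters. */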
+   st_obj->Base.Size = size;
+   st_obj->Base.Usage = usage;
+   st_obj->Base.StorageFlags = storageFlags;