gallium: add PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE and corresponding cap
[mesa.git] / src / gallium / auxiliary / util / u_inlines.h
index 582aacdca1e4a2851be4dc464d6fb4a2a2826605..e0ed594c9fedc5b88c4ef6f524e04509e3826c31 100644
@@ -1,6 +1,6 @@
 /**************************************************************************
  * 
- * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * Copyright 2007 VMware, Inc.
  * All Rights Reserved.
  * 
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -18,7 +18,7 @@
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
@@ -51,13 +51,13 @@ extern "C" {
  */
 
 
-static INLINE void
+static inline void
 pipe_reference_init(struct pipe_reference *reference, unsigned count)
 {
    p_atomic_set(&reference->count, count);
 }
 
-static INLINE boolean
+static inline boolean
 pipe_is_referenced(struct pipe_reference *reference)
 {
    return p_atomic_read(&reference->count) != 0;
@@ -69,7 +69,7 @@ pipe_is_referenced(struct pipe_reference *reference)
  * Both 'ptr' and 'reference' may be NULL.
  * \return TRUE if the object's refcount hits zero and should be destroyed.
  */
-static INLINE boolean
+static inline boolean
 pipe_reference_described(struct pipe_reference *ptr, 
                          struct pipe_reference *reference, 
                          debug_reference_descriptor get_desc)
@@ -96,14 +96,14 @@ pipe_reference_described(struct pipe_reference *ptr,
    return destroy;
 }
 
-static INLINE boolean
+static inline boolean
 pipe_reference(struct pipe_reference *ptr, struct pipe_reference *reference)
 {
    return pipe_reference_described(ptr, reference, 
                                    (debug_reference_descriptor)debug_describe_reference);
 }
 
-static INLINE void
+static inline void
 pipe_surface_reference(struct pipe_surface **ptr, struct pipe_surface *surf)
 {
    struct pipe_surface *old_surf = *ptr;
@@ -120,7 +120,7 @@ pipe_surface_reference(struct pipe_surface **ptr, struct pipe_surface *surf)
  * of using a deleted context's surface_destroy() method when freeing a surface
  * that's shared by multiple contexts.
  */
-static INLINE void
+static inline void
 pipe_surface_release(struct pipe_context *pipe, struct pipe_surface **ptr)
 {
    if (pipe_reference_described(&(*ptr)->reference, NULL,
@@ -130,18 +130,26 @@ pipe_surface_release(struct pipe_context *pipe, struct pipe_surface **ptr)
 }
 
 
-static INLINE void
+static inline void
 pipe_resource_reference(struct pipe_resource **ptr, struct pipe_resource *tex)
 {
    struct pipe_resource *old_tex = *ptr;
 
    if (pipe_reference_described(&(*ptr)->reference, &tex->reference, 
-                                (debug_reference_descriptor)debug_describe_resource))
-      old_tex->screen->resource_destroy(old_tex->screen, old_tex);
+                                (debug_reference_descriptor)debug_describe_resource)) {
+      /* Avoid recursion, which would prevent inlining this function */
+      do {
+         struct pipe_resource *next = old_tex->next;
+
+         old_tex->screen->resource_destroy(old_tex->screen, old_tex);
+         old_tex = next;
+      } while (pipe_reference_described(&old_tex->reference, NULL,
+                                        (debug_reference_descriptor)debug_describe_resource));
+   }
    *ptr = tex;
 }
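
Illustrative usage of pipe_resource_reference (a sketch; `res` and `new_res` are hypothetical caller variables, not part of this header). The loop above walks the `->next` chain iteratively so that destroying a chained resource does not recurse:

/* Take and later drop a counted reference to a resource (sketch). */
struct pipe_resource *res = NULL;

pipe_resource_reference(&res, new_res);   /* increments new_res's refcount */
/* ... use res ... */
pipe_resource_reference(&res, NULL);      /* drops it; destroys the chain at refcount 0 */
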
 
-static INLINE void
+static inline void
 pipe_sampler_view_reference(struct pipe_sampler_view **ptr, struct pipe_sampler_view *view)
 {
    struct pipe_sampler_view *old_view = *ptr;
@@ -158,7 +166,7 @@ pipe_sampler_view_reference(struct pipe_sampler_view **ptr, struct pipe_sampler_
  * work-around for fixing a dangling context pointer problem when textures
  * are shared by multiple contexts.  XXX fix this someday.
  */
-static INLINE void
+static inline void
 pipe_sampler_view_release(struct pipe_context *ctx,
                           struct pipe_sampler_view **ptr)
 {
@@ -173,8 +181,7 @@ pipe_sampler_view_release(struct pipe_context *ctx,
    *ptr = NULL;
 }
 
-
-static INLINE void
+static inline void
 pipe_so_target_reference(struct pipe_stream_output_target **ptr,
                          struct pipe_stream_output_target *target)
 {
@@ -186,7 +193,26 @@ pipe_so_target_reference(struct pipe_stream_output_target **ptr,
    *ptr = target;
 }
 
-static INLINE void
+static inline void
+pipe_vertex_buffer_unreference(struct pipe_vertex_buffer *dst)
+{
+   if (dst->is_user_buffer)
+      dst->buffer.user = NULL;
+   else
+      pipe_resource_reference(&dst->buffer.resource, NULL);
+}
+
+static inline void
+pipe_vertex_buffer_reference(struct pipe_vertex_buffer *dst,
+                             const struct pipe_vertex_buffer *src)
+{
+   pipe_vertex_buffer_unreference(dst);
+   if (!src->is_user_buffer)
+      pipe_resource_reference(&dst->buffer.resource, src->buffer.resource);
+   memcpy(dst, src, sizeof(*src));
+}
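
A hedged sketch of how the new vertex-buffer helpers are meant to be used when copying bindings; `dst`, `src` and `count` are illustrative. User pointers are not reference counted, so only real resources go through pipe_resource_reference:

/* Copy an array of vertex-buffer slots while keeping refcounts correct (sketch). */
static void
copy_vertex_buffers(struct pipe_vertex_buffer *dst,
                    const struct pipe_vertex_buffer *src, unsigned count)
{
   for (unsigned i = 0; i < count; i++)
      pipe_vertex_buffer_reference(&dst[i], &src[i]);
}
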
+
+static inline void
 pipe_surface_reset(struct pipe_context *ctx, struct pipe_surface* ps,
                    struct pipe_resource *pt, unsigned level, unsigned layer)
 {
@@ -199,7 +225,7 @@ pipe_surface_reset(struct pipe_context *ctx, struct pipe_surface* ps,
    ps->context = ctx;
 }
 
-static INLINE void
+static inline void
 pipe_surface_init(struct pipe_context *ctx, struct pipe_surface* ps,
                   struct pipe_resource *pt, unsigned level, unsigned layer)
 {
@@ -209,7 +235,7 @@ pipe_surface_init(struct pipe_context *ctx, struct pipe_surface* ps,
 }
 
 /* Return true if the surfaces are equal. */
-static INLINE boolean
+static inline boolean
 pipe_surface_equal(struct pipe_surface *s1, struct pipe_surface *s2)
 {
    return s1->texture == s2->texture &&
@@ -227,10 +253,16 @@ pipe_surface_equal(struct pipe_surface *s1, struct pipe_surface *s2)
  * Convenience wrappers for screen buffer functions.
  */
 
-static INLINE struct pipe_resource *
+
+/**
+ * Create a new resource.
+ * \param bind  bitmask of PIPE_BIND_x flags
+ * \param usage  a PIPE_USAGE_x value
+ */
+static inline struct pipe_resource *
 pipe_buffer_create( struct pipe_screen *screen,
                    unsigned bind,
-                   unsigned usage,
+                   enum pipe_resource_usage usage,
                    unsigned size )
 {
    struct pipe_resource buffer;
@@ -247,12 +279,20 @@ pipe_buffer_create( struct pipe_screen *screen,
    return screen->resource_create(screen, &buffer);
 }
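
A small usage sketch combining pipe_buffer_create with pipe_buffer_write (defined further below); `screen`, `pipe` and the data array are assumed to exist in the caller:

/* Create a small constant buffer and upload initial data (sketch). */
float constants[16] = {0};
struct pipe_resource *cbuf =
   pipe_buffer_create(screen, PIPE_BIND_CONSTANT_BUFFER,
                      PIPE_USAGE_DEFAULT, sizeof(constants));
if (cbuf)
   pipe_buffer_write(pipe, cbuf, 0, sizeof(constants), constants);
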
 
-static INLINE void *
+
+/**
+ * Map a range of a resource.
+ * \param offset  start of region, in bytes 
+ * \param length  size of region, in bytes 
+ * \param access  bitmask of PIPE_TRANSFER_x flags
+ * \param transfer  returns a transfer object
+ */
+static inline void *
 pipe_buffer_map_range(struct pipe_context *pipe,
                      struct pipe_resource *buffer,
                      unsigned offset,
                      unsigned length,
-                     unsigned usage,
+                     unsigned access,
                      struct pipe_transfer **transfer)
 {
    struct pipe_box box;
@@ -264,8 +304,8 @@ pipe_buffer_map_range(struct pipe_context *pipe,
 
    u_box_1d(offset, length, &box);
 
-   map = pipe->transfer_map(pipe, buffer, 0, usage, &box, transfer);
-   if (map == NULL) {
+   map = pipe->transfer_map(pipe, buffer, 0, access, &box, transfer);
+   if (!map) {
       return NULL;
    }
 
@@ -273,24 +313,29 @@ pipe_buffer_map_range(struct pipe_context *pipe,
 }
 
 
-static INLINE void *
+/**
+ * Map a whole resource.
+ * \param access  bitmask of PIPE_TRANSFER_x flags
+ * \param transfer  returns a transfer object
+ */
+static inline void *
 pipe_buffer_map(struct pipe_context *pipe,
                 struct pipe_resource *buffer,
-                unsigned usage,
+                unsigned access,
                 struct pipe_transfer **transfer)
 {
-   return pipe_buffer_map_range(pipe, buffer, 0, buffer->width0, usage, transfer);
+   return pipe_buffer_map_range(pipe, buffer, 0, buffer->width0, access, transfer);
 }
 
 
-static INLINE void
+static inline void
 pipe_buffer_unmap(struct pipe_context *pipe,
                   struct pipe_transfer *transfer)
 {
    pipe->transfer_unmap(pipe, transfer);
 }
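
A hedged read-back sketch using pipe_buffer_map and pipe_buffer_unmap (essentially what pipe_buffer_read below does); `pipe`, `buf` and `dst_data` are assumed caller variables:

/* Map a buffer for reading, copy its contents out, then unmap (sketch). */
struct pipe_transfer *transfer;
const void *map = pipe_buffer_map(pipe, buf, PIPE_TRANSFER_READ, &transfer);
if (map) {
   memcpy(dst_data, map, buf->width0);
   pipe_buffer_unmap(pipe, transfer);
}
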
 
-static INLINE void
+static inline void
 pipe_buffer_flush_mapped_range(struct pipe_context *pipe,
                                struct pipe_transfer *transfer,
                                unsigned offset,
@@ -314,32 +359,15 @@ pipe_buffer_flush_mapped_range(struct pipe_context *pipe,
    pipe->transfer_flush_region(pipe, transfer, &box);
 }
 
-static INLINE void
+static inline void
 pipe_buffer_write(struct pipe_context *pipe,
                   struct pipe_resource *buf,
                   unsigned offset,
                   unsigned size,
                   const void *data)
 {
-   struct pipe_box box;
-   unsigned usage = PIPE_TRANSFER_WRITE;
-
-   if (offset == 0 && size == buf->width0) {
-      usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
-   } else {
-      usage |= PIPE_TRANSFER_DISCARD_RANGE;
-   }
-
-   u_box_1d(offset, size, &box);
-
-   pipe->transfer_inline_write( pipe,
-                                buf,
-                                0,
-                                usage,
-                                &box,
-                                data,
-                                size,
-                                0);
+   /* Don't set any other usage bits. Drivers should derive them. */
+   pipe->buffer_subdata(pipe, buf, PIPE_TRANSFER_WRITE, offset, size, data);
 }
 
 /**
@@ -348,32 +376,30 @@ pipe_buffer_write(struct pipe_context *pipe,
  * We can avoid GPU/CPU synchronization when writing a range that has never
  * been written before.
  */
-static INLINE void
+static inline void
 pipe_buffer_write_nooverlap(struct pipe_context *pipe,
                             struct pipe_resource *buf,
                             unsigned offset, unsigned size,
                             const void *data)
 {
-   struct pipe_box box;
-
-   u_box_1d(offset, size, &box);
-
-   pipe->transfer_inline_write(pipe,
-                               buf,
-                               0,
-                               (PIPE_TRANSFER_WRITE |
-                                PIPE_TRANSFER_UNSYNCHRONIZED),
-                               &box,
-                               data,
-                               0, 0);
+   pipe->buffer_subdata(pipe, buf,
+                        (PIPE_TRANSFER_WRITE |
+                         PIPE_TRANSFER_UNSYNCHRONIZED),
+                        offset, size, data);
 }
 
-static INLINE struct pipe_resource *
+
+/**
+ * Create a new resource and immediately put data into it.
+ * \param bind  bitmask of PIPE_BIND_x flags
+ * \param usage  a PIPE_USAGE_x value
+ */
+static inline struct pipe_resource *
 pipe_buffer_create_with_data(struct pipe_context *pipe,
                              unsigned bind,
-                             unsigned usage,
+                             enum pipe_resource_usage usage,
                              unsigned size,
-                             void *ptr)
+                             const void *ptr)
 {
    struct pipe_resource *res = pipe_buffer_create(pipe->screen,
                                                   bind, usage, size);
@@ -381,7 +407,7 @@ pipe_buffer_create_with_data(struct pipe_context *pipe,
    return res;
 }
 
-static INLINE void
+static inline void
 pipe_buffer_read(struct pipe_context *pipe,
                  struct pipe_resource *buf,
                  unsigned offset,
@@ -403,11 +429,16 @@ pipe_buffer_read(struct pipe_context *pipe,
    pipe_buffer_unmap(pipe, src_transfer);
 }
 
-static INLINE void *
+
+/**
+ * Map a resource for reading/writing.
+ * \param access  bitmask of PIPE_TRANSFER_x flags
+ */
+static inline void *
 pipe_transfer_map(struct pipe_context *context,
                   struct pipe_resource *resource,
                   unsigned level, unsigned layer,
-                  enum pipe_transfer_usage usage,
+                  unsigned access,
                   unsigned x, unsigned y,
                   unsigned w, unsigned h,
                   struct pipe_transfer **transfer)
@@ -417,19 +448,43 @@ pipe_transfer_map(struct pipe_context *context,
    return context->transfer_map(context,
                                 resource,
                                 level,
-                                usage,
+                                access,
+                                &box, transfer);
+}
+
+
+/**
+ * Map a 3D (texture) resource for reading/writing.
+ * \param access  bitmask of PIPE_TRANSFER_x flags
+ */
+static inline void *
+pipe_transfer_map_3d(struct pipe_context *context,
+                     struct pipe_resource *resource,
+                     unsigned level,
+                     unsigned access,
+                     unsigned x, unsigned y, unsigned z,
+                     unsigned w, unsigned h, unsigned d,
+                     struct pipe_transfer **transfer)
+{
+   struct pipe_box box;
+   u_box_3d(x, y, z, w, h, d, &box);
+   return context->transfer_map(context,
+                                resource,
+                                level,
+                                access,
                                 &box, transfer);
 }
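
For texture transfers, a sketch of reading back one 2D level with pipe_transfer_map; rows must be addressed through transfer->stride because the mapping need not be tightly packed (`ctx`, `tex`, `dst`, `w` and `h` are assumed caller variables):

/* Read back a w x h region of level 0, layer 0 of a texture (sketch). */
struct pipe_transfer *t;
uint8_t *map = pipe_transfer_map(ctx, tex, 0, 0, PIPE_TRANSFER_READ,
                                 0, 0, w, h, &t);
if (map) {
   for (unsigned y = 0; y < h; y++)
      memcpy(dst + y * w * 4,            /* dst assumed tightly packed, 4 B/texel */
             map + y * t->stride, w * 4);
   pipe_transfer_unmap(ctx, t);
}
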
 
-static INLINE void
+static inline void
 pipe_transfer_unmap( struct pipe_context *context,
                      struct pipe_transfer *transfer )
 {
    context->transfer_unmap( context, transfer );
 }
 
-static INLINE void
-pipe_set_constant_buffer(struct pipe_context *pipe, uint shader, uint index,
+static inline void
+pipe_set_constant_buffer(struct pipe_context *pipe,
+                         enum pipe_shader_type shader, uint index,
                          struct pipe_resource *buf)
 {
    if (buf) {
@@ -445,9 +500,13 @@ pipe_set_constant_buffer(struct pipe_context *pipe, uint shader, uint index,
 }
 
 
-static INLINE boolean util_get_offset( 
-   const struct pipe_rasterizer_state *templ,
-   unsigned fill_mode)
+/**
+ * Get the polygon offset enable/disable flag for the given polygon fill mode.
+ * \param fill_mode  one of PIPE_POLYGON_MODE_POINT/LINE/FILL
+ */
+static inline boolean
+util_get_offset(const struct pipe_rasterizer_state *templ,
+                unsigned fill_mode)
 {
    switch(fill_mode) {
    case PIPE_POLYGON_MODE_POINT:
@@ -462,23 +521,23 @@ static INLINE boolean util_get_offset(
    }
 }
 
-static INLINE float
+static inline float
 util_get_min_point_size(const struct pipe_rasterizer_state *state)
 {
    /* The point size should be clamped to this value at the rasterizer stage.
     */
-   return state->gl_rasterization_rules &&
-          !state->point_quad_rasterization &&
+   return !state->point_quad_rasterization &&
           !state->point_smooth &&
           !state->multisample ? 1.0f : 0.0f;
 }
 
-static INLINE void
+static inline void
 util_query_clear_result(union pipe_query_result *result, unsigned type)
 {
    switch (type) {
    case PIPE_QUERY_OCCLUSION_PREDICATE:
    case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
+   case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
    case PIPE_QUERY_GPU_FINISHED:
       result->b = FALSE;
       break;
@@ -499,16 +558,19 @@ util_query_clear_result(union pipe_query_result *result, unsigned type)
       memset(&result->pipeline_statistics, 0, sizeof(result->pipeline_statistics));
       break;
    default:
-      assert(0);
+      memset(result, 0, sizeof(*result));
    }
 }
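
The PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE added here follows the same boolean-result convention as the per-stream PIPE_QUERY_SO_OVERFLOW_PREDICATE. A hedged sketch of how a state tracker might consume it (error handling omitted):

/* Did any stream-output target overflow while the query was active? (sketch) */
struct pipe_query *q =
   ctx->create_query(ctx, PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE, 0);

ctx->begin_query(ctx, q);
/* ... draw with stream output bound ... */
ctx->end_query(ctx, q);

union pipe_query_result result;
util_query_clear_result(&result, PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE);
if (ctx->get_query_result(ctx, q, TRUE /* wait */, &result) && result.b) {
   /* at least one stream-output buffer overflowed */
}
ctx->destroy_query(ctx, q);
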
 
 /** Convert PIPE_TEXTURE_x to TGSI_TEXTURE_x */
-static INLINE unsigned
+static inline enum tgsi_texture_type
 util_pipe_tex_to_tgsi_tex(enum pipe_texture_target pipe_tex_target,
                           unsigned nr_samples)
 {
    switch (pipe_tex_target) {
+   case PIPE_BUFFER:
+      return TGSI_TEXTURE_BUFFER;
+
    case PIPE_TEXTURE_1D:
       assert(nr_samples <= 1);
       return TGSI_TEXTURE_1D;
@@ -546,7 +608,7 @@ util_pipe_tex_to_tgsi_tex(enum pipe_texture_target pipe_tex_target,
 }
 
 
-static INLINE void
+static inline void
 util_copy_constant_buffer(struct pipe_constant_buffer *dst,
                           const struct pipe_constant_buffer *src)
 {
@@ -564,6 +626,52 @@ util_copy_constant_buffer(struct pipe_constant_buffer *dst,
    }
 }
 
+static inline void
+util_copy_image_view(struct pipe_image_view *dst,
+                     const struct pipe_image_view *src)
+{
+   if (src) {
+      pipe_resource_reference(&dst->resource, src->resource);
+      dst->format = src->format;
+      dst->access = src->access;
+      dst->u = src->u;
+   } else {
+      pipe_resource_reference(&dst->resource, NULL);
+      dst->format = PIPE_FORMAT_NONE;
+      dst->access = 0;
+      memset(&dst->u, 0, sizeof(dst->u));
+   }
+}
+
+static inline unsigned
+util_max_layer(const struct pipe_resource *r, unsigned level)
+{
+   switch (r->target) {
+   case PIPE_TEXTURE_3D:
+      return u_minify(r->depth0, level) - 1;
+   case PIPE_TEXTURE_CUBE:
+      assert(r->array_size == 6);
+      /* fall-through */
+   case PIPE_TEXTURE_1D_ARRAY:
+   case PIPE_TEXTURE_2D_ARRAY:
+   case PIPE_TEXTURE_CUBE_ARRAY:
+      return r->array_size - 1;
+   default:
+      return 0;
+   }
+}
+
+static inline bool
+util_texrange_covers_whole_level(const struct pipe_resource *tex,
+                                 unsigned level, unsigned x, unsigned y,
+                                 unsigned z, unsigned width,
+                                 unsigned height, unsigned depth)
+{
+   return x == 0 && y == 0 && z == 0 &&
+          width == u_minify(tex->width0, level) &&
+          height == u_minify(tex->height0, level) &&
+          depth == util_max_layer(tex, level) + 1;
+}
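
A hedged example of the kind of check util_texrange_covers_whole_level enables; drivers typically use it to decide whether a transfer may discard the whole resource instead of preserving existing texels (`res`, `level`, `box` and `usage` are illustrative):

/* Promote a full-level write of a single-level resource to a whole-resource
 * discard (sketch). */
if (res->last_level == 0 &&
    util_texrange_covers_whole_level(res, level, box->x, box->y, box->z,
                                     box->width, box->height, box->depth))
   usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
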
 
 #ifdef __cplusplus
 }