intel: Support mapping multisample miptrees
diff --git a/src/mesa/drivers/dri/intel/intel_mipmap_tree.c b/src/mesa/drivers/dri/intel/intel_mipmap_tree.c
index 7c95b7924ca275db513564989bb5d5ee24846f40..e002c743b92b4ceca2c99a4ab0068ae9d81b9e46 100644
--- a/src/mesa/drivers/dri/intel/intel_mipmap_tree.c
+++ b/src/mesa/drivers/dri/intel/intel_mipmap_tree.c
@@ -25,6 +25,9 @@
  * 
  **************************************************************************/
 
+#include <GL/gl.h>
+#include <GL/internal/dri_interface.h>
+
 #include "intel_batchbuffer.h"
 #include "intel_context.h"
 #include "intel_mipmap_tree.h"
 #include "intel_tex.h"
 #include "intel_blit.h"
 
+#ifndef I915
+#include "brw_blorp.h"
+#endif
+
 #include "main/enums.h"
 #include "main/formats.h"
-#include "main/image.h"
+#include "main/glformats.h"
+#include "main/texcompress_etc.h"
 #include "main/teximage.h"
 
 #define FILE_DEBUG_FLAG DEBUG_MIPTREE
@@ -127,7 +135,7 @@ intel_miptree_create_internal(struct intel_context *intel,
         intel->vtbl.is_hiz_depth_format(intel, format)))) {
       /* MSAA stencil surfaces always use IMS layout. */
       enum intel_msaa_layout msaa_layout =
-         num_samples > 0 ? INTEL_MSAA_LAYOUT_IMS : INTEL_MSAA_LAYOUT_NONE;
+         num_samples > 1 ? INTEL_MSAA_LAYOUT_IMS : INTEL_MSAA_LAYOUT_NONE;
       mt->stencil_mt = intel_miptree_create(intel,
                                             mt->target,
                                             MESA_FORMAT_S8,
@@ -190,7 +198,15 @@ intel_miptree_create(struct intel_context *intel,
 {
    struct intel_mipmap_tree *mt;
    uint32_t tiling = I915_TILING_NONE;
-   GLenum base_format = _mesa_get_format_base_format(format);
+   GLenum base_format;
+   bool wraps_etc1 = false;
+
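+   /* ETC1 is not natively supported by the sampler on this hardware, so the
+    * texture is stored decompressed as RGBX; the client's compressed data is
+    * decompressed at unmap time (see intel_miptree_unmap_etc1()).
+    */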
+   if (format == MESA_FORMAT_ETC1_RGB8) {
+      format = MESA_FORMAT_RGBX8888_REV;
+      wraps_etc1 = true;
+   }
+
+   base_format = _mesa_get_format_base_format(format);
 
    if (intel->use_texture_tiling && !_mesa_is_format_compressed(format)) {
       if (intel->gen >= 4 &&
@@ -237,6 +253,7 @@ intel_miptree_create(struct intel_context *intel,
       return NULL;
    }
 
+   mt->wraps_etc1 = wraps_etc1;
    mt->region = intel_region_alloc(intel->intelScreen,
                                   tiling,
                                   mt->cpp,
@@ -293,8 +310,81 @@ compute_msaa_layout(struct intel_context *intel, gl_format format)
    case GL_DEPTH_STENCIL:
       return INTEL_MSAA_LAYOUT_IMS;
    default:
-      return INTEL_MSAA_LAYOUT_UMS;
+      /* From the Ivy Bridge PRM, Vol4 Part1 p77 ("MCS Enable"):
+       *
+       *   This field must be set to 0 for all SINT MSRTs when all RT channels
+       *   are not written
+       *
+       * In practice this means that we have to disable MCS for all signed
+       * integer MSAA buffers.  The alternative, to disable MCS only when one
+       * of the render target channels is disabled, is impractical because it
+       * would require converting between CMS and UMS MSAA layouts on the fly,
+       * which is expensive.
+       */
+      if (_mesa_get_format_datatype(format) == GL_INT) {
+         /* TODO: is this workaround needed for future chipsets? */
+         assert(intel->gen == 7);
+         return INTEL_MSAA_LAYOUT_UMS;
+      } else {
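+         /* CMS layout stores the surface together with an auxiliary MCS
+          * buffer; see intel_miptree_alloc_mcs().
+          */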
+         return INTEL_MSAA_LAYOUT_CMS;
+      }
+   }
+}
+
+/**
+ * For a singlesample DRI2 buffer, this simply wraps the given region with
+ * a miptree.
+ *
+ * For a multisample DRI2 buffer, this wraps the given region with
+ * a singlesample miptree, then creates a multisample miptree into which the
+ * singlesample miptree is embedded as a child.
+ */
+struct intel_mipmap_tree*
+intel_miptree_create_for_dri2_buffer(struct intel_context *intel,
+                                     unsigned dri_attachment,
+                                     gl_format format,
+                                     uint32_t num_samples,
+                                     struct intel_region *region)
+{
+   struct intel_mipmap_tree *singlesample_mt = NULL;
+   struct intel_mipmap_tree *multisample_mt = NULL;
+   GLenum base_format = _mesa_get_format_base_format(format);
+
+   /* Only the front and back buffers, which are color buffers, are shared
+    * through DRI2.
+    */
+   assert(dri_attachment == __DRI_BUFFER_BACK_LEFT ||
+          dri_attachment == __DRI_BUFFER_FRONT_LEFT ||
+          dri_attachment == __DRI_BUFFER_FAKE_FRONT_LEFT);
+   assert(base_format == GL_RGB || base_format == GL_RGBA);
+
+   singlesample_mt = intel_miptree_create_for_region(intel, GL_TEXTURE_2D,
+                                                     format, region);
+   if (!singlesample_mt)
+      return NULL;
+
+   if (num_samples == 0)
+      return singlesample_mt;
+
+   multisample_mt = intel_miptree_create_for_renderbuffer(intel,
+                                                          format,
+                                                          region->width,
+                                                          region->height,
+                                                          num_samples);
+   if (!multisample_mt) {
+      intel_miptree_release(&singlesample_mt);
+      return NULL;
+   }
+
+   multisample_mt->singlesample_mt = singlesample_mt;
+   multisample_mt->need_downsample = false;
+
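+   /* If the client may already have rendered to the window-system buffer
+    * (front-buffer rendering), seed the multisample miptree with its current
+    * contents.
+    */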
+   if (intel->is_front_buffer_rendering &&
+       (dri_attachment == __DRI_BUFFER_FRONT_LEFT ||
+        dri_attachment == __DRI_BUFFER_FAKE_FRONT_LEFT)) {
+      intel_miptree_upsample(intel, multisample_mt);
    }
+
+   return multisample_mt;
 }
 
 struct intel_mipmap_tree*
@@ -307,8 +397,11 @@ intel_miptree_create_for_renderbuffer(struct intel_context *intel,
    struct intel_mipmap_tree *mt;
    uint32_t depth = 1;
    enum intel_msaa_layout msaa_layout = INTEL_MSAA_LAYOUT_NONE;
+   const uint32_t singlesample_width = width;
+   const uint32_t singlesample_height = height;
+   bool ok;
 
-   if (num_samples > 0) {
+   if (num_samples > 1) {
       /* Adjust width/height/depth for MSAA */
       msaa_layout = compute_msaa_layout(intel, format);
       if (msaa_layout == INTEL_MSAA_LAYOUT_IMS) {
@@ -356,7 +449,7 @@ intel_miptree_create_for_renderbuffer(struct intel_context *intel,
             height = ALIGN(height, 2) * 2;
             break;
          default:
-            /* num_samples should already have been quantized to 0, 4, or
+            /* num_samples should already have been quantized to 0, 1, 4, or
              * 8.
              */
             assert(false);
@@ -370,8 +463,29 @@ intel_miptree_create_for_renderbuffer(struct intel_context *intel,
    mt = intel_miptree_create(intel, GL_TEXTURE_2D, format, 0, 0,
                             width, height, depth, true, num_samples,
                              msaa_layout);
+   if (!mt)
+      goto fail;
+
+   if (intel->vtbl.is_hiz_depth_format(intel, format)) {
+      ok = intel_miptree_alloc_hiz(intel, mt, num_samples);
+      if (!ok)
+         goto fail;
+   }
+
+   if (mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS) {
+      ok = intel_miptree_alloc_mcs(intel, mt, num_samples);
+      if (!ok)
+         goto fail;
+   }
+
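+   /* Remember the pre-MSAA dimensions; they are used when creating the
+    * singlesample miptree for mapping (see intel_miptree_map_multisample()).
+    */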
+   mt->singlesample_width0 = singlesample_width;
+   mt->singlesample_height0 = singlesample_height;
 
    return mt;
+
+fail:
+   intel_miptree_release(&mt);
+   return NULL;
 }
 
 void
@@ -407,6 +521,8 @@ intel_miptree_release(struct intel_mipmap_tree **mt)
       intel_region_release(&((*mt)->region));
       intel_miptree_release(&(*mt)->stencil_mt);
       intel_miptree_release(&(*mt)->hiz_mt);
+      intel_miptree_release(&(*mt)->mcs_mt);
+      intel_miptree_release(&(*mt)->singlesample_mt);
       intel_resolve_map_clear(&(*mt)->hiz_map);
 
       for (i = 0; i < MAX_TEXTURE_LEVELS; i++) {
@@ -637,6 +753,75 @@ intel_miptree_copy_teximage(struct intel_context *intel,
    intel_miptree_reference(&intelImage->mt, dst_mt);
 }
 
+bool
+intel_miptree_alloc_mcs(struct intel_context *intel,
+                        struct intel_mipmap_tree *mt,
+                        GLuint num_samples)
+{
+   assert(mt->mcs_mt == NULL);
+   assert(intel->gen >= 7); /* MCS only used on Gen7+ */
+
+   /* Choose the correct format for the MCS buffer.  All that really matters
+    * is that we allocate the right buffer size, since we'll always be
+    * accessing this miptree using MCS-specific hardware mechanisms, which
+    * infer the correct format based on num_samples.
+    */
+   gl_format format;
+   switch (num_samples) {
+   case 4:
+      /* 8 bits/pixel are required for MCS data when using 4x MSAA (2 bits for
+       * each sample).
+       */
+      format = MESA_FORMAT_R8;
+      break;
+   case 8:
+      /* 32 bits/pixel are required for MCS data when using 8x MSAA (3 bits
+       * for each sample, plus 8 padding bits).
+       */
+      format = MESA_FORMAT_R_UINT32;
+      break;
+   default:
+      assert(!"Unrecognized sample count in intel_miptree_alloc_mcs");
+      return false;
+   }
+
+   /* From the Ivy Bridge PRM, Vol4 Part1 p76, "MCS Base Address":
+    *
+    *     "The MCS surface must be stored as Tile Y."
+    *
+    * We set msaa_layout to INTEL_MSAA_LAYOUT_CMS to force
+    * intel_miptree_create() to use Y tiling.  msaa_layout is otherwise
+    * ignored for the MCS miptree.
+    */
+   mt->mcs_mt = intel_miptree_create(intel,
+                                     mt->target,
+                                     format,
+                                     mt->first_level,
+                                     mt->last_level,
+                                     mt->width0,
+                                     mt->height0,
+                                     mt->depth0,
+                                     true,
+                                     0 /* num_samples */,
+                                     INTEL_MSAA_LAYOUT_CMS);
+
+   /* From the Ivy Bridge PRM, Vol 2 Part 1 p326:
+    *
+    *     "When MCS buffer is enabled and bound to MSRT, it is required that
+    *     it is cleared prior to any rendering."
+    *
+    * Since we don't use the MCS buffer for any purpose other than rendering,
+    * it makes sense to just clear it immediately upon allocation.
+    *
+    * Note: the clear value for MCS buffers is all 1's, so we memset to 0xff.
+    */
+   void *data = intel_region_map(intel, mt->mcs_mt->region, 0);
+   memset(data, 0xff, mt->mcs_mt->region->bo->size);
+   intel_region_unmap(intel, mt->mcs_mt->region);
+
+   return mt->mcs_mt;
+}
+
 bool
 intel_miptree_alloc_hiz(struct intel_context *intel,
                        struct intel_mipmap_tree *mt,
@@ -783,6 +968,96 @@ intel_miptree_all_slices_resolve_depth(struct intel_context *intel,
                                           GEN6_HIZ_OP_DEPTH_RESOLVE);
 }
 
+static void
+intel_miptree_updownsample(struct intel_context *intel,
+                           struct intel_mipmap_tree *src,
+                           struct intel_mipmap_tree *dst,
+                           unsigned width,
+                           unsigned height)
+{
+#ifndef I915
+   int src_x0 = 0;
+   int src_y0 = 0;
+   int dst_x0 = 0;
+   int dst_y0 = 0;
+
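+   /* Resolve any pending HiZ data so the blit sees up-to-date depth values. */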
+   intel_miptree_slice_resolve_depth(intel, src, 0, 0);
+   intel_miptree_slice_resolve_depth(intel, dst, 0, 0);
+
+   brw_blorp_blit_miptrees(intel,
+                           src, dst,
+                           src_x0, src_y0,
+                           dst_x0, dst_y0,
+                           width, height,
+                           false, false /*mirror x, y*/);
+
+   if (src->stencil_mt) {
+      brw_blorp_blit_miptrees(intel,
+                              src->stencil_mt, dst->stencil_mt,
+                              src_x0, src_y0,
+                              dst_x0, dst_y0,
+                              width, height,
+                              false, false /*mirror x, y*/);
+   }
+#endif /* I915 */
+}
+
+static void
+assert_is_flat(struct intel_mipmap_tree *mt)
+{
+   assert(mt->target == GL_TEXTURE_2D);
+   assert(mt->first_level == 0);
+   assert(mt->last_level == 0);
+}
+
+/**
+ * \brief Downsample from mt to mt->singlesample_mt.
+ *
+ * If the miptree needs no downsample, then skip.
+ */
+void
+intel_miptree_downsample(struct intel_context *intel,
+                         struct intel_mipmap_tree *mt)
+{
+   /* Only flat, renderbuffer-like miptrees are supported. */
+   assert_is_flat(mt);
+
+   if (!mt->need_downsample)
+      return;
+   intel_miptree_updownsample(intel,
+                              mt, mt->singlesample_mt,
+                              mt->singlesample_mt->width0,
+                              mt->singlesample_mt->height0);
+   mt->need_downsample = false;
+
+   /* Strictly speaking, after a downsample on a depth miptree, a hiz
+    * resolve is needed on the singlesample miptree. However, since the
+    * singlesample miptree is never rendered to, the hiz resolve will never
+    * occur. Therefore we do not mark the needed hiz resolve after
+    * downsampling.
+    */
+}
+
+/**
+ * \brief Upsample from mt->singlesample_mt to mt.
+ *
+ * The upsample is done unconditionally.
+ */
+void
+intel_miptree_upsample(struct intel_context *intel,
+                       struct intel_mipmap_tree *mt)
+{
+   /* Only flat, renderbuffer-like miptrees are supported. */
+   assert_is_flat(mt);
+   assert(!mt->need_downsample);
+
+   intel_miptree_updownsample(intel,
+                              mt->singlesample_mt, mt,
+                              mt->singlesample_mt->width0,
+                              mt->singlesample_mt->height0);
+   intel_miptree_slice_set_needs_hiz_resolve(mt, 0, 0);
+}
+
 static void
 intel_miptree_map_gtt(struct intel_context *intel,
                      struct intel_mipmap_tree *mt,
@@ -985,6 +1260,60 @@ intel_miptree_unmap_s8(struct intel_context *intel,
    free(map->buffer);
 }
 
+static void
+intel_miptree_map_etc1(struct intel_context *intel,
+                       struct intel_mipmap_tree *mt,
+                       struct intel_miptree_map *map,
+                       unsigned int level,
+                       unsigned int slice)
+{
+   /* For justification of these invariants,
+    * see intel_mipmap_tree:wraps_etc1.
+    */
+   assert(mt->wraps_etc1);
+   assert(mt->format == MESA_FORMAT_RGBX8888_REV);
+
+   /* From the GL_OES_compressed_ETC1_RGB8_texture spec:
+    *   INVALID_OPERATION is generated by CompressedTexSubImage2D,
+    *   TexSubImage2D, or CopyTexSubImage2D if the texture image <level>
+    *   bound to <target> has internal format ETC1_RGB8_OES.
+    *
+    * This implies that intel_miptree_map_etc1() can only be called from
+    * glCompressedTexImage2D, and hence the assertions below hold.
+    */
+   assert(map->mode & GL_MAP_WRITE_BIT);
+   assert(map->mode & GL_MAP_INVALIDATE_RANGE_BIT);
+   assert(map->x == 0);
+   assert(map->y == 0);
+
+   /* Each ETC1 block encodes 4x4 pixels in 8 bytes, so a row of blocks
+    * covering map->w pixels occupies (map->w / 4) * 8 = 2 * map->w bytes.
+    */
+   map->stride = 2 * map->w;
+   map->buffer = map->ptr = malloc(map->stride * map->h);
+}
+
+static void
+intel_miptree_unmap_etc1(struct intel_context *intel,
+                         struct intel_mipmap_tree *mt,
+                         struct intel_miptree_map *map,
+                         unsigned int level,
+                         unsigned int slice)
+{
+   uint32_t image_x;
+   uint32_t image_y;
+   intel_miptree_get_image_offset(mt, level, 0, slice, &image_x, &image_y);
+
+   uint8_t *xbgr = intel_region_map(intel, mt->region, map->mode)
+                 + image_y * mt->region->pitch * mt->region->cpp
+                 + image_x * mt->region->cpp;
+
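+   /* Decompress the ETC1 blocks the client wrote into map->buffer directly
+    * into the RGBX miptree.
+    */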
+   _mesa_etc1_unpack_rgba8888(xbgr, mt->region->pitch * mt->region->cpp,
+                              map->ptr, map->stride,
+                              map->w, map->h);
+
+   intel_region_unmap(intel, mt->region);
+   free(map->buffer);
+}
+
 /**
  * Mapping function for packed depth/stencil miptrees backed by real separate
  * miptrees for depth and stencil.
@@ -1124,36 +1453,76 @@ intel_miptree_unmap_depthstencil(struct intel_context *intel,
    free(map->buffer);
 }
 
-void
-intel_miptree_map(struct intel_context *intel,
-                 struct intel_mipmap_tree *mt,
-                 unsigned int level,
-                 unsigned int slice,
-                 unsigned int x,
-                 unsigned int y,
-                 unsigned int w,
-                 unsigned int h,
-                 GLbitfield mode,
-                 void **out_ptr,
-                 int *out_stride)
+/**
+ * Create and attach a map to the miptree at (level, slice). Return the
+ * attached map.
+ */
+static struct intel_miptree_map*
+intel_miptree_attach_map(struct intel_mipmap_tree *mt,
+                         unsigned int level,
+                         unsigned int slice,
+                         unsigned int x,
+                         unsigned int y,
+                         unsigned int w,
+                         unsigned int h,
+                         GLbitfield mode)
 {
-   struct intel_miptree_map *map;
+   struct intel_miptree_map *map = calloc(1, sizeof(*map));
 
-   map = calloc(1, sizeof(struct intel_miptree_map));
-   if (!map){
-      *out_ptr = NULL;
-      *out_stride = 0;
-      return;
-   }
+   if (!map)
+      return NULL;
 
-   assert(!mt->level[level].slice[slice].map);
+   assert(mt->level[level].slice[slice].map == NULL);
    mt->level[level].slice[slice].map = map;
+
    map->mode = mode;
    map->x = x;
    map->y = y;
    map->w = w;
    map->h = h;
 
+   return map;
+}
+
+/**
+ * Release the map at (level, slice).
+ */
+static void
+intel_miptree_release_map(struct intel_mipmap_tree *mt,
+                         unsigned int level,
+                         unsigned int slice)
+{
+   struct intel_miptree_map **map;
+
+   map = &mt->level[level].slice[slice].map;
+   free(*map);
+   *map = NULL;
+}
+
+static void
+intel_miptree_map_singlesample(struct intel_context *intel,
+                               struct intel_mipmap_tree *mt,
+                               unsigned int level,
+                               unsigned int slice,
+                               unsigned int x,
+                               unsigned int y,
+                               unsigned int w,
+                               unsigned int h,
+                               GLbitfield mode,
+                               void **out_ptr,
+                               int *out_stride)
+{
+   struct intel_miptree_map *map;
+
+   assert(mt->num_samples <= 1);
+
+   map = intel_miptree_attach_map(mt, level, slice, x, y, w, h, mode);
+   if (!map){
+      *out_ptr = NULL;
+      *out_stride = 0;
+      return;
+   }
+
    intel_miptree_slice_resolve_depth(intel, mt, level, slice);
    if (map->mode & GL_MAP_WRITE_BIT) {
       intel_miptree_slice_set_needs_hiz_resolve(mt, level, slice);
@@ -1161,6 +1530,8 @@ intel_miptree_map(struct intel_context *intel,
 
    if (mt->format == MESA_FORMAT_S8) {
       intel_miptree_map_s8(intel, mt, map, level, slice);
+   } else if (mt->wraps_etc1) {
+      intel_miptree_map_etc1(intel, mt, map, level, slice);
    } else if (mt->stencil_mt) {
       intel_miptree_map_depthstencil(intel, mt, map, level, slice);
    } else if (intel->has_llc &&
@@ -1175,20 +1546,20 @@ intel_miptree_map(struct intel_context *intel,
    *out_ptr = map->ptr;
    *out_stride = map->stride;
 
-   if (map->ptr == NULL) {
-      mt->level[level].slice[slice].map = NULL;
-      free(map);
-   }
+   if (map->ptr == NULL)
+      intel_miptree_release_map(mt, level, slice);
 }
 
-void
-intel_miptree_unmap(struct intel_context *intel,
-                   struct intel_mipmap_tree *mt,
-                   unsigned int level,
-                   unsigned int slice)
+static void
+intel_miptree_unmap_singlesample(struct intel_context *intel,
+                                 struct intel_mipmap_tree *mt,
+                                 unsigned int level,
+                                 unsigned int slice)
 {
    struct intel_miptree_map *map = mt->level[level].slice[slice].map;
 
+   assert(mt->num_samples <= 1);
+
    if (!map)
       return;
 
@@ -1197,6 +1568,8 @@ intel_miptree_unmap(struct intel_context *intel,
 
    if (mt->format == MESA_FORMAT_S8) {
       intel_miptree_unmap_s8(intel, mt, map, level, slice);
+   } else if (mt->wraps_etc1) {
+      intel_miptree_unmap_etc1(intel, mt, map, level, slice);
    } else if (mt->stencil_mt) {
       intel_miptree_unmap_depthstencil(intel, mt, map, level, slice);
    } else if (map->bo) {
@@ -1205,6 +1578,131 @@ intel_miptree_unmap(struct intel_context *intel,
       intel_miptree_unmap_gtt(intel, mt, map, level, slice);
    }
 
-   mt->level[level].slice[slice].map = NULL;
-   free(map);
+   intel_miptree_release_map(mt, level, slice);
+}
+
+static void
+intel_miptree_map_multisample(struct intel_context *intel,
+                              struct intel_mipmap_tree *mt,
+                              unsigned int level,
+                              unsigned int slice,
+                              unsigned int x,
+                              unsigned int y,
+                              unsigned int w,
+                              unsigned int h,
+                              GLbitfield mode,
+                              void **out_ptr,
+                              int *out_stride)
+{
+   struct intel_miptree_map *map;
+
+   assert(mt->num_samples > 1);
+
+   /* Only flat, renderbuffer-like miptrees are supported. */
+   if (mt->target != GL_TEXTURE_2D ||
+       mt->first_level != 0 ||
+       mt->last_level != 0) {
+      _mesa_problem(&intel->ctx, "attempt to map a multisample miptree for "
+                    "which (target, first_level, last_level) != "
+                    "(GL_TEXTURE_2D, 0, 0)");
+      goto fail;
+   }
+
+   map = intel_miptree_attach_map(mt, level, slice, x, y, w, h, mode);
+   if (!map)
+      goto fail;
+
+   if (!mt->singlesample_mt) {
+      mt->singlesample_mt =
+         intel_miptree_create_for_renderbuffer(intel,
+                                               mt->format,
+                                               mt->singlesample_width0,
+                                               mt->singlesample_height0,
+                                               0 /*num_samples*/);
+      if (!mt->singlesample_mt)
+         goto fail;
+
+      map->singlesample_mt_is_tmp = true;
+      mt->need_downsample = true;
+   }
+
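+   /* When the caller promises to overwrite the mapped contents, the
+    * downsample blit can be skipped.
+    */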
+   if (mode & GL_MAP_INVALIDATE_RANGE_BIT)
+      mt->need_downsample = false;
+
+   intel_miptree_downsample(intel, mt);
+   intel_miptree_map_singlesample(intel, mt->singlesample_mt,
+                                  level, slice,
+                                  x, y, w, h,
+                                  mode,
+                                  out_ptr, out_stride);
+   return;
+
+fail:
+   intel_miptree_release_map(mt, level, slice);
+   *out_ptr = NULL;
+   *out_stride = 0;
+}
+
+static void
+intel_miptree_unmap_multisample(struct intel_context *intel,
+                                struct intel_mipmap_tree *mt,
+                                unsigned int level,
+                                unsigned int slice)
+{
+   struct intel_miptree_map *map = mt->level[level].slice[slice].map;
+
+   assert(mt->num_samples > 1);
+
+   if (!map)
+      return;
+
+   intel_miptree_unmap_singlesample(intel, mt->singlesample_mt, level, slice);
+
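+   /* If the caller wrote through the map, push the writes back up into the
+    * multisample miptree.
+    */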
+   mt->need_downsample = false;
+   if (map->mode & GL_MAP_WRITE_BIT)
+      intel_miptree_upsample(intel, mt);
+
+   if (map->singlesample_mt_is_tmp)
+      intel_miptree_release(&mt->singlesample_mt);
+
+   intel_miptree_release_map(mt, level, slice);
+}
+
+void
+intel_miptree_map(struct intel_context *intel,
+                 struct intel_mipmap_tree *mt,
+                 unsigned int level,
+                 unsigned int slice,
+                 unsigned int x,
+                 unsigned int y,
+                 unsigned int w,
+                 unsigned int h,
+                 GLbitfield mode,
+                 void **out_ptr,
+                 int *out_stride)
+{
+   if (mt->num_samples <= 1)
+      intel_miptree_map_singlesample(intel, mt,
+                                     level, slice,
+                                     x, y, w, h,
+                                     mode,
+                                     out_ptr, out_stride);
+   else
+      intel_miptree_map_multisample(intel, mt,
+                                    level, slice,
+                                    x, y, w, h,
+                                    mode,
+                                    out_ptr, out_stride);
+}
+
+void
+intel_miptree_unmap(struct intel_context *intel,
+                   struct intel_mipmap_tree *mt,
+                   unsigned int level,
+                   unsigned int slice)
+{
+   if (mt->num_samples <= 1)
+      intel_miptree_unmap_singlesample(intel, mt, level, slice);
+   else
+      intel_miptree_unmap_multisample(intel, mt, level, slice);
 }