*
**************************************************************************/
+#include <GL/gl.h>
+#include <GL/internal/dri_interface.h>
+
#include "intel_batchbuffer.h"
#include "intel_context.h"
#include "intel_mipmap_tree.h"
#include "intel_tex.h"
#include "intel_blit.h"
+#ifndef I915
+#include "brw_blorp.h"
+#endif
+
#include "main/enums.h"
#include "main/formats.h"
-#include "main/image.h"
+#include "main/glformats.h"
+#include "main/texcompress_etc.h"
#include "main/teximage.h"
#define FILE_DEBUG_FLAG DEBUG_MIPTREE
}
}
+
+/**
+ * Determine which MSAA layout should be used by the MSAA surface being
+ * created, based on the chip generation and the surface type.
+ */
+static enum intel_msaa_layout
+compute_msaa_layout(struct intel_context *intel, gl_format format, GLenum target)
+{
+ /* Prior to Gen7, all MSAA surfaces used IMS layout. */
+ if (intel->gen < 7)
+ return INTEL_MSAA_LAYOUT_IMS;
+
+ /* In Gen7, IMS layout is only used for depth and stencil buffers. */
+ switch (_mesa_get_format_base_format(format)) {
+ case GL_DEPTH_COMPONENT:
+ case GL_STENCIL_INDEX:
+ case GL_DEPTH_STENCIL:
+ return INTEL_MSAA_LAYOUT_IMS;
+ default:
+ /* From the Ivy Bridge PRM, Vol4 Part1 p77 ("MCS Enable"):
+ *
+ * This field must be set to 0 for all SINT MSRTs when all RT channels
+ * are not written
+ *
+ * In practice this means that we have to disable MCS for all signed
+ * integer MSAA buffers. The alternative, to disable MCS only when one
+ * of the render target channels is disabled, is impractical because it
+ * would require converting between CMS and UMS MSAA layouts on the fly,
+ * which is expensive.
+ */
+ if (_mesa_get_format_datatype(format) == GL_INT) {
+ /* TODO: is this workaround needed for future chipsets? */
+ assert(intel->gen == 7);
+ return INTEL_MSAA_LAYOUT_UMS;
+ } else {
+ /* For now, if we're going to be texturing from this surface,
+ * force UMS, so that the shader doesn't have to do different things
+ * based on whether or not there is a multisample control surface that
+ * needs to be sampled first.
+ * We can't just blindly read the MCS surface in all cases because:
+ *
+ * From the Ivy Bridge PRM, Vol4 Part1 p77 ("MCS Enable"):
+ *
+ * If this field is disabled and the sampling engine <ld_mcs> message
+ * is issued on this surface, the MCS surface may be accessed. Software
+ * must ensure that the surface is defined to avoid GTT errors.
+ */
+ if (target == GL_TEXTURE_2D_MULTISAMPLE ||
+ target == GL_TEXTURE_2D_MULTISAMPLE_ARRAY) {
+ return INTEL_MSAA_LAYOUT_UMS;
+ } else {
+ return INTEL_MSAA_LAYOUT_CMS;
+ }
+ }
+ }
+}
+
+
/**
* @param for_region Indicates that the caller is
* intel_miptree_create_for_region(). If true, then do not create
* \c stencil_mt.
*/
-static struct intel_mipmap_tree *
-intel_miptree_create_internal(struct intel_context *intel,
- GLenum target,
- gl_format format,
- GLuint first_level,
- GLuint last_level,
- GLuint width0,
- GLuint height0,
- GLuint depth0,
- bool for_region)
+struct intel_mipmap_tree *
+intel_miptree_create_layout(struct intel_context *intel,
+ GLenum target,
+ gl_format format,
+ GLuint first_level,
+ GLuint last_level,
+ GLuint width0,
+ GLuint height0,
+ GLuint depth0,
+ bool for_region,
+ GLuint num_samples)
{
struct intel_mipmap_tree *mt = calloc(sizeof(*mt), 1);
int compress_byte = 0;
mt->format = format;
mt->first_level = first_level;
mt->last_level = last_level;
- mt->width0 = width0;
- mt->height0 = height0;
+ mt->logical_width0 = width0;
+ mt->logical_height0 = height0;
+ mt->logical_depth0 = depth0;
mt->cpp = compress_byte ? compress_byte : _mesa_get_format_bytes(mt->format);
+ mt->num_samples = num_samples;
mt->compressed = compress_byte ? 1 : 0;
+ mt->msaa_layout = INTEL_MSAA_LAYOUT_NONE;
mt->refcount = 1;
+ if (num_samples > 1) {
+ /* Adjust width/height/depth for MSAA */
+ mt->msaa_layout = compute_msaa_layout(intel, format, mt->target);
+ if (mt->msaa_layout == INTEL_MSAA_LAYOUT_IMS) {
+ /* In the Sandy Bridge PRM, volume 4, part 1, page 31, it says:
+ *
+ * "Any of the other messages (sample*, LOD, load4) used with a
+ * (4x) multisampled surface will in-effect sample a surface with
+ * double the height and width as that indicated in the surface
+ * state. Each pixel position on the original-sized surface is
+ * replaced with a 2x2 of samples with the following arrangement:
+ *
+ * sample 0 sample 2
+ * sample 1 sample 3"
+ *
+ * Thus, when sampling from a multisampled texture, it behaves as
+ * though the layout in memory for (x,y,sample) is:
+ *
+ * (0,0,0) (0,0,2) (1,0,0) (1,0,2)
+ * (0,0,1) (0,0,3) (1,0,1) (1,0,3)
+ *
+ * (0,1,0) (0,1,2) (1,1,0) (1,1,2)
+ * (0,1,1) (0,1,3) (1,1,1) (1,1,3)
+ *
+ * However, the actual layout of multisampled data in memory is:
+ *
+ * (0,0,0) (1,0,0) (0,0,1) (1,0,1)
+ * (0,1,0) (1,1,0) (0,1,1) (1,1,1)
+ *
+ * (0,0,2) (1,0,2) (0,0,3) (1,0,3)
+ * (0,1,2) (1,1,2) (0,1,3) (1,1,3)
+ *
+ * This pattern repeats for each 2x2 pixel block.
+ *
+ * As a result, when calculating the size of our 4-sample buffer for
+ * an odd width or height, we have to align before scaling up because
+ * sample 3 is in that bottom right 2x2 block.
+ */
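+ /* For example, a logical 5x3 4x-MSAA surface is allocated as a
+ * ALIGN(5, 2) * 2 x ALIGN(3, 2) * 2 = 12x8 grid of physical samples.
+ */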
+ switch (num_samples) {
+ case 4:
+ width0 = ALIGN(width0, 2) * 2;
+ height0 = ALIGN(height0, 2) * 2;
+ break;
+ case 8:
+ width0 = ALIGN(width0, 2) * 4;
+ height0 = ALIGN(height0, 2) * 2;
+ break;
+ default:
+ /* num_samples should already have been quantized to 0, 1, 4, or
+ * 8.
+ */
+ assert(false);
+ }
+ } else {
+ /* Non-interleaved */
+ depth0 *= num_samples;
+ }
+ }
+
+ /* array_spacing_lod0 is only used for non-IMS MSAA surfaces. TODO: can we
+ * use it elsewhere?
+ */
+ switch (mt->msaa_layout) {
+ case INTEL_MSAA_LAYOUT_NONE:
+ case INTEL_MSAA_LAYOUT_IMS:
+ mt->array_spacing_lod0 = false;
+ break;
+ case INTEL_MSAA_LAYOUT_UMS:
+ case INTEL_MSAA_LAYOUT_CMS:
+ mt->array_spacing_lod0 = true;
+ break;
+ }
+
if (target == GL_TEXTURE_CUBE_MAP) {
assert(depth0 == 1);
- mt->depth0 = 6;
- } else {
- mt->depth0 = depth0;
+ depth0 = 6;
}
- if (format == MESA_FORMAT_S8) {
- /* The stencil buffer has quirky pitch requirements. From Vol 2a,
- * 11.5.6.2.1 3DSTATE_STENCIL_BUFFER, field "Surface Pitch":
- * The pitch must be set to 2x the value computed based on width, as
- * the stencil buffer is stored with two rows interleaved.
- */
- assert(intel->has_separate_stencil);
- mt->cpp = 2;
- }
+ mt->physical_width0 = width0;
+ mt->physical_height0 = height0;
+ mt->physical_depth0 = depth0;
if (!for_region &&
- _mesa_is_depthstencil_format(_mesa_get_format_base_format(format)) &&
+ _mesa_get_format_base_format(format) == GL_DEPTH_STENCIL &&
(intel->must_use_separate_stencil ||
(intel->has_separate_stencil &&
intel->vtbl.is_hiz_depth_format(intel, format)))) {
MESA_FORMAT_S8,
mt->first_level,
mt->last_level,
- mt->width0,
- mt->height0,
- mt->depth0,
- true);
+ mt->logical_width0,
+ mt->logical_height0,
+ mt->logical_depth0,
+ true,
+ num_samples,
+ false /* force_y_tiling */);
if (!mt->stencil_mt) {
intel_miptree_release(&mt);
return NULL;
return mt;
}
+/**
+ * \brief Helper function for intel_miptree_create(): choose the tiling mode
+ * (I915_TILING_NONE, _X, or _Y) for the new miptree's region.
+ */
+static uint32_t
+intel_miptree_choose_tiling(struct intel_context *intel,
+ gl_format format,
+ uint32_t width0,
+ uint32_t num_samples,
+ bool force_y_tiling,
+ struct intel_mipmap_tree *mt)
+{
+
+ if (format == MESA_FORMAT_S8) {
+ /* The stencil buffer is W tiled. However, we request from the kernel a
+ * non-tiled buffer because the GTT is incapable of W fencing.
+ */
+ return I915_TILING_NONE;
+ }
+
+ if (!intel->use_texture_tiling || _mesa_is_format_compressed(format))
+ return I915_TILING_NONE;
+
+ if (force_y_tiling)
+ return I915_TILING_Y;
+
+ if (num_samples > 1) {
+ /* From p82 of the Sandy Bridge PRM, dw3[1] of SURFACE_STATE ("Tiled
+ * Surface"):
+ *
+ * [DevSNB+]: For multi-sample render targets, this field must be
+ * 1. MSRTs can only be tiled.
+ *
+ * Our usual reason for preferring X tiling (fast blits using the
+ * blitting engine) doesn't apply to MSAA, since we'll generally be
+ * downsampling or upsampling when blitting between the MSAA buffer
+ * and another buffer, and the blitting engine doesn't support that.
+ * So use Y tiling, since it makes better use of the cache.
+ */
+ return I915_TILING_Y;
+ }
+
+ GLenum base_format = _mesa_get_format_base_format(format);
+ if (intel->gen >= 4 &&
+ (base_format == GL_DEPTH_COMPONENT ||
+ base_format == GL_DEPTH_STENCIL_EXT))
+ return I915_TILING_Y;
+
+ if (width0 >= 64) {
+ if (ALIGN(mt->total_width * mt->cpp, 512) < 32768)
+ return I915_TILING_X;
+
+ perf_debug("%dx%d miptree too large to blit, falling back to untiled\n",
+ mt->total_width, mt->total_height);
+ }
+
+ return I915_TILING_NONE;
+}
struct intel_mipmap_tree *
intel_miptree_create(struct intel_context *intel,
GLuint width0,
GLuint height0,
GLuint depth0,
- bool expect_accelerated_upload)
+ bool expect_accelerated_upload,
+ GLuint num_samples,
+ bool force_y_tiling)
{
struct intel_mipmap_tree *mt;
- uint32_t tiling = I915_TILING_NONE;
- GLenum base_format = _mesa_get_format_base_format(format);
+ gl_format tex_format = format;
+ gl_format etc_format = MESA_FORMAT_NONE;
+ GLuint total_width, total_height;
- if (intel->use_texture_tiling && !_mesa_is_format_compressed(format)) {
- if (intel->gen >= 4 &&
- (base_format == GL_DEPTH_COMPONENT ||
- base_format == GL_DEPTH_STENCIL_EXT))
- tiling = I915_TILING_Y;
- else if (width0 >= 64)
- tiling = I915_TILING_X;
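+ /* ETC1/ETC2 textures are not supported natively by this hardware, so they
+ * are stored decompressed in an equivalent uncompressed format. The ETC
+ * data itself is unpacked into the miptree at unmap time (see
+ * intel_miptree_unmap_etc()).
+ */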
+ switch (format) {
+ case MESA_FORMAT_ETC1_RGB8:
+ format = MESA_FORMAT_RGBX8888_REV;
+ break;
+ case MESA_FORMAT_ETC2_RGB8:
+ format = MESA_FORMAT_RGBX8888_REV;
+ break;
+ case MESA_FORMAT_ETC2_SRGB8:
+ case MESA_FORMAT_ETC2_SRGB8_ALPHA8_EAC:
+ case MESA_FORMAT_ETC2_SRGB8_PUNCHTHROUGH_ALPHA1:
+ format = MESA_FORMAT_SARGB8;
+ break;
+ case MESA_FORMAT_ETC2_RGBA8_EAC:
+ case MESA_FORMAT_ETC2_RGB8_PUNCHTHROUGH_ALPHA1:
+ format = MESA_FORMAT_RGBA8888_REV;
+ break;
+ case MESA_FORMAT_ETC2_R11_EAC:
+ format = MESA_FORMAT_R16;
+ break;
+ case MESA_FORMAT_ETC2_SIGNED_R11_EAC:
+ format = MESA_FORMAT_SIGNED_R16;
+ break;
+ case MESA_FORMAT_ETC2_RG11_EAC:
+ format = MESA_FORMAT_GR1616;
+ break;
+ case MESA_FORMAT_ETC2_SIGNED_RG11_EAC:
+ format = MESA_FORMAT_SIGNED_GR1616;
+ break;
+ default:
+ /* Non ETC1 / ETC2 format */
+ break;
}
- if (format == MESA_FORMAT_S8) {
- /* The stencil buffer is W tiled. However, we request from the kernel a
- * non-tiled buffer because the GTT is incapable of W fencing.
- *
- * The stencil buffer has quirky pitch requirements. From Vol 2a,
- * 11.5.6.2.1 3DSTATE_STENCIL_BUFFER, field "Surface Pitch":
- * The pitch must be set to 2x the value computed based on width, as
- * the stencil buffer is stored with two rows interleaved.
- * To accomplish this, we resort to the nasty hack of doubling the drm
- * region's cpp and halving its height.
- *
- * If we neglect to double the pitch, then render corruption occurs.
- */
- tiling = I915_TILING_NONE;
- width0 = ALIGN(width0, 64);
- height0 = ALIGN((height0 + 1) / 2, 64);
- }
+ etc_format = (format != tex_format) ? tex_format : MESA_FORMAT_NONE;
- mt = intel_miptree_create_internal(intel, target, format,
+ mt = intel_miptree_create_layout(intel, target, format,
first_level, last_level, width0,
height0, depth0,
- false);
+ false, num_samples);
/*
* pitch == 0 || height == 0 indicates the null texture
*/
return NULL;
}
+ total_width = mt->total_width;
+ total_height = mt->total_height;
+
+ if (format == MESA_FORMAT_S8) {
+ /* Align to size of W tile, 64x64. */
+ total_width = ALIGN(total_width, 64);
+ total_height = ALIGN(total_height, 64);
+ }
+
+ uint32_t tiling = intel_miptree_choose_tiling(intel, format, width0,
+ num_samples, force_y_tiling,
+ mt);
+ mt->etc_format = etc_format;
mt->region = intel_region_alloc(intel->intelScreen,
tiling,
mt->cpp,
- mt->total_width,
- mt->total_height,
+ total_width,
+ total_height,
expect_accelerated_upload);
+ mt->offset = 0;
if (!mt->region) {
intel_miptree_release(&mt);
return mt;
}
-
struct intel_mipmap_tree *
intel_miptree_create_for_region(struct intel_context *intel,
GLenum target,
{
struct intel_mipmap_tree *mt;
- mt = intel_miptree_create_internal(intel, target, format,
+ mt = intel_miptree_create_layout(intel, target, format,
0, 0,
region->width, region->height, 1,
- true);
+ true, 0 /* num_samples */);
if (!mt)
return mt;
return mt;
}
+
+/**
+ * For a singlesample DRI2 buffer, this simply wraps the given region with a miptree.
+ *
+ * For a multisample DRI2 buffer, this wraps the given region with
+ * a singlesample miptree, then creates a multisample miptree into which the
+ * singlesample miptree is embedded as a child.
+ */
+struct intel_mipmap_tree*
+intel_miptree_create_for_dri2_buffer(struct intel_context *intel,
+ unsigned dri_attachment,
+ gl_format format,
+ uint32_t num_samples,
+ struct intel_region *region)
+{
+ struct intel_mipmap_tree *singlesample_mt = NULL;
+ struct intel_mipmap_tree *multisample_mt = NULL;
+ GLenum base_format = _mesa_get_format_base_format(format);
+
+ /* Only the front and back buffers, which are color buffers, are shared
+ * through DRI2.
+ */
+ assert(dri_attachment == __DRI_BUFFER_BACK_LEFT ||
+ dri_attachment == __DRI_BUFFER_FRONT_LEFT ||
+ dri_attachment == __DRI_BUFFER_FAKE_FRONT_LEFT);
+ assert(base_format == GL_RGB || base_format == GL_RGBA);
+
+ singlesample_mt = intel_miptree_create_for_region(intel, GL_TEXTURE_2D,
+ format, region);
+ if (!singlesample_mt)
+ return NULL;
+
+ if (num_samples == 0)
+ return singlesample_mt;
+
+ multisample_mt = intel_miptree_create_for_renderbuffer(intel,
+ format,
+ region->width,
+ region->height,
+ num_samples);
+ if (!multisample_mt) {
+ intel_miptree_release(&singlesample_mt);
+ return NULL;
+ }
+
+ multisample_mt->singlesample_mt = singlesample_mt;
+ multisample_mt->need_downsample = false;
+
+ if (intel->is_front_buffer_rendering &&
+ (dri_attachment == __DRI_BUFFER_FRONT_LEFT ||
+ dri_attachment == __DRI_BUFFER_FAKE_FRONT_LEFT)) {
+ intel_miptree_upsample(intel, multisample_mt);
+ }
+
+ return multisample_mt;
+}
+
struct intel_mipmap_tree*
intel_miptree_create_for_renderbuffer(struct intel_context *intel,
gl_format format,
uint32_t width,
- uint32_t height)
+ uint32_t height,
+ uint32_t num_samples)
{
struct intel_mipmap_tree *mt;
+ uint32_t depth = 1;
+ bool ok;
mt = intel_miptree_create(intel, GL_TEXTURE_2D, format, 0, 0,
- width, height, 1, true);
+ width, height, depth, true, num_samples,
+ false /* force_y_tiling */);
+ if (!mt)
+ goto fail;
+
+ if (intel->vtbl.is_hiz_depth_format(intel, format)) {
+ ok = intel_miptree_alloc_hiz(intel, mt, num_samples);
+ if (!ok)
+ goto fail;
+ }
+
+ if (mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS) {
+ ok = intel_miptree_alloc_mcs(intel, mt, num_samples);
+ if (!ok)
+ goto fail;
+ }
return mt;
+
+fail:
+ intel_miptree_release(&mt);
+ return NULL;
}
void
intel_region_release(&((*mt)->region));
intel_miptree_release(&(*mt)->stencil_mt);
intel_miptree_release(&(*mt)->hiz_mt);
+ intel_miptree_release(&(*mt)->mcs_mt);
+ intel_miptree_release(&(*mt)->singlesample_mt);
intel_resolve_map_clear(&(*mt)->hiz_map);
for (i = 0; i < MAX_TEXTURE_LEVELS; i++) {
GLuint level = intelImage->base.Base.Level;
int width, height, depth;
- if (target_to_target(image->TexObject->Target) != mt->target)
- return false;
+ /* glTexImage* choose the texture object based on the target passed in, and
+ * objects can't change targets over their lifetimes, so this should be
+ * true.
+ */
+ assert(target_to_target(image->TexObject->Target) == mt->target);
+
+ gl_format mt_format = mt->format;
+ if (mt->format == MESA_FORMAT_X8_Z24 && mt->stencil_mt)
+ mt_format = MESA_FORMAT_S8_Z24;
+ if (mt->format == MESA_FORMAT_Z32_FLOAT && mt->stencil_mt)
+ mt_format = MESA_FORMAT_Z32_FLOAT_X24S8;
+ if (mt->etc_format != MESA_FORMAT_NONE)
+ mt_format = mt->etc_format;
- if (image->TexFormat != mt->format &&
- !(image->TexFormat == MESA_FORMAT_S8_Z24 &&
- mt->format == MESA_FORMAT_X8_Z24 &&
- mt->stencil_mt)) {
+ if (image->TexFormat != mt_format)
return false;
- }
intel_miptree_get_dimensions_for_image(image, &width, &height, &depth);
* minification. This will also catch images not present in the
* tree, changed targets, etc.
*/
- if (width != mt->level[level].width ||
- height != mt->level[level].height ||
- depth != mt->level[level].depth)
+ if (mt->target == GL_TEXTURE_2D_MULTISAMPLE ||
+ mt->target == GL_TEXTURE_2D_MULTISAMPLE_ARRAY) {
+ /* nonzero level here is always bogus */
+ assert(level == 0);
+
+ if (width != mt->logical_width0 ||
+ height != mt->logical_height0 ||
+ depth != mt->logical_depth0) {
+ return false;
+ }
+ }
+ else {
+ /* all normal textures, renderbuffers, etc */
+ if (width != mt->level[level].width ||
+ height != mt->level[level].height ||
+ depth != mt->level[level].depth) {
+ return false;
+ }
+ }
+
+ if (image->NumSamples != mt->num_samples)
return false;
return true;
mt->level[level].slice[img].y_offset);
}
-
-/**
- * For cube map textures, either the \c face parameter can be used, of course,
- * or the cube face can be interpreted as a depth layer and the \c layer
- * parameter used.
- */
void
intel_miptree_get_image_offset(struct intel_mipmap_tree *mt,
- GLuint level, GLuint face, GLuint layer,
+ GLuint level, GLuint slice,
GLuint *x, GLuint *y)
{
- int slice;
+ assert(slice < mt->level[level].depth);
- if (face > 0) {
- assert(mt->target == GL_TEXTURE_CUBE_MAP);
- assert(face < 6);
- assert(layer == 0);
- slice = face;
+ *x = mt->level[level].slice[slice].x_offset;
+ *y = mt->level[level].slice[slice].y_offset;
+}
+
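+/**
+ * Like intel_miptree_get_image_offset(), but return only the portion of the
+ * (x, y) offset that lies within the slice's tile, with the tile-aligned
+ * part masked off.
+ */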
+void
+intel_miptree_get_tile_offsets(struct intel_mipmap_tree *mt,
+ GLuint level, GLuint slice,
+ uint32_t *tile_x,
+ uint32_t *tile_y)
+{
+ struct intel_region *region = mt->region;
+ uint32_t mask_x, mask_y;
+
+ intel_region_get_tile_masks(region, &mask_x, &mask_y, false);
+
+ *tile_x = mt->level[level].slice[slice].x_offset & mask_x;
+ *tile_y = mt->level[level].slice[slice].y_offset & mask_y;
+}
+
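+/**
+ * CPU fallback for copying a single slice: map both miptrees directly
+ * (BRW_MAP_DIRECT_BIT) and copy the data row by row. Separate stencil data
+ * is handled by the recursive call at the end.
+ */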
+static void
+intel_miptree_copy_slice_sw(struct intel_context *intel,
+ struct intel_mipmap_tree *dst_mt,
+ struct intel_mipmap_tree *src_mt,
+ int level,
+ int slice,
+ int width,
+ int height)
+{
+ void *src, *dst;
+ int src_stride, dst_stride;
+ int cpp = dst_mt->cpp;
+
+ intel_miptree_map(intel, src_mt,
+ level, slice,
+ 0, 0,
+ width, height,
+ GL_MAP_READ_BIT | BRW_MAP_DIRECT_BIT,
+ &src, &src_stride);
+
+ intel_miptree_map(intel, dst_mt,
+ level, slice,
+ 0, 0,
+ width, height,
+ GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_RANGE_BIT |
+ BRW_MAP_DIRECT_BIT,
+ &dst, &dst_stride);
+
+ DBG("sw blit %s mt %p %p/%d -> %s mt %p %p/%d (%dx%d)\n",
+ _mesa_get_format_name(src_mt->format),
+ src_mt, src, src_stride,
+ _mesa_get_format_name(dst_mt->format),
+ dst_mt, dst, dst_stride,
+ width, height);
+
+ int row_size = cpp * width;
+ if (src_stride == row_size &&
+ dst_stride == row_size) {
+ memcpy(dst, src, row_size * height);
} else {
- /* This branch may be taken even if the texture target is a cube map. In
- * that case, the caller chose to interpret each cube face as a layer.
- */
- assert(face == 0);
- slice = layer;
+ for (int i = 0; i < height; i++) {
+ memcpy(dst, src, row_size);
+ dst += dst_stride;
+ src += src_stride;
+ }
}
- *x = mt->level[level].slice[slice].x_offset;
- *y = mt->level[level].slice[slice].y_offset;
+ intel_miptree_unmap(intel, dst_mt, level, slice);
+ intel_miptree_unmap(intel, src_mt, level, slice);
+
+ /* Don't forget to copy the stencil data over, too. We could have skipped
+ * passing BRW_MAP_DIRECT_BIT, but that would have meant intel_miptree_map
+ * shuffling the two data sources in/out of temporary storage instead of
+ * the direct mapping we get this way.
+ */
+ if (dst_mt->stencil_mt) {
+ assert(src_mt->stencil_mt);
+ intel_miptree_copy_slice_sw(intel, dst_mt->stencil_mt, src_mt->stencil_mt,
+ level, slice, width, height);
+ }
}
static void
gl_format format = src_mt->format;
uint32_t width = src_mt->level[level].width;
uint32_t height = src_mt->level[level].height;
+ int slice;
+
+ if (face > 0)
+ slice = face;
+ else
+ slice = depth;
assert(depth < src_mt->level[level].depth);
+ assert(src_mt->format == dst_mt->format);
if (dst_mt->compressed) {
height = ALIGN(height, dst_mt->align_h) / dst_mt->align_h;
width = ALIGN(width, dst_mt->align_w);
}
+ /* If it's a packed depth/stencil buffer with separate stencil, the blit
+ * below won't apply since we can't do the depth's Y tiling or the
+ * stencil's W tiling in the blitter.
+ */
+ if (src_mt->stencil_mt) {
+ intel_miptree_copy_slice_sw(intel,
+ dst_mt, src_mt,
+ level, slice,
+ width, height);
+ return;
+ }
+
uint32_t dst_x, dst_y, src_x, src_y;
- intel_miptree_get_image_offset(dst_mt, level, face, depth,
- &dst_x, &dst_y);
- intel_miptree_get_image_offset(src_mt, level, face, depth,
- &src_x, &src_y);
-
- DBG("validate blit mt %p %d,%d/%d -> mt %p %d,%d/%d (%dx%d)\n",
- src_mt, src_x, src_y, src_mt->region->pitch * src_mt->region->cpp,
- dst_mt, dst_x, dst_y, dst_mt->region->pitch * dst_mt->region->cpp,
+ intel_miptree_get_image_offset(dst_mt, level, slice, &dst_x, &dst_y);
+ intel_miptree_get_image_offset(src_mt, level, slice, &src_x, &src_y);
+
+ DBG("validate blit mt %s %p %d,%d/%d -> mt %s %p %d,%d/%d (%dx%d)\n",
+ _mesa_get_format_name(src_mt->format),
+ src_mt, src_x, src_y, src_mt->region->pitch,
+ _mesa_get_format_name(dst_mt->format),
+ dst_mt, dst_x, dst_y, dst_mt->region->pitch,
width, height);
if (!intelEmitCopyBlit(intel,
width, height,
GL_COPY)) {
- fallback_debug("miptree validate blit for %s failed\n",
- _mesa_get_format_name(format));
- void *dst = intel_region_map(intel, dst_mt->region, GL_MAP_WRITE_BIT);
- void *src = intel_region_map(intel, src_mt->region, GL_MAP_READ_BIT);
-
- _mesa_copy_rect(dst,
- dst_mt->cpp,
- dst_mt->region->pitch,
- dst_x, dst_y,
- width, height,
- src, src_mt->region->pitch,
- src_x, src_y);
-
- intel_region_unmap(intel, dst_mt->region);
- intel_region_unmap(intel, src_mt->region);
- }
+ perf_debug("miptree validate blit for %s failed\n",
+ _mesa_get_format_name(format));
- if (src_mt->stencil_mt) {
- intel_miptree_copy_slice(intel,
- dst_mt->stencil_mt, src_mt->stencil_mt,
- level, face, depth);
+ intel_miptree_copy_slice_sw(intel, dst_mt, src_mt, level, slice,
+ width, height);
}
}
/**
* Copies the image's current data to the given miptree, and associates that
* miptree with the image.
+ *
+ * If \c invalidate is true, then the actual image data does not need to be
+ * copied, but the image still needs to be associated with the new miptree (this
+ * is set to true if we're about to clear the image).
*/
void
intel_miptree_copy_teximage(struct intel_context *intel,
struct intel_texture_image *intelImage,
- struct intel_mipmap_tree *dst_mt)
+ struct intel_mipmap_tree *dst_mt,
+ bool invalidate)
{
struct intel_mipmap_tree *src_mt = intelImage->mt;
+ struct intel_texture_object *intel_obj =
+ intel_texture_object(intelImage->base.Base.TexObject);
int level = intelImage->base.Base.Level;
int face = intelImage->base.Base.Face;
GLuint depth = intelImage->base.Base.Depth;
- for (int slice = 0; slice < depth; slice++) {
- intel_miptree_copy_slice(intel, dst_mt, src_mt, level, face, slice);
+ if (!invalidate) {
+ for (int slice = 0; slice < depth; slice++) {
+ intel_miptree_copy_slice(intel, dst_mt, src_mt, level, face, slice);
+ }
}
intel_miptree_reference(&intelImage->mt, dst_mt);
+ intel_obj->needs_validate = true;
+}
+
+bool
+intel_miptree_alloc_mcs(struct intel_context *intel,
+ struct intel_mipmap_tree *mt,
+ GLuint num_samples)
+{
+ assert(mt->mcs_mt == NULL);
+ assert(intel->gen >= 7); /* MCS only used on Gen7+ */
+
+ /* Choose the correct format for the MCS buffer. All that really matters
+ * is that we allocate the right buffer size, since we'll always be
+ * accessing this miptree using MCS-specific hardware mechanisms, which
+ * infer the correct format based on num_samples.
+ */
+ gl_format format;
+ switch (num_samples) {
+ case 4:
+ /* 8 bits/pixel are required for MCS data when using 4x MSAA (2 bits for
+ * each sample).
+ */
+ format = MESA_FORMAT_R8;
+ break;
+ case 8:
+ /* 32 bits/pixel are required for MCS data when using 8x MSAA (3 bits
+ * for each sample, plus 8 padding bits).
+ */
+ format = MESA_FORMAT_R_UINT32;
+ break;
+ default:
+ assert(!"Unrecognized sample count in intel_miptree_alloc_mcs");
+ return false;
+ };
+
+ /* From the Ivy Bridge PRM, Vol4 Part1 p76, "MCS Base Address":
+ *
+ * "The MCS surface must be stored as Tile Y."
+ */
+ mt->mcs_mt = intel_miptree_create(intel,
+ mt->target,
+ format,
+ mt->first_level,
+ mt->last_level,
+ mt->logical_width0,
+ mt->logical_height0,
+ mt->logical_depth0,
+ true,
+ 0 /* num_samples */,
+ true /* force_y_tiling */);
+
+ /* From the Ivy Bridge PRM, Vol 2 Part 1 p326:
+ *
+ * When MCS buffer is enabled and bound to MSRT, it is required that it
+ * is cleared prior to any rendering.
+ *
+ * Since we don't use the MCS buffer for any purpose other than rendering,
+ * it makes sense to just clear it immediately upon allocation.
+ *
+ * Note: the clear value for MCS buffers is all 1's, so we memset to 0xff.
+ */
+ void *data = intel_miptree_map_raw(intel, mt->mcs_mt);
+ memset(data, 0xff, mt->mcs_mt->region->bo->size);
+ intel_miptree_unmap_raw(intel, mt->mcs_mt);
+
+ return mt->mcs_mt != NULL;
}
bool
intel_miptree_alloc_hiz(struct intel_context *intel,
- struct intel_mipmap_tree *mt)
+ struct intel_mipmap_tree *mt,
+ GLuint num_samples)
{
assert(mt->hiz_mt == NULL);
mt->hiz_mt = intel_miptree_create(intel,
mt->target,
- MESA_FORMAT_X8_Z24,
+ mt->format,
mt->first_level,
mt->last_level,
- mt->width0,
- mt->height0,
- mt->depth0,
- true);
+ mt->logical_width0,
+ mt->logical_height0,
+ mt->logical_depth0,
+ true,
+ num_samples,
+ false /* force_y_tiling */);
if (!mt->hiz_mt)
return false;
head->level = level;
head->layer = layer;
- head->need = INTEL_NEED_HIZ_RESOLVE;
+ head->need = GEN6_HIZ_OP_HIZ_RESOLVE;
}
}
return;
intel_resolve_map_set(&mt->hiz_map,
- level, layer, INTEL_NEED_HIZ_RESOLVE);
+ level, layer, GEN6_HIZ_OP_HIZ_RESOLVE);
}
return;
intel_resolve_map_set(&mt->hiz_map,
- level, layer, INTEL_NEED_DEPTH_RESOLVE);
+ level, layer, GEN6_HIZ_OP_DEPTH_RESOLVE);
}
-typedef void (*resolve_func_t)(struct intel_context *intel,
- struct intel_mipmap_tree *mt,
- uint32_t level,
- uint32_t layer);
-
static bool
intel_miptree_slice_resolve(struct intel_context *intel,
struct intel_mipmap_tree *mt,
uint32_t level,
uint32_t layer,
- enum intel_need_resolve need,
- resolve_func_t func)
+ enum gen6_hiz_op need)
{
intel_miptree_check_level_layer(mt, level, layer);
if (!item || item->need != need)
return false;
- func(intel, mt, level, layer);
+ intel_hiz_exec(intel, mt, level, layer, need);
intel_resolve_map_remove(item);
return true;
}
uint32_t layer)
{
return intel_miptree_slice_resolve(intel, mt, level, layer,
- INTEL_NEED_HIZ_RESOLVE,
- intel->vtbl.resolve_hiz_slice);
+ GEN6_HIZ_OP_HIZ_RESOLVE);
}
bool
uint32_t layer)
{
return intel_miptree_slice_resolve(intel, mt, level, layer,
- INTEL_NEED_DEPTH_RESOLVE,
- intel->vtbl.resolve_depth_slice);
+ GEN6_HIZ_OP_DEPTH_RESOLVE);
}
static bool
intel_miptree_all_slices_resolve(struct intel_context *intel,
struct intel_mipmap_tree *mt,
- enum intel_need_resolve need,
- resolve_func_t func)
+ enum gen6_hiz_op need)
{
bool did_resolve = false;
struct intel_resolve_map *i, *next;
next = i->next;
if (i->need != need)
continue;
- func(intel, mt, i->level, i->layer);
+
+ intel_hiz_exec(intel, mt, i->level, i->layer, need);
intel_resolve_map_remove(i);
did_resolve = true;
}
struct intel_mipmap_tree *mt)
{
return intel_miptree_all_slices_resolve(intel, mt,
- INTEL_NEED_HIZ_RESOLVE,
- intel->vtbl.resolve_hiz_slice);
+ GEN6_HIZ_OP_HIZ_RESOLVE);
}
bool
struct intel_mipmap_tree *mt)
{
return intel_miptree_all_slices_resolve(intel, mt,
- INTEL_NEED_DEPTH_RESOLVE,
- intel->vtbl.resolve_depth_slice);
+ GEN6_HIZ_OP_DEPTH_RESOLVE);
+}
+
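+/**
+ * Blit slice 0 between a multisample miptree and its singlesample
+ * counterpart (in either direction) using BLORP. This is compiled out for
+ * i915, which has no MSAA support.
+ */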
+static void
+intel_miptree_updownsample(struct intel_context *intel,
+ struct intel_mipmap_tree *src,
+ struct intel_mipmap_tree *dst,
+ unsigned width,
+ unsigned height)
+{
+#ifndef I915
+ int src_x0 = 0;
+ int src_y0 = 0;
+ int dst_x0 = 0;
+ int dst_y0 = 0;
+
+ intel_miptree_slice_resolve_depth(intel, src, 0, 0);
+ intel_miptree_slice_resolve_depth(intel, dst, 0, 0);
+
+ brw_blorp_blit_miptrees(intel,
+ src, 0 /* level */, 0 /* layer */,
+ dst, 0 /* level */, 0 /* layer */,
+ src_x0, src_y0,
+ dst_x0, dst_y0,
+ width, height,
+ false, false /*mirror x, y*/);
+
+ if (src->stencil_mt) {
+ brw_blorp_blit_miptrees(intel,
+ src->stencil_mt, 0 /* level */, 0 /* layer */,
+ dst->stencil_mt, 0 /* level */, 0 /* layer */,
+ src_x0, src_y0,
+ dst_x0, dst_y0,
+ width, height,
+ false, false /*mirror x, y*/);
+ }
+#endif /* I915 */
+}
+
+static void
+assert_is_flat(struct intel_mipmap_tree *mt)
+{
+ assert(mt->target == GL_TEXTURE_2D);
+ assert(mt->first_level == 0);
+ assert(mt->last_level == 0);
+}
+
+/**
+ * \brief Downsample from mt to mt->singlesample_mt.
+ *
+ * If the miptree needs no downsample, then skip.
+ */
+void
+intel_miptree_downsample(struct intel_context *intel,
+ struct intel_mipmap_tree *mt)
+{
+ /* Only flat, renderbuffer-like miptrees are supported. */
+ assert_is_flat(mt);
+
+ if (!mt->need_downsample)
+ return;
+ intel_miptree_updownsample(intel,
+ mt, mt->singlesample_mt,
+ mt->logical_width0,
+ mt->logical_height0);
+ mt->need_downsample = false;
+
+ /* Strictly speaking, after a downsample on a depth miptree, a hiz
+ * resolve is needed on the singlesample miptree. However, since the
+ * singlesample miptree is never rendered to, the hiz resolve will never
+ * occur. Therefore we do not mark the needed hiz resolve after
+ * downsampling.
+ */
+}
+
+/**
+ * \brief Upsample from mt->singlesample_mt to mt.
+ *
+ * The upsample is done unconditionally.
+ */
+void
+intel_miptree_upsample(struct intel_context *intel,
+ struct intel_mipmap_tree *mt)
+{
+ /* Only flat, renderbuffer-like miptrees are supported. */
+ assert_is_flat(mt);
+ assert(!mt->need_downsample);
+
+ intel_miptree_updownsample(intel,
+ mt->singlesample_mt, mt,
+ mt->logical_width0,
+ mt->logical_height0);
+ intel_miptree_slice_set_needs_hiz_resolve(mt, 0, 0);
+}
+
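+/**
+ * Map the miptree's backing BO in its entirety, flushing the batchbuffer
+ * first. Tiled buffers are mapped through the GTT so the CPU sees a linear
+ * view of the data.
+ */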
+void *
+intel_miptree_map_raw(struct intel_context *intel, struct intel_mipmap_tree *mt)
+{
+ drm_intel_bo *bo = mt->region->bo;
+
+ if (unlikely(INTEL_DEBUG & DEBUG_PERF)) {
+ if (drm_intel_bo_busy(bo)) {
+ perf_debug("Mapping a busy BO, causing a stall on the GPU.\n");
+ }
+ }
+
+ intel_flush(&intel->ctx);
+
+ if (mt->region->tiling != I915_TILING_NONE)
+ drm_intel_gem_bo_map_gtt(bo);
+ else
+ drm_intel_bo_map(bo, true);
+
+ return bo->virtual;
+}
+
+void
+intel_miptree_unmap_raw(struct intel_context *intel,
+ struct intel_mipmap_tree *mt)
+{
+ drm_intel_bo_unmap(mt->region->bo);
}
static void
assert(y % bh == 0);
y /= bh;
- base = intel_region_map(intel, mt->region, map->mode);
- /* Note that in the case of cube maps, the caller must have passed the slice
- * number referencing the face.
- */
- intel_miptree_get_image_offset(mt, level, 0, slice, &image_x, &image_y);
- x += image_x;
- y += image_y;
+ base = intel_miptree_map_raw(intel, mt) + mt->offset;
- map->stride = mt->region->pitch * mt->cpp;
- map->ptr = base + y * map->stride + x * mt->cpp;
+ if (base == NULL)
+ map->ptr = NULL;
+ else {
+ /* Note that in the case of cube maps, the caller must have passed the
+ * slice number referencing the face.
+ */
+ intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);
+ x += image_x;
+ y += image_y;
+
+ map->stride = mt->region->pitch;
+ map->ptr = base + y * map->stride + x * mt->cpp;
+ }
DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __FUNCTION__,
map->x, map->y, map->w, map->h,
unsigned int level,
unsigned int slice)
{
- intel_region_unmap(intel, mt->region);
+ intel_miptree_unmap_raw(intel, mt);
}
static void
goto fail;
}
- intel_miptree_get_image_offset(mt, level, 0, slice, &image_x, &image_y);
+ intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);
x += image_x;
y += image_y;
if (!intelEmitCopyBlit(intel,
mt->region->cpp,
mt->region->pitch, mt->region->bo,
- 0, mt->region->tiling,
- map->stride / mt->region->cpp, map->bo,
+ mt->offset, mt->region->tiling,
+ map->stride, map->bo,
0, I915_TILING_NONE,
x, y,
0, 0,
unsigned int level,
unsigned int slice)
{
- assert(!(map->mode & GL_MAP_WRITE_BIT));
-
+ struct gl_context *ctx = &intel->ctx;
drm_intel_bo_unmap(map->bo);
+
+ if (map->mode & GL_MAP_WRITE_BIT) {
+ unsigned int image_x, image_y;
+ int x = map->x;
+ int y = map->y;
+ intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);
+ x += image_x;
+ y += image_y;
+
+ bool ok = intelEmitCopyBlit(intel,
+ mt->region->cpp,
+ map->stride, map->bo,
+ 0, I915_TILING_NONE,
+ mt->region->pitch, mt->region->bo,
+ mt->offset, mt->region->tiling,
+ 0, 0,
+ x, y,
+ map->w, map->h,
+ GL_COPY);
+ WARN_ONCE(!ok, "Failed to blit from linear temporary mapping");
+ }
+
drm_intel_bo_unreference(map->bo);
}
*/
if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
uint8_t *untiled_s8_map = map->ptr;
- uint8_t *tiled_s8_map = intel_region_map(intel, mt->region,
- GL_MAP_READ_BIT);
+ uint8_t *tiled_s8_map = intel_miptree_map_raw(intel, mt);
unsigned int image_x, image_y;
- intel_miptree_get_image_offset(mt, level, 0, slice, &image_x, &image_y);
+ intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);
for (uint32_t y = 0; y < map->h; y++) {
for (uint32_t x = 0; x < map->w; x++) {
ptrdiff_t offset = intel_offset_S8(mt->region->pitch,
x + image_x + map->x,
- y + image_y + map->y);
+ y + image_y + map->y,
+ intel->has_swizzling);
untiled_s8_map[y * map->w + x] = tiled_s8_map[offset];
}
}
- intel_region_unmap(intel, mt->region);
+ intel_miptree_unmap_raw(intel, mt);
DBG("%s: %d,%d %dx%d from mt %p %d,%d = %p/%d\n", __FUNCTION__,
map->x, map->y, map->w, map->h,
if (map->mode & GL_MAP_WRITE_BIT) {
unsigned int image_x, image_y;
uint8_t *untiled_s8_map = map->ptr;
- uint8_t *tiled_s8_map = intel_region_map(intel, mt->region, map->mode);
+ uint8_t *tiled_s8_map = intel_miptree_map_raw(intel, mt);
- intel_miptree_get_image_offset(mt, level, 0, slice, &image_x, &image_y);
+ intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);
for (uint32_t y = 0; y < map->h; y++) {
for (uint32_t x = 0; x < map->w; x++) {
ptrdiff_t offset = intel_offset_S8(mt->region->pitch,
x + map->x,
- y + map->y);
+ y + map->y,
+ intel->has_swizzling);
tiled_s8_map[offset] = untiled_s8_map[y * map->w + x];
}
}
- intel_region_unmap(intel, mt->region);
+ intel_miptree_unmap_raw(intel, mt);
}
free(map->buffer);
}
+static void
+intel_miptree_map_etc(struct intel_context *intel,
+ struct intel_mipmap_tree *mt,
+ struct intel_miptree_map *map,
+ unsigned int level,
+ unsigned int slice)
+{
+ assert(mt->etc_format != MESA_FORMAT_NONE);
+ if (mt->etc_format == MESA_FORMAT_ETC1_RGB8) {
+ assert(mt->format == MESA_FORMAT_RGBX8888_REV);
+ }
+
+ assert(map->mode & GL_MAP_WRITE_BIT);
+ assert(map->mode & GL_MAP_INVALIDATE_RANGE_BIT);
+
+ map->stride = _mesa_format_row_stride(mt->etc_format, map->w);
+ map->buffer = malloc(_mesa_format_image_size(mt->etc_format,
+ map->w, map->h, 1));
+ map->ptr = map->buffer;
+}
+
+static void
+intel_miptree_unmap_etc(struct intel_context *intel,
+ struct intel_mipmap_tree *mt,
+ struct intel_miptree_map *map,
+ unsigned int level,
+ unsigned int slice)
+{
+ uint32_t image_x;
+ uint32_t image_y;
+ intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);
+
+ image_x += map->x;
+ image_y += map->y;
+
+ uint8_t *dst = intel_miptree_map_raw(intel, mt)
+ + image_y * mt->region->pitch
+ + image_x * mt->region->cpp;
+
+ if (mt->etc_format == MESA_FORMAT_ETC1_RGB8)
+ _mesa_etc1_unpack_rgba8888(dst, mt->region->pitch,
+ map->ptr, map->stride,
+ map->w, map->h);
+ else
+ _mesa_unpack_etc2_format(dst, mt->region->pitch,
+ map->ptr, map->stride,
+ map->w, map->h, mt->etc_format);
+
+ intel_miptree_unmap_raw(intel, mt);
+ free(map->buffer);
+}
+
/**
* Mapping function for packed depth/stencil miptrees backed by real separate
* miptrees for depth and stencil.
*/
if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
uint32_t *packed_map = map->ptr;
- uint8_t *s_map = intel_region_map(intel, s_mt->region, GL_MAP_READ_BIT);
- uint32_t *z_map = intel_region_map(intel, z_mt->region, GL_MAP_READ_BIT);
+ uint8_t *s_map = intel_miptree_map_raw(intel, s_mt);
+ uint32_t *z_map = intel_miptree_map_raw(intel, z_mt);
unsigned int s_image_x, s_image_y;
unsigned int z_image_x, z_image_y;
- intel_miptree_get_image_offset(s_mt, level, 0, slice,
+ intel_miptree_get_image_offset(s_mt, level, slice,
&s_image_x, &s_image_y);
- intel_miptree_get_image_offset(z_mt, level, 0, slice,
+ intel_miptree_get_image_offset(z_mt, level, slice,
&z_image_x, &z_image_y);
for (uint32_t y = 0; y < map->h; y++) {
int map_x = map->x + x, map_y = map->y + y;
ptrdiff_t s_offset = intel_offset_S8(s_mt->region->pitch,
map_x + s_image_x,
- map_y + s_image_y);
- ptrdiff_t z_offset = ((map_y + z_image_y) * z_mt->region->pitch +
+ map_y + s_image_y,
+ intel->has_swizzling);
+ ptrdiff_t z_offset = ((map_y + z_image_y) *
+ (z_mt->region->pitch / 4) +
(map_x + z_image_x));
uint8_t s = s_map[s_offset];
uint32_t z = z_map[z_offset];
}
}
- intel_region_unmap(intel, s_mt->region);
- intel_region_unmap(intel, z_mt->region);
+ intel_miptree_unmap_raw(intel, s_mt);
+ intel_miptree_unmap_raw(intel, z_mt);
DBG("%s: %d,%d %dx%d from z mt %p %d,%d, s mt %p %d,%d = %p/%d\n",
__FUNCTION__,
if (map->mode & GL_MAP_WRITE_BIT) {
uint32_t *packed_map = map->ptr;
- uint8_t *s_map = intel_region_map(intel, s_mt->region, map->mode);
- uint32_t *z_map = intel_region_map(intel, z_mt->region, map->mode);
+ uint8_t *s_map = intel_miptree_map_raw(intel, s_mt);
+ uint32_t *z_map = intel_miptree_map_raw(intel, z_mt);
unsigned int s_image_x, s_image_y;
unsigned int z_image_x, z_image_y;
- intel_miptree_get_image_offset(s_mt, level, 0, slice,
+ intel_miptree_get_image_offset(s_mt, level, slice,
&s_image_x, &s_image_y);
- intel_miptree_get_image_offset(z_mt, level, 0, slice,
+ intel_miptree_get_image_offset(z_mt, level, slice,
&z_image_x, &z_image_y);
for (uint32_t y = 0; y < map->h; y++) {
for (uint32_t x = 0; x < map->w; x++) {
ptrdiff_t s_offset = intel_offset_S8(s_mt->region->pitch,
x + s_image_x + map->x,
- y + s_image_y + map->y);
- ptrdiff_t z_offset = ((y + z_image_y) * z_mt->region->pitch +
+ y + s_image_y + map->y,
+ intel->has_swizzling);
+ ptrdiff_t z_offset = ((y + z_image_y) *
+ (z_mt->region->pitch / 4) +
(x + z_image_x));
if (map_z32f_x24s8) {
}
}
- intel_region_unmap(intel, s_mt->region);
- intel_region_unmap(intel, z_mt->region);
+ intel_miptree_unmap_raw(intel, s_mt);
+ intel_miptree_unmap_raw(intel, z_mt);
DBG("%s: %d,%d %dx%d from z mt %p (%s) %d,%d, s mt %p %d,%d = %p/%d\n",
__FUNCTION__,
free(map->buffer);
}
-void
-intel_miptree_map(struct intel_context *intel,
- struct intel_mipmap_tree *mt,
- unsigned int level,
- unsigned int slice,
- unsigned int x,
- unsigned int y,
- unsigned int w,
- unsigned int h,
- GLbitfield mode,
- void **out_ptr,
- int *out_stride)
+/**
+ * Create and attach a map to the miptree at (level, slice). Return the
+ * attached map.
+ */
+static struct intel_miptree_map*
+intel_miptree_attach_map(struct intel_mipmap_tree *mt,
+ unsigned int level,
+ unsigned int slice,
+ unsigned int x,
+ unsigned int y,
+ unsigned int w,
+ unsigned int h,
+ GLbitfield mode)
{
- struct intel_miptree_map *map;
+ struct intel_miptree_map *map = calloc(1, sizeof(*map));
- map = calloc(1, sizeof(struct intel_miptree_map));
- if (!map){
- *out_ptr = NULL;
- *out_stride = 0;
- return;
- }
+ if (!map)
+ return NULL;
- assert(!mt->level[level].slice[slice].map);
+ assert(mt->level[level].slice[slice].map == NULL);
mt->level[level].slice[slice].map = map;
+
map->mode = mode;
map->x = x;
map->y = y;
map->w = w;
map->h = h;
+ return map;
+}
+
+/**
+ * Release the map at (level, slice).
+ */
+static void
+intel_miptree_release_map(struct intel_mipmap_tree *mt,
+ unsigned int level,
+ unsigned int slice)
+{
+ struct intel_miptree_map **map;
+
+ map = &mt->level[level].slice[slice].map;
+ free(*map);
+ *map = NULL;
+}
+
+static void
+intel_miptree_map_singlesample(struct intel_context *intel,
+ struct intel_mipmap_tree *mt,
+ unsigned int level,
+ unsigned int slice,
+ unsigned int x,
+ unsigned int y,
+ unsigned int w,
+ unsigned int h,
+ GLbitfield mode,
+ void **out_ptr,
+ int *out_stride)
+{
+ struct intel_miptree_map *map;
+
+ /* Estimate the size of the mappable aperture into the GTT. There's an
+ * ioctl to get the whole GTT size, but not one to get the mappable subset.
+ * It turns out it's basically always 256MB, though some ancient hardware
+ * was smaller.
+ */
+ uint32_t gtt_size = 256 * 1024 * 1024;
+ if (intel->gen == 2)
+ gtt_size = 128 * 1024 * 1024;
+
+ /* We don't want to map two objects such that a memcpy between them would
+ * just fault one mapping in and then the other over and over forever. So
+ * we would need to divide the GTT size by 2. Additionally, some GTT is
+ * taken up by things like the framebuffer and the ringbuffer and such, so
+ * be more conservative.
+ */
+ uint32_t max_gtt_map_object_size = gtt_size / 4;
+
+ assert(mt->num_samples <= 1);
+
+ map = intel_miptree_attach_map(mt, level, slice, x, y, w, h, mode);
+ if (!map){
+ *out_ptr = NULL;
+ *out_stride = 0;
+ return;
+ }
+
intel_miptree_slice_resolve_depth(intel, mt, level, slice);
if (map->mode & GL_MAP_WRITE_BIT) {
intel_miptree_slice_set_needs_hiz_resolve(mt, level, slice);
if (mt->format == MESA_FORMAT_S8) {
intel_miptree_map_s8(intel, mt, map, level, slice);
- } else if (mt->stencil_mt) {
+ } else if (mt->etc_format != MESA_FORMAT_NONE &&
+ !(mode & BRW_MAP_DIRECT_BIT)) {
+ intel_miptree_map_etc(intel, mt, map, level, slice);
+ } else if (mt->stencil_mt && !(mode & BRW_MAP_DIRECT_BIT)) {
intel_miptree_map_depthstencil(intel, mt, map, level, slice);
- } else if (intel->gen >= 6 &&
- !(mode & GL_MAP_WRITE_BIT) &&
- !mt->compressed &&
- mt->region->tiling == I915_TILING_X) {
+ }
+ /* According to the Ivy Bridge PRM, Vol1 Part4, section 1.2.1.2 (Graphics
+ * Data Size Limitations):
+ *
+ * The BLT engine is capable of transferring very large quantities of
+ * graphics data. Any graphics data read from and written to the
+ * destination is permitted to represent a number of pixels that
+ * occupies up to 65,536 scan lines and up to 32,768 bytes per scan line
+ * at the destination. The maximum number of pixels that may be
+ * represented per scan line’s worth of graphics data depends on the
+ * color depth.
+ *
+ * Furthermore, intelEmitCopyBlit (which is called by
+ * intel_miptree_map_blit) uses a signed 16-bit integer to represent buffer
+ * pitch, so it can only handle buffer pitches < 32k.
+ *
+ * As a result of these two limitations, we can only use
+ * intel_miptree_map_blit() when the region's pitch is less than 32k.
+ */
+ else if (intel->has_llc &&
+ !(mode & GL_MAP_WRITE_BIT) &&
+ !mt->compressed &&
+ mt->region->tiling == I915_TILING_X &&
+ mt->region->pitch < 32768) {
+ intel_miptree_map_blit(intel, mt, map, level, slice);
+ } else if (mt->region->tiling != I915_TILING_NONE &&
+ mt->region->bo->size >= max_gtt_map_object_size) {
+ assert(mt->region->pitch < 32768);
intel_miptree_map_blit(intel, mt, map, level, slice);
} else {
intel_miptree_map_gtt(intel, mt, map, level, slice);
*out_ptr = map->ptr;
*out_stride = map->stride;
+
+ if (map->ptr == NULL)
+ intel_miptree_release_map(mt, level, slice);
}
-void
-intel_miptree_unmap(struct intel_context *intel,
- struct intel_mipmap_tree *mt,
- unsigned int level,
- unsigned int slice)
+static void
+intel_miptree_unmap_singlesample(struct intel_context *intel,
+ struct intel_mipmap_tree *mt,
+ unsigned int level,
+ unsigned int slice)
{
struct intel_miptree_map *map = mt->level[level].slice[slice].map;
+ assert(mt->num_samples <= 1);
+
if (!map)
return;
if (mt->format == MESA_FORMAT_S8) {
intel_miptree_unmap_s8(intel, mt, map, level, slice);
- } else if (mt->stencil_mt) {
+ } else if (mt->etc_format != MESA_FORMAT_NONE &&
+ !(map->mode & BRW_MAP_DIRECT_BIT)) {
+ intel_miptree_unmap_etc(intel, mt, map, level, slice);
+ } else if (mt->stencil_mt && !(map->mode & BRW_MAP_DIRECT_BIT)) {
intel_miptree_unmap_depthstencil(intel, mt, map, level, slice);
} else if (map->bo) {
intel_miptree_unmap_blit(intel, mt, map, level, slice);
intel_miptree_unmap_gtt(intel, mt, map, level, slice);
}
- mt->level[level].slice[slice].map = NULL;
- free(map);
+ intel_miptree_release_map(mt, level, slice);
+}
+
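+/**
+ * Map a multisample miptree by downsampling it into mt->singlesample_mt
+ * (allocating a temporary singlesample miptree if one is not already
+ * attached) and mapping that instead.
+ */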
+static void
+intel_miptree_map_multisample(struct intel_context *intel,
+ struct intel_mipmap_tree *mt,
+ unsigned int level,
+ unsigned int slice,
+ unsigned int x,
+ unsigned int y,
+ unsigned int w,
+ unsigned int h,
+ GLbitfield mode,
+ void **out_ptr,
+ int *out_stride)
+{
+ struct intel_miptree_map *map;
+
+ assert(mt->num_samples > 1);
+
+ /* Only flat, renderbuffer-like miptrees are supported. */
+ if (mt->target != GL_TEXTURE_2D ||
+ mt->first_level != 0 ||
+ mt->last_level != 0) {
+ _mesa_problem(&intel->ctx, "attempt to map a multisample miptree for "
+ "which (target, first_level, last_level != "
+ "(GL_TEXTURE_2D, 0, 0)");
+ goto fail;
+ }
+
+ map = intel_miptree_attach_map(mt, level, slice, x, y, w, h, mode);
+ if (!map)
+ goto fail;
+
+ if (!mt->singlesample_mt) {
+ mt->singlesample_mt =
+ intel_miptree_create_for_renderbuffer(intel,
+ mt->format,
+ mt->logical_width0,
+ mt->logical_height0,
+ 0 /*num_samples*/);
+ if (!mt->singlesample_mt)
+ goto fail;
+
+ map->singlesample_mt_is_tmp = true;
+ mt->need_downsample = true;
+ }
+
+ intel_miptree_downsample(intel, mt);
+ intel_miptree_map_singlesample(intel, mt->singlesample_mt,
+ level, slice,
+ x, y, w, h,
+ mode,
+ out_ptr, out_stride);
+ return;
+
+fail:
+ intel_miptree_release_map(mt, level, slice);
+ *out_ptr = NULL;
+ *out_stride = 0;
+}
+
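+/**
+ * Unmap a multisample miptree: unmap the singlesample miptree, upsample back
+ * into the multisample miptree if the mapping was writable, and release the
+ * singlesample miptree if it was only a temporary.
+ */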
+static void
+intel_miptree_unmap_multisample(struct intel_context *intel,
+ struct intel_mipmap_tree *mt,
+ unsigned int level,
+ unsigned int slice)
+{
+ struct intel_miptree_map *map = mt->level[level].slice[slice].map;
+
+ assert(mt->num_samples > 1);
+
+ if (!map)
+ return;
+
+ intel_miptree_unmap_singlesample(intel, mt->singlesample_mt, level, slice);
+
+ mt->need_downsample = false;
+ if (map->mode & GL_MAP_WRITE_BIT)
+ intel_miptree_upsample(intel, mt);
+
+ if (map->singlesample_mt_is_tmp)
+ intel_miptree_release(&mt->singlesample_mt);
+
+ intel_miptree_release_map(mt, level, slice);
+}
+
+void
+intel_miptree_map(struct intel_context *intel,
+ struct intel_mipmap_tree *mt,
+ unsigned int level,
+ unsigned int slice,
+ unsigned int x,
+ unsigned int y,
+ unsigned int w,
+ unsigned int h,
+ GLbitfield mode,
+ void **out_ptr,
+ int *out_stride)
+{
+ if (mt->num_samples <= 1)
+ intel_miptree_map_singlesample(intel, mt,
+ level, slice,
+ x, y, w, h,
+ mode,
+ out_ptr, out_stride);
+ else
+ intel_miptree_map_multisample(intel, mt,
+ level, slice,
+ x, y, w, h,
+ mode,
+ out_ptr, out_stride);
+}
+
+void
+intel_miptree_unmap(struct intel_context *intel,
+ struct intel_mipmap_tree *mt,
+ unsigned int level,
+ unsigned int slice)
+{
+ if (mt->num_samples <= 1)
+ intel_miptree_unmap_singlesample(intel, mt, level, slice);
+ else
+ intel_miptree_unmap_multisample(intel, mt, level, slice);
}