X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fmesa%2Fdrivers%2Fdri%2Fi965%2Fintel_mipmap_tree.c;h=87e0136693257598c56a37d6e35a09c7e3212e56;hb=179fc4aae8f782453f0488e8dd508f9a01117376;hp=e4d67b97420c475c09b6ebb3f936b7d7e355da59;hpb=db184d43b0573c00d911ef9e98fbaab26ebd6466;p=mesa.git diff --git a/src/mesa/drivers/dri/i965/intel_mipmap_tree.c b/src/mesa/drivers/dri/i965/intel_mipmap_tree.c index e4d67b97420..87e01366932 100644 --- a/src/mesa/drivers/dri/i965/intel_mipmap_tree.c +++ b/src/mesa/drivers/dri/i965/intel_mipmap_tree.c @@ -1,5 +1,4 @@ -/************************************************************************** - * +/* * Copyright 2006 VMware, Inc. * All Rights Reserved. * @@ -7,7 +6,7 @@ * copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sub license, and/or sell copies of the Software, and to + * distribute, sublicense, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * @@ -17,19 +16,17 @@ * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - * - **************************************************************************/ + */ #include #include #include "intel_batchbuffer.h" -#include "intel_chipset.h" #include "intel_mipmap_tree.h" #include "intel_resolve_map.h" #include "intel_tex.h" @@ -38,6 +35,7 @@ #include "brw_blorp.h" #include "brw_context.h" +#include "brw_state.h" #include "main/enums.h" #include "main/fbobject.h" @@ -50,12 +48,23 @@ #define FILE_DEBUG_FLAG DEBUG_MIPTREE +static void *intel_miptree_map_raw(struct brw_context *brw, + struct intel_mipmap_tree *mt); + +static void intel_miptree_unmap_raw(struct intel_mipmap_tree *mt); + +static bool +intel_miptree_alloc_mcs(struct brw_context *brw, + struct intel_mipmap_tree *mt, + GLuint num_samples); + /** * Determine which MSAA layout should be used by the MSAA surface being * created, based on the chip generation and the surface type. */ static enum intel_msaa_layout -compute_msaa_layout(struct brw_context *brw, mesa_format format, GLenum target) +compute_msaa_layout(struct brw_context *brw, mesa_format format, + bool disable_aux_buffers) { /* Prior to Gen7, all MSAA surfaces used IMS layout. */ if (brw->gen < 7) @@ -68,14 +77,6 @@ compute_msaa_layout(struct brw_context *brw, mesa_format format, GLenum target) case GL_DEPTH_STENCIL: return INTEL_MSAA_LAYOUT_IMS; default: - /* Disable MCS on Broadwell for now. We can enable it once things - * are working without it. 
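/* For reference, the Gen7+ selection logic above reduces to a short
 * decision ladder.  The helper below is an illustrative sketch only;
 * its name and flattened bool parameters are invented for the example,
 * while the real compute_msaa_layout() inspects brw->gen and the
 * mesa_format directly. */
static enum intel_msaa_layout
msaa_layout_sketch(int gen, bool depth_or_stencil,
                   bool gen7_integer_rt, bool disable_aux_buffers)
{
   if (gen < 7)
      return INTEL_MSAA_LAYOUT_IMS;   /* pre-Gen7: always interleaved */
   if (depth_or_stencil)
      return INTEL_MSAA_LAYOUT_IMS;   /* Gen7+: depth/stencil stay IMS */
   if (gen == 7 && gen7_integer_rt)
      return INTEL_MSAA_LAYOUT_UMS;   /* IVB integer-RT restriction: no MCS */
   if (disable_aux_buffers)
      return INTEL_MSAA_LAYOUT_UMS;   /* CMS is UMS plus an MCS aux buffer */
   return INTEL_MSAA_LAYOUT_CMS;
}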
- */ - if (brw->gen >= 8) { - perf_debug("Missing CMS support on Broadwell.\n"); - return INTEL_MSAA_LAYOUT_UMS; - } - /* From the Ivy Bridge PRM, Vol4 Part1 p77 ("MCS Enable"): * * This field must be set to 0 for all SINT MSRTs when all RT channels @@ -89,6 +90,11 @@ compute_msaa_layout(struct brw_context *brw, mesa_format format, GLenum target) */ if (brw->gen == 7 && _mesa_get_format_datatype(format) == GL_INT) { return INTEL_MSAA_LAYOUT_UMS; + } else if (disable_aux_buffers) { + /* We can't use the CMS layout because it uses an aux buffer, the MCS + * buffer. So fallback to UMS, which is identical to CMS without the + * MCS. */ + return INTEL_MSAA_LAYOUT_UMS; } else { return INTEL_MSAA_LAYOUT_CMS; } @@ -138,13 +144,12 @@ compute_msaa_layout(struct brw_context *brw, mesa_format format, GLenum target) * by half the block width, and Y coordinates by half the block height. */ void -intel_get_non_msrt_mcs_alignment(struct brw_context *brw, - struct intel_mipmap_tree *mt, +intel_get_non_msrt_mcs_alignment(struct intel_mipmap_tree *mt, unsigned *width_px, unsigned *height) { switch (mt->tiling) { default: - assert(!"Non-MSRT MCS requires X or Y tiling"); + unreachable("Non-MSRT MCS requires X or Y tiling"); /* In release builds, fall through */ case I915_TILING_Y: *width_px = 32 / mt->cpp; @@ -156,28 +161,58 @@ intel_get_non_msrt_mcs_alignment(struct brw_context *brw, } } +static bool +intel_tiling_supports_non_msrt_mcs(struct brw_context *brw, unsigned tiling) +{ + /* From the Ivy Bridge PRM, Vol2 Part1 11.7 "MCS Buffer for Render + * Target(s)", beneath the "Fast Color Clear" bullet (p326): + * + * - Support is limited to tiled render targets. + * + * Gen9 changes the restriction to Y-tile only. + */ + if (brw->gen >= 9) + return tiling == I915_TILING_Y; + else if (brw->gen >= 7) + return tiling != I915_TILING_NONE; + else + return false; +} /** * For a single-sampled render target ("non-MSRT"), determine if an MCS buffer - * can be used. + * can be used. This doesn't (and should not) inspect any of the properties of + * the miptree's BO. * * From the Ivy Bridge PRM, Vol2 Part1 11.7 "MCS Buffer for Render Target(s)", * beneath the "Fast Color Clear" bullet (p326): * - * - Support is limited to tiled render targets. * - Support is for non-mip-mapped and non-array surface types only. * * And then later, on p327: * * - MCS buffer for non-MSRT is supported only for RT formats 32bpp, * 64bpp, and 128bpp. + * + * From the Skylake documentation, it is made clear that X-tiling is no longer + * supported: + * + * - MCS and Lossless compression is supported for TiledY/TileYs/TileYf + * non-MSRTs only. */ -bool -intel_is_non_msrt_mcs_buffer_supported(struct brw_context *brw, - struct intel_mipmap_tree *mt) +static bool +intel_miptree_supports_non_msrt_fast_clear(struct brw_context *brw, + struct intel_mipmap_tree *mt) { /* MCS support does not exist prior to Gen7 */ - if (brw->gen < 7 || brw->gen >= 8) + if (brw->gen < 7) + return false; + + if (mt->disable_aux_buffers) + return false; + + /* This function applies only to non-multisampled render targets. 
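/* The split into a tiling predicate and a miptree predicate means that
 * callers must now test both halves; this hypothetical helper shows the
 * intended composition, which the patch spells out verbatim in
 * intel_miptree_create() and intel_update_winsys_renderbuffer_miptree()
 * further below. */
static bool
non_msrt_fast_clear_ok_sketch(struct brw_context *brw,
                              struct intel_mipmap_tree *mt)
{
   return intel_tiling_supports_non_msrt_mcs(brw, mt->tiling) &&
          intel_miptree_supports_non_msrt_fast_clear(brw, mt);
}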
*/ + if (mt->num_samples > 1) return false; /* MCS is only supported for color buffers */ @@ -188,15 +223,34 @@ intel_is_non_msrt_mcs_buffer_supported(struct brw_context *brw, return false; } - if (mt->tiling != I915_TILING_X && - mt->tiling != I915_TILING_Y) - return false; if (mt->cpp != 4 && mt->cpp != 8 && mt->cpp != 16) return false; - if (mt->first_level != 0 || mt->last_level != 0) + if (mt->first_level != 0 || mt->last_level != 0) { + if (brw->gen >= 8) { + perf_debug("Multi-LOD fast clear - giving up (%dx%dx%d).\n", + mt->logical_width0, mt->logical_height0, mt->last_level); + } + return false; - if (mt->physical_depth0 != 1) + } + + /* Check for layered surfaces. */ + if (mt->physical_depth0 != 1) { + /* Multisample surfaces with the CMS layout are not layered surfaces, + * yet still have physical_depth0 > 1. Assert that we don't + * accidentally reject a multisampled surface here. We should have + * rejected it earlier by explicitly checking the sample count. + */ + assert(mt->num_samples <= 1); + + if (brw->gen >= 8) { + perf_debug("Layered fast clear - giving up. (%dx%d%d)\n", + mt->logical_width0, mt->logical_height0, + mt->physical_depth0); + } + return false; + } /* There's no point in using an MCS buffer if the surface isn't in a * renderable format. @@ -204,7 +258,11 @@ intel_is_non_msrt_mcs_buffer_supported(struct brw_context *brw, if (!brw->format_supported_as_render_target[mt->format]) return false; - return true; + if (brw->gen >= 9) { + const uint32_t brw_format = brw_format_for_mesa_format(mt->format); + return brw_losslessly_compressible_format(brw, brw_format); + } else + return true; } @@ -230,7 +288,7 @@ intel_depth_format_for_depthstencil_format(mesa_format format) { * intel_miptree_create_for_bo(). If true, then do not create * \c stencil_mt. */ -struct intel_mipmap_tree * +static struct intel_mipmap_tree * intel_miptree_create_layout(struct brw_context *brw, GLenum target, mesa_format format, @@ -239,18 +297,38 @@ intel_miptree_create_layout(struct brw_context *brw, GLuint width0, GLuint height0, GLuint depth0, - bool for_bo, - GLuint num_samples) + GLuint num_samples, + uint32_t layout_flags) { struct intel_mipmap_tree *mt = calloc(sizeof(*mt), 1); if (!mt) return NULL; - DBG("%s target %s format %s level %d..%d slices %d <-- %p\n", __FUNCTION__, - _mesa_lookup_enum_by_nr(target), + DBG("%s target %s format %s level %d..%d slices %d <-- %p\n", __func__, + _mesa_enum_to_string(target), _mesa_get_format_name(format), first_level, last_level, depth0, mt); + if (target == GL_TEXTURE_1D_ARRAY) { + /* For a 1D Array texture the OpenGL API will treat the height0 + * parameter as the number of array slices. For Intel hardware, we treat + * the 1D array as a 2D Array with a height of 1. + * + * So, when we first come through this path to create a 1D Array + * texture, height0 stores the number of slices, and depth0 is 1. In + * this case, we want to swap height0 and depth0. + * + * Since some miptrees will be created based on the base miptree, we may + * come through this path and see height0 as 1 and depth0 being the + * number of slices. In this case we don't need to do the swap. 
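/* Worked example of the swap described above, with hypothetical values:
 * glTexStorage2D(GL_TEXTURE_1D_ARRAY, 1, GL_RGBA8, 256, 8) reaches this
 * function as width0=256, height0=8, depth0=1 and is re-described for
 * the hardware as width0=256, height0=1, depth0=8.  A miptree later
 * derived from that base re-enters with height0=1 and depth0=8 already
 * set, so the swap is skipped. */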
+ */ + assert(height0 == 1 || depth0 == 1); + if (height0 > 1) { + depth0 = height0; + height0 = 1; + } + } + mt->target = target; mt->format = format; mt->first_level = first_level; @@ -259,16 +337,9 @@ intel_miptree_create_layout(struct brw_context *brw, mt->logical_height0 = height0; mt->logical_depth0 = depth0; mt->fast_clear_state = INTEL_FAST_CLEAR_STATE_NO_MCS; + mt->disable_aux_buffers = (layout_flags & MIPTREE_LAYOUT_DISABLE_AUX) != 0; exec_list_make_empty(&mt->hiz_map); - - /* The cpp is bytes per (1, blockheight)-sized block for compressed - * textures. This is why you'll see divides by blockheight all over - */ - unsigned bw, bh; - _mesa_get_format_block_size(format, &bw, &bh); - assert(_mesa_get_format_bytes(mt->format) % bw == 0); - mt->cpp = _mesa_get_format_bytes(mt->format) / bw; - + mt->cpp = _mesa_get_format_bytes(format); mt->num_samples = num_samples; mt->compressed = _mesa_is_format_compressed(format); mt->msaa_layout = INTEL_MSAA_LAYOUT_NONE; @@ -276,9 +347,31 @@ intel_miptree_create_layout(struct brw_context *brw, if (num_samples > 1) { /* Adjust width/height/depth for MSAA */ - mt->msaa_layout = compute_msaa_layout(brw, format, mt->target); + mt->msaa_layout = compute_msaa_layout(brw, format, + mt->disable_aux_buffers); if (mt->msaa_layout == INTEL_MSAA_LAYOUT_IMS) { - /* In the Sandy Bridge PRM, volume 4, part 1, page 31, it says: + /* From the Ivybridge PRM, Volume 1, Part 1, page 108: + * "If the surface is multisampled and it is a depth or stencil + * surface or Multisampled Surface StorageFormat in SURFACE_STATE is + * MSFMT_DEPTH_STENCIL, WL and HL must be adjusted as follows before + * proceeding: + * + * +----------------------------------------------------------------+ + * | Num Multisamples | W_l = | H_l = | + * +----------------------------------------------------------------+ + * | 2 | ceiling(W_l / 2) * 4 | H_l (no adjustment) | + * | 4 | ceiling(W_l / 2) * 4 | ceiling(H_l / 2) * 4 | + * | 8 | ceiling(W_l / 2) * 8 | ceiling(H_l / 2) * 4 | + * | 16 | ceiling(W_l / 2) * 8 | ceiling(H_l / 2) * 8 | + * +----------------------------------------------------------------+ + * " + * + * Note that MSFMT_DEPTH_STENCIL just means the IMS (interleaved) + * format rather than UMS/CMS (array slices). The Sandybridge PRM, + * Volume 1, Part 1, Page 111 has the same formula for 4x MSAA. + * + * Another more complicated explanation for these adjustments comes + * from the Sandybridge PRM, volume 4, part 1, page 31: * * "Any of the other messages (sample*, LOD, load4) used with a * (4x) multisampled surface will in-effect sample a surface with @@ -326,11 +419,15 @@ intel_miptree_create_layout(struct brw_context *brw, width0 = ALIGN(width0, 2) * 4; height0 = ALIGN(height0, 2) * 2; break; + case 16: + width0 = ALIGN(width0, 2) * 4; + height0 = ALIGN(height0, 2) * 4; + break; default: - /* num_samples should already have been quantized to 0, 1, 2, 4, or - * 8. + /* num_samples should already have been quantized to 0, 1, 2, 4, 8 + * or 16. */ - assert(false); + unreachable("not reached"); } } else { /* Non-interleaved */ @@ -338,18 +435,29 @@ intel_miptree_create_layout(struct brw_context *brw, } } - /* array_spacing_lod0 is only used for non-IMS MSAA surfaces. TODO: can we - * use it elsewhere? + /* Set array_layout to ALL_SLICES_AT_EACH_LOD when array_spacing_lod0 can + * be used. array_spacing_lod0 is only used for non-IMS MSAA surfaces on + * Gen 7 and 8. 
On Gen 8 and 9 this layout is not available but it is still + * used on Gen8 to make it pick a qpitch value which doesn't include space + * for the mipmaps. On Gen9 this is not necessary because it will + * automatically pick a packed qpitch value whenever mt->first_level == + * mt->last_level. + * TODO: can we use it elsewhere? + * TODO: also disable this on Gen8 and pick the qpitch value like Gen9 */ - switch (mt->msaa_layout) { - case INTEL_MSAA_LAYOUT_NONE: - case INTEL_MSAA_LAYOUT_IMS: - mt->array_spacing_lod0 = false; - break; - case INTEL_MSAA_LAYOUT_UMS: - case INTEL_MSAA_LAYOUT_CMS: - mt->array_spacing_lod0 = true; - break; + if (brw->gen >= 9) { + mt->array_layout = ALL_LOD_IN_EACH_SLICE; + } else { + switch (mt->msaa_layout) { + case INTEL_MSAA_LAYOUT_NONE: + case INTEL_MSAA_LAYOUT_IMS: + mt->array_layout = ALL_LOD_IN_EACH_SLICE; + break; + case INTEL_MSAA_LAYOUT_UMS: + case INTEL_MSAA_LAYOUT_CMS: + mt->array_layout = ALL_SLICES_AT_EACH_LOD; + break; + } } if (target == GL_TEXTURE_CUBE_MAP) { @@ -361,10 +469,17 @@ intel_miptree_create_layout(struct brw_context *brw, mt->physical_height0 = height0; mt->physical_depth0 = depth0; - if (!for_bo && + if (!(layout_flags & MIPTREE_LAYOUT_FOR_BO) && _mesa_get_format_base_format(format) == GL_DEPTH_STENCIL && (brw->must_use_separate_stencil || - (brw->has_separate_stencil && brw_is_hiz_depth_format(brw, format)))) { + (brw->has_separate_stencil && + intel_miptree_wants_hiz_buffer(brw, mt)))) { + uint32_t stencil_flags = MIPTREE_LAYOUT_ACCELERATED_UPLOAD; + if (brw->gen == 6) { + stencil_flags |= MIPTREE_LAYOUT_FORCE_ALL_SLICE_AT_LOD | + MIPTREE_LAYOUT_TILING_ANY; + } + mt->stencil_mt = intel_miptree_create(brw, mt->target, MESA_FORMAT_S_UINT8, @@ -373,9 +488,9 @@ intel_miptree_create_layout(struct brw_context *brw, mt->logical_width0, mt->logical_height0, mt->logical_depth0, - true, num_samples, - INTEL_MIPTREE_TILING_ANY); + stencil_flags); + if (!mt->stencil_mt) { intel_miptree_release(&mt); return NULL; @@ -393,106 +508,36 @@ intel_miptree_create_layout(struct brw_context *brw, } } - brw_miptree_layout(brw, mt); - - return mt; -} - -/** - * \brief Helper function for intel_miptree_create(). - */ -static uint32_t -intel_miptree_choose_tiling(struct brw_context *brw, - mesa_format format, - uint32_t width0, - uint32_t num_samples, - enum intel_miptree_tiling_mode requested, - struct intel_mipmap_tree *mt) -{ - if (format == MESA_FORMAT_S_UINT8) { - /* The stencil buffer is W tiled. However, we request from the kernel a - * non-tiled buffer because the GTT is incapable of W fencing. - */ - return I915_TILING_NONE; - } + if (layout_flags & MIPTREE_LAYOUT_FORCE_ALL_SLICE_AT_LOD) + mt->array_layout = ALL_SLICES_AT_EACH_LOD; - /* Some usages may want only one type of tiling, like depth miptrees (Y - * tiled), or temporary BOs for uploading data once (linear). + /* + * Obey HALIGN_16 constraints for Gen8 and Gen9 buffers which are + * multisampled or have an AUX buffer attached to it. + * + * GEN | MSRT | AUX_CCS_* or AUX_MCS + * ------------------------------------------- + * 9 | HALIGN_16 | HALIGN_16 + * 8 | HALIGN_ANY | HALIGN_16 + * 7 | ? | ? + * 6 | ? | ? */ - switch (requested) { - case INTEL_MIPTREE_TILING_ANY: - break; - case INTEL_MIPTREE_TILING_Y: - return I915_TILING_Y; - case INTEL_MIPTREE_TILING_NONE: - return I915_TILING_NONE; - } - - if (num_samples > 1) { - /* From p82 of the Sandy Bridge PRM, dw3[1] of SURFACE_STATE ("Tiled - * Surface"): - * - * [DevSNB+]: For multi-sample render targets, this field must be - * 1. 
MSRTs can only be tiled. - * - * Our usual reason for preferring X tiling (fast blits using the - * blitting engine) doesn't apply to MSAA, since we'll generally be - * downsampling or upsampling when blitting between the MSAA buffer - * and another buffer, and the blitting engine doesn't support that. - * So use Y tiling, since it makes better use of the cache. - */ - return I915_TILING_Y; - } - - GLenum base_format = _mesa_get_format_base_format(format); - if (base_format == GL_DEPTH_COMPONENT || - base_format == GL_DEPTH_STENCIL_EXT) - return I915_TILING_Y; - - int minimum_pitch = mt->total_width * mt->cpp; - - /* If the width is much smaller than a tile, don't bother tiling. */ - if (minimum_pitch < 64) - return I915_TILING_NONE; - - if (ALIGN(minimum_pitch, 512) >= 32768 || - mt->total_width >= 32768 || mt->total_height >= 32768) { - perf_debug("%dx%d miptree too large to blit, falling back to untiled", - mt->total_width, mt->total_height); - return I915_TILING_NONE; + if (intel_miptree_supports_non_msrt_fast_clear(brw, mt)) { + if (brw->gen >= 9 || (brw->gen == 8 && num_samples <= 1)) + layout_flags |= MIPTREE_LAYOUT_FORCE_HALIGN16; + } else if (brw->gen >= 9 && num_samples > 1) { + layout_flags |= MIPTREE_LAYOUT_FORCE_HALIGN16; + } else { + /* For now, nothing else has this requirement */ + assert((layout_flags & MIPTREE_LAYOUT_FORCE_HALIGN16) == 0); } - /* Pre-gen6 doesn't have BLORP to handle Y-tiling, so use X-tiling. */ - if (brw->gen < 6) - return I915_TILING_X; - - /* From the Sandybridge PRM, Volume 1, Part 2, page 32: - * "NOTE: 128BPE Format Color Buffer ( render target ) MUST be either TileX - * or Linear." - * 128 bits per pixel translates to 16 bytes per pixel. This is necessary - * all the way back to 965, but is explicitly permitted on Gen7. - */ - if (brw->gen != 7 && mt->cpp >= 16) - return I915_TILING_X; + brw_miptree_layout(brw, mt, layout_flags); - /* From the Ivy Bridge PRM, Vol4 Part1 2.12.2.1 (SURFACE_STATE for most - * messages), on p64, under the heading "Surface Vertical Alignment": - * - * This field must be set to VALIGN_4 for all tiled Y Render Target - * surfaces. - * - * So if the surface is renderable and uses a vertical alignment of 2, - * force it to be X tiled. This is somewhat conservative (it's possible - * that the client won't ever render to this surface), but it's difficult - * to know that ahead of time. And besides, since we use a vertical - * alignment of 4 as often as we can, this shouldn't happen very often. - */ - if (brw->gen == 7 && mt->align_h == 2 && - brw->format_supported_as_render_target[format]) { - return I915_TILING_X; - } + if (mt->disable_aux_buffers) + assert(mt->msaa_layout != INTEL_MSAA_LAYOUT_CMS); - return I915_TILING_Y | I915_TILING_X; + return mt; } @@ -535,33 +580,62 @@ intel_lower_compressed_format(struct brw_context *brw, mesa_format format) } } +/* This function computes Yf/Ys tiled bo size, alignment and pitch. 
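/* The HALIGN_16 table above compresses to a small predicate.  The sketch
 * below restates it for gen >= 8; it is illustrative only, since the
 * patch encodes the same rule inline through layout_flags. */
static bool
needs_halign16_sketch(int gen, bool fast_clear_capable, unsigned num_samples)
{
   if (fast_clear_capable)
      return gen >= 9 || (gen == 8 && num_samples <= 1);
   return gen >= 9 && num_samples > 1;   /* Gen9 multisampled case */
}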
*/ +static unsigned long +intel_get_yf_ys_bo_size(struct intel_mipmap_tree *mt, unsigned *alignment, + unsigned long *pitch) +{ + uint32_t tile_width, tile_height; + unsigned long stride, size, aligned_y; + + assert(mt->tr_mode != INTEL_MIPTREE_TRMODE_NONE); + intel_get_tile_dims(mt->tiling, mt->tr_mode, mt->cpp, + &tile_width, &tile_height); + + aligned_y = ALIGN(mt->total_height, tile_height); + stride = mt->total_width * mt->cpp; + stride = ALIGN(stride, tile_width); + size = stride * aligned_y; + + if (mt->tr_mode == INTEL_MIPTREE_TRMODE_YF) { + assert(size % 4096 == 0); + *alignment = 4096; + } else { + assert(size % (64 * 1024) == 0); + *alignment = 64 * 1024; + } + *pitch = stride; + return size; +} struct intel_mipmap_tree * intel_miptree_create(struct brw_context *brw, - GLenum target, - mesa_format format, - GLuint first_level, - GLuint last_level, - GLuint width0, - GLuint height0, - GLuint depth0, - bool expect_accelerated_upload, + GLenum target, + mesa_format format, + GLuint first_level, + GLuint last_level, + GLuint width0, + GLuint height0, + GLuint depth0, GLuint num_samples, - enum intel_miptree_tiling_mode requested_tiling) + uint32_t layout_flags) { struct intel_mipmap_tree *mt; mesa_format tex_format = format; mesa_format etc_format = MESA_FORMAT_NONE; GLuint total_width, total_height; + uint32_t alloc_flags = 0; format = intel_lower_compressed_format(brw, format); etc_format = (format != tex_format) ? tex_format : MESA_FORMAT_NONE; + assert((layout_flags & MIPTREE_LAYOUT_DISABLE_AUX) == 0); + assert((layout_flags & MIPTREE_LAYOUT_FOR_BO) == 0); mt = intel_miptree_create_layout(brw, target, format, - first_level, last_level, width0, - height0, depth0, - false, num_samples); + first_level, last_level, width0, + height0, depth0, num_samples, + layout_flags); /* * pitch == 0 || height == 0 indicates the null texture */ @@ -579,42 +653,48 @@ intel_miptree_create(struct brw_context *brw, total_height = ALIGN(total_height, 64); } - uint32_t tiling = intel_miptree_choose_tiling(brw, format, width0, - num_samples, requested_tiling, - mt); bool y_or_x = false; - if (tiling == (I915_TILING_Y | I915_TILING_X)) { + if (mt->tiling == (I915_TILING_Y | I915_TILING_X)) { y_or_x = true; mt->tiling = I915_TILING_Y; - } else { - mt->tiling = tiling; } + if (layout_flags & MIPTREE_LAYOUT_ACCELERATED_UPLOAD) + alloc_flags |= BO_ALLOC_FOR_RENDER; + unsigned long pitch; mt->etc_format = etc_format; - mt->bo = drm_intel_bo_alloc_tiled(brw->bufmgr, "miptree", - total_width, total_height, mt->cpp, - &mt->tiling, &pitch, - (expect_accelerated_upload ? - BO_ALLOC_FOR_RENDER : 0)); + + if (mt->tr_mode != INTEL_MIPTREE_TRMODE_NONE) { + unsigned alignment = 0; + unsigned long size; + size = intel_get_yf_ys_bo_size(mt, &alignment, &pitch); + assert(size); + mt->bo = drm_intel_bo_alloc_for_render(brw->bufmgr, "miptree", + size, alignment); + } else { + mt->bo = drm_intel_bo_alloc_tiled(brw->bufmgr, "miptree", + total_width, total_height, mt->cpp, + &mt->tiling, &pitch, + alloc_flags); + } + mt->pitch = pitch; /* If the BO is too large to fit in the aperture, we need to use the - * BLT engine to support it. The BLT paths can't currently handle Y-tiling, - * so we need to fall back to X. + * BLT engine to support it. Prior to Sandybridge, the BLT paths can't + * handle Y-tiling, so we need to fall back to X. 
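/* Worked example of intel_get_yf_ys_bo_size() above, with hypothetical
 * numbers: a 1000x500, cpp=4 (e.g. RGBA8) surface in TRMODE_YF.  The
 * per-cpp tile table in intel_get_tile_dims() (later in this patch)
 * gives tile_h = 32 rows and tile_w = 32 * 1 * 4 = 128 bytes, so:
 *
 *    stride = ALIGN(1000 * 4, 128) = 4096 bytes
 *    rows   = ALIGN(500, 32)       = 512
 *    size   = 4096 * 512           = 2 MiB
 *
 * which satisfies the 4 KiB alignment asserted for YF surfaces. */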
*/ - if (y_or_x && mt->bo->size >= brw->max_gtt_map_object_size) { + if (brw->gen < 6 && y_or_x && mt->bo->size >= brw->max_gtt_map_object_size) { perf_debug("%dx%d miptree larger than aperture; falling back to X-tiled\n", mt->total_width, mt->total_height); mt->tiling = I915_TILING_X; drm_intel_bo_unreference(mt->bo); mt->bo = drm_intel_bo_alloc_tiled(brw->bufmgr, "miptree", - total_width, total_height, mt->cpp, - &mt->tiling, &pitch, - (expect_accelerated_upload ? - BO_ALLOC_FOR_RENDER : 0)); + total_width, total_height, mt->cpp, + &mt->tiling, &pitch, alloc_flags); mt->pitch = pitch; } @@ -627,6 +707,7 @@ intel_miptree_create(struct brw_context *brw, if (mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS) { + assert(mt->num_samples > 1); if (!intel_miptree_alloc_mcs(brw, mt, num_samples)) { intel_miptree_release(&mt); return NULL; @@ -638,8 +719,11 @@ intel_miptree_create(struct brw_context *brw, * Allocation of the MCS miptree will be deferred until the first fast * clear actually occurs. */ - if (intel_is_non_msrt_mcs_buffer_supported(brw, mt)) + if (intel_tiling_supports_non_msrt_mcs(brw, mt->tiling) && + intel_miptree_supports_non_msrt_fast_clear(brw, mt)) { mt->fast_clear_state = INTEL_FAST_CLEAR_STATE_RESOLVED; + assert(brw->gen < 8 || mt->halign == 16 || num_samples <= 1); + } return mt; } @@ -651,10 +735,13 @@ intel_miptree_create_for_bo(struct brw_context *brw, uint32_t offset, uint32_t width, uint32_t height, - int pitch) + uint32_t depth, + int pitch, + uint32_t layout_flags) { struct intel_mipmap_tree *mt; uint32_t tiling, swizzle; + GLenum target; drm_intel_bo_get_tiling(bo, &tiling, &swizzle); @@ -669,14 +756,21 @@ intel_miptree_create_for_bo(struct brw_context *brw, */ assert(pitch >= 0); - mt = intel_miptree_create_layout(brw, GL_TEXTURE_2D, format, + target = depth > 1 ? GL_TEXTURE_2D_ARRAY : GL_TEXTURE_2D; + + /* The BO already has a tiling format and we shouldn't confuse the lower + * layers by making it try to find a tiling format again. + */ + assert((layout_flags & MIPTREE_LAYOUT_TILING_ANY) == 0); + assert((layout_flags & MIPTREE_LAYOUT_TILING_NONE) == 0); + + layout_flags |= MIPTREE_LAYOUT_FOR_BO; + mt = intel_miptree_create_layout(brw, target, format, 0, 0, - width, height, 1, - true, 0 /* num_samples */); - if (!mt) { - free(mt); - return mt; - } + width, height, depth, 0, + layout_flags); + if (!mt) + return NULL; drm_intel_bo_reference(bo); mt->bo = bo; @@ -722,7 +816,9 @@ intel_update_winsys_renderbuffer_miptree(struct brw_context *intel, 0, width, height, - pitch); + 1, + pitch, + 0); if (!singlesample_mt) goto fail; @@ -731,8 +827,10 @@ intel_update_winsys_renderbuffer_miptree(struct brw_context *intel, * Allocation of the MCS miptree will be deferred until the first fast * clear actually occurs. */ - if (intel_is_non_msrt_mcs_buffer_supported(intel, singlesample_mt)) + if (intel_tiling_supports_non_msrt_mcs(intel, singlesample_mt->tiling) && + intel_miptree_supports_non_msrt_fast_clear(intel, singlesample_mt)) { singlesample_mt->fast_clear_state = INTEL_FAST_CLEAR_STATE_RESOLVED; + } if (num_samples == 0) { intel_miptree_release(&irb->mt); @@ -778,14 +876,17 @@ intel_miptree_create_for_renderbuffer(struct brw_context *brw, uint32_t depth = 1; bool ok; GLenum target = num_samples > 1 ? 
GL_TEXTURE_2D_MULTISAMPLE : GL_TEXTURE_2D; + const uint32_t layout_flags = MIPTREE_LAYOUT_ACCELERATED_UPLOAD | + MIPTREE_LAYOUT_TILING_ANY; + mt = intel_miptree_create(brw, target, format, 0, 0, - width, height, depth, true, num_samples, - INTEL_MIPTREE_TILING_ANY); + width, height, depth, num_samples, + layout_flags); if (!mt) goto fail; - if (brw_is_hiz_depth_format(brw, format)) { + if (intel_miptree_wants_hiz_buffer(brw, mt)) { ok = intel_miptree_alloc_hiz(brw, mt); if (!ok) goto fail; @@ -809,7 +910,7 @@ intel_miptree_reference(struct intel_mipmap_tree **dst, if (src) { src->refcount++; - DBG("%s %p refcount now %d\n", __FUNCTION__, src, src->refcount); + DBG("%s %p refcount now %d\n", __func__, src, src->refcount); } *dst = src; @@ -822,15 +923,21 @@ intel_miptree_release(struct intel_mipmap_tree **mt) if (!*mt) return; - DBG("%s %p refcount will be %d\n", __FUNCTION__, *mt, (*mt)->refcount - 1); + DBG("%s %p refcount will be %d\n", __func__, *mt, (*mt)->refcount - 1); if (--(*mt)->refcount <= 0) { GLuint i; - DBG("%s deleting %p\n", __FUNCTION__, *mt); + DBG("%s deleting %p\n", __func__, *mt); drm_intel_bo_unreference((*mt)->bo); intel_miptree_release(&(*mt)->stencil_mt); - intel_miptree_release(&(*mt)->hiz_mt); + if ((*mt)->hiz_buf) { + if ((*mt)->hiz_buf->mt) + intel_miptree_release(&(*mt)->hiz_buf->mt); + else + drm_intel_bo_unreference((*mt)->hiz_buf->bo); + free((*mt)->hiz_buf); + } intel_miptree_release(&(*mt)->mcs_mt); intel_resolve_map_clear(&(*mt)->hiz_map); @@ -843,12 +950,18 @@ intel_miptree_release(struct intel_mipmap_tree **mt) *mt = NULL; } + void -intel_miptree_get_dimensions_for_image(struct gl_texture_image *image, - int *width, int *height, int *depth) +intel_get_image_dims(struct gl_texture_image *image, + int *width, int *height, int *depth) { switch (image->TexObject->Target) { case GL_TEXTURE_1D_ARRAY: + /* For a 1D Array texture the OpenGL API will treat the image height as + * the number of array slices. For Intel hardware, we treat the 1D array + * as a 2D Array with a height of 1. So, here we want to swap image + * height and depth. + */ *width = image->Width; *height = 1; *depth = image->Height; @@ -892,7 +1005,7 @@ intel_miptree_match_image(struct intel_mipmap_tree *mt, if (image->TexFormat != mt_format) return false; - intel_miptree_get_dimensions_for_image(image, &width, &height, &depth); + intel_get_image_dims(image, &width, &height, &depth); if (mt->target == GL_TEXTURE_CUBE_MAP) depth = 6; @@ -936,7 +1049,7 @@ intel_miptree_set_level_info(struct intel_mipmap_tree *mt, mt->level[level].level_x = x; mt->level[level].level_y = y; - DBG("%s level %d, depth %d, offset %d,%d\n", __FUNCTION__, + DBG("%s level %d, depth %d, offset %d,%d\n", __func__, level, d, x, y); assert(mt->level[level].slice == NULL); @@ -961,7 +1074,7 @@ intel_miptree_set_image_offset(struct intel_mipmap_tree *mt, mt->level[level].slice[img].y_offset = mt->level[level].level_y + y; DBG("%s level %d img %d pos %d,%d\n", - __FUNCTION__, level, img, + __func__, level, img, mt->level[level].slice[img].x_offset, mt->level[level].slice[img].y_offset); } @@ -977,37 +1090,82 @@ intel_miptree_get_image_offset(const struct intel_mipmap_tree *mt, *y = mt->level[level].slice[slice].y_offset; } + +/** + * This function computes the tile_w (in bytes) and tile_h (in rows) of + * different tiling patterns. If the BO is untiled, tile_w is set to cpp + * and tile_h is set to 1. 
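/* For reference, the values produced below, and the offset masks that
 * intel_get_tile_masks() derives from them:
 *
 *    I915_TILING_X:    tile_w = 512 bytes, tile_h =  8 rows
 *    I915_TILING_Y:    tile_w = 128 bytes, tile_h = 32 rows
 *    I915_TILING_NONE: tile_w = cpp,       tile_h =  1 row
 *
 * Yf/Ys surfaces instead pick tile_h from cpp (64, 32 or 16 rows for
 * cpp 1, 2/4 and 8/16 respectively), quadruple it for TRMODE_YS, and
 * then compute tile_w = tile_h * aspect_ratio * cpp.
 *
 * Example masks for a Y-tiled, cpp=4 surface:
 *    mask_x = tile_w / cpp - 1 = 128/4 - 1 = 31
 *    mask_y = tile_h - 1       = 31
 * so (x & 31, y & 31) is a texel's position within its tile. */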
+ */ +void +intel_get_tile_dims(uint32_t tiling, uint32_t tr_mode, uint32_t cpp, + uint32_t *tile_w, uint32_t *tile_h) +{ + if (tr_mode == INTEL_MIPTREE_TRMODE_NONE) { + switch (tiling) { + case I915_TILING_X: + *tile_w = 512; + *tile_h = 8; + break; + case I915_TILING_Y: + *tile_w = 128; + *tile_h = 32; + break; + case I915_TILING_NONE: + *tile_w = cpp; + *tile_h = 1; + break; + default: + unreachable("not reached"); + } + } else { + uint32_t aspect_ratio = 1; + assert(_mesa_is_pow_two(cpp)); + + switch (cpp) { + case 1: + *tile_h = 64; + break; + case 2: + case 4: + *tile_h = 32; + break; + case 8: + case 16: + *tile_h = 16; + break; + default: + unreachable("not reached"); + } + + if (cpp == 2 || cpp == 8) + aspect_ratio = 2; + + if (tr_mode == INTEL_MIPTREE_TRMODE_YS) + *tile_h *= 4; + + *tile_w = *tile_h * aspect_ratio * cpp; + } +} + + /** * This function computes masks that may be used to select the bits of the X * and Y coordinates that indicate the offset within a tile. If the BO is * untiled, the masks are set to 0. */ void -intel_miptree_get_tile_masks(const struct intel_mipmap_tree *mt, - uint32_t *mask_x, uint32_t *mask_y, - bool map_stencil_as_y_tiled) +intel_get_tile_masks(uint32_t tiling, uint32_t tr_mode, uint32_t cpp, + bool map_stencil_as_y_tiled, + uint32_t *mask_x, uint32_t *mask_y) { - int cpp = mt->cpp; - uint32_t tiling = mt->tiling; - + uint32_t tile_w_bytes, tile_h; if (map_stencil_as_y_tiled) tiling = I915_TILING_Y; - switch (tiling) { - default: - assert(false); - case I915_TILING_NONE: - *mask_x = *mask_y = 0; - break; - case I915_TILING_X: - *mask_x = 512 / cpp - 1; - *mask_y = 7; - break; - case I915_TILING_Y: - *mask_x = 128 / cpp - 1; - *mask_y = 31; - break; - } + intel_get_tile_dims(tiling, tr_mode, cpp, &tile_w_bytes, &tile_h); + + *mask_x = tile_w_bytes / cpp - 1; + *mask_y = tile_h - 1; } /** @@ -1039,7 +1197,7 @@ intel_miptree_get_aligned_offset(const struct intel_mipmap_tree *mt, switch (tiling) { default: - assert(false); + unreachable("not reached"); case I915_TILING_NONE: return y * pitch + x * cpp; case I915_TILING_X: @@ -1072,7 +1230,7 @@ intel_miptree_get_tile_offsets(const struct intel_mipmap_tree *mt, uint32_t x, y; uint32_t mask_x, mask_y; - intel_miptree_get_tile_masks(mt, &mask_x, &mask_y, false); + intel_get_tile_masks(mt->tiling, mt->tr_mode, mt->cpp, false, &mask_x, &mask_y); intel_miptree_get_image_offset(mt, level, slice, &x, &y); *tile_x = x & mask_x; @@ -1091,7 +1249,7 @@ intel_miptree_copy_slice_sw(struct brw_context *brw, int height) { void *src, *dst; - int src_stride, dst_stride; + ptrdiff_t src_stride, dst_stride; int cpp = dst_mt->cpp; intel_miptree_map(brw, src_mt, @@ -1109,7 +1267,7 @@ intel_miptree_copy_slice_sw(struct brw_context *brw, BRW_MAP_DIRECT_BIT, &dst, &dst_stride); - DBG("sw blit %s mt %p %p/%d -> %s mt %p %p/%d (%dx%d)\n", + DBG("sw blit %s mt %p %p/%"PRIdPTR" -> %s mt %p %p/%"PRIdPTR" (%dx%d)\n", _mesa_get_format_name(src_mt->format), src_mt, src, src_stride, _mesa_get_format_name(dst_mt->format), @@ -1166,8 +1324,10 @@ intel_miptree_copy_slice(struct brw_context *brw, assert(src_mt->format == dst_mt->format); if (dst_mt->compressed) { - height = ALIGN(height, dst_mt->align_h) / dst_mt->align_h; - width = ALIGN(width, dst_mt->align_w); + unsigned int i, j; + _mesa_get_format_block_size(dst_mt->format, &i, &j); + height = ALIGN_NPOT(height, j) / j; + width = ALIGN_NPOT(width, i) / i; } /* If it's a packed depth/stencil buffer with separate stencil, the blit @@ -1224,7 +1384,12 @@ 
intel_miptree_copy_teximage(struct brw_context *brw, intel_texture_object(intelImage->base.Base.TexObject); int level = intelImage->base.Base.Level; int face = intelImage->base.Base.Face; - GLuint depth = intelImage->base.Base.Depth; + + GLuint depth; + if (intel_obj->base.Target == GL_TEXTURE_1D_ARRAY) + depth = intelImage->base.Base.Height; + else + depth = intelImage->base.Base.Depth; if (!invalidate) { for (int slice = 0; slice < depth; slice++) { @@ -1236,13 +1401,14 @@ intel_miptree_copy_teximage(struct brw_context *brw, intel_obj->needs_validate = true; } -bool +static bool intel_miptree_alloc_mcs(struct brw_context *brw, struct intel_mipmap_tree *mt, GLuint num_samples) { assert(brw->gen >= 7); /* MCS only used on Gen7+ */ assert(mt->mcs_mt == NULL); + assert(!mt->disable_aux_buffers); /* Choose the correct format for the MCS buffer. All that really matters * is that we allocate the right buffer size, since we'll always be @@ -1264,15 +1430,22 @@ intel_miptree_alloc_mcs(struct brw_context *brw, */ format = MESA_FORMAT_R_UINT32; break; + case 16: + /* 64 bits/pixel are required for MCS data when using 16x MSAA (4 bits + * for each sample). + */ + format = MESA_FORMAT_RG_UINT32; + break; default: - assert(!"Unrecognized sample count in intel_miptree_alloc_mcs"); - return false; + unreachable("Unrecognized sample count in intel_miptree_alloc_mcs"); }; /* From the Ivy Bridge PRM, Vol4 Part1 p76, "MCS Base Address": * * "The MCS surface must be stored as Tile Y." */ + const uint32_t mcs_flags = MIPTREE_LAYOUT_ACCELERATED_UPLOAD | + MIPTREE_LAYOUT_TILING_Y; mt->mcs_mt = intel_miptree_create(brw, mt->target, format, @@ -1281,9 +1454,8 @@ intel_miptree_alloc_mcs(struct brw_context *brw, mt->logical_width0, mt->logical_height0, mt->logical_depth0, - true, 0 /* num_samples */, - INTEL_MIPTREE_TILING_Y); + mcs_flags); /* From the Ivy Bridge PRM, Vol 2 Part 1 p326: * @@ -1297,7 +1469,7 @@ intel_miptree_alloc_mcs(struct brw_context *brw, */ void *data = intel_miptree_map_raw(brw, mt->mcs_mt); memset(data, 0xff, mt->mcs_mt->total_height * mt->mcs_mt->pitch); - intel_miptree_unmap_raw(brw, mt->mcs_mt); + intel_miptree_unmap_raw(mt->mcs_mt); mt->fast_clear_state = INTEL_FAST_CLEAR_STATE_CLEAR; return mt->mcs_mt; @@ -1309,6 +1481,7 @@ intel_miptree_alloc_non_msrt_mcs(struct brw_context *brw, struct intel_mipmap_tree *mt) { assert(mt->mcs_mt == NULL); + assert(!mt->disable_aux_buffers); /* The format of the MCS buffer is opaque to the driver; all that matters * is that we get its size and pitch right. We'll pretend that the format @@ -1322,14 +1495,30 @@ intel_miptree_alloc_non_msrt_mcs(struct brw_context *brw, const mesa_format format = MESA_FORMAT_R_UINT32; unsigned block_width_px; unsigned block_height; - intel_get_non_msrt_mcs_alignment(brw, mt, &block_width_px, &block_height); + intel_get_non_msrt_mcs_alignment(mt, &block_width_px, &block_height); unsigned width_divisor = block_width_px * 4; unsigned height_divisor = block_height * 8; + + /* The Skylake MCS is twice as tall as the Broadwell MCS. + * + * In pre-Skylake, each bit in the MCS contained the state of 2 cachelines + * in the main surface. In Skylake, it's two bits. The extra bit + * doubles the MCS height, not width, because in Skylake the MCS is always + * Y-tiled. 
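/* Worked example of the divisor arithmetic below, with hypothetical
 * numbers (the Y-tile block_height of 4 comes from
 * intel_get_non_msrt_mcs_alignment(), whose Y-tiling case is partly
 * elided in this hunk, so treat that value as an assumption):
 *
 *    1920x1080, cpp=4, Y-tiled render target:
 *    block_width_px = 32 / 4 = 8,   block_height = 4
 *    width_divisor  = 8 * 4 = 32 -> mcs_width  = ALIGN(1920, 32)/32 = 60
 *    height_divisor = 4 * 8 = 32 -> mcs_height = ALIGN(1080, 32)/32 = 34
 *
 * On Gen9 height_divisor halves to 16, doubling mcs_height to 68. */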
+ */ + if (brw->gen >= 9) + height_divisor /= 2; + unsigned mcs_width = ALIGN(mt->logical_width0, width_divisor) / width_divisor; unsigned mcs_height = ALIGN(mt->logical_height0, height_divisor) / height_divisor; assert(mt->logical_depth0 == 1); + uint32_t layout_flags = MIPTREE_LAYOUT_ACCELERATED_UPLOAD | + MIPTREE_LAYOUT_TILING_Y; + if (brw->gen >= 8) { + layout_flags |= MIPTREE_LAYOUT_FORCE_HALIGN16; + } mt->mcs_mt = intel_miptree_create(brw, mt->target, format, @@ -1338,9 +1527,8 @@ intel_miptree_alloc_non_msrt_mcs(struct brw_context *brw, mcs_width, mcs_height, mt->logical_depth0, - true, 0 /* num_samples */, - INTEL_MIPTREE_TILING_Y); + layout_flags); return mt->mcs_mt; } @@ -1356,7 +1544,7 @@ intel_miptree_level_enable_hiz(struct brw_context *brw, struct intel_mipmap_tree *mt, uint32_t level) { - assert(mt->hiz_mt); + assert(mt->hiz_buf); if (brw->gen >= 8 || brw->is_haswell) { uint32_t width = minify(mt->physical_width0, level); @@ -1380,33 +1568,290 @@ intel_miptree_level_enable_hiz(struct brw_context *brw, } +/** + * Helper for intel_miptree_alloc_hiz() that determines the required hiz + * buffer dimensions and allocates a bo for the hiz buffer. + */ +static struct intel_miptree_aux_buffer * +intel_gen7_hiz_buf_create(struct brw_context *brw, + struct intel_mipmap_tree *mt) +{ + unsigned z_width = mt->logical_width0; + unsigned z_height = mt->logical_height0; + const unsigned z_depth = MAX2(mt->logical_depth0, 1); + unsigned hz_width, hz_height; + struct intel_miptree_aux_buffer *buf = calloc(sizeof(*buf), 1); + + if (!buf) + return NULL; + + /* Gen7 PRM Volume 2, Part 1, 11.5.3 "Hierarchical Depth Buffer" documents + * adjustments required for Z_Height and Z_Width based on multisampling. + */ + switch (mt->num_samples) { + case 0: + case 1: + break; + case 2: + case 4: + z_width *= 2; + z_height *= 2; + break; + case 8: + z_width *= 4; + z_height *= 2; + break; + default: + unreachable("unsupported sample count"); + } + + const unsigned vertical_align = 8; /* 'j' in the docs */ + const unsigned H0 = z_height; + const unsigned h0 = ALIGN(H0, vertical_align); + const unsigned h1 = ALIGN(minify(H0, 1), vertical_align); + const unsigned Z0 = z_depth; + + /* HZ_Width (bytes) = ceiling(Z_Width / 16) * 16 */ + hz_width = ALIGN(z_width, 16); + + if (mt->target == GL_TEXTURE_3D) { + unsigned H_i = H0; + unsigned Z_i = Z0; + hz_height = 0; + for (unsigned level = mt->first_level; level <= mt->last_level; ++level) { + unsigned h_i = ALIGN(H_i, vertical_align); + /* sum(i=0 to m; h_i * max(1, floor(Z_Depth/2**i))) */ + hz_height += h_i * Z_i; + H_i = minify(H_i, 1); + Z_i = minify(Z_i, 1); + } + /* HZ_Height = + * (1/2) * sum(i=0 to m; h_i * max(1, floor(Z_Depth/2**i))) + */ + hz_height = DIV_ROUND_UP(hz_height, 2); + } else { + const unsigned hz_qpitch = h0 + h1 + (12 * vertical_align); + if (mt->target == GL_TEXTURE_CUBE_MAP_ARRAY || + mt->target == GL_TEXTURE_CUBE_MAP) { + /* HZ_Height (rows) = Ceiling ( ( Q_pitch * Z_depth * 6/2) /8 ) * 8 */ + hz_height = DIV_ROUND_UP(hz_qpitch * Z0 * 6, 2 * 8) * 8; + } else { + /* HZ_Height (rows) = Ceiling ( ( Q_pitch * Z_depth/2) /8 ) * 8 */ + hz_height = DIV_ROUND_UP(hz_qpitch * Z0, 2 * 8) * 8; + } + } + + unsigned long pitch; + uint32_t tiling = I915_TILING_Y; + buf->bo = drm_intel_bo_alloc_tiled(brw->bufmgr, "hiz", + hz_width, hz_height, 1, + &tiling, &pitch, + BO_ALLOC_FOR_RENDER); + if (!buf->bo) { + free(buf); + return NULL; + } else if (tiling != I915_TILING_Y) { + drm_intel_bo_unreference(buf->bo); + free(buf); + return NULL; + } + + 
buf->pitch = pitch;
+
+ return buf;
+}
+
+
+/**
+ * Helper for intel_miptree_alloc_hiz() that determines the required hiz
+ * buffer dimensions and allocates a bo for the hiz buffer.
+ */
+static struct intel_miptree_aux_buffer *
+intel_gen8_hiz_buf_create(struct brw_context *brw,
+ struct intel_mipmap_tree *mt)
+{
+ unsigned z_width = mt->logical_width0;
+ unsigned z_height = mt->logical_height0;
+ const unsigned z_depth = MAX2(mt->logical_depth0, 1);
+ unsigned hz_width, hz_height;
+ struct intel_miptree_aux_buffer *buf = calloc(sizeof(*buf), 1);
+
+ if (!buf)
+ return NULL;
+
+ /* Gen7 PRM Volume 2, Part 1, 11.5.3 "Hierarchical Depth Buffer" documents
+ * adjustments required for Z_Height and Z_Width based on multisampling.
+ */
+ if (brw->gen < 9) {
+ switch (mt->num_samples) {
+ case 0:
+ case 1:
+ break;
+ case 2:
+ case 4:
+ z_width *= 2;
+ z_height *= 2;
+ break;
+ case 8:
+ z_width *= 4;
+ z_height *= 2;
+ break;
+ default:
+ unreachable("unsupported sample count");
+ }
+ }
+
+ const unsigned vertical_align = 8; /* 'j' in the docs */
+ const unsigned H0 = z_height;
+ const unsigned h0 = ALIGN(H0, vertical_align);
+ const unsigned h1 = ALIGN(minify(H0, 1), vertical_align);
+ const unsigned Z0 = z_depth;
+
+ /* HZ_Width (bytes) = ceiling(Z_Width / 16) * 16 */
+ hz_width = ALIGN(z_width, 16);
+
+ unsigned H_i = H0;
+ unsigned Z_i = Z0;
+ unsigned sum_h_i = 0;
+ unsigned hz_height_3d_sum = 0;
+ for (unsigned level = mt->first_level; level <= mt->last_level; ++level) {
+ unsigned i = level - mt->first_level;
+ unsigned h_i = ALIGN(H_i, vertical_align);
+ /* sum(i=2 to m; h_i) */
+ if (i >= 2) {
+ sum_h_i += h_i;
+ }
+ /* sum(i=0 to m; h_i * max(1, floor(Z_Depth/2**i))) */
+ hz_height_3d_sum += h_i * Z_i;
+ H_i = minify(H_i, 1);
+ Z_i = minify(Z_i, 1);
+ }
+ /* HZ_QPitch = h0 + max(h1, sum(i=2 to m; h_i)) */
+ buf->qpitch = h0 + MAX2(h1, sum_h_i);
+
+ if (mt->target == GL_TEXTURE_3D) {
+ /* (1/2) * sum(i=0 to m; h_i * max(1, floor(Z_Depth/2**i))) */
+ hz_height = DIV_ROUND_UP(hz_height_3d_sum, 2);
+ } else {
+ /* HZ_Height (rows) = ceiling( (HZ_QPitch/2)/8) *8 * Z_Depth */
+ hz_height = DIV_ROUND_UP(buf->qpitch, 2 * 8) * 8 * Z0;
+ if (mt->target == GL_TEXTURE_CUBE_MAP_ARRAY ||
+ mt->target == GL_TEXTURE_CUBE_MAP) {
+ /* HZ_Height (rows) = ceiling( (HZ_QPitch/2)/8) *8 * 6 * Z_Depth
+ *
+ * We can just take our hz_height calculation from above, and
+ * multiply by 6 for the cube map and cube map array types.
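/* Worked example of the HZ_QPitch computation above, with hypothetical
 * dimensions: a single-sampled 1024x768 depth miptree with a full mip
 * chain (levels 0..10).
 *
 *    H0 = 768, j = 8
 *    h0 = ALIGN(768, 8)           = 768
 *    h1 = ALIGN(minify(768,1), 8) = 384
 *    sum(i=2..m; h_i) = 192+96+48+24+16+8+8+8+8 = 408
 *    HZ_QPitch = h0 + MAX2(h1, 408) = 768 + 408 = 1176 rows */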
+ */ + hz_height *= 6; + } + } + + unsigned long pitch; + uint32_t tiling = I915_TILING_Y; + buf->bo = drm_intel_bo_alloc_tiled(brw->bufmgr, "hiz", + hz_width, hz_height, 1, + &tiling, &pitch, + BO_ALLOC_FOR_RENDER); + if (!buf->bo) { + free(buf); + return NULL; + } else if (tiling != I915_TILING_Y) { + drm_intel_bo_unreference(buf->bo); + free(buf); + return NULL; + } + + buf->pitch = pitch; + + return buf; +} + + +static struct intel_miptree_aux_buffer * +intel_hiz_miptree_buf_create(struct brw_context *brw, + struct intel_mipmap_tree *mt) +{ + struct intel_miptree_aux_buffer *buf = calloc(sizeof(*buf), 1); + uint32_t layout_flags = MIPTREE_LAYOUT_ACCELERATED_UPLOAD; + + if (brw->gen == 6) + layout_flags |= MIPTREE_LAYOUT_FORCE_ALL_SLICE_AT_LOD; + + if (!buf) + return NULL; + + layout_flags |= MIPTREE_LAYOUT_TILING_ANY; + buf->mt = intel_miptree_create(brw, + mt->target, + mt->format, + mt->first_level, + mt->last_level, + mt->logical_width0, + mt->logical_height0, + mt->logical_depth0, + mt->num_samples, + layout_flags); + if (!buf->mt) { + free(buf); + return NULL; + } + + buf->bo = buf->mt->bo; + buf->pitch = buf->mt->pitch; + buf->qpitch = buf->mt->qpitch; + + return buf; +} + +bool +intel_miptree_wants_hiz_buffer(struct brw_context *brw, + struct intel_mipmap_tree *mt) +{ + if (!brw->has_hiz) + return false; + + if (mt->hiz_buf != NULL) + return false; + + if (mt->disable_aux_buffers) + return false; + + switch (mt->format) { + case MESA_FORMAT_Z_FLOAT32: + case MESA_FORMAT_Z32_FLOAT_S8X24_UINT: + case MESA_FORMAT_Z24_UNORM_X8_UINT: + case MESA_FORMAT_Z24_UNORM_S8_UINT: + case MESA_FORMAT_Z_UNORM16: + return true; + default: + return false; + } +} bool intel_miptree_alloc_hiz(struct brw_context *brw, struct intel_mipmap_tree *mt) { - assert(mt->hiz_mt == NULL); - mt->hiz_mt = intel_miptree_create(brw, - mt->target, - mt->format, - mt->first_level, - mt->last_level, - mt->logical_width0, - mt->logical_height0, - mt->logical_depth0, - true, - mt->num_samples, - INTEL_MIPTREE_TILING_ANY); + assert(mt->hiz_buf == NULL); + assert(!mt->disable_aux_buffers); - if (!mt->hiz_mt) + if (brw->gen == 7) { + mt->hiz_buf = intel_gen7_hiz_buf_create(brw, mt); + } else if (brw->gen >= 8) { + mt->hiz_buf = intel_gen8_hiz_buf_create(brw, mt); + } else { + mt->hiz_buf = intel_hiz_miptree_buf_create(brw, mt); + } + + if (!mt->hiz_buf) return false; /* Mark that all slices need a HiZ resolve. */ - for (int level = mt->first_level; level <= mt->last_level; ++level) { + for (unsigned level = mt->first_level; level <= mt->last_level; ++level) { if (!intel_miptree_level_enable_hiz(brw, mt, level)) continue; - for (int layer = 0; layer < mt->level[level].depth; ++layer) { + for (unsigned layer = 0; layer < mt->level[level].depth; ++layer) { struct intel_resolve_map *m = malloc(sizeof(struct intel_resolve_map)); exec_node_init(&m->link); m->level = level; @@ -1514,9 +1959,7 @@ intel_miptree_all_slices_resolve(struct brw_context *brw, { bool did_resolve = false; - foreach_list_safe(node, &mt->hiz_map) { - struct intel_resolve_map *map = (struct intel_resolve_map *)node; - + foreach_list_typed_safe(struct intel_resolve_map, map, link, &mt->hiz_map) { if (map->need != need) continue; @@ -1558,7 +2001,7 @@ intel_miptree_resolve_color(struct brw_context *brw, case INTEL_FAST_CLEAR_STATE_CLEAR: /* Fast color clear resolves only make sense for non-MSAA buffers. 
*/ if (mt->msaa_layout == INTEL_MSAA_LAYOUT_NONE) - brw_blorp_resolve_color(brw, mt); + brw_meta_resolve_color(brw, mt); break; } } @@ -1655,8 +2098,8 @@ intel_miptree_updownsample(struct brw_context *brw, { if (brw->gen < 8) { brw_blorp_blit_miptrees(brw, - src, 0 /* level */, 0 /* layer */, - dst, 0 /* level */, 0 /* layer */, + src, 0 /* level */, 0 /* layer */, src->format, + dst, 0 /* level */, 0 /* layer */, dst->format, 0, 0, src->logical_width0, src->logical_height0, 0, 0, @@ -1676,7 +2119,9 @@ intel_miptree_updownsample(struct brw_context *brw, brw_blorp_blit_miptrees(brw, src->stencil_mt, 0 /* level */, 0 /* layer */, + src->stencil_mt->format, dst->stencil_mt, 0 /* level */, 0 /* layer */, + dst->stencil_mt->format, 0, 0, src->logical_width0, src->logical_height0, 0, 0, @@ -1707,8 +2152,7 @@ intel_miptree_map_raw(struct brw_context *brw, struct intel_mipmap_tree *mt) } void -intel_miptree_unmap_raw(struct brw_context *brw, - struct intel_mipmap_tree *mt) +intel_miptree_unmap_raw(struct intel_mipmap_tree *mt) { drm_intel_bo_unmap(mt->bo); } @@ -1722,8 +2166,8 @@ intel_miptree_map_gtt(struct brw_context *brw, unsigned int bw, bh; void *base; unsigned int image_x, image_y; - int x = map->x; - int y = map->y; + intptr_t x = map->x; + intptr_t y = map->y; /* For compressed formats, the stride is the number of bytes per * row of blocks. intel_miptree_get_image_offset() already does @@ -1731,7 +2175,9 @@ intel_miptree_map_gtt(struct brw_context *brw, */ _mesa_get_format_block_size(mt->format, &bw, &bh); assert(y % bh == 0); + assert(x % bw == 0); y /= bh; + x /= bw; base = intel_miptree_map_raw(brw, mt) + mt->offset; @@ -1749,20 +2195,17 @@ intel_miptree_map_gtt(struct brw_context *brw, map->ptr = base + y * map->stride + x * mt->cpp; } - DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __FUNCTION__, + DBG("%s: %d,%d %dx%d from mt %p (%s) " + "%"PRIiPTR",%"PRIiPTR" = %p/%d\n", __func__, map->x, map->y, map->w, map->h, mt, _mesa_get_format_name(mt->format), x, y, map->ptr, map->stride); } static void -intel_miptree_unmap_gtt(struct brw_context *brw, - struct intel_mipmap_tree *mt, - struct intel_miptree_map *map, - unsigned int level, - unsigned int slice) +intel_miptree_unmap_gtt(struct intel_mipmap_tree *mt) { - intel_miptree_unmap_raw(brw, mt); + intel_miptree_unmap_raw(mt); } static void @@ -1771,30 +2214,39 @@ intel_miptree_map_blit(struct brw_context *brw, struct intel_miptree_map *map, unsigned int level, unsigned int slice) { - map->mt = intel_miptree_create(brw, GL_TEXTURE_2D, mt->format, - 0, 0, - map->w, map->h, 1, - false, 0, - INTEL_MIPTREE_TILING_NONE); - if (!map->mt) { + map->linear_mt = intel_miptree_create(brw, GL_TEXTURE_2D, mt->format, + /* first_level */ 0, + /* last_level */ 0, + map->w, map->h, 1, + /* samples */ 0, + MIPTREE_LAYOUT_TILING_NONE); + + if (!map->linear_mt) { fprintf(stderr, "Failed to allocate blit temporary\n"); goto fail; } - map->stride = map->mt->pitch; + map->stride = map->linear_mt->pitch; - if (!intel_miptree_blit(brw, - mt, level, slice, - map->x, map->y, false, - map->mt, 0, 0, - 0, 0, false, - map->w, map->h, GL_COPY)) { - fprintf(stderr, "Failed to blit\n"); - goto fail; + /* One of either READ_BIT or WRITE_BIT or both is set. READ_BIT implies no + * INVALIDATE_RANGE_BIT. WRITE_BIT needs the original values read in unless + * invalidate is set, since we'll be writing the whole rectangle from our + * temporary buffer back out. 
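/* Usage sketch of that optimization (hypothetical caller): a write-only
 * map of the full rectangle with GL_MAP_INVALIDATE_RANGE_BIT skips the
 * read-back blit entirely, because every byte is overwritten before the
 * unmap.  The helper name and parameters are invented for the example. */
static void
upload_whole_level_sketch(struct brw_context *brw,
                          struct intel_mipmap_tree *mt,
                          unsigned level, unsigned slice,
                          const char *pixels, size_t row_bytes)
{
   unsigned w = minify(mt->logical_width0, level);
   unsigned h = minify(mt->logical_height0, level);
   void *dst;
   ptrdiff_t stride;

   intel_miptree_map(brw, mt, level, slice, 0, 0, w, h,
                     GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_RANGE_BIT,
                     &dst, &stride);
   for (unsigned y = 0; y < h; y++)
      memcpy((char *)dst + y * stride, pixels + y * row_bytes, row_bytes);
   intel_miptree_unmap(brw, mt, level, slice);
}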
+ */ + if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) { + if (!intel_miptree_blit(brw, + mt, level, slice, + map->x, map->y, false, + map->linear_mt, 0, 0, + 0, 0, false, + map->w, map->h, GL_COPY)) { + fprintf(stderr, "Failed to blit\n"); + goto fail; + } } - map->ptr = intel_miptree_map_raw(brw, map->mt); + map->ptr = intel_miptree_map_raw(brw, map->linear_mt); - DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __FUNCTION__, + DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __func__, map->x, map->y, map->w, map->h, mt, _mesa_get_format_name(mt->format), level, slice, map->ptr, map->stride); @@ -1802,7 +2254,7 @@ intel_miptree_map_blit(struct brw_context *brw, return; fail: - intel_miptree_release(&map->mt); + intel_miptree_release(&map->linear_mt); map->ptr = NULL; map->stride = 0; } @@ -1816,11 +2268,11 @@ intel_miptree_unmap_blit(struct brw_context *brw, { struct gl_context *ctx = &brw->ctx; - intel_miptree_unmap_raw(brw, map->mt); + intel_miptree_unmap_raw(map->linear_mt); if (map->mode & GL_MAP_WRITE_BIT) { bool ok = intel_miptree_blit(brw, - map->mt, 0, 0, + map->linear_mt, 0, 0, 0, 0, false, mt, level, slice, map->x, map->y, false, @@ -1828,12 +2280,13 @@ intel_miptree_unmap_blit(struct brw_context *brw, WARN_ONCE(!ok, "Failed to blit from linear temporary mapping"); } - intel_miptree_release(&map->mt); + intel_miptree_release(&map->linear_mt); } /** * "Map" a buffer by copying it to an untiled temporary using MOVNTDQA. */ +#if defined(USE_SSE41) static void intel_miptree_map_movntdqa(struct brw_context *brw, struct intel_mipmap_tree *mt, @@ -1843,7 +2296,7 @@ intel_miptree_map_movntdqa(struct brw_context *brw, assert(map->mode & GL_MAP_READ_BIT); assert(!(map->mode & GL_MAP_WRITE_BIT)); - DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __FUNCTION__, + DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __func__, map->x, map->y, map->w, map->h, mt, _mesa_get_format_name(mt->format), level, slice, map->ptr, map->stride); @@ -1887,7 +2340,7 @@ intel_miptree_map_movntdqa(struct brw_context *brw, _mesa_streaming_load_memcpy(dst_ptr, src_ptr, width_bytes); } - intel_miptree_unmap_raw(brw, mt); + intel_miptree_unmap_raw(mt); } static void @@ -1901,6 +2354,7 @@ intel_miptree_unmap_movntdqa(struct brw_context *brw, map->buffer = NULL; map->ptr = NULL; } +#endif static void intel_miptree_map_s8(struct brw_context *brw, @@ -1935,13 +2389,13 @@ intel_miptree_map_s8(struct brw_context *brw, } } - intel_miptree_unmap_raw(brw, mt); + intel_miptree_unmap_raw(mt); - DBG("%s: %d,%d %dx%d from mt %p %d,%d = %p/%d\n", __FUNCTION__, + DBG("%s: %d,%d %dx%d from mt %p %d,%d = %p/%d\n", __func__, map->x, map->y, map->w, map->h, mt, map->x + image_x, map->y + image_y, map->ptr, map->stride); } else { - DBG("%s: %d,%d %dx%d from mt %p = %p/%d\n", __FUNCTION__, + DBG("%s: %d,%d %dx%d from mt %p = %p/%d\n", __func__, map->x, map->y, map->w, map->h, mt, map->ptr, map->stride); } @@ -1971,7 +2425,7 @@ intel_miptree_unmap_s8(struct brw_context *brw, } } - intel_miptree_unmap_raw(brw, mt); + intel_miptree_unmap_raw(mt); } free(map->buffer); @@ -2025,7 +2479,7 @@ intel_miptree_unmap_etc(struct brw_context *brw, map->ptr, map->stride, map->w, map->h, mt->etc_format); - intel_miptree_unmap_raw(brw, mt); + intel_miptree_unmap_raw(mt); free(map->buffer); } @@ -2095,17 +2549,17 @@ intel_miptree_map_depthstencil(struct brw_context *brw, } } - intel_miptree_unmap_raw(brw, s_mt); - intel_miptree_unmap_raw(brw, z_mt); + intel_miptree_unmap_raw(s_mt); + intel_miptree_unmap_raw(z_mt); DBG("%s: %d,%d 
%dx%d from z mt %p %d,%d, s mt %p %d,%d = %p/%d\n", - __FUNCTION__, + __func__, map->x, map->y, map->w, map->h, z_mt, map->x + z_image_x, map->y + z_image_y, s_mt, map->x + s_image_x, map->y + s_image_y, map->ptr, map->stride); } else { - DBG("%s: %d,%d %dx%d from mt %p = %p/%d\n", __FUNCTION__, + DBG("%s: %d,%d %dx%d from mt %p = %p/%d\n", __func__, map->x, map->y, map->w, map->h, mt, map->ptr, map->stride); } @@ -2140,9 +2594,9 @@ intel_miptree_unmap_depthstencil(struct brw_context *brw, x + s_image_x + map->x, y + s_image_y + map->y, brw->has_swizzling); - ptrdiff_t z_offset = ((y + z_image_y) * + ptrdiff_t z_offset = ((y + z_image_y + map->y) * (z_mt->pitch / 4) + - (x + z_image_x)); + (x + z_image_x + map->x)); if (map_z32f_x24s8) { z_map[z_offset] = packed_map[(y * map->w + x) * 2 + 0]; @@ -2155,11 +2609,11 @@ intel_miptree_unmap_depthstencil(struct brw_context *brw, } } - intel_miptree_unmap_raw(brw, s_mt); - intel_miptree_unmap_raw(brw, z_mt); + intel_miptree_unmap_raw(s_mt); + intel_miptree_unmap_raw(z_mt); DBG("%s: %d,%d %dx%d from z mt %p (%s) %d,%d, s mt %p %d,%d = %p/%d\n", - __FUNCTION__, + __func__, map->x, map->y, map->w, map->h, z_mt, _mesa_get_format_name(z_mt->format), map->x + z_image_x, map->y + z_image_y, @@ -2226,12 +2680,52 @@ can_blit_slice(struct intel_mipmap_tree *mt, if (image_x >= 32768 || image_y >= 32768) return false; + /* See intel_miptree_blit() for details on the 32k pitch limit. */ if (mt->pitch >= 32768) return false; return true; } +static bool +use_intel_mipree_map_blit(struct brw_context *brw, + struct intel_mipmap_tree *mt, + GLbitfield mode, + unsigned int level, + unsigned int slice) +{ + if (brw->has_llc && + /* It's probably not worth swapping to the blit ring because of + * all the overhead involved. + */ + !(mode & GL_MAP_WRITE_BIT) && + !mt->compressed && + (mt->tiling == I915_TILING_X || + /* Prior to Sandybridge, the blitter can't handle Y tiling */ + (brw->gen >= 6 && mt->tiling == I915_TILING_Y)) && + can_blit_slice(mt, level, slice)) + return true; + + if (mt->tiling != I915_TILING_NONE && + mt->bo->size >= brw->max_gtt_map_object_size) { + assert(can_blit_slice(mt, level, slice)); + return true; + } + + return false; +} + +/** + * Parameter \a out_stride has type ptrdiff_t not because the buffer stride may + * exceed 32 bits but to diminish the likelihood subtle bugs in pointer + * arithmetic overflow. + * + * If you call this function and use \a out_stride, then you're doing pointer + * arithmetic on \a out_ptr. The type of \a out_stride doesn't prevent all + * bugs. The caller must still take care to avoid 32-bit overflow errors in + * all arithmetic expressions that contain buffer offsets and pixel sizes, + * which usually have type uint32_t or GLuint. + */ void intel_miptree_map(struct brw_context *brw, struct intel_mipmap_tree *mt, @@ -2243,7 +2737,7 @@ intel_miptree_map(struct brw_context *brw, unsigned int h, GLbitfield mode, void **out_ptr, - int *out_stride) + ptrdiff_t *out_stride) { struct intel_miptree_map *map; @@ -2268,21 +2762,14 @@ intel_miptree_map(struct brw_context *brw, intel_miptree_map_etc(brw, mt, map, level, slice); } else if (mt->stencil_mt && !(mode & BRW_MAP_DIRECT_BIT)) { intel_miptree_map_depthstencil(brw, mt, map, level, slice); - } - /* See intel_miptree_blit() for details on the 32k pitch limit. 
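/* To make the blitter limits in can_blit_slice() above concrete, a
 * hypothetical example: a 16384x16384 RGBA8 (cpp=4) texture has a pitch
 * of 16384 * 4 = 65536 bytes, which exceeds the blitter's 32768-byte
 * ceiling, so can_blit_slice() returns false even though the per-slice
 * x/y offsets fit in 15 bits, and the map falls back to another path. */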
*/ - else if (brw->has_llc && - !(mode & GL_MAP_WRITE_BIT) && - !mt->compressed && - (mt->tiling == I915_TILING_X || - (brw->gen >= 6 && mt->tiling == I915_TILING_Y)) && - can_blit_slice(mt, level, slice)) { - intel_miptree_map_blit(brw, mt, map, level, slice); - } else if (mt->tiling != I915_TILING_NONE && - mt->bo->size >= brw->max_gtt_map_object_size) { - assert(can_blit_slice(mt, level, slice)); + } else if (use_intel_mipree_map_blit(brw, mt, mode, level, slice)) { intel_miptree_map_blit(brw, mt, map, level, slice); - } else if (!(mode & GL_MAP_WRITE_BIT) && !mt->compressed && cpu_has_sse4_1) { +#if defined(USE_SSE41) + } else if (!(mode & GL_MAP_WRITE_BIT) && + !mt->compressed && cpu_has_sse4_1 && + (mt->pitch % 16 == 0)) { intel_miptree_map_movntdqa(brw, mt, map, level, slice); +#endif } else { intel_miptree_map_gtt(brw, mt, map, level, slice); } @@ -2307,7 +2794,7 @@ intel_miptree_unmap(struct brw_context *brw, if (!map) return; - DBG("%s: mt %p (%s) level %d slice %d\n", __FUNCTION__, + DBG("%s: mt %p (%s) level %d slice %d\n", __func__, mt, _mesa_get_format_name(mt->format), level, slice); if (mt->format == MESA_FORMAT_S_UINT8) { @@ -2317,12 +2804,14 @@ intel_miptree_unmap(struct brw_context *brw, intel_miptree_unmap_etc(brw, mt, map, level, slice); } else if (mt->stencil_mt && !(map->mode & BRW_MAP_DIRECT_BIT)) { intel_miptree_unmap_depthstencil(brw, mt, map, level, slice); - } else if (map->mt) { + } else if (map->linear_mt) { intel_miptree_unmap_blit(brw, mt, map, level, slice); +#if defined(USE_SSE41) } else if (map->buffer && cpu_has_sse4_1) { intel_miptree_unmap_movntdqa(brw, mt, map, level, slice); +#endif } else { - intel_miptree_unmap_gtt(brw, mt, map, level, slice); + intel_miptree_unmap_gtt(mt); } intel_miptree_release_map(mt, level, slice);
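/* End-to-end usage sketch of the mapping API as it stands after this
 * patch (hypothetical caller; note the stride is now ptrdiff_t). */
static void
read_one_row_sketch(struct brw_context *brw, struct intel_mipmap_tree *mt,
                    unsigned level, unsigned slice, unsigned row,
                    void *dst, size_t row_bytes)
{
   void *src;
   ptrdiff_t stride;   /* ptrdiff_t keeps the pointer math well-typed */

   intel_miptree_map(brw, mt, level, slice,
                     0, row, minify(mt->logical_width0, level), 1,
                     GL_MAP_READ_BIT, &src, &stride);
   memcpy(dst, src, row_bytes);   /* src already points at (0, row) */
   intel_miptree_unmap(brw, mt, level, slice);
}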