diff --git a/src/mesa/drivers/dri/i965/intel_mipmap_tree.c b/src/mesa/drivers/dri/i965/intel_mipmap_tree.c
index 2a84391f732..8616c0193c8 100644
--- a/src/mesa/drivers/dri/i965/intel_mipmap_tree.c
+++ b/src/mesa/drivers/dri/i965/intel_mipmap_tree.c
@@ -29,48 +29,38 @@
 #include <GL/internal/dri_interface.h>
 
 #include "intel_batchbuffer.h"
-#include "intel_chipset.h"
 #include "intel_mipmap_tree.h"
-#include "intel_regions.h"
 #include "intel_resolve_map.h"
 #include "intel_tex.h"
 #include "intel_blit.h"
+#include "intel_fbo.h"
 
 #include "brw_blorp.h"
 #include "brw_context.h"
 
 #include "main/enums.h"
+#include "main/fbobject.h"
 #include "main/formats.h"
 #include "main/glformats.h"
 #include "main/texcompress_etc.h"
 #include "main/teximage.h"
 #include "main/streaming-load-memcpy.h"
+#include "x86/common_x86_asm.h"
 
 #define FILE_DEBUG_FLAG DEBUG_MIPTREE
 
-static GLenum
-target_to_target(GLenum target)
-{
-   switch (target) {
-   case GL_TEXTURE_CUBE_MAP_POSITIVE_X_ARB:
-   case GL_TEXTURE_CUBE_MAP_NEGATIVE_X_ARB:
-   case GL_TEXTURE_CUBE_MAP_POSITIVE_Y_ARB:
-   case GL_TEXTURE_CUBE_MAP_NEGATIVE_Y_ARB:
-   case GL_TEXTURE_CUBE_MAP_POSITIVE_Z_ARB:
-   case GL_TEXTURE_CUBE_MAP_NEGATIVE_Z_ARB:
-      return GL_TEXTURE_CUBE_MAP_ARB;
-   default:
-      return target;
-   }
-}
-
+static bool
+intel_miptree_alloc_mcs(struct brw_context *brw,
+                        struct intel_mipmap_tree *mt,
+                        GLuint num_samples);
 
 /**
  * Determine which MSAA layout should be used by the MSAA surface being
  * created, based on the chip generation and the surface type.
  */
 static enum intel_msaa_layout
-compute_msaa_layout(struct brw_context *brw, mesa_format format, GLenum target)
+compute_msaa_layout(struct brw_context *brw, mesa_format format, GLenum target,
+                    bool disable_aux_buffers)
 {
    /* Prior to Gen7, all MSAA surfaces used IMS layout. */
    if (brw->gen < 7)
@@ -94,9 +84,12 @@ compute_msaa_layout(struct brw_context *brw, mesa_format format, GLenum target)
     * would require converting between CMS and UMS MSAA layouts on the fly,
     * which is expensive.
     */
-   if (_mesa_get_format_datatype(format) == GL_INT) {
-      /* TODO: is this workaround needed for future chipsets? */
-      assert(brw->gen == 7);
+   if (brw->gen == 7 && _mesa_get_format_datatype(format) == GL_INT) {
+      return INTEL_MSAA_LAYOUT_UMS;
+   } else if (disable_aux_buffers) {
+      /* We can't use the CMS layout because it uses an aux buffer, the MCS
+       * buffer. So fall back to UMS, which is identical to CMS without the
+       * MCS.
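+       *
+       * Illustrative summary (not part of the change) of the selection this
+       * function implements:
+       *
+       *   gen < 7 or depth/stencil format  -> IMS
+       *   gen == 7 and integer format      -> UMS
+       *   disable_aux_buffers              -> UMS
+       *   otherwise                        -> CMS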
*/ return INTEL_MSAA_LAYOUT_UMS; } else { return INTEL_MSAA_LAYOUT_CMS; @@ -151,9 +144,9 @@ intel_get_non_msrt_mcs_alignment(struct brw_context *brw, struct intel_mipmap_tree *mt, unsigned *width_px, unsigned *height) { - switch (mt->region->tiling) { + switch (mt->tiling) { default: - assert(!"Non-MSRT MCS requires X or Y tiling"); + unreachable("Non-MSRT MCS requires X or Y tiling"); /* In release builds, fall through */ case I915_TILING_Y: *width_px = 32 / mt->cpp; @@ -186,7 +179,10 @@ intel_is_non_msrt_mcs_buffer_supported(struct brw_context *brw, struct intel_mipmap_tree *mt) { /* MCS support does not exist prior to Gen7 */ - if (brw->gen < 7 || brw->gen >= 8) + if (brw->gen < 7) + return false; + + if (mt->disable_aux_buffers) return false; /* MCS is only supported for color buffers */ @@ -197,15 +193,28 @@ intel_is_non_msrt_mcs_buffer_supported(struct brw_context *brw, return false; } - if (mt->region->tiling != I915_TILING_X && - mt->region->tiling != I915_TILING_Y) + if (mt->tiling != I915_TILING_X && + mt->tiling != I915_TILING_Y) return false; if (mt->cpp != 4 && mt->cpp != 8 && mt->cpp != 16) return false; - if (mt->first_level != 0 || mt->last_level != 0) + if (mt->first_level != 0 || mt->last_level != 0) { + if (brw->gen >= 8) { + perf_debug("Multi-LOD fast clear - giving up (%dx%dx%d).\n", + mt->logical_width0, mt->logical_height0, mt->last_level); + } + return false; - if (mt->physical_depth0 != 1) + } + if (mt->physical_depth0 != 1) { + if (brw->gen >= 8) { + perf_debug("Layered fast clear - giving up. (%dx%d%d)\n", + mt->logical_width0, mt->logical_height0, + mt->physical_depth0); + } + return false; + } /* There's no point in using an MCS buffer if the surface isn't in a * renderable format. @@ -217,12 +226,29 @@ intel_is_non_msrt_mcs_buffer_supported(struct brw_context *brw, } +/** + * Determine depth format corresponding to a depth+stencil format, + * for separate stencil. + */ +mesa_format +intel_depth_format_for_depthstencil_format(mesa_format format) { + switch (format) { + case MESA_FORMAT_Z24_UNORM_S8_UINT: + return MESA_FORMAT_Z24_UNORM_X8_UINT; + case MESA_FORMAT_Z32_FLOAT_S8X24_UINT: + return MESA_FORMAT_Z_FLOAT32; + default: + return format; + } +} + + /** * @param for_bo Indicates that the caller is * intel_miptree_create_for_bo(). If true, then do not create * \c stencil_mt. */ -struct intel_mipmap_tree * +static struct intel_mipmap_tree * intel_miptree_create_layout(struct brw_context *brw, GLenum target, mesa_format format, @@ -232,18 +258,40 @@ intel_miptree_create_layout(struct brw_context *brw, GLuint height0, GLuint depth0, bool for_bo, - GLuint num_samples) + GLuint num_samples, + bool force_all_slices_at_each_lod, + bool disable_aux_buffers) { struct intel_mipmap_tree *mt = calloc(sizeof(*mt), 1); if (!mt) return NULL; - DBG("%s target %s format %s level %d..%d <-- %p\n", __FUNCTION__, + DBG("%s target %s format %s level %d..%d slices %d <-- %p\n", __func__, _mesa_lookup_enum_by_nr(target), _mesa_get_format_name(format), - first_level, last_level, mt); + first_level, last_level, depth0, mt); - mt->target = target_to_target(target); + if (target == GL_TEXTURE_1D_ARRAY) { + /* For a 1D Array texture the OpenGL API will treat the height0 + * parameter as the number of array slices. For Intel hardware, we treat + * the 1D array as a 2D Array with a height of 1. + * + * So, when we first come through this path to create a 1D Array + * texture, height0 stores the number of slices, and depth0 is 1. In + * this case, we want to swap height0 and depth0. 
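+       *
+       * E.g. a 1D array texture with 128 texels and 6 slices arrives here
+       * as (width0 = 128, height0 = 6, depth0 = 1) and is laid out as the
+       * 2D array (width0 = 128, height0 = 1, depth0 = 6).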
+ * + * Since some miptrees will be created based on the base miptree, we may + * come through this path and see height0 as 1 and depth0 being the + * number of slices. In this case we don't need to do the swap. + */ + assert(height0 == 1 || depth0 == 1); + if (height0 > 1) { + depth0 = height0; + height0 = 1; + } + } + + mt->target = target; mt->format = format; mt->first_level = first_level; mt->last_level = last_level; @@ -251,6 +299,8 @@ intel_miptree_create_layout(struct brw_context *brw, mt->logical_height0 = height0; mt->logical_depth0 = depth0; mt->fast_clear_state = INTEL_FAST_CLEAR_STATE_NO_MCS; + mt->disable_aux_buffers = disable_aux_buffers; + exec_list_make_empty(&mt->hiz_map); /* The cpp is bytes per (1, blockheight)-sized block for compressed * textures. This is why you'll see divides by blockheight all over @@ -267,9 +317,31 @@ intel_miptree_create_layout(struct brw_context *brw, if (num_samples > 1) { /* Adjust width/height/depth for MSAA */ - mt->msaa_layout = compute_msaa_layout(brw, format, mt->target); + mt->msaa_layout = compute_msaa_layout(brw, format, + mt->target, mt->disable_aux_buffers); if (mt->msaa_layout == INTEL_MSAA_LAYOUT_IMS) { - /* In the Sandy Bridge PRM, volume 4, part 1, page 31, it says: + /* From the Ivybridge PRM, Volume 1, Part 1, page 108: + * "If the surface is multisampled and it is a depth or stencil + * surface or Multisampled Surface StorageFormat in SURFACE_STATE is + * MSFMT_DEPTH_STENCIL, WL and HL must be adjusted as follows before + * proceeding: + * + * +----------------------------------------------------------------+ + * | Num Multisamples | W_l = | H_l = | + * +----------------------------------------------------------------+ + * | 2 | ceiling(W_l / 2) * 4 | H_l (no adjustment) | + * | 4 | ceiling(W_l / 2) * 4 | ceiling(H_l / 2) * 4 | + * | 8 | ceiling(W_l / 2) * 8 | ceiling(H_l / 2) * 4 | + * | 16 | ceiling(W_l / 2) * 8 | ceiling(H_l / 2) * 8 | + * +----------------------------------------------------------------+ + * " + * + * Note that MSFMT_DEPTH_STENCIL just means the IMS (interleaved) + * format rather than UMS/CMS (array slices). The Sandybridge PRM, + * Volume 1, Part 1, Page 111 has the same formula for 4x MSAA. + * + * Another more complicated explanation for these adjustments comes + * from the Sandybridge PRM, volume 4, part 1, page 31: * * "Any of the other messages (sample*, LOD, load4) used with a * (4x) multisampled surface will in-effect sample a surface with @@ -304,6 +376,11 @@ intel_miptree_create_layout(struct brw_context *brw, * sample 3 is in that bottom right 2x2 block. */ switch (num_samples) { + case 2: + assert(brw->gen >= 8); + width0 = ALIGN(width0, 2) * 2; + height0 = ALIGN(height0, 2); + break; case 4: width0 = ALIGN(width0, 2) * 2; height0 = ALIGN(height0, 2) * 2; @@ -313,10 +390,10 @@ intel_miptree_create_layout(struct brw_context *brw, height0 = ALIGN(height0, 2) * 2; break; default: - /* num_samples should already have been quantized to 0, 1, 4, or + /* num_samples should already have been quantized to 0, 1, 2, 4, or * 8. */ - assert(false); + unreachable("not reached"); } } else { /* Non-interleaved */ @@ -324,18 +401,29 @@ intel_miptree_create_layout(struct brw_context *brw, } } - /* array_spacing_lod0 is only used for non-IMS MSAA surfaces. TODO: can we - * use it elsewhere? + /* Set array_layout to ALL_SLICES_AT_EACH_LOD when array_spacing_lod0 can + * be used. array_spacing_lod0 is only used for non-IMS MSAA surfaces on + * Gen 7 and 8. 
On Gen 8 and 9 this layout is not available but it is still + * used on Gen8 to make it pick a qpitch value which doesn't include space + * for the mipmaps. On Gen9 this is not necessary because it will + * automatically pick a packed qpitch value whenever mt->first_level == + * mt->last_level. + * TODO: can we use it elsewhere? + * TODO: also disable this on Gen8 and pick the qpitch value like Gen9 */ - switch (mt->msaa_layout) { - case INTEL_MSAA_LAYOUT_NONE: - case INTEL_MSAA_LAYOUT_IMS: - mt->array_spacing_lod0 = false; - break; - case INTEL_MSAA_LAYOUT_UMS: - case INTEL_MSAA_LAYOUT_CMS: - mt->array_spacing_lod0 = true; - break; + if (brw->gen >= 9) { + mt->array_layout = ALL_LOD_IN_EACH_SLICE; + } else { + switch (mt->msaa_layout) { + case INTEL_MSAA_LAYOUT_NONE: + case INTEL_MSAA_LAYOUT_IMS: + mt->array_layout = ALL_LOD_IN_EACH_SLICE; + break; + case INTEL_MSAA_LAYOUT_UMS: + case INTEL_MSAA_LAYOUT_CMS: + mt->array_layout = ALL_SLICES_AT_EACH_LOD; + break; + } } if (target == GL_TEXTURE_CUBE_MAP) { @@ -350,7 +438,9 @@ intel_miptree_create_layout(struct brw_context *brw, if (!for_bo && _mesa_get_format_base_format(format) == GL_DEPTH_STENCIL && (brw->must_use_separate_stencil || - (brw->has_separate_stencil && brw_is_hiz_depth_format(brw, format)))) { + (brw->has_separate_stencil && + intel_miptree_wants_hiz_buffer(brw, mt)))) { + const bool force_all_slices_at_each_lod = brw->gen == 6; mt->stencil_mt = intel_miptree_create(brw, mt->target, MESA_FORMAT_S_UINT8, @@ -361,7 +451,8 @@ intel_miptree_create_layout(struct brw_context *brw, mt->logical_depth0, true, num_samples, - INTEL_MIPTREE_TILING_ANY); + INTEL_MIPTREE_TILING_ANY, + force_all_slices_at_each_lod); if (!mt->stencil_mt) { intel_miptree_release(&mt); return NULL; @@ -370,19 +461,23 @@ intel_miptree_create_layout(struct brw_context *brw, /* Fix up the Z miptree format for how we're splitting out separate * stencil. Gen7 expects there to be no stencil bits in its depth buffer. */ - if (mt->format == MESA_FORMAT_S8_Z24) { - mt->format = MESA_FORMAT_X8_Z24; - } else if (mt->format == MESA_FORMAT_Z32_FLOAT_X24S8) { - mt->format = MESA_FORMAT_Z_FLOAT32; - mt->cpp = 4; - } else { - _mesa_problem(NULL, "Unknown format %s in separate stencil mt\n", - _mesa_get_format_name(mt->format)); + mt->format = intel_depth_format_for_depthstencil_format(mt->format); + mt->cpp = 4; + + if (format == mt->format) { + _mesa_problem(NULL, "Unknown format %s in separate stencil mt\n", + _mesa_get_format_name(mt->format)); } } + if (force_all_slices_at_each_lod) + mt->array_layout = ALL_SLICES_AT_EACH_LOD; + brw_miptree_layout(brw, mt); + if (mt->disable_aux_buffers) + assert(mt->msaa_layout != INTEL_MSAA_LAYOUT_CMS); + return mt; } @@ -437,6 +532,13 @@ intel_miptree_choose_tiling(struct brw_context *brw, base_format == GL_DEPTH_STENCIL_EXT) return I915_TILING_Y; + /* 1D textures (and 1D array textures) don't get any benefit from tiling, + * in fact it leads to a less efficient use of memory space and bandwidth + * due to tile alignment. + */ + if (mt->logical_height0 == 1) + return I915_TILING_NONE; + int minimum_pitch = mt->total_width * mt->cpp; /* If the width is much smaller than a tile, don't bother tiling. */ @@ -457,10 +559,10 @@ intel_miptree_choose_tiling(struct brw_context *brw, /* From the Sandybridge PRM, Volume 1, Part 2, page 32: * "NOTE: 128BPE Format Color Buffer ( render target ) MUST be either TileX * or Linear." - * 128 bits per pixel translates to 16 bytes per pixel. 
This is necessary - * all the way back to 965, but is explicitly permitted on Gen7. + * 128 bits per pixel translates to 16 bytes per pixel. This is necessary + * all the way back to 965, but is permitted on Gen7+. */ - if (brw->gen != 7 && mt->cpp >= 16) + if (brw->gen < 7 && mt->cpp >= 16) return I915_TILING_X; /* From the Ivy Bridge PRM, Vol4 Part1 2.12.2.1 (SURFACE_STATE for most @@ -483,6 +585,47 @@ intel_miptree_choose_tiling(struct brw_context *brw, return I915_TILING_Y | I915_TILING_X; } + +/** + * Choose an appropriate uncompressed format for a requested + * compressed format, if unsupported. + */ +mesa_format +intel_lower_compressed_format(struct brw_context *brw, mesa_format format) +{ + /* No need to lower ETC formats on these platforms, + * they are supported natively. + */ + if (brw->gen >= 8 || brw->is_baytrail) + return format; + + switch (format) { + case MESA_FORMAT_ETC1_RGB8: + return MESA_FORMAT_R8G8B8X8_UNORM; + case MESA_FORMAT_ETC2_RGB8: + return MESA_FORMAT_R8G8B8X8_UNORM; + case MESA_FORMAT_ETC2_SRGB8: + case MESA_FORMAT_ETC2_SRGB8_ALPHA8_EAC: + case MESA_FORMAT_ETC2_SRGB8_PUNCHTHROUGH_ALPHA1: + return MESA_FORMAT_B8G8R8A8_SRGB; + case MESA_FORMAT_ETC2_RGBA8_EAC: + case MESA_FORMAT_ETC2_RGB8_PUNCHTHROUGH_ALPHA1: + return MESA_FORMAT_R8G8B8A8_UNORM; + case MESA_FORMAT_ETC2_R11_EAC: + return MESA_FORMAT_R_UNORM16; + case MESA_FORMAT_ETC2_SIGNED_R11_EAC: + return MESA_FORMAT_R_SNORM16; + case MESA_FORMAT_ETC2_RG11_EAC: + return MESA_FORMAT_R16G16_UNORM; + case MESA_FORMAT_ETC2_SIGNED_RG11_EAC: + return MESA_FORMAT_R16G16_SNORM; + default: + /* Non ETC1 / ETC2 format */ + return format; + } +} + + struct intel_mipmap_tree * intel_miptree_create(struct brw_context *brw, GLenum target, @@ -494,54 +637,24 @@ intel_miptree_create(struct brw_context *brw, GLuint depth0, bool expect_accelerated_upload, GLuint num_samples, - enum intel_miptree_tiling_mode requested_tiling) + enum intel_miptree_tiling_mode requested_tiling, + bool force_all_slices_at_each_lod) { struct intel_mipmap_tree *mt; mesa_format tex_format = format; mesa_format etc_format = MESA_FORMAT_NONE; GLuint total_width, total_height; - if (brw->gen < 8 && !brw->is_baytrail) { - switch (format) { - case MESA_FORMAT_ETC1_RGB8: - format = MESA_FORMAT_R8G8B8X8_UNORM; - break; - case MESA_FORMAT_ETC2_RGB8: - format = MESA_FORMAT_R8G8B8X8_UNORM; - break; - case MESA_FORMAT_ETC2_SRGB8: - case MESA_FORMAT_ETC2_SRGB8_ALPHA8_EAC: - case MESA_FORMAT_ETC2_SRGB8_PUNCHTHROUGH_ALPHA1: - format = MESA_FORMAT_SARGB8; - break; - case MESA_FORMAT_ETC2_RGBA8_EAC: - case MESA_FORMAT_ETC2_RGB8_PUNCHTHROUGH_ALPHA1: - format = MESA_FORMAT_R8G8B8A8_UNORM; - break; - case MESA_FORMAT_ETC2_R11_EAC: - format = MESA_FORMAT_R_UNORM16; - break; - case MESA_FORMAT_ETC2_SIGNED_R11_EAC: - format = MESA_FORMAT_SIGNED_R16; - break; - case MESA_FORMAT_ETC2_RG11_EAC: - format = MESA_FORMAT_GR1616; - break; - case MESA_FORMAT_ETC2_SIGNED_RG11_EAC: - format = MESA_FORMAT_SIGNED_GR1616; - break; - default: - /* Non ETC1 / ETC2 format */ - break; - } - } + format = intel_lower_compressed_format(brw, format); etc_format = (format != tex_format) ? 
tex_format : MESA_FORMAT_NONE; mt = intel_miptree_create_layout(brw, target, format, first_level, last_level, width0, height0, depth0, - false, num_samples); + false, num_samples, + force_all_slices_at_each_lod, + false /*disable_aux_buffers*/); /* * pitch == 0 || height == 0 indicates the null texture */ @@ -562,36 +675,45 @@ intel_miptree_create(struct brw_context *brw, uint32_t tiling = intel_miptree_choose_tiling(brw, format, width0, num_samples, requested_tiling, mt); - bool y_or_x = tiling == (I915_TILING_Y | I915_TILING_X); + bool y_or_x = false; + + if (tiling == (I915_TILING_Y | I915_TILING_X)) { + y_or_x = true; + mt->tiling = I915_TILING_Y; + } else { + mt->tiling = tiling; + } + unsigned long pitch; mt->etc_format = etc_format; - mt->region = intel_region_alloc(brw->intelScreen, - y_or_x ? I915_TILING_Y : tiling, - mt->cpp, - total_width, - total_height, - expect_accelerated_upload); - - /* If the region is too large to fit in the aperture, we need to use the - * BLT engine to support it. The BLT paths can't currently handle Y-tiling, - * so we need to fall back to X. + mt->bo = drm_intel_bo_alloc_tiled(brw->bufmgr, "miptree", + total_width, total_height, mt->cpp, + &mt->tiling, &pitch, + (expect_accelerated_upload ? + BO_ALLOC_FOR_RENDER : 0)); + mt->pitch = pitch; + + /* If the BO is too large to fit in the aperture, we need to use the + * BLT engine to support it. Prior to Sandybridge, the BLT paths can't + * handle Y-tiling, so we need to fall back to X. */ - if (y_or_x && mt->region->bo->size >= brw->max_gtt_map_object_size) { + if (brw->gen < 6 && y_or_x && mt->bo->size >= brw->max_gtt_map_object_size) { perf_debug("%dx%d miptree larger than aperture; falling back to X-tiled\n", mt->total_width, mt->total_height); - intel_region_release(&mt->region); - mt->region = intel_region_alloc(brw->intelScreen, - I915_TILING_X, - mt->cpp, - total_width, - total_height, - expect_accelerated_upload); + mt->tiling = I915_TILING_X; + drm_intel_bo_unreference(mt->bo); + mt->bo = drm_intel_bo_alloc_tiled(brw->bufmgr, "miptree", + total_width, total_height, mt->cpp, + &mt->tiling, &pitch, + (expect_accelerated_upload ? + BO_ALLOC_FOR_RENDER : 0)); + mt->pitch = pitch; } mt->offset = 0; - if (!mt->region) { + if (!mt->bo) { intel_miptree_release(&mt); return NULL; } @@ -622,14 +744,15 @@ intel_miptree_create_for_bo(struct brw_context *brw, uint32_t offset, uint32_t width, uint32_t height, + uint32_t depth, int pitch, - uint32_t tiling) + bool disable_aux_buffers) { struct intel_mipmap_tree *mt; + uint32_t tiling, swizzle; + GLenum target; - struct intel_region *region = calloc(1, sizeof(*region)); - if (!region) - return NULL; + drm_intel_bo_get_tiling(bo, &tiling, &swizzle); /* Nothing will be able to use this miptree with the BO if the offset isn't * aligned. @@ -642,118 +765,47 @@ intel_miptree_create_for_bo(struct brw_context *brw, */ assert(pitch >= 0); - mt = intel_miptree_create_layout(brw, GL_TEXTURE_2D, format, + target = depth > 1 ? 
GL_TEXTURE_2D_ARRAY : GL_TEXTURE_2D; + + mt = intel_miptree_create_layout(brw, target, format, 0, 0, - width, height, 1, - true, 0 /* num_samples */); - if (!mt) { - free(region); - return mt; - } - - region->cpp = mt->cpp; - region->width = width; - region->height = height; - region->pitch = pitch; - region->refcount = 1; - drm_intel_bo_reference(bo); - region->bo = bo; - region->tiling = tiling; + width, height, depth, + true, 0, false, + disable_aux_buffers); + if (!mt) + return NULL; - mt->region = region; + drm_intel_bo_reference(bo); + mt->bo = bo; + mt->pitch = pitch; mt->offset = offset; + mt->tiling = tiling; return mt; } - /** - * For a singlesample DRI2 buffer, this simply wraps the given region with a miptree. + * For a singlesample renderbuffer, this simply wraps the given BO with a + * miptree. * - * For a multisample DRI2 buffer, this wraps the given region with - * a singlesample miptree, then creates a multisample miptree into which the - * singlesample miptree is embedded as a child. + * For a multisample renderbuffer, this wraps the window system's + * (singlesample) BO with a singlesample miptree attached to the + * intel_renderbuffer, then creates a multisample miptree attached to irb->mt + * that will contain the actual rendering (which is lazily resolved to + * irb->singlesample_mt). */ -struct intel_mipmap_tree* -intel_miptree_create_for_dri2_buffer(struct brw_context *brw, - unsigned dri_attachment, - mesa_format format, - uint32_t num_samples, - struct intel_region *region) -{ - struct intel_mipmap_tree *singlesample_mt = NULL; - struct intel_mipmap_tree *multisample_mt = NULL; - - /* Only the front and back buffers, which are color buffers, are shared - * through DRI2. - */ - assert(dri_attachment == __DRI_BUFFER_BACK_LEFT || - dri_attachment == __DRI_BUFFER_FRONT_LEFT || - dri_attachment == __DRI_BUFFER_FAKE_FRONT_LEFT); - assert(_mesa_get_format_base_format(format) == GL_RGB || - _mesa_get_format_base_format(format) == GL_RGBA); - - singlesample_mt = intel_miptree_create_for_bo(brw, - region->bo, - format, - 0, - region->width, - region->height, - region->pitch, - region->tiling); - if (!singlesample_mt) - return NULL; - singlesample_mt->region->name = region->name; - - /* If this miptree is capable of supporting fast color clears, set - * fast_clear_state appropriately to ensure that fast clears will occur. - * Allocation of the MCS miptree will be deferred until the first fast - * clear actually occurs. - */ - if (intel_is_non_msrt_mcs_buffer_supported(brw, singlesample_mt)) - singlesample_mt->fast_clear_state = INTEL_FAST_CLEAR_STATE_RESOLVED; - - if (num_samples == 0) - return singlesample_mt; - - multisample_mt = intel_miptree_create_for_renderbuffer(brw, - format, - region->width, - region->height, - num_samples); - if (!multisample_mt) { - intel_miptree_release(&singlesample_mt); - return NULL; - } - - multisample_mt->singlesample_mt = singlesample_mt; - multisample_mt->need_downsample = false; - - if (brw->is_front_buffer_rendering && - (dri_attachment == __DRI_BUFFER_FRONT_LEFT || - dri_attachment == __DRI_BUFFER_FAKE_FRONT_LEFT)) { - intel_miptree_upsample(brw, multisample_mt); - } - - return multisample_mt; -} - -/** - * For a singlesample image buffer, this simply wraps the given region with a miptree. - * - * For a multisample image buffer, this wraps the given region with - * a singlesample miptree, then creates a multisample miptree into which the - * singlesample miptree is embedded as a child. 
- */ -struct intel_mipmap_tree* -intel_miptree_create_for_image_buffer(struct brw_context *intel, - enum __DRIimageBufferMask buffer_type, - mesa_format format, - uint32_t num_samples, - struct intel_region *region) +void +intel_update_winsys_renderbuffer_miptree(struct brw_context *intel, + struct intel_renderbuffer *irb, + drm_intel_bo *bo, + uint32_t width, uint32_t height, + uint32_t pitch) { struct intel_mipmap_tree *singlesample_mt = NULL; struct intel_mipmap_tree *multisample_mt = NULL; + struct gl_renderbuffer *rb = &irb->Base.Base; + mesa_format format = rb->Format; + int num_samples = rb->NumSamples; /* Only the front and back buffers, which are color buffers, are allocated * through the image loader. @@ -762,15 +814,16 @@ intel_miptree_create_for_image_buffer(struct brw_context *intel, _mesa_get_format_base_format(format) == GL_RGBA); singlesample_mt = intel_miptree_create_for_bo(intel, - region->bo, + bo, format, 0, - region->width, - region->height, - region->pitch, - region->tiling); + width, + height, + 1, + pitch, + false); if (!singlesample_mt) - return NULL; + goto fail; /* If this miptree is capable of supporting fast color clears, set * mcs_state appropriately to ensure that fast clears will occur. @@ -780,27 +833,37 @@ intel_miptree_create_for_image_buffer(struct brw_context *intel, if (intel_is_non_msrt_mcs_buffer_supported(intel, singlesample_mt)) singlesample_mt->fast_clear_state = INTEL_FAST_CLEAR_STATE_RESOLVED; - if (num_samples == 0) - return singlesample_mt; - - multisample_mt = intel_miptree_create_for_renderbuffer(intel, - format, - region->width, - region->height, - num_samples); - if (!multisample_mt) { - intel_miptree_release(&singlesample_mt); - return NULL; - } - - multisample_mt->singlesample_mt = singlesample_mt; - multisample_mt->need_downsample = false; + if (num_samples == 0) { + intel_miptree_release(&irb->mt); + irb->mt = singlesample_mt; - if (intel->is_front_buffer_rendering && buffer_type == __DRI_IMAGE_BUFFER_FRONT) { - intel_miptree_upsample(intel, multisample_mt); + assert(!irb->singlesample_mt); + } else { + intel_miptree_release(&irb->singlesample_mt); + irb->singlesample_mt = singlesample_mt; + + if (!irb->mt || + irb->mt->logical_width0 != width || + irb->mt->logical_height0 != height) { + multisample_mt = intel_miptree_create_for_renderbuffer(intel, + format, + width, + height, + num_samples); + if (!multisample_mt) + goto fail; + + irb->need_downsample = false; + intel_miptree_release(&irb->mt); + irb->mt = multisample_mt; + } } + return; - return multisample_mt; +fail: + intel_miptree_release(&irb->singlesample_mt); + intel_miptree_release(&irb->mt); + return; } struct intel_mipmap_tree* @@ -813,14 +876,15 @@ intel_miptree_create_for_renderbuffer(struct brw_context *brw, struct intel_mipmap_tree *mt; uint32_t depth = 1; bool ok; + GLenum target = num_samples > 1 ? 
GL_TEXTURE_2D_MULTISAMPLE : GL_TEXTURE_2D; - mt = intel_miptree_create(brw, GL_TEXTURE_2D, format, 0, 0, + mt = intel_miptree_create(brw, target, format, 0, 0, width, height, depth, true, num_samples, - INTEL_MIPTREE_TILING_ANY); + INTEL_MIPTREE_TILING_ANY, false); if (!mt) goto fail; - if (brw_is_hiz_depth_format(brw, format)) { + if (intel_miptree_wants_hiz_buffer(brw, mt)) { ok = intel_miptree_alloc_hiz(brw, mt); if (!ok) goto fail; @@ -844,7 +908,7 @@ intel_miptree_reference(struct intel_mipmap_tree **dst, if (src) { src->refcount++; - DBG("%s %p refcount now %d\n", __FUNCTION__, src, src->refcount); + DBG("%s %p refcount now %d\n", __func__, src, src->refcount); } *dst = src; @@ -857,17 +921,22 @@ intel_miptree_release(struct intel_mipmap_tree **mt) if (!*mt) return; - DBG("%s %p refcount will be %d\n", __FUNCTION__, *mt, (*mt)->refcount - 1); + DBG("%s %p refcount will be %d\n", __func__, *mt, (*mt)->refcount - 1); if (--(*mt)->refcount <= 0) { GLuint i; - DBG("%s deleting %p\n", __FUNCTION__, *mt); + DBG("%s deleting %p\n", __func__, *mt); - intel_region_release(&((*mt)->region)); + drm_intel_bo_unreference((*mt)->bo); intel_miptree_release(&(*mt)->stencil_mt); - intel_miptree_release(&(*mt)->hiz_mt); + if ((*mt)->hiz_buf) { + if ((*mt)->hiz_buf->mt) + intel_miptree_release(&(*mt)->hiz_buf->mt); + else + drm_intel_bo_unreference((*mt)->hiz_buf->bo); + free((*mt)->hiz_buf); + } intel_miptree_release(&(*mt)->mcs_mt); - intel_miptree_release(&(*mt)->singlesample_mt); intel_resolve_map_clear(&(*mt)->hiz_map); for (i = 0; i < MAX_TEXTURE_LEVELS; i++) { @@ -915,13 +984,13 @@ intel_miptree_match_image(struct intel_mipmap_tree *mt, * objects can't change targets over their lifetimes, so this should be * true. */ - assert(target_to_target(image->TexObject->Target) == mt->target); + assert(image->TexObject->Target == mt->target); mesa_format mt_format = mt->format; - if (mt->format == MESA_FORMAT_X8_Z24 && mt->stencil_mt) - mt_format = MESA_FORMAT_S8_Z24; + if (mt->format == MESA_FORMAT_Z24_UNORM_X8_UINT && mt->stencil_mt) + mt_format = MESA_FORMAT_Z24_UNORM_S8_UINT; if (mt->format == MESA_FORMAT_Z_FLOAT32 && mt->stencil_mt) - mt_format = MESA_FORMAT_Z32_FLOAT_X24S8; + mt_format = MESA_FORMAT_Z32_FLOAT_S8X24_UINT; if (mt->etc_format != MESA_FORMAT_NONE) mt_format = mt->etc_format; @@ -933,28 +1002,27 @@ intel_miptree_match_image(struct intel_mipmap_tree *mt, if (mt->target == GL_TEXTURE_CUBE_MAP) depth = 6; + int level_depth = mt->level[level].depth; + if (mt->num_samples > 1) { + switch (mt->msaa_layout) { + case INTEL_MSAA_LAYOUT_NONE: + case INTEL_MSAA_LAYOUT_IMS: + break; + case INTEL_MSAA_LAYOUT_UMS: + case INTEL_MSAA_LAYOUT_CMS: + level_depth /= mt->num_samples; + break; + } + } + /* Test image dimensions against the base level image adjusted for * minification. This will also catch images not present in the * tree, changed targets, etc. 
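     *
     * E.g. with a 64x64 base image and first_level == 0, the level-2 image
     * must be minify(64, 2) = 16 texels in each dimension to match.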
*/ - if (mt->target == GL_TEXTURE_2D_MULTISAMPLE || - mt->target == GL_TEXTURE_2D_MULTISAMPLE_ARRAY) { - /* nonzero level here is always bogus */ - assert(level == 0); - - if (width != mt->logical_width0 || - height != mt->logical_height0 || - depth != mt->logical_depth0) { - return false; - } - } - else { - /* all normal textures, renderbuffers, etc */ - if (width != mt->level[level].width || - height != mt->level[level].height || - depth != mt->level[level].depth) { - return false; - } + if (width != minify(mt->logical_width0, level - mt->first_level) || + height != minify(mt->logical_height0, level - mt->first_level) || + depth != level_depth) { + return false; } if (image->NumSamples != mt->num_samples) @@ -967,17 +1035,14 @@ intel_miptree_match_image(struct intel_mipmap_tree *mt, void intel_miptree_set_level_info(struct intel_mipmap_tree *mt, GLuint level, - GLuint x, GLuint y, - GLuint w, GLuint h, GLuint d) + GLuint x, GLuint y, GLuint d) { - mt->level[level].width = w; - mt->level[level].height = h; mt->level[level].depth = d; mt->level[level].level_x = x; mt->level[level].level_y = y; - DBG("%s level %d size: %d,%d,%d offset %d,%d\n", __FUNCTION__, - level, w, h, d, x, y); + DBG("%s level %d, depth %d, offset %d,%d\n", __func__, + level, d, x, y); assert(mt->level[level].slice == NULL); @@ -1001,13 +1066,13 @@ intel_miptree_set_image_offset(struct intel_mipmap_tree *mt, mt->level[level].slice[img].y_offset = mt->level[level].level_y + y; DBG("%s level %d img %d pos %d,%d\n", - __FUNCTION__, level, img, + __func__, level, img, mt->level[level].slice[img].x_offset, mt->level[level].slice[img].y_offset); } void -intel_miptree_get_image_offset(struct intel_mipmap_tree *mt, +intel_miptree_get_image_offset(const struct intel_mipmap_tree *mt, GLuint level, GLuint slice, GLuint *x, GLuint *y) { @@ -1017,6 +1082,82 @@ intel_miptree_get_image_offset(struct intel_mipmap_tree *mt, *y = mt->level[level].slice[slice].y_offset; } +/** + * This function computes masks that may be used to select the bits of the X + * and Y coordinates that indicate the offset within a tile. If the BO is + * untiled, the masks are set to 0. + */ +void +intel_miptree_get_tile_masks(const struct intel_mipmap_tree *mt, + uint32_t *mask_x, uint32_t *mask_y, + bool map_stencil_as_y_tiled) +{ + int cpp = mt->cpp; + uint32_t tiling = mt->tiling; + + if (map_stencil_as_y_tiled) + tiling = I915_TILING_Y; + + switch (tiling) { + default: + unreachable("not reached"); + case I915_TILING_NONE: + *mask_x = *mask_y = 0; + break; + case I915_TILING_X: + *mask_x = 512 / cpp - 1; + *mask_y = 7; + break; + case I915_TILING_Y: + *mask_x = 128 / cpp - 1; + *mask_y = 31; + break; + } +} + +/** + * Compute the offset (in bytes) from the start of the BO to the given x + * and y coordinate. For tiled BOs, caller must ensure that x and y are + * multiples of the tile size. + */ +uint32_t +intel_miptree_get_aligned_offset(const struct intel_mipmap_tree *mt, + uint32_t x, uint32_t y, + bool map_stencil_as_y_tiled) +{ + int cpp = mt->cpp; + uint32_t pitch = mt->pitch; + uint32_t tiling = mt->tiling; + + if (map_stencil_as_y_tiled) { + tiling = I915_TILING_Y; + + /* When mapping a W-tiled stencil buffer as Y-tiled, each 64-high W-tile + * gets transformed into a 32-high Y-tile. Accordingly, the pitch of + * the resulting surface is twice the pitch of the original miptree, + * since each row in the Y-tiled view corresponds to two rows in the + * actual W-tiled surface. So we need to correct the pitch before + * computing the offsets. 
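+       *
+       * E.g. a W-tiled stencil buffer allocated with a 512-byte pitch is
+       * addressed below as a Y-tiled surface with a 1024-byte pitch.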
+ */ + pitch *= 2; + } + + switch (tiling) { + default: + unreachable("not reached"); + case I915_TILING_NONE: + return y * pitch + x * cpp; + case I915_TILING_X: + assert((x % (512 / cpp)) == 0); + assert((y % 8) == 0); + return y * pitch + x / (512 / cpp) * 4096; + case I915_TILING_Y: + assert((x % (128 / cpp)) == 0); + assert((y % 32) == 0); + return y * pitch + x / (128 / cpp) * 4096; + } +} + /** * Rendering with tiled buffers requires that the base address of the buffer * be aligned to a page boundary. For renderbuffers, and sometimes with @@ -1028,23 +1169,21 @@ intel_miptree_get_image_offset(struct intel_mipmap_tree *mt, * from there. */ uint32_t -intel_miptree_get_tile_offsets(struct intel_mipmap_tree *mt, +intel_miptree_get_tile_offsets(const struct intel_mipmap_tree *mt, GLuint level, GLuint slice, uint32_t *tile_x, uint32_t *tile_y) { - struct intel_region *region = mt->region; uint32_t x, y; uint32_t mask_x, mask_y; - intel_region_get_tile_masks(region, &mask_x, &mask_y, false); + intel_miptree_get_tile_masks(mt, &mask_x, &mask_y, false); intel_miptree_get_image_offset(mt, level, slice, &x, &y); *tile_x = x & mask_x; *tile_y = y & mask_y; - return intel_region_get_aligned_offset(region, x & ~mask_x, y & ~mask_y, - false); + return intel_miptree_get_aligned_offset(mt, x & ~mask_x, y & ~mask_y, false); } static void @@ -1057,7 +1196,7 @@ intel_miptree_copy_slice_sw(struct brw_context *brw, int height) { void *src, *dst; - int src_stride, dst_stride; + ptrdiff_t src_stride, dst_stride; int cpp = dst_mt->cpp; intel_miptree_map(brw, src_mt, @@ -1075,7 +1214,7 @@ intel_miptree_copy_slice_sw(struct brw_context *brw, BRW_MAP_DIRECT_BIT, &dst, &dst_stride); - DBG("sw blit %s mt %p %p/%d -> %s mt %p %p/%d (%dx%d)\n", + DBG("sw blit %s mt %p %p/%"PRIdPTR" -> %s mt %p %p/%"PRIdPTR" (%dx%d)\n", _mesa_get_format_name(src_mt->format), src_mt, src, src_stride, _mesa_get_format_name(dst_mt->format), @@ -1119,8 +1258,8 @@ intel_miptree_copy_slice(struct brw_context *brw, { mesa_format format = src_mt->format; - uint32_t width = src_mt->level[level].width; - uint32_t height = src_mt->level[level].height; + uint32_t width = minify(src_mt->physical_width0, level - src_mt->first_level); + uint32_t height = minify(src_mt->physical_height0, level - src_mt->first_level); int slice; if (face > 0) @@ -1154,9 +1293,9 @@ intel_miptree_copy_slice(struct brw_context *brw, DBG("validate blit mt %s %p %d,%d/%d -> mt %s %p %d,%d/%d (%dx%d)\n", _mesa_get_format_name(src_mt->format), - src_mt, src_x, src_y, src_mt->region->pitch, + src_mt, src_x, src_y, src_mt->pitch, _mesa_get_format_name(dst_mt->format), - dst_mt, dst_x, dst_y, dst_mt->region->pitch, + dst_mt, dst_x, dst_y, dst_mt->pitch, width, height); if (!intel_miptree_blit(brw, @@ -1190,7 +1329,12 @@ intel_miptree_copy_teximage(struct brw_context *brw, intel_texture_object(intelImage->base.Base.TexObject); int level = intelImage->base.Base.Level; int face = intelImage->base.Base.Face; - GLuint depth = intelImage->base.Base.Depth; + + GLuint depth; + if (intel_obj->base.Target == GL_TEXTURE_1D_ARRAY) + depth = intelImage->base.Base.Height; + else + depth = intelImage->base.Base.Depth; if (!invalidate) { for (int slice = 0; slice < depth; slice++) { @@ -1202,13 +1346,14 @@ intel_miptree_copy_teximage(struct brw_context *brw, intel_obj->needs_validate = true; } -bool +static bool intel_miptree_alloc_mcs(struct brw_context *brw, struct intel_mipmap_tree *mt, GLuint num_samples) { assert(brw->gen >= 7); /* MCS only used on Gen7+ */ assert(mt->mcs_mt == NULL); 
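+   /* Illustrative note (not part of the original change): the MCS
+    * (multisample control surface) records which slice of the color
+    * surface holds each sample of a pixel; the CMS layout depends on it,
+    * which is why aux buffers must not be disabled here.
+    */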
+ assert(!mt->disable_aux_buffers); /* Choose the correct format for the MCS buffer. All that really matters * is that we allocate the right buffer size, since we'll always be @@ -1217,6 +1362,7 @@ intel_miptree_alloc_mcs(struct brw_context *brw, */ mesa_format format; switch (num_samples) { + case 2: case 4: /* 8 bits/pixel are required for MCS data when using 4x MSAA (2 bits for * each sample). @@ -1230,8 +1376,7 @@ intel_miptree_alloc_mcs(struct brw_context *brw, format = MESA_FORMAT_R_UINT32; break; default: - assert(!"Unrecognized sample count in intel_miptree_alloc_mcs"); - return false; + unreachable("Unrecognized sample count in intel_miptree_alloc_mcs"); }; /* From the Ivy Bridge PRM, Vol4 Part1 p76, "MCS Base Address": @@ -1248,7 +1393,8 @@ intel_miptree_alloc_mcs(struct brw_context *brw, mt->logical_depth0, true, 0 /* num_samples */, - INTEL_MIPTREE_TILING_Y); + INTEL_MIPTREE_TILING_Y, + false); /* From the Ivy Bridge PRM, Vol 2 Part 1 p326: * @@ -1261,7 +1407,7 @@ intel_miptree_alloc_mcs(struct brw_context *brw, * Note: the clear value for MCS buffers is all 1's, so we memset to 0xff. */ void *data = intel_miptree_map_raw(brw, mt->mcs_mt); - memset(data, 0xff, mt->mcs_mt->region->bo->size); + memset(data, 0xff, mt->mcs_mt->total_height * mt->mcs_mt->pitch); intel_miptree_unmap_raw(brw, mt->mcs_mt); mt->fast_clear_state = INTEL_FAST_CLEAR_STATE_CLEAR; @@ -1274,6 +1420,7 @@ intel_miptree_alloc_non_msrt_mcs(struct brw_context *brw, struct intel_mipmap_tree *mt) { assert(mt->mcs_mt == NULL); + assert(!mt->disable_aux_buffers); /* The format of the MCS buffer is opaque to the driver; all that matters * is that we get its size and pitch right. We'll pretend that the format @@ -1305,7 +1452,8 @@ intel_miptree_alloc_non_msrt_mcs(struct brw_context *brw, mt->logical_depth0, true, 0 /* num_samples */, - INTEL_MIPTREE_TILING_Y); + INTEL_MIPTREE_TILING_Y, + false); return mt->mcs_mt; } @@ -1313,19 +1461,19 @@ intel_miptree_alloc_non_msrt_mcs(struct brw_context *brw, /** * Helper for intel_miptree_alloc_hiz() that sets - * \c mt->level[level].slice[layer].has_hiz. Return true if and only if + * \c mt->level[level].has_hiz. Return true if and only if * \c has_hiz was set. */ static bool -intel_miptree_slice_enable_hiz(struct brw_context *brw, +intel_miptree_level_enable_hiz(struct brw_context *brw, struct intel_mipmap_tree *mt, - uint32_t level, - uint32_t layer) + uint32_t level) { - assert(mt->hiz_mt); + assert(mt->hiz_buf); - if (brw->is_haswell) { - const struct intel_mipmap_level *l = &mt->level[level]; + if (brw->gen >= 8 || brw->is_haswell) { + uint32_t width = minify(mt->physical_width0, level); + uint32_t height = minify(mt->physical_height0, level); /* Disable HiZ for LOD > 0 unless the width is 8 aligned * and the height is 4 aligned. This allows our HiZ support @@ -1333,52 +1481,305 @@ intel_miptree_slice_enable_hiz(struct brw_context *brw, * we can grow the width & height to allow the HiZ op to * force the proper size alignments. */ - if (level > 0 && ((l->width & 7) || (l->height & 3))) { + if (level > 0 && ((width & 7) || (height & 3))) { + DBG("mt %p level %d: HiZ DISABLED\n", mt, level); return false; } } - mt->level[level].slice[layer].has_hiz = true; + DBG("mt %p level %d: HiZ enabled\n", mt, level); + mt->level[level].has_hiz = true; return true; } +/** + * Helper for intel_miptree_alloc_hiz() that determines the required hiz + * buffer dimensions and allocates a bo for the hiz buffer. 
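+ *
+ * HiZ is a reduced-resolution summary of the depth buffer that lets the
+ * hardware reject blocks of pixels early; the sizing below follows the
+ * PRM formulas quoted inline.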
+ */ +static struct intel_miptree_aux_buffer * +intel_gen7_hiz_buf_create(struct brw_context *brw, + struct intel_mipmap_tree *mt) +{ + unsigned z_width = mt->logical_width0; + unsigned z_height = mt->logical_height0; + const unsigned z_depth = MAX2(mt->logical_depth0, 1); + unsigned hz_width, hz_height; + struct intel_miptree_aux_buffer *buf = calloc(sizeof(*buf), 1); + + if (!buf) + return NULL; + + /* Gen7 PRM Volume 2, Part 1, 11.5.3 "Hierarchical Depth Buffer" documents + * adjustments required for Z_Height and Z_Width based on multisampling. + */ + switch (mt->num_samples) { + case 0: + case 1: + break; + case 2: + case 4: + z_width *= 2; + z_height *= 2; + break; + case 8: + z_width *= 4; + z_height *= 2; + break; + default: + unreachable("unsupported sample count"); + } + + const unsigned vertical_align = 8; /* 'j' in the docs */ + const unsigned H0 = z_height; + const unsigned h0 = ALIGN(H0, vertical_align); + const unsigned h1 = ALIGN(minify(H0, 1), vertical_align); + const unsigned Z0 = z_depth; + + /* HZ_Width (bytes) = ceiling(Z_Width / 16) * 16 */ + hz_width = ALIGN(z_width, 16); + + if (mt->target == GL_TEXTURE_3D) { + unsigned H_i = H0; + unsigned Z_i = Z0; + hz_height = 0; + for (int level = mt->first_level; level <= mt->last_level; ++level) { + unsigned h_i = ALIGN(H_i, vertical_align); + /* sum(i=0 to m; h_i * max(1, floor(Z_Depth/2**i))) */ + hz_height += h_i * Z_i; + H_i = minify(H_i, 1); + Z_i = minify(Z_i, 1); + } + /* HZ_Height = + * (1/2) * sum(i=0 to m; h_i * max(1, floor(Z_Depth/2**i))) + */ + hz_height = DIV_ROUND_UP(hz_height, 2); + } else { + const unsigned hz_qpitch = h0 + h1 + (12 * vertical_align); + if (mt->target == GL_TEXTURE_CUBE_MAP_ARRAY || + mt->target == GL_TEXTURE_CUBE_MAP) { + /* HZ_Height (rows) = Ceiling ( ( Q_pitch * Z_depth * 6/2) /8 ) * 8 */ + hz_height = DIV_ROUND_UP(hz_qpitch * Z0 * 6, 2 * 8) * 8; + } else { + /* HZ_Height (rows) = Ceiling ( ( Q_pitch * Z_depth/2) /8 ) * 8 */ + hz_height = DIV_ROUND_UP(hz_qpitch * Z0, 2 * 8) * 8; + } + } + + unsigned long pitch; + uint32_t tiling = I915_TILING_Y; + buf->bo = drm_intel_bo_alloc_tiled(brw->bufmgr, "hiz", + hz_width, hz_height, 1, + &tiling, &pitch, + BO_ALLOC_FOR_RENDER); + if (!buf->bo) { + free(buf); + return NULL; + } else if (tiling != I915_TILING_Y) { + drm_intel_bo_unreference(buf->bo); + free(buf); + return NULL; + } + + buf->pitch = pitch; + + return buf; +} + + +/** + * Helper for intel_miptree_alloc_hiz() that determines the required hiz + * buffer dimensions and allocates a bo for the hiz buffer. + */ +static struct intel_miptree_aux_buffer * +intel_gen8_hiz_buf_create(struct brw_context *brw, + struct intel_mipmap_tree *mt) +{ + unsigned z_width = mt->logical_width0; + unsigned z_height = mt->logical_height0; + const unsigned z_depth = MAX2(mt->logical_depth0, 1); + unsigned hz_width, hz_height; + struct intel_miptree_aux_buffer *buf = calloc(sizeof(*buf), 1); + + if (!buf) + return NULL; + + /* Gen7 PRM Volume 2, Part 1, 11.5.3 "Hierarchical Depth Buffer" documents + * adjustments required for Z_Height and Z_Width based on multisampling. 
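+    *
+    * E.g. a 4x multisampled depth buffer gets both Z_Width and Z_Height
+    * doubled before the HiZ sizing math below.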
+ */ + switch (mt->num_samples) { + case 0: + case 1: + break; + case 2: + case 4: + z_width *= 2; + z_height *= 2; + break; + case 8: + z_width *= 4; + z_height *= 2; + break; + default: + unreachable("unsupported sample count"); + } + + const unsigned vertical_align = 8; /* 'j' in the docs */ + const unsigned H0 = z_height; + const unsigned h0 = ALIGN(H0, vertical_align); + const unsigned h1 = ALIGN(minify(H0, 1), vertical_align); + const unsigned Z0 = z_depth; + + /* HZ_Width (bytes) = ceiling(Z_Width / 16) * 16 */ + hz_width = ALIGN(z_width, 16); + + unsigned H_i = H0; + unsigned Z_i = Z0; + unsigned sum_h_i = 0; + unsigned hz_height_3d_sum = 0; + for (int level = mt->first_level; level <= mt->last_level; ++level) { + unsigned i = level - mt->first_level; + unsigned h_i = ALIGN(H_i, vertical_align); + /* sum(i=2 to m; h_i) */ + if (i >= 2) { + sum_h_i += h_i; + } + /* sum(i=0 to m; h_i * max(1, floor(Z_Depth/2**i))) */ + hz_height_3d_sum += h_i * Z_i; + H_i = minify(H_i, 1); + Z_i = minify(Z_i, 1); + } + /* HZ_QPitch = h0 + max(h1, sum(i=2 to m; h_i)) */ + buf->qpitch = h0 + MAX2(h1, sum_h_i); + + if (mt->target == GL_TEXTURE_3D) { + /* (1/2) * sum(i=0 to m; h_i * max(1, floor(Z_Depth/2**i))) */ + hz_height = DIV_ROUND_UP(hz_height_3d_sum, 2); + } else { + /* HZ_Height (rows) = ceiling( (HZ_QPitch/2)/8) *8 * Z_Depth */ + hz_height = DIV_ROUND_UP(buf->qpitch, 2 * 8) * 8 * Z0; + if (mt->target == GL_TEXTURE_CUBE_MAP_ARRAY || + mt->target == GL_TEXTURE_CUBE_MAP) { + /* HZ_Height (rows) = ceiling( (HZ_QPitch/2)/8) *8 * 6 * Z_Depth + * + * We can can just take our hz_height calculation from above, and + * multiply by 6 for the cube map and cube map array types. + */ + hz_height *= 6; + } + } + + unsigned long pitch; + uint32_t tiling = I915_TILING_Y; + buf->bo = drm_intel_bo_alloc_tiled(brw->bufmgr, "hiz", + hz_width, hz_height, 1, + &tiling, &pitch, + BO_ALLOC_FOR_RENDER); + if (!buf->bo) { + free(buf); + return NULL; + } else if (tiling != I915_TILING_Y) { + drm_intel_bo_unreference(buf->bo); + free(buf); + return NULL; + } + + buf->pitch = pitch; + + return buf; +} + + +static struct intel_miptree_aux_buffer * +intel_hiz_miptree_buf_create(struct brw_context *brw, + struct intel_mipmap_tree *mt) +{ + struct intel_miptree_aux_buffer *buf = calloc(sizeof(*buf), 1); + const bool force_all_slices_at_each_lod = brw->gen == 6; + + if (!buf) + return NULL; + + buf->mt = intel_miptree_create(brw, + mt->target, + mt->format, + mt->first_level, + mt->last_level, + mt->logical_width0, + mt->logical_height0, + mt->logical_depth0, + true, + mt->num_samples, + INTEL_MIPTREE_TILING_ANY, + force_all_slices_at_each_lod); + if (!buf->mt) { + free(buf); + return NULL; + } + + buf->bo = buf->mt->bo; + buf->pitch = buf->mt->pitch; + buf->qpitch = buf->mt->qpitch; + + return buf; +} + +bool +intel_miptree_wants_hiz_buffer(struct brw_context *brw, + struct intel_mipmap_tree *mt) +{ + if (!brw->has_hiz) + return false; + + if (mt->hiz_buf != NULL) + return false; + + if (mt->disable_aux_buffers) + return false; + + switch (mt->format) { + case MESA_FORMAT_Z_FLOAT32: + case MESA_FORMAT_Z32_FLOAT_S8X24_UINT: + case MESA_FORMAT_Z24_UNORM_X8_UINT: + case MESA_FORMAT_Z24_UNORM_S8_UINT: + case MESA_FORMAT_Z_UNORM16: + return true; + default: + return false; + } +} bool intel_miptree_alloc_hiz(struct brw_context *brw, struct intel_mipmap_tree *mt) { - assert(mt->hiz_mt == NULL); - mt->hiz_mt = intel_miptree_create(brw, - mt->target, - mt->format, - mt->first_level, - mt->last_level, - mt->logical_width0, - 
mt->logical_height0, - mt->logical_depth0, - true, - mt->num_samples, - INTEL_MIPTREE_TILING_ANY); + assert(mt->hiz_buf == NULL); + assert(!mt->disable_aux_buffers); - if (!mt->hiz_mt) + if (brw->gen == 7) { + mt->hiz_buf = intel_gen7_hiz_buf_create(brw, mt); + } else if (brw->gen >= 8) { + mt->hiz_buf = intel_gen8_hiz_buf_create(brw, mt); + } else { + mt->hiz_buf = intel_hiz_miptree_buf_create(brw, mt); + } + + if (!mt->hiz_buf) return false; /* Mark that all slices need a HiZ resolve. */ - struct intel_resolve_map *head = &mt->hiz_map; for (int level = mt->first_level; level <= mt->last_level; ++level) { - for (int layer = 0; layer < mt->level[level].depth; ++layer) { - if (!intel_miptree_slice_enable_hiz(brw, mt, level, layer)) - continue; + if (!intel_miptree_level_enable_hiz(brw, mt, level)) + continue; - head->next = malloc(sizeof(*head->next)); - head->next->prev = head; - head->next->next = NULL; - head = head->next; + for (int layer = 0; layer < mt->level[level].depth; ++layer) { + struct intel_resolve_map *m = malloc(sizeof(struct intel_resolve_map)); + exec_node_init(&m->link); + m->level = level; + m->layer = layer; + m->need = GEN6_HIZ_OP_HIZ_RESOLVE; - head->level = level; - head->layer = layer; - head->need = GEN6_HIZ_OP_HIZ_RESOLVE; + exec_list_push_tail(&mt->hiz_map, &m->link); } } @@ -1389,12 +1790,10 @@ intel_miptree_alloc_hiz(struct brw_context *brw, * Does the miptree slice have hiz enabled? */ bool -intel_miptree_slice_has_hiz(struct intel_mipmap_tree *mt, - uint32_t level, - uint32_t layer) +intel_miptree_level_has_hiz(struct intel_mipmap_tree *mt, uint32_t level) { - intel_miptree_check_level_layer(mt, level, layer); - return mt->level[level].slice[layer].has_hiz; + intel_miptree_check_level_layer(mt, level, 0); + return mt->level[level].has_hiz; } void @@ -1402,7 +1801,7 @@ intel_miptree_slice_set_needs_hiz_resolve(struct intel_mipmap_tree *mt, uint32_t level, uint32_t layer) { - if (!intel_miptree_slice_has_hiz(mt, level, layer)) + if (!intel_miptree_level_has_hiz(mt, level)) return; intel_resolve_map_set(&mt->hiz_map, @@ -1415,7 +1814,7 @@ intel_miptree_slice_set_needs_depth_resolve(struct intel_mipmap_tree *mt, uint32_t level, uint32_t layer) { - if (!intel_miptree_slice_has_hiz(mt, level, layer)) + if (!intel_miptree_level_has_hiz(mt, level)) return; intel_resolve_map_set(&mt->hiz_map, @@ -1480,15 +1879,13 @@ intel_miptree_all_slices_resolve(struct brw_context *brw, enum gen6_hiz_op need) { bool did_resolve = false; - struct intel_resolve_map *i, *next; - for (i = mt->hiz_map.next; i; i = next) { - next = i->next; - if (i->need != need) + foreach_list_typed_safe(struct intel_resolve_map, map, link, &mt->hiz_map) { + if (map->need != need) continue; - intel_hiz_exec(brw, mt, i->level, i->layer, need); - intel_resolve_map_remove(i); + intel_hiz_exec(brw, mt, map->level, map->layer, need); + intel_resolve_map_remove(map); did_resolve = true; } @@ -1525,14 +1922,14 @@ intel_miptree_resolve_color(struct brw_context *brw, case INTEL_FAST_CLEAR_STATE_CLEAR: /* Fast color clear resolves only make sense for non-MSAA buffers. */ if (mt->msaa_layout == INTEL_MSAA_LAYOUT_NONE) - brw_blorp_resolve_color(brw, mt); + brw_meta_resolve_color(brw, mt); break; } } /** - * Make it possible to share the region backing the given miptree with another + * Make it possible to share the BO backing the given miptree with another * process or another miptree. 
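  *
  * (Sharing arises e.g. when the miptree backs a DRI image handed out to
  * another client.)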
* * Fast color clears are unsafe with shared buffers, so we need to resolve and @@ -1615,87 +2012,45 @@ intel_offset_S8(uint32_t stride, uint32_t x, uint32_t y, bool swizzled) return u; } -static void +void intel_miptree_updownsample(struct brw_context *brw, struct intel_mipmap_tree *src, - struct intel_mipmap_tree *dst, - unsigned width, - unsigned height) -{ - int src_x0 = 0; - int src_y0 = 0; - int dst_x0 = 0; - int dst_y0 = 0; - - brw_blorp_blit_miptrees(brw, - src, 0 /* level */, 0 /* layer */, - dst, 0 /* level */, 0 /* layer */, - src_x0, src_y0, - width, height, - dst_x0, dst_y0, - width, height, - GL_NEAREST, false, false /*mirror x, y*/); + struct intel_mipmap_tree *dst) +{ + if (brw->gen < 8) { + brw_blorp_blit_miptrees(brw, + src, 0 /* level */, 0 /* layer */, src->format, + dst, 0 /* level */, 0 /* layer */, dst->format, + 0, 0, + src->logical_width0, src->logical_height0, + 0, 0, + dst->logical_width0, dst->logical_height0, + GL_NEAREST, false, false /*mirror x, y*/); + } else if (src->format == MESA_FORMAT_S_UINT8) { + brw_meta_stencil_updownsample(brw, src, dst); + } else { + brw_meta_updownsample(brw, src, dst); + } if (src->stencil_mt) { + if (brw->gen >= 8) { + brw_meta_stencil_updownsample(brw, src->stencil_mt, dst); + return; + } + brw_blorp_blit_miptrees(brw, src->stencil_mt, 0 /* level */, 0 /* layer */, + src->stencil_mt->format, dst->stencil_mt, 0 /* level */, 0 /* layer */, - src_x0, src_y0, - width, height, - dst_x0, dst_y0, - width, height, + dst->stencil_mt->format, + 0, 0, + src->logical_width0, src->logical_height0, + 0, 0, + dst->logical_width0, dst->logical_height0, GL_NEAREST, false, false /*mirror x, y*/); } } -static void -assert_is_flat(struct intel_mipmap_tree *mt) -{ - assert(mt->target == GL_TEXTURE_2D); - assert(mt->first_level == 0); - assert(mt->last_level == 0); -} - -/** - * \brief Downsample from mt to mt->singlesample_mt. - * - * If the miptree needs no downsample, then skip. - */ -void -intel_miptree_downsample(struct brw_context *brw, - struct intel_mipmap_tree *mt) -{ - /* Only flat, renderbuffer-like miptrees are supported. */ - assert_is_flat(mt); - - if (!mt->need_downsample) - return; - intel_miptree_updownsample(brw, - mt, mt->singlesample_mt, - mt->logical_width0, - mt->logical_height0); - mt->need_downsample = false; -} - -/** - * \brief Upsample from mt->singlesample_mt to mt. - * - * The upsample is done unconditionally. - */ -void -intel_miptree_upsample(struct brw_context *brw, - struct intel_mipmap_tree *mt) -{ - /* Only flat, renderbuffer-like miptrees are supported. 
*/ - assert_is_flat(mt); - assert(!mt->need_downsample); - - intel_miptree_updownsample(brw, - mt->singlesample_mt, mt, - mt->logical_width0, - mt->logical_height0); -} - void * intel_miptree_map_raw(struct brw_context *brw, struct intel_mipmap_tree *mt) { @@ -1704,20 +2059,15 @@ intel_miptree_map_raw(struct brw_context *brw, struct intel_mipmap_tree *mt) */ intel_miptree_resolve_color(brw, mt); - drm_intel_bo *bo = mt->region->bo; + drm_intel_bo *bo = mt->bo; - if (unlikely(INTEL_DEBUG & DEBUG_PERF)) { - if (drm_intel_bo_busy(bo)) { - perf_debug("Mapping a busy miptree, causing a stall on the GPU.\n"); - } - } + if (drm_intel_bo_references(brw->batch.bo, bo)) + intel_batchbuffer_flush(brw); - intel_batchbuffer_flush(brw); - - if (mt->region->tiling != I915_TILING_NONE) - drm_intel_gem_bo_map_gtt(bo); + if (mt->tiling != I915_TILING_NONE) + brw_bo_map_gtt(brw, bo, "miptree"); else - drm_intel_bo_map(bo, true); + brw_bo_map(brw, bo, true, "miptree"); return bo->virtual; } @@ -1726,7 +2076,7 @@ void intel_miptree_unmap_raw(struct brw_context *brw, struct intel_mipmap_tree *mt) { - drm_intel_bo_unmap(mt->region->bo); + drm_intel_bo_unmap(mt->bo); } static void @@ -1738,8 +2088,8 @@ intel_miptree_map_gtt(struct brw_context *brw, unsigned int bw, bh; void *base; unsigned int image_x, image_y; - int x = map->x; - int y = map->y; + intptr_t x = map->x; + intptr_t y = map->y; /* For compressed formats, the stride is the number of bytes per * row of blocks. intel_miptree_get_image_offset() already does @@ -1761,11 +2111,12 @@ intel_miptree_map_gtt(struct brw_context *brw, x += image_x; y += image_y; - map->stride = mt->region->pitch; + map->stride = mt->pitch; map->ptr = base + y * map->stride + x * mt->cpp; } - DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __FUNCTION__, + DBG("%s: %d,%d %dx%d from mt %p (%s) " + "%"PRIiPTR",%"PRIiPTR" = %p/%d\n", __func__, map->x, map->y, map->w, map->h, mt, _mesa_get_format_name(mt->format), x, y, map->ptr, map->stride); @@ -1791,26 +2142,34 @@ intel_miptree_map_blit(struct brw_context *brw, 0, 0, map->w, map->h, 1, false, 0, - INTEL_MIPTREE_TILING_NONE); + INTEL_MIPTREE_TILING_NONE, + false); if (!map->mt) { fprintf(stderr, "Failed to allocate blit temporary\n"); goto fail; } - map->stride = map->mt->region->pitch; + map->stride = map->mt->pitch; - if (!intel_miptree_blit(brw, - mt, level, slice, - map->x, map->y, false, - map->mt, 0, 0, - 0, 0, false, - map->w, map->h, GL_COPY)) { - fprintf(stderr, "Failed to blit\n"); - goto fail; + /* One of either READ_BIT or WRITE_BIT or both is set. READ_BIT implies no + * INVALIDATE_RANGE_BIT. WRITE_BIT needs the original values read in unless + * invalidate is set, since we'll be writing the whole rectangle from our + * temporary buffer back out. + */ + if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) { + if (!intel_miptree_blit(brw, + mt, level, slice, + map->x, map->y, false, + map->mt, 0, 0, + 0, 0, false, + map->w, map->h, GL_COPY)) { + fprintf(stderr, "Failed to blit\n"); + goto fail; + } } map->ptr = intel_miptree_map_raw(brw, map->mt); - DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __FUNCTION__, + DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __func__, map->x, map->y, map->w, map->h, mt, _mesa_get_format_name(mt->format), level, slice, map->ptr, map->stride); @@ -1847,10 +2206,10 @@ intel_miptree_unmap_blit(struct brw_context *brw, intel_miptree_release(&map->mt); } -#ifdef __SSE4_1__ /** * "Map" a buffer by copying it to an untiled temporary using MOVNTDQA. 
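  *
  * _mesa_streaming_load_memcpy() issues MOVNTDQA streaming (non-temporal)
  * loads, which are far faster than ordinary loads when reading from the
  * uncached, write-combined GTT mapping of a tiled buffer.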
*/ +#if defined(USE_SSE41) static void intel_miptree_map_movntdqa(struct brw_context *brw, struct intel_mipmap_tree *mt, @@ -1860,7 +2219,7 @@ intel_miptree_map_movntdqa(struct brw_context *brw, assert(map->mode & GL_MAP_READ_BIT); assert(!(map->mode & GL_MAP_WRITE_BIT)); - DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __FUNCTION__, + DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __func__, map->x, map->y, map->w, map->h, mt, _mesa_get_format_name(mt->format), level, slice, map->ptr, map->stride); @@ -1875,15 +2234,15 @@ intel_miptree_map_movntdqa(struct brw_context *brw, void *src = intel_miptree_map_raw(brw, mt); if (!src) return; - src += image_y * mt->region->pitch; - src += image_x * mt->region->cpp; + src += image_y * mt->pitch; + src += image_x * mt->cpp; /* Due to the pixel offsets for the particular image being mapped, our * src pointer may not be 16-byte aligned. However, if the pitch is * divisible by 16, then the amount by which it's misaligned will remain * consistent from row to row. */ - assert((mt->region->pitch % 16) == 0); + assert((mt->pitch % 16) == 0); const int misalignment = ((uintptr_t) src) & 15; /* Create an untiled temporary buffer for the mapping. */ @@ -1891,7 +2250,7 @@ intel_miptree_map_movntdqa(struct brw_context *brw, map->stride = ALIGN(misalignment + width_bytes, 16); - map->buffer = malloc(map->stride * map->h); + map->buffer = _mesa_align_malloc(map->stride * map->h, 16); /* Offset the destination so it has the same misalignment as src. */ map->ptr = map->buffer + misalignment; @@ -1899,7 +2258,7 @@ intel_miptree_map_movntdqa(struct brw_context *brw, for (uint32_t y = 0; y < map->h; y++) { void *dst_ptr = map->ptr + y * map->stride; - void *src_ptr = src + y * mt->region->pitch; + void *src_ptr = src + y * mt->pitch; _mesa_streaming_load_memcpy(dst_ptr, src_ptr, width_bytes); } @@ -1914,7 +2273,7 @@ intel_miptree_unmap_movntdqa(struct brw_context *brw, unsigned int level, unsigned int slice) { - free(map->buffer); + _mesa_align_free(map->buffer); map->buffer = NULL; map->ptr = NULL; } @@ -1945,7 +2304,7 @@ intel_miptree_map_s8(struct brw_context *brw, for (uint32_t y = 0; y < map->h; y++) { for (uint32_t x = 0; x < map->w; x++) { - ptrdiff_t offset = intel_offset_S8(mt->region->pitch, + ptrdiff_t offset = intel_offset_S8(mt->pitch, x + image_x + map->x, y + image_y + map->y, brw->has_swizzling); @@ -1955,11 +2314,11 @@ intel_miptree_map_s8(struct brw_context *brw, intel_miptree_unmap_raw(brw, mt); - DBG("%s: %d,%d %dx%d from mt %p %d,%d = %p/%d\n", __FUNCTION__, + DBG("%s: %d,%d %dx%d from mt %p %d,%d = %p/%d\n", __func__, map->x, map->y, map->w, map->h, mt, map->x + image_x, map->y + image_y, map->ptr, map->stride); } else { - DBG("%s: %d,%d %dx%d from mt %p = %p/%d\n", __FUNCTION__, + DBG("%s: %d,%d %dx%d from mt %p = %p/%d\n", __func__, map->x, map->y, map->w, map->h, mt, map->ptr, map->stride); } @@ -1981,7 +2340,7 @@ intel_miptree_unmap_s8(struct brw_context *brw, for (uint32_t y = 0; y < map->h; y++) { for (uint32_t x = 0; x < map->w; x++) { - ptrdiff_t offset = intel_offset_S8(mt->region->pitch, + ptrdiff_t offset = intel_offset_S8(mt->pitch, x + map->x, y + map->y, brw->has_swizzling); @@ -2031,15 +2390,15 @@ intel_miptree_unmap_etc(struct brw_context *brw, image_y += map->y; uint8_t *dst = intel_miptree_map_raw(brw, mt) - + image_y * mt->region->pitch - + image_x * mt->region->cpp; + + image_y * mt->pitch + + image_x * mt->cpp; if (mt->etc_format == MESA_FORMAT_ETC1_RGB8) - _mesa_etc1_unpack_rgba8888(dst, mt->region->pitch, + 
+      _mesa_etc1_unpack_rgba8888(dst, mt->pitch,
                                  map->ptr, map->stride,
                                  map->w, map->h);
    else
-      _mesa_unpack_etc2_format(dst, mt->region->pitch,
+      _mesa_unpack_etc2_format(dst, mt->pitch,
                                map->ptr, map->stride,
                                map->w, map->h, mt->etc_format);

@@ -2094,12 +2453,12 @@ intel_miptree_map_depthstencil(struct brw_context *brw,
       for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            int map_x = map->x + x, map_y = map->y + y;
-           ptrdiff_t s_offset = intel_offset_S8(s_mt->region->pitch,
+           ptrdiff_t s_offset = intel_offset_S8(s_mt->pitch,
                                                 map_x + s_image_x,
                                                 map_y + s_image_y,
                                                 brw->has_swizzling);
            ptrdiff_t z_offset = ((map_y + z_image_y) *
-                                 (z_mt->region->pitch / 4) +
+                                 (z_mt->pitch / 4) +
                                  (map_x + z_image_x));
            uint8_t s = s_map[s_offset];
            uint32_t z = z_map[z_offset];
@@ -2117,13 +2476,13 @@ intel_miptree_map_depthstencil(struct brw_context *brw,
       intel_miptree_unmap_raw(brw, z_mt);

       DBG("%s: %d,%d %dx%d from z mt %p %d,%d, s mt %p %d,%d = %p/%d\n",
-         __FUNCTION__,
+         __func__,
          map->x, map->y, map->w, map->h,
          z_mt, map->x + z_image_x, map->y + z_image_y,
          s_mt, map->x + s_image_x, map->y + s_image_y,
          map->ptr, map->stride);
    } else {
-      DBG("%s: %d,%d %dx%d from mt %p = %p/%d\n", __FUNCTION__,
+      DBG("%s: %d,%d %dx%d from mt %p = %p/%d\n", __func__,
          map->x, map->y, map->w, map->h,
          mt, map->ptr, map->stride);
    }
@@ -2154,13 +2513,13 @@ intel_miptree_unmap_depthstencil(struct brw_context *brw,

       for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
-           ptrdiff_t s_offset = intel_offset_S8(s_mt->region->pitch,
+           ptrdiff_t s_offset = intel_offset_S8(s_mt->pitch,
                                                 x + s_image_x + map->x,
                                                 y + s_image_y + map->y,
                                                 brw->has_swizzling);
-           ptrdiff_t z_offset = ((y + z_image_y) *
-                                 (z_mt->region->pitch / 4) +
-                                 (x + z_image_x));
+           ptrdiff_t z_offset = ((y + z_image_y + map->y) *
+                                 (z_mt->pitch / 4) +
+                                 (x + z_image_x + map->x));

            if (map_z32f_x24s8) {
               z_map[z_offset] = packed_map[(y * map->w + x) * 2 + 0];
@@ -2177,7 +2536,7 @@ intel_miptree_unmap_depthstencil(struct brw_context *brw,
       intel_miptree_unmap_raw(brw, z_mt);

       DBG("%s: %d,%d %dx%d from z mt %p (%s) %d,%d, s mt %p %d,%d = %p/%d\n",
-         __FUNCTION__,
+         __func__,
          map->x, map->y, map->w, map->h,
          z_mt, _mesa_get_format_name(z_mt->format),
          map->x + z_image_x, map->y + z_image_y,
@@ -2244,24 +2603,64 @@ can_blit_slice(struct intel_mipmap_tree *mt,
    if (image_x >= 32768 || image_y >= 32768)
       return false;

-   if (mt->region->pitch >= 32768)
+   /* See intel_miptree_blit() for details on the 32k pitch limit. */
+   if (mt->pitch >= 32768)
       return false;

    return true;
 }

-static void
-intel_miptree_map_singlesample(struct brw_context *brw,
-                               struct intel_mipmap_tree *mt,
-                               unsigned int level,
-                               unsigned int slice,
-                               unsigned int x,
-                               unsigned int y,
-                               unsigned int w,
-                               unsigned int h,
-                               GLbitfield mode,
-                               void **out_ptr,
-                               int *out_stride)
+static bool
+use_intel_miptree_map_blit(struct brw_context *brw,
+                           struct intel_mipmap_tree *mt,
+                           GLbitfield mode,
+                           unsigned int level,
+                           unsigned int slice)
+{
+   if (brw->has_llc &&
+       /* It's probably not worth swapping to the blit ring because of
+        * all the overhead involved.
+        */
+       !(mode & GL_MAP_WRITE_BIT) &&
+       !mt->compressed &&
+       (mt->tiling == I915_TILING_X ||
+        /* Prior to Sandybridge, the blitter can't handle Y tiling */
+        (brw->gen >= 6 && mt->tiling == I915_TILING_Y)) &&
+       can_blit_slice(mt, level, slice))
+      return true;
+
+   if (mt->tiling != I915_TILING_NONE &&
+       mt->bo->size >= brw->max_gtt_map_object_size) {
+      assert(can_blit_slice(mt, level, slice));
+      return true;
+   }
+
+   return false;
+}
+
+/**
+ * Parameter \a out_stride has type ptrdiff_t not because the buffer stride may
+ * exceed 32 bits, but to diminish the likelihood of subtle bugs caused by
+ * pointer arithmetic overflow.
+ *
+ * If you call this function and use \a out_stride, then you're doing pointer
+ * arithmetic on \a out_ptr.  The type of \a out_stride doesn't prevent all
+ * bugs.  The caller must still take care to avoid 32-bit overflow errors in
+ * all arithmetic expressions that contain buffer offsets and pixel sizes,
+ * which usually have type uint32_t or GLuint.
+ */
+void
+intel_miptree_map(struct brw_context *brw,
+                  struct intel_mipmap_tree *mt,
+                  unsigned int level,
+                  unsigned int slice,
+                  unsigned int x,
+                  unsigned int y,
+                  unsigned int w,
+                  unsigned int h,
+                  GLbitfield mode,
+                  void **out_ptr,
+                  ptrdiff_t *out_stride)
 {
    struct intel_miptree_map *map;

@@ -2286,21 +2685,10 @@ intel_miptree_map_singlesample(struct brw_context *brw,
       intel_miptree_map_etc(brw, mt, map, level, slice);
    } else if (mt->stencil_mt && !(mode & BRW_MAP_DIRECT_BIT)) {
       intel_miptree_map_depthstencil(brw, mt, map, level, slice);
-   }
-   /* See intel_miptree_blit() for details on the 32k pitch limit. */
-   else if (brw->has_llc &&
-            !(mode & GL_MAP_WRITE_BIT) &&
-            !mt->compressed &&
-            (mt->region->tiling == I915_TILING_X ||
-             (brw->gen >= 6 && mt->region->tiling == I915_TILING_Y)) &&
-            can_blit_slice(mt, level, slice)) {
-      intel_miptree_map_blit(brw, mt, map, level, slice);
-   } else if (mt->region->tiling != I915_TILING_NONE &&
-              mt->region->bo->size >= brw->max_gtt_map_object_size) {
-      assert(can_blit_slice(mt, level, slice));
+   } else if (use_intel_miptree_map_blit(brw, mt, mode, level, slice)) {
       intel_miptree_map_blit(brw, mt, map, level, slice);
-#ifdef __SSE4_1__
-   } else if (!(mode & GL_MAP_WRITE_BIT) && !mt->compressed) {
+#if defined(USE_SSE41)
+   } else if (!(mode & GL_MAP_WRITE_BIT) && !mt->compressed && cpu_has_sse4_1) {
       intel_miptree_map_movntdqa(brw, mt, map, level, slice);
 #endif
    } else {
@@ -2314,11 +2702,11 @@ intel_miptree_map_singlesample(struct brw_context *brw,
    intel_miptree_release_map(mt, level, slice);
 }

-static void
-intel_miptree_unmap_singlesample(struct brw_context *brw,
-                                 struct intel_mipmap_tree *mt,
-                                 unsigned int level,
-                                 unsigned int slice)
+void
+intel_miptree_unmap(struct brw_context *brw,
+                    struct intel_mipmap_tree *mt,
+                    unsigned int level,
+                    unsigned int slice)
 {
    struct intel_miptree_map *map = mt->level[level].slice[slice].map;

@@ -2327,7 +2715,7 @@ intel_miptree_unmap_singlesample(struct brw_context *brw,
    if (!map)
       return;

-   DBG("%s: mt %p (%s) level %d slice %d\n", __FUNCTION__,
+   DBG("%s: mt %p (%s) level %d slice %d\n", __func__,
       mt, _mesa_get_format_name(mt->format), level, slice);

    if (mt->format == MESA_FORMAT_S_UINT8) {
@@ -2339,8 +2727,8 @@ intel_miptree_unmap_singlesample(struct brw_context *brw,
       intel_miptree_unmap_depthstencil(brw, mt, map, level, slice);
    } else if (map->mt) {
       intel_miptree_unmap_blit(brw, mt, map, level, slice);
-#ifdef __SSE4_1__
-   } else if (map->buffer) {
+#if defined(USE_SSE41)
+   } else if (map->buffer && cpu_has_sse4_1) {
       intel_miptree_unmap_movntdqa(brw, mt, map, level, slice);
 #endif
    } else {
@@ -2349,127 +2737,3 @@ intel_miptree_unmap_singlesample(struct brw_context *brw,
       intel_miptree_unmap_gtt(brw, mt, map, level, slice);
    }

    intel_miptree_release_map(mt, level, slice);
 }
-
-static void
-intel_miptree_map_multisample(struct brw_context *brw,
-                              struct intel_mipmap_tree *mt,
-                              unsigned int level,
-                              unsigned int slice,
-                              unsigned int x,
-                              unsigned int y,
-                              unsigned int w,
-                              unsigned int h,
-                              GLbitfield mode,
-                              void **out_ptr,
-                              int *out_stride)
-{
-   struct gl_context *ctx = &brw->ctx;
-   struct intel_miptree_map *map;
-
-   assert(mt->num_samples > 1);
-
-   /* Only flat, renderbuffer-like miptrees are supported. */
-   if (mt->target != GL_TEXTURE_2D ||
-       mt->first_level != 0 ||
-       mt->last_level != 0) {
-      _mesa_problem(ctx, "attempt to map a multisample miptree for "
-                    "which (target, first_level, last_level != "
-                    "(GL_TEXTURE_2D, 0, 0)");
-      goto fail;
-   }
-
-   map = intel_miptree_attach_map(mt, level, slice, x, y, w, h, mode);
-   if (!map)
-      goto fail;
-
-   if (!mt->singlesample_mt) {
-      mt->singlesample_mt =
-         intel_miptree_create_for_renderbuffer(brw,
-                                               mt->format,
-                                               mt->logical_width0,
-                                               mt->logical_height0,
-                                               0 /*num_samples*/);
-      if (!mt->singlesample_mt)
-         goto fail;
-
-      map->singlesample_mt_is_tmp = true;
-      mt->need_downsample = true;
-   }
-
-   intel_miptree_downsample(brw, mt);
-   intel_miptree_map_singlesample(brw, mt->singlesample_mt,
-                                  level, slice,
-                                  x, y, w, h,
-                                  mode,
-                                  out_ptr, out_stride);
-   return;
-
-fail:
-   intel_miptree_release_map(mt, level, slice);
-   *out_ptr = NULL;
-   *out_stride = 0;
-}
-
-static void
-intel_miptree_unmap_multisample(struct brw_context *brw,
-                                struct intel_mipmap_tree *mt,
-                                unsigned int level,
-                                unsigned int slice)
-{
-   struct intel_miptree_map *map = mt->level[level].slice[slice].map;
-
-   assert(mt->num_samples > 1);
-
-   if (!map)
-      return;
-
-   intel_miptree_unmap_singlesample(brw, mt->singlesample_mt, level, slice);
-
-   mt->need_downsample = false;
-   if (map->mode & GL_MAP_WRITE_BIT)
-      intel_miptree_upsample(brw, mt);
-
-   if (map->singlesample_mt_is_tmp)
-      intel_miptree_release(&mt->singlesample_mt);
-
-   intel_miptree_release_map(mt, level, slice);
-}
-
-void
-intel_miptree_map(struct brw_context *brw,
-                  struct intel_mipmap_tree *mt,
-                  unsigned int level,
-                  unsigned int slice,
-                  unsigned int x,
-                  unsigned int y,
-                  unsigned int w,
-                  unsigned int h,
-                  GLbitfield mode,
-                  void **out_ptr,
-                  int *out_stride)
-{
-   if (mt->num_samples <= 1)
-      intel_miptree_map_singlesample(brw, mt,
-                                     level, slice,
-                                     x, y, w, h,
-                                     mode,
-                                     out_ptr, out_stride);
-   else
-      intel_miptree_map_multisample(brw, mt,
-                                    level, slice,
-                                    x, y, w, h,
-                                    mode,
-                                    out_ptr, out_stride);
-}
-
-void
-intel_miptree_unmap(struct brw_context *brw,
-                    struct intel_mipmap_tree *mt,
-                    unsigned int level,
-                    unsigned int slice)
-{
-   if (mt->num_samples <= 1)
-      intel_miptree_unmap_singlesample(brw, mt, level, slice);
-   else
-      intel_miptree_unmap_multisample(brw, mt, level, slice);
-}
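
The net effect of the hunks above is that intel_miptree_map()/intel_miptree_unmap() become the
only public mapping entry points, with the GTT, blit, MOVNTDQA, S8, ETC and depth/stencil
strategies chosen internally. As an illustration only (not part of the patch), a hypothetical
caller of the refactored API could read back a rectangle like this; read_back_rect() and its
parameters are invented for the sketch, while the intel_miptree_map() signature, including the
ptrdiff_t out-stride, matches the one added above:

/* Hypothetical caller sketch -- not part of the patch. */
#include <stdint.h>
#include <string.h>
#include "intel_mipmap_tree.h"

static void
read_back_rect(struct brw_context *brw, struct intel_mipmap_tree *mt,
               unsigned level, unsigned slice,
               unsigned w, unsigned h, uint8_t *out_rgba8888)
{
   void *ptr;
   ptrdiff_t stride;

   /* Map for reading; the miptree code picks GTT, blit or
    * streaming-load mapping internally. */
   intel_miptree_map(brw, mt, level, slice, 0, 0, w, h,
                     GL_MAP_READ_BIT, &ptr, &stride);
   if (!ptr)
      return;

   /* Row addressing uses the ptrdiff_t stride, so the y * stride
    * product is computed at pointer width rather than in 32 bits. */
   for (unsigned y = 0; y < h; y++)
      memcpy(out_rgba8888 + (size_t) y * w * 4,
             (const char *) ptr + (ptrdiff_t) y * stride,
             (size_t) w * 4);

   intel_miptree_unmap(brw, mt, level, slice);
}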
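The new doc comment on intel_miptree_map() argues for the ptrdiff_t out-stride on overflow
grounds. A minimal standalone program (mine, not mesa's) demonstrates the failure mode: with a
1 MiB pitch, row 4096 sits exactly at byte offset 2^32, which a 32-bit product silently wraps
to zero:

/* Standalone sketch of the 32-bit offset overflow; not mesa code. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
   uint32_t stride32 = 1u << 20;             /* 1 MiB per row */
   ptrdiff_t stride = stride32;
   uint32_t y = 1u << 12;                    /* row 4096 */

   uint32_t bad = y * stride32;              /* wraps modulo 2^32 */
   ptrdiff_t good = (ptrdiff_t) y * stride;  /* full width on LP64 */

   printf("uint32_t offset:  %u\n", bad);    /* prints 0 */
   printf("ptrdiff_t offset: %td\n", good);  /* prints 4294967296 */
   return 0;
}

On an LP64 platform the second line prints 4294967296 while the first wraps to 0, which is
exactly the class of bug the comment tells callers to watch for in their own offset arithmetic.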
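The MOVNTDQA path above relies on the pitch being a multiple of 16 so that the source's
misalignment within a 16-byte line is identical on every row, letting the destination be offset
by the same amount and the bulk of each row be serviced at aligned addresses. A self-contained
sketch of that layout trick, with plain memcpy standing in for _mesa_streaming_load_memcpy()
and every name invented for the example:

/* Misalignment-preserving copy sketch; not mesa code. */
#include <assert.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Round up to the next multiple of 16 bytes. */
#define ALIGN16(v) (((v) + 15) & ~(size_t)15)

/* Copies 'height' rows of 'width_bytes' from a pitched source into a
 * fresh buffer whose rows carry the same 16-byte misalignment as src.
 * Returns the allocation (release with free()); *out_ptr points at the
 * first pixel and *out_stride is the temporary buffer's row pitch. */
static void *
copy_rows_preserving_misalignment(const uint8_t *src, size_t pitch,
                                  size_t width_bytes, size_t height,
                                  uint8_t **out_ptr, size_t *out_stride)
{
   assert(pitch % 16 == 0);   /* misalignment is then row-invariant */

   size_t misalignment = (uintptr_t) src & 15;
   size_t stride = ALIGN16(misalignment + width_bytes);
   uint8_t *buffer = aligned_alloc(16, stride * height);
   if (!buffer)
      return NULL;

   /* Offset dst so it has the same misalignment as src; an aligned
    * 16-byte streaming loop could then handle every row identically. */
   uint8_t *ptr = buffer + misalignment;
   for (size_t y = 0; y < height; y++)
      memcpy(ptr + y * stride, src + y * pitch, width_bytes);

   *out_ptr = ptr;
   *out_stride = stride;
   return buffer;
}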