intel_miptree_alloc_aux(struct brw_context *brw,
struct intel_mipmap_tree *mt);
-/**
- * Determine which MSAA layout should be used by the MSAA surface being
- * created, based on the chip generation and the surface type.
- */
-static enum intel_msaa_layout
-compute_msaa_layout(struct brw_context *brw, mesa_format format,
- uint32_t layout_flags)
+static bool
+is_mcs_supported(const struct brw_context *brw, mesa_format format,
+ uint32_t layout_flags)
{
/* Prior to Gen7, all MSAA surfaces used IMS layout. */
if (brw->gen < 7)
- return INTEL_MSAA_LAYOUT_IMS;
+ return false;
/* In Gen7, IMS layout is only used for depth and stencil buffers. */
switch (_mesa_get_format_base_format(format)) {
case GL_DEPTH_COMPONENT:
case GL_STENCIL_INDEX:
case GL_DEPTH_STENCIL:
- return INTEL_MSAA_LAYOUT_IMS;
+ return false;
default:
/* From the Ivy Bridge PRM, Vol4 Part1 p77 ("MCS Enable"):
*
* which is expensive.
*/
if (brw->gen == 7 && _mesa_get_format_datatype(format) == GL_INT) {
- return INTEL_MSAA_LAYOUT_UMS;
+ return false;
} else if (layout_flags & MIPTREE_LAYOUT_DISABLE_AUX) {
/* We can't use the CMS layout because it uses an aux buffer, the MCS
* buffer. So fallback to UMS, which is identical to CMS without the
* MCS. */
- return INTEL_MSAA_LAYOUT_UMS;
+ return false;
} else {
- return INTEL_MSAA_LAYOUT_CMS;
+ return true;
}
}
}
static bool
-intel_tiling_supports_ccs(const struct brw_context *brw, unsigned tiling)
+intel_tiling_supports_ccs(const struct brw_context *brw,
+ enum isl_tiling tiling)
{
/* From the Ivy Bridge PRM, Vol2 Part1 11.7 "MCS Buffer for Render
* Target(s)", beneath the "Fast Color Clear" bullet (p326):
* Gen9 changes the restriction to Y-tile only.
*/
if (brw->gen >= 9)
- return tiling == I915_TILING_Y;
+ return tiling == ISL_TILING_Y0;
else if (brw->gen >= 7)
- return tiling != I915_TILING_NONE;
+ return tiling != ISL_TILING_LINEAR;
else
return false;
}
return false;
/* This function applies only to non-multisampled render targets. */
- if (mt->num_samples > 1)
+ if (mt->surf.samples > 1)
return false;
/* MCS is only supported for color buffers */
return false;
const bool mip_mapped = mt->first_level != 0 || mt->last_level != 0;
- const bool arrayed = mt->physical_depth0 != 1;
+ const bool arrayed = mt->surf.logical_level0_px.array_len > 1 ||
+ mt->surf.logical_level0_px.depth > 1;
if (arrayed) {
/* Multisample surfaces with the CMS layout are not layered surfaces,
* accidentally reject a multisampled surface here. We should have
* rejected it earlier by explicitly checking the sample count.
*/
- assert(mt->num_samples <= 1);
+ assert(mt->surf.samples == 1);
}
/* Handle the hardware restrictions...
}
static bool
-intel_tiling_supports_hiz(const struct brw_context *brw, unsigned tiling)
+intel_tiling_supports_hiz(const struct brw_context *brw,
+ enum isl_tiling tiling)
{
if (brw->gen < 6)
return false;
- return tiling == I915_TILING_Y;
+ return tiling == ISL_TILING_Y0;
}
static bool
-intel_miptree_supports_hiz(struct brw_context *brw,
- struct intel_mipmap_tree *mt)
+intel_miptree_supports_hiz(const struct brw_context *brw,
+ const struct intel_mipmap_tree *mt)
{
if (!brw->has_hiz)
return false;
}
}
-
-/* On Gen9 support for color buffer compression was extended to single
- * sampled surfaces. This is a helper considering both auxiliary buffer
- * type and number of samples telling if the given miptree represents
- * the new single sampled case - also called lossless compression.
- */
-bool
-intel_miptree_is_lossless_compressed(const struct brw_context *brw,
- const struct intel_mipmap_tree *mt)
-{
- /* Only available from Gen9 onwards. */
- if (brw->gen < 9)
- return false;
-
- /* Compression always requires auxiliary buffer. */
- if (!mt->mcs_buf)
- return false;
-
- /* Single sample compression is represented re-using msaa compression
- * layout type: "Compressed Multisampled Surfaces".
- */
- if (mt->msaa_layout != INTEL_MSAA_LAYOUT_CMS)
- return false;
-
- /* And finally distinguish between msaa and single sample case. */
- return mt->num_samples <= 1;
-}
-
static bool
intel_miptree_supports_ccs_e(struct brw_context *brw,
const struct intel_mipmap_tree *mt)
{
+ if (brw->gen < 9)
+ return false;
+
/* For now compression is only enabled for integer formats even though
* there exist supported floating point formats also. This is a heuristic
* decision based on current public benchmarks. In none of the cases these
return false;
}
-/**
- * @param for_bo Indicates that the caller is
- * intel_miptree_create_for_bo(). If true, then do not create
- * \c stencil_mt.
- */
-static struct intel_mipmap_tree *
-intel_miptree_create_layout(struct brw_context *brw,
- GLenum target,
- mesa_format format,
- GLuint first_level,
- GLuint last_level,
- GLuint width0,
- GLuint height0,
- GLuint depth0,
- GLuint num_samples,
- uint32_t layout_flags)
+static bool
+needs_separate_stencil(const struct brw_context *brw,
+ struct intel_mipmap_tree *mt,
+ mesa_format format, uint32_t layout_flags)
{
- struct intel_mipmap_tree *mt = calloc(sizeof(*mt), 1);
- if (!mt)
- return NULL;
-
- DBG("%s target %s format %s level %d..%d slices %d <-- %p\n", __func__,
- _mesa_enum_to_string(target),
- _mesa_get_format_name(format),
- first_level, last_level, depth0, mt);
-
- if (target == GL_TEXTURE_1D_ARRAY)
- assert(height0 == 1);
-
- mt->target = target;
- mt->format = format;
- mt->first_level = first_level;
- mt->last_level = last_level;
- mt->logical_width0 = width0;
- mt->logical_height0 = height0;
- mt->logical_depth0 = depth0;
- mt->is_scanout = (layout_flags & MIPTREE_LAYOUT_FOR_SCANOUT) != 0;
- mt->aux_usage = ISL_AUX_USAGE_NONE;
- mt->supports_fast_clear = false;
- mt->aux_state = NULL;
- mt->cpp = _mesa_get_format_bytes(format);
- mt->num_samples = num_samples;
- mt->compressed = _mesa_is_format_compressed(format);
- mt->msaa_layout = INTEL_MSAA_LAYOUT_NONE;
- mt->refcount = 1;
-
- if (brw->gen == 6 && format == MESA_FORMAT_S_UINT8)
- layout_flags |= MIPTREE_LAYOUT_GEN6_HIZ_STENCIL;
-
- int depth_multiply = 1;
- if (num_samples > 1) {
- /* Adjust width/height/depth for MSAA */
- mt->msaa_layout = compute_msaa_layout(brw, format, layout_flags);
- if (mt->msaa_layout == INTEL_MSAA_LAYOUT_IMS) {
- /* From the Ivybridge PRM, Volume 1, Part 1, page 108:
- * "If the surface is multisampled and it is a depth or stencil
- * surface or Multisampled Surface StorageFormat in SURFACE_STATE is
- * MSFMT_DEPTH_STENCIL, WL and HL must be adjusted as follows before
- * proceeding:
- *
- * +----------------------------------------------------------------+
- * | Num Multisamples | W_l = | H_l = |
- * +----------------------------------------------------------------+
- * | 2 | ceiling(W_l / 2) * 4 | H_l (no adjustment) |
- * | 4 | ceiling(W_l / 2) * 4 | ceiling(H_l / 2) * 4 |
- * | 8 | ceiling(W_l / 2) * 8 | ceiling(H_l / 2) * 4 |
- * | 16 | ceiling(W_l / 2) * 8 | ceiling(H_l / 2) * 8 |
- * +----------------------------------------------------------------+
- * "
- *
- * Note that MSFMT_DEPTH_STENCIL just means the IMS (interleaved)
- * format rather than UMS/CMS (array slices). The Sandybridge PRM,
- * Volume 1, Part 1, Page 111 has the same formula for 4x MSAA.
- *
- * Another more complicated explanation for these adjustments comes
- * from the Sandybridge PRM, volume 4, part 1, page 31:
- *
- * "Any of the other messages (sample*, LOD, load4) used with a
- * (4x) multisampled surface will in-effect sample a surface with
- * double the height and width as that indicated in the surface
- * state. Each pixel position on the original-sized surface is
- * replaced with a 2x2 of samples with the following arrangement:
- *
- * sample 0 sample 2
- * sample 1 sample 3"
- *
- * Thus, when sampling from a multisampled texture, it behaves as
- * though the layout in memory for (x,y,sample) is:
- *
- * (0,0,0) (0,0,2) (1,0,0) (1,0,2)
- * (0,0,1) (0,0,3) (1,0,1) (1,0,3)
- *
- * (0,1,0) (0,1,2) (1,1,0) (1,1,2)
- * (0,1,1) (0,1,3) (1,1,1) (1,1,3)
- *
- * However, the actual layout of multisampled data in memory is:
- *
- * (0,0,0) (1,0,0) (0,0,1) (1,0,1)
- * (0,1,0) (1,1,0) (0,1,1) (1,1,1)
- *
- * (0,0,2) (1,0,2) (0,0,3) (1,0,3)
- * (0,1,2) (1,1,2) (0,1,3) (1,1,3)
- *
- * This pattern repeats for each 2x2 pixel block.
- *
- * As a result, when calculating the size of our 4-sample buffer for
- * an odd width or height, we have to align before scaling up because
- * sample 3 is in that bottom right 2x2 block.
- */
- switch (num_samples) {
- case 2:
- assert(brw->gen >= 8);
- width0 = ALIGN(width0, 2) * 2;
- height0 = ALIGN(height0, 2);
- break;
- case 4:
- width0 = ALIGN(width0, 2) * 2;
- height0 = ALIGN(height0, 2) * 2;
- break;
- case 8:
- width0 = ALIGN(width0, 2) * 4;
- height0 = ALIGN(height0, 2) * 2;
- break;
- case 16:
- width0 = ALIGN(width0, 2) * 4;
- height0 = ALIGN(height0, 2) * 4;
- break;
- default:
- /* num_samples should already have been quantized to 0, 1, 2, 4, 8
- * or 16.
- */
- unreachable("not reached");
- }
- } else {
- /* Non-interleaved */
- depth_multiply = num_samples;
- depth0 *= depth_multiply;
- }
- }
-
- if (!create_mapping_table(target, first_level, last_level, depth0,
- mt->level)) {
- free(mt);
- return NULL;
- }
-
- /* Set array_layout to ALL_SLICES_AT_EACH_LOD when array_spacing_lod0 can
- * be used. array_spacing_lod0 is only used for non-IMS MSAA surfaces on
- * Gen 7 and 8. On Gen 8 and 9 this layout is not available but it is still
- * used on Gen8 to make it pick a qpitch value which doesn't include space
- * for the mipmaps. On Gen9 this is not necessary because it will
- * automatically pick a packed qpitch value whenever mt->first_level ==
- * mt->last_level.
- * TODO: can we use it elsewhere?
- * TODO: also disable this on Gen8 and pick the qpitch value like Gen9
- */
- if (brw->gen >= 9) {
- mt->array_layout = ALL_LOD_IN_EACH_SLICE;
- } else {
- switch (mt->msaa_layout) {
- case INTEL_MSAA_LAYOUT_NONE:
- case INTEL_MSAA_LAYOUT_IMS:
- mt->array_layout = ALL_LOD_IN_EACH_SLICE;
- break;
- case INTEL_MSAA_LAYOUT_UMS:
- case INTEL_MSAA_LAYOUT_CMS:
- mt->array_layout = ALL_SLICES_AT_EACH_LOD;
- break;
- }
- }
-
- if (target == GL_TEXTURE_CUBE_MAP)
- assert(depth0 == 6 * depth_multiply);
- mt->physical_width0 = width0;
- mt->physical_height0 = height0;
- mt->physical_depth0 = depth0;
-
- if (!(layout_flags & MIPTREE_LAYOUT_FOR_BO) &&
- _mesa_get_format_base_format(format) == GL_DEPTH_STENCIL &&
- (brw->must_use_separate_stencil ||
- (brw->has_separate_stencil && intel_miptree_supports_hiz(brw, mt)))) {
- uint32_t stencil_flags = MIPTREE_LAYOUT_ACCELERATED_UPLOAD;
- if (brw->gen == 6) {
- stencil_flags |= MIPTREE_LAYOUT_TILING_ANY;
- }
-
- mt->stencil_mt = intel_miptree_create(brw,
- mt->target,
- MESA_FORMAT_S_UINT8,
- mt->first_level,
- mt->last_level,
- mt->logical_width0,
- mt->logical_height0,
- mt->logical_depth0,
- num_samples,
- stencil_flags);
-
- if (!mt->stencil_mt) {
- intel_miptree_release(&mt);
- return NULL;
- }
- mt->stencil_mt->r8stencil_needs_update = true;
-
- /* Fix up the Z miptree format for how we're splitting out separate
- * stencil. Gen7 expects there to be no stencil bits in its depth buffer.
- */
- mt->format = intel_depth_format_for_depthstencil_format(mt->format);
- mt->cpp = 4;
-
- if (format == mt->format) {
- _mesa_problem(NULL, "Unknown format %s in separate stencil mt\n",
- _mesa_get_format_name(mt->format));
- }
- }
-
- if (layout_flags & MIPTREE_LAYOUT_GEN6_HIZ_STENCIL)
- mt->array_layout = GEN6_HIZ_STENCIL;
-
- /*
- * Obey HALIGN_16 constraints for Gen8 and Gen9 buffers which are
- * multisampled or have an AUX buffer attached to it.
- *
- * GEN | MSRT | AUX_CCS_* or AUX_MCS
- * -------------------------------------------
- * 9 | HALIGN_16 | HALIGN_16
- * 8 | HALIGN_ANY | HALIGN_16
- * 7 | ? | ?
- * 6 | ? | ?
- */
- if (intel_miptree_supports_ccs(brw, mt)) {
- if (brw->gen >= 9 || (brw->gen == 8 && num_samples <= 1))
- layout_flags |= MIPTREE_LAYOUT_FORCE_HALIGN16;
- } else if (brw->gen >= 9 && num_samples > 1) {
- layout_flags |= MIPTREE_LAYOUT_FORCE_HALIGN16;
- } else {
- const UNUSED bool is_lossless_compressed_aux =
- brw->gen >= 9 && num_samples == 1 &&
- mt->format == MESA_FORMAT_R_UINT32;
+ if (layout_flags & MIPTREE_LAYOUT_FOR_BO)
+ return false;
- /* For now, nothing else has this requirement */
- assert(is_lossless_compressed_aux ||
- (layout_flags & MIPTREE_LAYOUT_FORCE_HALIGN16) == 0);
- }
+ if (_mesa_get_format_base_format(format) != GL_DEPTH_STENCIL)
+ return false;
- if (!brw_miptree_layout(brw, mt, layout_flags)) {
- intel_miptree_release(&mt);
- return NULL;
- }
+ if (brw->must_use_separate_stencil)
+ return true;
- return mt;
+ return brw->has_separate_stencil &&
+ intel_miptree_supports_hiz(brw, mt);
}
-
/**
* Choose the aux usage for this miptree. This function must be called fairly
* late in the miptree create process after we have a tiling.
{
assert(mt->aux_usage == ISL_AUX_USAGE_NONE);
- if (mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS) {
+ const unsigned no_flags = 0;
+ if (mt->surf.samples > 1 && is_mcs_supported(brw, mt->format, no_flags)) {
+ assert(mt->surf.msaa_layout == ISL_MSAA_LAYOUT_ARRAY);
mt->aux_usage = ISL_AUX_USAGE_MCS;
- } else if (intel_tiling_supports_ccs(brw, mt->tiling) &&
+ } else if (intel_tiling_supports_ccs(brw, mt->surf.tiling) &&
intel_miptree_supports_ccs(brw, mt)) {
if (!unlikely(INTEL_DEBUG & DEBUG_NO_RBC) &&
- brw->gen >= 9 && !mt->is_scanout &&
intel_miptree_supports_ccs_e(brw, mt)) {
mt->aux_usage = ISL_AUX_USAGE_CCS_E;
} else {
mt->aux_usage = ISL_AUX_USAGE_CCS_D;
}
- } else if (intel_tiling_supports_hiz(brw, mt->tiling) &&
+ } else if (intel_tiling_supports_hiz(brw, mt->surf.tiling) &&
intel_miptree_supports_hiz(brw, mt)) {
mt->aux_usage = ISL_AUX_USAGE_HIZ;
}
}
}
+/* Return the number of logical layers at the given miplevel: the minified
+ * depth for 3D surfaces, or the array length (which does not minify)
+ * otherwise.
+ */
+static unsigned
+get_num_logical_layers(const struct intel_mipmap_tree *mt, unsigned level)
+{
+   if (mt->surf.dim == ISL_SURF_DIM_3D)
+      return minify(mt->surf.logical_level0_px.depth, level);
+   else
+      return mt->surf.logical_level0_px.array_len;
+}
+
+/* Return the number of physical layers at the given miplevel. */
+static unsigned
+get_num_phys_layers(const struct isl_surf *surf, unsigned level)
+{
+   /* For physical dimensions the surface layout also needs to be
+    * considered: with ISL_DIM_LAYOUT_GEN4_2D the per-level depth of a 3D
+    * surface lives in phys_level0_sa.array_len rather than .depth.
+    * See isl_calc_phys_level0_extent_sa().
+    */
+   if (surf->dim != ISL_SURF_DIM_3D)
+      return surf->phys_level0_sa.array_len;
+
+   if (surf->dim_layout == ISL_DIM_LAYOUT_GEN4_2D)
+      return minify(surf->phys_level0_sa.array_len, level);
+
+   return minify(surf->phys_level0_sa.depth, level);
+}
+
/** \brief Assert that the level and layer are valid for the miptree. */
void
intel_miptree_check_level_layer(const struct intel_mipmap_tree *mt,
assert(level >= mt->first_level);
assert(level <= mt->last_level);
-
- if (mt->surf.size > 0)
- assert(layer < (mt->surf.dim == ISL_SURF_DIM_3D ?
- minify(mt->surf.phys_level0_sa.depth, level) :
- mt->surf.phys_level0_sa.array_len));
- else
- assert(layer < mt->level[level].depth);
+ assert(layer < get_num_phys_layers(&mt->surf, level));
}
static enum isl_aux_state **
uint32_t total_slices = 0;
for (uint32_t level = 0; level < levels; level++)
- total_slices += mt->level[level].depth;
+ total_slices += get_num_logical_layers(mt, level);
const size_t per_level_array_size = levels * sizeof(enum isl_aux_state *);
enum isl_aux_state *s = data + per_level_array_size;
for (uint32_t level = 0; level < levels; level++) {
per_level_arr[level] = s;
- for (uint32_t a = 0; a < mt->level[level].depth; a++)
+ const unsigned level_layers = get_num_logical_layers(mt, level);
+ for (uint32_t a = 0; a < level_layers; a++)
*(s++) = initial;
}
assert((void *)s == data + total_size);
free(state);
}
+/* Decide whether a surface that isl laid out tiled should instead be
+ * re-created linear, because tiling buys nothing or the blitter cannot
+ * handle the pitch.
+ */
+static bool
+need_to_retile_as_linear(struct brw_context *brw, unsigned row_pitch,
+                         enum isl_tiling tiling, unsigned samples)
+{
+   if (samples > 1)
+      return false;
+
+   if (tiling == ISL_TILING_LINEAR)
+      return false;
+
+   /* If the width is much smaller than a tile, don't bother tiling. */
+   if (row_pitch < 64)
+      return true;
+
+   /* Pitches this large cannot be blitted; fall back to linear. */
+   if (ALIGN(row_pitch, 512) >= 32768) {
+      perf_debug("row pitch %u too large to blit, falling back to untiled",
+                 row_pitch);
+      return true;
+   }
+
+   return false;
+}
+
+/* Decide whether a Y-tiled surface must be re-created X-tiled instead. */
+static bool
+need_to_retile_as_x(const struct brw_context *brw, uint64_t size,
+                    enum isl_tiling tiling)
+{
+   /* If the BO is too large to fit in the aperture, we need to use the
+    * BLT engine to support it. Prior to Sandybridge, the BLT paths can't
+    * handle Y-tiling, so we need to fall back to X.
+    */
+   if (brw->gen < 6 && size >= brw->max_gtt_map_object_size &&
+       tiling == ISL_TILING_Y0)
+      return true;
+
+   return false;
+}
+
static struct intel_mipmap_tree *
make_surface(struct brw_context *brw, GLenum target, mesa_format format,
unsigned first_level, unsigned last_level,
unsigned width0, unsigned height0, unsigned depth0,
- unsigned num_samples, enum isl_tiling isl_tiling,
+ unsigned num_samples, isl_tiling_flags_t tiling_flags,
isl_surf_usage_flags_t isl_usage_flags, uint32_t alloc_flags,
- struct brw_bo *bo)
+ unsigned row_pitch, struct brw_bo *bo)
{
struct intel_mipmap_tree *mt = calloc(sizeof(*mt), 1);
if (!mt)
return NULL;
}
+ mt->refcount = 1;
+
if (target == GL_TEXTURE_CUBE_MAP ||
target == GL_TEXTURE_CUBE_MAP_ARRAY)
isl_usage_flags |= ISL_SURF_USAGE_CUBE_BIT;
.depth = target == GL_TEXTURE_3D ? depth0 : 1,
.levels = last_level - first_level + 1,
.array_len = target == GL_TEXTURE_3D ? 1 : depth0,
- .samples = MAX2(num_samples, 1),
+ .samples = num_samples,
+ .row_pitch = row_pitch,
.usage = isl_usage_flags,
- .tiling_flags = 1u << isl_tiling
+ .tiling_flags = tiling_flags,
};
if (!isl_surf_init_s(&brw->isl_dev, &mt->surf, &init_info))
goto fail;
- assert(mt->surf.size % mt->surf.row_pitch == 0);
+   /* Depth surfaces are required to be Y-tiled, in which case the caller
+    * passes only ISL_TILING_Y0_BIT and no re-tiling may happen.  Otherwise
+    * check for corner cases needing special treatment (re-tiling as linear
+    * or as X).
+    */
+ if (tiling_flags & ~ISL_TILING_Y0_BIT) {
+ if (need_to_retile_as_linear(brw, mt->surf.row_pitch,
+ mt->surf.tiling, mt->surf.samples)) {
+ init_info.tiling_flags = 1u << ISL_TILING_LINEAR;
+ if (!isl_surf_init_s(&brw->isl_dev, &mt->surf, &init_info))
+ goto fail;
+ } else if (need_to_retile_as_x(brw, mt->surf.size, mt->surf.tiling)) {
+ init_info.tiling_flags = 1u << ISL_TILING_X;
+ if (!isl_surf_init_s(&brw->isl_dev, &mt->surf, &init_info))
+ goto fail;
+ }
+ }
+
+   /* For linear surfaces the buffer gets padded by a fixed 64 bytes, so the
+    * total size may not be a multiple of row_pitch.
+    * See isl_apply_surface_padding().
+    */
+ if (mt->surf.tiling != ISL_TILING_LINEAR)
+ assert(mt->surf.size % mt->surf.row_pitch == 0);
if (!bo) {
mt->bo = brw_bo_alloc_tiled(brw->bufmgr, "isl-miptree",
mt->surf.size,
- isl_tiling_to_bufmgr_tiling(isl_tiling),
+ isl_tiling_to_i915_tiling(
+ mt->surf.tiling),
mt->surf.row_pitch, alloc_flags);
if (!mt->bo)
goto fail;
mt->last_level = last_level;
mt->target = target;
mt->format = format;
- mt->refcount = 1;
mt->aux_state = NULL;
+ mt->cpp = isl_format_get_layout(mt->surf.format)->bpb / 8;
+ mt->compressed = _mesa_is_format_compressed(format);
return mt;
return NULL;
}
+/* Allocate mt->stencil_mt as a W-tiled S8 surface matching the levels,
+ * dimensions and sample count of the given miptree.  Returns false on
+ * allocation failure.
+ */
+static bool
+make_separate_stencil_surface(struct brw_context *brw,
+                              struct intel_mipmap_tree *mt)
+{
+   mt->stencil_mt = make_surface(brw, mt->target, MESA_FORMAT_S_UINT8,
+                                 0, mt->surf.levels - 1,
+                                 mt->surf.logical_level0_px.width,
+                                 mt->surf.logical_level0_px.height,
+                                 mt->surf.dim == ISL_SURF_DIM_3D ?
+                                    mt->surf.logical_level0_px.depth :
+                                    mt->surf.logical_level0_px.array_len,
+                                 mt->surf.samples, ISL_TILING_W_BIT,
+                                 ISL_SURF_USAGE_STENCIL_BIT |
+                                 ISL_SURF_USAGE_TEXTURE_BIT,
+                                 BO_ALLOC_FOR_RENDER, 0, NULL);
+
+   if (!mt->stencil_mt)
+      return false;
+
+   /* The cached R8 shadow copy is stale for a fresh stencil surface. */
+   mt->stencil_mt->r8stencil_needs_update = true;
+
+   return true;
+}
+
+/* Whether the layout flags require a linear (untiled) surface: only when
+ * TILING_NONE is set without the Y bit.
+ */
+static bool
+force_linear_tiling(uint32_t layout_flags)
+{
+   /* ANY includes NONE and Y bit. */
+   if (layout_flags & MIPTREE_LAYOUT_TILING_Y)
+      return false;
+
+   return layout_flags & MIPTREE_LAYOUT_TILING_NONE;
+}
+
static struct intel_mipmap_tree *
miptree_create(struct brw_context *brw,
GLenum target,
GLuint num_samples,
uint32_t layout_flags)
{
- if (brw->gen == 6 && format == MESA_FORMAT_S_UINT8)
+ if (format == MESA_FORMAT_S_UINT8)
return make_surface(brw, target, format, first_level, last_level,
- width0, height0, depth0, num_samples, ISL_TILING_W,
+ width0, height0, depth0, num_samples,
+ ISL_TILING_W_BIT,
ISL_SURF_USAGE_STENCIL_BIT |
ISL_SURF_USAGE_TEXTURE_BIT,
- BO_ALLOC_FOR_RENDER, NULL);
+ BO_ALLOC_FOR_RENDER,
+ 0,
+ NULL);
+
+ const GLenum base_format = _mesa_get_format_base_format(format);
+ if ((base_format == GL_DEPTH_COMPONENT ||
+ base_format == GL_DEPTH_STENCIL) &&
+ !force_linear_tiling(layout_flags)) {
+ /* Fix up the Z miptree format for how we're splitting out separate
+ * stencil. Gen7 expects there to be no stencil bits in its depth buffer.
+ */
+ const mesa_format depth_only_format =
+ intel_depth_format_for_depthstencil_format(format);
+      struct intel_mipmap_tree *mt = make_surface(
+         brw, target, brw->gen >= 6 ? depth_only_format : format,
+         first_level, last_level,
+         width0, height0, depth0, num_samples, ISL_TILING_Y0_BIT,
+         ISL_SURF_USAGE_DEPTH_BIT | ISL_SURF_USAGE_TEXTURE_BIT,
+         BO_ALLOC_FOR_RENDER, 0, NULL);
+
+      /* make_surface() can fail (isl_surf_init_s() or BO allocation);
+       * bail out before needs_separate_stencil() and
+       * make_separate_stencil_surface() dereference mt.
+       */
+      if (!mt)
+         return NULL;
+
+      if (needs_separate_stencil(brw, mt, format, layout_flags) &&
+          !make_separate_stencil_surface(brw, mt)) {
+         intel_miptree_release(&mt);
+         return NULL;
+      }
+
+ if (!(layout_flags & MIPTREE_LAYOUT_DISABLE_AUX))
+ intel_miptree_choose_aux_usage(brw, mt);
+
+ return mt;
+ }
- struct intel_mipmap_tree *mt;
mesa_format tex_format = format;
mesa_format etc_format = MESA_FORMAT_NONE;
uint32_t alloc_flags = 0;
etc_format = (format != tex_format) ? tex_format : MESA_FORMAT_NONE;
assert((layout_flags & MIPTREE_LAYOUT_FOR_BO) == 0);
- mt = intel_miptree_create_layout(brw, target, format,
- first_level, last_level, width0,
- height0, depth0, num_samples,
- layout_flags);
- if (!mt)
- return NULL;
-
- if (mt->tiling == (I915_TILING_Y | I915_TILING_X))
- mt->tiling = I915_TILING_Y;
-
if (layout_flags & MIPTREE_LAYOUT_ACCELERATED_UPLOAD)
alloc_flags |= BO_ALLOC_FOR_RENDER;
- mt->etc_format = etc_format;
+ isl_tiling_flags_t tiling_flags = force_linear_tiling(layout_flags) ?
+ ISL_TILING_LINEAR_BIT : ISL_TILING_ANY_MASK;
- if (format == MESA_FORMAT_S_UINT8) {
- /* Align to size of W tile, 64x64. */
- mt->bo = brw_bo_alloc_tiled_2d(brw->bufmgr, "miptree",
- ALIGN(mt->total_width, 64),
- ALIGN(mt->total_height, 64),
- mt->cpp, mt->tiling, &mt->pitch,
- alloc_flags);
- } else {
- mt->bo = brw_bo_alloc_tiled_2d(brw->bufmgr, "miptree",
- mt->total_width, mt->total_height,
- mt->cpp, mt->tiling, &mt->pitch,
- alloc_flags);
- }
+   /* TODO: This restriction predates BLORP being able to handle Y-tiling
+    * on these gens; re-evaluate whether it is still needed.
+    */
+ if (brw->gen < 6)
+ tiling_flags &= ~ISL_TILING_Y0_BIT;
+
+ struct intel_mipmap_tree *mt = make_surface(
+ brw, target, format,
+ first_level, last_level,
+ width0, height0, depth0,
+ num_samples, tiling_flags,
+ ISL_SURF_USAGE_RENDER_TARGET_BIT |
+ ISL_SURF_USAGE_TEXTURE_BIT,
+ alloc_flags, 0, NULL);
+ if (!mt)
+ return NULL;
+
+ mt->etc_format = etc_format;
if (layout_flags & MIPTREE_LAYOUT_FOR_SCANOUT)
mt->bo->cache_coherent = false;
GLuint num_samples,
uint32_t layout_flags)
{
+ assert(num_samples > 0);
+
struct intel_mipmap_tree *mt = miptree_create(
brw, target, format,
first_level, last_level,
width0, height0, depth0, num_samples,
layout_flags);
-
- /* If the BO is too large to fit in the aperture, we need to use the
- * BLT engine to support it. Prior to Sandybridge, the BLT paths can't
- * handle Y-tiling, so we need to fall back to X.
- */
- if (brw->gen < 6 && mt->bo->size >= brw->max_gtt_map_object_size &&
- mt->tiling == I915_TILING_Y) {
- const uint32_t alloc_flags =
- (layout_flags & MIPTREE_LAYOUT_ACCELERATED_UPLOAD) ?
- BO_ALLOC_FOR_RENDER : 0;
- perf_debug("%dx%d miptree larger than aperture; falling back to X-tiled\n",
- mt->total_width, mt->total_height);
-
- mt->tiling = I915_TILING_X;
- brw_bo_unreference(mt->bo);
- mt->bo = brw_bo_alloc_tiled_2d(brw->bufmgr, "miptree",
- mt->total_width, mt->total_height, mt->cpp,
- mt->tiling, &mt->pitch, alloc_flags);
- }
+ if (!mt)
+ return NULL;
mt->offset = 0;
- if (!mt->bo) {
- intel_miptree_release(&mt);
- return NULL;
- }
-
if (!intel_miptree_alloc_aux(brw, mt)) {
intel_miptree_release(&mt);
return NULL;
struct intel_mipmap_tree *mt;
uint32_t tiling, swizzle;
const GLenum target = depth > 1 ? GL_TEXTURE_2D_ARRAY : GL_TEXTURE_2D;
+ const GLenum base_format = _mesa_get_format_base_format(format);
+
+ if ((base_format == GL_DEPTH_COMPONENT ||
+ base_format == GL_DEPTH_STENCIL)) {
+ const mesa_format depth_only_format =
+ intel_depth_format_for_depthstencil_format(format);
+      mt = make_surface(brw, target,
+                        brw->gen >= 6 ? depth_only_format : format,
+                        0, 0, width, height, depth, 1, ISL_TILING_Y0_BIT,
+                        ISL_SURF_USAGE_DEPTH_BIT | ISL_SURF_USAGE_TEXTURE_BIT,
+                        BO_ALLOC_FOR_RENDER, pitch, bo);
+
+      /* Bail out before taking a reference on bo (which would leak on the
+       * failure path) and before choosing aux usage, which dereferences mt.
+       */
+      if (!mt)
+         return NULL;
+
+      brw_bo_reference(bo);
+
+      if (!(layout_flags & MIPTREE_LAYOUT_DISABLE_AUX))
+         intel_miptree_choose_aux_usage(brw, mt);
- if (brw->gen == 6 && format == MESA_FORMAT_S_UINT8) {
+ return mt;
+ } else if (format == MESA_FORMAT_S_UINT8) {
mt = make_surface(brw, target, MESA_FORMAT_S_UINT8,
- 0, 0, width, height, depth, 1, ISL_TILING_W,
+ 0, 0, width, height, depth, 1,
+ ISL_TILING_W_BIT,
ISL_SURF_USAGE_STENCIL_BIT |
ISL_SURF_USAGE_TEXTURE_BIT,
- BO_ALLOC_FOR_RENDER, bo);
+ BO_ALLOC_FOR_RENDER, pitch, bo);
if (!mt)
return NULL;
assert((layout_flags & MIPTREE_LAYOUT_TILING_ANY) == 0);
assert((layout_flags & MIPTREE_LAYOUT_TILING_NONE) == 0);
- layout_flags |= MIPTREE_LAYOUT_FOR_BO;
- mt = intel_miptree_create_layout(brw, target, format,
- 0, 0,
- width, height, depth, 0,
- layout_flags);
+ mt = make_surface(brw, target, format,
+ 0, 0, width, height, depth, 1,
+ 1lu << isl_tiling_from_i915_tiling(tiling),
+ ISL_SURF_USAGE_RENDER_TARGET_BIT |
+ ISL_SURF_USAGE_TEXTURE_BIT,
+ 0, pitch, bo);
if (!mt)
return NULL;
brw_bo_reference(bo);
mt->bo = bo;
- mt->pitch = pitch;
mt->offset = offset;
- mt->tiling = tiling;
if (!(layout_flags & MIPTREE_LAYOUT_DISABLE_AUX))
intel_miptree_choose_aux_usage(brw, mt);
return NULL;
mt->target = target;
- mt->total_width = width;
- mt->total_height = height;
if (i == 0)
planar_mt = mt;
mt->target = target;
mt->level[0].level_x = image->tile_x;
mt->level[0].level_y = image->tile_y;
- mt->level[0].slice[0].x_offset = image->tile_x;
- mt->level[0].slice[0].y_offset = image->tile_y;
- mt->total_width += image->tile_x;
- mt->total_height += image->tile_y;
/* From "OES_EGL_image" error reporting. We report GL_INVALID_OPERATION
* for EGL images from non-tile aligned sufaces in gen4 hw and earlier which has
struct intel_mipmap_tree *multisample_mt = NULL;
struct gl_renderbuffer *rb = &irb->Base.Base;
mesa_format format = rb->Format;
- int num_samples = rb->NumSamples;
+ const unsigned num_samples = MAX2(rb->NumSamples, 1);
/* Only the front and back buffers, which are color buffers, are allocated
* through the image loader.
assert(singlesample_mt);
- if (num_samples == 0) {
+ if (num_samples == 1) {
intel_miptree_release(&irb->mt);
irb->mt = singlesample_mt;
irb->singlesample_mt = singlesample_mt;
if (!irb->mt ||
- irb->mt->logical_width0 != width ||
- irb->mt->logical_height0 != height) {
+ irb->mt->surf.logical_level0_px.width != width ||
+ irb->mt->surf.logical_level0_px.height != height) {
multisample_mt = intel_miptree_create_for_renderbuffer(intel,
format,
width,
uint32_t depth = 1;
GLenum target = num_samples > 1 ? GL_TEXTURE_2D_MULTISAMPLE : GL_TEXTURE_2D;
const uint32_t layout_flags = MIPTREE_LAYOUT_ACCELERATED_UPLOAD |
- MIPTREE_LAYOUT_TILING_ANY |
- MIPTREE_LAYOUT_FOR_SCANOUT;
+ MIPTREE_LAYOUT_TILING_ANY;
mt = intel_miptree_create(brw, target, format, 0, 0,
width, height, depth, num_samples,
if (mt->target == GL_TEXTURE_CUBE_MAP)
depth = 6;
- if (mt->surf.size > 0) {
- if (level >= mt->surf.levels)
- return false;
-
- const unsigned level_depth =
- mt->surf.dim == ISL_SURF_DIM_3D ?
- minify(mt->surf.logical_level0_px.depth, level) :
- mt->surf.logical_level0_px.array_len;
-
- return width == minify(mt->surf.logical_level0_px.width, level) &&
- height == minify(mt->surf.logical_level0_px.height, level) &&
- depth == level_depth &&
- MAX2(image->NumSamples, 1) == mt->surf.samples;
- }
-
- int level_depth = mt->level[level].depth;
- if (mt->num_samples > 1) {
- switch (mt->msaa_layout) {
- case INTEL_MSAA_LAYOUT_NONE:
- case INTEL_MSAA_LAYOUT_IMS:
- break;
- case INTEL_MSAA_LAYOUT_UMS:
- case INTEL_MSAA_LAYOUT_CMS:
- level_depth /= mt->num_samples;
- break;
- }
- }
-
- /* Test image dimensions against the base level image adjusted for
- * minification. This will also catch images not present in the
- * tree, changed targets, etc.
- */
- if (width != minify(mt->logical_width0, level - mt->first_level) ||
- height != minify(mt->logical_height0, level - mt->first_level) ||
- depth != level_depth) {
+ if (level >= mt->surf.levels)
return false;
- }
- if (image->NumSamples != mt->num_samples)
- return false;
+ const unsigned level_depth =
+ mt->surf.dim == ISL_SURF_DIM_3D ?
+ minify(mt->surf.logical_level0_px.depth, level) :
+ mt->surf.logical_level0_px.array_len;
- return true;
-}
-
-
-void
-intel_miptree_set_level_info(struct intel_mipmap_tree *mt,
- GLuint level,
- GLuint x, GLuint y, GLuint d)
-{
- mt->level[level].depth = d;
- mt->level[level].level_x = x;
- mt->level[level].level_y = y;
-
- DBG("%s level %d, depth %d, offset %d,%d\n", __func__,
- level, d, x, y);
-
- assert(mt->level[level].slice);
-
- mt->level[level].slice[0].x_offset = mt->level[level].level_x;
- mt->level[level].slice[0].y_offset = mt->level[level].level_y;
-}
-
-
-void
-intel_miptree_set_image_offset(struct intel_mipmap_tree *mt,
- GLuint level, GLuint img,
- GLuint x, GLuint y)
-{
- if (img == 0 && level == 0)
- assert(x == 0 && y == 0);
-
- assert(img < mt->level[level].depth);
-
- mt->level[level].slice[img].x_offset = mt->level[level].level_x + x;
- mt->level[level].slice[img].y_offset = mt->level[level].level_y + y;
-
- DBG("%s level %d img %d pos %d,%d\n",
- __func__, level, img,
- mt->level[level].slice[img].x_offset,
- mt->level[level].slice[img].y_offset);
+ return width == minify(mt->surf.logical_level0_px.width, level) &&
+ height == minify(mt->surf.logical_level0_px.height, level) &&
+ depth == level_depth &&
+ MAX2(image->NumSamples, 1) == mt->surf.samples;
}
void
GLuint level, GLuint slice,
GLuint *x, GLuint *y)
{
- if (mt->surf.size > 0) {
- uint32_t x_offset_sa, y_offset_sa;
+ if (level == 0 && slice == 0) {
+ *x = mt->level[0].level_x;
+ *y = mt->level[0].level_y;
+ return;
+ }
- /* Given level is relative to level zero while the miptree may be
- * represent just a subset of all levels starting from 'first_level'.
- */
- assert(level >= mt->first_level);
- level -= mt->first_level;
+ uint32_t x_offset_sa, y_offset_sa;
- const unsigned z = mt->surf.dim == ISL_SURF_DIM_3D ? slice : 0;
- slice = mt->surf.dim == ISL_SURF_DIM_3D ? 0 : slice;
- isl_surf_get_image_offset_sa(&mt->surf, level, slice, z,
- &x_offset_sa, &y_offset_sa);
+   /* The miptree itself can have a non-zero offset only when it represents
+    * a single slice of an imported buffer object.
+    * See intel_miptree_create_for_dri_image().
+    */
+ assert(mt->level[0].level_x == 0);
+ assert(mt->level[0].level_y == 0);
- *x = x_offset_sa;
- *y = y_offset_sa;
- return;
- }
+   /* The given level is relative to level zero, while the miptree may
+    * represent just a subset of all levels starting from 'first_level'.
+    */
+ assert(level >= mt->first_level);
+ level -= mt->first_level;
- assert(slice < mt->level[level].depth);
+ const unsigned z = mt->surf.dim == ISL_SURF_DIM_3D ? slice : 0;
+ slice = mt->surf.dim == ISL_SURF_DIM_3D ? 0 : slice;
+ isl_surf_get_image_offset_el(&mt->surf, level, slice, z,
+ &x_offset_sa, &y_offset_sa);
- *x = mt->level[level].slice[slice].x_offset;
- *y = mt->level[level].slice[slice].y_offset;
+ *x = x_offset_sa;
+ *y = y_offset_sa;
}
* and tile_h is set to 1.
*/
void
-intel_get_tile_dims(uint32_t tiling, uint32_t cpp,
+intel_get_tile_dims(enum isl_tiling tiling, uint32_t cpp,
uint32_t *tile_w, uint32_t *tile_h)
{
switch (tiling) {
- case I915_TILING_X:
+ case ISL_TILING_X:
*tile_w = 512;
*tile_h = 8;
break;
- case I915_TILING_Y:
+ case ISL_TILING_Y0:
*tile_w = 128;
*tile_h = 32;
break;
- case I915_TILING_NONE:
+ case ISL_TILING_LINEAR:
*tile_w = cpp;
*tile_h = 1;
break;
* untiled, the masks are set to 0.
*/
void
-intel_get_tile_masks(uint32_t tiling, uint32_t cpp,
+intel_get_tile_masks(enum isl_tiling tiling, uint32_t cpp,
uint32_t *mask_x, uint32_t *mask_y)
{
uint32_t tile_w_bytes, tile_h;
uint32_t x, uint32_t y)
{
int cpp = mt->cpp;
- uint32_t pitch = mt->pitch;
- uint32_t tiling = mt->tiling;
+ uint32_t pitch = mt->surf.row_pitch;
- switch (tiling) {
+ switch (mt->surf.tiling) {
default:
unreachable("not reached");
- case I915_TILING_NONE:
+ case ISL_TILING_LINEAR:
return y * pitch + x * cpp;
- case I915_TILING_X:
+ case ISL_TILING_X:
assert((x % (512 / cpp)) == 0);
assert((y % 8) == 0);
return y * pitch + x / (512 / cpp) * 4096;
- case I915_TILING_Y:
+ case ISL_TILING_Y0:
assert((x % (128 / cpp)) == 0);
assert((y % 32) == 0);
return y * pitch + x / (128 / cpp) * 4096;
uint32_t x, y;
uint32_t mask_x, mask_y;
- intel_get_tile_masks(mt->tiling, mt->cpp, &mask_x, &mask_y);
+ intel_get_tile_masks(mt->surf.tiling, mt->cpp, &mask_x, &mask_y);
intel_miptree_get_image_offset(mt, level, slice, &x, &y);
*tile_x = x & mask_x;
{
void *src, *dst;
ptrdiff_t src_stride, dst_stride;
- const unsigned cpp = dst_mt->surf.size > 0 ?
- (isl_format_get_layout(dst_mt->surf.format)->bpb / 8) : dst_mt->cpp;
+ const unsigned cpp = (isl_format_get_layout(dst_mt->surf.format)->bpb / 8);
intel_miptree_map(brw, src_mt,
src_level, src_layer,
{
mesa_format format = src_mt->format;
- uint32_t width, height;
+ unsigned width = minify(src_mt->surf.phys_level0_sa.width,
+ src_level - src_mt->first_level);
+ unsigned height = minify(src_mt->surf.phys_level0_sa.height,
+ src_level - src_mt->first_level);
- if (src_mt->surf.size > 0) {
- width = minify(src_mt->surf.phys_level0_sa.width,
- src_level - src_mt->first_level);
- height = minify(src_mt->surf.phys_level0_sa.height,
- src_level - src_mt->first_level);
-
- if (src_mt->surf.dim == ISL_SURF_DIM_3D)
- assert(src_layer < minify(src_mt->surf.phys_level0_sa.depth,
- src_level - src_mt->first_level));
- else
- assert(src_layer < src_mt->surf.phys_level0_sa.array_len);
- } else {
- width = minify(src_mt->physical_width0,
- src_level - src_mt->first_level);
- height = minify(src_mt->physical_height0,
- src_level - src_mt->first_level);
- assert(src_layer < src_mt->level[src_level].depth);
- }
+ assert(src_layer < get_num_phys_layers(&src_mt->surf,
+ src_level - src_mt->first_level));
assert(src_mt->format == dst_mt->format);
DBG("validate blit mt %s %p %d,%d/%d -> mt %s %p %d,%d/%d (%dx%d)\n",
_mesa_get_format_name(src_mt->format),
- src_mt, src_x, src_y, src_mt->pitch,
+ src_mt, src_x, src_y, src_mt->surf.row_pitch,
_mesa_get_format_name(dst_mt->format),
- dst_mt, dst_x, dst_y, dst_mt->pitch,
+ dst_mt, dst_x, dst_y, dst_mt->surf.row_pitch,
width, height);
if (!intel_miptree_blit(brw,
if (!aux_state)
return false;
- struct isl_surf temp_main_surf;
struct isl_surf temp_mcs_surf;
- /* Create first an ISL presentation for the main color surface and let ISL
- * calculate equivalent MCS surface against it.
- */
- intel_miptree_get_isl_surf(brw, mt, &temp_main_surf);
MAYBE_UNUSED bool ok =
- isl_surf_get_mcs_surf(&brw->isl_dev, &temp_main_surf, &temp_mcs_surf);
+ isl_surf_get_mcs_surf(&brw->isl_dev, &mt->surf, &temp_mcs_surf);
assert(ok);
/* Buffer needs to be initialised requiring the buffer to be immediately
assert(mt->aux_usage == ISL_AUX_USAGE_CCS_E ||
mt->aux_usage == ISL_AUX_USAGE_CCS_D);
- struct isl_surf temp_main_surf;
struct isl_surf temp_ccs_surf;
- /* Create first an ISL presentation for the main color surface and let ISL
- * calculate equivalent CCS surface against it.
- */
- intel_miptree_get_isl_surf(brw, mt, &temp_main_surf);
- if (!isl_surf_get_ccs_surf(&brw->isl_dev, &temp_main_surf, &temp_ccs_surf))
+ if (!isl_surf_get_ccs_surf(&brw->isl_dev, &mt->surf, &temp_ccs_surf, 0))
return false;
assert(temp_ccs_surf.size &&
if (!aux_state)
return false;
- /* In case of compression mcs buffer needs to be initialised requiring the
- * buffer to be immediately mapped to cpu space for writing. Therefore do
- * not use the gpu access flag which can cause an unnecessary delay if the
- * backing pages happened to be just used by the GPU.
+ /* When CCS_E is used, we need to ensure that the CCS starts off in a valid
+ * state. From the Sky Lake PRM, "MCS Buffer for Render Target(s)":
+ *
+ * "If Software wants to enable Color Compression without Fast clear,
+ * Software needs to initialize MCS with zeros."
+ *
+ * A CCS value of 0 indicates that the corresponding block is in the
+ * pass-through state which is what we want.
+ *
+ * For CCS_D, on the other hand, we don't care as we're about to perform a
+ * fast-clear operation. In that case, being hot in caches is more useful.
*/
- const uint32_t alloc_flags =
- mt->aux_usage == ISL_AUX_USAGE_CCS_E ? 0 : BO_ALLOC_FOR_RENDER;
+ const uint32_t alloc_flags = mt->aux_usage == ISL_AUX_USAGE_CCS_E ?
+ BO_ALLOC_ZEROED : BO_ALLOC_FOR_RENDER;
mt->mcs_buf = intel_alloc_aux_buffer(brw, "ccs-miptree",
&temp_ccs_surf, alloc_flags, mt);
if (!mt->mcs_buf) {
mt->aux_state = aux_state;
- /* From Gen9 onwards single-sampled (non-msrt) auxiliary buffers are
- * used for lossless compression which requires similar initialisation
- * as multi-sample compression.
- */
- if (mt->aux_usage == ISL_AUX_USAGE_CCS_E) {
- /* Hardware sets the auxiliary buffer to all zeroes when it does full
- * resolve. Initialize it accordingly in case the first renderer is
- * cpu (or other none compression aware party).
- *
- * This is also explicitly stated in the spec (MCS Buffer for Render
- * Target(s)):
- * "If Software wants to enable Color Compression without Fast clear,
- * Software needs to initialize MCS with zeros."
- */
- intel_miptree_init_mcs(brw, mt, 0);
- mt->msaa_layout = INTEL_MSAA_LAYOUT_CMS;
- }
-
return true;
}
uint32_t level)
{
assert(mt->hiz_buf);
+ assert(mt->surf.size > 0);
if (brw->gen >= 8 || brw->is_haswell) {
- uint32_t width = minify(mt->physical_width0, level);
- uint32_t height = minify(mt->physical_height0, level);
+ uint32_t width = minify(mt->surf.phys_level0_sa.width, level);
+ uint32_t height = minify(mt->surf.phys_level0_sa.height, level);
/* Disable HiZ for LOD > 0 unless the width is 8 aligned
* and the height is 4 aligned. This allows our HiZ support
if (!aux_state)
return false;
- struct isl_surf temp_main_surf;
struct isl_surf temp_hiz_surf;
- intel_miptree_get_isl_surf(brw, mt, &temp_main_surf);
MAYBE_UNUSED bool ok =
- isl_surf_get_hiz_surf(&brw->isl_dev, &temp_main_surf, &temp_hiz_surf);
+ isl_surf_get_hiz_surf(&brw->isl_dev, &mt->surf, &temp_hiz_surf);
assert(ok);
const uint32_t alloc_flags = BO_ALLOC_FOR_RENDER;
case ISL_AUX_USAGE_MCS:
assert(_mesa_is_format_color_format(mt->format));
- assert(mt->num_samples > 1);
- if (!intel_miptree_alloc_mcs(brw, mt, mt->num_samples))
+ assert(mt->surf.samples > 1);
+ if (!intel_miptree_alloc_mcs(brw, mt, mt->surf.samples))
return false;
return true;
case ISL_AUX_USAGE_CCS_E:
assert(_mesa_is_format_color_format(mt->format));
- assert(mt->num_samples <= 1);
+ assert(mt->surf.samples == 1);
if (!intel_miptree_alloc_ccs(brw, mt))
return false;
return true;
* mipmap levels aren't available in the HiZ buffer. So we need all levels
* of the texture to be HiZ enabled.
*/
- for (unsigned level = mt->first_level; level <= mt->last_level; ++level) {
+ for (unsigned level = 0; level < mt->surf.levels; ++level) {
if (!intel_miptree_level_has_hiz(mt, level))
return false;
}
* There is no such blurb for 1D textures, but there is sufficient evidence
* that this is broken on SKL+.
*/
- return (mt->num_samples <= 1 &&
+ return (mt->surf.samples == 1 &&
mt->target != GL_TEXTURE_3D &&
mt->target != GL_TEXTURE_1D /* gen9+ restriction */);
}
num_levels = last_level - start_level + 1;
for (uint32_t level = start_level; level <= last_level; level++) {
- const uint32_t level_layers = MIN2(num_layers, mt->level[level].depth);
+ uint32_t level_layers = get_num_phys_layers(&mt->surf, level);
+
+ level_layers = MIN2(num_layers, level_layers);
+
for (unsigned a = 0; a < level_layers; a++) {
enum isl_aux_state aux_state =
intel_miptree_get_aux_state(mt, level, start_layer + a);
(level == 0 && mt->first_level == 0 && mt->last_level == 0));
/* Compression of arrayed msaa surfaces is supported. */
- if (mt->num_samples > 1)
+ if (mt->surf.samples > 1)
return;
/* Fast color clear is supported for non-msaa arrays only on Gen8+. */
- assert(brw->gen >= 8 || (layer == 0 && mt->logical_depth0 == 1));
+ assert(brw->gen >= 8 ||
+ (layer == 0 &&
+ mt->surf.logical_level0_px.depth == 1 &&
+ mt->surf.logical_level0_px.array_len == 1));
(void)level;
(void)layer;
static enum blorp_fast_clear_op
get_ccs_d_resolve_op(enum isl_aux_state aux_state,
- bool ccs_supported, bool fast_clear_supported)
+ enum isl_aux_usage aux_usage,
+ bool fast_clear_supported)
{
+ assert(aux_usage == ISL_AUX_USAGE_NONE || aux_usage == ISL_AUX_USAGE_CCS_D);
+
+ const bool ccs_supported = aux_usage == ISL_AUX_USAGE_CCS_D;
+
assert(ccs_supported == fast_clear_supported);
switch (aux_state) {
case ISL_AUX_STATE_CLEAR:
- case ISL_AUX_STATE_COMPRESSED_CLEAR:
+ case ISL_AUX_STATE_PARTIAL_CLEAR:
if (!ccs_supported)
return BLORP_FAST_CLEAR_OP_RESOLVE_FULL;
else
case ISL_AUX_STATE_RESOLVED:
case ISL_AUX_STATE_AUX_INVALID:
+ case ISL_AUX_STATE_COMPRESSED_CLEAR:
case ISL_AUX_STATE_COMPRESSED_NO_CLEAR:
break;
}
static enum blorp_fast_clear_op
get_ccs_e_resolve_op(enum isl_aux_state aux_state,
- bool ccs_supported, bool fast_clear_supported)
+ enum isl_aux_usage aux_usage,
+ bool fast_clear_supported)
{
+ /* CCS_E surfaces can be accessed as CCS_D if we're careful. */
+ assert(aux_usage == ISL_AUX_USAGE_NONE ||
+ aux_usage == ISL_AUX_USAGE_CCS_D ||
+ aux_usage == ISL_AUX_USAGE_CCS_E);
+
+ if (aux_usage == ISL_AUX_USAGE_CCS_D)
+ assert(fast_clear_supported);
+
switch (aux_state) {
case ISL_AUX_STATE_CLEAR:
+ case ISL_AUX_STATE_PARTIAL_CLEAR:
+ if (fast_clear_supported)
+ return BLORP_FAST_CLEAR_OP_NONE;
+ else if (aux_usage == ISL_AUX_USAGE_CCS_E)
+ return BLORP_FAST_CLEAR_OP_RESOLVE_PARTIAL;
+ else
+ return BLORP_FAST_CLEAR_OP_RESOLVE_FULL;
+
case ISL_AUX_STATE_COMPRESSED_CLEAR:
- if (!ccs_supported)
+ if (aux_usage != ISL_AUX_USAGE_CCS_E)
return BLORP_FAST_CLEAR_OP_RESOLVE_FULL;
else if (!fast_clear_supported)
return BLORP_FAST_CLEAR_OP_RESOLVE_PARTIAL;
return BLORP_FAST_CLEAR_OP_NONE;
case ISL_AUX_STATE_COMPRESSED_NO_CLEAR:
- if (!ccs_supported)
+ if (aux_usage != ISL_AUX_USAGE_CCS_E)
return BLORP_FAST_CLEAR_OP_RESOLVE_FULL;
else
return BLORP_FAST_CLEAR_OP_NONE;
intel_miptree_prepare_ccs_access(struct brw_context *brw,
struct intel_mipmap_tree *mt,
uint32_t level, uint32_t layer,
- bool aux_supported,
+ enum isl_aux_usage aux_usage,
bool fast_clear_supported)
{
enum isl_aux_state aux_state = intel_miptree_get_aux_state(mt, level, layer);
enum blorp_fast_clear_op resolve_op;
- if (intel_miptree_is_lossless_compressed(brw, mt)) {
- resolve_op = get_ccs_e_resolve_op(aux_state, aux_supported,
+ if (mt->aux_usage == ISL_AUX_USAGE_CCS_E) {
+ resolve_op = get_ccs_e_resolve_op(aux_state, aux_usage,
fast_clear_supported);
} else {
- resolve_op = get_ccs_d_resolve_op(aux_state, aux_supported,
+ assert(mt->aux_usage == ISL_AUX_USAGE_CCS_D);
+ resolve_op = get_ccs_d_resolve_op(aux_state, aux_usage,
fast_clear_supported);
}
intel_miptree_finish_ccs_write(struct brw_context *brw,
struct intel_mipmap_tree *mt,
uint32_t level, uint32_t layer,
- bool written_with_ccs)
+ enum isl_aux_usage aux_usage)
{
+ assert(aux_usage == ISL_AUX_USAGE_NONE ||
+ aux_usage == ISL_AUX_USAGE_CCS_D ||
+ aux_usage == ISL_AUX_USAGE_CCS_E);
+
enum isl_aux_state aux_state = intel_miptree_get_aux_state(mt, level, layer);
- if (intel_miptree_is_lossless_compressed(brw, mt)) {
+ if (mt->aux_usage == ISL_AUX_USAGE_CCS_E) {
switch (aux_state) {
case ISL_AUX_STATE_CLEAR:
- assert(written_with_ccs);
- intel_miptree_set_aux_state(brw, mt, level, layer, 1,
- ISL_AUX_STATE_COMPRESSED_CLEAR);
+ case ISL_AUX_STATE_PARTIAL_CLEAR:
+ assert(aux_usage == ISL_AUX_USAGE_CCS_E ||
+ aux_usage == ISL_AUX_USAGE_CCS_D);
+
+ if (aux_usage == ISL_AUX_USAGE_CCS_E) {
+ intel_miptree_set_aux_state(brw, mt, level, layer, 1,
+ ISL_AUX_STATE_COMPRESSED_CLEAR);
+ } else if (aux_state != ISL_AUX_STATE_PARTIAL_CLEAR) {
+ intel_miptree_set_aux_state(brw, mt, level, layer, 1,
+ ISL_AUX_STATE_PARTIAL_CLEAR);
+ }
break;
case ISL_AUX_STATE_COMPRESSED_CLEAR:
case ISL_AUX_STATE_COMPRESSED_NO_CLEAR:
- assert(written_with_ccs);
+ assert(aux_usage == ISL_AUX_USAGE_CCS_E);
break; /* Nothing to do */
case ISL_AUX_STATE_PASS_THROUGH:
- if (written_with_ccs) {
+ if (aux_usage == ISL_AUX_USAGE_CCS_E) {
intel_miptree_set_aux_state(brw, mt, level, layer, 1,
ISL_AUX_STATE_COMPRESSED_NO_CLEAR);
} else {
unreachable("Invalid aux state for CCS_E");
}
} else {
+ assert(mt->aux_usage == ISL_AUX_USAGE_CCS_D);
/* CCS_D is a bit simpler */
switch (aux_state) {
case ISL_AUX_STATE_CLEAR:
- assert(written_with_ccs);
+ assert(aux_usage == ISL_AUX_USAGE_CCS_D);
intel_miptree_set_aux_state(brw, mt, level, layer, 1,
- ISL_AUX_STATE_COMPRESSED_CLEAR);
+ ISL_AUX_STATE_PARTIAL_CLEAR);
break;
- case ISL_AUX_STATE_COMPRESSED_CLEAR:
- assert(written_with_ccs);
+ case ISL_AUX_STATE_PARTIAL_CLEAR:
+ assert(aux_usage == ISL_AUX_USAGE_CCS_D);
break; /* Nothing to do */
case ISL_AUX_STATE_PASS_THROUGH:
/* Nothing to do */
break;
+ case ISL_AUX_STATE_COMPRESSED_CLEAR:
case ISL_AUX_STATE_COMPRESSED_NO_CLEAR:
case ISL_AUX_STATE_RESOLVED:
case ISL_AUX_STATE_AUX_INVALID:
}
}
+static void
+intel_miptree_prepare_mcs_access(struct brw_context *brw,
+ struct intel_mipmap_tree *mt,
+ uint32_t layer,
+ enum isl_aux_usage aux_usage,
+ bool fast_clear_supported)
+{
+ assert(aux_usage == ISL_AUX_USAGE_MCS);
+
+ switch (intel_miptree_get_aux_state(mt, 0, layer)) {
+ case ISL_AUX_STATE_CLEAR:
+ case ISL_AUX_STATE_COMPRESSED_CLEAR:
+ if (!fast_clear_supported) {
+ brw_blorp_mcs_partial_resolve(brw, mt, layer, 1);
+ intel_miptree_set_aux_state(brw, mt, 0, layer, 1,
+ ISL_AUX_STATE_COMPRESSED_NO_CLEAR);
+ }
+ break;
+
+ case ISL_AUX_STATE_COMPRESSED_NO_CLEAR:
+ break; /* Nothing to do */
+
+ case ISL_AUX_STATE_RESOLVED:
+ case ISL_AUX_STATE_PASS_THROUGH:
+ case ISL_AUX_STATE_AUX_INVALID:
+ case ISL_AUX_STATE_PARTIAL_CLEAR:
+ unreachable("Invalid aux state for MCS");
+ }
+}
+
static void
intel_miptree_finish_mcs_write(struct brw_context *brw,
struct intel_mipmap_tree *mt,
- uint32_t level, uint32_t layer,
- bool written_with_aux)
+ uint32_t layer,
+ enum isl_aux_usage aux_usage)
{
- switch (intel_miptree_get_aux_state(mt, level, layer)) {
+ assert(aux_usage == ISL_AUX_USAGE_MCS);
+
+ switch (intel_miptree_get_aux_state(mt, 0, layer)) {
case ISL_AUX_STATE_CLEAR:
- assert(written_with_aux);
- intel_miptree_set_aux_state(brw, mt, level, layer, 1,
+ intel_miptree_set_aux_state(brw, mt, 0, layer, 1,
ISL_AUX_STATE_COMPRESSED_CLEAR);
break;
case ISL_AUX_STATE_COMPRESSED_CLEAR:
- assert(written_with_aux);
+ case ISL_AUX_STATE_COMPRESSED_NO_CLEAR:
break; /* Nothing to do */
- case ISL_AUX_STATE_COMPRESSED_NO_CLEAR:
case ISL_AUX_STATE_RESOLVED:
case ISL_AUX_STATE_PASS_THROUGH:
case ISL_AUX_STATE_AUX_INVALID:
+ case ISL_AUX_STATE_PARTIAL_CLEAR:
unreachable("Invalid aux state for MCS");
}
}
intel_miptree_prepare_hiz_access(struct brw_context *brw,
struct intel_mipmap_tree *mt,
uint32_t level, uint32_t layer,
- bool hiz_supported, bool fast_clear_supported)
+ enum isl_aux_usage aux_usage,
+ bool fast_clear_supported)
{
+ assert(aux_usage == ISL_AUX_USAGE_NONE || aux_usage == ISL_AUX_USAGE_HIZ);
+
enum blorp_hiz_op hiz_op = BLORP_HIZ_OP_NONE;
switch (intel_miptree_get_aux_state(mt, level, layer)) {
case ISL_AUX_STATE_CLEAR:
case ISL_AUX_STATE_COMPRESSED_CLEAR:
- if (!hiz_supported || !fast_clear_supported)
+ if (aux_usage != ISL_AUX_USAGE_HIZ || !fast_clear_supported)
hiz_op = BLORP_HIZ_OP_DEPTH_RESOLVE;
break;
case ISL_AUX_STATE_COMPRESSED_NO_CLEAR:
- if (!hiz_supported)
+ if (aux_usage != ISL_AUX_USAGE_HIZ)
hiz_op = BLORP_HIZ_OP_DEPTH_RESOLVE;
break;
break;
case ISL_AUX_STATE_AUX_INVALID:
- if (hiz_supported)
+ if (aux_usage == ISL_AUX_USAGE_HIZ)
hiz_op = BLORP_HIZ_OP_HIZ_RESOLVE;
break;
+
+ case ISL_AUX_STATE_PARTIAL_CLEAR:
+ unreachable("Invalid HiZ state");
}
if (hiz_op != BLORP_HIZ_OP_NONE) {
intel_miptree_finish_hiz_write(struct brw_context *brw,
struct intel_mipmap_tree *mt,
uint32_t level, uint32_t layer,
- bool written_with_hiz)
+ enum isl_aux_usage aux_usage)
{
+ assert(aux_usage == ISL_AUX_USAGE_NONE || aux_usage == ISL_AUX_USAGE_HIZ);
+
switch (intel_miptree_get_aux_state(mt, level, layer)) {
case ISL_AUX_STATE_CLEAR:
- assert(written_with_hiz);
+ assert(aux_usage == ISL_AUX_USAGE_HIZ);
intel_miptree_set_aux_state(brw, mt, level, layer, 1,
ISL_AUX_STATE_COMPRESSED_CLEAR);
break;
case ISL_AUX_STATE_COMPRESSED_NO_CLEAR:
case ISL_AUX_STATE_COMPRESSED_CLEAR:
- assert(written_with_hiz);
+ assert(aux_usage == ISL_AUX_USAGE_HIZ);
break; /* Nothing to do */
case ISL_AUX_STATE_RESOLVED:
- if (written_with_hiz) {
+ if (aux_usage == ISL_AUX_USAGE_HIZ) {
intel_miptree_set_aux_state(brw, mt, level, layer, 1,
ISL_AUX_STATE_COMPRESSED_NO_CLEAR);
} else {
break;
case ISL_AUX_STATE_PASS_THROUGH:
- if (written_with_hiz) {
+ if (aux_usage == ISL_AUX_USAGE_HIZ) {
intel_miptree_set_aux_state(brw, mt, level, layer, 1,
ISL_AUX_STATE_COMPRESSED_NO_CLEAR);
}
break;
case ISL_AUX_STATE_AUX_INVALID:
- assert(!written_with_hiz);
+ assert(aux_usage != ISL_AUX_USAGE_HIZ);
break;
+
+ case ISL_AUX_STATE_PARTIAL_CLEAR:
+ unreachable("Invalid HiZ state");
}
}
uint32_t start_layer, uint32_t num_layers)
{
assert(level <= mt->last_level);
- uint32_t total_num_layers;
-
- if (mt->surf.size > 0)
- total_num_layers = mt->surf.dim == ISL_SURF_DIM_3D ?
- minify(mt->surf.phys_level0_sa.depth, level) :
- mt->surf.phys_level0_sa.array_len;
- else
- total_num_layers = mt->level[level].depth;
+ const uint32_t total_num_layers = get_num_logical_layers(mt, level);
assert(start_layer < total_num_layers);
if (num_layers == INTEL_REMAINING_LAYERS)
num_layers = total_num_layers - start_layer;
struct intel_mipmap_tree *mt,
uint32_t start_level, uint32_t num_levels,
uint32_t start_layer, uint32_t num_layers,
- bool aux_supported, bool fast_clear_supported)
+ enum isl_aux_usage aux_usage,
+ bool fast_clear_supported)
{
num_levels = miptree_level_range_length(mt, start_level, num_levels);
- if (_mesa_is_format_color_format(mt->format)) {
+ switch (mt->aux_usage) {
+ case ISL_AUX_USAGE_NONE:
+ /* Nothing to do */
+ break;
+
+ case ISL_AUX_USAGE_MCS:
+ assert(mt->mcs_buf);
+ assert(start_level == 0 && num_levels == 1);
+ const uint32_t level_layers =
+ miptree_layer_range_length(mt, 0, start_layer, num_layers);
+ for (uint32_t a = 0; a < level_layers; a++) {
+ intel_miptree_prepare_mcs_access(brw, mt, start_layer + a,
+ aux_usage, fast_clear_supported);
+ }
+ break;
+
+ case ISL_AUX_USAGE_CCS_D:
+ case ISL_AUX_USAGE_CCS_E:
if (!mt->mcs_buf)
return;
- if (mt->num_samples > 1) {
- /* Nothing to do for MSAA */
- assert(aux_supported && fast_clear_supported);
- } else {
- for (uint32_t l = 0; l < num_levels; l++) {
- const uint32_t level = start_level + l;
- const uint32_t level_layers =
- miptree_layer_range_length(mt, level, start_layer, num_layers);
- for (uint32_t a = 0; a < level_layers; a++) {
- intel_miptree_prepare_ccs_access(brw, mt, level,
- start_layer + a, aux_supported,
- fast_clear_supported);
- }
+ for (uint32_t l = 0; l < num_levels; l++) {
+ const uint32_t level = start_level + l;
+ const uint32_t level_layers =
+ miptree_layer_range_length(mt, level, start_layer, num_layers);
+ for (uint32_t a = 0; a < level_layers; a++) {
+ intel_miptree_prepare_ccs_access(brw, mt, level,
+ start_layer + a,
+ aux_usage, fast_clear_supported);
}
}
- } else if (mt->format == MESA_FORMAT_S_UINT8) {
- /* Nothing to do for stencil */
- } else {
- if (!mt->hiz_buf)
- return;
+ break;
+ case ISL_AUX_USAGE_HIZ:
+ assert(mt->hiz_buf);
for (uint32_t l = 0; l < num_levels; l++) {
const uint32_t level = start_level + l;
if (!intel_miptree_level_has_hiz(mt, level))
miptree_layer_range_length(mt, level, start_layer, num_layers);
for (uint32_t a = 0; a < level_layers; a++) {
intel_miptree_prepare_hiz_access(brw, mt, level, start_layer + a,
- aux_supported,
- fast_clear_supported);
+ aux_usage, fast_clear_supported);
}
}
+ break;
+
+ default:
+ unreachable("Invalid aux usage");
}
}
intel_miptree_finish_write(struct brw_context *brw,
struct intel_mipmap_tree *mt, uint32_t level,
uint32_t start_layer, uint32_t num_layers,
- bool written_with_aux)
+ enum isl_aux_usage aux_usage)
{
num_layers = miptree_layer_range_length(mt, level, start_layer, num_layers);
- if (_mesa_is_format_color_format(mt->format)) {
+ switch (mt->aux_usage) {
+ case ISL_AUX_USAGE_NONE:
+ /* Nothing to do */
+ break;
+
+ case ISL_AUX_USAGE_MCS:
+ assert(mt->mcs_buf);
+ for (uint32_t a = 0; a < num_layers; a++) {
+ intel_miptree_finish_mcs_write(brw, mt, start_layer + a,
+ aux_usage);
+ }
+ break;
+
+ case ISL_AUX_USAGE_CCS_D:
+ case ISL_AUX_USAGE_CCS_E:
if (!mt->mcs_buf)
return;
- if (mt->num_samples > 1) {
- for (uint32_t a = 0; a < num_layers; a++) {
- intel_miptree_finish_mcs_write(brw, mt, level, start_layer + a,
- written_with_aux);
- }
- } else {
- for (uint32_t a = 0; a < num_layers; a++) {
- intel_miptree_finish_ccs_write(brw, mt, level, start_layer + a,
- written_with_aux);
- }
+ for (uint32_t a = 0; a < num_layers; a++) {
+ intel_miptree_finish_ccs_write(brw, mt, level, start_layer + a,
+ aux_usage);
}
- } else if (mt->format == MESA_FORMAT_S_UINT8) {
- /* Nothing to do for stencil */
- } else {
+ break;
+
+ case ISL_AUX_USAGE_HIZ:
if (!intel_miptree_level_has_hiz(mt, level))
return;
for (uint32_t a = 0; a < num_layers; a++) {
intel_miptree_finish_hiz_write(brw, mt, level, start_layer + a,
- written_with_aux);
+ aux_usage);
}
+ break;
+
+ default:
+ unreachable("Invalid aux usage");
}
}
if (_mesa_is_format_color_format(mt->format)) {
assert(mt->mcs_buf != NULL);
- assert(mt->num_samples <= 1 || mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS);
+ assert(mt->surf.samples == 1 ||
+ mt->surf.msaa_layout == ISL_MSAA_LAYOUT_ARRAY);
} else if (mt->format == MESA_FORMAT_S_UINT8) {
unreachable("Cannot get aux state for stencil");
} else {
if (_mesa_is_format_color_format(mt->format)) {
assert(mt->mcs_buf != NULL);
- assert(mt->num_samples <= 1 || mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS);
+ assert(mt->surf.samples == 1 ||
+ mt->surf.msaa_layout == ISL_MSAA_LAYOUT_ARRAY);
} else if (mt->format == MESA_FORMAT_S_UINT8) {
unreachable("Cannot get aux state for stencil");
} else {
struct intel_mipmap_tree *mt,
mesa_format view_format)
{
- if (!intel_miptree_is_lossless_compressed(brw, mt))
+ if (mt->aux_usage != ISL_AUX_USAGE_CCS_E)
return false;
enum isl_format isl_mt_format = brw_isl_format_for_mesa_format(mt->format);
return true;
}
+enum isl_aux_usage
+intel_miptree_texture_aux_usage(struct brw_context *brw,
+ struct intel_mipmap_tree *mt,
+ enum isl_format view_format)
+{
+ switch (mt->aux_usage) {
+ case ISL_AUX_USAGE_HIZ:
+ if (intel_miptree_sample_with_hiz(brw, mt))
+ return ISL_AUX_USAGE_HIZ;
+ break;
+
+ case ISL_AUX_USAGE_MCS:
+ return ISL_AUX_USAGE_MCS;
+
+ case ISL_AUX_USAGE_CCS_D:
+ case ISL_AUX_USAGE_CCS_E:
+ if (mt->mcs_buf && can_texture_with_ccs(brw, mt, view_format))
+ return ISL_AUX_USAGE_CCS_E;
+ break;
+
+ default:
+ break;
+ }
+
+ return ISL_AUX_USAGE_NONE;
+}
+
static void
intel_miptree_prepare_texture_slices(struct brw_context *brw,
struct intel_mipmap_tree *mt,
uint32_t start_layer, uint32_t num_layers,
bool *aux_supported_out)
{
- bool aux_supported, clear_supported;
- if (_mesa_is_format_color_format(mt->format)) {
- if (mt->num_samples > 1) {
- aux_supported = clear_supported = true;
- } else {
- aux_supported = can_texture_with_ccs(brw, mt, view_format);
+ enum isl_aux_usage aux_usage =
+ intel_miptree_texture_aux_usage(brw, mt, view_format);
+ bool clear_supported = aux_usage != ISL_AUX_USAGE_NONE;
- /* Clear color is specified as ints or floats and the conversion is
- * done by the sampler. If we have a texture view, we would have to
- * perform the clear color conversion manually. Just disable clear
- * color.
- */
- clear_supported = aux_supported && (mt->format == view_format);
- }
- } else if (mt->format == MESA_FORMAT_S_UINT8) {
- aux_supported = clear_supported = false;
- } else {
- aux_supported = clear_supported = intel_miptree_sample_with_hiz(brw, mt);
- }
+ /* Clear color is specified as ints or floats and the conversion is done by
+ * the sampler. If we have a texture view, we would have to perform the
+ * clear color conversion manually. Just disable clear color.
+ */
+ if (mt->format != view_format)
+ clear_supported = false;
intel_miptree_prepare_access(brw, mt, start_level, num_levels,
start_layer, num_layers,
- aux_supported, clear_supported);
+ aux_usage, clear_supported);
if (aux_supported_out)
- *aux_supported_out = aux_supported;
+ *aux_supported_out = aux_usage != ISL_AUX_USAGE_NONE;
}
void
{
/* The data port doesn't understand any compression */
intel_miptree_prepare_access(brw, mt, 0, INTEL_REMAINING_LEVELS,
- 0, INTEL_REMAINING_LAYERS, false, false);
+ 0, INTEL_REMAINING_LAYERS,
+ ISL_AUX_USAGE_NONE, false);
}
void
start_layer, num_layers, NULL);
}
-void
-intel_miptree_prepare_render(struct brw_context *brw,
- struct intel_mipmap_tree *mt, uint32_t level,
- uint32_t start_layer, uint32_t layer_count,
- bool srgb_enabled)
+enum isl_aux_usage
+intel_miptree_render_aux_usage(struct brw_context *brw,
+ struct intel_mipmap_tree *mt,
+ bool srgb_enabled)
{
- /* If FRAMEBUFFER_SRGB is used on Gen9+ then we need to resolve any of
- * the single-sampled color renderbuffers because the CCS buffer isn't
- * supported for SRGB formats. This only matters if FRAMEBUFFER_SRGB is
- * enabled because otherwise the surface state will be programmed with
- * the linear equivalent format anyway.
- */
- if (brw->gen == 9 && srgb_enabled && mt->num_samples <= 1 &&
- _mesa_get_srgb_format_linear(mt->format) != mt->format) {
+ switch (mt->aux_usage) {
+ case ISL_AUX_USAGE_MCS:
+ assert(mt->mcs_buf);
+ return ISL_AUX_USAGE_MCS;
+
+ case ISL_AUX_USAGE_CCS_D:
+ /* If FRAMEBUFFER_SRGB is used on Gen9+ then we need to resolve any of
+ * the single-sampled color renderbuffers because the CCS buffer isn't
+ * supported for SRGB formats. This only matters if FRAMEBUFFER_SRGB is
+ * enabled because otherwise the surface state will be programmed with
+ * the linear equivalent format anyway.
+ */
+ if (srgb_enabled &&
+ _mesa_get_srgb_format_linear(mt->format) != mt->format) {
+ return ISL_AUX_USAGE_NONE;
+ } else if (!mt->mcs_buf) {
+ return ISL_AUX_USAGE_NONE;
+ } else {
+ return ISL_AUX_USAGE_CCS_D;
+ }
+ case ISL_AUX_USAGE_CCS_E: {
/* Lossless compression is not supported for SRGB formats, it
* should be impossible to get here with such surfaces.
*/
- assert(!intel_miptree_is_lossless_compressed(brw, mt));
- intel_miptree_prepare_access(brw, mt, level, 1, start_layer, layer_count,
- false, false);
+ assert(!srgb_enabled ||
+ _mesa_get_srgb_format_linear(mt->format) == mt->format);
+
+ return ISL_AUX_USAGE_CCS_E;
+ }
+
+ default:
+ return ISL_AUX_USAGE_NONE;
}
}
+void
+intel_miptree_prepare_render(struct brw_context *brw,
+ struct intel_mipmap_tree *mt, uint32_t level,
+ uint32_t start_layer, uint32_t layer_count,
+ bool srgb_enabled)
+{
+ enum isl_aux_usage aux_usage =
+ intel_miptree_render_aux_usage(brw, mt, srgb_enabled);
+ intel_miptree_prepare_access(brw, mt, level, 1, start_layer, layer_count,
+ aux_usage, aux_usage != ISL_AUX_USAGE_NONE);
+}
+
void
intel_miptree_finish_render(struct brw_context *brw,
struct intel_mipmap_tree *mt, uint32_t level,
- uint32_t start_layer, uint32_t layer_count)
+ uint32_t start_layer, uint32_t layer_count,
+ bool srgb_enabled)
{
assert(_mesa_is_format_color_format(mt->format));
+
+ enum isl_aux_usage aux_usage =
+ intel_miptree_render_aux_usage(brw, mt, srgb_enabled);
intel_miptree_finish_write(brw, mt, level, start_layer, layer_count,
- mt->mcs_buf != NULL);
+ aux_usage);
}
void
uint32_t start_layer, uint32_t layer_count)
{
intel_miptree_prepare_access(brw, mt, level, 1, start_layer, layer_count,
- mt->hiz_buf != NULL, mt->hiz_buf != NULL);
+ mt->aux_usage, mt->hiz_buf != NULL);
}
void
* pixel data is stored. Fortunately this code path should never be
* reached for multisample buffers.
*/
- assert(mt->msaa_layout == INTEL_MSAA_LAYOUT_NONE || mt->num_samples <= 1);
+ assert(mt->surf.msaa_layout == ISL_MSAA_LAYOUT_NONE ||
+ mt->surf.samples == 1);
intel_miptree_prepare_access(brw, mt, 0, INTEL_REMAINING_LEVELS,
- 0, INTEL_REMAINING_LAYERS, false, false);
+ 0, INTEL_REMAINING_LAYERS,
+ ISL_AUX_USAGE_NONE, false);
if (mt->mcs_buf) {
brw_bo_unreference(mt->mcs_buf->bo);
uint32_t tile_size = 4096;
uint32_t tile_width = 64;
uint32_t tile_height = 64;
- uint32_t row_size = 64 * stride;
+ uint32_t row_size = 64 * stride / 2; /* Two rows are interleaved. */
uint32_t tile_x = x / tile_width;
uint32_t tile_y = y / tile_height;
struct intel_mipmap_tree *src,
struct intel_mipmap_tree *dst)
{
- unsigned src_w, src_h, dst_w, dst_h;
-
- if (src->surf.size > 0) {
- src_w = src->surf.logical_level0_px.width;
- src_h = src->surf.logical_level0_px.height;
- } else {
- src_w = src->logical_width0;
- src_h = src->logical_height0;
- }
-
- if (dst->surf.size > 0) {
- dst_w = dst->surf.logical_level0_px.width;
- dst_h = dst->surf.logical_level0_px.height;
- } else {
- dst_w = dst->logical_width0;
- dst_h = dst->logical_height0;
- }
+ unsigned src_w = src->surf.logical_level0_px.width;
+ unsigned src_h = src->surf.logical_level0_px.height;
+ unsigned dst_w = dst->surf.logical_level0_px.width;
+ unsigned dst_h = dst->surf.logical_level0_px.height;
brw_blorp_blit_miptrees(brw,
src, 0 /* level */, 0 /* layer */,
false, false);
if (src->stencil_mt) {
- if (src->stencil_mt->surf.size > 0) {
- src_w = src->stencil_mt->surf.logical_level0_px.width;
- src_h = src->stencil_mt->surf.logical_level0_px.height;
- } else {
- src_w = src->stencil_mt->logical_width0;
- src_h = src->stencil_mt->logical_height0;
- }
-
- if (dst->stencil_mt->surf.size > 0) {
- dst_w = dst->stencil_mt->surf.logical_level0_px.width;
- dst_h = dst->stencil_mt->surf.logical_level0_px.height;
- } else {
- dst_w = dst->stencil_mt->logical_width0;
- dst_h = dst->stencil_mt->logical_height0;
- }
+ src_w = src->stencil_mt->surf.logical_level0_px.width;
+ src_h = src->stencil_mt->surf.logical_level0_px.height;
+ dst_w = dst->stencil_mt->surf.logical_level0_px.width;
+ dst_h = dst->stencil_mt->surf.logical_level0_px.height;
brw_blorp_blit_miptrees(brw,
src->stencil_mt, 0 /* level */, 0 /* layer */,
if (!src || brw->gen >= 8 || !src->r8stencil_needs_update)
return;
+ assert(src->surf.size > 0);
+
if (!mt->r8stencil_mt) {
- const uint32_t r8stencil_flags =
- MIPTREE_LAYOUT_ACCELERATED_UPLOAD | MIPTREE_LAYOUT_TILING_Y |
- MIPTREE_LAYOUT_DISABLE_AUX;
assert(brw->gen > 6); /* Handle MIPTREE_LAYOUT_GEN6_HIZ_STENCIL */
- mt->r8stencil_mt = intel_miptree_create(brw,
- src->target,
- MESA_FORMAT_R_UINT8,
- src->first_level,
- src->last_level,
- src->logical_width0,
- src->logical_height0,
- src->logical_depth0,
- src->num_samples,
- r8stencil_flags);
+ mt->r8stencil_mt = make_surface(
+ brw,
+ src->target,
+ MESA_FORMAT_R_UINT8,
+ src->first_level, src->last_level,
+ src->surf.logical_level0_px.width,
+ src->surf.logical_level0_px.height,
+ src->surf.dim == ISL_SURF_DIM_3D ?
+ src->surf.logical_level0_px.depth :
+ src->surf.logical_level0_px.array_len,
+ src->surf.samples,
+ ISL_TILING_Y0_BIT,
+ ISL_SURF_USAGE_TEXTURE_BIT,
+ BO_ALLOC_FOR_RENDER, 0, NULL);
assert(mt->r8stencil_mt);
}
struct intel_mipmap_tree *dst = mt->r8stencil_mt;
for (int level = src->first_level; level <= src->last_level; level++) {
- const unsigned depth = src->level[level].depth;
+ const unsigned depth = src->surf.dim == ISL_SURF_DIM_3D ?
+ minify(src->surf.phys_level0_sa.depth, level) :
+ src->surf.phys_level0_sa.array_len;
for (unsigned layer = 0; layer < depth; layer++) {
brw_blorp_copy_miptrees(brw,
src, level, layer,
dst, level, layer,
0, 0, 0, 0,
- minify(src->logical_width0, level),
- minify(src->logical_height0, level));
+ minify(src->surf.logical_level0_px.width,
+ level),
+ minify(src->surf.logical_level0_px.height,
+ level));
}
}
x += image_x;
y += image_y;
- map->stride = mt->pitch;
+ map->stride = mt->surf.row_pitch;
map->ptr = base + y * map->stride + x * mt->cpp;
}
/* first_level */ 0,
/* last_level */ 0,
map->w, map->h, 1,
- /* samples */ 0,
+ /* samples */ 1,
MIPTREE_LAYOUT_TILING_NONE);
if (!map->linear_mt) {
fprintf(stderr, "Failed to allocate blit temporary\n");
goto fail;
}
- map->stride = map->linear_mt->pitch;
+ map->stride = map->linear_mt->surf.row_pitch;
/* One of either READ_BIT or WRITE_BIT or both is set. READ_BIT implies no
* INVALIDATE_RANGE_BIT. WRITE_BIT needs the original values read in unless
src += mt->offset;
- src += image_y * mt->pitch;
+ src += image_y * mt->surf.row_pitch;
src += image_x * mt->cpp;
/* Due to the pixel offsets for the particular image being mapped, our
* divisible by 16, then the amount by which it's misaligned will remain
* consistent from row to row.
*/
- assert((mt->pitch % 16) == 0);
+ assert((mt->surf.row_pitch % 16) == 0);
const int misalignment = ((uintptr_t) src) & 15;
/* Create an untiled temporary buffer for the mapping. */
for (uint32_t y = 0; y < map->h; y++) {
void *dst_ptr = map->ptr + y * map->stride;
- void *src_ptr = src + y * mt->pitch;
+ void *src_ptr = src + y * mt->surf.row_pitch;
_mesa_streaming_load_memcpy(dst_ptr, src_ptr, width_bytes);
}
* temporary buffer back out.
*/
if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
- /* ISL uses a stencil pitch value that is expected by hardware whereas
- * traditional miptree uses half of that. Below the value gets supplied
- * to intel_offset_S8() which expects the legacy interpretation.
- */
- const unsigned pitch = mt->surf.size > 0 ?
- mt->surf.row_pitch / 2 : mt->pitch;
uint8_t *untiled_s8_map = map->ptr;
uint8_t *tiled_s8_map = intel_miptree_map_raw(brw, mt, GL_MAP_READ_BIT);
unsigned int image_x, image_y;
for (uint32_t y = 0; y < map->h; y++) {
for (uint32_t x = 0; x < map->w; x++) {
- ptrdiff_t offset = intel_offset_S8(pitch,
+ ptrdiff_t offset = intel_offset_S8(mt->surf.row_pitch,
x + image_x + map->x,
y + image_y + map->y,
brw->has_swizzling);
unsigned int slice)
{
if (map->mode & GL_MAP_WRITE_BIT) {
- /* ISL uses a stencil pitch value that is expected by hardware whereas
- * traditional miptree uses half of that. Below the value gets supplied
- * to intel_offset_S8() which expects the legacy interpretation.
- */
- const unsigned pitch = mt->surf.size > 0 ?
- mt->surf.row_pitch / 2: mt->pitch;
unsigned int image_x, image_y;
uint8_t *untiled_s8_map = map->ptr;
uint8_t *tiled_s8_map = intel_miptree_map_raw(brw, mt, GL_MAP_WRITE_BIT);
for (uint32_t y = 0; y < map->h; y++) {
for (uint32_t x = 0; x < map->w; x++) {
- ptrdiff_t offset = intel_offset_S8(pitch,
+ ptrdiff_t offset = intel_offset_S8(mt->surf.row_pitch,
image_x + x + map->x,
image_y + y + map->y,
brw->has_swizzling);
image_y += map->y;
uint8_t *dst = intel_miptree_map_raw(brw, mt, GL_MAP_WRITE_BIT)
- + image_y * mt->pitch
+ + image_y * mt->surf.row_pitch
+ image_x * mt->cpp;
if (mt->etc_format == MESA_FORMAT_ETC1_RGB8)
- _mesa_etc1_unpack_rgba8888(dst, mt->pitch,
+ _mesa_etc1_unpack_rgba8888(dst, mt->surf.row_pitch,
map->ptr, map->stride,
map->w, map->h);
else
- _mesa_unpack_etc2_format(dst, mt->pitch,
+ _mesa_unpack_etc2_format(dst, mt->surf.row_pitch,
map->ptr, map->stride,
map->w, map->h, mt->etc_format);
* temporary buffer back out.
*/
if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
- /* ISL uses a stencil pitch value that is expected by hardware whereas
- * traditional miptree uses half of that. Below the value gets supplied
- * to intel_offset_S8() which expects the legacy interpretation.
- */
- const unsigned s_pitch = s_mt->surf.size > 0 ?
- s_mt->surf.row_pitch / 2 : s_mt->pitch;
uint32_t *packed_map = map->ptr;
uint8_t *s_map = intel_miptree_map_raw(brw, s_mt, GL_MAP_READ_BIT);
uint32_t *z_map = intel_miptree_map_raw(brw, z_mt, GL_MAP_READ_BIT);
for (uint32_t y = 0; y < map->h; y++) {
for (uint32_t x = 0; x < map->w; x++) {
int map_x = map->x + x, map_y = map->y + y;
- ptrdiff_t s_offset = intel_offset_S8(s_pitch,
+ ptrdiff_t s_offset = intel_offset_S8(s_mt->surf.row_pitch,
map_x + s_image_x,
map_y + s_image_y,
brw->has_swizzling);
ptrdiff_t z_offset = ((map_y + z_image_y) *
- (z_mt->pitch / 4) +
+ (z_mt->surf.row_pitch / 4) +
(map_x + z_image_x));
uint8_t s = s_map[s_offset];
uint32_t z = z_map[z_offset];
bool map_z32f_x24s8 = mt->format == MESA_FORMAT_Z_FLOAT32;
if (map->mode & GL_MAP_WRITE_BIT) {
- /* ISL uses a stencil pitch value that is expected by hardware whereas
- * traditional miptree uses half of that. Below the value gets supplied
- * to intel_offset_S8() which expects the legacy interpretation.
- */
- const unsigned s_pitch = s_mt->surf.size > 0 ?
- s_mt->surf.row_pitch / 2 : s_mt->pitch;
uint32_t *packed_map = map->ptr;
uint8_t *s_map = intel_miptree_map_raw(brw, s_mt, GL_MAP_WRITE_BIT);
uint32_t *z_map = intel_miptree_map_raw(brw, z_mt, GL_MAP_WRITE_BIT);
for (uint32_t y = 0; y < map->h; y++) {
for (uint32_t x = 0; x < map->w; x++) {
- ptrdiff_t s_offset = intel_offset_S8(s_pitch,
+ ptrdiff_t s_offset = intel_offset_S8(s_mt->surf.row_pitch,
x + s_image_x + map->x,
y + s_image_y + map->y,
brw->has_swizzling);
ptrdiff_t z_offset = ((y + z_image_y + map->y) *
- (z_mt->pitch / 4) +
+ (z_mt->surf.row_pitch / 4) +
(x + z_image_x + map->x));
if (map_z32f_x24s8) {
unsigned int level, unsigned int slice)
{
/* See intel_miptree_blit() for details on the 32k pitch limit. */
- if (mt->pitch >= 32768)
+ if (mt->surf.row_pitch >= 32768)
return false;
return true;
*/
!(mode & GL_MAP_WRITE_BIT) &&
!mt->compressed &&
- (mt->tiling == I915_TILING_X ||
+ (mt->surf.tiling == ISL_TILING_X ||
/* Prior to Sandybridge, the blitter can't handle Y tiling */
- (brw->gen >= 6 && mt->tiling == I915_TILING_Y) ||
+ (brw->gen >= 6 && mt->surf.tiling == ISL_TILING_Y0) ||
/* Fast copy blit on skl+ supports all tiling formats. */
brw->gen >= 9) &&
can_blit_slice(mt, level, slice))
return true;
- if (mt->tiling != I915_TILING_NONE &&
+ if (mt->surf.tiling != ISL_TILING_LINEAR &&
mt->bo->size >= brw->max_gtt_map_object_size) {
assert(can_blit_slice(mt, level, slice));
return true;
{
struct intel_miptree_map *map;
- assert(mt->num_samples <= 1);
+ assert(mt->surf.samples == 1);
map = intel_miptree_attach_map(mt, level, slice, x, y, w, h, mode);
if (!map){
#if defined(USE_SSE41)
} else if (!(mode & GL_MAP_WRITE_BIT) &&
!mt->compressed && cpu_has_sse4_1 &&
- (mt->pitch % 16 == 0)) {
+ (mt->surf.row_pitch % 16 == 0)) {
intel_miptree_map_movntdqa(brw, mt, map, level, slice);
#endif
} else {
{
struct intel_miptree_map *map = mt->level[level].slice[slice].map;
- assert(mt->num_samples <= 1);
+ assert(mt->surf.samples == 1);
if (!map)
return;
}
enum isl_dim_layout
-get_isl_dim_layout(const struct gen_device_info *devinfo, uint32_t tiling,
- GLenum target, enum miptree_array_layout array_layout)
+get_isl_dim_layout(const struct gen_device_info *devinfo,
+ enum isl_tiling tiling, GLenum target)
{
- if (array_layout == GEN6_HIZ_STENCIL)
- return ISL_DIM_LAYOUT_GEN6_STENCIL_HIZ;
-
switch (target) {
case GL_TEXTURE_1D:
case GL_TEXTURE_1D_ARRAY:
- return (devinfo->gen >= 9 && tiling == I915_TILING_NONE ?
+ return (devinfo->gen >= 9 && tiling == ISL_TILING_LINEAR ?
ISL_DIM_LAYOUT_GEN9_1D : ISL_DIM_LAYOUT_GEN4_2D);
case GL_TEXTURE_2D:
unreachable("Invalid texture target");
}
-enum isl_tiling
-intel_miptree_get_isl_tiling(const struct intel_mipmap_tree *mt)
-{
- if (mt->format == MESA_FORMAT_S_UINT8) {
- return ISL_TILING_W;
- } else {
- switch (mt->tiling) {
- case I915_TILING_NONE:
- return ISL_TILING_LINEAR;
- case I915_TILING_X:
- return ISL_TILING_X;
- case I915_TILING_Y:
- return ISL_TILING_Y0;
- default:
- unreachable("Invalid tiling mode");
- }
- }
-}
-
-void
-intel_miptree_get_isl_surf(struct brw_context *brw,
- const struct intel_mipmap_tree *mt,
- struct isl_surf *surf)
-{
- surf->dim = get_isl_surf_dim(mt->target);
- surf->dim_layout = get_isl_dim_layout(&brw->screen->devinfo,
- mt->tiling, mt->target,
- mt->array_layout);
-
- if (mt->num_samples > 1) {
- switch (mt->msaa_layout) {
- case INTEL_MSAA_LAYOUT_IMS:
- surf->msaa_layout = ISL_MSAA_LAYOUT_INTERLEAVED;
- break;
- case INTEL_MSAA_LAYOUT_UMS:
- case INTEL_MSAA_LAYOUT_CMS:
- surf->msaa_layout = ISL_MSAA_LAYOUT_ARRAY;
- break;
- default:
- unreachable("Invalid MSAA layout");
- }
- } else {
- surf->msaa_layout = ISL_MSAA_LAYOUT_NONE;
- }
-
- surf->tiling = intel_miptree_get_isl_tiling(mt);
-
- if (mt->format == MESA_FORMAT_S_UINT8) {
- /* The ISL definition of row_pitch matches the surface state pitch field
- * a bit better than intel_mipmap_tree. In particular, ISL incorporates
- * the factor of 2 for W-tiling in row_pitch.
- */
- surf->row_pitch = 2 * mt->pitch;
- } else {
- surf->row_pitch = mt->pitch;
- }
-
- surf->format = translate_tex_format(brw, mt->format, false);
-
- if (brw->gen >= 9) {
- if (surf->dim == ISL_SURF_DIM_1D && surf->tiling == ISL_TILING_LINEAR) {
- /* For gen9 1-D surfaces, intel_mipmap_tree has a bogus alignment. */
- surf->image_alignment_el = isl_extent3d(64, 1, 1);
- } else {
- /* On gen9+, intel_mipmap_tree stores the horizontal and vertical
- * alignment in terms of surface elements like we want.
- */
- surf->image_alignment_el = isl_extent3d(mt->halign, mt->valign, 1);
- }
- } else {
- /* On earlier gens it's stored in pixels. */
- unsigned bw, bh;
- _mesa_get_format_block_size(mt->format, &bw, &bh);
- surf->image_alignment_el =
- isl_extent3d(mt->halign / bw, mt->valign / bh, 1);
- }
-
- surf->logical_level0_px.width = mt->logical_width0;
- surf->logical_level0_px.height = mt->logical_height0;
- if (surf->dim == ISL_SURF_DIM_3D) {
- surf->logical_level0_px.depth = mt->logical_depth0;
- surf->logical_level0_px.array_len = 1;
- } else {
- surf->logical_level0_px.depth = 1;
- surf->logical_level0_px.array_len = mt->logical_depth0;
- }
-
- surf->phys_level0_sa.width = mt->physical_width0;
- surf->phys_level0_sa.height = mt->physical_height0;
- if (surf->dim == ISL_SURF_DIM_3D) {
- surf->phys_level0_sa.depth = mt->physical_depth0;
- surf->phys_level0_sa.array_len = 1;
- } else {
- surf->phys_level0_sa.depth = 1;
- surf->phys_level0_sa.array_len = mt->physical_depth0;
- }
-
- surf->levels = mt->last_level - mt->first_level + 1;
- surf->samples = MAX2(mt->num_samples, 1);
-
- surf->size = 0; /* TODO */
- surf->alignment = 0; /* TODO */
-
- switch (surf->dim_layout) {
- case ISL_DIM_LAYOUT_GEN4_2D:
- case ISL_DIM_LAYOUT_GEN4_3D:
- case ISL_DIM_LAYOUT_GEN6_STENCIL_HIZ:
- if (brw->gen >= 9) {
- surf->array_pitch_el_rows = mt->qpitch;
- } else {
- unsigned bw, bh;
- _mesa_get_format_block_size(mt->format, &bw, &bh);
- assert(mt->qpitch % bh == 0);
- surf->array_pitch_el_rows = mt->qpitch / bh;
- }
- break;
- case ISL_DIM_LAYOUT_GEN9_1D:
- surf->array_pitch_el_rows = 1;
- break;
- }
-
- switch (mt->array_layout) {
- case ALL_LOD_IN_EACH_SLICE:
- surf->array_pitch_span = ISL_ARRAY_PITCH_SPAN_FULL;
- break;
- case ALL_SLICES_AT_EACH_LOD:
- case GEN6_HIZ_STENCIL:
- surf->array_pitch_span = ISL_ARRAY_PITCH_SPAN_COMPACT;
- break;
- default:
- unreachable("Invalid array layout");
- }
-
- GLenum base_format = _mesa_get_format_base_format(mt->format);
- switch (base_format) {
- case GL_DEPTH_COMPONENT:
- surf->usage = ISL_SURF_USAGE_DEPTH_BIT | ISL_SURF_USAGE_TEXTURE_BIT;
- break;
- case GL_STENCIL_INDEX:
- surf->usage = ISL_SURF_USAGE_STENCIL_BIT;
- if (brw->gen >= 8)
- surf->usage |= ISL_SURF_USAGE_TEXTURE_BIT;
- break;
- case GL_DEPTH_STENCIL:
- /* In this case we only texture from the depth part */
- surf->usage = ISL_SURF_USAGE_DEPTH_BIT | ISL_SURF_USAGE_STENCIL_BIT |
- ISL_SURF_USAGE_TEXTURE_BIT;
- break;
- default:
- surf->usage = ISL_SURF_USAGE_TEXTURE_BIT;
- if (brw->mesa_format_supports_render[mt->format])
- surf->usage = ISL_SURF_USAGE_RENDER_TARGET_BIT;
- break;
- }
-
- if (_mesa_is_cube_map_texture(mt->target))
- surf->usage |= ISL_SURF_USAGE_CUBE_BIT;
-}
-
enum isl_aux_usage
intel_miptree_get_aux_isl_usage(const struct brw_context *brw,
const struct intel_mipmap_tree *mt)