intel_miptree_get_aux_isl_surf(brw, mt, aux_surf, &surf->aux_usage);
/* For textures that are in the RESOLVED state, we ignore the MCS */
- if (mt->mcs_mt && !is_render_target &&
+ if (mt->mcs_buf && !is_render_target &&
mt->fast_clear_state == INTEL_FAST_CLEAR_STATE_RESOLVED)
surf->aux_usage = ISL_AUX_USAGE_NONE;
.write_domain = is_render_target ? I915_GEM_DOMAIN_RENDER : 0,
};
- if (mt->mcs_mt) {
- surf->aux_addr.buffer = mt->mcs_mt->bo;
- surf->aux_addr.offset = mt->mcs_mt->offset;
+ if (mt->mcs_buf) {
+ surf->aux_addr.buffer = mt->mcs_buf->mt->bo;
+ surf->aux_addr.offset = mt->mcs_buf->mt->offset;
} else {
assert(surf->aux_usage == ISL_AUX_USAGE_HIZ);
struct intel_mipmap_tree *hiz_mt = mt->hiz_buf->mt;
/* If the MCS buffer hasn't been allocated yet, we need to allocate
* it now.
*/
- if (!irb->mt->mcs_mt) {
+ if (!irb->mt->mcs_buf) {
assert(!is_lossless_compressed);
if (!intel_miptree_alloc_non_msrt_mcs(brw, irb->mt, false)) {
/* MCS allocation failed--probably this will only happen in
struct isl_surf *aux_surf = NULL, aux_surf_s;
uint64_t aux_offset = 0;
enum isl_aux_usage aux_usage = ISL_AUX_USAGE_NONE;
- if (mt->mcs_mt && !(flags & INTEL_AUX_BUFFER_DISABLED)) {
+ if (mt->mcs_buf && !(flags & INTEL_AUX_BUFFER_DISABLED)) {
intel_miptree_get_aux_isl_surf(brw, mt, &aux_surf_s, &aux_usage);
aux_surf = &aux_surf_s;
- assert(mt->mcs_mt->offset == 0);
- aux_offset = mt->mcs_mt->bo->offset64;
+ assert(mt->mcs_buf->mt->offset == 0);
+ aux_offset = mt->mcs_buf->mt->bo->offset64;
/* We only really need a clear color if we also have an auxiliary
* surface. Without one, it does nothing.
assert((aux_offset & 0xfff) == 0);
drm_intel_bo_emit_reloc(brw->batch.bo,
*surf_offset + 4 * ss_info.aux_reloc_dw,
- mt->mcs_mt->bo, dw[ss_info.aux_reloc_dw] & 0xfff,
+ mt->mcs_buf->mt->bo,
+ dw[ss_info.aux_reloc_dw] & 0xfff,
read_domains, write_domains);
}
}
const struct intel_mipmap_tree *mt)
{
/* Nothing to disable. */
- if (!mt->mcs_mt)
+ if (!mt->mcs_buf)
return false;
/* There are special cases only for lossless compression. */
return false;
/* Compression always requires an auxiliary buffer. */
- if (!mt->mcs_mt)
+ if (!mt->mcs_buf)
return false;
/* Single sample compression is represented re-using msaa compression
drm_intel_bo_unreference((*mt)->hiz_buf->bo);
free((*mt)->hiz_buf);
}
- intel_miptree_release(&(*mt)->mcs_mt);
+ if ((*mt)->mcs_buf) {
+ intel_miptree_release(&(*mt)->mcs_buf->mt);
+ free((*mt)->mcs_buf);
+ }
intel_resolve_map_clear(&(*mt)->hiz_map);
intel_miptree_release(&(*mt)->plane[0]);
*
* Note: the clear value for MCS buffers is all 1's, so we memset to 0xff.
*/
- void *data = intel_miptree_map_raw(brw, mt->mcs_mt);
- memset(data, init_value, mt->mcs_mt->total_height * mt->mcs_mt->pitch);
- intel_miptree_unmap_raw(mt->mcs_mt);
+ void *data = intel_miptree_map_raw(brw, mt->mcs_buf->mt);
+ memset(data, init_value,
+ mt->mcs_buf->mt->total_height * mt->mcs_buf->mt->pitch);
+ intel_miptree_unmap_raw(mt->mcs_buf->mt);
mt->fast_clear_state = INTEL_FAST_CLEAR_STATE_CLEAR;
}
+static struct intel_miptree_aux_buffer *
+intel_mcs_miptree_buf_create(struct brw_context *brw,
+ struct intel_mipmap_tree *mt,
+ mesa_format format,
+ unsigned mcs_width,
+ unsigned mcs_height,
+ uint32_t layout_flags)
+{
+ struct intel_miptree_aux_buffer *buf = calloc(sizeof(*buf), 1);
+
+ if (!buf)
+ return NULL;
+
+ /* From the Ivy Bridge PRM, Vol4 Part1 p76, "MCS Base Address":
+ *
+ * "The MCS surface must be stored as Tile Y."
+ */
+ layout_flags |= MIPTREE_LAYOUT_TILING_Y;
+ buf->mt = miptree_create(brw,
+ mt->target,
+ format,
+ mt->first_level,
+ mt->last_level,
+ mcs_width,
+ mcs_height,
+ mt->logical_depth0,
+ 0 /* num_samples */,
+ layout_flags);
+ if (!buf->mt) {
+ free(buf);
+ return NULL;
+ }
+
+ buf->bo = buf->mt->bo;
+ buf->pitch = buf->mt->pitch;
+ buf->qpitch = buf->mt->qpitch;
+
+ return buf;
+}
+
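/* Illustrative sketch (not part of this patch): based on the fields the new
 * code touches (buf->mt, buf->bo, buf->pitch, buf->qpitch and the
 * mt->mcs_buf->mt dereferences above), the intel_miptree_aux_buffer wrapper
 * is assumed to look roughly as follows; the authoritative definition lives
 * in intel_mipmap_tree.h.
 */
struct intel_miptree_aux_buffer {
   struct intel_mipmap_tree *mt; /* miptree providing the aux storage */
   drm_intel_bo *bo;             /* BO backing the aux data (buf->mt->bo here) */
   uint32_t pitch;               /* row stride of the aux surface, in bytes */
   uint32_t qpitch;              /* distance in rows between array slices */
};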
static bool
intel_miptree_alloc_mcs(struct brw_context *brw,
struct intel_mipmap_tree *mt,
GLuint num_samples)
{
assert(brw->gen >= 7); /* MCS only used on Gen7+ */
- assert(mt->mcs_mt == NULL);
+ assert(mt->mcs_buf == NULL);
assert(!mt->disable_aux_buffers);
/* Choose the correct format for the MCS buffer. All that really matters
unreachable("Unrecognized sample count in intel_miptree_alloc_mcs");
};
- /* From the Ivy Bridge PRM, Vol4 Part1 p76, "MCS Base Address":
- *
- * "The MCS surface must be stored as Tile Y."
- */
- const uint32_t mcs_flags = MIPTREE_LAYOUT_ACCELERATED_UPLOAD |
- MIPTREE_LAYOUT_TILING_Y;
- mt->mcs_mt = miptree_create(brw,
- mt->target,
- format,
- mt->first_level,
- mt->last_level,
- mt->logical_width0,
- mt->logical_height0,
- mt->logical_depth0,
- 0 /* num_samples */,
- mcs_flags);
+ mt->mcs_buf =
+ intel_mcs_miptree_buf_create(brw, mt,
+ format,
+ mt->logical_width0,
+ mt->logical_height0,
+ MIPTREE_LAYOUT_ACCELERATED_UPLOAD);
+ if (!mt->mcs_buf)
+ return false;
+
intel_miptree_init_mcs(brw, mt, 0xFF);
- return mt->mcs_mt;
+ return true;
}
struct intel_mipmap_tree *mt,
bool is_lossless_compressed)
{
- assert(mt->mcs_mt == NULL);
+ assert(mt->mcs_buf == NULL);
assert(!mt->disable_aux_buffers);
/* The format of the MCS buffer is opaque to the driver; all that matters
unsigned mcs_height =
ALIGN(mt->logical_height0, height_divisor) / height_divisor;
assert(mt->logical_depth0 == 1);
- uint32_t layout_flags = MIPTREE_LAYOUT_TILING_Y;
-
- if (brw->gen >= 8) {
- layout_flags |= MIPTREE_LAYOUT_FORCE_HALIGN16;
- }
+ uint32_t layout_flags =
+ (brw->gen >= 8) ? MIPTREE_LAYOUT_FORCE_HALIGN16 : 0;
/* In case of compression the mcs buffer needs to be initialised, requiring
* the buffer to be immediately mapped to cpu space for writing. Therefore do
* not use the gpu access flag which can cause an unnecessary delay if the
if (!is_lossless_compressed)
layout_flags |= MIPTREE_LAYOUT_ACCELERATED_UPLOAD;
- mt->mcs_mt = miptree_create(brw,
- mt->target,
- format,
- mt->first_level,
- mt->last_level,
- mcs_width,
- mcs_height,
- mt->logical_depth0,
- 0 /* num_samples */,
- layout_flags);
+ mt->mcs_buf = intel_mcs_miptree_buf_create(brw, mt,
+ format,
+ mcs_width,
+ mcs_height,
+ layout_flags);
/* From Gen9 onwards single-sampled (non-msrt) auxiliary buffers are
* used for lossless compression which requires similar initialisation
mt->msaa_layout = INTEL_MSAA_LAYOUT_CMS;
}
- return mt->mcs_mt;
+ return mt->mcs_buf != NULL;
}
/**
*/
assert(mt->msaa_layout == INTEL_MSAA_LAYOUT_NONE);
- if (mt->mcs_mt) {
+ if (mt->mcs_buf) {
intel_miptree_resolve_color(brw, mt, 0);
- intel_miptree_release(&mt->mcs_mt);
+ intel_miptree_release(&mt->mcs_buf->mt);
+ free(mt->mcs_buf);
+ mt->mcs_buf = NULL;
mt->fast_clear_state = INTEL_FAST_CLEAR_STATE_NO_MCS;
}
}
enum isl_aux_usage *usage)
{
uint32_t aux_pitch, aux_qpitch;
- if (mt->mcs_mt) {
- aux_pitch = mt->mcs_mt->pitch;
- aux_qpitch = mt->mcs_mt->qpitch;
+ if (mt->mcs_buf) {
+ aux_pitch = mt->mcs_buf->mt->pitch;
+ aux_qpitch = mt->mcs_buf->mt->qpitch;
if (mt->num_samples > 1) {
assert(mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS);