We want a way to access the auxiliary buffer's clear_color_bo without
caring whether the auxiliary buffer is specifically an MCS or a HiZ
buffer.
v2 (Jason Ekstrand):
* Drop intel_miptree_get_aux_buffer().
* Mention CCS in the aux_buf field.
Reviewed-by: Rafael Antognolli <rafael.antognolli@intel.com> (v1)
Reviewed-by: Jason Ekstrand <jason@jlekstrand.net>
*/
surf->clear_color = mt->fast_clear_color;
*/
surf->clear_color = mt->fast_clear_color;
- struct intel_miptree_aux_buffer *aux_buf =
- intel_miptree_get_aux_buffer(mt);
- surf->aux_surf = &aux_buf->surf;
+ surf->aux_surf = &mt->aux_buf->surf;
surf->aux_addr = (struct blorp_address) {
.reloc_flags = is_render_target ? EXEC_OBJECT_WRITE : 0,
.mocs = surf->addr.mocs,
};
surf->aux_addr = (struct blorp_address) {
.reloc_flags = is_render_target ? EXEC_OBJECT_WRITE : 0,
.mocs = surf->addr.mocs,
};
- surf->aux_addr.buffer = aux_buf->bo;
- surf->aux_addr.offset = aux_buf->offset;
+ surf->aux_addr.buffer = mt->aux_buf->bo;
+ surf->aux_addr.offset = mt->aux_buf->offset;
if (devinfo->gen >= 10) {
surf->clear_color_addr = (struct blorp_address) {
if (devinfo->gen >= 10) {
surf->clear_color_addr = (struct blorp_address) {
- .buffer = aux_buf->clear_color_bo,
- .offset = aux_buf->clear_color_offset,
+ .buffer = mt->aux_buf->clear_color_bo,
+ .offset = mt->aux_buf->clear_color_offset,
/* If the MCS buffer hasn't been allocated yet, we need to allocate it now.
*/
/* If the MCS buffer hasn't been allocated yet, we need to allocate it now.
*/
- if (can_fast_clear && !irb->mt->mcs_buf) {
+ if (can_fast_clear && !irb->mt->aux_buf) {
assert(irb->mt->aux_usage == ISL_AUX_USAGE_CCS_D);
if (!intel_miptree_alloc_ccs(brw, irb->mt)) {
/* There are a few reasons in addition to out-of-memory, that can
assert(irb->mt->aux_usage == ISL_AUX_USAGE_CCS_D);
if (!intel_miptree_alloc_ccs(brw, irb->mt)) {
/* There are a few reasons in addition to out-of-memory, that can
brw_emit_pipe_control_flush(brw, PIPE_CONTROL_DEPTH_STALL);
}
brw_emit_pipe_control_flush(brw, PIPE_CONTROL_DEPTH_STALL);
}
- assert(mt->aux_usage == ISL_AUX_USAGE_HIZ && mt->hiz_buf);
+ assert(mt->aux_usage == ISL_AUX_USAGE_HIZ && mt->aux_buf);
struct isl_surf isl_tmp[2];
struct blorp_surf surf;
struct isl_surf isl_tmp[2];
struct blorp_surf surf;
* buffer when doing a fast clear. Since we are skipping the fast
* clear here, we need to update the clear color ourselves.
*/
* buffer when doing a fast clear. Since we are skipping the fast
* clear here, we need to update the clear color ourselves.
*/
- uint32_t clear_offset = mt->hiz_buf->clear_color_offset;
+ uint32_t clear_offset = mt->aux_buf->clear_color_offset;
union isl_color_value clear_color = { .f32 = { clear_value, } };
/* We can't update the clear color while the hardware is still using
union isl_color_value clear_color = { .f32 = { clear_value, } };
/* We can't update the clear color while the hardware is still using
*/
brw_emit_pipe_control_flush(brw, PIPE_CONTROL_CS_STALL);
for (int i = 0; i < 4; i++) {
*/
brw_emit_pipe_control_flush(brw, PIPE_CONTROL_CS_STALL);
for (int i = 0; i < 4; i++) {
- brw_store_data_imm32(brw, mt->hiz_buf->clear_color_bo,
+ brw_store_data_imm32(brw, mt->aux_buf->clear_color_bo,
clear_offset + i * 4, clear_color.u32[i]);
}
brw_emit_pipe_control_flush(brw, PIPE_CONTROL_STATE_CACHE_INVALIDATE);
clear_offset + i * 4, clear_color.u32[i]);
}
brw_emit_pipe_control_flush(brw, PIPE_CONTROL_STATE_CACHE_INVALIDATE);
if (intel_tex->mt->aux_usage == ISL_AUX_USAGE_MCS) {
assert(devinfo->gen >= 7);
assert(intel_tex->mt->surf.samples > 1);
if (intel_tex->mt->aux_usage == ISL_AUX_USAGE_MCS) {
assert(devinfo->gen >= 7);
assert(intel_tex->mt->surf.samples > 1);
- assert(intel_tex->mt->mcs_buf);
+ assert(intel_tex->mt->aux_buf);
assert(intel_tex->mt->surf.msaa_layout == ISL_MSAA_LAYOUT_ARRAY);
key->compressed_multisample_layout_mask |= 1 << s;
assert(intel_tex->mt->surf.msaa_layout == ISL_MSAA_LAYOUT_ARRAY);
key->compressed_multisample_layout_mask |= 1 << s;
struct brw_bo *aux_bo = NULL;
struct isl_surf *aux_surf = NULL;
uint64_t aux_offset = 0;
struct brw_bo *aux_bo = NULL;
struct isl_surf *aux_surf = NULL;
uint64_t aux_offset = 0;
- struct intel_miptree_aux_buffer *aux_buf = intel_miptree_get_aux_buffer(mt);
if (aux_usage != ISL_AUX_USAGE_NONE) {
if (aux_usage != ISL_AUX_USAGE_NONE) {
- aux_surf = &aux_buf->surf;
- aux_bo = aux_buf->bo;
- aux_offset = aux_buf->offset;
+ aux_surf = &mt->aux_buf->surf;
+ aux_bo = mt->aux_buf->bo;
+ aux_offset = mt->aux_buf->offset;
/* We only really need a clear color if we also have an auxiliary
* surface. Without one, it does nothing.
/* We only really need a clear color if we also have an auxiliary
* surface. Without one, it does nothing.
struct brw_bo *clear_bo = NULL;
uint32_t clear_offset = 0;
if (use_clear_address) {
struct brw_bo *clear_bo = NULL;
uint32_t clear_offset = 0;
if (use_clear_address) {
- clear_bo = aux_buf->clear_color_bo;
- clear_offset = aux_buf->clear_color_offset;
+ clear_bo = mt->aux_buf->clear_color_bo;
+ clear_offset = mt->aux_buf->clear_color_offset;
}
isl_surf_fill_state(&brw->isl_dev, state, .surf = &surf, .view = &view,
}
isl_surf_fill_state(&brw->isl_dev, state, .surf = &surf, .view = &view,
assert(depth_mt);
uint32_t offset;
assert(depth_mt);
uint32_t offset;
- isl_surf_get_image_offset_B_tile_sa(&depth_mt->hiz_buf->surf,
+ isl_surf_get_image_offset_B_tile_sa(&depth_mt->aux_buf->surf,
lod, 0, 0, &offset, NULL, NULL);
BEGIN_BATCH(3);
OUT_BATCH((_3DSTATE_HIER_DEPTH_BUFFER << 16) | (3 - 2));
lod, 0, 0, &offset, NULL, NULL);
BEGIN_BATCH(3);
OUT_BATCH((_3DSTATE_HIER_DEPTH_BUFFER << 16) | (3 - 2));
- OUT_BATCH(depth_mt->hiz_buf->surf.row_pitch - 1);
- OUT_RELOC(depth_mt->hiz_buf->bo, RELOC_WRITE, offset);
+ OUT_BATCH(depth_mt->aux_buf->surf.row_pitch - 1);
+ OUT_RELOC(depth_mt->aux_buf->bo, RELOC_WRITE, offset);
ADVANCE_BATCH();
} else {
BEGIN_BATCH(3);
ADVANCE_BATCH();
} else {
BEGIN_BATCH(3);
BEGIN_BATCH(3);
OUT_BATCH(GEN7_3DSTATE_HIER_DEPTH_BUFFER << 16 | (3 - 2));
OUT_BATCH((mocs << 25) |
BEGIN_BATCH(3);
OUT_BATCH(GEN7_3DSTATE_HIER_DEPTH_BUFFER << 16 | (3 - 2));
OUT_BATCH((mocs << 25) |
- (depth_mt->hiz_buf->pitch - 1));
- OUT_RELOC(depth_mt->hiz_buf->bo, RELOC_WRITE, 0);
+ (depth_mt->aux_buf->pitch - 1));
+ OUT_RELOC(depth_mt->aux_buf->bo, RELOC_WRITE, 0);
assert(depth_mt);
BEGIN_BATCH(5);
OUT_BATCH(GEN7_3DSTATE_HIER_DEPTH_BUFFER << 16 | (5 - 2));
assert(depth_mt);
BEGIN_BATCH(5);
OUT_BATCH(GEN7_3DSTATE_HIER_DEPTH_BUFFER << 16 | (5 - 2));
- OUT_BATCH((depth_mt->hiz_buf->pitch - 1) | mocs_wb << 25);
- OUT_RELOC64(depth_mt->hiz_buf->bo, RELOC_WRITE, 0);
- OUT_BATCH(depth_mt->hiz_buf->qpitch >> 2);
+ OUT_BATCH((depth_mt->aux_buf->pitch - 1) | mocs_wb << 25);
+ OUT_RELOC64(depth_mt->aux_buf->bo, RELOC_WRITE, 0);
+ OUT_BATCH(depth_mt->aux_buf->qpitch >> 2);
assert(mt->last_level == 0);
/* We shouldn't already have a CCS */
assert(mt->last_level == 0);
/* We shouldn't already have a CCS */
if (!isl_surf_get_ccs_surf(&brw->isl_dev, &mt->surf, &temp_ccs_surf,
image->aux_pitch))
if (!isl_surf_get_ccs_surf(&brw->isl_dev, &mt->surf, &temp_ccs_surf,
image->aux_pitch))
assert(image->aux_offset < image->bo->size);
assert(temp_ccs_surf.size <= image->bo->size - image->aux_offset);
assert(image->aux_offset < image->bo->size);
assert(temp_ccs_surf.size <= image->bo->size - image->aux_offset);
- mt->mcs_buf = calloc(sizeof(*mt->mcs_buf), 1);
- if (mt->mcs_buf == NULL)
+ mt->aux_buf = calloc(sizeof(*mt->aux_buf), 1);
+ if (mt->aux_buf == NULL)
return false;
mt->aux_state = create_aux_state_map(mt, initial_state);
if (!mt->aux_state) {
return false;
mt->aux_state = create_aux_state_map(mt, initial_state);
if (!mt->aux_state) {
- free(mt->mcs_buf);
- mt->mcs_buf = NULL;
+ free(mt->aux_buf);
+ mt->aux_buf = NULL;
*/
const struct gen_device_info *devinfo = &brw->screen->devinfo;
if (devinfo->gen >= 10) {
*/
const struct gen_device_info *devinfo = &brw->screen->devinfo;
if (devinfo->gen >= 10) {
- mt->mcs_buf->clear_color_bo =
+ mt->aux_buf->clear_color_bo =
brw_bo_alloc(brw->bufmgr, "clear_color_bo",
brw->isl_dev.ss.clear_color_state_size);
brw_bo_alloc(brw->bufmgr, "clear_color_bo",
brw->isl_dev.ss.clear_color_state_size);
- if (!mt->mcs_buf->clear_color_bo) {
- free(mt->mcs_buf);
- mt->mcs_buf = NULL;
+ if (!mt->aux_buf->clear_color_bo) {
+ free(mt->aux_buf);
+ mt->aux_buf = NULL;
- mt->mcs_buf->bo = image->bo;
+ mt->aux_buf->bo = image->bo;
brw_bo_reference(image->bo);
brw_bo_reference(image->bo);
- mt->mcs_buf->offset = image->aux_offset;
- mt->mcs_buf->size = image->bo->size - image->aux_offset;
- mt->mcs_buf->pitch = image->aux_pitch;
- mt->mcs_buf->qpitch = 0;
- mt->mcs_buf->surf = temp_ccs_surf;
+ mt->aux_buf->offset = image->aux_offset;
+ mt->aux_buf->size = image->bo->size - image->aux_offset;
+ mt->aux_buf->pitch = image->aux_pitch;
+ mt->aux_buf->qpitch = 0;
+ mt->aux_buf->surf = temp_ccs_surf;
brw_bo_unreference((*mt)->bo);
intel_miptree_release(&(*mt)->stencil_mt);
intel_miptree_release(&(*mt)->r8stencil_mt);
brw_bo_unreference((*mt)->bo);
intel_miptree_release(&(*mt)->stencil_mt);
intel_miptree_release(&(*mt)->r8stencil_mt);
- intel_miptree_aux_buffer_free(intel_miptree_get_aux_buffer(*mt));
+ intel_miptree_aux_buffer_free((*mt)->aux_buf);
free_aux_state_map((*mt)->aux_state);
intel_miptree_release(&(*mt)->plane[0]);
free_aux_state_map((*mt)->aux_state);
intel_miptree_release(&(*mt)->plane[0]);
struct intel_mipmap_tree *mt,
int init_value)
{
struct intel_mipmap_tree *mt,
int init_value)
{
- assert(mt->mcs_buf != NULL);
+ assert(mt->aux_buf != NULL);
/* From the Ivy Bridge PRM, Vol 2 Part 1 p326:
*
/* From the Ivy Bridge PRM, Vol 2 Part 1 p326:
*
*
* Note: the clear value for MCS buffers is all 1's, so we memset to 0xff.
*/
*
* Note: the clear value for MCS buffers is all 1's, so we memset to 0xff.
*/
- void *map = brw_bo_map(brw, mt->mcs_buf->bo, MAP_WRITE | MAP_RAW);
+ void *map = brw_bo_map(brw, mt->aux_buf->bo, MAP_WRITE | MAP_RAW);
if (unlikely(map == NULL)) {
fprintf(stderr, "Failed to map mcs buffer into GTT\n");
if (unlikely(map == NULL)) {
fprintf(stderr, "Failed to map mcs buffer into GTT\n");
- brw_bo_unreference(mt->mcs_buf->bo);
- free(mt->mcs_buf);
+ brw_bo_unreference(mt->aux_buf->bo);
+ free(mt->aux_buf);
return;
}
void *data = map;
return;
}
void *data = map;
- memset(data, init_value, mt->mcs_buf->size);
- brw_bo_unmap(mt->mcs_buf->bo);
+ memset(data, init_value, mt->aux_buf->size);
+ brw_bo_unmap(mt->aux_buf->bo);
}
static struct intel_miptree_aux_buffer *
}
static struct intel_miptree_aux_buffer *
GLuint num_samples)
{
assert(brw->screen->devinfo.gen >= 7); /* MCS only used on Gen7+ */
GLuint num_samples)
{
assert(brw->screen->devinfo.gen >= 7); /* MCS only used on Gen7+ */
- assert(mt->mcs_buf == NULL);
+ assert(mt->aux_buf == NULL);
assert(mt->aux_usage == ISL_AUX_USAGE_MCS);
/* Multisampled miptrees are only supported for single level. */
assert(mt->aux_usage == ISL_AUX_USAGE_MCS);
/* Multisampled miptrees are only supported for single level. */
* to be just used by the GPU.
*/
const uint32_t alloc_flags = 0;
* to be just used by the GPU.
*/
const uint32_t alloc_flags = 0;
- mt->mcs_buf = intel_alloc_aux_buffer(brw, "mcs-miptree",
+ mt->aux_buf = intel_alloc_aux_buffer(brw, "mcs-miptree",
&temp_mcs_surf, alloc_flags, mt);
&temp_mcs_surf, alloc_flags, mt);
free(aux_state);
return false;
}
free(aux_state);
return false;
}
intel_miptree_alloc_ccs(struct brw_context *brw,
struct intel_mipmap_tree *mt)
{
intel_miptree_alloc_ccs(struct brw_context *brw,
struct intel_mipmap_tree *mt)
{
- assert(mt->mcs_buf == NULL);
+ assert(mt->aux_buf == NULL);
assert(mt->aux_usage == ISL_AUX_USAGE_CCS_E ||
mt->aux_usage == ISL_AUX_USAGE_CCS_D);
assert(mt->aux_usage == ISL_AUX_USAGE_CCS_E ||
mt->aux_usage == ISL_AUX_USAGE_CCS_D);
*/
const uint32_t alloc_flags = mt->aux_usage == ISL_AUX_USAGE_CCS_E ?
BO_ALLOC_ZEROED : BO_ALLOC_BUSY;
*/
const uint32_t alloc_flags = mt->aux_usage == ISL_AUX_USAGE_CCS_E ?
BO_ALLOC_ZEROED : BO_ALLOC_BUSY;
- mt->mcs_buf = intel_alloc_aux_buffer(brw, "ccs-miptree",
+ mt->aux_buf = intel_alloc_aux_buffer(brw, "ccs-miptree",
&temp_ccs_surf, alloc_flags, mt);
&temp_ccs_surf, alloc_flags, mt);
free(aux_state);
return false;
}
free(aux_state);
return false;
}
{
const struct gen_device_info *devinfo = &brw->screen->devinfo;
{
const struct gen_device_info *devinfo = &brw->screen->devinfo;
assert(mt->surf.size > 0);
if (devinfo->gen >= 8 || devinfo->is_haswell) {
assert(mt->surf.size > 0);
if (devinfo->gen >= 8 || devinfo->is_haswell) {
intel_miptree_alloc_hiz(struct brw_context *brw,
struct intel_mipmap_tree *mt)
{
intel_miptree_alloc_hiz(struct brw_context *brw,
struct intel_mipmap_tree *mt)
{
- assert(mt->hiz_buf == NULL);
+ assert(mt->aux_buf == NULL);
assert(mt->aux_usage == ISL_AUX_USAGE_HIZ);
enum isl_aux_state **aux_state =
assert(mt->aux_usage == ISL_AUX_USAGE_HIZ);
enum isl_aux_state **aux_state =
assert(ok);
const uint32_t alloc_flags = BO_ALLOC_BUSY;
assert(ok);
const uint32_t alloc_flags = BO_ALLOC_BUSY;
- mt->hiz_buf = intel_alloc_aux_buffer(brw, "hiz-miptree",
+ mt->aux_buf = intel_alloc_aux_buffer(brw, "hiz-miptree",
&temp_hiz_surf, alloc_flags, mt);
&temp_hiz_surf, alloc_flags, mt);
free(aux_state);
return false;
}
free(aux_state);
return false;
}
{
assert(_mesa_is_format_color_format(mt->format));
{
assert(_mesa_is_format_color_format(mt->format));
return false;
/* Clamp the level range to fit the miptree */
return false;
/* Clamp the level range to fit the miptree */
const struct intel_mipmap_tree *mt,
unsigned level, unsigned layer)
{
const struct intel_mipmap_tree *mt,
unsigned level, unsigned layer)
{
return;
/* Fast color clear is supported for mipmapped surfaces only on Gen8+. */
return;
/* Fast color clear is supported for mipmapped surfaces only on Gen8+. */
break;
case ISL_AUX_USAGE_MCS:
break;
case ISL_AUX_USAGE_MCS:
assert(start_level == 0 && num_levels == 1);
const uint32_t level_layers =
miptree_layer_range_length(mt, 0, start_layer, num_layers);
assert(start_level == 0 && num_levels == 1);
const uint32_t level_layers =
miptree_layer_range_length(mt, 0, start_layer, num_layers);
case ISL_AUX_USAGE_CCS_D:
case ISL_AUX_USAGE_CCS_E:
case ISL_AUX_USAGE_CCS_D:
case ISL_AUX_USAGE_CCS_E:
return;
for (uint32_t l = 0; l < num_levels; l++) {
return;
for (uint32_t l = 0; l < num_levels; l++) {
break;
case ISL_AUX_USAGE_HIZ:
break;
case ISL_AUX_USAGE_HIZ:
for (uint32_t l = 0; l < num_levels; l++) {
const uint32_t level = start_level + l;
if (!intel_miptree_level_has_hiz(mt, level))
for (uint32_t l = 0; l < num_levels; l++) {
const uint32_t level = start_level + l;
if (!intel_miptree_level_has_hiz(mt, level))
break;
case ISL_AUX_USAGE_MCS:
break;
case ISL_AUX_USAGE_MCS:
for (uint32_t a = 0; a < num_layers; a++) {
intel_miptree_finish_mcs_write(brw, mt, start_layer + a,
aux_usage);
for (uint32_t a = 0; a < num_layers; a++) {
intel_miptree_finish_mcs_write(brw, mt, start_layer + a,
aux_usage);
case ISL_AUX_USAGE_CCS_D:
case ISL_AUX_USAGE_CCS_E:
case ISL_AUX_USAGE_CCS_D:
case ISL_AUX_USAGE_CCS_E:
return;
for (uint32_t a = 0; a < num_layers; a++) {
return;
for (uint32_t a = 0; a < num_layers; a++) {
intel_miptree_check_level_layer(mt, level, layer);
if (_mesa_is_format_color_format(mt->format)) {
intel_miptree_check_level_layer(mt, level, layer);
if (_mesa_is_format_color_format(mt->format)) {
- assert(mt->mcs_buf != NULL);
+ assert(mt->aux_buf != NULL);
assert(mt->surf.samples == 1 ||
mt->surf.msaa_layout == ISL_MSAA_LAYOUT_ARRAY);
} else if (mt->format == MESA_FORMAT_S_UINT8) {
assert(mt->surf.samples == 1 ||
mt->surf.msaa_layout == ISL_MSAA_LAYOUT_ARRAY);
} else if (mt->format == MESA_FORMAT_S_UINT8) {
num_layers = miptree_layer_range_length(mt, level, start_layer, num_layers);
if (_mesa_is_format_color_format(mt->format)) {
num_layers = miptree_layer_range_length(mt, level, start_layer, num_layers);
if (_mesa_is_format_color_format(mt->format)) {
- assert(mt->mcs_buf != NULL);
+ assert(mt->aux_buf != NULL);
assert(mt->surf.samples == 1 ||
mt->surf.msaa_layout == ISL_MSAA_LAYOUT_ARRAY);
} else if (mt->format == MESA_FORMAT_S_UINT8) {
assert(mt->surf.samples == 1 ||
mt->surf.msaa_layout == ISL_MSAA_LAYOUT_ARRAY);
} else if (mt->format == MESA_FORMAT_S_UINT8) {
case ISL_AUX_USAGE_CCS_D:
case ISL_AUX_USAGE_CCS_E:
case ISL_AUX_USAGE_CCS_D:
case ISL_AUX_USAGE_CCS_E:
assert(mt->aux_usage == ISL_AUX_USAGE_CCS_D);
return ISL_AUX_USAGE_NONE;
}
assert(mt->aux_usage == ISL_AUX_USAGE_CCS_D);
return ISL_AUX_USAGE_NONE;
}
switch (mt->aux_usage) {
case ISL_AUX_USAGE_MCS:
switch (mt->aux_usage) {
case ISL_AUX_USAGE_MCS:
return ISL_AUX_USAGE_MCS;
case ISL_AUX_USAGE_CCS_D:
case ISL_AUX_USAGE_CCS_E:
return ISL_AUX_USAGE_MCS;
case ISL_AUX_USAGE_CCS_D:
case ISL_AUX_USAGE_CCS_E:
assert(mt->aux_usage == ISL_AUX_USAGE_CCS_D);
return ISL_AUX_USAGE_NONE;
}
assert(mt->aux_usage == ISL_AUX_USAGE_CCS_D);
return ISL_AUX_USAGE_NONE;
}
uint32_t start_layer, uint32_t layer_count)
{
intel_miptree_prepare_access(brw, mt, level, 1, start_layer, layer_count,
uint32_t start_layer, uint32_t layer_count)
{
intel_miptree_prepare_access(brw, mt, level, 1, start_layer, layer_count,
- mt->aux_usage, mt->hiz_buf != NULL);
+ mt->aux_usage, mt->aux_buf != NULL);
{
if (depth_written) {
intel_miptree_finish_write(brw, mt, level, start_layer, layer_count,
{
if (depth_written) {
intel_miptree_finish_write(brw, mt, level, start_layer, layer_count,
assert(mt->surf.logical_level0_px.depth == 1);
assert(mt->surf.logical_level0_px.array_len == 1);
assert(mt->surf.samples == 1);
assert(mt->surf.logical_level0_px.depth == 1);
assert(mt->surf.logical_level0_px.array_len == 1);
assert(mt->surf.samples == 1);
- assert(mt->mcs_buf != NULL);
+ assert(mt->aux_buf != NULL);
aux_usage = mod_info->aux_usage;
supports_fast_clear = mod_info->supports_clear_color;
aux_usage = mod_info->aux_usage;
supports_fast_clear = mod_info->supports_clear_color;
intel_miptree_finish_external(struct brw_context *brw,
struct intel_mipmap_tree *mt)
{
intel_miptree_finish_external(struct brw_context *brw,
struct intel_mipmap_tree *mt)
{
return;
/* We don't know the actual aux state of the aux surface. The previous
return;
/* We don't know the actual aux state of the aux surface. The previous
0, INTEL_REMAINING_LAYERS,
ISL_AUX_USAGE_NONE, false);
0, INTEL_REMAINING_LAYERS,
ISL_AUX_USAGE_NONE, false);
- struct intel_miptree_aux_buffer *aux_buf = intel_miptree_get_aux_buffer(mt);
- if (aux_buf) {
- intel_miptree_aux_buffer_free(aux_buf);
- mt->mcs_buf = NULL;
- mt->hiz_buf = NULL;
+ if (mt->aux_buf) {
+ intel_miptree_aux_buffer_free(mt->aux_buf);
+ mt->aux_buf = NULL;
/* Make future calls of intel_miptree_level_has_hiz() return false. */
for (uint32_t l = mt->first_level; l <= mt->last_level; ++l) {
/* Make future calls of intel_miptree_level_has_hiz() return false. */
for (uint32_t l = mt->first_level; l <= mt->last_level; ++l) {
- /**
- * \brief HiZ aux buffer
- *
- * To allocate the hiz buffer, use intel_miptree_alloc_hiz().
- *
- * To determine if hiz is enabled, do not check this pointer. Instead, use
- * intel_miptree_level_has_hiz().
- */
- struct intel_miptree_aux_buffer *hiz_buf;
-
/**
* \brief The type of auxiliary compression used by this miptree.
*
/**
* \brief The type of auxiliary compression used by this miptree.
*
bool r8stencil_needs_update;
/**
bool r8stencil_needs_update;
/**
- * \brief MCS auxiliary buffer.
+ * \brief CCS, MCS, or HiZ auxiliary buffer.
+ *
+ * NULL if no auxiliary buffer is in use for this surface.
- * This buffer contains the "multisample control surface", which stores
- * the necessary information to implement compressed MSAA
- * (INTEL_MSAA_FORMAT_CMS) and "fast color clear" behaviour on Gen7+.
+ * For single-sampled color miptrees:
+ * This buffer contains the Color Control Surface, which stores the
+ * necessary information to implement lossless color compression (CCS_E)
+ * and "fast color clear" (CCS_D) behaviour.
- * NULL if no MCS buffer is in use for this surface.
+ * For multi-sampled color miptrees:
+ * This buffer contains the Multisample Control Surface, which stores the
+ * necessary information to implement compressed MSAA
+ * (INTEL_MSAA_FORMAT_CMS).
+ *
+ * For depth miptrees:
+ * This buffer contains the Hierarchical Depth Buffer, which stores the
+ * necessary information to implement lossless depth compression and fast
+ * depth clear behavior.
+ *
+ * To determine if HiZ is enabled, do not check this pointer. Instead,
+ * use intel_miptree_level_has_hiz().
- struct intel_miptree_aux_buffer *mcs_buf;
+ struct intel_miptree_aux_buffer *aux_buf;
/**
* Planes 1 and 2 in case this is a planar surface.
/**
* Planes 1 and 2 in case this is a planar surface.
get_isl_dim_layout(const struct gen_device_info *devinfo,
enum isl_tiling tiling, GLenum target);
get_isl_dim_layout(const struct gen_device_info *devinfo,
enum isl_tiling tiling, GLenum target);
-static inline struct intel_miptree_aux_buffer *
-intel_miptree_get_aux_buffer(const struct intel_mipmap_tree *mt)
-{
- switch (mt->aux_usage) {
- case ISL_AUX_USAGE_MCS:
- case ISL_AUX_USAGE_CCS_D:
- case ISL_AUX_USAGE_CCS_E:
- return mt->mcs_buf;
- case ISL_AUX_USAGE_HIZ:
- return mt->hiz_buf;
- case ISL_AUX_USAGE_NONE:
- return NULL;
- default:
- unreachable("Invalid aux_usage!\n");
- }
-}
-
void
intel_get_image_dims(struct gl_texture_image *image,
int *width, int *height, int *depth);
void
intel_get_image_dims(struct gl_texture_image *image,
int *width, int *height, int *depth);
* should be a no-op in almost all cases. On the off chance that someone
* ever triggers this, we should at least warn them.
*/
* should be a no-op in almost all cases. On the off chance that someone
* ever triggers this, we should at least warn them.
*/
- if (intel_tex->mt->mcs_buf &&
+ if (intel_tex->mt->aux_buf &&
intel_miptree_get_aux_state(intel_tex->mt, 0, 0) !=
isl_drm_modifier_get_default_aux_state(intel_tex->mt->drm_modifier)) {
_mesa_warning(ctx, "Aux state changed between BindTexImage and "
intel_miptree_get_aux_state(intel_tex->mt, 0, 0) !=
isl_drm_modifier_get_default_aux_state(intel_tex->mt->drm_modifier)) {
_mesa_warning(ctx, "Aux state changed between BindTexImage and "