#define FILE_DEBUG_FLAG DEBUG_MIPTREE
static void *intel_miptree_map_raw(struct brw_context *brw,
- struct intel_mipmap_tree *mt);
+ struct intel_mipmap_tree *mt,
+ GLbitfield mode);
static void intel_miptree_unmap_raw(struct intel_mipmap_tree *mt);
if (brw->gen >= 9) {
mesa_format linear_format = _mesa_get_srgb_format_linear(mt->format);
- const uint32_t brw_format = brw_isl_format_for_mesa_format(linear_format);
- return isl_format_supports_ccs_e(&brw->screen->devinfo, brw_format);
+ const enum isl_format isl_format =
+ brw_isl_format_for_mesa_format(linear_format);
+ return isl_format_supports_ccs_e(&brw->screen->devinfo, isl_format);
} else
return true;
}
intel_miptree_wants_hiz_buffer(brw, mt)))) {
uint32_t stencil_flags = MIPTREE_LAYOUT_ACCELERATED_UPLOAD;
if (brw->gen == 6) {
- stencil_flags |= MIPTREE_LAYOUT_FORCE_ALL_SLICE_AT_LOD |
+ stencil_flags |= MIPTREE_LAYOUT_GEN6_HIZ_STENCIL |
MIPTREE_LAYOUT_TILING_ANY;
}
}
}
- if (layout_flags & MIPTREE_LAYOUT_FORCE_ALL_SLICE_AT_LOD)
- mt->array_layout = ALL_SLICES_AT_EACH_LOD;
+ if (layout_flags & MIPTREE_LAYOUT_GEN6_HIZ_STENCIL)
+ mt->array_layout = GEN6_HIZ_STENCIL;
/*
* Obey HALIGN_16 constraints for Gen8 and Gen9 buffers which are
if (layout_flags & MIPTREE_LAYOUT_ACCELERATED_UPLOAD)
alloc_flags |= BO_ALLOC_FOR_RENDER;
- unsigned long pitch;
mt->etc_format = etc_format;
if (format == MESA_FORMAT_S_UINT8) {
/* Align to size of W tile, 64x64. */
- mt->bo = drm_intel_bo_alloc_tiled(brw->bufmgr, "miptree",
- ALIGN(mt->total_width, 64),
- ALIGN(mt->total_height, 64),
- mt->cpp, &mt->tiling, &pitch,
- alloc_flags);
+ mt->bo = brw_bo_alloc_tiled(brw->bufmgr, "miptree",
+ ALIGN(mt->total_width, 64),
+ ALIGN(mt->total_height, 64),
+ mt->cpp, mt->tiling, &mt->pitch,
+ alloc_flags);
} else {
- mt->bo = drm_intel_bo_alloc_tiled(brw->bufmgr, "miptree",
- mt->total_width, mt->total_height,
- mt->cpp, &mt->tiling, &pitch,
- alloc_flags);
+ mt->bo = brw_bo_alloc_tiled(brw->bufmgr, "miptree",
+ mt->total_width, mt->total_height,
+ mt->cpp, mt->tiling, &mt->pitch,
+ alloc_flags);
}
- mt->pitch = pitch;
+ if (layout_flags & MIPTREE_LAYOUT_FOR_SCANOUT)
+ mt->bo->cache_coherent = false;
return mt;
}
*/
if (brw->gen < 6 && mt->bo->size >= brw->max_gtt_map_object_size &&
mt->tiling == I915_TILING_Y) {
- unsigned long pitch = mt->pitch;
const uint32_t alloc_flags =
(layout_flags & MIPTREE_LAYOUT_ACCELERATED_UPLOAD) ?
BO_ALLOC_FOR_RENDER : 0;
mt->total_width, mt->total_height);
mt->tiling = I915_TILING_X;
- drm_intel_bo_unreference(mt->bo);
- mt->bo = drm_intel_bo_alloc_tiled(brw->bufmgr, "miptree",
+ brw_bo_unreference(mt->bo);
+ mt->bo = brw_bo_alloc_tiled(brw->bufmgr, "miptree",
mt->total_width, mt->total_height, mt->cpp,
- &mt->tiling, &pitch, alloc_flags);
- mt->pitch = pitch;
+ mt->tiling, &mt->pitch, alloc_flags);
}
mt->offset = 0;
struct intel_mipmap_tree *
intel_miptree_create_for_bo(struct brw_context *brw,
- drm_intel_bo *bo,
+ struct brw_bo *bo,
mesa_format format,
uint32_t offset,
uint32_t width,
uint32_t tiling, swizzle;
GLenum target;
- drm_intel_bo_get_tiling(bo, &tiling, &swizzle);
+ brw_bo_get_tiling(bo, &tiling, &swizzle);
/* Nothing will be able to use this miptree with the BO if the offset isn't
* aligned.
if (!mt)
return NULL;
- drm_intel_bo_reference(bo);
+ brw_bo_reference(bo);
mt->bo = bo;
mt->pitch = pitch;
mt->offset = offset;
void
intel_update_winsys_renderbuffer_miptree(struct brw_context *intel,
struct intel_renderbuffer *irb,
- drm_intel_bo *bo,
+ struct brw_bo *bo,
uint32_t width, uint32_t height,
uint32_t pitch)
{
if (hiz_buf->mt)
intel_miptree_release(&hiz_buf->mt);
else
- drm_intel_bo_unreference(hiz_buf->aux_base.bo);
+ brw_bo_unreference(hiz_buf->aux_base.bo);
free(hiz_buf);
}
DBG("%s deleting %p\n", __func__, *mt);
- drm_intel_bo_unreference((*mt)->bo);
+ brw_bo_unreference((*mt)->bo);
intel_miptree_release(&(*mt)->stencil_mt);
intel_miptree_release(&(*mt)->r8stencil_mt);
intel_miptree_hiz_buffer_free((*mt)->hiz_buf);
if ((*mt)->mcs_buf) {
- drm_intel_bo_unreference((*mt)->mcs_buf->bo);
+ brw_bo_unreference((*mt)->mcs_buf->bo);
free((*mt)->mcs_buf);
}
intel_resolve_map_clear(&(*mt)->hiz_map);
*
* Note: the clear value for MCS buffers is all 1's, so we memset to 0xff.
*/
- const int ret = brw_bo_map_gtt(brw, mt->mcs_buf->bo, "miptree");
- if (unlikely(ret)) {
+ void *map = brw_bo_map(brw, mt->mcs_buf->bo, MAP_WRITE);
+ if (unlikely(map == NULL)) {
fprintf(stderr, "Failed to map mcs buffer into GTT\n");
- drm_intel_bo_unreference(mt->mcs_buf->bo);
+ brw_bo_unreference(mt->mcs_buf->bo);
free(mt->mcs_buf);
return;
}
- void *data = mt->mcs_buf->bo->virtual;
+ void *data = map;
memset(data, init_value, mt->mcs_buf->size);
- drm_intel_bo_unmap(mt->mcs_buf->bo);
+ brw_bo_unmap(mt->mcs_buf->bo);
}
static struct intel_miptree_aux_buffer *
* structure should go away. We use miptree create simply as a means to make
* sure all the constraints for the buffer are satisfied.
*/
- drm_intel_bo_reference(temp_mt->bo);
+ brw_bo_reference(temp_mt->bo);
intel_miptree_release(&temp_mt);
return buf;
*/
const uint32_t alloc_flags =
is_lossless_compressed ? 0 : BO_ALLOC_FOR_RENDER;
- uint32_t tiling = I915_TILING_Y;
- unsigned long pitch;
/* ISL has stricter set of alignment rules then the drm allocator.
* Therefore one can pass the ISL dimensions in terms of bytes instead of
* trying to recalculate based on different format block sizes.
*/
- buf->bo = drm_intel_bo_alloc_tiled(brw->bufmgr, "ccs-miptree",
- buf->pitch, buf->size / buf->pitch,
- 1, &tiling, &pitch, alloc_flags);
- if (buf->bo) {
- assert(pitch == buf->pitch);
- assert(tiling == I915_TILING_Y);
- } else {
+ buf->bo = brw_bo_alloc_tiled(brw->bufmgr, "ccs-miptree",
+ buf->pitch, buf->size / buf->pitch,
+ 1, I915_TILING_Y, &buf->pitch, alloc_flags);
+ if (!buf->bo) {
free(buf);
return false;
}
hz_height = DIV_ROUND_UP(hz_qpitch * Z0, 2 * 8) * 8;
}
- unsigned long pitch;
- uint32_t tiling = I915_TILING_Y;
- buf->aux_base.bo = drm_intel_bo_alloc_tiled(brw->bufmgr, "hiz",
- hz_width, hz_height, 1,
- &tiling, &pitch,
- BO_ALLOC_FOR_RENDER);
+ buf->aux_base.bo = brw_bo_alloc_tiled(brw->bufmgr, "hiz",
+ hz_width, hz_height, 1,
+ I915_TILING_Y, &buf->aux_base.pitch,
+ BO_ALLOC_FOR_RENDER);
if (!buf->aux_base.bo) {
free(buf);
return NULL;
- } else if (tiling != I915_TILING_Y) {
- drm_intel_bo_unreference(buf->aux_base.bo);
- free(buf);
- return NULL;
}
buf->aux_base.size = hz_width * hz_height;
- buf->aux_base.pitch = pitch;
return buf;
}
hz_height = DIV_ROUND_UP(buf->aux_base.qpitch, 2 * 8) * 8 * Z0;
}
- unsigned long pitch;
- uint32_t tiling = I915_TILING_Y;
- buf->aux_base.bo = drm_intel_bo_alloc_tiled(brw->bufmgr, "hiz",
- hz_width, hz_height, 1,
- &tiling, &pitch,
- BO_ALLOC_FOR_RENDER);
+ buf->aux_base.bo = brw_bo_alloc_tiled(brw->bufmgr, "hiz",
+ hz_width, hz_height, 1,
+ I915_TILING_Y, &buf->aux_base.pitch,
+ BO_ALLOC_FOR_RENDER);
if (!buf->aux_base.bo) {
free(buf);
return NULL;
- } else if (tiling != I915_TILING_Y) {
- drm_intel_bo_unreference(buf->aux_base.bo);
- free(buf);
- return NULL;
}
buf->aux_base.size = hz_width * hz_height;
- buf->aux_base.pitch = pitch;
return buf;
}
uint32_t layout_flags = MIPTREE_LAYOUT_ACCELERATED_UPLOAD;
if (brw->gen == 6)
- layout_flags |= MIPTREE_LAYOUT_FORCE_ALL_SLICE_AT_LOD;
+ layout_flags |= MIPTREE_LAYOUT_GEN6_HIZ_STENCIL;
if (!buf)
return NULL;
buf->aux_base.bo = buf->mt->bo;
buf->aux_base.size = buf->mt->total_height * buf->mt->pitch;
buf->aux_base.pitch = buf->mt->pitch;
-
- /* On gen6 hiz is unconditionally laid out packing all slices
- * at each level-of-detail (LOD). This means there is no valid qpitch
- * setting. In fact, this is ignored when hardware is setup - there is no
- * hardware qpitch setting of hiz on gen6.
- */
- buf->aux_base.qpitch = 0;
+ buf->aux_base.qpitch = buf->mt->qpitch * 2;
return buf;
}
}
+/* Execute all pending HiZ/depth resolves of kind `need` that fall inside the
+ * level range [start_level, start_level + num_levels) and layer range
+ * [start_layer, start_layer + num_layers) of mt's hiz_map.  Each matching
+ * entry is executed via intel_hiz_exec() and removed from the map.
+ * Returns true if at least one resolve was performed.
+ */
static bool
-intel_miptree_slice_resolve(struct brw_context *brw,
-                            struct intel_mipmap_tree *mt,
-                            uint32_t level,
-                            uint32_t layer,
-                            enum blorp_hiz_op need)
+intel_miptree_depth_hiz_resolve(struct brw_context *brw,
+                                struct intel_mipmap_tree *mt,
+                                uint32_t start_level, uint32_t num_levels,
+                                uint32_t start_layer, uint32_t num_layers,
+                                enum blorp_hiz_op need)
{
-   intel_miptree_check_level_layer(mt, level, layer);
+   bool did_resolve = false;
+
+   foreach_list_typed_safe(struct intel_resolve_map, map, link, &mt->hiz_map) {
+      /* Skip entries outside the requested level/layer window. */
+      if (map->level < start_level ||
+          map->level >= (start_level + num_levels) ||
+          map->layer < start_layer ||
+          map->layer >= (start_layer + num_layers))
+         continue;
-   struct intel_resolve_map *item =
-      intel_resolve_map_get(&mt->hiz_map, level, layer);
+      /* Only resolves of the requested kind are executed here. */
+      if (map->need != need)
+         continue;
-   if (!item || item->need != need)
-      return false;
+      intel_hiz_exec(brw, mt, map->level, map->layer, 1, need);
+      intel_resolve_map_remove(map);
+      did_resolve = true;
+   }
-   intel_hiz_exec(brw, mt, level, layer, need);
-   intel_resolve_map_remove(item);
-   return true;
+   return did_resolve;
}
bool
uint32_t level,
uint32_t layer)
{
- return intel_miptree_slice_resolve(brw, mt, level, layer,
- BLORP_HIZ_OP_HIZ_RESOLVE);
+ return intel_miptree_depth_hiz_resolve(brw, mt, level, 1, layer, 1,
+ BLORP_HIZ_OP_HIZ_RESOLVE);
}
bool
uint32_t level,
uint32_t layer)
{
- return intel_miptree_slice_resolve(brw, mt, level, layer,
- BLORP_HIZ_OP_DEPTH_RESOLVE);
-}
-
-static bool
-intel_miptree_all_slices_resolve(struct brw_context *brw,
- struct intel_mipmap_tree *mt,
- enum blorp_hiz_op need)
-{
- bool did_resolve = false;
-
- foreach_list_typed_safe(struct intel_resolve_map, map, link, &mt->hiz_map) {
- if (map->need != need)
- continue;
-
- intel_hiz_exec(brw, mt, map->level, map->layer, need);
- intel_resolve_map_remove(map);
- did_resolve = true;
- }
-
- return did_resolve;
+ return intel_miptree_depth_hiz_resolve(brw, mt, level, 1, layer, 1,
+ BLORP_HIZ_OP_DEPTH_RESOLVE);
}
+/* Execute every pending HiZ resolve on every level and layer of mt.
+ * Returns true if any resolve was performed.
+ */
bool
intel_miptree_all_slices_resolve_hiz(struct brw_context *brw,
                                     struct intel_mipmap_tree *mt)
{
-   return intel_miptree_all_slices_resolve(brw, mt,
-                                           BLORP_HIZ_OP_HIZ_RESOLVE);
+   /* UINT32_MAX covers all levels/layers the miptree can contain. */
+   return intel_miptree_depth_hiz_resolve(brw, mt,
+                                          0, UINT32_MAX, 0, UINT32_MAX,
+                                          BLORP_HIZ_OP_HIZ_RESOLVE);
}
+/* Execute every pending depth resolve on every level and layer of mt.
+ * Returns true if any resolve was performed.
+ */
bool
intel_miptree_all_slices_resolve_depth(struct brw_context *brw,
                                       struct intel_mipmap_tree *mt)
{
-   return intel_miptree_all_slices_resolve(brw, mt,
-                                           BLORP_HIZ_OP_DEPTH_RESOLVE);
+   /* UINT32_MAX covers all levels/layers the miptree can contain. */
+   return intel_miptree_depth_hiz_resolve(brw, mt,
+                                          0, UINT32_MAX, 0, UINT32_MAX,
+                                          BLORP_HIZ_OP_DEPTH_RESOLVE);
}
enum intel_fast_clear_state
bool
intel_miptree_resolve_color(struct brw_context *brw,
- struct intel_mipmap_tree *mt, unsigned level,
- unsigned start_layer, unsigned num_layers,
+ struct intel_mipmap_tree *mt,
+ uint32_t start_level, uint32_t num_levels,
+ uint32_t start_layer, uint32_t num_layers,
int flags)
{
- intel_miptree_check_color_resolve(brw, mt, level, start_layer);
+ intel_miptree_check_color_resolve(brw, mt, start_level, start_layer);
if (!intel_miptree_needs_color_resolve(brw, mt, flags))
return false;
- /* Arrayed fast clear is only supported for gen8+. */
- assert(brw->gen >= 8 || num_layers == 1);
+ enum blorp_fast_clear_op resolve_op;
+ if (brw->gen >= 9) {
+ if (intel_miptree_is_lossless_compressed(brw, mt)) {
+ resolve_op = BLORP_FAST_CLEAR_OP_RESOLVE_FULL;
+ } else {
+ resolve_op = BLORP_FAST_CLEAR_OP_RESOLVE_PARTIAL;
+ }
+ } else {
+ /* Broadwell and earlier do not have a partial resolve */
+ assert(!intel_miptree_is_lossless_compressed(brw, mt));
+ resolve_op = BLORP_FAST_CLEAR_OP_RESOLVE_FULL;
+ }
bool resolved = false;
- for (unsigned i = 0; i < num_layers; ++i) {
- intel_miptree_check_level_layer(mt, level, start_layer + i);
+ foreach_list_typed_safe(struct intel_resolve_map, map, link,
+ &mt->color_resolve_map) {
+ if (map->level < start_level ||
+ map->level >= (start_level + num_levels) ||
+ map->layer < start_layer ||
+ map->layer >= (start_layer + num_layers))
+ continue;
- struct intel_resolve_map *item =
- intel_resolve_map_get(&mt->color_resolve_map, level,
- start_layer + i);
+ /* Arrayed and mip-mapped fast clear is only supported for gen8+. */
+ assert(brw->gen >= 8 || (map->level == 0 && map->layer == 0));
- if (item) {
- assert(item->fast_clear_state != INTEL_FAST_CLEAR_STATE_RESOLVED);
+ intel_miptree_check_level_layer(mt, map->level, map->layer);
- brw_blorp_resolve_color(brw, mt, level, start_layer);
- intel_resolve_map_remove(item);
- resolved = true;
- }
+ assert(map->fast_clear_state != INTEL_FAST_CLEAR_STATE_RESOLVED);
+
+ brw_blorp_resolve_color(brw, mt, map->level, map->layer, resolve_op);
+ intel_resolve_map_remove(map);
+ resolved = true;
}
return resolved;
struct intel_mipmap_tree *mt,
int flags)
{
- if (!intel_miptree_needs_color_resolve(brw, mt, flags))
- return;
-
- foreach_list_typed_safe(struct intel_resolve_map, map, link,
- &mt->color_resolve_map) {
- assert(map->fast_clear_state != INTEL_FAST_CLEAR_STATE_RESOLVED);
- brw_blorp_resolve_color(brw, mt, map->level, map->layer);
- intel_resolve_map_remove(map);
+ intel_miptree_resolve_color(brw, mt, 0, UINT32_MAX, 0, UINT32_MAX, flags);
+}
+
+/* Normalize a miplevel range against the levels actually present in mt.
+ *
+ * num_levels == INTEL_REMAINING_LEVELS means "from start_level through the
+ * last level".  Returns the clamped level count; asserts that the requested
+ * range lies within [first_level, last_level] and does not overflow.
+ */
+static inline uint32_t
+miptree_level_range_length(const struct intel_mipmap_tree *mt,
+                           uint32_t start_level, uint32_t num_levels)
+{
+   assert(start_level >= mt->first_level);
+   assert(start_level <= mt->last_level);
+
+   /* Use the *levels* sentinel here; INTEL_REMAINING_LAYERS is for the
+    * layer helper below (callers pass INTEL_REMAINING_LEVELS for levels).
+    */
+   if (num_levels == INTEL_REMAINING_LEVELS)
+      num_levels = mt->last_level - start_level + 1;
+   /* Check for overflow */
+   assert(start_level + num_levels >= start_level);
+   assert(start_level + num_levels <= mt->last_level + 1);
+
+   return num_levels;
+}
+
+/* Normalize a layer range against the layers present at `level` in mt.
+ *
+ * num_layers == INTEL_REMAINING_LAYERS means "from start_layer through the
+ * last layer of that level".  Returns the clamped layer count; asserts the
+ * range is in bounds and does not overflow.
+ */
+static inline uint32_t
+miptree_layer_range_length(const struct intel_mipmap_tree *mt, uint32_t level,
+                           uint32_t start_layer, uint32_t num_layers)
+{
+   assert(level <= mt->last_level);
+   /* Per-level depth is the number of array layers/slices at this LOD. */
+   uint32_t total_num_layers = mt->level[level].depth;
+
+   assert(start_layer < total_num_layers);
+   if (num_layers == INTEL_REMAINING_LAYERS)
+      num_layers = total_num_layers - start_layer;
+   /* Check for overflow */
+   assert(start_layer + num_layers >= start_layer);
+   assert(start_layer + num_layers <= total_num_layers);
+
+   return num_layers;
+}
+
+/* Flush any auxiliary-surface (MCS/CCS/HiZ) state needed before the given
+ * level/layer range of mt is accessed.
+ *
+ * aux_supported: the upcoming access understands the aux surface.
+ * fast_clear_supported: the upcoming access understands fast-clear data.
+ * Ranges may use INTEL_REMAINING_LEVELS/LAYERS for "the rest".
+ *
+ * NOTE(review): num_layers is forwarded without being normalized by
+ * miptree_layer_range_length() -- the resolve helpers appear to tolerate
+ * UINT32_MAX ranges, but confirm this is intentional.
+ */
+void
+intel_miptree_prepare_access(struct brw_context *brw,
+                             struct intel_mipmap_tree *mt,
+                             uint32_t start_level, uint32_t num_levels,
+                             uint32_t start_layer, uint32_t num_layers,
+                             bool aux_supported, bool fast_clear_supported)
+{
+   num_levels = miptree_level_range_length(mt, start_level, num_levels);
+
+   if (_mesa_is_format_color_format(mt->format)) {
+      /* No MCS/CCS buffer means nothing to resolve. */
+      if (!mt->mcs_buf)
+         return;
+
+      if (mt->num_samples > 1) {
+         /* Nothing to do for MSAA */
+      } else {
+         /* TODO: This is fairly terrible. We can do better. */
+         if (!aux_supported || !fast_clear_supported) {
+            intel_miptree_resolve_color(brw, mt, start_level, num_levels,
+                                        start_layer, num_layers, 0);
+         }
+      }
+   } else if (mt->format == MESA_FORMAT_S_UINT8) {
+      /* Nothing to do for stencil */
+   } else {
+      /* Depth: no HiZ buffer means nothing to resolve. */
+      if (!mt->hiz_buf)
+         return;
+
+      if (aux_supported) {
+         assert(fast_clear_supported);
+         intel_miptree_depth_hiz_resolve(brw, mt, start_level, num_levels,
+                                         start_layer, num_layers,
+                                         BLORP_HIZ_OP_HIZ_RESOLVE);
+      } else {
+         assert(!fast_clear_supported);
+         intel_miptree_depth_hiz_resolve(brw, mt, start_level, num_levels,
+                                         start_layer, num_layers,
+                                         BLORP_HIZ_OP_DEPTH_RESOLVE);
+      }
+   }
+}
+
+/* Record resolve book-keeping after the given layer range of (mt, level)
+ * has been written.
+ *
+ * written_with_aux: the write went through the auxiliary surface.  For
+ * single-sampled color this marks the slices as used for rendering; for
+ * depth it queues a depth resolve (aux write) or HiZ resolve (non-aux
+ * write) for each written layer.
+ */
+void
+intel_miptree_finish_write(struct brw_context *brw,
+                           struct intel_mipmap_tree *mt, uint32_t level,
+                           uint32_t start_layer, uint32_t num_layers,
+                           bool written_with_aux)
+{
+   num_layers = miptree_layer_range_length(mt, level, start_layer, num_layers);
+
+   if (_mesa_is_format_color_format(mt->format)) {
+      if (mt->num_samples > 1) {
+         /* Nothing to do for MSAA */
+      } else {
+         if (written_with_aux) {
+            intel_miptree_used_for_rendering(brw, mt, level,
+                                             start_layer, num_layers);
+         }
+      }
+   } else if (mt->format == MESA_FORMAT_S_UINT8) {
+      /* Nothing to do for stencil */
+   } else {
+      if (written_with_aux) {
+         for (unsigned a = 0; a < num_layers; a++) {
+            /* Validate every layer we touch, not just the first. */
+            intel_miptree_check_level_layer(mt, level, start_layer + a);
+            intel_miptree_slice_set_needs_depth_resolve(mt, level,
+                                                        start_layer + a);
+         }
+      } else {
+         for (unsigned a = 0; a < num_layers; a++) {
+            intel_miptree_check_level_layer(mt, level, start_layer + a);
+            intel_miptree_slice_set_needs_hiz_resolve(mt, level,
+                                                      start_layer + a);
+         }
+      }
+   }
+}
+
+/* Report the ISL auxiliary-surface state of one (level, layer) slice.
+ *
+ * Color (MCS/CCS must exist): multisampled slices are always
+ * COMPRESSED_CLEAR; single-sampled slices map from the tracked fast-clear
+ * state.  Depth (HiZ must exist): a pending depth resolve means the slice
+ * is COMPRESSED_CLEAR, a pending HiZ resolve means the aux data is
+ * AUX_INVALID, and no pending entry means RESOLVED.  Stencil has no aux
+ * surface, so querying it is a programming error.
+ */
+enum isl_aux_state
+intel_miptree_get_aux_state(const struct intel_mipmap_tree *mt,
+                            uint32_t level, uint32_t layer)
+{
+   if (_mesa_is_format_color_format(mt->format)) {
+      assert(mt->mcs_buf != NULL);
+      if (mt->num_samples > 1) {
+         return ISL_AUX_STATE_COMPRESSED_CLEAR;
+      } else {
+         switch (intel_miptree_get_fast_clear_state(mt, level, layer)) {
+         case INTEL_FAST_CLEAR_STATE_RESOLVED:
+            return ISL_AUX_STATE_RESOLVED;
+         case INTEL_FAST_CLEAR_STATE_UNRESOLVED:
+            return ISL_AUX_STATE_COMPRESSED_CLEAR;
+         case INTEL_FAST_CLEAR_STATE_CLEAR:
+            return ISL_AUX_STATE_CLEAR;
+         default:
+            unreachable("Invalid fast clear state");
+         }
+      }
+   } else if (mt->format == MESA_FORMAT_S_UINT8) {
+      unreachable("Cannot get aux state for stencil");
+   } else {
+      assert(mt->hiz_buf != NULL);
+      const struct intel_resolve_map *map =
+         intel_resolve_map_const_get(&mt->hiz_map, level, layer);
+      /* No pending resolve: main and aux surfaces agree. */
+      if (!map)
+         return ISL_AUX_STATE_RESOLVED;
+      switch (map->need) {
+      case BLORP_HIZ_OP_DEPTH_RESOLVE:
+         return ISL_AUX_STATE_COMPRESSED_CLEAR;
+      case BLORP_HIZ_OP_HIZ_RESOLVE:
+         return ISL_AUX_STATE_AUX_INVALID;
+      default:
+         unreachable("Invalid hiz op");
+      }
+   }
+}
+
+/* Force the aux state of a layer range of (mt, level).
+ *
+ * Currently only ISL_AUX_STATE_CLEAR is supported (fast clears): color
+ * slices get INTEL_FAST_CLEAR_STATE_CLEAR; depth slices are queued for a
+ * depth resolve.  Stencil has no aux surface.
+ */
+void
+intel_miptree_set_aux_state(struct brw_context *brw,
+                            struct intel_mipmap_tree *mt, uint32_t level,
+                            uint32_t start_layer, uint32_t num_layers,
+                            enum isl_aux_state aux_state)
+{
+   num_layers = miptree_layer_range_length(mt, level, start_layer, num_layers);
+
+   /* Right now, this only applies to clears. */
+   assert(aux_state == ISL_AUX_STATE_CLEAR);
+
+   if (_mesa_is_format_color_format(mt->format)) {
+      if (mt->num_samples > 1)
+         assert(mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS);
+
+      /* Color fast clears are only tracked for slice (0, 0). */
+      assert(level == 0 && start_layer == 0 && num_layers == 1);
+      intel_miptree_set_fast_clear_state(brw, mt, 0, 0, 1,
+                                         INTEL_FAST_CLEAR_STATE_CLEAR);
+   } else if (mt->format == MESA_FORMAT_S_UINT8) {
+      assert(!"Cannot set aux state for stencil");
+   } else {
+      for (unsigned a = 0; a < num_layers; a++) {
+         /* Validate every layer we touch, not just the first. */
+         intel_miptree_check_level_layer(mt, level, start_layer + a);
+         intel_miptree_slice_set_needs_depth_resolve(mt, level,
+                                                     start_layer + a);
+      }
+   }
+}
+
+/* On Gen9 color buffers may be compressed by the hardware (lossless
+ * compression). There are, however, format restrictions and care needs to be
+ * taken that the sampler engine is capable of re-interpreting a buffer with
+ * a format different from the one the buffer was originally written with.
+ *
+ * For example, SRGB formats are not compressible and the sampler engine isn't
+ * capable of treating RGBA_UNORM as SRGB_ALPHA. In such a case the underlying
+ * color buffer needs to be resolved so that the sampling surface can be
+ * sampled as non-compressed (i.e., without the auxiliary MCS buffer being
+ * set).
+ *
+ * Returns true when mt must be resolved before being sampled as `format`.
+ */
+static bool
+intel_texture_view_requires_resolve(struct brw_context *brw,
+                                    struct intel_mipmap_tree *mt,
+                                    mesa_format format)
+{
+   /* Only Gen9+ lossless-compressed (CCS_E) surfaces are affected. */
+   if (brw->gen < 9 ||
+       !intel_miptree_is_lossless_compressed(brw, mt))
+      return false;
+
+   const enum isl_format isl_format = brw_isl_format_for_mesa_format(format);
+
+   if (isl_format_supports_ccs_e(&brw->screen->devinfo, isl_format))
+      return false;
+
+   perf_debug("Incompatible sampling format (%s) for rbc (%s)\n",
+              _mesa_get_format_name(format),
+              _mesa_get_format_name(mt->format));
+
+   return true;
+}
+
+/* Prepare a level/layer range of mt for sampling as `view_format`.
+ *
+ * Decides whether the sampler may use the aux surface (CCS for compatible
+ * color views, HiZ when sampling with HiZ is supported, never for stencil)
+ * and performs the required resolves.  If aux_supported_out is non-NULL it
+ * receives the decision so the caller can program the surface state.
+ */
+static void
+intel_miptree_prepare_texture_slices(struct brw_context *brw,
+                                     struct intel_mipmap_tree *mt,
+                                     mesa_format view_format,
+                                     uint32_t start_level, uint32_t num_levels,
+                                     uint32_t start_layer, uint32_t num_layers,
+                                     bool *aux_supported_out)
+{
+   bool aux_supported;
+   if (_mesa_is_format_color_format(mt->format)) {
+      aux_supported = intel_miptree_is_lossless_compressed(brw, mt) &&
+                      !intel_texture_view_requires_resolve(brw, mt, view_format);
+   } else if (mt->format == MESA_FORMAT_S_UINT8) {
+      aux_supported = false;
+   } else {
+      aux_supported = intel_miptree_sample_with_hiz(brw, mt);
+   }
+
+   intel_miptree_prepare_access(brw, mt, start_level, num_levels,
+                                start_layer, num_layers,
+                                aux_supported, aux_supported);
+   if (aux_supported_out)
+      *aux_supported_out = aux_supported;
+}
+
+/* Prepare all levels and layers of mt for sampling as `view_format`.
+ * Convenience wrapper around intel_miptree_prepare_texture_slices().
+ */
+void
+intel_miptree_prepare_texture(struct brw_context *brw,
+                              struct intel_mipmap_tree *mt,
+                              mesa_format view_format,
+                              bool *aux_supported_out)
+{
+   intel_miptree_prepare_texture_slices(brw, mt, view_format,
+                                        0, INTEL_REMAINING_LEVELS,
+                                        0, INTEL_REMAINING_LAYERS,
+                                        aux_supported_out);
+}
+
+/* Prepare mt for shader image (data-port) access: fully resolve all
+ * levels and layers, since the data port cannot read aux surfaces.
+ */
+void
+intel_miptree_prepare_image(struct brw_context *brw,
+                            struct intel_mipmap_tree *mt)
+{
+   /* The data port doesn't understand any compression */
+   intel_miptree_prepare_access(brw, mt, 0, INTEL_REMAINING_LEVELS,
+                                0, INTEL_REMAINING_LAYERS, false, false);
+}
+
+/* Prepare one level of mt for framebuffer fetch, which samples the surface
+ * in its own format -- hence the texture-slice path with view == mt->format.
+ */
+void
+intel_miptree_prepare_fb_fetch(struct brw_context *brw,
+                               struct intel_mipmap_tree *mt, uint32_t level,
+                               uint32_t start_layer, uint32_t num_layers)
+{
+   intel_miptree_prepare_texture_slices(brw, mt, mt->format, level, 1,
+                                        start_layer, num_layers, NULL);
+}
+
+/* Prepare a color miptree for use as a render target.
+ *
+ * Resolves are needed in two cases: Gen9+ sRGB rendering on a surface whose
+ * linear and sRGB formats differ (CCS can't be used with sRGB), and layered
+ * rendering to a non-compressed fast-cleared surface (surface state holds
+ * only one clear color for all layers).
+ */
+void
+intel_miptree_prepare_render(struct brw_context *brw,
+                             struct intel_mipmap_tree *mt, uint32_t level,
+                             uint32_t start_layer, uint32_t layer_count,
+                             bool srgb_enabled)
+{
+   /* If FRAMEBUFFER_SRGB is used on Gen9+ then we need to resolve any of
+    * the single-sampled color renderbuffers because the CCS buffer isn't
+    * supported for SRGB formats. This only matters if FRAMEBUFFER_SRGB is
+    * enabled because otherwise the surface state will be programmed with
+    * the linear equivalent format anyway.
+    */
+   if (brw->gen >= 9 && srgb_enabled && mt->num_samples <= 1 &&
+       _mesa_get_srgb_format_linear(mt->format) != mt->format) {
+
+      /* Lossless compression is not supported for SRGB formats, it
+       * should be impossible to get here with such surfaces.
+       */
+      assert(!intel_miptree_is_lossless_compressed(brw, mt));
+      intel_miptree_prepare_access(brw, mt, level, 1, start_layer, layer_count,
+                                   false, false);
+   }
+
+   /* For layered rendering non-compressed fast cleared buffers need to be
+    * resolved. Surface state can carry only one fast color clear value
+    * while each layer may have its own fast clear color value. For
+    * compressed buffers color value is available in the color buffer.
+    */
+   if (layer_count > 1 &&
+       !(mt->aux_disable & INTEL_AUX_DISABLE_CCS) &&
+       !intel_miptree_is_lossless_compressed(brw, mt)) {
+      assert(brw->gen >= 8);
+
+      intel_miptree_prepare_access(brw, mt, level, 1, start_layer, layer_count,
+                                   false, false);
+   }
+}
+
+/* Record that a color layer range of (mt, level) was rendered to.  The
+ * write is considered aux-backed whenever an MCS/CCS buffer exists.
+ */
+void
+intel_miptree_finish_render(struct brw_context *brw,
+                            struct intel_mipmap_tree *mt, uint32_t level,
+                            uint32_t start_layer, uint32_t layer_count)
+{
+   assert(_mesa_is_format_color_format(mt->format));
+   intel_miptree_finish_write(brw, mt, level, start_layer, layer_count,
+                              mt->mcs_buf != NULL);
+}
+
+/* Prepare a depth layer range of (mt, level) for depth testing/rendering.
+ * Aux (HiZ) is usable exactly when a HiZ buffer exists.
+ */
+void
+intel_miptree_prepare_depth(struct brw_context *brw,
+                            struct intel_mipmap_tree *mt, uint32_t level,
+                            uint32_t start_layer, uint32_t layer_count)
+{
+   intel_miptree_prepare_access(brw, mt, level, 1, start_layer, layer_count,
+                                mt->hiz_buf != NULL, mt->hiz_buf != NULL);
+}
+
+/* Record that a depth layer range of (mt, level) may have been written.
+ * No-op when the depth test produced no writes (depth_written == false).
+ */
+void
+intel_miptree_finish_depth(struct brw_context *brw,
+                           struct intel_mipmap_tree *mt, uint32_t level,
+                           uint32_t start_layer, uint32_t layer_count,
+                           bool depth_written)
+{
+   if (depth_written) {
+      intel_miptree_finish_write(brw, mt, level, start_layer, layer_count,
+                                 mt->hiz_buf != NULL);
   }
}
*/
assert(mt->msaa_layout == INTEL_MSAA_LAYOUT_NONE || mt->num_samples <= 1);
+ intel_miptree_prepare_access(brw, mt, 0, INTEL_REMAINING_LEVELS,
+ 0, INTEL_REMAINING_LAYERS, false, false);
+
if (mt->mcs_buf) {
- intel_miptree_all_slices_resolve_color(brw, mt, 0);
mt->aux_disable |= (INTEL_AUX_DISABLE_CCS | INTEL_AUX_DISABLE_MCS);
- drm_intel_bo_unreference(mt->mcs_buf->bo);
+ brw_bo_unreference(mt->mcs_buf->bo);
free(mt->mcs_buf);
mt->mcs_buf = NULL;
if (mt->hiz_buf) {
mt->aux_disable |= INTEL_AUX_DISABLE_HIZ;
- intel_miptree_all_slices_resolve_depth(brw, mt);
intel_miptree_hiz_buffer_free(mt->hiz_buf);
mt->hiz_buf = NULL;
const uint32_t r8stencil_flags =
MIPTREE_LAYOUT_ACCELERATED_UPLOAD | MIPTREE_LAYOUT_TILING_Y |
MIPTREE_LAYOUT_DISABLE_AUX;
- assert(brw->gen > 6); /* Handle MIPTREE_LAYOUT_FORCE_ALL_SLICE_AT_LOD */
+ assert(brw->gen > 6); /* Handle MIPTREE_LAYOUT_GEN6_HIZ_STENCIL */
mt->r8stencil_mt = intel_miptree_create(brw,
src->target,
MESA_FORMAT_R_UINT8,
for (int level = src->first_level; level <= src->last_level; level++) {
const unsigned depth = src->level[level].depth;
- const int layers_per_blit =
- (dst->msaa_layout == INTEL_MSAA_LAYOUT_UMS ||
- dst->msaa_layout == INTEL_MSAA_LAYOUT_CMS) ?
- dst->num_samples : 1;
for (unsigned layer = 0; layer < depth; layer++) {
- brw_blorp_blit_miptrees(brw,
+ brw_blorp_copy_miptrees(brw,
src, level, layer,
- src->format, SWIZZLE_X,
- dst, level, layers_per_blit * layer,
- MESA_FORMAT_R_UNORM8,
- 0, 0,
+ dst, level, layer,
+ 0, 0, 0, 0,
minify(src->logical_width0, level),
- minify(src->logical_height0, level),
- 0, 0,
- minify(dst->logical_width0, level),
- minify(dst->logical_height0, level),
- GL_NEAREST, false, false /*mirror x, y*/,
- false, false /* decode/encode srgb */);
+ minify(src->logical_height0, level));
}
}
}
+/* Map the miptree's backing BO into the CPU address space.
+ *
+ * mode is a GL_MAP_* bitfield forwarded to brw_bo_map().  Flushes the
+ * batchbuffer first if it references this BO, so CPU and GPU accesses are
+ * ordered.  Returns the CPU pointer, or NULL on failure.  Pair with
+ * intel_miptree_unmap_raw().
+ */
static void *
-intel_miptree_map_raw(struct brw_context *brw, struct intel_mipmap_tree *mt)
+intel_miptree_map_raw(struct brw_context *brw,
+                      struct intel_mipmap_tree *mt,
+                      GLbitfield mode)
{
-   /* CPU accesses to color buffers don't understand fast color clears, so
-    * resolve any pending fast color clears before we map.
-    */
-   intel_miptree_all_slices_resolve_color(brw, mt, 0);
-
-   drm_intel_bo *bo = mt->bo;
+   struct brw_bo *bo = mt->bo;
-   if (drm_intel_bo_references(brw->batch.bo, bo))
+   /* Flush pending GPU commands that touch this BO before the CPU maps it. */
+   if (brw_batch_references(&brw->batch, bo))
      intel_batchbuffer_flush(brw);
-   /* brw_bo_map() uses a WB mmaping of the buffer's backing storage. It
-    * will utilize the CPU cache even if the buffer is incoherent with the
-    * GPU (i.e. any writes will be stored in the cache and not flushed to
-    * memory and so will be invisible to the GPU or display engine). This
-    * is the majority of buffers on a !llc machine, but even on a llc
-    * almost all scanouts are incoherent with the CPU. A WB write into the
-    * backing storage of the current scanout will not be immediately
-    * visible on the screen. The transfer from cache to screen is slow and
-    * indeterministic causing visible glitching on the screen. Never use
-    * this WB mapping for writes to an active scanout (reads are fine, so
-    * long as cache consistency is maintained).
-    */
-   if (mt->tiling != I915_TILING_NONE || mt->is_scanout)
-      brw_bo_map_gtt(brw, bo, "miptree");
-   else
-      brw_bo_map(brw, bo, true, "miptree");
-
-   return bo->virtual;
+   /* brw_bo_map() now picks the mapping type itself based on `mode`. */
+   return brw_bo_map(brw, bo, mode);
}
+/* Undo intel_miptree_map_raw(): release the CPU mapping of mt's BO. */
static void
intel_miptree_unmap_raw(struct intel_mipmap_tree *mt)
{
-   drm_intel_bo_unmap(mt->bo);
+   brw_bo_unmap(mt->bo);
}
static void
y /= bh;
x /= bw;
- base = intel_miptree_map_raw(brw, mt) + mt->offset;
+ base = intel_miptree_map_raw(brw, mt, map->mode) + mt->offset;
if (base == NULL)
map->ptr = NULL;
}
}
- map->ptr = intel_miptree_map_raw(brw, map->linear_mt);
+ map->ptr = intel_miptree_map_raw(brw, map->linear_mt, map->mode);
DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __func__,
map->x, map->y, map->w, map->h,
image_x += map->x;
image_y += map->y;
- void *src = intel_miptree_map_raw(brw, mt);
+ void *src = intel_miptree_map_raw(brw, mt, map->mode);
if (!src)
return;
*/
if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
uint8_t *untiled_s8_map = map->ptr;
- uint8_t *tiled_s8_map = intel_miptree_map_raw(brw, mt);
+ uint8_t *tiled_s8_map = intel_miptree_map_raw(brw, mt, GL_MAP_READ_BIT);
unsigned int image_x, image_y;
intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);
if (map->mode & GL_MAP_WRITE_BIT) {
unsigned int image_x, image_y;
uint8_t *untiled_s8_map = map->ptr;
- uint8_t *tiled_s8_map = intel_miptree_map_raw(brw, mt);
+ uint8_t *tiled_s8_map = intel_miptree_map_raw(brw, mt, GL_MAP_WRITE_BIT);
intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);
image_x += map->x;
image_y += map->y;
- uint8_t *dst = intel_miptree_map_raw(brw, mt)
+ uint8_t *dst = intel_miptree_map_raw(brw, mt, GL_MAP_WRITE_BIT)
+ image_y * mt->pitch
+ image_x * mt->cpp;
*/
if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
uint32_t *packed_map = map->ptr;
- uint8_t *s_map = intel_miptree_map_raw(brw, s_mt);
- uint32_t *z_map = intel_miptree_map_raw(brw, z_mt);
+ uint8_t *s_map = intel_miptree_map_raw(brw, s_mt, GL_MAP_READ_BIT);
+ uint32_t *z_map = intel_miptree_map_raw(brw, z_mt, GL_MAP_READ_BIT);
unsigned int s_image_x, s_image_y;
unsigned int z_image_x, z_image_y;
if (map->mode & GL_MAP_WRITE_BIT) {
uint32_t *packed_map = map->ptr;
- uint8_t *s_map = intel_miptree_map_raw(brw, s_mt);
- uint32_t *z_map = intel_miptree_map_raw(brw, z_mt);
+ uint8_t *s_map = intel_miptree_map_raw(brw, s_mt, GL_MAP_WRITE_BIT);
+ uint32_t *z_map = intel_miptree_map_raw(brw, z_mt, GL_MAP_WRITE_BIT);
unsigned int s_image_x, s_image_y;
unsigned int z_image_x, z_image_y;
return;
}
- intel_miptree_slice_resolve_depth(brw, mt, level, slice);
- if (map->mode & GL_MAP_WRITE_BIT) {
- intel_miptree_slice_set_needs_hiz_resolve(mt, level, slice);
- }
+ intel_miptree_access_raw(brw, mt, level, slice,
+ map->mode & GL_MAP_WRITE_BIT);
if (mt->format == MESA_FORMAT_S_UINT8) {
intel_miptree_map_s8(brw, mt, map, level, slice);
enum isl_dim_layout
get_isl_dim_layout(const struct gen_device_info *devinfo, uint32_t tiling,
- GLenum target)
+ GLenum target, enum miptree_array_layout array_layout)
{
+ if (array_layout == GEN6_HIZ_STENCIL)
+ return ISL_DIM_LAYOUT_GEN6_STENCIL_HIZ;
+
switch (target) {
case GL_TEXTURE_1D:
case GL_TEXTURE_1D_ARRAY:
{
surf->dim = get_isl_surf_dim(mt->target);
surf->dim_layout = get_isl_dim_layout(&brw->screen->devinfo,
- mt->tiling, mt->target);
+ mt->tiling, mt->target,
+ mt->array_layout);
if (mt->num_samples > 1) {
switch (mt->msaa_layout) {
surf->phys_level0_sa.array_len = mt->physical_depth0;
}
- surf->levels = mt->last_level + 1;
+ surf->levels = mt->last_level - mt->first_level + 1;
surf->samples = MAX2(mt->num_samples, 1);
surf->size = 0; /* TODO */
switch (surf->dim_layout) {
case ISL_DIM_LAYOUT_GEN4_2D:
case ISL_DIM_LAYOUT_GEN4_3D:
+ case ISL_DIM_LAYOUT_GEN6_STENCIL_HIZ:
if (brw->gen >= 9) {
surf->array_pitch_el_rows = mt->qpitch;
} else {
surf->array_pitch_span = ISL_ARRAY_PITCH_SPAN_FULL;
break;
case ALL_SLICES_AT_EACH_LOD:
+ case GEN6_HIZ_STENCIL:
surf->array_pitch_span = ISL_ARRAY_PITCH_SPAN_COMPACT;
break;
default:
surf->array_pitch_el_rows =
aux_qpitch / isl_format_get_layout(surf->format)->bh;
}
-
-union isl_color_value
-intel_miptree_get_isl_clear_color(struct brw_context *brw,
- const struct intel_mipmap_tree *mt)
-{
- union isl_color_value clear_color;
-
- if (_mesa_get_format_base_format(mt->format) == GL_DEPTH_COMPONENT) {
- clear_color.i32[0] = mt->depth_clear_value;
- clear_color.i32[1] = 0;
- clear_color.i32[2] = 0;
- clear_color.i32[3] = 0;
- } else if (brw->gen >= 9) {
- clear_color.i32[0] = mt->gen9_fast_clear_color.i[0];
- clear_color.i32[1] = mt->gen9_fast_clear_color.i[1];
- clear_color.i32[2] = mt->gen9_fast_clear_color.i[2];
- clear_color.i32[3] = mt->gen9_fast_clear_color.i[3];
- } else if (_mesa_is_format_integer(mt->format)) {
- clear_color.i32[0] = (mt->fast_clear_color_value & (1u << 31)) != 0;
- clear_color.i32[1] = (mt->fast_clear_color_value & (1u << 30)) != 0;
- clear_color.i32[2] = (mt->fast_clear_color_value & (1u << 29)) != 0;
- clear_color.i32[3] = (mt->fast_clear_color_value & (1u << 28)) != 0;
- } else {
- clear_color.f32[0] = (mt->fast_clear_color_value & (1u << 31)) != 0;
- clear_color.f32[1] = (mt->fast_clear_color_value & (1u << 30)) != 0;
- clear_color.f32[2] = (mt->fast_clear_color_value & (1u << 29)) != 0;
- clear_color.f32[3] = (mt->fast_clear_color_value & (1u << 28)) != 0;
- }
-
- return clear_color;
-}