uint32_t safe_aux_usage,
unsigned *level,
unsigned start_layer, unsigned num_layers,
- struct isl_surf tmp_surfs[2])
+ struct isl_surf tmp_surfs[1])
{
if (mt->msaa_layout == INTEL_MSAA_LAYOUT_UMS ||
mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS) {
intel_miptree_check_level_layer(mt, *level, start_layer + i);
}
- intel_miptree_get_isl_surf(brw, mt, &tmp_surfs[0]);
- surf->surf = &tmp_surfs[0];
+ if (mt->surf.size > 0) {
+ surf->surf = &mt->surf;
+ } else {
+ intel_miptree_get_isl_surf(brw, mt, &tmp_surfs[0]);
+ surf->surf = &tmp_surfs[0];
+ }
+
surf->addr = (struct blorp_address) {
.buffer = mt->bo,
.offset = mt->offset,
.write_domain = is_render_target ? I915_GEM_DOMAIN_RENDER : 0,
};
- struct isl_surf *aux_surf = &tmp_surfs[1];
- intel_miptree_get_aux_isl_surf(brw, mt, aux_surf, &surf->aux_usage);
+ surf->aux_usage = intel_miptree_get_aux_isl_usage(brw, mt);
+
+ struct isl_surf *aux_surf = NULL;
+ if (mt->mcs_buf)
+ aux_surf = &mt->mcs_buf->surf;
+ else if (mt->hiz_buf)
+ aux_surf = &mt->hiz_buf->surf;
if (wants_resolve) {
bool supports_aux = surf->aux_usage != ISL_AUX_USAGE_NONE &&
} else {
assert(surf->aux_usage == ISL_AUX_USAGE_HIZ);
- surf->aux_addr.buffer = mt->hiz_buf->aux_base.bo;
- surf->aux_addr.offset = mt->hiz_buf->aux_base.offset;
+ surf->aux_addr.buffer = mt->hiz_buf->bo;
+ surf->aux_addr.offset = mt->hiz_buf->offset;
}
} else {
surf->aux_addr = (struct blorp_address) {
return ISL_FORMAT_R16_UNORM;
default: {
if (is_render_target) {
- assert(brw->format_supported_as_render_target[format]);
- return brw->render_target_format[format];
+ assert(brw->mesa_format_supports_render[format]);
+ return brw->mesa_to_isl_render_format[format];
} else {
return brw_isl_format_for_mesa_format(format);
}
(1 << ISL_AUX_USAGE_CCS_D);
}
- struct isl_surf tmp_surfs[4];
+ struct isl_surf tmp_surfs[2];
struct blorp_surf src_surf, dst_surf;
blorp_surf_for_miptree(brw, &src_surf, src_mt, false, true, src_usage_flags,
&src_level, src_layer, 1, &tmp_surfs[0]);
blorp_surf_for_miptree(brw, &dst_surf, dst_mt, true, true, dst_usage_flags,
- &dst_level, dst_layer, 1, &tmp_surfs[2]);
+ &dst_level, dst_layer, 1, &tmp_surfs[1]);
struct isl_swizzle src_isl_swizzle = {
.r = swizzle_to_scs(GET_SWZ(src_swizzle, 0)),
dst_mt->num_samples, _mesa_get_format_name(dst_mt->format), dst_mt,
dst_level, dst_layer, dst_x, dst_y);
- struct isl_surf tmp_surfs[4];
+ struct isl_surf tmp_surfs[2];
struct blorp_surf src_surf, dst_surf;
blorp_surf_for_miptree(brw, &src_surf, src_mt, false, true,
(1 << ISL_AUX_USAGE_MCS) |
blorp_surf_for_miptree(brw, &dst_surf, dst_mt, true, true,
(1 << ISL_AUX_USAGE_MCS) |
(1 << ISL_AUX_USAGE_CCS_E),
- &dst_level, dst_layer, 1, &tmp_surfs[2]);
+ &dst_level, dst_layer, 1, &tmp_surfs[1]);
struct blorp_batch batch;
blorp_batch_init(&brw->blorp, &batch, brw, 0);
_mesa_get_format_base_format(dst_mt->format) == GL_DEPTH_STENCIL)
return false;
- if (!brw->format_supported_as_render_target[dst_image->TexFormat])
+ if (!brw->mesa_format_supports_render[dst_image->TexFormat])
return false;
/* Source clipping shouldn't be necessary, since copytexsubimage (in
return physical_to_logical_layer(irb->mt, irb->mt_layer);
}
-static bool
+static void
do_single_blorp_clear(struct brw_context *brw, struct gl_framebuffer *fb,
struct gl_renderbuffer *rb, unsigned buf,
bool partial_clear, bool encode_srgb)
/* If the clear region is empty, just return. */
if (x0 == x1 || y0 == y1)
- return true;
+ return;
bool can_fast_clear = !partial_clear;
if (set_write_disables(irb, ctx->Color.ColorMask[buf], color_write_disable))
can_fast_clear = false;
- if (irb->mt->aux_disable & INTEL_AUX_DISABLE_CCS ||
+ if (!irb->mt->supports_fast_clear ||
!brw_is_color_fast_clear_compatible(brw, irb->mt, &ctx->Color.ClearColor))
can_fast_clear = false;
unsigned level = irb->mt_level;
const unsigned num_layers = fb->MaxNumLayers ? irb->layer_count : 1;
- if (can_fast_clear) {
- /* If the MCS buffer hasn't been allocated yet, we need to allocate
- * it now.
- */
- if (!irb->mt->mcs_buf) {
- assert(!intel_miptree_is_lossless_compressed(brw, irb->mt));
- if (!intel_miptree_alloc_non_msrt_mcs(brw, irb->mt, false)) {
- /* MCS allocation failed--probably this will only happen in
- * out-of-memory conditions. But in any case, try to recover
- * by falling back to a non-blorp clear technique.
- */
- return false;
- }
+ /* If the MCS buffer hasn't been allocated yet, we need to allocate it now.
+ */
+ if (can_fast_clear && !irb->mt->mcs_buf) {
+ assert(irb->mt->aux_usage == ISL_AUX_USAGE_CCS_D);
+ if (!intel_miptree_alloc_ccs(brw, irb->mt)) {
+ /* There are a few reasons, in addition to out-of-memory, that can
+ * cause intel_miptree_alloc_ccs to fail. Try to recover by
+ * falling back to non-fast clear.
+ */
+ can_fast_clear = false;
}
+ }
+ if (can_fast_clear) {
const enum isl_aux_state aux_state =
intel_miptree_get_aux_state(irb->mt, irb->mt_level, logical_layer);
union isl_color_value clear_color =
if (aux_state == ISL_AUX_STATE_CLEAR &&
memcmp(&irb->mt->fast_clear_color,
&clear_color, sizeof(clear_color)) == 0)
- return true;
+ return;
irb->mt->fast_clear_color = clear_color;
struct blorp_batch batch;
blorp_batch_init(&brw->blorp, &batch, brw, 0);
blorp_fast_clear(&batch, &surf,
- brw->render_target_format[format],
+ brw->mesa_to_isl_render_format[format],
level, logical_layer, num_layers,
x0, y0, x1, y1);
blorp_batch_finish(&batch);
struct blorp_batch batch;
blorp_batch_init(&brw->blorp, &batch, brw, 0);
blorp_clear(&batch, &surf,
- brw->render_target_format[format],
+ brw->mesa_to_isl_render_format[format],
ISL_SWIZZLE_IDENTITY,
level, irb_logical_mt_layer(irb), num_layers,
x0, y0, x1, y1,
blorp_batch_finish(&batch);
}
- return true;
+ return;
}
-bool
+void
brw_blorp_clear_color(struct brw_context *brw, struct gl_framebuffer *fb,
GLbitfield mask, bool partial_clear, bool encode_srgb)
{
if (rb == NULL)
continue;
- if (!do_single_blorp_clear(brw, fb, rb, buf, partial_clear,
- encode_srgb)) {
- return false;
+ do_single_blorp_clear(brw, fb, rb, buf, partial_clear, encode_srgb);
+ irb->need_downsample = true;
+ }
+
+ return;
+}
+
+void
+brw_blorp_clear_depth_stencil(struct brw_context *brw,
+ struct gl_framebuffer *fb,
+ GLbitfield mask, bool partial_clear)
+{
+ const struct gl_context *ctx = &brw->ctx;
+ struct gl_renderbuffer *depth_rb =
+ fb->Attachment[BUFFER_DEPTH].Renderbuffer;
+ struct gl_renderbuffer *stencil_rb =
+ fb->Attachment[BUFFER_STENCIL].Renderbuffer;
+
+ if (!depth_rb || ctx->Depth.Mask == GL_FALSE)
+ mask &= ~BUFFER_BIT_DEPTH;
+
+ if (!stencil_rb || (ctx->Stencil.WriteMask[0] & 0xff) == 0)
+ mask &= ~BUFFER_BIT_STENCIL;
+
+ if (!(mask & (BUFFER_BITS_DEPTH_STENCIL)))
+ return;
+
+ uint32_t x0, x1, y0, y1, rb_name, rb_height;
+ if (depth_rb) {
+ rb_name = depth_rb->Name;
+ rb_height = depth_rb->Height;
+ if (stencil_rb) {
+ assert(depth_rb->Width == stencil_rb->Width);
+ assert(depth_rb->Height == stencil_rb->Height);
}
+ } else {
+ assert(stencil_rb);
+ rb_name = stencil_rb->Name;
+ rb_height = stencil_rb->Height;
+ }
- irb->need_downsample = true;
+ x0 = fb->_Xmin;
+ x1 = fb->_Xmax;
+ if (rb_name != 0) {
+ y0 = fb->_Ymin;
+ y1 = fb->_Ymax;
+ } else {
+ y0 = rb_height - fb->_Ymax;
+ y1 = rb_height - fb->_Ymin;
}
- return true;
+ /* If the clear region is empty, just return. */
+ if (x0 == x1 || y0 == y1)
+ return;
+
+ uint32_t level, start_layer, num_layers;
+ struct isl_surf isl_tmp[4];
+ struct blorp_surf depth_surf, stencil_surf;
+
+ if (mask & BUFFER_BIT_DEPTH) {
+ struct intel_renderbuffer *irb = intel_renderbuffer(depth_rb);
+ struct intel_mipmap_tree *depth_mt =
+ find_miptree(GL_DEPTH_BUFFER_BIT, irb);
+
+ level = irb->mt_level;
+ start_layer = irb_logical_mt_layer(irb);
+ num_layers = fb->MaxNumLayers ? irb->layer_count : 1;
+
+ unsigned depth_level = level;
+ blorp_surf_for_miptree(brw, &depth_surf, depth_mt, true,
+ true, (1 << ISL_AUX_USAGE_HIZ),
+ &depth_level, start_layer, num_layers,
+ &isl_tmp[0]);
+ assert(depth_level == level);
+ }
+
+ uint8_t stencil_mask = 0;
+ if (mask & BUFFER_BIT_STENCIL) {
+ struct intel_renderbuffer *irb = intel_renderbuffer(stencil_rb);
+ struct intel_mipmap_tree *stencil_mt =
+ find_miptree(GL_STENCIL_BUFFER_BIT, irb);
+
+ if (mask & BUFFER_BIT_DEPTH) {
+ assert(level == irb->mt_level);
+ assert(start_layer == irb_logical_mt_layer(irb));
+ assert(num_layers == fb->MaxNumLayers ? irb->layer_count : 1);
+ } else {
+ level = irb->mt_level;
+ start_layer = irb_logical_mt_layer(irb);
+ num_layers = fb->MaxNumLayers ? irb->layer_count : 1;
+ }
+
+ stencil_mask = ctx->Stencil.WriteMask[0] & 0xff;
+
+ unsigned stencil_level = level;
+ blorp_surf_for_miptree(brw, &stencil_surf, stencil_mt, true, true, 0,
+ &stencil_level, start_layer, num_layers,
+ &isl_tmp[2]);
+ }
+
+ assert((mask & BUFFER_BIT_DEPTH) || stencil_mask);
+
+ struct blorp_batch batch;
+ blorp_batch_init(&brw->blorp, &batch, brw, 0);
+ blorp_clear_depth_stencil(&batch, &depth_surf, &stencil_surf,
+ level, start_layer, num_layers,
+ x0, y0, x1, y1,
+ (mask & BUFFER_BIT_DEPTH), ctx->Depth.Clear,
+ stencil_mask, ctx->Stencil.Clear);
+ blorp_batch_finish(&batch);
}
void
const mesa_format format = _mesa_get_srgb_format_linear(mt->format);
- struct isl_surf isl_tmp[2];
+ struct isl_surf isl_tmp[1];
struct blorp_surf surf;
blorp_surf_for_miptree(brw, &surf, mt, true, false, 0,
&level, layer, 1 /* num_layers */,