#include "brw_blorp.h"
#include "brw_draw.h"
#include "brw_defines.h"
+#include "compiler/brw_eu_defines.h"
#include "brw_context.h"
#include "brw_state.h"
-#include "brw_vs.h"
#include "intel_batchbuffer.h"
#include "intel_buffers.h"
ADVANCE_BATCH();
} else if (prim->is_indirect) {
struct gl_buffer_object *indirect_buffer = brw->ctx.DrawIndirectBuffer;
- drm_intel_bo *bo = intel_bufferobj_buffer(brw,
+ struct brw_bo *bo = intel_bufferobj_buffer(brw,
intel_buffer_object(indirect_buffer),
- prim->indirect_offset, 5 * sizeof(GLuint));
+ prim->indirect_offset, 5 * sizeof(GLuint), false);
indirect_flag = GEN7_3DPRIM_INDIRECT_PARAMETER_ENABLE;
static void
brw_merge_inputs(struct brw_context *brw,
- const struct gl_client_array *arrays[])
+ const struct gl_vertex_array *arrays[])
{
const struct gl_context *ctx = &brw->ctx;
GLuint i;
for (i = 0; i < brw->vb.nr_buffers; i++) {
- drm_intel_bo_unreference(brw->vb.buffers[i].bo);
+ brw_bo_unreference(brw->vb.buffers[i].bo);
brw->vb.buffers[i].bo = NULL;
}
brw->vb.nr_buffers = 0;
}
if (brw->gen < 8 && !brw->is_haswell) {
- uint64_t mask = ctx->VertexProgram._Current->Base.nir->info->inputs_read;
+ uint64_t mask = ctx->VertexProgram._Current->info.inputs_read;
/* Prior to Haswell, the hardware can't natively support GL_FIXED or
* 2_10_10_10_REV vertex formats. Set appropriate workaround flags.
*/
}
}
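+/* Mark the auxiliary (compression) surface of every color draw buffer that
+ * shares the given BO as disabled for the current draw call. Returns true
+ * if at least one draw buffer matched.
+ */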
+static bool
+intel_disable_rb_aux_buffer(struct brw_context *brw, const struct brw_bo *bo)
+{
+ const struct gl_framebuffer *fb = brw->ctx.DrawBuffer;
+ bool found = false;
+
+ for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
+ const struct intel_renderbuffer *irb =
+ intel_renderbuffer(fb->_ColorDrawBuffers[i]);
+
+ if (irb && irb->mt->bo == bo) {
+ found = brw->draw_aux_buffer_disabled[i] = true;
+ }
+ }
+
+ return found;
+}
+
+/**
+ * \brief Resolve the input buffers before drawing.
+ *
+ * Resolve the depth buffer of each enabled depth texture, resolve the color
+ * buffers of any textures and shader images bound for sampling, and flush
+ * the render cache for any dirty ones. The draw framebuffer itself is
+ * resolved separately in brw_predraw_resolve_framebuffer().
+ */
+void
+brw_predraw_resolve_inputs(struct brw_context *brw)
+{
+ struct gl_context *ctx = &brw->ctx;
+ struct intel_texture_object *tex_obj;
+
+ memset(brw->draw_aux_buffer_disabled, 0,
+ sizeof(brw->draw_aux_buffer_disabled));
+
+ /* Resolve depth buffer and render cache of each enabled texture. */
+ int maxEnabledUnit = ctx->Texture._MaxEnabledTexImageUnit;
+ for (int i = 0; i <= maxEnabledUnit; i++) {
+ if (!ctx->Texture.Unit[i]._Current)
+ continue;
+ tex_obj = intel_texture_object(ctx->Texture.Unit[i]._Current);
+ if (!tex_obj || !tex_obj->mt)
+ continue;
+
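+ /* Prepare the miptree for sampling; aux_supported reports whether the
+ * auxiliary compression surface can stay enabled for this texture format.
+ */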
+ bool aux_supported;
+ intel_miptree_prepare_texture(brw, tex_obj->mt, tex_obj->_Format,
+ &aux_supported);
+
+ if (!aux_supported && brw->gen >= 9 &&
+ intel_disable_rb_aux_buffer(brw, tex_obj->mt->bo)) {
+ perf_debug("Sampling renderbuffer with non-compressible format - "
+ "turning off compression\n");
+ }
+
+ brw_render_cache_set_check_flush(brw, tex_obj->mt->bo);
+
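+ /* Stencil sampling uses a separate R8 shadow copy of the stencil
+ * miptree; make sure it is up to date before sampling.
+ */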
+ if (tex_obj->base.StencilSampling ||
+ tex_obj->mt->format == MESA_FORMAT_S_UINT8) {
+ intel_update_r8stencil(brw, tex_obj->mt);
+ }
+ }
+
+ /* Resolve color for each active shader image. */
+ for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
+ const struct gl_program *prog = ctx->_Shader->CurrentProgram[i];
+
+ if (unlikely(prog && prog->info.num_images)) {
+ for (unsigned j = 0; j < prog->info.num_images; j++) {
+ struct gl_image_unit *u =
+ &ctx->ImageUnits[prog->sh.ImageUnits[j]];
+ tex_obj = intel_texture_object(u->TexObj);
+
+ if (tex_obj && tex_obj->mt) {
+ intel_miptree_prepare_image(brw, tex_obj->mt);
+
+ if (tex_obj->mt->aux_usage == ISL_AUX_USAGE_CCS_E &&
+ intel_disable_rb_aux_buffer(brw, tex_obj->mt->bo)) {
+ perf_debug("Using renderbuffer as shader image - turning "
+ "off lossless compression\n");
+ }
+
+ brw_render_cache_set_check_flush(brw, tex_obj->mt->bo);
+ }
+ }
+ }
+ }
+}
+
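+/**
+ * \brief Resolve the draw framebuffer before drawing.
+ *
+ * Resolve the depth buffer's HiZ buffer, resolve color buffers read via
+ * non-coherent framebuffer fetch, and prepare each color draw buffer for
+ * rendering.
+ */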
+static void
+brw_predraw_resolve_framebuffer(struct brw_context *brw)
+{
+ struct gl_context *ctx = &brw->ctx;
+ struct intel_renderbuffer *depth_irb;
+
+ /* Resolve the depth buffer's HiZ buffer. */
+ depth_irb = intel_get_renderbuffer(ctx->DrawBuffer, BUFFER_DEPTH);
+ if (depth_irb && depth_irb->mt) {
+ intel_miptree_prepare_depth(brw, depth_irb->mt,
+ depth_irb->mt_level,
+ depth_irb->mt_layer,
+ depth_irb->layer_count);
+ }
+
+ /* Resolve color buffers for non-coherent framebuffer fetch. */
+ if (!ctx->Extensions.MESA_shader_framebuffer_fetch &&
+ ctx->FragmentProgram._Current &&
+ ctx->FragmentProgram._Current->info.outputs_read) {
+ const struct gl_framebuffer *fb = ctx->DrawBuffer;
+
+ for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
+ const struct intel_renderbuffer *irb =
+ intel_renderbuffer(fb->_ColorDrawBuffers[i]);
+
+ if (irb) {
+ intel_miptree_prepare_fb_fetch(brw, irb->mt, irb->mt_level,
+ irb->mt_layer, irb->layer_count);
+ }
+ }
+ }
+
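+ /* Prepare each color draw buffer for rendering, resolving as required by
+ * the current sRGB encoding state.
+ */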
+ struct gl_framebuffer *fb = ctx->DrawBuffer;
+ for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
+ struct intel_renderbuffer *irb =
+ intel_renderbuffer(fb->_ColorDrawBuffers[i]);
+
+ if (irb == NULL || irb->mt == NULL)
+ continue;
+
+ intel_miptree_prepare_render(brw, irb->mt, irb->mt_level,
+ irb->mt_layer, irb->layer_count,
+ ctx->Color.sRGBEnabled);
+ }
+}
+
/**
* \brief Call this after drawing to mark which buffers need resolving
*
front_irb->need_downsample = true;
if (back_irb)
back_irb->need_downsample = true;
- if (depth_irb && ctx->Depth.Mask) {
- intel_renderbuffer_att_set_needs_depth_resolve(depth_att);
- brw_render_cache_set_add_bo(brw, depth_irb->mt->bo);
+ if (depth_irb) {
+ bool depth_written = brw_depth_writes_enabled(brw);
+ if (depth_att->Layered) {
+ intel_miptree_finish_depth(brw, depth_irb->mt,
+ depth_irb->mt_level,
+ depth_irb->mt_layer,
+ depth_irb->layer_count,
+ depth_written);
+ } else {
+ intel_miptree_finish_depth(brw, depth_irb->mt,
+ depth_irb->mt_level,
+ depth_irb->mt_layer, 1,
+ depth_written);
+ }
+ if (depth_written)
+ brw_render_cache_set_add_bo(brw, depth_irb->mt->bo);
}
if (ctx->Extensions.ARB_stencil_texturing &&
- stencil_irb && ctx->Stencil._WriteEnabled) {
+ stencil_irb && brw->stencil_write_enabled) {
brw_render_cache_set_add_bo(brw, stencil_irb->mt->bo);
}
struct intel_renderbuffer *irb =
intel_renderbuffer(fb->_ColorDrawBuffers[i]);
- if (irb) {
- brw_render_cache_set_add_bo(brw, irb->mt->bo);
-
- if (intel_miptree_is_lossless_compressed(brw, irb->mt)) {
- irb->mt->fast_clear_state = INTEL_FAST_CLEAR_STATE_UNRESOLVED;
- }
- }
+ if (!irb)
+ continue;
+
+ brw_render_cache_set_add_bo(brw, irb->mt->bo);
+ intel_miptree_finish_render(brw, irb->mt, irb->mt_level,
+ irb->mt_layer, irb->layer_count);
}
}
static void
-brw_predraw_set_aux_buffers(struct brw_context *brw)
+intel_renderbuffer_move_temp_back(struct brw_context *brw,
+ struct intel_renderbuffer *irb)
{
- if (brw->gen < 9)
+ if (irb->align_wa_mt == NULL)
return;
+ brw_render_cache_set_check_flush(brw, irb->align_wa_mt->bo);
+
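+ /* Copy the temporary workaround slice back into its place within the
+ * full miptree.
+ */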
+ intel_miptree_copy_slice(brw, irb->align_wa_mt, 0, 0,
+ irb->mt,
+ irb->Base.Base.TexImage->Level, irb->mt_layer);
+
+ intel_miptree_reference(&irb->align_wa_mt, NULL);
+
+ /* Finally, restore the x,y to correspond to the full miptree. */
+ intel_renderbuffer_set_draw_offset(irb);
+
+ /* Make sure the render surface state gets re-emitted with the updated
+ * miptree.
+ */
+ brw->NewGLState |= _NEW_BUFFERS;
+}
+
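+/* After drawing, copy any slices that the depth/stencil alignment
+ * workaround redirected into temporary miptrees back into their original
+ * miptrees.
+ */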
+static void
+brw_postdraw_reconcile_align_wa_slices(struct brw_context *brw)
+{
struct gl_context *ctx = &brw->ctx;
struct gl_framebuffer *fb = ctx->DrawBuffer;
+ struct intel_renderbuffer *depth_irb =
+ intel_get_renderbuffer(fb, BUFFER_DEPTH);
+ struct intel_renderbuffer *stencil_irb =
+ intel_get_renderbuffer(fb, BUFFER_STENCIL);
+
+ if (depth_irb && depth_irb->align_wa_mt)
+ intel_renderbuffer_move_temp_back(brw, depth_irb);
+
+ if (stencil_irb && stencil_irb->align_wa_mt)
+ intel_renderbuffer_move_temp_back(brw, stencil_irb);
+
for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
struct intel_renderbuffer *irb =
intel_renderbuffer(fb->_ColorDrawBuffers[i]);
- if (!irb) {
+ if (!irb || irb->align_wa_mt == NULL)
continue;
- }
+
+ intel_renderbuffer_move_temp_back(brw, irb);
}
}
*/
static void
brw_try_draw_prims(struct gl_context *ctx,
- const struct gl_client_array *arrays[],
+ const struct gl_vertex_array *arrays[],
const struct _mesa_prim *prims,
GLuint nr_prims,
const struct _mesa_index_buffer *ib,
* index.
*/
brw->wm.base.sampler_count =
- util_last_bit(ctx->FragmentProgram._Current->Base.SamplersUsed);
+ util_last_bit(ctx->FragmentProgram._Current->SamplersUsed);
brw->gs.base.sampler_count = ctx->GeometryProgram._Current ?
- util_last_bit(ctx->GeometryProgram._Current->Base.SamplersUsed) : 0;
+ util_last_bit(ctx->GeometryProgram._Current->SamplersUsed) : 0;
brw->tes.base.sampler_count = ctx->TessEvalProgram._Current ?
util_last_bit(ctx->TessEvalProgram._Current->SamplersUsed) : 0;
brw->tcs.base.sampler_count = ctx->TessCtrlProgram._Current ?
util_last_bit(ctx->TessCtrlProgram._Current->SamplersUsed) : 0;
brw->vs.base.sampler_count =
- util_last_bit(ctx->VertexProgram._Current->Base.SamplersUsed);
+ util_last_bit(ctx->VertexProgram._Current->SamplersUsed);
intel_prepare_render(brw);
- brw_predraw_set_aux_buffers(brw);
/* This workaround has to happen outside of brw_upload_render_state()
* because it may flush the batchbuffer for a blit, affecting the state
*/
brw_workaround_depthstencil_alignment(brw, 0);
+ /* Resolves must occur after updating renderbuffers, updating context state,
+ * and finalizing textures but before setting up any hardware state for
+ * this draw call.
+ */
+ brw_predraw_resolve_inputs(brw);
+ brw_predraw_resolve_framebuffer(brw);
+
/* Bind all inputs, derive varying and size information:
*/
brw_merge_inputs(brw, arrays);
estimated_max_prim_size += 1024; /* gen6 WM push constants */
estimated_max_prim_size += 512; /* misc. pad */
+ /* Flag BRW_NEW_DRAW_CALL on every draw. This allows us to have
+ * atoms that happen on every draw call.
+ */
+ brw->ctx.NewDriverState |= BRW_NEW_DRAW_CALL;
+
/* Flush the batch if it's approaching full, so that we don't wrap while
* we've got validated state that needs to be in the same batch as the
* primitives.
brw->draw.params.gl_basevertex = new_basevertex;
brw->draw.params.gl_baseinstance = new_baseinstance;
- drm_intel_bo_unreference(brw->draw.draw_params_bo);
+ brw_bo_unreference(brw->draw.draw_params_bo);
if (prims[i].is_indirect) {
/* Point draw_params_bo at the indirect buffer. */
brw->draw.draw_params_bo =
intel_buffer_object(ctx->DrawIndirectBuffer)->buffer;
- drm_intel_bo_reference(brw->draw.draw_params_bo);
+ brw_bo_reference(brw->draw.draw_params_bo);
brw->draw.draw_params_offset =
prims[i].indirect_offset + (prims[i].indexed ? 12 : 8);
} else {
* the loop.
*/
brw->draw.gl_drawid = prims[i].draw_id;
- drm_intel_bo_unreference(brw->draw.draw_id_bo);
+ brw_bo_unreference(brw->draw.draw_id_bo);
brw->draw.draw_id_bo = NULL;
if (i > 0 && vs_prog_data->uses_drawid)
brw->ctx.NewDriverState |= BRW_NEW_VERTICES;
brw->no_batch_wrap = false;
- if (dri_bufmgr_check_aperture_space(&brw->batch.bo, 1)) {
+ if (!brw_batch_has_aperture_space(brw, 0)) {
if (!fail_next) {
intel_batchbuffer_reset_to_saved(brw);
intel_batchbuffer_flush(brw);
if (brw->always_flush_batch)
intel_batchbuffer_flush(brw);
- brw_state_cache_check_size(brw);
+ brw_program_cache_check_size(brw);
+ brw_postdraw_reconcile_align_wa_slices(brw);
brw_postdraw_set_buffers_need_resolve(brw);
return;
struct gl_buffer_object *indirect)
{
struct brw_context *brw = brw_context(ctx);
- const struct gl_client_array **arrays = ctx->Array._DrawArrays;
+ const struct gl_vertex_array **arrays = ctx->Array._DrawArrays;
struct brw_transform_feedback_object *xfb_obj =
(struct brw_transform_feedback_object *) gl_xfb_obj;
unsigned i;
for (i = 0; i < brw->vb.nr_buffers; i++) {
- drm_intel_bo_unreference(brw->vb.buffers[i].bo);
+ brw_bo_unreference(brw->vb.buffers[i].bo);
brw->vb.buffers[i].bo = NULL;
}
brw->vb.nr_buffers = 0;
}
brw->vb.nr_enabled = 0;
- drm_intel_bo_unreference(brw->ib.bo);
+ brw_bo_unreference(brw->ib.bo);
brw->ib.bo = NULL;
}