#include "swrast/swrast.h"
#include "swrast_setup/swrast_setup.h"
#include "drivers/common/meta.h"
+#include "util/bitscan.h"
#include "brw_blorp.h"
#include "brw_draw.h"
#include "brw_defines.h"
+#include "compiler/brw_eu_defines.h"
#include "brw_context.h"
#include "brw_state.h"
-#include "brw_vs.h"
#include "intel_batchbuffer.h"
#include "intel_buffers.h"
brw->ctx.NewDriverState |= BRW_NEW_PRIMITIVE;
if (reduced_prim[prim->mode] != brw->reduced_primitive) {
- brw->reduced_primitive = reduced_prim[prim->mode];
- brw->ctx.NewDriverState |= BRW_NEW_REDUCED_PRIMITIVE;
+ brw->reduced_primitive = reduced_prim[prim->mode];
+ brw->ctx.NewDriverState |= BRW_NEW_REDUCED_PRIMITIVE;
}
}
}
static void
brw_emit_prim(struct brw_context *brw,
const struct _mesa_prim *prim,
- uint32_t hw_prim)
+ uint32_t hw_prim,
+ struct brw_transform_feedback_object *xfb_obj,
+ unsigned stream)
{
int verts_per_instance;
int vertex_access_type;
verts_per_instance = prim->count;
/* If nothing to emit, just return. */
- if (verts_per_instance == 0 && !prim->is_indirect)
+ if (verts_per_instance == 0 && !prim->is_indirect && !xfb_obj)
return;
/* If we're set to always flush, do it before and after the primitive emit.
brw_emit_mi_flush(brw);
/* If indirect, emit a bunch of loads from the indirect BO. */
- if (prim->is_indirect) {
+ if (xfb_obj) {
+ indirect_flag = GEN7_3DPRIM_INDIRECT_PARAMETER_ENABLE;
+
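+      /* Read the vertex count for this stream from xfb_obj->prim_count_bo on
+       * the GPU instead of supplying it from the CPU; the remaining 3DPRIM
+       * draw parameters are programmed as immediates below.
+       */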
+ brw_load_register_mem(brw, GEN7_3DPRIM_VERTEX_COUNT,
+ xfb_obj->prim_count_bo,
+ I915_GEM_DOMAIN_VERTEX, 0,
+ stream * sizeof(uint32_t));
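+      /* MI_LOAD_REGISTER_IMM: one command dword plus four register/value
+       * pairs.
+       */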
+ BEGIN_BATCH(9);
+ OUT_BATCH(MI_LOAD_REGISTER_IMM | (9 - 2));
+ OUT_BATCH(GEN7_3DPRIM_INSTANCE_COUNT);
+ OUT_BATCH(prim->num_instances);
+ OUT_BATCH(GEN7_3DPRIM_START_VERTEX);
+ OUT_BATCH(0);
+ OUT_BATCH(GEN7_3DPRIM_BASE_VERTEX);
+ OUT_BATCH(0);
+ OUT_BATCH(GEN7_3DPRIM_START_INSTANCE);
+ OUT_BATCH(0);
+ ADVANCE_BATCH();
+ } else if (prim->is_indirect) {
struct gl_buffer_object *indirect_buffer = brw->ctx.DrawIndirectBuffer;
- drm_intel_bo *bo = intel_bufferobj_buffer(brw,
+ struct brw_bo *bo = intel_bufferobj_buffer(brw,
intel_buffer_object(indirect_buffer),
- prim->indirect_offset, 5 * sizeof(GLuint));
+ prim->indirect_offset, 5 * sizeof(GLuint), false);
indirect_flag = GEN7_3DPRIM_INDIRECT_PARAMETER_ENABLE;
static void
brw_merge_inputs(struct brw_context *brw,
- const struct gl_client_array *arrays[])
+ const struct gl_vertex_array *arrays[])
{
const struct gl_context *ctx = &brw->ctx;
GLuint i;
for (i = 0; i < brw->vb.nr_buffers; i++) {
- drm_intel_bo_unreference(brw->vb.buffers[i].bo);
+ brw_bo_unreference(brw->vb.buffers[i].bo);
brw->vb.buffers[i].bo = NULL;
}
brw->vb.nr_buffers = 0;
}
if (brw->gen < 8 && !brw->is_haswell) {
- struct gl_program *vp = &ctx->VertexProgram._Current->Base;
+ uint64_t mask = ctx->VertexProgram._Current->info.inputs_read;
/* Prior to Haswell, the hardware can't natively support GL_FIXED or
* 2_10_10_10_REV vertex formats. Set appropriate workaround flags.
*/
- for (i = 0; i < VERT_ATTRIB_MAX; i++) {
- if (!(vp->InputsRead & BITFIELD64_BIT(i)))
- continue;
-
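+      /* Scan only the attributes actually read by the vertex program. */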
+ while (mask) {
uint8_t wa_flags = 0;
+ i = u_bit_scan64(&mask);
+
switch (brw->vb.inputs[i].glarray->Type) {
case GL_FIXED:
}
}
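+/* If the given BO is also bound as a color draw buffer, disable that draw
+ * buffer's auxiliary (compression) surface for the current draw so sampling
+ * and rendering see coherent data. Returns true if any draw buffer matched.
+ */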
+static bool
+intel_disable_rb_aux_buffer(struct brw_context *brw, const struct brw_bo *bo)
+{
+ const struct gl_framebuffer *fb = brw->ctx.DrawBuffer;
+ bool found = false;
+
+ for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
+ const struct intel_renderbuffer *irb =
+ intel_renderbuffer(fb->_ColorDrawBuffers[i]);
+
+ if (irb && irb->mt->bo == bo) {
+ found = brw->draw_aux_buffer_disabled[i] = true;
+ }
+ }
+
+ return found;
+}
+
+/**
+ * \brief Resolve texture and image inputs before drawing.
+ *
+ * Resolve the auxiliary surface of each enabled texture and shader image,
+ * and flush the render cache for any dirty textures. The framebuffer itself
+ * is resolved separately in brw_predraw_resolve_framebuffer().
+ */
+void
+brw_predraw_resolve_inputs(struct brw_context *brw)
+{
+ struct gl_context *ctx = &brw->ctx;
+ struct intel_texture_object *tex_obj;
+
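+   /* Start with aux enabled on every draw buffer; conflicts detected below
+    * disable individual buffers via intel_disable_rb_aux_buffer().
+    */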
+ memset(brw->draw_aux_buffer_disabled, 0,
+ sizeof(brw->draw_aux_buffer_disabled));
+
+ /* Resolve depth buffer and render cache of each enabled texture. */
+ int maxEnabledUnit = ctx->Texture._MaxEnabledTexImageUnit;
+ for (int i = 0; i <= maxEnabledUnit; i++) {
+ if (!ctx->Texture.Unit[i]._Current)
+ continue;
+ tex_obj = intel_texture_object(ctx->Texture.Unit[i]._Current);
+ if (!tex_obj || !tex_obj->mt)
+ continue;
+
+ bool aux_supported;
+ intel_miptree_prepare_texture(brw, tex_obj->mt, tex_obj->_Format,
+ &aux_supported);
+
+ if (!aux_supported && brw->gen >= 9 &&
+ intel_disable_rb_aux_buffer(brw, tex_obj->mt->bo)) {
+ perf_debug("Sampling renderbuffer with non-compressible format - "
+                    "turning off compression\n");
+ }
+
+ brw_render_cache_set_check_flush(brw, tex_obj->mt->bo);
+
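+      /* Hardware that can't sample W-tiled stencil directly samples from a
+       * separate R8 copy instead; keep that copy up to date.
+       */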
+ if (tex_obj->base.StencilSampling ||
+ tex_obj->mt->format == MESA_FORMAT_S_UINT8) {
+ intel_update_r8stencil(brw, tex_obj->mt);
+ }
+ }
+
+ /* Resolve color for each active shader image. */
+ for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
+ const struct gl_program *prog = ctx->_Shader->CurrentProgram[i];
+
+ if (unlikely(prog && prog->info.num_images)) {
+ for (unsigned j = 0; j < prog->info.num_images; j++) {
+ struct gl_image_unit *u =
+ &ctx->ImageUnits[prog->sh.ImageUnits[j]];
+ tex_obj = intel_texture_object(u->TexObj);
+
+ if (tex_obj && tex_obj->mt) {
+ intel_miptree_prepare_image(brw, tex_obj->mt);
+
+ if (tex_obj->mt->aux_usage == ISL_AUX_USAGE_CCS_E &&
+ intel_disable_rb_aux_buffer(brw, tex_obj->mt->bo)) {
+ perf_debug("Using renderbuffer as shader image - turning "
+                       "off lossless compression\n");
+ }
+
+ brw_render_cache_set_check_flush(brw, tex_obj->mt->bo);
+ }
+ }
+ }
+ }
+}
+
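+/**
+ * \brief Resolve the framebuffer before drawing.
+ *
+ * Resolve the depth buffer's HiZ buffer, prepare any color buffers read via
+ * non-coherent framebuffer fetch, and prepare each color draw buffer for
+ * rendering.
+ */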
+static void
+brw_predraw_resolve_framebuffer(struct brw_context *brw)
+{
+ struct gl_context *ctx = &brw->ctx;
+ struct intel_renderbuffer *depth_irb;
+
+ /* Resolve the depth buffer's HiZ buffer. */
+ depth_irb = intel_get_renderbuffer(ctx->DrawBuffer, BUFFER_DEPTH);
+ if (depth_irb && depth_irb->mt) {
+ intel_miptree_prepare_depth(brw, depth_irb->mt,
+ depth_irb->mt_level,
+ depth_irb->mt_layer,
+ depth_irb->layer_count);
+ }
+
+ /* Resolve color buffers for non-coherent framebuffer fetch. */
+ if (!ctx->Extensions.MESA_shader_framebuffer_fetch &&
+ ctx->FragmentProgram._Current &&
+ ctx->FragmentProgram._Current->info.outputs_read) {
+ const struct gl_framebuffer *fb = ctx->DrawBuffer;
+
+ for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
+ const struct intel_renderbuffer *irb =
+ intel_renderbuffer(fb->_ColorDrawBuffers[i]);
+
+ if (irb) {
+ intel_miptree_prepare_fb_fetch(brw, irb->mt, irb->mt_level,
+ irb->mt_layer, irb->layer_count);
+ }
+ }
+ }
+
+ struct gl_framebuffer *fb = ctx->DrawBuffer;
+   for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
+ struct intel_renderbuffer *irb =
+ intel_renderbuffer(fb->_ColorDrawBuffers[i]);
+
+ if (irb == NULL || irb->mt == NULL)
+ continue;
+
+ intel_miptree_prepare_render(brw, irb->mt, irb->mt_level,
+ irb->mt_layer, irb->layer_count,
+ ctx->Color.sRGBEnabled);
+ }
+}
+
/**
* \brief Call this after drawing to mark which buffers need resolving
*
front_irb->need_downsample = true;
if (back_irb)
back_irb->need_downsample = true;
- if (depth_irb && ctx->Depth.Mask) {
- intel_renderbuffer_att_set_needs_depth_resolve(depth_att);
- brw_render_cache_set_add_bo(brw, depth_irb->mt->bo);
+ if (depth_irb) {
+ bool depth_written = brw_depth_writes_enabled(brw);
+ if (depth_att->Layered) {
+ intel_miptree_finish_depth(brw, depth_irb->mt,
+ depth_irb->mt_level,
+ depth_irb->mt_layer,
+ depth_irb->layer_count,
+ depth_written);
+ } else {
+ intel_miptree_finish_depth(brw, depth_irb->mt,
+ depth_irb->mt_level,
+ depth_irb->mt_layer, 1,
+ depth_written);
+ }
+ if (depth_written)
+ brw_render_cache_set_add_bo(brw, depth_irb->mt->bo);
}
if (ctx->Extensions.ARB_stencil_texturing &&
- stencil_irb && ctx->Stencil._WriteEnabled) {
+ stencil_irb && brw->stencil_write_enabled) {
brw_render_cache_set_add_bo(brw, stencil_irb->mt->bo);
}
struct intel_renderbuffer *irb =
intel_renderbuffer(fb->_ColorDrawBuffers[i]);
- if (irb)
- brw_render_cache_set_add_bo(brw, irb->mt->bo);
+ if (!irb)
+ continue;
+
+ brw_render_cache_set_add_bo(brw, irb->mt->bo);
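+      /* Record that these slices were rendered so that any resolves needed
+       * by later consumers of the miptree can be tracked.
+       */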
+ intel_miptree_finish_render(brw, irb->mt, irb->mt_level,
+ irb->mt_layer, irb->layer_count);
+ }
+}
+
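+/* Copy the contents of the temporary miptree allocated for the
+ * depth/stencil alignment workaround back into the renderbuffer's real
+ * miptree, and make sure surface state is re-emitted for it.
+ */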
+static void
+intel_renderbuffer_move_temp_back(struct brw_context *brw,
+ struct intel_renderbuffer *irb)
+{
+ if (irb->align_wa_mt == NULL)
+ return;
+
+ brw_render_cache_set_check_flush(brw, irb->align_wa_mt->bo);
+
+ intel_miptree_copy_slice(brw, irb->align_wa_mt, 0, 0,
+ irb->mt,
+ irb->Base.Base.TexImage->Level, irb->mt_layer);
+
+ intel_miptree_reference(&irb->align_wa_mt, NULL);
+
+ /* Finally restore the x,y to correspond to full miptree. */
+ intel_renderbuffer_set_draw_offset(irb);
+
+ /* Make sure render surface state gets re-emitted with updated miptree. */
+ brw->NewGLState |= _NEW_BUFFERS;
+}
+
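+/* After drawing, move any alignment-workaround temporaries for the depth,
+ * stencil, and color buffers back into their real miptrees.
+ */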
+static void
+brw_postdraw_reconcile_align_wa_slices(struct brw_context *brw)
+{
+ struct gl_context *ctx = &brw->ctx;
+ struct gl_framebuffer *fb = ctx->DrawBuffer;
+
+ struct intel_renderbuffer *depth_irb =
+ intel_get_renderbuffer(fb, BUFFER_DEPTH);
+ struct intel_renderbuffer *stencil_irb =
+ intel_get_renderbuffer(fb, BUFFER_STENCIL);
+
+ if (depth_irb && depth_irb->align_wa_mt)
+ intel_renderbuffer_move_temp_back(brw, depth_irb);
+
+ if (stencil_irb && stencil_irb->align_wa_mt)
+ intel_renderbuffer_move_temp_back(brw, stencil_irb);
+
+ for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
+ struct intel_renderbuffer *irb =
+ intel_renderbuffer(fb->_ColorDrawBuffers[i]);
+
+ if (!irb || irb->align_wa_mt == NULL)
+ continue;
+
+ intel_renderbuffer_move_temp_back(brw, irb);
}
}
*/
static void
brw_try_draw_prims(struct gl_context *ctx,
- const struct gl_client_array *arrays[],
+ const struct gl_vertex_array *arrays[],
const struct _mesa_prim *prims,
GLuint nr_prims,
const struct _mesa_index_buffer *ib,
+ bool index_bounds_valid,
GLuint min_index,
GLuint max_index,
+ struct brw_transform_feedback_object *xfb_obj,
+ unsigned stream,
struct gl_buffer_object *indirect)
{
struct brw_context *brw = brw_context(ctx);
if (ctx->NewState)
_mesa_update_state(ctx);
+ /* We have to validate the textures *before* checking for fallbacks;
+ * otherwise, the software fallback won't be able to rely on the
+ * texture state, the firstLevel and lastLevel fields won't be
+ * set in the intel texture object (they'll both be 0), and the
+ * software fallback will segfault if it attempts to access any
+ * texture level other than level 0.
+ */
+ brw_validate_textures(brw);
+
/* Find the highest sampler unit used by each shader program. A bit-count
* won't work since ARB programs use the texture unit number as the sampler
* index.
*/
brw->wm.base.sampler_count =
- _mesa_fls(ctx->FragmentProgram._Current->Base.SamplersUsed);
+ util_last_bit(ctx->FragmentProgram._Current->SamplersUsed);
brw->gs.base.sampler_count = ctx->GeometryProgram._Current ?
- _mesa_fls(ctx->GeometryProgram._Current->Base.SamplersUsed) : 0;
+ util_last_bit(ctx->GeometryProgram._Current->SamplersUsed) : 0;
brw->tes.base.sampler_count = ctx->TessEvalProgram._Current ?
- _mesa_fls(ctx->TessEvalProgram._Current->Base.SamplersUsed) : 0;
+ util_last_bit(ctx->TessEvalProgram._Current->SamplersUsed) : 0;
brw->tcs.base.sampler_count = ctx->TessCtrlProgram._Current ?
- _mesa_fls(ctx->TessCtrlProgram._Current->Base.SamplersUsed) : 0;
+ util_last_bit(ctx->TessCtrlProgram._Current->SamplersUsed) : 0;
brw->vs.base.sampler_count =
- _mesa_fls(ctx->VertexProgram._Current->Base.SamplersUsed);
-
- /* We have to validate the textures *before* checking for fallbacks;
- * otherwise, the software fallback won't be able to rely on the
- * texture state, the firstLevel and lastLevel fields won't be
- * set in the intel texture object (they'll both be 0), and the
- * software fallback will segfault if it attempts to access any
- * texture level other than level 0.
- */
- brw_validate_textures(brw);
+ util_last_bit(ctx->VertexProgram._Current->SamplersUsed);
intel_prepare_render(brw);
*/
brw_workaround_depthstencil_alignment(brw, 0);
+ /* Resolves must occur after updating renderbuffers, updating context state,
+ * and finalizing textures but before setting up any hardware state for
+ * this draw call.
+ */
+ brw_predraw_resolve_inputs(brw);
+ brw_predraw_resolve_framebuffer(brw);
+
/* Bind all inputs, derive varying and size information:
*/
brw_merge_inputs(brw, arrays);
brw->ib.ib = ib;
brw->ctx.NewDriverState |= BRW_NEW_INDICES;
+ brw->vb.index_bounds_valid = index_bounds_valid;
brw->vb.min_index = min_index;
brw->vb.max_index = max_index;
brw->ctx.NewDriverState |= BRW_NEW_VERTICES;
estimated_max_prim_size += 1024; /* gen6 WM push constants */
estimated_max_prim_size += 512; /* misc. pad */
+ /* Flag BRW_NEW_DRAW_CALL on every draw. This allows us to have
+ * atoms that happen on every draw call.
+ */
+ brw->ctx.NewDriverState |= BRW_NEW_DRAW_CALL;
+
/* Flush the batch if it's approaching full, so that we don't wrap while
* we've got validated state that needs to be in the same batch as the
* primitives.
intel_batchbuffer_save_state(brw);
if (brw->num_instances != prims[i].num_instances ||
- brw->basevertex != prims[i].basevertex) {
+ brw->basevertex != prims[i].basevertex ||
+ brw->baseinstance != prims[i].base_instance) {
brw->num_instances = prims[i].num_instances;
brw->basevertex = prims[i].basevertex;
+ brw->baseinstance = prims[i].base_instance;
if (i > 0) { /* For i == 0 we just did this before the loop */
brw->ctx.NewDriverState |= BRW_NEW_VERTICES;
brw_merge_inputs(brw, arrays);
const int new_basevertex =
prims[i].indexed ? prims[i].basevertex : prims[i].start;
const int new_baseinstance = prims[i].base_instance;
+ const struct brw_vs_prog_data *vs_prog_data =
+ brw_vs_prog_data(brw->vs.base.prog_data);
if (i > 0) {
const bool uses_draw_parameters =
- brw->vs.prog_data->uses_basevertex ||
- brw->vs.prog_data->uses_baseinstance;
+ vs_prog_data->uses_basevertex ||
+ vs_prog_data->uses_baseinstance;
if ((uses_draw_parameters && prims[i].is_indirect) ||
- (brw->vs.prog_data->uses_basevertex &&
+ (vs_prog_data->uses_basevertex &&
brw->draw.params.gl_basevertex != new_basevertex) ||
- (brw->vs.prog_data->uses_baseinstance &&
+ (vs_prog_data->uses_baseinstance &&
brw->draw.params.gl_baseinstance != new_baseinstance))
brw->ctx.NewDriverState |= BRW_NEW_VERTICES;
}
brw->draw.params.gl_basevertex = new_basevertex;
brw->draw.params.gl_baseinstance = new_baseinstance;
- drm_intel_bo_unreference(brw->draw.draw_params_bo);
+ brw_bo_unreference(brw->draw.draw_params_bo);
if (prims[i].is_indirect) {
/* Point draw_params_bo at the indirect buffer. */
brw->draw.draw_params_bo =
intel_buffer_object(ctx->DrawIndirectBuffer)->buffer;
- drm_intel_bo_reference(brw->draw.draw_params_bo);
+ brw_bo_reference(brw->draw.draw_params_bo);
brw->draw.draw_params_offset =
prims[i].indirect_offset + (prims[i].indexed ? 12 : 8);
} else {
/* gl_DrawID always needs its own vertex buffer since it's not part of
* the indirect parameter buffer. If the program uses gl_DrawID we need
* to flag BRW_NEW_VERTICES. For the first iteration, we don't have
- * valid brw->vs.prog_data, but we always flag BRW_NEW_VERTICES before
+ * valid vs_prog_data, but we always flag BRW_NEW_VERTICES before
* the loop.
*/
brw->draw.gl_drawid = prims[i].draw_id;
- drm_intel_bo_unreference(brw->draw.draw_id_bo);
+ brw_bo_unreference(brw->draw.draw_id_bo);
brw->draw.draw_id_bo = NULL;
- if (i > 0 && brw->vs.prog_data->uses_drawid)
+ if (i > 0 && vs_prog_data->uses_drawid)
brw->ctx.NewDriverState |= BRW_NEW_VERTICES;
if (brw->gen < 6)
- brw_set_prim(brw, &prims[i]);
+ brw_set_prim(brw, &prims[i]);
else
- gen6_set_prim(brw, &prims[i]);
+ gen6_set_prim(brw, &prims[i]);
retry:
* brw->ctx.NewDriverState.
*/
if (brw->ctx.NewDriverState) {
- brw->no_batch_wrap = true;
- brw_upload_render_state(brw);
+ brw->no_batch_wrap = true;
+ brw_upload_render_state(brw);
}
- brw_emit_prim(brw, &prims[i], brw->primitive);
+ brw_emit_prim(brw, &prims[i], brw->primitive, xfb_obj, stream);
brw->no_batch_wrap = false;
- if (dri_bufmgr_check_aperture_space(&brw->batch.bo, 1)) {
- if (!fail_next) {
- intel_batchbuffer_reset_to_saved(brw);
- intel_batchbuffer_flush(brw);
- fail_next = true;
- goto retry;
- } else {
+ if (!brw_batch_has_aperture_space(brw, 0)) {
+ if (!fail_next) {
+ intel_batchbuffer_reset_to_saved(brw);
+ intel_batchbuffer_flush(brw);
+ fail_next = true;
+ goto retry;
+ } else {
int ret = intel_batchbuffer_flush(brw);
WARN_ONCE(ret == -ENOSPC,
"i965: Single primitive emit exceeded "
"available aperture space\n");
- }
+ }
}
/* Now that we know we haven't run out of aperture space, we can safely
if (brw->always_flush_batch)
intel_batchbuffer_flush(brw);
- brw_state_cache_check_size(brw);
+ brw_program_cache_check_size(brw);
+ brw_postdraw_reconcile_align_wa_slices(brw);
brw_postdraw_set_buffers_need_resolve(brw);
return;
GLboolean index_bounds_valid,
GLuint min_index,
GLuint max_index,
- struct gl_transform_feedback_object *unused_tfb_object,
+ struct gl_transform_feedback_object *gl_xfb_obj,
unsigned stream,
struct gl_buffer_object *indirect)
{
struct brw_context *brw = brw_context(ctx);
- const struct gl_client_array **arrays = ctx->Array._DrawArrays;
-
- assert(unused_tfb_object == NULL);
+ const struct gl_vertex_array **arrays = ctx->Array._DrawArrays;
+ struct brw_transform_feedback_object *xfb_obj =
+ (struct brw_transform_feedback_object *) gl_xfb_obj;
if (!brw_check_conditional_render(brw))
return;
perf_debug("Scanning index buffer to compute index buffer bounds. "
"Use glDrawRangeElements() to avoid this.\n");
vbo_get_minmax_indices(ctx, prims, ib, &min_index, &max_index, nr_prims);
+ index_bounds_valid = true;
}
/* Try drawing with the hardware, but don't do anything else if we can't
* manage it. swrast doesn't support our featureset, so we can't fall back
* to it.
*/
- brw_try_draw_prims(ctx, arrays, prims, nr_prims, ib, min_index, max_index,
- indirect);
+ brw_try_draw_prims(ctx, arrays, prims, nr_prims, ib, index_bounds_valid,
+ min_index, max_index, xfb_obj, stream, indirect);
}
void
unsigned i;
for (i = 0; i < brw->vb.nr_buffers; i++) {
- drm_intel_bo_unreference(brw->vb.buffers[i].bo);
+ brw_bo_unreference(brw->vb.buffers[i].bo);
brw->vb.buffers[i].bo = NULL;
}
brw->vb.nr_buffers = 0;
}
brw->vb.nr_enabled = 0;
- drm_intel_bo_unreference(brw->ib.bo);
+ brw_bo_unreference(brw->ib.bo);
brw->ib.bo = NULL;
}