#include <sys/errno.h>
+#include "main/arrayobj.h"
+#include "main/blend.h"
#include "main/context.h"
#include "main/condrender.h"
#include "main/samplerobj.h"
#include "main/macros.h"
#include "main/transformfeedback.h"
#include "main/framebuffer.h"
+#include "main/varray.h"
#include "tnl/tnl.h"
-#include "vbo/vbo_context.h"
+#include "vbo/vbo.h"
#include "swrast/swrast.h"
#include "swrast_setup/swrast_setup.h"
#include "drivers/common/meta.h"
#include "util/bitscan.h"
+#include "util/bitset.h"
#include "brw_blorp.h"
#include "brw_draw.h"
brw_emit_prim(struct brw_context *brw,
const struct _mesa_prim *prim,
uint32_t hw_prim,
+ bool is_indexed,
+ GLuint num_instances, GLuint base_instance,
struct brw_transform_feedback_object *xfb_obj,
- unsigned stream)
+ unsigned stream,
+ bool is_indirect,
+ GLsizeiptr indirect_offset)
{
+ const struct gen_device_info *devinfo = &brw->screen->devinfo;
int verts_per_instance;
int vertex_access_type;
int indirect_flag;
int start_vertex_location = prim->start;
int base_vertex_location = prim->basevertex;
- if (prim->indexed) {
- vertex_access_type = brw->gen >= 7 ?
+ if (is_indexed) {
+ vertex_access_type = devinfo->gen >= 7 ?
GEN7_3DPRIM_VERTEXBUFFER_ACCESS_RANDOM :
GEN4_3DPRIM_VERTEXBUFFER_ACCESS_RANDOM;
start_vertex_location += brw->ib.start_vertex_offset;
base_vertex_location += brw->vb.start_vertex_bias;
} else {
- vertex_access_type = brw->gen >= 7 ?
+ vertex_access_type = devinfo->gen >= 7 ?
GEN7_3DPRIM_VERTEXBUFFER_ACCESS_SEQUENTIAL :
GEN4_3DPRIM_VERTEXBUFFER_ACCESS_SEQUENTIAL;
start_vertex_location += brw->vb.start_vertex_bias;
}
/* We only need to trim the primitive count on pre-Gen6. */
- if (brw->gen < 6)
+ if (devinfo->gen < 6)
verts_per_instance = trim(prim->mode, prim->count);
else
verts_per_instance = prim->count;
/* If nothing to emit, just return. */
- if (verts_per_instance == 0 && !prim->is_indirect && !xfb_obj)
+ if (verts_per_instance == 0 && !is_indirect && !xfb_obj)
return;
/* If we're set to always flush, do it before and after the primitive emit.
brw_load_register_mem(brw, GEN7_3DPRIM_VERTEX_COUNT,
xfb_obj->prim_count_bo,
- I915_GEM_DOMAIN_VERTEX, 0,
stream * sizeof(uint32_t));
BEGIN_BATCH(9);
OUT_BATCH(MI_LOAD_REGISTER_IMM | (9 - 2));
OUT_BATCH(GEN7_3DPRIM_INSTANCE_COUNT);
- OUT_BATCH(prim->num_instances);
+ OUT_BATCH(num_instances);
OUT_BATCH(GEN7_3DPRIM_START_VERTEX);
OUT_BATCH(0);
OUT_BATCH(GEN7_3DPRIM_BASE_VERTEX);
OUT_BATCH(0);
OUT_BATCH(GEN7_3DPRIM_START_INSTANCE);
OUT_BATCH(0);
ADVANCE_BATCH();
- } else if (prim->is_indirect) {
+ } else if (is_indirect) {
struct gl_buffer_object *indirect_buffer = brw->ctx.DrawIndirectBuffer;
struct brw_bo *bo = intel_bufferobj_buffer(brw,
intel_buffer_object(indirect_buffer),
- prim->indirect_offset, 5 * sizeof(GLuint));
+ indirect_offset, 5 * sizeof(GLuint), false);
indirect_flag = GEN7_3DPRIM_INDIRECT_PARAMETER_ENABLE;
brw_load_register_mem(brw, GEN7_3DPRIM_VERTEX_COUNT, bo,
- I915_GEM_DOMAIN_VERTEX, 0,
- prim->indirect_offset + 0);
+ indirect_offset + 0);
brw_load_register_mem(brw, GEN7_3DPRIM_INSTANCE_COUNT, bo,
- I915_GEM_DOMAIN_VERTEX, 0,
- prim->indirect_offset + 4);
+ indirect_offset + 4);
brw_load_register_mem(brw, GEN7_3DPRIM_START_VERTEX, bo,
- I915_GEM_DOMAIN_VERTEX, 0,
- prim->indirect_offset + 8);
- if (prim->indexed) {
+ indirect_offset + 8);
+ if (is_indexed) {
brw_load_register_mem(brw, GEN7_3DPRIM_BASE_VERTEX, bo,
- I915_GEM_DOMAIN_VERTEX, 0,
- prim->indirect_offset + 12);
+ indirect_offset + 12);
brw_load_register_mem(brw, GEN7_3DPRIM_START_INSTANCE, bo,
- I915_GEM_DOMAIN_VERTEX, 0,
- prim->indirect_offset + 16);
+ indirect_offset + 16);
} else {
brw_load_register_mem(brw, GEN7_3DPRIM_START_INSTANCE, bo,
- I915_GEM_DOMAIN_VERTEX, 0,
- prim->indirect_offset + 12);
- BEGIN_BATCH(3);
- OUT_BATCH(MI_LOAD_REGISTER_IMM | (3 - 2));
- OUT_BATCH(GEN7_3DPRIM_BASE_VERTEX);
- OUT_BATCH(0);
- ADVANCE_BATCH();
+ indirect_offset + 12);
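+ /* The non-indexed (DrawArraysIndirect) parameter layout has only four
+ * fields (count, primCount, first, baseInstance) and no baseVertex, so
+ * clear any stale value left behind by a previous indexed draw.
+ */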
+ brw_load_register_imm32(brw, GEN7_3DPRIM_BASE_VERTEX, 0);
}
} else {
indirect_flag = 0;
}
- BEGIN_BATCH(brw->gen >= 7 ? 7 : 6);
+ BEGIN_BATCH(devinfo->gen >= 7 ? 7 : 6);
- if (brw->gen >= 7) {
+ if (devinfo->gen >= 7) {
const int predicate_enable =
(brw->predicate.state == BRW_PREDICATE_STATE_USE_BIT)
? GEN7_3DPRIM_PREDICATE_ENABLE : 0;
}
OUT_BATCH(verts_per_instance);
OUT_BATCH(start_vertex_location);
- OUT_BATCH(prim->num_instances);
- OUT_BATCH(prim->base_instance);
+ OUT_BATCH(num_instances);
+ OUT_BATCH(base_instance);
OUT_BATCH(base_vertex_location);
ADVANCE_BATCH();
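+/* Release the vertex buffer BO references taken for the previous draw and
+ * mark every enabled vertex input as needing a fresh buffer binding.
+ */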
static void
-brw_merge_inputs(struct brw_context *brw,
- const struct gl_vertex_array *arrays[])
+brw_clear_buffers(struct brw_context *brw)
{
- const struct gl_context *ctx = &brw->ctx;
- GLuint i;
-
- for (i = 0; i < brw->vb.nr_buffers; i++) {
+ for (unsigned i = 0; i < brw->vb.nr_buffers; ++i) {
brw_bo_unreference(brw->vb.buffers[i].bo);
brw->vb.buffers[i].bo = NULL;
}
brw->vb.nr_buffers = 0;
- for (i = 0; i < VERT_ATTRIB_MAX; i++) {
- brw->vb.inputs[i].buffer = -1;
- brw->vb.inputs[i].glarray = arrays[i];
+ for (unsigned i = 0; i < brw->vb.nr_enabled; ++i) {
+ brw->vb.enabled[i]->buffer = -1;
+ }
+#ifndef NDEBUG
+ for (unsigned i = 0; i < VERT_ATTRIB_MAX; i++) {
+ assert(brw->vb.inputs[i].buffer == -1);
}
+#endif
+}
- if (brw->gen < 8 && !brw->is_haswell) {
- uint64_t mask = ctx->VertexProgram._Current->info.inputs_read;
- /* Prior to Haswell, the hardware can't natively support GL_FIXED or
- * 2_10_10_10_REV vertex formats. Set appropriate workaround flags.
- */
- while (mask) {
- uint8_t wa_flags = 0;
- i = u_bit_scan64(&mask);
+static uint8_t get_wa_flags(const struct gl_vertex_format *glformat)
+{
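+ /* Pre-Haswell attribute workaround flags for one vertex format:
+ * GL_FIXED stores the component count so the VS can rescale the values;
+ * 2_10_10_10_REV formats may need sign extension, a BGRA swizzle, and a
+ * normalize or scale fixup. For example, a normalized
+ * GL_INT_2_10_10_10_REV BGRA attribute yields
+ * BRW_ATTRIB_WA_SIGN | BRW_ATTRIB_WA_BGRA | BRW_ATTRIB_WA_NORMALIZE.
+ */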
+ uint8_t wa_flags = 0;
+
+ switch (glformat->Type) {
+ case GL_FIXED:
+ wa_flags = glformat->Size;
+ break;
- switch (brw->vb.inputs[i].glarray->Type) {
+ case GL_INT_2_10_10_10_REV:
+ wa_flags |= BRW_ATTRIB_WA_SIGN;
+ /* fallthrough */
- case GL_FIXED:
- wa_flags = brw->vb.inputs[i].glarray->Size;
- break;
+ case GL_UNSIGNED_INT_2_10_10_10_REV:
+ if (glformat->Format == GL_BGRA)
+ wa_flags |= BRW_ATTRIB_WA_BGRA;
- case GL_INT_2_10_10_10_REV:
- wa_flags |= BRW_ATTRIB_WA_SIGN;
- /* fallthough */
+ if (glformat->Normalized)
+ wa_flags |= BRW_ATTRIB_WA_NORMALIZE;
+ else if (!glformat->Integer)
+ wa_flags |= BRW_ATTRIB_WA_SCALE;
- case GL_UNSIGNED_INT_2_10_10_10_REV:
- if (brw->vb.inputs[i].glarray->Format == GL_BGRA)
- wa_flags |= BRW_ATTRIB_WA_BGRA;
+ break;
+ }
+
+ return wa_flags;
+}
- if (brw->vb.inputs[i].glarray->Normalized)
- wa_flags |= BRW_ATTRIB_WA_NORMALIZE;
- else if (!brw->vb.inputs[i].glarray->Integer)
- wa_flags |= BRW_ATTRIB_WA_SCALE;
- break;
+static void
+brw_merge_inputs(struct brw_context *brw)
+{
+ const struct gen_device_info *devinfo = &brw->screen->devinfo;
+ const struct gl_context *ctx = &brw->ctx;
+
+ if (devinfo->gen < 8 && !devinfo->is_haswell) {
+ /* Prior to Haswell, the hardware can't natively support GL_FIXED or
+ * 2_10_10_10_REV vertex formats. Set appropriate workaround flags.
+ */
+ const struct gl_vertex_array_object *vao = ctx->Array._DrawVAO;
+ const uint64_t vs_inputs = ctx->VertexProgram._Current->info.inputs_read;
+ assert((vs_inputs & ~((uint64_t)VERT_BIT_ALL)) == 0);
+
+ unsigned vaomask = vs_inputs & _mesa_draw_array_bits(ctx);
+ while (vaomask) {
+ const gl_vert_attrib i = u_bit_scan(&vaomask);
+ const uint8_t wa_flags =
+ get_wa_flags(_mesa_draw_array_format(vao, i));
+
+ if (brw->vb.attrib_wa_flags[i] != wa_flags) {
+ brw->vb.attrib_wa_flags[i] = wa_flags;
+ brw->ctx.NewDriverState |= BRW_NEW_VS_ATTRIB_WORKAROUNDS;
}
+ }
+
+ unsigned currmask = vs_inputs & _mesa_draw_current_bits(ctx);
+ while (currmask) {
+ const gl_vert_attrib i = u_bit_scan(&currmask);
+ const uint8_t wa_flags =
+ get_wa_flags(_mesa_draw_current_format(ctx, i));
if (brw->vb.attrib_wa_flags[i] != wa_flags) {
brw->vb.attrib_wa_flags[i] = wa_flags;
brw->ctx.NewDriverState |= BRW_NEW_VS_ATTRIB_WORKAROUNDS;
}
}
+/* Disable auxiliary buffers if a renderbuffer is also bound as a texture
+ * or shader image. This causes a self-dependency, where both rendering
+ * and sampling may concurrently read or write the CCS buffer, causing
+ * incorrect pixels.
+ */
+static bool
+intel_disable_rb_aux_buffer(struct brw_context *brw,
+ bool *draw_aux_buffer_disabled,
+ struct intel_mipmap_tree *tex_mt,
+ unsigned min_level, unsigned num_levels,
+ const char *usage)
+{
+ const struct gl_framebuffer *fb = brw->ctx.DrawBuffer;
+ bool found = false;
+
+ /* We only need to worry about color compression and fast clears. */
+ if (tex_mt->aux_usage != ISL_AUX_USAGE_CCS_D &&
+ tex_mt->aux_usage != ISL_AUX_USAGE_CCS_E)
+ return false;
+
+ for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
+ const struct intel_renderbuffer *irb =
+ intel_renderbuffer(fb->_ColorDrawBuffers[i]);
+
+ if (irb && irb->mt->bo == tex_mt->bo &&
+ irb->mt_level >= min_level &&
+ irb->mt_level < min_level + num_levels) {
+ found = draw_aux_buffer_disabled[i] = true;
+ }
+ }
+
+ if (found) {
+ perf_debug("Disabling CCS because a renderbuffer is also bound %s.\n",
+ usage);
+ }
+
+ return found;
+}
+
+/** Implement the ASTC 5x5 sampler workaround
+ *
+ * Gen9 sampling hardware has a bug where an ASTC 5x5 compressed surface
+ * cannot live in the sampler cache at the same time as an aux compressed
+ * surface. In order to work around the bug we have to stall rendering with a
+ * CS and pixel scoreboard stall (implicit in the CS stall) and invalidate the
+ * texture cache whenever one of ASTC 5x5 or aux compressed may be in the
+ * sampler cache and we're about to render with something which samples from
+ * the other.
+ *
+ * In the case of a single shader which textures from both ASTC 5x5 and
+ * a texture which is CCS or HiZ compressed, we have to resolve the aux
+ * compressed texture prior to rendering. This second part is handled in
+ * brw_predraw_resolve_inputs() below.
+ *
+ * We have observed this issue to affect CCS and HiZ sampling but whether or
+ * not it also affects MCS is unknown. Because MCS has no concept of a
+ * resolve (and doing one would be stupid expensive), we choose to simply
+ * ignore the possibility and hope for the best.
+ */
+static void
+gen9_apply_astc5x5_wa_flush(struct brw_context *brw,
+ enum gen9_astc5x5_wa_tex_type curr_mask)
+{
+ assert(brw->screen->devinfo.gen == 9);
+
+ if (((brw->gen9_astc5x5_wa_tex_mask & GEN9_ASTC5X5_WA_TEX_TYPE_ASTC5x5) &&
+ (curr_mask & GEN9_ASTC5X5_WA_TEX_TYPE_AUX)) ||
+ ((brw->gen9_astc5x5_wa_tex_mask & GEN9_ASTC5X5_WA_TEX_TYPE_AUX) &&
+ (curr_mask & GEN9_ASTC5X5_WA_TEX_TYPE_ASTC5x5))) {
+ brw_emit_pipe_control_flush(brw, PIPE_CONTROL_CS_STALL);
+ brw_emit_pipe_control_flush(brw, PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
+ }
+
+ brw->gen9_astc5x5_wa_tex_mask = curr_mask;
+}
+
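+/* Classify a texture for the ASTC 5x5 workaround: AUX if it uses any
+ * non-MCS auxiliary compression (CCS or HiZ), ASTC5x5 if its format is one
+ * of the ASTC 5x5 formats, 0 otherwise.
+ */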
+static enum gen9_astc5x5_wa_tex_type
+gen9_astc5x5_wa_bits(mesa_format format, enum isl_aux_usage aux_usage)
+{
+ if (aux_usage != ISL_AUX_USAGE_NONE &&
+ aux_usage != ISL_AUX_USAGE_MCS)
+ return GEN9_ASTC5X5_WA_TEX_TYPE_AUX;
+
+ if (format == MESA_FORMAT_RGBA_ASTC_5x5 ||
+ format == MESA_FORMAT_SRGB8_ALPHA8_ASTC_5x5)
+ return GEN9_ASTC5X5_WA_TEX_TYPE_ASTC5x5;
+
+ return 0;
+}
+
+/* Helper for the gen9 ASTC 5x5 workaround. This version exists for BLORP's
+ * use-cases where only a single texture is bound.
+ */
+void
+gen9_apply_single_tex_astc5x5_wa(struct brw_context *brw,
+ mesa_format format,
+ enum isl_aux_usage aux_usage)
+{
+ gen9_apply_astc5x5_wa_flush(brw, gen9_astc5x5_wa_bits(format, aux_usage));
+}
+
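+/* Set a bit in used_for_txf for every texture unit the given program
+ * samples with texelFetch, so the caller can also prepare those miptrees
+ * with the sRGB-decoded view format that texelFetch expects.
+ */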
+static void
+mark_textures_used_for_txf(BITSET_WORD *used_for_txf,
+ const struct gl_program *prog)
+{
+ if (!prog)
+ return;
+
+ uint32_t mask = prog->info.textures_used_by_txf;
+ while (mask) {
+ int s = u_bit_scan(&mask);
+ BITSET_SET(used_for_txf, prog->SamplerUnits[s]);
+ }
+}
+
+/**
+ * \brief Resolve buffers before drawing.
+ *
+ * Resolve the auxiliary data of each enabled texture and shader image, and
+ * flush the render cache for any dirty textures.
+ */
+void
+brw_predraw_resolve_inputs(struct brw_context *brw, bool rendering,
+ bool *draw_aux_buffer_disabled)
+{
+ struct gl_context *ctx = &brw->ctx;
+ struct intel_texture_object *tex_obj;
+
+ BITSET_DECLARE(used_for_txf, MAX_COMBINED_TEXTURE_IMAGE_UNITS);
+ memset(used_for_txf, 0, sizeof(used_for_txf));
+ if (rendering) {
+ mark_textures_used_for_txf(used_for_txf, ctx->VertexProgram._Current);
+ mark_textures_used_for_txf(used_for_txf, ctx->TessCtrlProgram._Current);
+ mark_textures_used_for_txf(used_for_txf, ctx->TessEvalProgram._Current);
+ mark_textures_used_for_txf(used_for_txf, ctx->GeometryProgram._Current);
+ mark_textures_used_for_txf(used_for_txf, ctx->FragmentProgram._Current);
+ } else {
+ mark_textures_used_for_txf(used_for_txf, ctx->ComputeProgram._Current);
+ }
+
+ int maxEnabledUnit = ctx->Texture._MaxEnabledTexImageUnit;
+
+ enum gen9_astc5x5_wa_tex_type astc5x5_wa_bits = 0;
+ if (brw->screen->devinfo.gen == 9) {
+ /* In order to properly implement the ASTC 5x5 workaround for an
+ * arbitrary draw or dispatch call, we have to walk the entire list of
+ * textures looking for ASTC 5x5. If there is any ASTC 5x5 in this draw
+ * call, all aux compressed textures must be resolved and have aux
+ * compression disabled while sampling.
+ */
+ for (int i = 0; i <= maxEnabledUnit; i++) {
+ if (!ctx->Texture.Unit[i]._Current)
+ continue;
+ tex_obj = intel_texture_object(ctx->Texture.Unit[i]._Current);
+ if (!tex_obj || !tex_obj->mt)
+ continue;
+
+ astc5x5_wa_bits |= gen9_astc5x5_wa_bits(tex_obj->_Format,
+ tex_obj->mt->aux_usage);
+ }
+ gen9_apply_astc5x5_wa_flush(brw, astc5x5_wa_bits);
+ }
+
+ /* Resolve depth buffer and render cache of each enabled texture. */
+ for (int i = 0; i <= maxEnabledUnit; i++) {
+ if (!ctx->Texture.Unit[i]._Current)
+ continue;
+ tex_obj = intel_texture_object(ctx->Texture.Unit[i]._Current);
+ if (!tex_obj || !tex_obj->mt)
+ continue;
+
+ struct gl_sampler_object *sampler = _mesa_get_samplerobj(ctx, i);
+ enum isl_format view_format =
+ translate_tex_format(brw, tex_obj->_Format, sampler->sRGBDecode);
+
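+ /* Immutable textures (views / TexStorage) have their level and layer
+ * ranges clamped up front; mutable textures use BaseLevel.._MaxLevel
+ * and all layers.
+ */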
+ unsigned min_level, min_layer, num_levels, num_layers;
+ if (tex_obj->base.Immutable) {
+ min_level = tex_obj->base.MinLevel;
+ num_levels = MIN2(tex_obj->base.NumLevels, tex_obj->_MaxLevel + 1);
+ min_layer = tex_obj->base.MinLayer;
+ num_layers = tex_obj->base.Target != GL_TEXTURE_3D ?
+ tex_obj->base.NumLayers : INTEL_REMAINING_LAYERS;
+ } else {
+ min_level = tex_obj->base.BaseLevel;
+ num_levels = tex_obj->_MaxLevel - tex_obj->base.BaseLevel + 1;
+ min_layer = 0;
+ num_layers = INTEL_REMAINING_LAYERS;
+ }
+
+ if (rendering) {
+ intel_disable_rb_aux_buffer(brw, draw_aux_buffer_disabled,
+ tex_obj->mt, min_level, num_levels,
+ "for sampling");
+ }
+
+ intel_miptree_prepare_texture(brw, tex_obj->mt, view_format,
+ min_level, num_levels,
+ min_layer, num_layers,
+ astc5x5_wa_bits);
+
+ /* If any programs are using it with texelFetch, we may need to also do
+ * a prepare with an sRGB format to ensure texelFetch works "properly".
+ */
+ if (BITSET_TEST(used_for_txf, i)) {
+ enum isl_format txf_format =
+ translate_tex_format(brw, tex_obj->_Format, GL_DECODE_EXT);
+ if (txf_format != view_format) {
+ intel_miptree_prepare_texture(brw, tex_obj->mt, txf_format,
+ min_level, num_levels,
+ min_layer, num_layers,
+ astc5x5_wa_bits);
+ }
+ }
+
+ brw_cache_flush_for_read(brw, tex_obj->mt->bo);
+
+ if (tex_obj->base.StencilSampling ||
+ tex_obj->mt->format == MESA_FORMAT_S_UINT8) {
+ intel_update_r8stencil(brw, tex_obj->mt);
+ }
+
+ if (intel_miptree_has_etc_shadow(brw, tex_obj->mt) &&
+ tex_obj->mt->shadow_needs_update) {
+ intel_miptree_update_etc_shadow_levels(brw, tex_obj->mt);
+ }
+ }
+
+ /* Resolve color for each active shader image. */
+ for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
+ const struct gl_program *prog = ctx->_Shader->CurrentProgram[i];
+
+ if (unlikely(prog && prog->info.num_images)) {
+ for (unsigned j = 0; j < prog->info.num_images; j++) {
+ struct gl_image_unit *u =
+ &ctx->ImageUnits[prog->sh.ImageUnits[j]];
+ tex_obj = intel_texture_object(u->TexObj);
+
+ if (tex_obj && tex_obj->mt) {
+ if (rendering) {
+ intel_disable_rb_aux_buffer(brw, draw_aux_buffer_disabled,
+ tex_obj->mt, 0, ~0,
+ "as a shader image");
+ }
+
+ intel_miptree_prepare_image(brw, tex_obj->mt);
+
+ brw_cache_flush_for_read(brw, tex_obj->mt->bo);
+ }
+ }
+ }
+ }
+}
+
+static void
+brw_predraw_resolve_framebuffer(struct brw_context *brw,
+ bool *draw_aux_buffer_disabled)
+{
+ struct gl_context *ctx = &brw->ctx;
+ struct intel_renderbuffer *depth_irb;
+
+ /* Resolve the depth buffer's HiZ buffer. */
+ depth_irb = intel_get_renderbuffer(ctx->DrawBuffer, BUFFER_DEPTH);
+ if (depth_irb && depth_irb->mt) {
+ intel_miptree_prepare_depth(brw, depth_irb->mt,
+ depth_irb->mt_level,
+ depth_irb->mt_layer,
+ depth_irb->layer_count);
+ }
+
+ /* Resolve color buffers for non-coherent framebuffer fetch. */
+ if (!ctx->Extensions.EXT_shader_framebuffer_fetch &&
+ ctx->FragmentProgram._Current &&
+ ctx->FragmentProgram._Current->info.outputs_read) {
+ const struct gl_framebuffer *fb = ctx->DrawBuffer;
+
+ /* This is only used for non-coherent framebuffer fetch, which only
+ * exists on pre-Gen9 hardware, so we don't need to worry about CCS_E.
+ */
+ assert(brw->screen->devinfo.gen < 9);
+
+ for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
+ const struct intel_renderbuffer *irb =
+ intel_renderbuffer(fb->_ColorDrawBuffers[i]);
+
+ if (irb) {
+ intel_miptree_prepare_texture(brw, irb->mt, irb->mt->surf.format,
+ irb->mt_level, 1,
+ irb->mt_layer, irb->layer_count,
+ brw->gen9_astc5x5_wa_tex_mask);
+ }
+ }
+ }
+
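+ /* Recompute the render aux usage of each color buffer; if it changed
+ * since the previous draw, surface state must be re-emitted, so flag
+ * BRW_NEW_AUX_STATE.
+ */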
+ struct gl_framebuffer *fb = ctx->DrawBuffer;
+ for (int i = 0; i < fb->_NumColorDrawBuffers; i++) {
+ struct intel_renderbuffer *irb =
+ intel_renderbuffer(fb->_ColorDrawBuffers[i]);
+
+ if (irb == NULL || irb->mt == NULL)
+ continue;
+
+ mesa_format mesa_format =
+ _mesa_get_render_format(ctx, intel_rb_format(irb));
+ enum isl_format isl_format = brw_isl_format_for_mesa_format(mesa_format);
+ bool blend_enabled = ctx->Color.BlendEnabled & (1 << i);
+ enum isl_aux_usage aux_usage =
+ intel_miptree_render_aux_usage(brw, irb->mt, isl_format,
+ blend_enabled,
+ draw_aux_buffer_disabled[i]);
+ if (brw->draw_aux_usage[i] != aux_usage) {
+ brw->ctx.NewDriverState |= BRW_NEW_AUX_STATE;
+ brw->draw_aux_usage[i] = aux_usage;
+ }
+
+ intel_miptree_prepare_render(brw, irb->mt, irb->mt_level,
+ irb->mt_layer, irb->layer_count,
+ aux_usage);
+
+ brw_cache_flush_for_render(brw, irb->mt->bo,
+ isl_format, aux_usage);
+ }
+}
+
/**
* \brief Call this after drawing to mark which buffers need resolving
*
* If the depth buffer was written to and if it has an accompanying HiZ
* buffer, then mark that it needs a depth resolve.
*
+ * If the stencil buffer was written to then mark that it may need to be
+ * copied to an R8 texture.
+ *
* If the color buffer is a multisample window system buffer, then
* mark that it needs a downsample.
*
depth_written);
}
if (depth_written)
- brw_render_cache_set_add_bo(brw, depth_irb->mt->bo);
+ brw_depth_cache_add_bo(brw, depth_irb->mt->bo);
}
- if (ctx->Extensions.ARB_stencil_texturing &&
- stencil_irb && ctx->Stencil._WriteEnabled) {
- brw_render_cache_set_add_bo(brw, stencil_irb->mt->bo);
+ if (stencil_irb && brw->stencil_write_enabled) {
+ struct intel_mipmap_tree *stencil_mt =
+ stencil_irb->mt->stencil_mt != NULL ?
+ stencil_irb->mt->stencil_mt : stencil_irb->mt;
+ brw_depth_cache_add_bo(brw, stencil_mt->bo);
+ intel_miptree_finish_write(brw, stencil_mt, stencil_irb->mt_level,
+ stencil_irb->mt_layer,
+ stencil_irb->layer_count, ISL_AUX_USAGE_NONE);
}
for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
if (!irb)
continue;
-
- brw_render_cache_set_add_bo(brw, irb->mt->bo);
+
+ mesa_format mesa_format =
+ _mesa_get_render_format(ctx, intel_rb_format(irb));
+ enum isl_format isl_format = brw_isl_format_for_mesa_format(mesa_format);
+ enum isl_aux_usage aux_usage = brw->draw_aux_usage[i];
+
+ brw_render_cache_add_bo(brw, irb->mt->bo, isl_format, aux_usage);
+
intel_miptree_finish_render(brw, irb->mt, irb->mt_level,
- irb->mt_layer, irb->layer_count);
+ irb->mt_layer, irb->layer_count,
+ aux_usage);
}
}
if (irb->align_wa_mt == NULL)
return;
- brw_render_cache_set_check_flush(brw, irb->align_wa_mt->bo);
+ brw_cache_flush_for_read(brw, irb->align_wa_mt->bo);
intel_miptree_copy_slice(brw, irb->align_wa_mt, 0, 0,
irb->mt,
}
}
-/* May fail if out of video memory for texture or vbo upload, or on
- * fallback conditions.
- */
static void
-brw_try_draw_prims(struct gl_context *ctx,
- const struct gl_vertex_array *arrays[],
- const struct _mesa_prim *prims,
- GLuint nr_prims,
- const struct _mesa_index_buffer *ib,
- bool index_bounds_valid,
- GLuint min_index,
- GLuint max_index,
- struct brw_transform_feedback_object *xfb_obj,
- unsigned stream,
- struct gl_buffer_object *indirect)
+brw_prepare_drawing(struct gl_context *ctx,
+ const struct _mesa_index_buffer *ib,
+ bool index_bounds_valid,
+ GLuint min_index,
+ GLuint max_index)
{
struct brw_context *brw = brw_context(ctx);
- GLuint i;
- bool fail_next = false;
if (ctx->NewState)
_mesa_update_state(ctx);
* index.
*/
brw->wm.base.sampler_count =
- util_last_bit(ctx->FragmentProgram._Current->SamplersUsed);
+ util_last_bit(ctx->FragmentProgram._Current->info.textures_used);
brw->gs.base.sampler_count = ctx->GeometryProgram._Current ?
- util_last_bit(ctx->GeometryProgram._Current->SamplersUsed) : 0;
+ util_last_bit(ctx->GeometryProgram._Current->info.textures_used) : 0;
brw->tes.base.sampler_count = ctx->TessEvalProgram._Current ?
- util_last_bit(ctx->TessEvalProgram._Current->SamplersUsed) : 0;
+ util_last_bit(ctx->TessEvalProgram._Current->info.textures_used) : 0;
brw->tcs.base.sampler_count = ctx->TessCtrlProgram._Current ?
- util_last_bit(ctx->TessCtrlProgram._Current->SamplersUsed) : 0;
+ util_last_bit(ctx->TessCtrlProgram._Current->info.textures_used) : 0;
brw->vs.base.sampler_count =
- util_last_bit(ctx->VertexProgram._Current->SamplersUsed);
+ util_last_bit(ctx->VertexProgram._Current->info.textures_used);
intel_prepare_render(brw);
*/
brw_workaround_depthstencil_alignment(brw, 0);
+ /* Resolves must occur after updating renderbuffers, updating context state,
+ * and finalizing textures but before setting up any hardware state for
+ * this draw call.
+ */
+ bool draw_aux_buffer_disabled[MAX_DRAW_BUFFERS] = { };
+ brw_predraw_resolve_inputs(brw, true, draw_aux_buffer_disabled);
+ brw_predraw_resolve_framebuffer(brw, draw_aux_buffer_disabled);
+
/* Bind all inputs, derive varying and size information:
*/
- brw_merge_inputs(brw, arrays);
+ brw_clear_buffers(brw);
+ brw_merge_inputs(brw);
brw->ib.ib = ib;
brw->ctx.NewDriverState |= BRW_NEW_INDICES;
brw->vb.min_index = min_index;
brw->vb.max_index = max_index;
brw->ctx.NewDriverState |= BRW_NEW_VERTICES;
+}
- for (i = 0; i < nr_prims; i++) {
- int estimated_max_prim_size;
- const int sampler_state_size = 16;
-
- estimated_max_prim_size = 512; /* batchbuffer commands */
- estimated_max_prim_size += BRW_MAX_TEX_UNIT *
- (sampler_state_size + sizeof(struct gen5_sampler_default_color));
- estimated_max_prim_size += 1024; /* gen6 VS push constants */
- estimated_max_prim_size += 1024; /* gen6 WM push constants */
- estimated_max_prim_size += 512; /* misc. pad */
-
- /* Flush the batch if it's approaching full, so that we don't wrap while
- * we've got validated state that needs to be in the same batch as the
- * primitives.
- */
- intel_batchbuffer_require_space(brw, estimated_max_prim_size, RENDER_RING);
- intel_batchbuffer_save_state(brw);
-
- if (brw->num_instances != prims[i].num_instances ||
- brw->basevertex != prims[i].basevertex ||
- brw->baseinstance != prims[i].base_instance) {
- brw->num_instances = prims[i].num_instances;
- brw->basevertex = prims[i].basevertex;
- brw->baseinstance = prims[i].base_instance;
- if (i > 0) { /* For i == 0 we just did this before the loop */
- brw->ctx.NewDriverState |= BRW_NEW_VERTICES;
- brw_merge_inputs(brw, arrays);
- }
- }
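+/* Undo per-draw state once every primitive of the draw call has been
+ * emitted: honor always_flush_batch, mark buffers that need resolves, and
+ * drop the draw-parameter BO references.
+ */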
+static void
+brw_finish_drawing(struct gl_context *ctx)
+{
+ struct brw_context *brw = brw_context(ctx);
- /* Determine if we need to flag BRW_NEW_VERTICES for updating the
- * gl_BaseVertexARB or gl_BaseInstanceARB values. For indirect draw, we
- * always flag if the shader uses one of the values. For direct draws,
- * we only flag if the values change.
- */
- const int new_basevertex =
- prims[i].indexed ? prims[i].basevertex : prims[i].start;
- const int new_baseinstance = prims[i].base_instance;
- const struct brw_vs_prog_data *vs_prog_data =
- brw_vs_prog_data(brw->vs.base.prog_data);
- if (i > 0) {
- const bool uses_draw_parameters =
- vs_prog_data->uses_basevertex ||
- vs_prog_data->uses_baseinstance;
-
- if ((uses_draw_parameters && prims[i].is_indirect) ||
- (vs_prog_data->uses_basevertex &&
- brw->draw.params.gl_basevertex != new_basevertex) ||
- (vs_prog_data->uses_baseinstance &&
- brw->draw.params.gl_baseinstance != new_baseinstance))
- brw->ctx.NewDriverState |= BRW_NEW_VERTICES;
- }
+ if (brw->always_flush_batch)
+ intel_batchbuffer_flush(brw);
+
+ brw_program_cache_check_size(brw);
+ brw_postdraw_reconcile_align_wa_slices(brw);
+ brw_postdraw_set_buffers_need_resolve(brw);
+
+ if (brw->draw.draw_params_count_bo) {
+ brw_bo_unreference(brw->draw.draw_params_count_bo);
+ brw->draw.draw_params_count_bo = NULL;
+ }
- brw->draw.params.gl_basevertex = new_basevertex;
- brw->draw.params.gl_baseinstance = new_baseinstance;
+ if (brw->draw.draw_params_bo) {
brw_bo_unreference(brw->draw.draw_params_bo);
+ brw->draw.draw_params_bo = NULL;
+ }
- if (prims[i].is_indirect) {
- /* Point draw_params_bo at the indirect buffer. */
- brw->draw.draw_params_bo =
- intel_buffer_object(ctx->DrawIndirectBuffer)->buffer;
- brw_bo_reference(brw->draw.draw_params_bo);
- brw->draw.draw_params_offset =
- prims[i].indirect_offset + (prims[i].indexed ? 12 : 8);
- } else {
- /* Set draw_params_bo to NULL so brw_prepare_vertices knows it
- * has to upload gl_BaseVertex and such if they're needed.
- */
- brw->draw.draw_params_bo = NULL;
- brw->draw.draw_params_offset = 0;
- }
+ if (brw->draw.derived_draw_params_bo) {
+ brw_bo_unreference(brw->draw.derived_draw_params_bo);
+ brw->draw.derived_draw_params_bo = NULL;
+ }
+}
- /* gl_DrawID always needs its own vertex buffer since it's not part of
- * the indirect parameter buffer. If the program uses gl_DrawID we need
- * to flag BRW_NEW_VERTICES. For the first iteration, we don't have
- * valid vs_prog_data, but we always flag BRW_NEW_VERTICES before
- * the loop.
- */
- brw->draw.gl_drawid = prims[i].draw_id;
- brw_bo_unreference(brw->draw.draw_id_bo);
- brw->draw.draw_id_bo = NULL;
- if (i > 0 && vs_prog_data->uses_drawid)
- brw->ctx.NewDriverState |= BRW_NEW_VERTICES;
+/**
+ * Implement workarounds for preemption:
+ * - WaDisableMidObjectPreemptionForGSLineStripAdj
+ * - WaDisableMidObjectPreemptionForTrifanOrPolygon
+ * - WaDisableMidObjectPreemptionForLineLoop
+ * - WA#0798
+ */
+static void
+gen9_emit_preempt_wa(struct brw_context *brw,
+ const struct _mesa_prim *prim, GLuint num_instances)
+{
+ bool object_preemption = true;
+ ASSERTED const struct gen_device_info *devinfo = &brw->screen->devinfo;
- if (brw->gen < 6)
- brw_set_prim(brw, &prims[i]);
- else
- gen6_set_prim(brw, &prims[i]);
+ /* Only apply these workarounds for gen9 */
+ assert(devinfo->gen == 9);
-retry:
+ /* WaDisableMidObjectPreemptionForGSLineStripAdj
+ *
+ * WA: Disable mid-draw preemption when draw-call is a linestrip_adj and
+ * GS is enabled.
+ */
+ if (brw->primitive == _3DPRIM_LINESTRIP_ADJ && brw->gs.enabled)
+ object_preemption = false;
+
+ /* WaDisableMidObjectPreemptionForTrifanOrPolygon
+ *
+ * A tri-fan miscompares in the Execlist Preemption test when the cut
+ * index is on a previous context: ending that context, then resuming
+ * another context with a tri-fan or polygon, corrupts the vertex count.
+ * If we preempt again we will cause corruption.
+ *
+ * WA: Disable mid-draw preemption when draw-call has a tri-fan.
+ */
+ if (brw->primitive == _3DPRIM_TRIFAN)
+ object_preemption = false;
+
+ /* WaDisableMidObjectPreemptionForLineLoop
+ *
+ * VF Stats Counters Missing a vertex when preemption enabled.
+ *
+ * WA: Disable mid-draw preemption when the draw uses a lineloop
+ * topology.
+ */
+ if (brw->primitive == _3DPRIM_LINELOOP)
+ object_preemption = false;
+
+ /* WA#0798
+ *
+ * VF is corrupting GAFS data when preempted on an instance boundary and
+ * replayed with instancing enabled.
+ *
+ * WA: Disable preemption when using instancing.
+ */
+ if (num_instances > 1)
+ object_preemption = false;
- /* Note that before the loop, brw->ctx.NewDriverState was set to != 0, and
- * that the state updated in the loop outside of this block is that in
- * *_set_prim or intel_batchbuffer_flush(), which only impacts
- * brw->ctx.NewDriverState.
- */
- if (brw->ctx.NewDriverState) {
- brw->no_batch_wrap = true;
- brw_upload_render_state(brw);
- }
+ brw_enable_obj_preemption(brw, object_preemption);
+}
- brw_emit_prim(brw, &prims[i], brw->primitive, xfb_obj, stream);
+/* May fail if out of video memory for texture or vbo upload, or on
+ * fallback conditions.
+ */
+static void
+brw_draw_single_prim(struct gl_context *ctx,
+ const struct _mesa_prim *prim,
+ unsigned prim_id,
+ bool is_indexed,
+ GLuint num_instances, GLuint base_instance,
+ struct brw_transform_feedback_object *xfb_obj,
+ unsigned stream,
+ GLsizeiptr indirect_offset)
+{
+ struct brw_context *brw = brw_context(ctx);
+ const struct gen_device_info *devinfo = &brw->screen->devinfo;
+ bool fail_next;
+ bool is_indirect = brw->draw.draw_indirect_data != NULL;
- brw->no_batch_wrap = false;
+ /* Flag BRW_NEW_DRAW_CALL on every draw. This allows us to have
+ * atoms that happen on every draw call.
+ */
+ brw->ctx.NewDriverState |= BRW_NEW_DRAW_CALL;
- if (!brw_batch_has_aperture_space(brw, 0)) {
- if (!fail_next) {
- intel_batchbuffer_reset_to_saved(brw);
- intel_batchbuffer_flush(brw);
- fail_next = true;
- goto retry;
- } else {
- int ret = intel_batchbuffer_flush(brw);
- WARN_ONCE(ret == -ENOSPC,
- "i965: Single primitive emit exceeded "
- "available aperture space\n");
- }
+ /* Flush the batch if the batch/state buffers are nearly full. We can
+ * grow them if needed, but this is not free, so we'd like to avoid it.
+ */
+ intel_batchbuffer_require_space(brw, 1500);
+ brw_require_statebuffer_space(brw, 2400);
+ intel_batchbuffer_save_state(brw);
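+ /* If the batch was empty at the save point there is nothing to roll back
+ * to, so a reset-and-retry cannot free any space; fail straight to the
+ * flush-and-warn path on aperture overflow.
+ */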
+ fail_next = intel_batchbuffer_saved_state_is_empty(brw);
+
+ if (brw->num_instances != num_instances ||
+ brw->basevertex != prim->basevertex ||
+ brw->baseinstance != base_instance) {
+ brw->num_instances = num_instances;
+ brw->basevertex = prim->basevertex;
+ brw->baseinstance = base_instance;
+ if (prim_id > 0) { /* For prim_id == 0 we just did this before the loop */
+ brw->ctx.NewDriverState |= BRW_NEW_VERTICES;
+ brw_clear_buffers(brw);
}
+ }
+
+ /* Determine if we need to flag BRW_NEW_VERTICES for updating the
+ * gl_BaseVertexARB or gl_BaseInstanceARB values. For indirect draw, we
+ * always flag if the shader uses one of the values. For direct draws,
+ * we only flag if the values change.
+ */
+ const int new_firstvertex =
+ is_indexed ? prim->basevertex : prim->start;
+ const int new_baseinstance = base_instance;
+ const struct brw_vs_prog_data *vs_prog_data =
+ brw_vs_prog_data(brw->vs.base.prog_data);
+ if (prim_id > 0) {
+ const bool uses_draw_parameters =
+ vs_prog_data->uses_firstvertex ||
+ vs_prog_data->uses_baseinstance;
+
+ if ((uses_draw_parameters && is_indirect) ||
+ (vs_prog_data->uses_firstvertex &&
+ brw->draw.params.firstvertex != new_firstvertex) ||
+ (vs_prog_data->uses_baseinstance &&
+ brw->draw.params.gl_baseinstance != new_baseinstance))
+ brw->ctx.NewDriverState |= BRW_NEW_VERTICES;
+ }
- /* Now that we know we haven't run out of aperture space, we can safely
- * reset the dirty bits.
+ brw->draw.params.firstvertex = new_firstvertex;
+ brw->draw.params.gl_baseinstance = new_baseinstance;
+ brw_bo_unreference(brw->draw.draw_params_bo);
+
+ if (is_indirect) {
+ /* Point draw_params_bo at the indirect buffer. */
+ brw->draw.draw_params_bo =
+ intel_buffer_object(ctx->DrawIndirectBuffer)->buffer;
+ brw_bo_reference(brw->draw.draw_params_bo);
+ brw->draw.draw_params_offset =
+ indirect_offset + (is_indexed ? 12 : 8);
+ } else {
+ /* Set draw_params_bo to NULL so brw_prepare_vertices knows it
+ * has to upload gl_BaseVertex and such if they're needed.
*/
- if (brw->ctx.NewDriverState)
- brw_render_state_finished(brw);
+ brw->draw.draw_params_bo = NULL;
+ brw->draw.draw_params_offset = 0;
}
- if (brw->always_flush_batch)
- intel_batchbuffer_flush(brw);
+ /* gl_DrawID always needs its own vertex buffer since it's not part of
+ * the indirect parameter buffer. Same for is_indexed_draw, which shares
+ * the buffer with gl_DrawID. If the program uses gl_DrawID, we need to
+ * flag BRW_NEW_VERTICES. For the first iteration, we don't have valid
+ * vs_prog_data, but we always flag BRW_NEW_VERTICES before the loop.
+ */
+ if (prim_id > 0 && vs_prog_data->uses_drawid)
+ brw->ctx.NewDriverState |= BRW_NEW_VERTICES;
- brw_program_cache_check_size(brw);
- brw_postdraw_reconcile_align_wa_slices(brw);
- brw_postdraw_set_buffers_need_resolve(brw);
+ brw->draw.derived_params.gl_drawid = prim->draw_id;
+ brw->draw.derived_params.is_indexed_draw = is_indexed ? ~0 : 0;
+
+ brw_bo_unreference(brw->draw.derived_draw_params_bo);
+ brw->draw.derived_draw_params_bo = NULL;
+ brw->draw.derived_draw_params_offset = 0;
+
+ if (devinfo->gen < 6)
+ brw_set_prim(brw, prim);
+ else
+ gen6_set_prim(brw, prim);
+
+retry:
+
+ /* Note that before the loop, brw->ctx.NewDriverState was set to != 0, and
+ * the only state updated between iterations outside of this block is by
+ * *_set_prim or intel_batchbuffer_flush(), which only affects
+ * brw->ctx.NewDriverState.
+ */
+ if (brw->ctx.NewDriverState) {
+ brw->batch.no_wrap = true;
+ brw_upload_render_state(brw);
+ }
+
+ if (devinfo->gen == 9)
+ gen9_emit_preempt_wa(brw, prim, num_instances);
+
+ brw_emit_prim(brw, prim, brw->primitive, is_indexed, num_instances,
+ base_instance, xfb_obj, stream, is_indirect,
+ indirect_offset);
+
+ brw->batch.no_wrap = false;
+
+ if (!brw_batch_has_aperture_space(brw, 0)) {
+ if (!fail_next) {
+ intel_batchbuffer_reset_to_saved(brw);
+ intel_batchbuffer_flush(brw);
+ fail_next = true;
+ goto retry;
+ } else {
+ int ret = intel_batchbuffer_flush(brw);
+ WARN_ONCE(ret == -ENOSPC,
+ "i965: Single primitive emit exceeded "
+ "available aperture space\n");
+ }
+ }
+
+ /* Now that we know we haven't run out of aperture space, we can safely
+ * reset the dirty bits.
+ */
+ if (brw->ctx.NewDriverState)
+ brw_render_state_finished(brw);
return;
}
+
+
void
brw_draw_prims(struct gl_context *ctx,
const struct _mesa_prim *prims,
GLuint nr_prims,
const struct _mesa_index_buffer *ib,
GLboolean index_bounds_valid,
GLuint min_index,
GLuint max_index,
+ GLuint num_instances,
+ GLuint base_instance,
struct gl_transform_feedback_object *gl_xfb_obj,
- unsigned stream,
- struct gl_buffer_object *indirect)
+ unsigned stream)
{
+ unsigned i;
struct brw_context *brw = brw_context(ctx);
- const struct gl_vertex_array **arrays = ctx->Array._DrawArrays;
+ int predicate_state = brw->predicate.state;
struct brw_transform_feedback_object *xfb_obj =
(struct brw_transform_feedback_object *) gl_xfb_obj;
return;
/* Handle primitive restart if needed */
- if (brw_handle_primitive_restart(ctx, prims, nr_prims, ib, indirect)) {
+ if (brw_handle_primitive_restart(ctx, prims, nr_prims, ib, num_instances,
+ base_instance)) {
/* The draw was handled, so we can exit now */
return;
}
_mesa_enum_to_string(ctx->RenderMode));
_swsetup_Wakeup(ctx);
_tnl_wakeup(ctx);
- _tnl_draw_prims(ctx, prims, nr_prims, ib,
- index_bounds_valid, min_index, max_index, NULL, 0, NULL);
+ _tnl_draw(ctx, prims, nr_prims, ib, index_bounds_valid, min_index,
+ max_index, num_instances, base_instance, NULL, 0);
return;
}
* get the minimum and maximum of their index buffer so we know what range
* to upload.
*/
- if (!index_bounds_valid && !vbo_all_varyings_in_vbos(arrays)) {
+ if (!index_bounds_valid && _mesa_draw_user_array_bits(ctx) != 0) {
perf_debug("Scanning index buffer to compute index buffer bounds. "
"Use glDrawRangeElements() to avoid this.\n");
vbo_get_minmax_indices(ctx, prims, ib, &min_index, &max_index, nr_prims);
index_bounds_valid = true;
}
+ brw_prepare_drawing(ctx, ib, index_bounds_valid, min_index, max_index);
/* Try drawing with the hardware, but don't do anything else if we can't
* manage it. swrast doesn't support our featureset, so we can't fall back
* to it.
*/
- brw_try_draw_prims(ctx, arrays, prims, nr_prims, ib, index_bounds_valid,
- min_index, max_index, xfb_obj, stream, indirect);
+
+ for (i = 0; i < nr_prims; i++) {
+ /* Implementation of ARB_indirect_parameters via predicates */
+ if (brw->draw.draw_params_count_bo) {
+ brw_emit_pipe_control_flush(brw, PIPE_CONTROL_FLUSH_ENABLE);
+
+ /* Upload the current draw count from the draw parameters buffer to
+ * MI_PREDICATE_SRC0.
+ */
+ brw_load_register_mem(brw, MI_PREDICATE_SRC0,
+ brw->draw.draw_params_count_bo,
+ brw->draw.draw_params_count_offset);
+ /* Zero the upper 32 bits of MI_PREDICATE_SRC0 */
+ brw_load_register_imm32(brw, MI_PREDICATE_SRC0 + 4, 0);
+ /* Upload the id of the current primitive to MI_PREDICATE_SRC1. */
+ brw_load_register_imm64(brw, MI_PREDICATE_SRC1, prims[i].draw_id);
+
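+ /* The predicate enables the draw while draw_id differs from the draw
+ * count read from the parameters buffer: the first MI_PREDICATE uses
+ * LOADINV+SET, and later ones XOR in the equality result, so the
+ * predicate flips to disabled once draw_id reaches the count and then
+ * stays disabled.
+ */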
+ BEGIN_BATCH(1);
+ if (i == 0 && brw->predicate.state != BRW_PREDICATE_STATE_USE_BIT) {
+ OUT_BATCH(GEN7_MI_PREDICATE | MI_PREDICATE_LOADOP_LOADINV |
+ MI_PREDICATE_COMBINEOP_SET |
+ MI_PREDICATE_COMPAREOP_SRCS_EQUAL);
+ } else {
+ OUT_BATCH(GEN7_MI_PREDICATE |
+ MI_PREDICATE_LOADOP_LOAD | MI_PREDICATE_COMBINEOP_XOR |
+ MI_PREDICATE_COMPAREOP_SRCS_EQUAL);
+ }
+ ADVANCE_BATCH();
+
+ brw->predicate.state = BRW_PREDICATE_STATE_USE_BIT;
+ }
+
+ brw_draw_single_prim(ctx, &prims[i], i, ib != NULL, num_instances,
+ base_instance, xfb_obj, stream,
+ brw->draw.draw_indirect_offset +
+ brw->draw.draw_indirect_stride * i);
+ }
+
+ brw_finish_drawing(ctx);
+ brw->predicate.state = predicate_state;
}
void
-brw_draw_init(struct brw_context *brw)
+brw_draw_indirect_prims(struct gl_context *ctx,
+ GLuint mode,
+ struct gl_buffer_object *indirect_data,
+ GLsizeiptr indirect_offset,
+ unsigned draw_count,
+ unsigned stride,
+ struct gl_buffer_object *indirect_params,
+ GLsizeiptr indirect_params_offset,
+ const struct _mesa_index_buffer *ib)
{
- struct gl_context *ctx = &brw->ctx;
- struct vbo_context *vbo = vbo_context(ctx);
+ struct brw_context *brw = brw_context(ctx);
+ struct _mesa_prim *prim;
+ GLsizei i;
+
+ prim = calloc(draw_count, sizeof(*prim));
+ if (prim == NULL) {
+ _mesa_error(ctx, GL_OUT_OF_MEMORY, "gl%sDraw%sIndirect%s",
+ (draw_count > 1) ? "Multi" : "",
+ ib ? "Elements" : "Arrays",
+ indirect_params ? "CountARB" : "");
+ return;
+ }
+
+ brw->draw.draw_indirect_stride = stride;
+ brw->draw.draw_indirect_offset = indirect_offset;
+
+ prim[0].begin = 1;
+ prim[draw_count - 1].end = 1;
+ for (i = 0; i < draw_count; ++i) {
+ prim[i].mode = mode;
+ prim[i].draw_id = i;
+ }
+
+ if (indirect_params) {
+ brw->draw.draw_params_count_bo =
+ intel_buffer_object(indirect_params)->buffer;
+ brw_bo_reference(brw->draw.draw_params_count_bo);
+ brw->draw.draw_params_count_offset = indirect_params_offset;
+ }
+
+ brw->draw.draw_indirect_data = indirect_data;
+
+ brw_draw_prims(ctx, prim, draw_count, ib, false, 0, ~0, 0, 0, NULL, 0);
+ brw->draw.draw_indirect_data = NULL;
+ free(prim);
+}
+
+void
+brw_init_draw_functions(struct dd_function_table *functions)
+{
/* Register our drawing function:
*/
- vbo->draw_prims = brw_draw_prims;
+ functions->Draw = brw_draw_prims;
+ functions->DrawIndirect = brw_draw_indirect_prims;
+}
+
+void
+brw_draw_init(struct brw_context *brw)
+{
for (int i = 0; i < VERT_ATTRIB_MAX; i++)
brw->vb.inputs[i].buffer = -1;
brw->vb.nr_buffers = 0;