#include "main/imports.h"
-#include "main/api_noop.h"
#include "main/macros.h"
#include "main/simple_list.h"
+
+#include "vbo/vbo_context.h"
+
#include "brw_context.h"
#include "brw_defines.h"
#include "brw_draw.h"
#include "brw_state.h"
+
+#include "gen6_hiz.h"
+
+#include "intel_fbo.h"
+#include "intel_mipmap_tree.h"
+#include "intel_regions.h"
#include "intel_span.h"
+#include "intel_tex.h"
+#include "intel_tex_obj.h"
+
#include "tnl/t_pipeline.h"
#include "glsl/ralloc.h"
* Mesa's Driver Functions
***************************************/
-static void brwInitDriverFunctions( struct dd_function_table *functions )
+/**
+ * \brief Prepare for entry into glBegin/glEnd block.
+ *
+ * Resolve buffers before entering a glBegin/glEnd block. This is
+ * necessary to prevent recursive calls to FLUSH_VERTICES.
+ *
+ * This resolves the depth buffer of each enabled depth texture and the HiZ
+ * buffer of the attached depth renderbuffer.
+ *
+ * Details
+ * -------
+ * When vertices are queued during a glBegin/glEnd block, those vertices must
+ * be drawn before any rendering state changes. To ensure this, Mesa calls
+ * FLUSH_VERTICES as a prehook to such state changes. Therefore,
+ * FLUSH_VERTICES itself cannot change rendering state without falling into a
+ * recursive trap.
+ *
+ * This precludes meta-ops, namely buffer resolves, from occurring while any
+ * vertices are queued. To prevent that situation, we resolve some buffers on
+ * entering a glBegin/glEnd block.
+ *
+ * \see brwCleanupExecEnd()
+ */
+static void brwPrepareExecBegin(struct gl_context *ctx)
+{
+   struct brw_context *brw = brw_context(ctx);
+   struct intel_context *intel = &brw->intel;
+   struct intel_renderbuffer *draw_irb;
+   struct intel_texture_object *tex_obj;
+
+   if (!intel->has_hiz) {
+      /* The context uses no feature that requires buffer resolves. */
+      return;
+   }
+
+   /* Resolve the depth buffer of each enabled texture unit's bound texture,
+    * so no resolve is needed while vertices are queued.
+    */
+   for (int i = 0; i < ctx->Const.MaxTextureImageUnits; i++) {
+      if (!ctx->Texture.Unit[i]._ReallyEnabled)
+         continue;
+      tex_obj = intel_texture_object(ctx->Texture.Unit[i]._Current);
+      /* Skip units with no miptree; there is nothing to resolve. */
+      if (!tex_obj || !tex_obj->mt)
+         continue;
+      intel_miptree_all_slices_resolve_depth(intel, tex_obj->mt);
+   }
+
+   /* Resolve the HiZ buffer of the attached depth renderbuffer, if any. */
+   draw_irb = intel_get_renderbuffer(ctx->DrawBuffer, BUFFER_DEPTH);
+   if (draw_irb) {
+      intel_renderbuffer_resolve_hiz(intel, draw_irb);
+   }
+}
+
+/**
+ * \brief Populate the dd_function_table with Gen4+ driver hooks.
+ *
+ * Installs the shared Intel and brw-specific driver functions, the
+ * glBegin/glEnd resolve prehook (brwPrepareExecBegin), and the transform
+ * feedback hooks. The EndTransformFeedback hook is chosen per generation:
+ * Gen7+ uses gen7_end_transform_feedback, earlier parts use the brw variant.
+ */
+static void brwInitDriverFunctions(struct intel_screen *screen,
+                                   struct dd_function_table *functions)
 {
    intelInitDriverFunctions( functions );
    brwInitFragProgFuncs( functions );
    brw_init_queryobj_functions(functions);
+
+   functions->PrepareExecBegin = brwPrepareExecBegin;
+   functions->BeginTransformFeedback = brw_begin_transform_feedback;
+
+   if (screen->gen >= 7)
+      functions->EndTransformFeedback = gen7_end_transform_feedback;
+   else
+      functions->EndTransformFeedback = brw_end_transform_feedback;
 }
bool
__DRIcontext *driContextPriv,
void *sharedContextPrivate)
{
+ __DRIscreen *sPriv = driContextPriv->driScreenPriv;
+ struct intel_screen *screen = sPriv->driverPrivate;
struct dd_function_table functions;
struct brw_context *brw = rzalloc(NULL, struct brw_context);
struct intel_context *intel = &brw->intel;
return false;
}
- brwInitVtbl( brw );
- brwInitDriverFunctions( &functions );
+ brwInitDriverFunctions(screen, &functions);
if (!intelInitContext( intel, api, mesaVis, driContextPriv,
sharedContextPrivate, &functions )) {
return false;
}
+ brwInitVtbl( brw );
+
+ brw_init_surface_formats(brw);
+
/* Initialize swrast, tnl driver tables: */
intelInitSpanFuncs(ctx);
ctx->Const.MaxTextureCoordUnits = 8; /* Mesa limit */
ctx->Const.MaxTextureUnits = MIN2(ctx->Const.MaxTextureCoordUnits,
ctx->Const.MaxTextureImageUnits);
- ctx->Const.MaxVertexTextureImageUnits = 0; /* no vertex shader textures */
+ ctx->Const.MaxVertexTextureImageUnits = BRW_MAX_TEX_UNIT;
ctx->Const.MaxCombinedTextureImageUnits =
ctx->Const.MaxVertexTextureImageUnits +
ctx->Const.MaxTextureImageUnits;
ctx->Const.MaxTextureMaxAnisotropy = 16.0;
+ /* Hardware only supports a limited number of transform feedback buffers.
+ * So we need to override the Mesa default (which is based only on software
+ * limits).
+ */
+ ctx->Const.MaxTransformFeedbackSeparateAttribs = BRW_MAX_SOL_BUFFERS;
+
+ /* On Gen6, in the worst case, we use up one binding table entry per
+ * transform feedback component (see comments above the definition of
+ * BRW_MAX_SOL_BINDINGS, in brw_context.h), so we need to advertise a value
+ * for MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS equal to
+ * BRW_MAX_SOL_BINDINGS.
+ *
+ * In "separate components" mode, we need to divide this value by
+ * BRW_MAX_SOL_BUFFERS, so that the total number of binding table entries
+ * used up by all buffers will not exceed BRW_MAX_SOL_BINDINGS.
+ */
+ ctx->Const.MaxTransformFeedbackInterleavedComponents = BRW_MAX_SOL_BINDINGS;
+ ctx->Const.MaxTransformFeedbackSeparateComponents =
+ BRW_MAX_SOL_BINDINGS / BRW_MAX_SOL_BUFFERS;
+
/* if conformance mode is set, swrast can handle any size AA point */
ctx->Const.MaxPointSizeAA = 255.0;
/* We want the GLSL compiler to emit code that uses condition codes */
for (i = 0; i <= MESA_SHADER_FRAGMENT; i++) {
+ ctx->ShaderCompilerOptions[i].MaxIfDepth = intel->gen < 6 ? 16 : UINT_MAX;
ctx->ShaderCompilerOptions[i].EmitCondCodes = true;
ctx->ShaderCompilerOptions[i].EmitNVTempInitialization = true;
ctx->ShaderCompilerOptions[i].EmitNoNoise = true;
/* WM maximum threads is number of EUs times number of threads per EU. */
if (intel->gen >= 7) {
if (intel->gt == 1) {
- brw->wm_max_threads = 86;
- brw->vs_max_threads = 36;
+ brw->max_wm_threads = 86;
+ brw->max_vs_threads = 36;
+ brw->max_gs_threads = 36;
brw->urb.size = 128;
brw->urb.max_vs_entries = 512;
brw->urb.max_gs_entries = 192;
} else if (intel->gt == 2) {
- brw->wm_max_threads = 86;
- brw->vs_max_threads = 128;
+ brw->max_wm_threads = 86;
+ brw->max_vs_threads = 128;
+ brw->max_gs_threads = 128;
brw->urb.size = 256;
brw->urb.max_vs_entries = 704;
brw->urb.max_gs_entries = 320;
* disabling of WIZ hashing (bit 6 of GT_MODE, 0x20d0) and a
* GPU reset to change.
*/
- brw->wm_max_threads = 40;
- brw->vs_max_threads = 60;
+ brw->max_wm_threads = 40;
+ brw->max_vs_threads = 60;
+ brw->max_gs_threads = 60;
brw->urb.size = 64; /* volume 5c.5 section 5.1 */
brw->urb.max_vs_entries = 256; /* volume 2a (see 3DSTATE_URB) */
+ brw->urb.max_gs_entries = 256;
} else {
- brw->wm_max_threads = 40;
- brw->vs_max_threads = 24;
+ brw->max_wm_threads = 40;
+ brw->max_vs_threads = 24;
+ brw->max_gs_threads = 21; /* conservative; 24 if rendering disabled */
brw->urb.size = 32; /* volume 5c.5 section 5.1 */
brw->urb.max_vs_entries = 128; /* volume 2a (see 3DSTATE_URB) */
+ brw->urb.max_gs_entries = 256;
}
+ brw->urb.gen6_gs_previously_active = false;
} else if (intel->gen == 5) {
brw->urb.size = 1024;
- brw->vs_max_threads = 72;
- brw->wm_max_threads = 12 * 6;
+ brw->max_vs_threads = 72;
+ brw->max_gs_threads = 32;
+ brw->max_wm_threads = 12 * 6;
} else if (intel->is_g4x) {
brw->urb.size = 384;
- brw->vs_max_threads = 32;
- brw->wm_max_threads = 10 * 5;
+ brw->max_vs_threads = 32;
+ brw->max_gs_threads = 2;
+ brw->max_wm_threads = 10 * 5;
} else if (intel->gen < 6) {
brw->urb.size = 256;
- brw->vs_max_threads = 16;
- brw->wm_max_threads = 8 * 4;
+ brw->max_vs_threads = 16;
+ brw->max_gs_threads = 2;
+ brw->max_wm_threads = 8 * 4;
brw->has_negative_rhw_bug = true;
}
- if (INTEL_DEBUG & DEBUG_SINGLE_THREAD) {
- brw->vs_max_threads = 1;
- brw->wm_max_threads = 1;
- }
-
brw_init_state( brw );
brw->curbe.last_buf = calloc(1, 4096);
brw_draw_init( brw );
brw->new_vs_backend = (getenv("INTEL_OLD_VS") == NULL);
+ brw->precompile = driQueryOptionb(&intel->optionCache, "shader_precompile");
/* If we're using the new shader backend, we require integer uniforms
* stored as actual integers.