#include "brw_context.h"
#include "brw_defines.h"
+#include "brw_blorp.h"
#include "brw_compiler.h"
#include "brw_draw.h"
#include "brw_state.h"
#include "tnl/t_pipeline.h"
#include "util/ralloc.h"
#include "util/debug.h"
+#include "isl/isl.h"
/***************************************
* Mesa's Driver Functions
const char *const brw_vendor_string = "Intel Open Source Technology Center";
+/* Map a Braswell (Cherryview) screen to its marketing model suffix.
+ *
+ * All Braswell parts share one PCI device ID (0x22B1), so the renderer
+ * string can only be disambiguated by the fused EU count reported by the
+ * kernel: 16 EUs -> "405", 12 EUs -> "400".  The caller memcpy's the
+ * returned string over a 3-character "XXX" placeholder.
+ *
+ * NOTE(review): the default case appears to return a string shorter than
+ * 3 bytes here; since the caller copies exactly 3 bytes, confirm the
+ * fallback literal is three spaces ("   ") to avoid reading past it.
+ */
+static const char *
+get_bsw_model(const struct intel_screen *intelScreen)
+{
+ switch (intelScreen->eu_total) {
+ case 16:
+ return "405";
+ case 12:
+ return "400";
+ default:
+ return " ";
+ }
+}
+
+/* Return the GL_RENDERER string for this screen.
+ *
+ * The base name comes from the PCI-ID table; Braswell needs a fixup
+ * because all of its SKUs share device ID 0x22B1.  Returns a pointer to
+ * a static buffer, valid until the next call (not thread-safe).
+ */
const char *
-brw_get_renderer_string(unsigned deviceID)
+brw_get_renderer_string(const struct intel_screen *intelScreen)
{
const char *chipset;
static char buffer[128];
+ char *bsw = NULL;
- switch (deviceID) {
+ switch (intelScreen->deviceID) {
#undef CHIPSET
+ /* Expand the PCI-ID table into "case id: chipset = str;" arms. */
#define CHIPSET(id, symbol, str) case id: chipset = str; break;
#include "pci_ids/i965_pci_ids.h"
break;
}
+ /* Braswell branding is funny, so we have to fix it up here */
+ if (intelScreen->deviceID == 0x22B1) {
+ /* NOTE(review): strdup() returns NULL on allocation failure, and
+ * strstr(NULL, ...) would then crash — consider a NULL check here.
+ */
+ bsw = strdup(chipset);
+ char *needle = strstr(bsw, "XXX");
+ if (needle) {
+ /* Overwrite the 3-char "XXX" placeholder with the model number. */
+ memcpy(needle, get_bsw_model(intelScreen), 3);
+ chipset = bsw;
+ }
+ }
+
(void) driGetRendererString(buffer, chipset, 0);
+ free(bsw);
return buffer;
}
case GL_RENDERER:
return
- (GLubyte *) brw_get_renderer_string(brw->intelScreen->deviceID);
+ (GLubyte *) brw_get_renderer_string(brw->intelScreen);
default:
return NULL;
fb->DefaultGeometry.NumSamples);
}
+/* Disable use of the auxiliary (compression) buffer for any currently
+ * bound color draw buffer that is backed by 'bo', recording the decision
+ * in brw->draw_aux_buffer_disabled[] for this draw.
+ *
+ * Returns true if at least one bound color draw buffer shares 'bo',
+ * i.e. the surface being sampled is simultaneously a render target.
+ */
+static bool
+intel_disable_rb_aux_buffer(struct brw_context *brw, const drm_intel_bo *bo)
+{
+ const struct gl_framebuffer *fb = brw->ctx.DrawBuffer;
+ bool found = false;
+
+ for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
+ const struct intel_renderbuffer *irb =
+ intel_renderbuffer(fb->_ColorDrawBuffers[i]);
+
+ /* Wrapped renderbuffers may be absent; only flag ones sharing 'bo'. */
+ if (irb && irb->mt->bo == bo) {
+ found = brw->draw_aux_buffer_disabled[i] = true;
+ }
+ }
+
+ return found;
+}
+
+/* On Gen9 color buffers may be compressed by the hardware (lossless
+ * compression). There are, however, format restrictions and care needs to
+ * be taken that the sampler engine is capable of re-interpreting a buffer
+ * with a format different from the one the buffer was originally written
+ * with.
+ *
+ * For example, SRGB formats are not compressible and the sampler engine
+ * isn't capable of treating RGBA_UNORM as SRGB_ALPHA. In such a case the
+ * underlying color buffer needs to be resolved so that the sampling
+ * surface can be sampled as non-compressed (i.e., without the auxiliary
+ * MCS buffer being set).
+ *
+ * Returns true when a resolve is required: the texture is losslessly
+ * compressed but the view's format cannot be sampled while compressed.
+ */
+static bool
+intel_texture_view_requires_resolve(struct brw_context *brw,
+ struct intel_texture_object *intel_tex)
+{
+ /* Lossless compression only exists on Gen9+; otherwise nothing to do. */
+ if (brw->gen < 9 ||
+ !intel_miptree_is_lossless_compressed(brw, intel_tex->mt))
+ return false;
+
+ const uint32_t brw_format = brw_format_for_mesa_format(intel_tex->_Format);
+
+ /* If the sampler can read this view format directly from a compressed
+ * surface, no resolve is needed.
+ */
+ if (isl_format_supports_lossless_compression(brw->intelScreen->devinfo,
+ brw_format))
+ return false;
+
+ perf_debug("Incompatible sampling format (%s) for rbc (%s)\n",
+ _mesa_get_format_name(intel_tex->_Format),
+ _mesa_get_format_name(intel_tex->mt->format));
+
+ /* If the miptree is also a bound render target, stop compressing new
+ * rendering into it as well (see intel_disable_rb_aux_buffer).
+ */
+ if (intel_disable_rb_aux_buffer(brw, intel_tex->mt->bo))
+ perf_debug("Sampling renderbuffer with non-compressible format - "
+ "turning off compression");
+
+ return true;
+}
+
static void
intel_update_state(struct gl_context * ctx, GLuint new_state)
{
if (depth_irb)
intel_renderbuffer_resolve_hiz(brw, depth_irb);
+ memset(brw->draw_aux_buffer_disabled, 0,
+ sizeof(brw->draw_aux_buffer_disabled));
+
/* Resolve depth buffer and render cache of each enabled texture. */
int maxEnabledUnit = ctx->Texture._MaxEnabledTexImageUnit;
for (int i = 0; i <= maxEnabledUnit; i++) {
/* Sampling engine understands lossless compression and resolving
* those surfaces should be skipped for performance reasons.
*/
- intel_miptree_resolve_color(brw, tex_obj->mt,
- INTEL_MIPTREE_IGNORE_CCS_E);
+ const int flags = intel_texture_view_requires_resolve(brw, tex_obj) ?
+ 0 : INTEL_MIPTREE_IGNORE_CCS_E;
+ intel_miptree_resolve_color(brw, tex_obj->mt, flags);
brw_render_cache_set_check_flush(brw, tex_obj->mt->bo);
+
+ if (tex_obj->base.StencilSampling ||
+ tex_obj->mt->format == MESA_FORMAT_S_UINT8) {
+ intel_update_r8stencil(brw, tex_obj->mt);
+ }
}
/* Resolve color for each active shader image. */
for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
- const struct gl_shader *shader = ctx->_Shader->CurrentProgram[i] ?
- ctx->_Shader->CurrentProgram[i]->_LinkedShaders[i] : NULL;
+ const struct gl_linked_shader *shader =
+ ctx->_Shader->CurrentProgram[i] ?
+ ctx->_Shader->CurrentProgram[i]->_LinkedShaders[i] : NULL;
if (unlikely(shader && shader->NumImages)) {
for (unsigned j = 0; j < shader->NumImages; j++) {
/* Access to images is implemented using indirect messages
* against data port. Normal render target write understands
* lossless compression but unfortunately the typed/untyped
- * read/write interface doesn't. Therefore the compressed
- * surfaces need to be resolved prior to accessing them.
+ * read/write interface doesn't. Therefore even lossless
+ * compressed surfaces need to be resolved prior to accessing
+ * them. Hence skip setting INTEL_MIPTREE_IGNORE_CCS_E.
*/
intel_miptree_resolve_color(brw, tex_obj->mt, 0);
+
+ if (intel_miptree_is_lossless_compressed(brw, tex_obj->mt) &&
+ intel_disable_rb_aux_buffer(brw, tex_obj->mt->bo)) {
+ perf_debug("Using renderbuffer as shader image - turning "
+ "off lossless compression");
+ }
+
brw_render_cache_set_check_flush(brw, tex_obj->mt->bo);
}
}
}
}
+ /* Resolve color buffers for non-coherent framebuffer fetch. */
+ if (!ctx->Extensions.MESA_shader_framebuffer_fetch &&
+ ctx->FragmentProgram._Current &&
+ ctx->FragmentProgram._Current->Base.OutputsRead) {
+ const struct gl_framebuffer *fb = ctx->DrawBuffer;
+
+ for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
+ const struct intel_renderbuffer *irb =
+ intel_renderbuffer(fb->_ColorDrawBuffers[i]);
+
+ if (irb &&
+ intel_miptree_resolve_color(brw, irb->mt,
+ INTEL_MIPTREE_IGNORE_CCS_E))
+ brw_render_cache_set_check_flush(brw, irb->mt->bo);
+ }
+ }
+
/* If FRAMEBUFFER_SRGB is used on Gen9+ then we need to resolve any of the
* single-sampled color renderbuffers because the CCS buffer isn't
* supported for SRGB formats. This only matters if FRAMEBUFFER_SRGB is
brwInitFragProgFuncs( functions );
brw_init_common_queryobj_functions(functions);
- if (brw->gen >= 6)
+ if (brw->gen >= 8 || brw->is_haswell)
+ hsw_init_queryobj_functions(functions);
+ else if (brw->gen >= 6)
gen6_init_queryobj_functions(functions);
else
gen4_init_queryobj_functions(functions);
functions->NewTransformFeedback = brw_new_transform_feedback;
functions->DeleteTransformFeedback = brw_delete_transform_feedback;
- functions->GetTransformFeedbackVertexCount =
- brw_get_transform_feedback_vertex_count;
- if (brw->gen >= 7) {
+ if (brw->intelScreen->has_mi_math_and_lrr) {
+ functions->BeginTransformFeedback = hsw_begin_transform_feedback;
+ functions->EndTransformFeedback = hsw_end_transform_feedback;
+ functions->PauseTransformFeedback = hsw_pause_transform_feedback;
+ functions->ResumeTransformFeedback = hsw_resume_transform_feedback;
+ } else if (brw->gen >= 7) {
functions->BeginTransformFeedback = gen7_begin_transform_feedback;
functions->EndTransformFeedback = gen7_end_transform_feedback;
functions->PauseTransformFeedback = gen7_pause_transform_feedback;
functions->ResumeTransformFeedback = gen7_resume_transform_feedback;
+ functions->GetTransformFeedbackVertexCount =
+ brw_get_transform_feedback_vertex_count;
} else {
functions->BeginTransformFeedback = brw_begin_transform_feedback;
functions->EndTransformFeedback = brw_end_transform_feedback;
ctx->Const.MaxTransformFeedbackSeparateComponents =
BRW_MAX_SOL_BINDINGS / BRW_MAX_SOL_BUFFERS;
- ctx->Const.AlwaysUseGetTransformFeedbackVertexCount = true;
+ ctx->Const.AlwaysUseGetTransformFeedbackVertexCount =
+ !brw->intelScreen->has_mi_math_and_lrr;
int max_samples;
const int *msaa_modes = intel_supported_msaa_modes(brw->intelScreen);
ctx->Const.MaxClipPlanes = 8;
ctx->Const.LowerTessLevel = true;
+ ctx->Const.LowerTCSPatchVerticesIn = brw->gen >= 8;
+ ctx->Const.LowerTESPatchVerticesIn = true;
+ ctx->Const.PrimitiveRestartForPatches = true;
ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeInstructions = 16 * 1024;
ctx->Const.Program[MESA_SHADER_VERTEX].MaxAluInstructions = 0;
ctx->Const.MaxFramebufferHeight = 16384;
ctx->Const.MaxFramebufferLayers = ctx->Const.MaxArrayTextureLayers;
ctx->Const.MaxFramebufferSamples = max_samples;
+
+ /* OES_primitive_bounding_box */
+ ctx->Const.NoPrimitiveBoundingBoxOutput = true;
}
static void
-brw_initialize_cs_context_constants(struct brw_context *brw, unsigned max_threads)
+brw_initialize_cs_context_constants(struct brw_context *brw)
{
struct gl_context *ctx = &brw->ctx;
+ const struct intel_screen *screen = brw->intelScreen;
+ const struct gen_device_info *devinfo = screen->devinfo;
+
+ /* FINISHME: Do this for all platforms that the kernel supports */
+ if (brw->is_cherryview &&
+ screen->subslice_total > 0 && screen->eu_total > 0) {
+ /* Logical CS threads = EUs per subslice * 7 threads per EU */
+ brw->max_cs_threads = screen->eu_total / screen->subslice_total * 7;
+
+ /* Fuse configurations may give more threads than expected, never less. */
+ if (brw->max_cs_threads < devinfo->max_cs_threads)
+ brw->max_cs_threads = devinfo->max_cs_threads;
+ } else {
+ brw->max_cs_threads = devinfo->max_cs_threads;
+ }
- /* For ES, we set these constants based on SIMD8.
- *
- * TODO: Once we can always generate SIMD16, we should update this.
+ /* Maximum number of scalar compute shader invocations that can be run in
+ * parallel in the same subslice assuming SIMD32 dispatch.
*
- * For GL, we assume we can generate a SIMD16 program, but this currently
- * is not always true. This allows us to run more test cases, and will be
- * required based on desktop GL compute shader requirements.
+ * We don't advertise more than 64 threads, because we are limited to 64 by
+ * our usage of thread_width_max in the gpgpu walker command. This only
+ * currently impacts Haswell, which otherwise might be able to advertise 70
+ * threads. With SIMD32 and 64 threads, Haswell still provides twice the
+ * required the number of invocation needed for ARB_compute_shader.
*/
- const int simd_size = ctx->API == API_OPENGL_CORE ? 16 : 8;
-
- const uint32_t max_invocations = simd_size * max_threads;
+ const unsigned max_threads = MIN2(64, brw->max_cs_threads);
+ const uint32_t max_invocations = 32 * max_threads;
ctx->Const.MaxComputeWorkGroupSize[0] = max_invocations;
ctx->Const.MaxComputeWorkGroupSize[1] = max_invocations;
ctx->Const.MaxComputeWorkGroupSize[2] = max_invocations;
brw->precompile = driQueryOptionb(&brw->optionCache, "shader_precompile");
+ if (driQueryOptionb(&brw->optionCache, "precise_trig"))
+ brw->intelScreen->compiler->precise_trig = true;
+
ctx->Const.ForceGLSLExtensionsWarn =
driQueryOptionb(options, "force_glsl_extensions_warn");
ctx->Const.AllowGLSLExtensionDirectiveMidShader =
driQueryOptionb(options, "allow_glsl_extension_directive_midshader");
+ ctx->Const.GLSLZeroInit = driQueryOptionb(options, "glsl_zero_init");
+
brw->dual_color_blend_by_location =
driQueryOptionb(options, "dual_color_blend_by_location");
}
__DRIscreen *sPriv = driContextPriv->driScreenPriv;
struct gl_context *shareCtx = (struct gl_context *) sharedContextPrivate;
struct intel_screen *screen = sPriv->driverPrivate;
- const struct brw_device_info *devinfo = screen->devinfo;
+ const struct gen_device_info *devinfo = screen->devinfo;
struct dd_function_table functions;
/* Only allow the __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS flag if the kernel
brw->must_use_separate_stencil = devinfo->must_use_separate_stencil;
brw->has_swizzling = screen->hw_has_swizzling;
+ isl_device_init(&brw->isl_dev, devinfo, screen->hw_has_swizzling);
+
brw->vs.base.stage = MESA_SHADER_VERTEX;
brw->tcs.base.stage = MESA_SHADER_TESS_CTRL;
brw->tes.base.stage = MESA_SHADER_TESS_EVAL;
if (INTEL_DEBUG & DEBUG_PERF)
brw->perf_debug = true;
- brw_initialize_cs_context_constants(brw, devinfo->max_cs_threads);
+ brw_initialize_cs_context_constants(brw);
brw_initialize_context_constants(brw);
ctx->Const.ResetStrategy = notify_reset
brw_init_surface_formats(brw);
+ if (brw->gen >= 6)
+ brw_blorp_init(brw);
+
brw->max_vs_threads = devinfo->max_vs_threads;
brw->max_hs_threads = devinfo->max_hs_threads;
brw->max_ds_threads = devinfo->max_ds_threads;
brw->max_gs_threads = devinfo->max_gs_threads;
brw->max_wm_threads = devinfo->max_wm_threads;
- brw->max_cs_threads = devinfo->max_cs_threads;
brw->urb.size = devinfo->urb.size;
brw->urb.min_vs_entries = devinfo->urb.min_vs_entries;
brw->urb.max_vs_entries = devinfo->urb.max_vs_entries;
brw->urb.max_ds_entries = devinfo->urb.max_ds_entries;
brw->urb.max_gs_entries = devinfo->urb.max_gs_entries;
- /* Estimate the size of the mappable aperture into the GTT. There's an
- * ioctl to get the whole GTT size, but not one to get the mappable subset.
- * It turns out it's basically always 256MB, though some ancient hardware
- * was smaller.
- */
- uint32_t gtt_size = 256 * 1024 * 1024;
-
- /* We don't want to map two objects such that a memcpy between them would
- * just fault one mapping in and then the other over and over forever. So
- * we would need to divide the GTT size by 2. Additionally, some GTT is
- * taken up by things like the framebuffer and the ringbuffer and such, so
- * be more conservative.
- */
- brw->max_gtt_map_object_size = gtt_size / 4;
-
if (brw->gen == 6)
brw->urb.gs_present = false;
brw->predicate.state = BRW_PREDICATE_STATE_RENDER;
+ brw->max_gtt_map_object_size = screen->max_gtt_map_object_size;
+
brw->use_resource_streamer = screen->has_resource_streamer &&
(env_var_as_boolean("INTEL_USE_HW_BT", false) ||
env_var_as_boolean("INTEL_USE_GATHER", false));
}
_mesa_meta_free(&brw->ctx);
- brw_meta_fast_clear_free(brw);
if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
/* Force a report. */
brw_destroy_shader_time(brw);
}
+ if (brw->gen >= 6)
+ blorp_finish(&brw->blorp);
+
brw_destroy_state(brw);
brw_draw_destroy(brw);
drm_intel_bo_unreference(brw->curbe.curbe_bo);
if (brw->vs.base.scratch_bo)
drm_intel_bo_unreference(brw->vs.base.scratch_bo);
+ if (brw->tcs.base.scratch_bo)
+ drm_intel_bo_unreference(brw->tcs.base.scratch_bo);
+ if (brw->tes.base.scratch_bo)
+ drm_intel_bo_unreference(brw->tes.base.scratch_bo);
if (brw->gs.base.scratch_bo)
drm_intel_bo_unreference(brw->gs.base.scratch_bo);
if (brw->wm.base.scratch_bo)
*/
fb->Visual.sRGBCapable = false;
for (int i = 0; i < BUFFER_COUNT; i++) {
- if (fb->Attachment[i].Renderbuffer &&
- fb->Attachment[i].Renderbuffer->Format == MESA_FORMAT_B8G8R8A8_SRGB) {
- fb->Attachment[i].Renderbuffer->Format = MESA_FORMAT_B8G8R8A8_UNORM;
- }
+ struct gl_renderbuffer *rb = fb->Attachment[i].Renderbuffer;
+ if (rb)
+ rb->Format = _mesa_get_srgb_format_linear(rb->Format);
}
}
struct __DRIimageList images;
unsigned int format;
uint32_t buffer_mask = 0;
+ int ret;
front_rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
back_rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
if (back_rb)
buffer_mask |= __DRI_IMAGE_BUFFER_BACK;
- (*screen->image.loader->getBuffers) (drawable,
- driGLFormatToImageFormat(format),
- &drawable->dri2.stamp,
- drawable->loaderPrivate,
- buffer_mask,
- &images);
+ ret = screen->image.loader->getBuffers(drawable,
+ driGLFormatToImageFormat(format),
+ &drawable->dri2.stamp,
+ drawable->loaderPrivate,
+ buffer_mask,
+ &images);
+ if (!ret)
+ return;
if (images.image_mask & __DRI_IMAGE_BUFFER_FRONT) {
drawable->w = images.front->width;