/*
 Copyright 2003 VMware, Inc.
 Copyright (C) Intel Corp. 2006.  All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */
#include "main/api_exec.h"
#include "main/context.h"
#include "main/fbobject.h"
#include "main/extensions.h"
#include "main/imports.h"
#include "main/macros.h"
#include "main/points.h"
#include "main/version.h"
#include "main/vtxfmt.h"
#include "main/texobj.h"
#include "main/framebuffer.h"

#include "vbo/vbo_context.h"

#include "drivers/common/driverfuncs.h"
#include "drivers/common/meta.h"

#include "brw_context.h"
#include "brw_defines.h"
#include "brw_blorp.h"
#include "brw_compiler.h"

#include "brw_state.h"

#include "intel_batchbuffer.h"
#include "intel_buffer_objects.h"
#include "intel_buffers.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
#include "intel_pixel.h"
#include "intel_image.h"
#include "intel_tex.h"
#include "intel_tex_obj.h"

#include "swrast_setup/swrast_setup.h"

#include "tnl/t_pipeline.h"
#include "util/ralloc.h"
#include "util/debug.h"
/***************************************
 * Mesa's Driver Functions
 ***************************************/

const char *const brw_vendor_string = "Intel Open Source Technology Center";
static const char *
get_bsw_model(const struct intel_screen *screen)
{
   switch (screen->eu_total) {
   case 16:
      return "405";
   case 12:
      return "400";
   default:
      return "   ";
   }
}

static const char *
brw_get_renderer_string(const struct intel_screen *screen)
{
   const char *chipset;
   static char buffer[128];
   char *bsw = NULL;

   switch (screen->deviceID) {
#undef CHIPSET
#define CHIPSET(id, symbol, str) case id: chipset = str; break;
#include "pci_ids/i965_pci_ids.h"
   default:
      chipset = "Unknown Intel Chipset";
      break;
   }

   /* Braswell branding is funny, so we have to fix it up here */
   if (screen->deviceID == 0x22B1) {
      bsw = strdup(chipset);
      char *needle = strstr(bsw, "XXX");
      if (needle) {
         memcpy(needle, get_bsw_model(screen), 3);
         chipset = bsw;
      }
   }

   (void) driGetRendererString(buffer, chipset, 0);

   free(bsw);

   return buffer;
}
static const GLubyte *
intel_get_string(struct gl_context * ctx, GLenum name)
{
   const struct brw_context *const brw = brw_context(ctx);

   switch (name) {
   case GL_VENDOR:
      return (GLubyte *) brw_vendor_string;

   case GL_RENDERER:
      return
         (GLubyte *) brw_get_renderer_string(brw->screen);

   default:
      return NULL;
   }
}
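/* Viewport driver hook.  Only installed when the loader cannot send DRI2
 * invalidate events (see brw_init_driver_functions below); a viewport change
 * on a window-system framebuffer is then taken as a hint that the window may
 * have been resized, so both drawables are invalidated.
 */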
static void
intel_viewport(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);
   __DRIcontext *driContext = brw->driContext;

   if (_mesa_is_winsys_fbo(ctx->DrawBuffer)) {
      if (driContext->driDrawablePriv)
         dri2InvalidateDrawable(driContext->driDrawablePriv);
      if (driContext->driReadablePriv)
         dri2InvalidateDrawable(driContext->driReadablePriv);
   }
}
static void
intel_update_framebuffer(struct gl_context *ctx,
                         struct gl_framebuffer *fb)
{
   struct brw_context *brw = brw_context(ctx);

   /* Quantize the derived default number of samples */
   fb->DefaultGeometry._NumSamples =
      intel_quantize_num_samples(brw->screen,
                                 fb->DefaultGeometry.NumSamples);
}
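/* If the given BO is bound as one of the current color draw buffers, mark
 * its auxiliary (compression) buffer as disabled for this draw and report
 * whether such a draw buffer was found.
 */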
static bool
intel_disable_rb_aux_buffer(struct brw_context *brw, const drm_intel_bo *bo)
{
   const struct gl_framebuffer *fb = brw->ctx.DrawBuffer;
   bool found = false;

   for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
      const struct intel_renderbuffer *irb =
         intel_renderbuffer(fb->_ColorDrawBuffers[i]);

      if (irb && irb->mt->bo == bo) {
         found = brw->draw_aux_buffer_disabled[i] = true;
      }
   }

   return found;
}
/* On Gen9 color buffers may be compressed by the hardware (lossless
 * compression). There are, however, format restrictions and care needs to be
 * taken that the sampler engine is capable of re-interpreting a buffer with
 * a format different from the one it was originally written with.
 *
 * For example, SRGB formats are not compressible and the sampler engine isn't
 * capable of treating RGBA_UNORM as SRGB_ALPHA. In such a case the underlying
 * color buffer needs to be resolved so that the sampling surface can be
 * sampled as non-compressed (i.e., without the auxiliary MCS buffer being
 * set).
 */
static bool
intel_texture_view_requires_resolve(struct brw_context *brw,
                                    struct intel_texture_object *intel_tex)
{
   if (brw->gen < 9 ||
       !intel_miptree_is_lossless_compressed(brw, intel_tex->mt))
      return false;

   const uint32_t brw_format = brw_format_for_mesa_format(intel_tex->_Format);

   if (isl_format_supports_lossless_compression(&brw->screen->devinfo,
                                                brw_format))
      return false;

   perf_debug("Incompatible sampling format (%s) for rbc (%s)\n",
              _mesa_get_format_name(intel_tex->_Format),
              _mesa_get_format_name(intel_tex->mt->format));

   if (intel_disable_rb_aux_buffer(brw, intel_tex->mt->bo))
      perf_debug("Sampling renderbuffer with non-compressible format - "
                 "turning off compression");

   return true;
}
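/* UpdateState driver hook: record the new dirty state and perform the
 * depth (HiZ) and color (fast-clear/compression) resolves that sampling,
 * image access, and framebuffer fetch require before the next draw.
 */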
static void
intel_update_state(struct gl_context * ctx, GLuint new_state)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_texture_object *tex_obj;
   struct intel_renderbuffer *depth_irb;

   if (ctx->swrast_context)
      _swrast_InvalidateState(ctx, new_state);
   _vbo_InvalidateState(ctx, new_state);

   brw->NewGLState |= new_state;

   _mesa_unlock_context_textures(ctx);

   /* Resolve the depth buffer's HiZ buffer. */
   depth_irb = intel_get_renderbuffer(ctx->DrawBuffer, BUFFER_DEPTH);
   if (depth_irb)
      intel_renderbuffer_resolve_hiz(brw, depth_irb);

   memset(brw->draw_aux_buffer_disabled, 0,
          sizeof(brw->draw_aux_buffer_disabled));

   /* Resolve depth buffer and render cache of each enabled texture. */
   int maxEnabledUnit = ctx->Texture._MaxEnabledTexImageUnit;
   for (int i = 0; i <= maxEnabledUnit; i++) {
      if (!ctx->Texture.Unit[i]._Current)
         continue;
      tex_obj = intel_texture_object(ctx->Texture.Unit[i]._Current);
      if (!tex_obj || !tex_obj->mt)
         continue;
      intel_miptree_all_slices_resolve_depth(brw, tex_obj->mt);
      /* The sampling engine understands lossless compression, so resolving
       * those surfaces can be skipped for performance reasons.
       */
      const int flags = intel_texture_view_requires_resolve(brw, tex_obj) ?
                           0 : INTEL_MIPTREE_IGNORE_CCS_E;
      intel_miptree_resolve_color(brw, tex_obj->mt, flags);
      brw_render_cache_set_check_flush(brw, tex_obj->mt->bo);

      if (tex_obj->base.StencilSampling ||
          tex_obj->mt->format == MESA_FORMAT_S_UINT8) {
         intel_update_r8stencil(brw, tex_obj->mt);
      }
   }
   /* Resolve color for each active shader image. */
   for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
      const struct gl_linked_shader *shader =
         ctx->_Shader->CurrentProgram[i] ?
            ctx->_Shader->CurrentProgram[i]->_LinkedShaders[i] : NULL;

      if (unlikely(shader && shader->NumImages)) {
         for (unsigned j = 0; j < shader->NumImages; j++) {
            struct gl_image_unit *u =
               &ctx->ImageUnits[shader->ImageUnits[j]];
            tex_obj = intel_texture_object(u->TexObj);

            if (tex_obj && tex_obj->mt) {
               /* Access to images is implemented using indirect messages
                * against the data port. A normal render-target write
                * understands lossless compression, but unfortunately the
                * typed/untyped read/write interface doesn't. Therefore even
                * lossless compressed surfaces need to be resolved prior to
                * accessing them. Hence skip setting INTEL_MIPTREE_IGNORE_CCS_E.
                */
               intel_miptree_resolve_color(brw, tex_obj->mt, 0);

               if (intel_miptree_is_lossless_compressed(brw, tex_obj->mt) &&
                   intel_disable_rb_aux_buffer(brw, tex_obj->mt->bo)) {
                  perf_debug("Using renderbuffer as shader image - turning "
                             "off lossless compression");
               }

               brw_render_cache_set_check_flush(brw, tex_obj->mt->bo);
            }
         }
      }
   }
   /* Resolve color buffers for non-coherent framebuffer fetch. */
   if (!ctx->Extensions.MESA_shader_framebuffer_fetch &&
       ctx->FragmentProgram._Current &&
       ctx->FragmentProgram._Current->Base.OutputsRead) {
      const struct gl_framebuffer *fb = ctx->DrawBuffer;

      for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
         const struct intel_renderbuffer *irb =
            intel_renderbuffer(fb->_ColorDrawBuffers[i]);

         if (irb &&
             intel_miptree_resolve_color(brw, irb->mt,
                                         INTEL_MIPTREE_IGNORE_CCS_E))
            brw_render_cache_set_check_flush(brw, irb->mt->bo);
      }
   }
   /* If FRAMEBUFFER_SRGB is used on Gen9+ then we need to resolve any of the
    * single-sampled color renderbuffers because the CCS buffer isn't
    * supported for SRGB formats. This only matters if FRAMEBUFFER_SRGB is
    * enabled because otherwise the surface state will be programmed with the
    * linear equivalent format anyway.
    */
   if (brw->gen >= 9 && ctx->Color.sRGBEnabled) {
      struct gl_framebuffer *fb = ctx->DrawBuffer;
      for (int i = 0; i < fb->_NumColorDrawBuffers; i++) {
         struct gl_renderbuffer *rb = fb->_ColorDrawBuffers[i];

         if (rb == NULL)
            continue;

         struct intel_renderbuffer *irb = intel_renderbuffer(rb);
         struct intel_mipmap_tree *mt = irb->mt;

         if (mt == NULL ||
             mt->num_samples > 1 ||
             _mesa_get_srgb_format_linear(mt->format) == mt->format)
            continue;

         /* Lossless compression is not supported for SRGB formats, so it
          * should be impossible to get here with such surfaces.
          */
         assert(!intel_miptree_is_lossless_compressed(brw, mt));
         intel_miptree_resolve_color(brw, mt, 0);
         brw_render_cache_set_check_flush(brw, mt->bo);
      }
   }

   _mesa_lock_context_textures(ctx);
   if (new_state & _NEW_BUFFERS) {
      intel_update_framebuffer(ctx, ctx->DrawBuffer);
      if (ctx->DrawBuffer != ctx->ReadBuffer)
         intel_update_framebuffer(ctx, ctx->ReadBuffer);
   }
}

#define flushFront(screen) \
   ((screen)->image.loader ? (screen)->image.loader->flushFrontBuffer \
                           : (screen)->dri2.loader->flushFrontBuffer)
static void
intel_flush_front(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);
   __DRIcontext *driContext = brw->driContext;
   __DRIdrawable *driDrawable = driContext->driDrawablePriv;
   __DRIscreen *const dri_screen = brw->screen->driScrnPriv;

   if (brw->front_buffer_dirty && _mesa_is_winsys_fbo(ctx->DrawBuffer)) {
      if (flushFront(dri_screen) && driDrawable &&
          driDrawable->loaderPrivate) {

         /* Resolve before flushing FAKE_FRONT_LEFT to FRONT_LEFT.
          *
          * This potentially resolves both front and back buffer. It
          * is unnecessary to resolve the back, but harms nothing except
          * performance. And no one cares about front-buffer render
          * performance.
          */
         intel_resolve_for_dri2_flush(brw, driDrawable);
         intel_batchbuffer_flush(brw);

         flushFront(dri_screen)(driDrawable, driDrawable->loaderPrivate);

         /* We set the dirty bit in intel_prepare_render() if we're
          * front buffer rendering once we get there.
          */
         brw->front_buffer_dirty = false;
      }
   }
}
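/* Flush driver hook: submit the batchbuffer and push any front-buffer
 * rendering out to the window system.
 */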
static void
intel_glFlush(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);

   intel_batchbuffer_flush(brw);
   intel_flush_front(ctx);

   brw->need_flush_throttle = true;
}
static void
intel_finish(struct gl_context * ctx)
{
   struct brw_context *brw = brw_context(ctx);

   intel_glFlush(ctx);

   if (brw->batch.last_bo)
      drm_intel_bo_wait_rendering(brw->batch.last_bo);
}
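/* Fill in the dd_function_table with this driver's implementations,
 * selecting generation-specific variants where the hardware differs.
 */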
void
brw_init_driver_functions(struct brw_context *brw,
                          struct dd_function_table *functions)
{
   _mesa_init_driver_functions(functions);

   /* GLX uses DRI2 invalidate events to handle window resizing.
    * Unfortunately, EGL does not - libEGL is written in XCB (not Xlib),
    * which doesn't provide a mechanism for snooping the event queues.
    *
    * So EGL still relies on viewport hacks to handle window resizing.
    * This should go away with DRI3000.
    */
   if (!brw->driContext->driScreenPriv->dri2.useInvalidate)
      functions->Viewport = intel_viewport;

   functions->Flush = intel_glFlush;
   functions->Finish = intel_finish;
   functions->GetString = intel_get_string;
   functions->UpdateState = intel_update_state;
   intelInitTextureFuncs(functions);
   intelInitTextureImageFuncs(functions);
   intelInitTextureSubImageFuncs(functions);
   intelInitTextureCopyImageFuncs(functions);
   intelInitCopyImageFuncs(functions);
   intelInitClearFuncs(functions);
   intelInitBufferFuncs(functions);
   intelInitPixelFuncs(functions);
   intelInitBufferObjectFuncs(functions);
   intel_init_syncobj_functions(functions);
   brw_init_object_purgeable_functions(functions);

   brwInitFragProgFuncs( functions );
   brw_init_common_queryobj_functions(functions);
   if (brw->gen >= 8 || brw->is_haswell)
      hsw_init_queryobj_functions(functions);
   else if (brw->gen >= 6)
      gen6_init_queryobj_functions(functions);
   else
      gen4_init_queryobj_functions(functions);
   brw_init_compute_functions(functions);
   brw_init_conditional_render_functions(functions);

   functions->QueryInternalFormat = brw_query_internal_format;
   functions->NewTransformFeedback = brw_new_transform_feedback;
   functions->DeleteTransformFeedback = brw_delete_transform_feedback;
   if (brw->screen->has_mi_math_and_lrr) {
      functions->BeginTransformFeedback = hsw_begin_transform_feedback;
      functions->EndTransformFeedback = hsw_end_transform_feedback;
      functions->PauseTransformFeedback = hsw_pause_transform_feedback;
      functions->ResumeTransformFeedback = hsw_resume_transform_feedback;
   } else if (brw->gen >= 7) {
      functions->BeginTransformFeedback = gen7_begin_transform_feedback;
      functions->EndTransformFeedback = gen7_end_transform_feedback;
      functions->PauseTransformFeedback = gen7_pause_transform_feedback;
      functions->ResumeTransformFeedback = gen7_resume_transform_feedback;
      functions->GetTransformFeedbackVertexCount =
         brw_get_transform_feedback_vertex_count;
   } else {
      functions->BeginTransformFeedback = brw_begin_transform_feedback;
      functions->EndTransformFeedback = brw_end_transform_feedback;
   }

   if (brw->gen >= 6)
      functions->GetSamplePosition = gen6_get_sample_position;
}
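/* Set up the limits (ctx->Const) that are advertised to applications,
 * based mostly on hardware generation and compiler capabilities.
 */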
static void
brw_initialize_context_constants(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   const struct brw_compiler *compiler = brw->screen->compiler;

   const bool stage_exists[MESA_SHADER_STAGES] = {
      [MESA_SHADER_VERTEX] = true,
      [MESA_SHADER_TESS_CTRL] = brw->gen >= 7,
      [MESA_SHADER_TESS_EVAL] = brw->gen >= 7,
      [MESA_SHADER_GEOMETRY] = brw->gen >= 6,
      [MESA_SHADER_FRAGMENT] = true,
      [MESA_SHADER_COMPUTE] =
         (ctx->API == API_OPENGL_CORE &&
          ctx->Const.MaxComputeWorkGroupSize[0] >= 1024) ||
         (ctx->API == API_OPENGLES2 &&
          ctx->Const.MaxComputeWorkGroupSize[0] >= 128) ||
         _mesa_extension_override_enables.ARB_compute_shader,
   };

   unsigned num_stages = 0;
   for (int i = 0; i < MESA_SHADER_STAGES; i++) {
      if (stage_exists[i])
         num_stages++;
   }

   unsigned max_samplers =
      brw->gen >= 8 || brw->is_haswell ? BRW_MAX_TEX_UNIT : 16;
   ctx->Const.MaxDualSourceDrawBuffers = 1;
   ctx->Const.MaxDrawBuffers = BRW_MAX_DRAW_BUFFERS;
   ctx->Const.MaxCombinedShaderOutputResources =
      MAX_IMAGE_UNITS + BRW_MAX_DRAW_BUFFERS;

   ctx->Const.QueryCounterBits.Timestamp = 36;

   ctx->Const.MaxTextureCoordUnits = 8; /* Mesa limit */
   ctx->Const.MaxImageUnits = MAX_IMAGE_UNITS;
   ctx->Const.MaxRenderbufferSize = 8192;
   ctx->Const.MaxTextureLevels = MIN2(14 /* 8192 */, MAX_TEXTURE_LEVELS);
   ctx->Const.Max3DTextureLevels = 12; /* 2048 */
   ctx->Const.MaxCubeTextureLevels = 14; /* 8192 */
   ctx->Const.MaxArrayTextureLayers = brw->gen >= 7 ? 2048 : 512;
   ctx->Const.MaxTextureMbytes = 1536;
   ctx->Const.MaxTextureRectSize = 1 << 12;
   ctx->Const.MaxTextureMaxAnisotropy = 16.0;
   ctx->Const.StripTextureBorder = true;
   if (brw->gen >= 7)
      ctx->Const.MaxProgramTextureGatherComponents = 4;
   else if (brw->gen == 6)
      ctx->Const.MaxProgramTextureGatherComponents = 1;

   ctx->Const.MaxUniformBlockSize = 65536;
   for (int i = 0; i < MESA_SHADER_STAGES; i++) {
      struct gl_program_constants *prog = &ctx->Const.Program[i];

      if (!stage_exists[i])
         continue;

      prog->MaxTextureImageUnits = max_samplers;

      prog->MaxUniformBlocks = BRW_MAX_UBO;
      prog->MaxCombinedUniformComponents =
         prog->MaxUniformComponents +
         ctx->Const.MaxUniformBlockSize / 4 * prog->MaxUniformBlocks;

      prog->MaxAtomicCounters = MAX_ATOMIC_COUNTERS;
      prog->MaxAtomicBuffers = BRW_MAX_ABO;
      prog->MaxImageUniforms = compiler->scalar_stage[i] ? BRW_MAX_IMAGES : 0;
      prog->MaxShaderStorageBlocks = BRW_MAX_SSBO;
   }

   ctx->Const.MaxTextureUnits =
      MIN2(ctx->Const.MaxTextureCoordUnits,
           ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits);

   ctx->Const.MaxUniformBufferBindings = num_stages * BRW_MAX_UBO;
   ctx->Const.MaxCombinedUniformBlocks = num_stages * BRW_MAX_UBO;
   ctx->Const.MaxCombinedAtomicBuffers = num_stages * BRW_MAX_ABO;
   ctx->Const.MaxCombinedShaderStorageBlocks = num_stages * BRW_MAX_SSBO;
   ctx->Const.MaxShaderStorageBufferBindings = num_stages * BRW_MAX_SSBO;
   ctx->Const.MaxCombinedTextureImageUnits = num_stages * max_samplers;
   ctx->Const.MaxCombinedImageUniforms = num_stages * BRW_MAX_IMAGES;
   /* Hardware only supports a limited number of transform feedback buffers.
    * So we need to override the Mesa default (which is based only on software
    * limits).
    */
   ctx->Const.MaxTransformFeedbackBuffers = BRW_MAX_SOL_BUFFERS;

   /* On Gen6, in the worst case, we use up one binding table entry per
    * transform feedback component (see comments above the definition of
    * BRW_MAX_SOL_BINDINGS, in brw_context.h), so we need to advertise a value
    * for MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS equal to
    * BRW_MAX_SOL_BINDINGS.
    *
    * In "separate components" mode, we need to divide this value by
    * BRW_MAX_SOL_BUFFERS, so that the total number of binding table entries
    * used up by all buffers will not exceed BRW_MAX_SOL_BINDINGS.
    */
   ctx->Const.MaxTransformFeedbackInterleavedComponents = BRW_MAX_SOL_BINDINGS;
   ctx->Const.MaxTransformFeedbackSeparateComponents =
      BRW_MAX_SOL_BINDINGS / BRW_MAX_SOL_BUFFERS;

   ctx->Const.AlwaysUseGetTransformFeedbackVertexCount =
      !brw->screen->has_mi_math_and_lrr;
   int max_samples;
   const int *msaa_modes = intel_supported_msaa_modes(brw->screen);
   const int clamp_max_samples =
      driQueryOptioni(&brw->optionCache, "clamp_max_samples");

   if (clamp_max_samples < 0) {
      max_samples = msaa_modes[0];
   } else {
      /* Select the largest supported MSAA mode that does not exceed
       * clamp_max_samples.
       */
      max_samples = 0;
      for (int i = 0; msaa_modes[i] != 0; ++i) {
         if (msaa_modes[i] <= clamp_max_samples) {
            max_samples = msaa_modes[i];
            break;
         }
      }
   }

   ctx->Const.MaxSamples = max_samples;
   ctx->Const.MaxColorTextureSamples = max_samples;
   ctx->Const.MaxDepthTextureSamples = max_samples;
   ctx->Const.MaxIntegerSamples = max_samples;
   ctx->Const.MaxImageSamples = 0;

   /* gen6_set_sample_maps() sets SampleMap{2,4,8}x variables which are used
    * to map indices of rectangular grid to sample numbers within a pixel.
    * These variables are used by GL_EXT_framebuffer_multisample_blit_scaled
    * extension implementation. For more details see the comment above
    * gen6_set_sample_maps() definition.
    */
   gen6_set_sample_maps(ctx);
   ctx->Const.MinLineWidth = 1.0;
   ctx->Const.MinLineWidthAA = 1.0;
   if (brw->gen >= 6) {
      ctx->Const.MaxLineWidth = 7.375;
      ctx->Const.MaxLineWidthAA = 7.375;
      ctx->Const.LineWidthGranularity = 0.125;
   } else {
      ctx->Const.MaxLineWidth = 7.0;
      ctx->Const.MaxLineWidthAA = 7.0;
      ctx->Const.LineWidthGranularity = 0.5;
   }

   /* For non-antialiased lines, we have to round the line width to the
    * nearest whole number. Make sure that we don't advertise a line
    * width that, when rounded, will be beyond the actual hardware
    * maximum.
    */
   assert(roundf(ctx->Const.MaxLineWidth) <= ctx->Const.MaxLineWidth);

   ctx->Const.MinPointSize = 1.0;
   ctx->Const.MinPointSizeAA = 1.0;
   ctx->Const.MaxPointSize = 255.0;
   ctx->Const.MaxPointSizeAA = 255.0;
   ctx->Const.PointSizeGranularity = 1.0;
   if (brw->gen >= 5 || brw->is_g4x)
      ctx->Const.MaxClipPlanes = 8;

   ctx->Const.LowerTessLevel = true;
   ctx->Const.LowerTCSPatchVerticesIn = brw->gen >= 8;
   ctx->Const.LowerTESPatchVerticesIn = true;
   ctx->Const.PrimitiveRestartForPatches = true;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeInstructions = 16 * 1024;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxAluInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxTexInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxTexIndirections = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAluInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTexInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTexIndirections = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAttribs = 16;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTemps = 256;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAddressRegs = 1;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeParameters = 1024;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxEnvParams =
      MIN2(ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeParameters,
           ctx->Const.Program[MESA_SHADER_VERTEX].MaxEnvParams);

   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeInstructions = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAluInstructions = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTexInstructions = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTexIndirections = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAttribs = 12;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTemps = 256;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAddressRegs = 0;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeParameters = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxEnvParams =
      MIN2(ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeParameters,
           ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxEnvParams);
   /* Fragment shaders use real, 32-bit twos-complement integers for all
    * integer types.
    */
   ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.RangeMin = 31;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.RangeMax = 30;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.Precision = 0;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].HighInt = ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MediumInt = ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt;

   ctx->Const.Program[MESA_SHADER_VERTEX].LowInt.RangeMin = 31;
   ctx->Const.Program[MESA_SHADER_VERTEX].LowInt.RangeMax = 30;
   ctx->Const.Program[MESA_SHADER_VERTEX].LowInt.Precision = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].HighInt = ctx->Const.Program[MESA_SHADER_VERTEX].LowInt;
   ctx->Const.Program[MESA_SHADER_VERTEX].MediumInt = ctx->Const.Program[MESA_SHADER_VERTEX].LowInt;
   /* Gen6 converts quads to polygons at the beginning of the 3D pipeline,
    * but we're not sure how it's actually done for vertex order, which
    * affects the provoking vertex decision. Always use last-vertex
    * convention for quad primitives, which works as expected for now.
    */
   if (brw->gen >= 6)
      ctx->Const.QuadsFollowProvokingVertexConvention = false;

   ctx->Const.NativeIntegers = true;
   ctx->Const.VertexID_is_zero_based = true;
   /* Regarding the CMP instruction, the Ivybridge PRM says:
    *
    *   "For each enabled channel 0b or 1b is assigned to the appropriate flag
    *    bit and 0/all zeros or all ones (e.g, byte 0xFF, word 0xFFFF, DWord
    *    0xFFFFFFFF) is assigned to dst."
    *
    * but PRMs for earlier generations say:
    *
    *   "In dword format, one GRF may store up to 8 results. When the register
    *    is used later as a vector of Booleans, as only LSB at each channel
    *    contains meaning [sic] data, software should make sure all higher bits
    *    are masked out (e.g. by 'and-ing' an [sic] 0x01 constant)."
    *
    * We select the representation of a true boolean uniform to be ~0, and fix
    * the results of Gen <= 5 CMP instructions with -(result & 1).
    */
   ctx->Const.UniformBooleanTrue = ~0;
   /* From the gen4 PRM, volume 4 page 127:
    *
    *    "For SURFTYPE_BUFFER non-rendertarget surfaces, this field specifies
    *     the base address of the first element of the surface, computed in
    *     software by adding the surface base address to the byte offset of
    *     the element in the buffer."
    *
    * However, unaligned accesses are slower, so enforce buffer alignment.
    */
   ctx->Const.UniformBufferOffsetAlignment = 16;

   /* ShaderStorageBufferOffsetAlignment should be a cacheline (64 bytes) so
    * that we can safely have the CPU and GPU writing the same SSBO on
    * non-cachecoherent systems (our Atom CPUs). With UBOs, the GPU never
    * writes, so there's no problem. For an SSBO, the GPU and the CPU can
    * be updating disjoint regions of the buffer simultaneously and that will
    * break if the regions overlap the same cacheline.
    */
   ctx->Const.ShaderStorageBufferOffsetAlignment = 64;
   ctx->Const.TextureBufferOffsetAlignment = 16;
   ctx->Const.MaxTextureBufferSize = 128 * 1024 * 1024;
   ctx->Const.MaxVarying = 32;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxOutputComponents = 128;
   ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxInputComponents = 64;
   ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxOutputComponents = 128;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxInputComponents = 128;
   ctx->Const.Program[MESA_SHADER_TESS_CTRL].MaxInputComponents = 128;
   ctx->Const.Program[MESA_SHADER_TESS_CTRL].MaxOutputComponents = 128;
   ctx->Const.Program[MESA_SHADER_TESS_EVAL].MaxInputComponents = 128;
   ctx->Const.Program[MESA_SHADER_TESS_EVAL].MaxOutputComponents = 128;
   /* We want the GLSL compiler to emit code that uses condition codes */
   for (int i = 0; i < MESA_SHADER_STAGES; i++) {
      ctx->Const.ShaderCompilerOptions[i] =
         brw->screen->compiler->glsl_compiler_options[i];
   }

   if (brw->gen >= 7) {
      ctx->Const.MaxViewportWidth = 32768;
      ctx->Const.MaxViewportHeight = 32768;
   }

   /* ARB_viewport_array */
   if (brw->gen >= 6 && ctx->API == API_OPENGL_CORE) {
      ctx->Const.MaxViewports = GEN6_NUM_VIEWPORTS;
      ctx->Const.ViewportSubpixelBits = 0;

      /* Cast to float before negating because MaxViewportWidth is unsigned.
       */
      ctx->Const.ViewportBounds.Min = -(float)ctx->Const.MaxViewportWidth;
      ctx->Const.ViewportBounds.Max = ctx->Const.MaxViewportWidth;
   }

   /* ARB_gpu_shader5 */
   if (brw->gen >= 7)
      ctx->Const.MaxVertexStreams = MIN2(4, MAX_VERTEX_STREAMS);

   /* ARB_framebuffer_no_attachments */
   ctx->Const.MaxFramebufferWidth = 16384;
   ctx->Const.MaxFramebufferHeight = 16384;
   ctx->Const.MaxFramebufferLayers = ctx->Const.MaxArrayTextureLayers;
   ctx->Const.MaxFramebufferSamples = max_samples;

   /* OES_primitive_bounding_box */
   ctx->Const.NoPrimitiveBoundingBoxOutput = true;
}
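/* Compute-shader limits are derived from the device's thread counts, so
 * they are initialized separately from the other context constants.
 */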
static void
brw_initialize_cs_context_constants(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   const struct intel_screen *screen = brw->screen;
   struct gen_device_info *devinfo = &brw->screen->devinfo;

   /* FINISHME: Do this for all platforms that the kernel supports */
   if (brw->is_cherryview &&
       screen->subslice_total > 0 && screen->eu_total > 0) {
      /* Logical CS threads = EUs per subslice * 7 threads per EU */
      uint32_t max_cs_threads = screen->eu_total / screen->subslice_total * 7;

      /* Fuse configurations may give more threads than expected, never less. */
      if (max_cs_threads > devinfo->max_cs_threads)
         devinfo->max_cs_threads = max_cs_threads;
   }

   /* Maximum number of scalar compute shader invocations that can be run in
    * parallel in the same subslice assuming SIMD32 dispatch.
    *
    * We don't advertise more than 64 threads, because we are limited to 64 by
    * our usage of thread_width_max in the gpgpu walker command. This only
    * currently impacts Haswell, which otherwise might be able to advertise 70
    * threads. With SIMD32 and 64 threads, Haswell still provides twice the
    * number of invocations required for ARB_compute_shader.
    */
   const unsigned max_threads = MIN2(64, devinfo->max_cs_threads);
   const uint32_t max_invocations = 32 * max_threads;
   ctx->Const.MaxComputeWorkGroupSize[0] = max_invocations;
   ctx->Const.MaxComputeWorkGroupSize[1] = max_invocations;
   ctx->Const.MaxComputeWorkGroupSize[2] = max_invocations;
   ctx->Const.MaxComputeWorkGroupInvocations = max_invocations;
   ctx->Const.MaxComputeSharedMemorySize = 64 * 1024;
}
/**
 * Process driconf (drirc) options, setting appropriate context flags.
 *
 * intelInitExtensions still pokes at optionCache directly, in order to
 * avoid advertising various extensions.  No flags are set, so it makes
 * sense to continue doing that there.
 */
static void
brw_process_driconf_options(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   driOptionCache *options = &brw->optionCache;
   driParseConfigFiles(options, &brw->screen->optionCache,
                       brw->driContext->driScreenPriv->myNum, "i965");

   int bo_reuse_mode = driQueryOptioni(options, "bo_reuse");
   switch (bo_reuse_mode) {
   case DRI_CONF_BO_REUSE_DISABLED:
      break;
   case DRI_CONF_BO_REUSE_ALL:
      intel_bufmgr_gem_enable_reuse(brw->bufmgr);
      break;
   }

   if (!driQueryOptionb(options, "hiz")) {
      brw->has_hiz = false;
      /* On gen6, you can only do separate stencil with HIZ. */
      if (brw->gen == 6)
         brw->has_separate_stencil = false;
   }

   if (driQueryOptionb(options, "always_flush_batch")) {
      fprintf(stderr, "flushing batchbuffer before/after each draw call\n");
      brw->always_flush_batch = true;
   }

   if (driQueryOptionb(options, "always_flush_cache")) {
      fprintf(stderr, "flushing GPU caches before/after each draw call\n");
      brw->always_flush_cache = true;
   }

   if (driQueryOptionb(options, "disable_throttling")) {
      fprintf(stderr, "disabling flush throttling\n");
      brw->disable_throttling = true;
   }

   brw->precompile = driQueryOptionb(&brw->optionCache, "shader_precompile");

   if (driQueryOptionb(&brw->optionCache, "precise_trig"))
      brw->screen->compiler->precise_trig = true;

   ctx->Const.ForceGLSLExtensionsWarn =
      driQueryOptionb(options, "force_glsl_extensions_warn");

   ctx->Const.DisableGLSLLineContinuations =
      driQueryOptionb(options, "disable_glsl_line_continuations");

   ctx->Const.AllowGLSLExtensionDirectiveMidShader =
      driQueryOptionb(options, "allow_glsl_extension_directive_midshader");

   ctx->Const.GLSLZeroInit = driQueryOptionb(options, "glsl_zero_init");

   brw->dual_color_blend_by_location =
      driQueryOptionb(options, "dual_color_blend_by_location");
}
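/* Main DRI entry point for creating a rendering context.  Validates the
 * requested flags, allocates and fills in the brw_context, and initializes
 * the Mesa context plus the driver subsystems.
 */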
GLboolean
brwCreateContext(gl_api api,
                 const struct gl_config *mesaVis,
                 __DRIcontext *driContextPriv,
                 unsigned major_version,
                 unsigned minor_version,
                 uint32_t flags,
                 bool notify_reset,
                 unsigned *dri_ctx_error,
                 void *sharedContextPrivate)
{
   struct gl_context *shareCtx = (struct gl_context *) sharedContextPrivate;
   struct intel_screen *screen = driContextPriv->driScreenPriv->driverPrivate;
   const struct gen_device_info *devinfo = &screen->devinfo;
   struct dd_function_table functions;

   /* Only allow the __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS flag if the kernel
    * provides us with context reset notifications.
    */
   uint32_t allowed_flags = __DRI_CTX_FLAG_DEBUG
      | __DRI_CTX_FLAG_FORWARD_COMPATIBLE;

   if (screen->has_context_reset_notification)
      allowed_flags |= __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS;

   if (flags & ~allowed_flags) {
      *dri_ctx_error = __DRI_CTX_ERROR_UNKNOWN_FLAG;
      return false;
   }
   struct brw_context *brw = rzalloc(NULL, struct brw_context);
   if (!brw) {
      fprintf(stderr, "%s: failed to alloc context\n", __func__);
      *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
      return false;
   }

   driContextPriv->driverPrivate = brw;
   brw->driContext = driContextPriv;
   brw->screen = screen;
   brw->bufmgr = screen->bufmgr;
   brw->gen = devinfo->gen;
   brw->gt = devinfo->gt;
   brw->is_g4x = devinfo->is_g4x;
   brw->is_baytrail = devinfo->is_baytrail;
   brw->is_haswell = devinfo->is_haswell;
   brw->is_cherryview = devinfo->is_cherryview;
   brw->is_broxton = devinfo->is_broxton;
   brw->has_llc = devinfo->has_llc;
   brw->has_hiz = devinfo->has_hiz_and_separate_stencil;
   brw->has_separate_stencil = devinfo->has_hiz_and_separate_stencil;
   brw->has_pln = devinfo->has_pln;
   brw->has_compr4 = devinfo->has_compr4;
   brw->has_surface_tile_offset = devinfo->has_surface_tile_offset;
   brw->has_negative_rhw_bug = devinfo->has_negative_rhw_bug;
   brw->needs_unlit_centroid_workaround =
      devinfo->needs_unlit_centroid_workaround;

   brw->must_use_separate_stencil = devinfo->must_use_separate_stencil;
   brw->has_swizzling = screen->hw_has_swizzling;

   isl_device_init(&brw->isl_dev, devinfo, screen->hw_has_swizzling);
   brw->vs.base.stage = MESA_SHADER_VERTEX;
   brw->tcs.base.stage = MESA_SHADER_TESS_CTRL;
   brw->tes.base.stage = MESA_SHADER_TESS_EVAL;
   brw->gs.base.stage = MESA_SHADER_GEOMETRY;
   brw->wm.base.stage = MESA_SHADER_FRAGMENT;

   if (brw->gen >= 8) {
      gen8_init_vtable_surface_functions(brw);
      brw->vtbl.emit_depth_stencil_hiz = gen8_emit_depth_stencil_hiz;
   } else if (brw->gen >= 7) {
      gen7_init_vtable_surface_functions(brw);
      brw->vtbl.emit_depth_stencil_hiz = gen7_emit_depth_stencil_hiz;
   } else if (brw->gen >= 6) {
      gen6_init_vtable_surface_functions(brw);
      brw->vtbl.emit_depth_stencil_hiz = gen6_emit_depth_stencil_hiz;
   } else {
      gen4_init_vtable_surface_functions(brw);
      brw->vtbl.emit_depth_stencil_hiz = brw_emit_depth_stencil_hiz;
   }
   brw_init_driver_functions(brw, &functions);

   if (notify_reset)
      functions.GetGraphicsResetStatus = brw_get_graphics_reset_status;

   struct gl_context *ctx = &brw->ctx;

   if (!_mesa_initialize_context(ctx, api, mesaVis, shareCtx, &functions)) {
      *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
      fprintf(stderr, "%s: failed to init mesa context\n", __func__);
      intelDestroyContext(driContextPriv);
      return false;
   }

   driContextSetFlags(ctx, flags);
   /* Initialize the software rasterizer and helper modules.
    *
    * As of GL 3.1 core, the gen4+ driver doesn't need the swrast context for
    * software fallbacks (which we have to support on legacy GL to do weird
    * glDrawPixels(), glBitmap(), and other functions).
    */
   if (api != API_OPENGL_CORE && api != API_OPENGLES2) {
      _swrast_CreateContext(ctx);
   }

   _vbo_CreateContext(ctx);
   if (ctx->swrast_context) {
      _tnl_CreateContext(ctx);
      TNL_CONTEXT(ctx)->Driver.RunPipeline = _tnl_run_pipeline;
      _swsetup_CreateContext(ctx);

      /* Configure swrast to match hardware characteristics: */
      _swrast_allow_pixel_fog(ctx, false);
      _swrast_allow_vertex_fog(ctx, true);
   }

   _mesa_meta_init(ctx);
   brw_process_driconf_options(brw);

   if (INTEL_DEBUG & DEBUG_PERF)
      brw->perf_debug = true;

   brw_initialize_cs_context_constants(brw);
   brw_initialize_context_constants(brw);

   ctx->Const.ResetStrategy = notify_reset
      ? GL_LOSE_CONTEXT_ON_RESET_ARB : GL_NO_RESET_NOTIFICATION_ARB;

   /* Reinitialize the context point state.  It depends on ctx->Const values. */
   _mesa_init_point(ctx);

   intel_fbo_init(brw);

   intel_batchbuffer_init(brw);

   if (brw->gen >= 6) {
      /* Create a new hardware context.  Using a hardware context means that
       * our GPU state will be saved/restored on context switch, allowing us
       * to assume that the GPU is in the same state we left it in.
       *
       * This is required for transform feedback buffer offsets, query objects,
       * and also allows us to reduce how much state we have to emit.
       */
      brw->hw_ctx = drm_intel_gem_context_create(brw->bufmgr);

      if (!brw->hw_ctx) {
         fprintf(stderr, "Gen6+ requires Kernel 3.6 or later.\n");
         intelDestroyContext(driContextPriv);
         return false;
      }
   }
   if (brw_init_pipe_control(brw, devinfo)) {
      *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
      intelDestroyContext(driContextPriv);
      return false;
   }

   brw_init_state(brw);

   intelInitExtensions(ctx);

   brw_init_surface_formats(brw);

   brw_blorp_init(brw);

   brw->urb.size = devinfo->urb.size;

   brw->urb.gs_present = false;

   brw->prim_restart.in_progress = false;
   brw->prim_restart.enable_cut_index = false;
   brw->gs.enabled = false;
   brw->sf.viewport_transform_enable = true;

   brw->predicate.state = BRW_PREDICATE_STATE_RENDER;

   brw->max_gtt_map_object_size = screen->max_gtt_map_object_size;

   brw->use_resource_streamer = screen->has_resource_streamer &&
      (env_var_as_boolean("INTEL_USE_HW_BT", false) ||
       env_var_as_boolean("INTEL_USE_GATHER", false));

   ctx->VertexProgram._MaintainTnlProgram = true;
   ctx->FragmentProgram._MaintainTexEnvProgram = true;

   brw_draw_init( brw );
   if ((flags & __DRI_CTX_FLAG_DEBUG) != 0) {
      /* Turn on some extra GL_ARB_debug_output generation. */
      brw->perf_debug = true;
   }

   if ((flags & __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS) != 0)
      ctx->Const.ContextFlags |= GL_CONTEXT_FLAG_ROBUST_ACCESS_BIT_ARB;

   if (INTEL_DEBUG & DEBUG_SHADER_TIME)
      brw_init_shader_time(brw);

   _mesa_compute_version(ctx);

   _mesa_initialize_dispatch_tables(ctx);
   _mesa_initialize_vbo_vtxfmt(ctx);

   if (ctx->Extensions.AMD_performance_monitor) {
      brw_init_performance_monitors(brw);
   }

   vbo_use_buffer_objects(ctx);
   vbo_always_unmap_buffers(ctx);

   return true;
}
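/* Tear down a context created by brwCreateContext, releasing GPU buffer
 * objects and driver subsystems in roughly the reverse order they were set up.
 */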
void
intelDestroyContext(__DRIcontext * driContextPriv)
{
   struct brw_context *brw =
      (struct brw_context *) driContextPriv->driverPrivate;
   struct gl_context *ctx = &brw->ctx;

   /* Dump a final BMP in case the application doesn't call SwapBuffers */
   if (INTEL_DEBUG & DEBUG_AUB) {
      intel_batchbuffer_flush(brw);
      aub_dump_bmp(&brw->ctx);
   }

   _mesa_meta_free(&brw->ctx);

   if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
      /* Force a report. */
      brw->shader_time.report_time = 0;

      brw_collect_and_report_shader_time(brw);
      brw_destroy_shader_time(brw);
   }

   blorp_finish(&brw->blorp);

   brw_destroy_state(brw);
   brw_draw_destroy(brw);

   drm_intel_bo_unreference(brw->curbe.curbe_bo);
   if (brw->vs.base.scratch_bo)
      drm_intel_bo_unreference(brw->vs.base.scratch_bo);
   if (brw->tcs.base.scratch_bo)
      drm_intel_bo_unreference(brw->tcs.base.scratch_bo);
   if (brw->tes.base.scratch_bo)
      drm_intel_bo_unreference(brw->tes.base.scratch_bo);
   if (brw->gs.base.scratch_bo)
      drm_intel_bo_unreference(brw->gs.base.scratch_bo);
   if (brw->wm.base.scratch_bo)
      drm_intel_bo_unreference(brw->wm.base.scratch_bo);

   gen7_reset_hw_bt_pool_offsets(brw);
   drm_intel_bo_unreference(brw->hw_bt_pool.bo);
   brw->hw_bt_pool.bo = NULL;

   drm_intel_gem_context_destroy(brw->hw_ctx);

   if (ctx->swrast_context) {
      _swsetup_DestroyContext(&brw->ctx);
      _tnl_DestroyContext(&brw->ctx);
   }
   _vbo_DestroyContext(&brw->ctx);

   if (ctx->swrast_context)
      _swrast_DestroyContext(&brw->ctx);

   brw_fini_pipe_control(brw);
   intel_batchbuffer_free(brw);

   drm_intel_bo_unreference(brw->throttle_batch[1]);
   drm_intel_bo_unreference(brw->throttle_batch[0]);
   brw->throttle_batch[1] = NULL;
   brw->throttle_batch[0] = NULL;

   driDestroyOptionCache(&brw->optionCache);

   /* free the Mesa context */
   _mesa_free_context_data(&brw->ctx);

   driContextPriv->driverPrivate = NULL;
}
GLboolean
intelUnbindContext(__DRIcontext * driContextPriv)
{
   /* Unset current context and dispatch table */
   _mesa_make_current(NULL, NULL, NULL);

   return true;
}
/**
 * Fixes up the context for GLES2/3 with our default-to-sRGB-capable behavior
 * on window system framebuffers.
 *
 * Desktop GL is fairly reasonable in its handling of sRGB: You can ask if
 * your renderbuffer can do sRGB encode, and you can flip a switch that does
 * sRGB encode if the renderbuffer can handle it.  You can ask specifically
 * for a visual where you're guaranteed to be capable, but it turns out that
 * everyone just makes all their ARGB8888 visuals capable and doesn't offer
 * incapable ones, because there's no difference between the two in resources
 * used.  Applications thus get built that accidentally rely on the default
 * visual choice being sRGB, so we make ours sRGB capable.  Everything sounds
 * great.
 *
 * But for GLES2/3, they decided that it was silly to not turn on sRGB encode
 * for sRGB renderbuffers you made with the GL_EXT_texture_sRGB equivalent.
 * So they removed the enable knob and made it "if the renderbuffer is sRGB
 * capable, do sRGB encode".  Then, for your window system renderbuffers, you
 * can ask for sRGB visuals and get sRGB encode, or not ask for sRGB visuals
 * and get no sRGB encode (assuming that both kinds of visual are available).
 * Thus our choice to support sRGB by default on our visuals for desktop would
 * result in broken rendering of GLES apps that aren't expecting sRGB encode.
 *
 * Unfortunately, renderbuffer setup happens before a context is created.  So
 * in intel_screen.c we always set up sRGB, and here, if you're a GLES2/3
 * context (without an sRGB visual, though we don't have sRGB visuals exposed
 * yet), we go turn that back off before anyone finds out.
 */
static void
intel_gles3_srgb_workaround(struct brw_context *brw,
                            struct gl_framebuffer *fb)
{
   struct gl_context *ctx = &brw->ctx;

   if (_mesa_is_desktop_gl(ctx) || !fb->Visual.sRGBCapable)
      return;

   /* Some day when we support the sRGB capable bit on visuals available for
    * GLES, we'll need to respect that and not disable things here.
    */
   fb->Visual.sRGBCapable = false;
   for (int i = 0; i < BUFFER_COUNT; i++) {
      struct gl_renderbuffer *rb = fb->Attachment[i].Renderbuffer;
      if (rb)
         rb->Format = _mesa_get_srgb_format_linear(rb->Format);
   }
}
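/* DRI MakeCurrent hook: flush the previously current context if we are
 * switching, then bind the given draw/read drawables and make the context
 * current in Mesa.
 */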
GLboolean
intelMakeCurrent(__DRIcontext * driContextPriv,
                 __DRIdrawable * driDrawPriv,
                 __DRIdrawable * driReadPriv)
{
   struct brw_context *brw;
   GET_CURRENT_CONTEXT(curCtx);

   if (driContextPriv)
      brw = (struct brw_context *) driContextPriv->driverPrivate;
   else
      brw = NULL;

   /* According to the glXMakeCurrent() man page: "Pending commands to
    * the previous context, if any, are flushed before it is released."
    * But only flush if we're actually changing contexts.
    */
   if (brw_context(curCtx) && brw_context(curCtx) != brw) {
      _mesa_flush(curCtx);
   }

   if (driContextPriv) {
      struct gl_context *ctx = &brw->ctx;
      struct gl_framebuffer *fb, *readFb;

      if (driDrawPriv == NULL) {
         fb = _mesa_get_incomplete_framebuffer();
      } else {
         fb = driDrawPriv->driverPrivate;
         driContextPriv->dri2.draw_stamp = driDrawPriv->dri2.stamp - 1;
      }

      if (driReadPriv == NULL) {
         readFb = _mesa_get_incomplete_framebuffer();
      } else {
         readFb = driReadPriv->driverPrivate;
         driContextPriv->dri2.read_stamp = driReadPriv->dri2.stamp - 1;
      }

      /* The sRGB workaround changes the renderbuffer's format. We must change
       * the format before the renderbuffer's miptree gets allocated, otherwise
       * the formats of the renderbuffer and its miptree will differ.
       */
      intel_gles3_srgb_workaround(brw, fb);
      intel_gles3_srgb_workaround(brw, readFb);

      /* If the context viewport hasn't been initialized, force a call out to
       * the loader to get buffers so we have a drawable size for the initial
       * viewport. */
      if (!brw->ctx.ViewportInitialized)
         intel_prepare_render(brw);

      _mesa_make_current(ctx, fb, readFb);
   } else {
      _mesa_make_current(NULL, NULL, NULL);
   }

   return true;
}
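/* Make sure the winsys buffers are resolved (downsampled or color-resolved)
 * so their contents are valid for the window system before a DRI2 flush.
 */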
void
intel_resolve_for_dri2_flush(struct brw_context *brw,
                             __DRIdrawable *drawable)
{
   if (brw->gen < 6) {
      /* MSAA and fast color clear are not supported, so don't waste time
       * checking whether a resolve is needed.
       */
      return;
   }

   struct gl_framebuffer *fb = drawable->driverPrivate;
   struct intel_renderbuffer *rb;

   /* Usually, only the back buffer will need to be downsampled. However,
    * the front buffer will also need it if the user has rendered into it.
    */
   static const gl_buffer_index buffers[2] = {
         BUFFER_BACK_LEFT,
         BUFFER_FRONT_LEFT,
   };

   for (int i = 0; i < 2; ++i) {
      rb = intel_get_renderbuffer(fb, buffers[i]);
      if (rb == NULL || rb->mt == NULL)
         continue;
      if (rb->mt->num_samples <= 1)
         intel_miptree_resolve_color(brw, rb->mt, 0);
      else
         intel_renderbuffer_downsample(brw, rb);
   }
}
static unsigned
intel_bits_per_pixel(const struct intel_renderbuffer *rb)
{
   return _mesa_get_format_bytes(intel_rb_format(rb)) * 8;
}
static void
intel_query_dri2_buffers(struct brw_context *brw,
                         __DRIdrawable *drawable,
                         __DRIbuffer **buffers,
                         int *buffer_count);

static void
intel_process_dri2_buffer(struct brw_context *brw,
                          __DRIdrawable *drawable,
                          __DRIbuffer *buffer,
                          struct intel_renderbuffer *rb,
                          const char *buffer_name);

static void
intel_update_image_buffers(struct brw_context *brw, __DRIdrawable *drawable);
static void
intel_update_dri2_buffers(struct brw_context *brw, __DRIdrawable *drawable)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;
   struct intel_renderbuffer *rb;
   __DRIbuffer *buffers = NULL;
   int i, count;
   const char *region_name;

   /* Set this up front, so that in case our buffers get invalidated
    * while we're getting new buffers, we don't clobber the stamp and
    * thus ignore the invalidate. */
   drawable->lastStamp = drawable->dri2.stamp;

   if (unlikely(INTEL_DEBUG & DEBUG_DRI))
      fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);

   intel_query_dri2_buffers(brw, drawable, &buffers, &count);

   if (buffers == NULL)
      return;

   for (i = 0; i < count; i++) {
      switch (buffers[i].attachment) {
      case __DRI_BUFFER_FRONT_LEFT:
         rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
         region_name = "dri2 front buffer";
         break;

      case __DRI_BUFFER_FAKE_FRONT_LEFT:
         rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
         region_name = "dri2 fake front buffer";
         break;

      case __DRI_BUFFER_BACK_LEFT:
         rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
         region_name = "dri2 back buffer";
         break;

      case __DRI_BUFFER_DEPTH:
      case __DRI_BUFFER_HIZ:
      case __DRI_BUFFER_DEPTH_STENCIL:
      case __DRI_BUFFER_STENCIL:
      case __DRI_BUFFER_ACCUM:
      default:
         fprintf(stderr,
                 "unhandled buffer attach event, attachment type %d\n",
                 buffers[i].attachment);
         return;
      }

      intel_process_dri2_buffer(brw, drawable, &buffers[i], rb, region_name);
   }
}
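/* Refresh a drawable's renderbuffers from the window-system loader, using
 * the image loader when available and falling back to classic DRI2 buffers.
 */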
void
intel_update_renderbuffers(__DRIcontext *context, __DRIdrawable *drawable)
{
   struct brw_context *brw = context->driverPrivate;
   __DRIscreen *dri_screen = brw->screen->driScrnPriv;

   /* Set this up front, so that in case our buffers get invalidated
    * while we're getting new buffers, we don't clobber the stamp and
    * thus ignore the invalidate. */
   drawable->lastStamp = drawable->dri2.stamp;

   if (unlikely(INTEL_DEBUG & DEBUG_DRI))
      fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);

   if (dri_screen->image.loader)
      intel_update_image_buffers(brw, drawable);
   else
      intel_update_dri2_buffers(brw, drawable);

   driUpdateFramebufferSize(&brw->ctx, drawable);
}
/**
 * intel_prepare_render should be called anywhere that current read/drawbuffer
 * state is required.
 */
void
intel_prepare_render(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   __DRIcontext *driContext = brw->driContext;
   __DRIdrawable *drawable;

   drawable = driContext->driDrawablePriv;
   if (drawable && drawable->dri2.stamp != driContext->dri2.draw_stamp) {
      if (drawable->lastStamp != drawable->dri2.stamp)
         intel_update_renderbuffers(driContext, drawable);
      driContext->dri2.draw_stamp = drawable->dri2.stamp;
   }

   drawable = driContext->driReadablePriv;
   if (drawable && drawable->dri2.stamp != driContext->dri2.read_stamp) {
      if (drawable->lastStamp != drawable->dri2.stamp)
         intel_update_renderbuffers(driContext, drawable);
      driContext->dri2.read_stamp = drawable->dri2.stamp;
   }

   /* If we're currently rendering to the front buffer, the rendering
    * that will happen next will probably dirty the front buffer.  So
    * mark it as dirty here.
    */
   if (_mesa_is_front_buffer_drawing(ctx->DrawBuffer))
      brw->front_buffer_dirty = true;
}
/**
 * \brief Query DRI2 to obtain a DRIdrawable's buffers.
 *
 * To determine which DRI buffers to request, examine the renderbuffers
 * attached to the drawable's framebuffer. Then request the buffers with
 * DRI2GetBuffers() or DRI2GetBuffersWithFormat().
 *
 * This is called from intel_update_renderbuffers().
 *
 * \param drawable      Drawable whose buffers are queried.
 * \param buffers       [out] List of buffers returned by DRI2 query.
 * \param buffer_count  [out] Number of buffers returned.
 *
 * \see intel_update_renderbuffers()
 * \see DRI2GetBuffers()
 * \see DRI2GetBuffersWithFormat()
 */
static void
intel_query_dri2_buffers(struct brw_context *brw,
                         __DRIdrawable *drawable,
                         __DRIbuffer **buffers,
                         int *buffer_count)
{
   __DRIscreen *dri_screen = brw->screen->driScrnPriv;
   struct gl_framebuffer *fb = drawable->driverPrivate;
   int i = 0;
   unsigned attachments[8];

   struct intel_renderbuffer *front_rb;
   struct intel_renderbuffer *back_rb;

   front_rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
   back_rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);

   memset(attachments, 0, sizeof(attachments));
   if ((_mesa_is_front_buffer_drawing(fb) ||
        _mesa_is_front_buffer_reading(fb) ||
        !back_rb) && front_rb) {
      /* If a fake front buffer is in use, then querying for
       * __DRI_BUFFER_FRONT_LEFT will cause the server to copy the image from
       * the real front buffer to the fake front buffer.  So before doing the
       * query, we need to make sure all the pending drawing has landed in the
       * real front buffer.
       */
      intel_batchbuffer_flush(brw);
      intel_flush_front(&brw->ctx);

      attachments[i++] = __DRI_BUFFER_FRONT_LEFT;
      attachments[i++] = intel_bits_per_pixel(front_rb);
   } else if (front_rb && brw->front_buffer_dirty) {
      /* We have pending front buffer rendering, but we aren't querying for a
       * front buffer.  If the front buffer we have is a fake front buffer,
       * the X server is going to throw it away when it processes the query.
       * So before doing the query, make sure all the pending drawing has
       * landed in the real front buffer.
       */
      intel_batchbuffer_flush(brw);
      intel_flush_front(&brw->ctx);
   }

   if (back_rb) {
      attachments[i++] = __DRI_BUFFER_BACK_LEFT;
      attachments[i++] = intel_bits_per_pixel(back_rb);
   }

   assert(i <= ARRAY_SIZE(attachments));

   *buffers =
      dri_screen->dri2.loader->getBuffersWithFormat(drawable,
                                                    &drawable->w,
                                                    &drawable->h,
                                                    attachments, i / 2,
                                                    buffer_count,
                                                    drawable->loaderPrivate);
}
/**
 * \brief Assign a DRI buffer's DRM region to a renderbuffer.
 *
 * This is called from intel_update_renderbuffers().
 *
 * \par Note:
 *    DRI buffers whose attachment point is DRI2BufferStencil or
 *    DRI2BufferDepthStencil are handled as special cases.
 *
 * \param buffer_name is a human readable name, such as "dri2 front buffer",
 *        that is passed to drm_intel_bo_gem_create_from_name().
 *
 * \see intel_update_renderbuffers()
 */
static void
intel_process_dri2_buffer(struct brw_context *brw,
                          __DRIdrawable *drawable,
                          __DRIbuffer *buffer,
                          struct intel_renderbuffer *rb,
                          const char *buffer_name)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;
   drm_intel_bo *bo;

   if (!rb)
      return;

   unsigned num_samples = rb->Base.Base.NumSamples;

   /* We try to avoid closing and reopening the same BO name, because the first
    * use of a mapping of the buffer involves a bunch of page faulting which is
    * moderately expensive.
    */
   struct intel_mipmap_tree *last_mt;
   if (num_samples == 0)
      last_mt = rb->mt;
   else
      last_mt = rb->singlesample_mt;

   uint32_t old_name = 0;
   if (last_mt) {
      /* The bo already has a name because the miptree was created by a
       * previous call to intel_process_dri2_buffer(). If a bo already has a
       * name, then drm_intel_bo_flink() is a low-cost getter.  It does not
       * create a new name.
       */
      drm_intel_bo_flink(last_mt->bo, &old_name);
   }

   if (old_name == buffer->name)
      return;

   if (unlikely(INTEL_DEBUG & DEBUG_DRI)) {
      fprintf(stderr,
              "attaching buffer %d, at %d, cpp %d, pitch %d\n",
              buffer->name, buffer->attachment,
              buffer->cpp, buffer->pitch);
   }

   bo = drm_intel_bo_gem_create_from_name(brw->bufmgr, buffer_name,
                                          buffer->name);
   if (!bo) {
      fprintf(stderr,
              "Failed to open BO for returned DRI2 buffer "
              "(%dx%d, %s, named %d).\n"
              "This is likely a bug in the X Server that will lead to a "
              "crash soon.\n",
              drawable->w, drawable->h, buffer_name, buffer->name);
      return;
   }

   intel_update_winsys_renderbuffer_miptree(brw, rb, bo,
                                            drawable->w, drawable->h,
                                            buffer->pitch);

   if (_mesa_is_front_buffer_drawing(fb) &&
       (buffer->attachment == __DRI_BUFFER_FRONT_LEFT ||
        buffer->attachment == __DRI_BUFFER_FAKE_FRONT_LEFT) &&
       rb->Base.Base.NumSamples > 1) {
      intel_renderbuffer_upsample(brw, rb);
   }

   drm_intel_bo_unreference(bo);
}
/**
 * \brief Query DRI image loader to obtain a DRIdrawable's buffers.
 *
 * To determine which DRI buffers to request, examine the renderbuffers
 * attached to the drawable's framebuffer. Then request the buffers from
 * the image loader.
 *
 * This is called from intel_update_renderbuffers().
 *
 * \param drawable      Drawable whose buffers are queried.
 * \param buffers       [out] List of buffers returned by DRI2 query.
 * \param buffer_count  [out] Number of buffers returned.
 *
 * \see intel_update_renderbuffers()
 */
static void
intel_update_image_buffer(struct brw_context *intel,
                          __DRIdrawable *drawable,
                          struct intel_renderbuffer *rb,
                          __DRIimage *buffer,
                          enum __DRIimageBufferMask buffer_type)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;

   if (!rb || !buffer->bo)
      return;

   unsigned num_samples = rb->Base.Base.NumSamples;

   /* Check and see if we're already bound to the right
    * buffer object.
    */
   struct intel_mipmap_tree *last_mt;
   if (num_samples == 0)
      last_mt = rb->mt;
   else
      last_mt = rb->singlesample_mt;

   if (last_mt && last_mt->bo == buffer->bo)
      return;

   intel_update_winsys_renderbuffer_miptree(intel, rb, buffer->bo,
                                            buffer->width, buffer->height,
                                            buffer->pitch);

   if (_mesa_is_front_buffer_drawing(fb) &&
       buffer_type == __DRI_IMAGE_BUFFER_FRONT &&
       rb->Base.Base.NumSamples > 1) {
      intel_renderbuffer_upsample(intel, rb);
   }
}
static void
intel_update_image_buffers(struct brw_context *brw, __DRIdrawable *drawable)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;
   __DRIscreen *dri_screen = brw->screen->driScrnPriv;
   struct intel_renderbuffer *front_rb;
   struct intel_renderbuffer *back_rb;
   struct __DRIimageList images;
   unsigned int format;
   uint32_t buffer_mask = 0;
   int ret;

   front_rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
   back_rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);

   if (back_rb)
      format = intel_rb_format(back_rb);
   else if (front_rb)
      format = intel_rb_format(front_rb);
   else
      return;

   if (front_rb && (_mesa_is_front_buffer_drawing(fb) ||
                    _mesa_is_front_buffer_reading(fb) || !back_rb)) {
      buffer_mask |= __DRI_IMAGE_BUFFER_FRONT;
   }

   if (back_rb)
      buffer_mask |= __DRI_IMAGE_BUFFER_BACK;

   ret = dri_screen->image.loader->getBuffers(drawable,
                                              driGLFormatToImageFormat(format),
                                              &drawable->dri2.stamp,
                                              drawable->loaderPrivate,
                                              buffer_mask,
                                              &images);
   if (!ret)
      return;

   if (images.image_mask & __DRI_IMAGE_BUFFER_FRONT) {
      drawable->w = images.front->width;
      drawable->h = images.front->height;
      intel_update_image_buffer(brw,
                                drawable,
                                front_rb,
                                images.front,
                                __DRI_IMAGE_BUFFER_FRONT);
   }

   if (images.image_mask & __DRI_IMAGE_BUFFER_BACK) {
      drawable->w = images.back->width;
      drawable->h = images.back->height;
      intel_update_image_buffer(brw,
                                drawable,
                                back_rb,
                                images.back,
                                __DRI_IMAGE_BUFFER_BACK);
   }
}