/**************************************************************************
 *
 * Copyright 2003 VMware, Inc.
 * Copyright (C) Intel Corp. 2006.  All Rights Reserved.
 * Intel funded Tungsten Graphics to
 * develop this 3D driver.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */
#include "main/api_exec.h"
#include "main/context.h"
#include "main/fbobject.h"
#include "main/extensions.h"
#include "main/imports.h"
#include "main/macros.h"
#include "main/points.h"
#include "main/version.h"
#include "main/vtxfmt.h"
#include "main/texobj.h"
#include "main/framebuffer.h"

#include "vbo/vbo_context.h"

#include "drivers/common/driverfuncs.h"
#include "drivers/common/meta.h"

#include "brw_context.h"
#include "brw_defines.h"
#include "brw_compiler.h"
#include "brw_draw.h"
#include "brw_state.h"

#include "intel_batchbuffer.h"
#include "intel_buffer_objects.h"
#include "intel_buffers.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
#include "intel_pixel.h"
#include "intel_image.h"
#include "intel_tex.h"
#include "intel_tex_obj.h"

#include "swrast/swrast.h"
#include "swrast_setup/swrast_setup.h"
#include "tnl/tnl.h"
#include "tnl/t_pipeline.h"
#include "util/ralloc.h"
#include "util/debug.h"
/***************************************
 * Mesa's Driver Functions
 ***************************************/

const char *const brw_vendor_string = "Intel Open Source Technology Center";
static const char *
get_bsw_model(const struct intel_screen *intelScreen)
{
   switch (intelScreen->eu_total) {
   case 16:
      return "405";
   case 12:
      return "400";
   default:
      return "   ";
   }
}
const char *
brw_get_renderer_string(const struct intel_screen *intelScreen)
{
   const char *chipset;
   static char buffer[128];
   char *bsw = NULL;

   switch (intelScreen->deviceID) {
#undef CHIPSET
#define CHIPSET(id, symbol, str) case id: chipset = str; break;
#include "pci_ids/i965_pci_ids.h"
   default:
      chipset = "Unknown Intel Chipset";
      break;
   }

   /* Braswell branding is funny, so we have to fix it up here */
   if (intelScreen->deviceID == 0x22B1) {
      bsw = strdup(chipset);
      char *needle = strstr(bsw, "XXX");
      if (needle) {
         memcpy(needle, get_bsw_model(intelScreen), 3);
         chipset = bsw;
      }
   }

   (void) driGetRendererString(buffer, chipset, 0);
   free(bsw);
   return buffer;
}
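/* An illustrative sketch of the fixup above (strings assumed, not taken
 * from the PCI ID table in this tree): for deviceID 0x22B1 the table names
 * the part with an "XXX" placeholder, and the memcpy() patches those three
 * characters in place, e.g. turning "Intel(R) HD Graphics XXX (Braswell)"
 * into "Intel(R) HD Graphics 405 (Braswell)" when get_bsw_model() reports
 * "405".
 */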
static const GLubyte *
intel_get_string(struct gl_context * ctx, GLenum name)
{
   const struct brw_context *const brw = brw_context(ctx);

   switch (name) {
   case GL_VENDOR:
      return (GLubyte *) brw_vendor_string;

   case GL_RENDERER:
      return
         (GLubyte *) brw_get_renderer_string(brw->intelScreen);

   default:
      return NULL;
   }
}
static void
intel_viewport(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);
   __DRIcontext *driContext = brw->driContext;

   if (_mesa_is_winsys_fbo(ctx->DrawBuffer)) {
      if (driContext->driDrawablePriv)
         dri2InvalidateDrawable(driContext->driDrawablePriv);
      if (driContext->driReadablePriv)
         dri2InvalidateDrawable(driContext->driReadablePriv);
   }
}
static void
intel_update_framebuffer(struct gl_context *ctx,
                         struct gl_framebuffer *fb)
{
   struct brw_context *brw = brw_context(ctx);

   /* Quantize the derived default number of samples */
   fb->DefaultGeometry._NumSamples =
      intel_quantize_num_samples(brw->intelScreen,
                                 fb->DefaultGeometry.NumSamples);
}
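/* A quantization sketch (illustrative; the supported-mode list is assumed,
 * not taken from this file): if the screen supports 0/4/8 samples and a
 * no-attachment framebuffer asks for a default of 6,
 * intel_quantize_num_samples() is expected to settle on 8, a mode the
 * hardware can actually render, rather than the unsupported literal 6.
 */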
static void
intel_update_state(struct gl_context * ctx, GLuint new_state)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_texture_object *tex_obj;
   struct intel_renderbuffer *depth_irb;

   if (ctx->swrast_context)
      _swrast_InvalidateState(ctx, new_state);
   _vbo_InvalidateState(ctx, new_state);

   brw->NewGLState |= new_state;

   _mesa_unlock_context_textures(ctx);

   /* Resolve the depth buffer's HiZ buffer. */
   depth_irb = intel_get_renderbuffer(ctx->DrawBuffer, BUFFER_DEPTH);
   if (depth_irb)
      intel_renderbuffer_resolve_hiz(brw, depth_irb);

   /* Resolve depth buffer and render cache of each enabled texture. */
   int maxEnabledUnit = ctx->Texture._MaxEnabledTexImageUnit;
   for (int i = 0; i <= maxEnabledUnit; i++) {
      if (!ctx->Texture.Unit[i]._Current)
         continue;
      tex_obj = intel_texture_object(ctx->Texture.Unit[i]._Current);
      if (!tex_obj || !tex_obj->mt)
         continue;
      intel_miptree_all_slices_resolve_depth(brw, tex_obj->mt);
      /* The sampling engine understands lossless compression, so resolving
       * those surfaces should be skipped for performance reasons.
       */
      intel_miptree_resolve_color(brw, tex_obj->mt,
                                  INTEL_MIPTREE_IGNORE_CCS_E);
      brw_render_cache_set_check_flush(brw, tex_obj->mt->bo);
   }

   /* Resolve color for each active shader image. */
   for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
      const struct gl_shader *shader = ctx->_Shader->CurrentProgram[i] ?
         ctx->_Shader->CurrentProgram[i]->_LinkedShaders[i] : NULL;

      if (unlikely(shader && shader->NumImages)) {
         for (unsigned j = 0; j < shader->NumImages; j++) {
            struct gl_image_unit *u = &ctx->ImageUnits[shader->ImageUnits[j]];
            tex_obj = intel_texture_object(u->TexObj);

            if (tex_obj && tex_obj->mt) {
               /* Access to images is implemented using indirect messages
                * against the data port. Normal render target writes
                * understand lossless compression, but unfortunately the
                * typed/untyped read/write interface doesn't. Therefore
                * compressed surfaces need to be resolved prior to accessing
                * them.
                */
               intel_miptree_resolve_color(brw, tex_obj->mt, 0);
               brw_render_cache_set_check_flush(brw, tex_obj->mt->bo);
            }
         }
      }
   }

   /* If FRAMEBUFFER_SRGB is used on Gen9+ then we need to resolve any of the
    * single-sampled color renderbuffers because the CCS buffer isn't
    * supported for SRGB formats. This only matters if FRAMEBUFFER_SRGB is
    * enabled because otherwise the surface state will be programmed with the
    * linear equivalent format anyway.
    */
   if (brw->gen >= 9 && ctx->Color.sRGBEnabled) {
      struct gl_framebuffer *fb = ctx->DrawBuffer;
      for (int i = 0; i < fb->_NumColorDrawBuffers; i++) {
         struct gl_renderbuffer *rb = fb->_ColorDrawBuffers[i];

         if (rb == NULL)
            continue;

         struct intel_renderbuffer *irb = intel_renderbuffer(rb);
         struct intel_mipmap_tree *mt = irb->mt;

         if (mt == NULL ||
             mt->num_samples > 1 ||
             _mesa_get_srgb_format_linear(mt->format) == mt->format)
            continue;

         /* Lossless compression is not supported for SRGB formats; it
          * should be impossible to get here with such surfaces.
          */
         assert(!intel_miptree_is_lossless_compressed(brw, mt));
         intel_miptree_resolve_color(brw, mt, 0);
         brw_render_cache_set_check_flush(brw, mt->bo);
      }
   }

   _mesa_lock_context_textures(ctx);

   if (new_state & _NEW_BUFFERS) {
      intel_update_framebuffer(ctx, ctx->DrawBuffer);
      if (ctx->DrawBuffer != ctx->ReadBuffer)
         intel_update_framebuffer(ctx, ctx->ReadBuffer);
   }
}
#define flushFront(screen) ((screen)->image.loader ? (screen)->image.loader->flushFrontBuffer : (screen)->dri2.loader->flushFrontBuffer)
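/* flushFront() resolves, per screen, which loader interface is in use -- the
 * newer image loader's flushFrontBuffer hook when present, otherwise the
 * classic DRI2 loader's -- and yields that hook. Callers below invoke it as
 * flushFront(screen)(driDrawable, driDrawable->loaderPrivate).
 */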
static void
intel_flush_front(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);
   __DRIcontext *driContext = brw->driContext;
   __DRIdrawable *driDrawable = driContext->driDrawablePriv;
   __DRIscreen *const screen = brw->intelScreen->driScrnPriv;

   if (brw->front_buffer_dirty && _mesa_is_winsys_fbo(ctx->DrawBuffer)) {
      if (flushFront(screen) && driDrawable &&
          driDrawable->loaderPrivate) {

         /* Resolve before flushing FAKE_FRONT_LEFT to FRONT_LEFT.
          *
          * This potentially resolves both front and back buffer. It
          * is unnecessary to resolve the back, but harms nothing except
          * performance. And no one cares about front-buffer render
          * performance.
          */
         intel_resolve_for_dri2_flush(brw, driDrawable);
         intel_batchbuffer_flush(brw);

         flushFront(screen)(driDrawable, driDrawable->loaderPrivate);

         /* We set the dirty bit in intel_prepare_render() if we're
          * front buffer rendering once we get there.
          */
         brw->front_buffer_dirty = false;
      }
   }
}
static void
intel_glFlush(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);

   intel_batchbuffer_flush(brw);
   intel_flush_front(ctx);

   brw->need_flush_throttle = true;
}
static void
intel_finish(struct gl_context * ctx)
{
   struct brw_context *brw = brw_context(ctx);

   intel_glFlush(ctx);

   if (brw->batch.last_bo)
      drm_intel_bo_wait_rendering(brw->batch.last_bo);
}
void
brw_init_driver_functions(struct brw_context *brw,
                          struct dd_function_table *functions)
{
   _mesa_init_driver_functions(functions);

   /* GLX uses DRI2 invalidate events to handle window resizing.
    * Unfortunately, EGL does not - libEGL is written in XCB (not Xlib),
    * which doesn't provide a mechanism for snooping the event queues.
    *
    * So EGL still relies on viewport hacks to handle window resizing.
    * This should go away with DRI3000.
    */
   if (!brw->driContext->driScreenPriv->dri2.useInvalidate)
      functions->Viewport = intel_viewport;

   functions->Flush = intel_glFlush;
   functions->Finish = intel_finish;
   functions->GetString = intel_get_string;
   functions->UpdateState = intel_update_state;

   intelInitTextureFuncs(functions);
   intelInitTextureImageFuncs(functions);
   intelInitTextureSubImageFuncs(functions);
   intelInitTextureCopyImageFuncs(functions);
   intelInitCopyImageFuncs(functions);
   intelInitClearFuncs(functions);
   intelInitBufferFuncs(functions);
   intelInitPixelFuncs(functions);
   intelInitBufferObjectFuncs(functions);
   intel_init_syncobj_functions(functions);
   brw_init_object_purgeable_functions(functions);

   brwInitFragProgFuncs( functions );
   brw_init_common_queryobj_functions(functions);
   if (brw->gen >= 6)
      gen6_init_queryobj_functions(functions);
   else
      gen4_init_queryobj_functions(functions);
   brw_init_compute_functions(functions);
   if (brw->gen >= 7)
      brw_init_conditional_render_functions(functions);

   functions->QueryInternalFormat = brw_query_internal_format;

   functions->NewTransformFeedback = brw_new_transform_feedback;
   functions->DeleteTransformFeedback = brw_delete_transform_feedback;
   functions->GetTransformFeedbackVertexCount =
      brw_get_transform_feedback_vertex_count;
   if (brw->gen >= 7) {
      functions->BeginTransformFeedback = gen7_begin_transform_feedback;
      functions->EndTransformFeedback = gen7_end_transform_feedback;
      functions->PauseTransformFeedback = gen7_pause_transform_feedback;
      functions->ResumeTransformFeedback = gen7_resume_transform_feedback;
   } else {
      functions->BeginTransformFeedback = brw_begin_transform_feedback;
      functions->EndTransformFeedback = brw_end_transform_feedback;
   }

   if (brw->gen >= 6)
      functions->GetSamplePosition = gen6_get_sample_position;
}
static void
brw_initialize_context_constants(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   const struct brw_compiler *compiler = brw->intelScreen->compiler;

   const bool stage_exists[MESA_SHADER_STAGES] = {
      [MESA_SHADER_VERTEX] = true,
      [MESA_SHADER_TESS_CTRL] = brw->gen >= 7,
      [MESA_SHADER_TESS_EVAL] = brw->gen >= 7,
      [MESA_SHADER_GEOMETRY] = brw->gen >= 6,
      [MESA_SHADER_FRAGMENT] = true,
      [MESA_SHADER_COMPUTE] =
         (ctx->API == API_OPENGL_CORE &&
          ctx->Const.MaxComputeWorkGroupSize[0] >= 1024) ||
         (ctx->API == API_OPENGLES2 &&
          ctx->Const.MaxComputeWorkGroupSize[0] >= 128) ||
         _mesa_extension_override_enables.ARB_compute_shader,
   };

   unsigned num_stages = 0;
   for (int i = 0; i < MESA_SHADER_STAGES; i++) {
      if (stage_exists[i])
         num_stages++;
   }

   unsigned max_samplers =
      brw->gen >= 8 || brw->is_haswell ? BRW_MAX_TEX_UNIT : 16;

   ctx->Const.MaxDualSourceDrawBuffers = 1;
   ctx->Const.MaxDrawBuffers = BRW_MAX_DRAW_BUFFERS;
   ctx->Const.MaxCombinedShaderOutputResources =
      MAX_IMAGE_UNITS + BRW_MAX_DRAW_BUFFERS;

   ctx->Const.QueryCounterBits.Timestamp = 36;

   ctx->Const.MaxTextureCoordUnits = 8; /* Mesa limit */
   ctx->Const.MaxImageUnits = MAX_IMAGE_UNITS;
   ctx->Const.MaxRenderbufferSize = 8192;
   ctx->Const.MaxTextureLevels = MIN2(14 /* 8192 */, MAX_TEXTURE_LEVELS);
   ctx->Const.Max3DTextureLevels = 12; /* 2048 */
   ctx->Const.MaxCubeTextureLevels = 14; /* 8192 */
   ctx->Const.MaxArrayTextureLayers = brw->gen >= 7 ? 2048 : 512;
   ctx->Const.MaxTextureMbytes = 1536;
   ctx->Const.MaxTextureRectSize = 1 << 12;
   ctx->Const.MaxTextureMaxAnisotropy = 16.0;
   ctx->Const.StripTextureBorder = true;
   if (brw->gen >= 7)
      ctx->Const.MaxProgramTextureGatherComponents = 4;
   else if (brw->gen == 6)
      ctx->Const.MaxProgramTextureGatherComponents = 1;

   ctx->Const.MaxUniformBlockSize = 65536;

   for (int i = 0; i < MESA_SHADER_STAGES; i++) {
      struct gl_program_constants *prog = &ctx->Const.Program[i];

      if (!stage_exists[i])
         continue;

      prog->MaxTextureImageUnits = max_samplers;

      prog->MaxUniformBlocks = BRW_MAX_UBO;
      prog->MaxCombinedUniformComponents =
         prog->MaxUniformComponents +
         ctx->Const.MaxUniformBlockSize / 4 * prog->MaxUniformBlocks;

      prog->MaxAtomicCounters = MAX_ATOMIC_COUNTERS;
      prog->MaxAtomicBuffers = BRW_MAX_ABO;
      prog->MaxImageUniforms = compiler->scalar_stage[i] ? BRW_MAX_IMAGES : 0;
      prog->MaxShaderStorageBlocks = BRW_MAX_SSBO;
   }

   ctx->Const.MaxTextureUnits =
      MIN2(ctx->Const.MaxTextureCoordUnits,
           ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits);

   ctx->Const.MaxUniformBufferBindings = num_stages * BRW_MAX_UBO;
   ctx->Const.MaxCombinedUniformBlocks = num_stages * BRW_MAX_UBO;
   ctx->Const.MaxCombinedAtomicBuffers = num_stages * BRW_MAX_ABO;
   ctx->Const.MaxCombinedShaderStorageBlocks = num_stages * BRW_MAX_SSBO;
   ctx->Const.MaxShaderStorageBufferBindings = num_stages * BRW_MAX_SSBO;
   ctx->Const.MaxCombinedTextureImageUnits = num_stages * max_samplers;
   ctx->Const.MaxCombinedImageUniforms = num_stages * BRW_MAX_IMAGES;
   /* Hardware only supports a limited number of transform feedback buffers.
    * So we need to override the Mesa default (which is based only on software
    * limits).
    */
   ctx->Const.MaxTransformFeedbackBuffers = BRW_MAX_SOL_BUFFERS;

   /* On Gen6, in the worst case, we use up one binding table entry per
    * transform feedback component (see comments above the definition of
    * BRW_MAX_SOL_BINDINGS, in brw_context.h), so we need to advertise a value
    * for MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS equal to
    * BRW_MAX_SOL_BINDINGS.
    *
    * In "separate components" mode, we need to divide this value by
    * BRW_MAX_SOL_BUFFERS, so that the total number of binding table entries
    * used up by all buffers will not exceed BRW_MAX_SOL_BINDINGS.
    */
   ctx->Const.MaxTransformFeedbackInterleavedComponents = BRW_MAX_SOL_BINDINGS;
   ctx->Const.MaxTransformFeedbackSeparateComponents =
      BRW_MAX_SOL_BINDINGS / BRW_MAX_SOL_BUFFERS;

   ctx->Const.AlwaysUseGetTransformFeedbackVertexCount = true;
   int max_samples;
   const int *msaa_modes = intel_supported_msaa_modes(brw->intelScreen);
   const int clamp_max_samples =
      driQueryOptioni(&brw->optionCache, "clamp_max_samples");

   if (clamp_max_samples < 0) {
      max_samples = msaa_modes[0];
   } else {
      /* Select the largest supported MSAA mode that does not exceed
       * clamp_max_samples.
       */
      max_samples = 0;
      for (int i = 0; msaa_modes[i] != 0; ++i) {
         if (msaa_modes[i] <= clamp_max_samples) {
            max_samples = msaa_modes[i];
            break;
         }
      }
   }
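   /* Worked example (illustrative; the mode list is assumed): msaa_modes is
    * ordered largest-first and 0-terminated, so with modes {8, 4, 0} and
    * clamp_max_samples == 6 the loop above settles on 4, the largest mode
    * not exceeding the clamp, while clamp_max_samples < 0 keeps the full 8
    * via msaa_modes[0].
    */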
   ctx->Const.MaxSamples = max_samples;
   ctx->Const.MaxColorTextureSamples = max_samples;
   ctx->Const.MaxDepthTextureSamples = max_samples;
   ctx->Const.MaxIntegerSamples = max_samples;
   ctx->Const.MaxImageSamples = 0;

   /* gen6_set_sample_maps() sets SampleMap{2,4,8}x variables which are used
    * to map indices of rectangular grid to sample numbers within a pixel.
    * These variables are used by GL_EXT_framebuffer_multisample_blit_scaled
    * extension implementation. For more details see the comment above
    * gen6_set_sample_maps() definition.
    */
   gen6_set_sample_maps(ctx);

   ctx->Const.MinLineWidth = 1.0;
   ctx->Const.MinLineWidthAA = 1.0;
   if (brw->gen >= 6) {
      ctx->Const.MaxLineWidth = 7.375;
      ctx->Const.MaxLineWidthAA = 7.375;
      ctx->Const.LineWidthGranularity = 0.125;
   } else {
      ctx->Const.MaxLineWidth = 7.0;
      ctx->Const.MaxLineWidthAA = 7.0;
      ctx->Const.LineWidthGranularity = 0.5;
   }

   /* For non-antialiased lines, we have to round the line width to the
    * nearest whole number. Make sure that we don't advertise a line
    * width that, when rounded, will be beyond the actual hardware
    * maximum.
    */
   assert(roundf(ctx->Const.MaxLineWidth) <= ctx->Const.MaxLineWidth);
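   /* Sanity example for the assert above: roundf(7.375) == 7, which stays
    * within the hardware limit; advertising 7.5 instead would round to 8
    * and exceed it, which is exactly what the assertion guards against.
    */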
   ctx->Const.MinPointSize = 1.0;
   ctx->Const.MinPointSizeAA = 1.0;
   ctx->Const.MaxPointSize = 255.0;
   ctx->Const.MaxPointSizeAA = 255.0;
   ctx->Const.PointSizeGranularity = 1.0;

   if (brw->gen >= 5 || brw->is_g4x)
      ctx->Const.MaxClipPlanes = 8;

   ctx->Const.LowerTessLevel = true;

   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeInstructions = 16 * 1024;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxAluInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxTexInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxTexIndirections = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAluInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTexInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTexIndirections = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAttribs = 16;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTemps = 256;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAddressRegs = 1;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeParameters = 1024;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxEnvParams =
      MIN2(ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeParameters,
           ctx->Const.Program[MESA_SHADER_VERTEX].MaxEnvParams);

   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeInstructions = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAluInstructions = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTexInstructions = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTexIndirections = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAttribs = 12;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTemps = 256;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAddressRegs = 0;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeParameters = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxEnvParams =
      MIN2(ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeParameters,
           ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxEnvParams);

   /* Fragment shaders use real, 32-bit twos-complement integers for all
    * integer types.
    */
   ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.RangeMin = 31;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.RangeMax = 30;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.Precision = 0;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].HighInt =
      ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MediumInt =
      ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt;

   ctx->Const.Program[MESA_SHADER_VERTEX].LowInt.RangeMin = 31;
   ctx->Const.Program[MESA_SHADER_VERTEX].LowInt.RangeMax = 30;
   ctx->Const.Program[MESA_SHADER_VERTEX].LowInt.Precision = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].HighInt =
      ctx->Const.Program[MESA_SHADER_VERTEX].LowInt;
   ctx->Const.Program[MESA_SHADER_VERTEX].MediumInt =
      ctx->Const.Program[MESA_SHADER_VERTEX].LowInt;

   /* Gen6 converts quads to polygons in the beginning of the 3D pipeline,
    * but we're not sure how it's actually done for vertex order, which
    * affects the provoking vertex decision. Always use the last vertex
    * convention for quad primitives, which works as expected for now.
    */
   ctx->Const.QuadsFollowProvokingVertexConvention = false;

   ctx->Const.NativeIntegers = true;
   ctx->Const.VertexID_is_zero_based = true;

   /* Regarding the CMP instruction, the Ivybridge PRM says:
    *
    *   "For each enabled channel 0b or 1b is assigned to the appropriate flag
    *    bit and 0/all zeros or all ones (e.g, byte 0xFF, word 0xFFFF, DWord
    *    0xFFFFFFFF) is assigned to dst."
    *
    * but PRMs for earlier generations say
    *
    *   "In dword format, one GRF may store up to 8 results. When the register
    *    is used later as a vector of Booleans, as only LSB at each channel
    *    contains meaning [sic] data, software should make sure all higher bits
    *    are masked out (e.g. by 'and-ing' an [sic] 0x01 constant)."
    *
    * We select the representation of a true boolean uniform to be ~0, and fix
    * the results of Gen <= 5 CMP instructions with -(result & 1).
    */
   ctx->Const.UniformBooleanTrue = ~0;
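   /* A fixup sketch for the Gen <= 5 case described above: CMP only
    * guarantees the LSB there, so the backend computes -(result & 1),
    * mapping 1 -> 0xffffffff (i.e. ~0, matching UniformBooleanTrue) and
    * 0 -> 0, giving the same all-ones convention the Ivybridge PRM promises
    * natively.
    */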
   /* From the gen4 PRM, volume 4 page 127:
    *
    *    "For SURFTYPE_BUFFER non-rendertarget surfaces, this field specifies
    *     the base address of the first element of the surface, computed in
    *     software by adding the surface base address to the byte offset of
    *     the element in the buffer."
    *
    * However, unaligned accesses are slower, so enforce buffer alignment.
    */
   ctx->Const.UniformBufferOffsetAlignment = 16;

   /* ShaderStorageBufferOffsetAlignment should be a cacheline (64 bytes) so
    * that we can safely have the CPU and GPU writing the same SSBO on
    * non-cachecoherent systems (our Atom CPUs). With UBOs, the GPU never
    * writes, so there's no problem. For an SSBO, the GPU and the CPU can
    * be updating disjoint regions of the buffer simultaneously and that will
    * break if the regions overlap the same cacheline.
    */
   ctx->Const.ShaderStorageBufferOffsetAlignment = 64;
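   /* Worked example (illustrative): with only 16-byte alignment, SSBO
    * ranges bound at offsets 0 and 32 would share the cacheline covering
    * bytes 0..63, so a CPU write to one range could clobber a concurrent
    * GPU write to the other on a non-coherent Atom. Rounding offsets up to
    * 64 keeps each bound range in its own cacheline(s).
    */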
   ctx->Const.TextureBufferOffsetAlignment = 16;
   ctx->Const.MaxTextureBufferSize = 128 * 1024 * 1024;

   if (brw->gen >= 6) {
      ctx->Const.MaxVarying = 32;
      ctx->Const.Program[MESA_SHADER_VERTEX].MaxOutputComponents = 128;
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxInputComponents = 64;
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxOutputComponents = 128;
      ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxInputComponents = 128;
      ctx->Const.Program[MESA_SHADER_TESS_CTRL].MaxInputComponents = 128;
      ctx->Const.Program[MESA_SHADER_TESS_CTRL].MaxOutputComponents = 128;
      ctx->Const.Program[MESA_SHADER_TESS_EVAL].MaxInputComponents = 128;
      ctx->Const.Program[MESA_SHADER_TESS_EVAL].MaxOutputComponents = 128;
   }

   /* We want the GLSL compiler to emit code that uses condition codes */
   for (int i = 0; i < MESA_SHADER_STAGES; i++) {
      ctx->Const.ShaderCompilerOptions[i] =
         brw->intelScreen->compiler->glsl_compiler_options[i];
   }

   if (brw->gen >= 7) {
      ctx->Const.MaxViewportWidth = 32768;
      ctx->Const.MaxViewportHeight = 32768;
   }

   /* ARB_viewport_array */
   if (brw->gen >= 6 && ctx->API == API_OPENGL_CORE) {
      ctx->Const.MaxViewports = GEN6_NUM_VIEWPORTS;
      ctx->Const.ViewportSubpixelBits = 0;

      /* Cast to float before negating because MaxViewportWidth is unsigned.
       */
      ctx->Const.ViewportBounds.Min = -(float)ctx->Const.MaxViewportWidth;
      ctx->Const.ViewportBounds.Max = ctx->Const.MaxViewportWidth;
   }

   /* ARB_gpu_shader5 */
   if (brw->gen >= 7)
      ctx->Const.MaxVertexStreams = MIN2(4, MAX_VERTEX_STREAMS);

   /* ARB_framebuffer_no_attachments */
   ctx->Const.MaxFramebufferWidth = 16384;
   ctx->Const.MaxFramebufferHeight = 16384;
   ctx->Const.MaxFramebufferLayers = ctx->Const.MaxArrayTextureLayers;
   ctx->Const.MaxFramebufferSamples = max_samples;
}
static void
brw_initialize_cs_context_constants(struct brw_context *brw, unsigned max_threads)
{
   struct gl_context *ctx = &brw->ctx;

   /* For ES, we set these constants based on SIMD8.
    *
    * TODO: Once we can always generate SIMD16, we should update this.
    *
    * For GL, we assume we can generate a SIMD16 program, but this currently
    * is not always true. This allows us to run more test cases, and will be
    * required based on desktop GL compute shader requirements.
    */
   const int simd_size = ctx->API == API_OPENGL_CORE ? 16 : 8;

   const uint32_t max_invocations = simd_size * max_threads;
   ctx->Const.MaxComputeWorkGroupSize[0] = max_invocations;
   ctx->Const.MaxComputeWorkGroupSize[1] = max_invocations;
   ctx->Const.MaxComputeWorkGroupSize[2] = max_invocations;
   ctx->Const.MaxComputeWorkGroupInvocations = max_invocations;
   ctx->Const.MaxComputeSharedMemorySize = 64 * 1024;
}
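/* Worked example (max_threads value assumed, not taken from any particular
 * devinfo): with max_cs_threads = 64, a core-profile context gets
 * 16 * 64 = 1024 maximum invocations -- the GL 4.3 floor for
 * MAX_COMPUTE_WORK_GROUP_INVOCATIONS -- while an ES context derives
 * 8 * 64 = 512 from SIMD8.
 */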
/**
 * Process driconf (drirc) options, setting appropriate context flags.
 *
 * intelInitExtensions still pokes at optionCache directly, in order to
 * avoid advertising various extensions. No flags are set, so it makes
 * sense to continue doing that there.
 */
static void
brw_process_driconf_options(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   driOptionCache *options = &brw->optionCache;
   driParseConfigFiles(options, &brw->intelScreen->optionCache,
                       brw->driContext->driScreenPriv->myNum, "i965");

   int bo_reuse_mode = driQueryOptioni(options, "bo_reuse");
   switch (bo_reuse_mode) {
   case DRI_CONF_BO_REUSE_DISABLED:
      break;
   case DRI_CONF_BO_REUSE_ALL:
      intel_bufmgr_gem_enable_reuse(brw->bufmgr);
      break;
   }

   if (!driQueryOptionb(options, "hiz")) {
      brw->has_hiz = false;
      /* On gen6, you can only do separate stencil with HIZ. */
      if (brw->gen == 6)
         brw->has_separate_stencil = false;
   }

   if (driQueryOptionb(options, "always_flush_batch")) {
      fprintf(stderr, "flushing batchbuffer before/after each draw call\n");
      brw->always_flush_batch = true;
   }

   if (driQueryOptionb(options, "always_flush_cache")) {
      fprintf(stderr, "flushing GPU caches before/after each draw call\n");
      brw->always_flush_cache = true;
   }

   if (driQueryOptionb(options, "disable_throttling")) {
      fprintf(stderr, "disabling flush throttling\n");
      brw->disable_throttling = true;
   }

   brw->precompile = driQueryOptionb(&brw->optionCache, "shader_precompile");

   ctx->Const.ForceGLSLExtensionsWarn =
      driQueryOptionb(options, "force_glsl_extensions_warn");

   ctx->Const.DisableGLSLLineContinuations =
      driQueryOptionb(options, "disable_glsl_line_continuations");

   ctx->Const.AllowGLSLExtensionDirectiveMidShader =
      driQueryOptionb(options, "allow_glsl_extension_directive_midshader");

   brw->dual_color_blend_by_location =
      driQueryOptionb(options, "dual_color_blend_by_location");
}
GLboolean
brwCreateContext(gl_api api,
                 const struct gl_config *mesaVis,
                 __DRIcontext *driContextPriv,
                 unsigned major_version,
                 unsigned minor_version,
                 uint32_t flags,
                 bool notify_reset,
                 unsigned *dri_ctx_error,
                 void *sharedContextPrivate)
{
   __DRIscreen *sPriv = driContextPriv->driScreenPriv;
   struct gl_context *shareCtx = (struct gl_context *) sharedContextPrivate;
   struct intel_screen *screen = sPriv->driverPrivate;
   const struct brw_device_info *devinfo = screen->devinfo;
   struct dd_function_table functions;

   /* Only allow the __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS flag if the kernel
    * provides us with context reset notifications.
    */
   uint32_t allowed_flags = __DRI_CTX_FLAG_DEBUG
      | __DRI_CTX_FLAG_FORWARD_COMPATIBLE;

   if (screen->has_context_reset_notification)
      allowed_flags |= __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS;

   if (flags & ~allowed_flags) {
      *dri_ctx_error = __DRI_CTX_ERROR_UNKNOWN_FLAG;
      return false;
   }

   struct brw_context *brw = rzalloc(NULL, struct brw_context);
   if (!brw) {
      fprintf(stderr, "%s: failed to alloc context\n", __func__);
      *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
      return false;
   }

   driContextPriv->driverPrivate = brw;
   brw->driContext = driContextPriv;
   brw->intelScreen = screen;
   brw->bufmgr = screen->bufmgr;

   brw->gen = devinfo->gen;
   brw->gt = devinfo->gt;
   brw->is_g4x = devinfo->is_g4x;
   brw->is_baytrail = devinfo->is_baytrail;
   brw->is_haswell = devinfo->is_haswell;
   brw->is_cherryview = devinfo->is_cherryview;
   brw->is_broxton = devinfo->is_broxton;
   brw->has_llc = devinfo->has_llc;
   brw->has_hiz = devinfo->has_hiz_and_separate_stencil;
   brw->has_separate_stencil = devinfo->has_hiz_and_separate_stencil;
   brw->has_pln = devinfo->has_pln;
   brw->has_compr4 = devinfo->has_compr4;
   brw->has_surface_tile_offset = devinfo->has_surface_tile_offset;
   brw->has_negative_rhw_bug = devinfo->has_negative_rhw_bug;
   brw->needs_unlit_centroid_workaround =
      devinfo->needs_unlit_centroid_workaround;

   brw->must_use_separate_stencil = devinfo->must_use_separate_stencil;
   brw->has_swizzling = screen->hw_has_swizzling;

   brw->vs.base.stage = MESA_SHADER_VERTEX;
   brw->tcs.base.stage = MESA_SHADER_TESS_CTRL;
   brw->tes.base.stage = MESA_SHADER_TESS_EVAL;
   brw->gs.base.stage = MESA_SHADER_GEOMETRY;
   brw->wm.base.stage = MESA_SHADER_FRAGMENT;
   if (brw->gen >= 8) {
      gen8_init_vtable_surface_functions(brw);
      brw->vtbl.emit_depth_stencil_hiz = gen8_emit_depth_stencil_hiz;
   } else if (brw->gen >= 7) {
      gen7_init_vtable_surface_functions(brw);
      brw->vtbl.emit_depth_stencil_hiz = gen7_emit_depth_stencil_hiz;
   } else if (brw->gen >= 6) {
      gen6_init_vtable_surface_functions(brw);
      brw->vtbl.emit_depth_stencil_hiz = gen6_emit_depth_stencil_hiz;
   } else {
      gen4_init_vtable_surface_functions(brw);
      brw->vtbl.emit_depth_stencil_hiz = brw_emit_depth_stencil_hiz;
   }

   brw_init_driver_functions(brw, &functions);

   if (notify_reset)
      functions.GetGraphicsResetStatus = brw_get_graphics_reset_status;

   struct gl_context *ctx = &brw->ctx;

   if (!_mesa_initialize_context(ctx, api, mesaVis, shareCtx, &functions)) {
      *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
      fprintf(stderr, "%s: failed to init mesa context\n", __func__);
      intelDestroyContext(driContextPriv);
      return false;
   }

   driContextSetFlags(ctx, flags);

   /* Initialize the software rasterizer and helper modules.
    *
    * As of GL 3.1 core, the gen4+ driver doesn't need the swrast context for
    * software fallbacks (which we have to support on legacy GL to do weird
    * glDrawPixels(), glBitmap(), and other functions).
    */
   if (api != API_OPENGL_CORE && api != API_OPENGLES2) {
      _swrast_CreateContext(ctx);
   }

   _vbo_CreateContext(ctx);
   if (ctx->swrast_context) {
      _tnl_CreateContext(ctx);
      TNL_CONTEXT(ctx)->Driver.RunPipeline = _tnl_run_pipeline;
      _swsetup_CreateContext(ctx);

      /* Configure swrast to match hardware characteristics: */
      _swrast_allow_pixel_fog(ctx, false);
      _swrast_allow_vertex_fog(ctx, true);
   }

   _mesa_meta_init(ctx);

   brw_process_driconf_options(brw);

   if (INTEL_DEBUG & DEBUG_PERF)
      brw->perf_debug = true;

   brw_initialize_cs_context_constants(brw, devinfo->max_cs_threads);
   brw_initialize_context_constants(brw);

   ctx->Const.ResetStrategy = notify_reset
      ? GL_LOSE_CONTEXT_ON_RESET_ARB : GL_NO_RESET_NOTIFICATION_ARB;

   /* Reinitialize the context point state. It depends on ctx->Const values. */
   _mesa_init_point(ctx);

   intel_fbo_init(brw);

   intel_batchbuffer_init(brw);

   if (brw->gen >= 6) {
      /* Create a new hardware context. Using a hardware context means that
       * our GPU state will be saved/restored on context switch, allowing us
       * to assume that the GPU is in the same state we left it in.
       *
       * This is required for transform feedback buffer offsets, query objects,
       * and also allows us to reduce how much state we have to emit.
       */
      brw->hw_ctx = drm_intel_gem_context_create(brw->bufmgr);

      if (!brw->hw_ctx) {
         fprintf(stderr, "Gen6+ requires Kernel 3.6 or later.\n");
         intelDestroyContext(driContextPriv);
         return false;
      }
   }

   if (brw_init_pipe_control(brw, devinfo)) {
      *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
      intelDestroyContext(driContextPriv);
      return false;
   }

   brw_init_state(brw);

   intelInitExtensions(ctx);

   brw_init_surface_formats(brw);

   brw->max_vs_threads = devinfo->max_vs_threads;
   brw->max_hs_threads = devinfo->max_hs_threads;
   brw->max_ds_threads = devinfo->max_ds_threads;
   brw->max_gs_threads = devinfo->max_gs_threads;
   brw->max_wm_threads = devinfo->max_wm_threads;
   /* FINISHME: Do this for all platforms that the kernel supports */
   if (brw->is_cherryview &&
       screen->subslice_total > 0 && screen->eu_total > 0) {
      /* Logical CS threads = EUs per subslice * 7 threads per EU */
      brw->max_cs_threads = screen->eu_total / screen->subslice_total * 7;

      /* Fuse configurations may give more threads than expected, never less. */
      if (brw->max_cs_threads < devinfo->max_cs_threads)
         brw->max_cs_threads = devinfo->max_cs_threads;
   } else {
      brw->max_cs_threads = devinfo->max_cs_threads;
   }
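   /* Worked example (fuse counts assumed): a Cherryview part reporting
    * eu_total = 16 over subslice_total = 2 yields 16 / 2 * 7 = 56 logical
    * CS threads; if devinfo->max_cs_threads were larger than that, the
    * clamp above would raise the value back to the devinfo number.
    */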
   brw->urb.size = devinfo->urb.size;
   brw->urb.min_vs_entries = devinfo->urb.min_vs_entries;
   brw->urb.max_vs_entries = devinfo->urb.max_vs_entries;
   brw->urb.max_hs_entries = devinfo->urb.max_hs_entries;
   brw->urb.max_ds_entries = devinfo->urb.max_ds_entries;
   brw->urb.max_gs_entries = devinfo->urb.max_gs_entries;
   /* Estimate the size of the mappable aperture into the GTT. There's an
    * ioctl to get the whole GTT size, but not one to get the mappable subset.
    * It turns out it's basically always 256MB, though some ancient hardware
    * was smaller.
    */
   uint32_t gtt_size = 256 * 1024 * 1024;

   /* We don't want to map two objects such that a memcpy between them would
    * just fault one mapping in and then the other over and over forever. So
    * we would need to divide the GTT size by 2. Additionally, some GTT is
    * taken up by things like the framebuffer and the ringbuffer and such, so
    * be more conservative.
    */
   brw->max_gtt_map_object_size = gtt_size / 4;
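   /* With the 256MB estimate above, this caps mappable objects at
    * 256MB / 4 = 64MB -- half of the divide-by-2 budget a two-buffer memcpy
    * would need, leaving the rest as headroom for the framebuffer, the
    * ringbuffer, and similar permanent GTT residents.
    */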
   if (brw->gen == 6)
      brw->urb.gs_present = false;

   brw->prim_restart.in_progress = false;
   brw->prim_restart.enable_cut_index = false;
   brw->gs.enabled = false;
   brw->sf.viewport_transform_enable = true;

   brw->predicate.state = BRW_PREDICATE_STATE_RENDER;

   brw->use_resource_streamer = screen->has_resource_streamer &&
      (env_var_as_boolean("INTEL_USE_HW_BT", false) ||
       env_var_as_boolean("INTEL_USE_GATHER", false));

   ctx->VertexProgram._MaintainTnlProgram = true;
   ctx->FragmentProgram._MaintainTexEnvProgram = true;

   brw_draw_init( brw );

   if ((flags & __DRI_CTX_FLAG_DEBUG) != 0) {
      /* Turn on some extra GL_ARB_debug_output generation. */
      brw->perf_debug = true;
   }

   if ((flags & __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS) != 0)
      ctx->Const.ContextFlags |= GL_CONTEXT_FLAG_ROBUST_ACCESS_BIT_ARB;

   if (INTEL_DEBUG & DEBUG_SHADER_TIME)
      brw_init_shader_time(brw);

   _mesa_compute_version(ctx);

   _mesa_initialize_dispatch_tables(ctx);
   _mesa_initialize_vbo_vtxfmt(ctx);

   if (ctx->Extensions.AMD_performance_monitor) {
      brw_init_performance_monitors(brw);
   }

   vbo_use_buffer_objects(ctx);
   vbo_always_unmap_buffers(ctx);

   return true;
}
void
intelDestroyContext(__DRIcontext * driContextPriv)
{
   struct brw_context *brw =
      (struct brw_context *) driContextPriv->driverPrivate;
   struct gl_context *ctx = &brw->ctx;

   /* Dump a final BMP in case the application doesn't call SwapBuffers */
   if (INTEL_DEBUG & DEBUG_AUB) {
      intel_batchbuffer_flush(brw);
      aub_dump_bmp(&brw->ctx);
   }

   _mesa_meta_free(&brw->ctx);
   brw_meta_fast_clear_free(brw);

   if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
      /* Force a report. */
      brw->shader_time.report_time = 0;

      brw_collect_and_report_shader_time(brw);
      brw_destroy_shader_time(brw);
   }

   brw_destroy_state(brw);
   brw_draw_destroy(brw);

   drm_intel_bo_unreference(brw->curbe.curbe_bo);
   if (brw->vs.base.scratch_bo)
      drm_intel_bo_unreference(brw->vs.base.scratch_bo);
   if (brw->gs.base.scratch_bo)
      drm_intel_bo_unreference(brw->gs.base.scratch_bo);
   if (brw->wm.base.scratch_bo)
      drm_intel_bo_unreference(brw->wm.base.scratch_bo);

   if (brw->use_resource_streamer) {
      gen7_reset_hw_bt_pool_offsets(brw);
      drm_intel_bo_unreference(brw->hw_bt_pool.bo);
      brw->hw_bt_pool.bo = NULL;
   }

   drm_intel_gem_context_destroy(brw->hw_ctx);

   if (ctx->swrast_context) {
      _swsetup_DestroyContext(&brw->ctx);
      _tnl_DestroyContext(&brw->ctx);
   }
   _vbo_DestroyContext(&brw->ctx);

   if (ctx->swrast_context)
      _swrast_DestroyContext(&brw->ctx);

   brw_fini_pipe_control(brw);
   intel_batchbuffer_free(brw);

   drm_intel_bo_unreference(brw->throttle_batch[1]);
   drm_intel_bo_unreference(brw->throttle_batch[0]);
   brw->throttle_batch[1] = NULL;
   brw->throttle_batch[0] = NULL;

   driDestroyOptionCache(&brw->optionCache);

   /* free the Mesa context */
   _mesa_free_context_data(&brw->ctx);

   ralloc_free(brw);
   driContextPriv->driverPrivate = NULL;
}
GLboolean
intelUnbindContext(__DRIcontext * driContextPriv)
{
   /* Unset current context and dispatch table */
   _mesa_make_current(NULL, NULL, NULL);

   return true;
}
/**
 * Fixes up the context for GLES2/3 with our default-to-sRGB-capable behavior
 * on window system framebuffers.
 *
 * Desktop GL is fairly reasonable in its handling of sRGB: You can ask if
 * your renderbuffer can do sRGB encode, and you can flip a switch that does
 * sRGB encode if the renderbuffer can handle it. You can ask specifically
 * for a visual where you're guaranteed to be capable, but it turns out that
 * everyone just makes all their ARGB8888 visuals capable and doesn't offer
 * incapable ones, because there's no difference between the two in resources
 * used. Applications thus get built that accidentally rely on the default
 * visual choice being sRGB, so we make ours sRGB capable. Everything sounds
 * great.
 *
 * But for GLES2/3, they decided that it was silly to not turn on sRGB encode
 * for sRGB renderbuffers you made with the GL_EXT_texture_sRGB equivalent.
 * So they removed the enable knob and made it "if the renderbuffer is sRGB
 * capable, do sRGB encode". Then, for your window system renderbuffers, you
 * can ask for sRGB visuals and get sRGB encode, or not ask for sRGB visuals
 * and get no sRGB encode (assuming that both kinds of visual are available).
 * Thus our choice to support sRGB by default on our visuals for desktop would
 * result in broken rendering of GLES apps that aren't expecting sRGB encode.
 *
 * Unfortunately, renderbuffer setup happens before a context is created. So
 * in intel_screen.c we always set up sRGB, and here, if you're a GLES2/3
 * context (without an sRGB visual, though we don't have sRGB visuals exposed
 * yet), we go turn that back off before anyone finds out.
 */
static void
intel_gles3_srgb_workaround(struct brw_context *brw,
                            struct gl_framebuffer *fb)
{
   struct gl_context *ctx = &brw->ctx;

   if (_mesa_is_desktop_gl(ctx) || !fb->Visual.sRGBCapable)
      return;

   /* Some day when we support the sRGB capable bit on visuals available for
    * GLES, we'll need to respect that and not disable things here.
    */
   fb->Visual.sRGBCapable = false;
   for (int i = 0; i < BUFFER_COUNT; i++) {
      struct gl_renderbuffer *rb = fb->Attachment[i].Renderbuffer;
      if (rb)
         rb->Format = _mesa_get_srgb_format_linear(rb->Format);
   }
}
GLboolean
intelMakeCurrent(__DRIcontext * driContextPriv,
                 __DRIdrawable * driDrawPriv,
                 __DRIdrawable * driReadPriv)
{
   struct brw_context *brw;
   GET_CURRENT_CONTEXT(curCtx);

   if (driContextPriv)
      brw = (struct brw_context *) driContextPriv->driverPrivate;
   else
      brw = NULL;

   /* According to the glXMakeCurrent() man page: "Pending commands to
    * the previous context, if any, are flushed before it is released."
    * But only flush if we're actually changing contexts.
    */
   if (brw_context(curCtx) && brw_context(curCtx) != brw) {
      _mesa_flush(curCtx);
   }

   if (driContextPriv) {
      struct gl_context *ctx = &brw->ctx;
      struct gl_framebuffer *fb, *readFb;

      if (driDrawPriv == NULL) {
         fb = _mesa_get_incomplete_framebuffer();
      } else {
         fb = driDrawPriv->driverPrivate;
         driContextPriv->dri2.draw_stamp = driDrawPriv->dri2.stamp - 1;
      }

      if (driReadPriv == NULL) {
         readFb = _mesa_get_incomplete_framebuffer();
      } else {
         readFb = driReadPriv->driverPrivate;
         driContextPriv->dri2.read_stamp = driReadPriv->dri2.stamp - 1;
      }

      /* The sRGB workaround changes the renderbuffer's format. We must change
       * the format before the renderbuffer's miptree gets allocated, otherwise
       * the formats of the renderbuffer and its miptree will differ.
       */
      intel_gles3_srgb_workaround(brw, fb);
      intel_gles3_srgb_workaround(brw, readFb);

      /* If the context viewport hasn't been initialized, force a call out to
       * the loader to get buffers so we have a drawable size for the initial
       * viewport.
       */
      if (!brw->ctx.ViewportInitialized)
         intel_prepare_render(brw);

      _mesa_make_current(ctx, fb, readFb);
   } else {
      _mesa_make_current(NULL, NULL, NULL);
   }

   return true;
}
void
intel_resolve_for_dri2_flush(struct brw_context *brw,
                             __DRIdrawable *drawable)
{
   if (brw->gen < 6) {
      /* MSAA and fast color clear are not supported, so don't waste time
       * checking whether a resolve is needed.
       */
      return;
   }

   struct gl_framebuffer *fb = drawable->driverPrivate;
   struct intel_renderbuffer *rb;

   /* Usually, only the back buffer will need to be downsampled. However,
    * the front buffer will also need it if the user has rendered into it.
    */
   static const gl_buffer_index buffers[2] = {
      BUFFER_BACK_LEFT,
      BUFFER_FRONT_LEFT,
   };

   for (int i = 0; i < 2; ++i) {
      rb = intel_get_renderbuffer(fb, buffers[i]);
      if (rb == NULL || rb->mt == NULL)
         continue;
      if (rb->mt->num_samples <= 1)
         intel_miptree_resolve_color(brw, rb->mt, 0);
      else
         intel_renderbuffer_downsample(brw, rb);
   }
}
static unsigned
intel_bits_per_pixel(const struct intel_renderbuffer *rb)
{
   return _mesa_get_format_bytes(intel_rb_format(rb)) * 8;
}
static void
intel_query_dri2_buffers(struct brw_context *brw,
                         __DRIdrawable *drawable,
                         __DRIbuffer **buffers,
                         int *buffer_count);

static void
intel_process_dri2_buffer(struct brw_context *brw,
                          __DRIdrawable *drawable,
                          __DRIbuffer *buffer,
                          struct intel_renderbuffer *rb,
                          const char *buffer_name);

static void
intel_update_image_buffers(struct brw_context *brw, __DRIdrawable *drawable);
static void
intel_update_dri2_buffers(struct brw_context *brw, __DRIdrawable *drawable)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;
   struct intel_renderbuffer *rb;
   __DRIbuffer *buffers = NULL;
   int i, count;
   const char *region_name;

   /* Set this up front, so that in case our buffers get invalidated
    * while we're getting new buffers, we don't clobber the stamp and
    * thus ignore the invalidate. */
   drawable->lastStamp = drawable->dri2.stamp;

   if (unlikely(INTEL_DEBUG & DEBUG_DRI))
      fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);

   intel_query_dri2_buffers(brw, drawable, &buffers, &count);

   if (buffers == NULL)
      return;

   for (i = 0; i < count; i++) {
      switch (buffers[i].attachment) {
      case __DRI_BUFFER_FRONT_LEFT:
         rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
         region_name = "dri2 front buffer";
         break;

      case __DRI_BUFFER_FAKE_FRONT_LEFT:
         rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
         region_name = "dri2 fake front buffer";
         break;

      case __DRI_BUFFER_BACK_LEFT:
         rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
         region_name = "dri2 back buffer";
         break;

      case __DRI_BUFFER_DEPTH:
      case __DRI_BUFFER_HIZ:
      case __DRI_BUFFER_DEPTH_STENCIL:
      case __DRI_BUFFER_STENCIL:
      case __DRI_BUFFER_ACCUM:
      default:
         fprintf(stderr,
                 "unhandled buffer attach event, attachment type %d\n",
                 buffers[i].attachment);
         return;
      }

      intel_process_dri2_buffer(brw, drawable, &buffers[i], rb, region_name);
   }
}
void
intel_update_renderbuffers(__DRIcontext *context, __DRIdrawable *drawable)
{
   struct brw_context *brw = context->driverPrivate;
   __DRIscreen *screen = brw->intelScreen->driScrnPriv;

   /* Set this up front, so that in case our buffers get invalidated
    * while we're getting new buffers, we don't clobber the stamp and
    * thus ignore the invalidate. */
   drawable->lastStamp = drawable->dri2.stamp;

   if (unlikely(INTEL_DEBUG & DEBUG_DRI))
      fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);

   if (screen->image.loader)
      intel_update_image_buffers(brw, drawable);
   else
      intel_update_dri2_buffers(brw, drawable);

   driUpdateFramebufferSize(&brw->ctx, drawable);
}
/**
 * intel_prepare_render should be called anywhere that current read/drawbuffer
 * state is required.
 */
void
intel_prepare_render(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   __DRIcontext *driContext = brw->driContext;
   __DRIdrawable *drawable;

   drawable = driContext->driDrawablePriv;
   if (drawable && drawable->dri2.stamp != driContext->dri2.draw_stamp) {
      if (drawable->lastStamp != drawable->dri2.stamp)
         intel_update_renderbuffers(driContext, drawable);
      driContext->dri2.draw_stamp = drawable->dri2.stamp;
   }

   drawable = driContext->driReadablePriv;
   if (drawable && drawable->dri2.stamp != driContext->dri2.read_stamp) {
      if (drawable->lastStamp != drawable->dri2.stamp)
         intel_update_renderbuffers(driContext, drawable);
      driContext->dri2.read_stamp = drawable->dri2.stamp;
   }

   /* If we're currently rendering to the front buffer, the rendering
    * that will happen next will probably dirty the front buffer. So
    * mark it as dirty here.
    */
   if (_mesa_is_front_buffer_drawing(ctx->DrawBuffer))
      brw->front_buffer_dirty = true;
}
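/* Invalidation protocol sketch: the loader bumps drawable->dri2.stamp each
 * time the drawable's buffers change (e.g. dri2InvalidateDrawable() in
 * intel_viewport() above), and intel_update_renderbuffers() records the
 * value it processed in drawable->lastStamp. The stamp comparisons in
 * intel_prepare_render() are therefore what keep the common no-change path
 * cheap: buffers are only re-queried after an invalidate.
 */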
/**
 * \brief Query DRI2 to obtain a DRIdrawable's buffers.
 *
 * To determine which DRI buffers to request, examine the renderbuffers
 * attached to the drawable's framebuffer. Then request the buffers with
 * DRI2GetBuffers() or DRI2GetBuffersWithFormat().
 *
 * This is called from intel_update_renderbuffers().
 *
 * \param drawable      Drawable whose buffers are queried.
 * \param buffers       [out] List of buffers returned by DRI2 query.
 * \param buffer_count  [out] Number of buffers returned.
 *
 * \see intel_update_renderbuffers()
 * \see DRI2GetBuffers()
 * \see DRI2GetBuffersWithFormat()
 */
static void
intel_query_dri2_buffers(struct brw_context *brw,
                         __DRIdrawable *drawable,
                         __DRIbuffer **buffers,
                         int *buffer_count)
{
   __DRIscreen *screen = brw->intelScreen->driScrnPriv;
   struct gl_framebuffer *fb = drawable->driverPrivate;
   int i = 0;
   unsigned attachments[8];

   struct intel_renderbuffer *front_rb;
   struct intel_renderbuffer *back_rb;

   front_rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
   back_rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);

   memset(attachments, 0, sizeof(attachments));
   if ((_mesa_is_front_buffer_drawing(fb) ||
        _mesa_is_front_buffer_reading(fb) ||
        !back_rb) && front_rb) {
      /* If a fake front buffer is in use, then querying for
       * __DRI_BUFFER_FRONT_LEFT will cause the server to copy the image from
       * the real front buffer to the fake front buffer. So before doing the
       * query, we need to make sure all the pending drawing has landed in the
       * real front buffer.
       */
      intel_batchbuffer_flush(brw);
      intel_flush_front(&brw->ctx);

      attachments[i++] = __DRI_BUFFER_FRONT_LEFT;
      attachments[i++] = intel_bits_per_pixel(front_rb);
   } else if (front_rb && brw->front_buffer_dirty) {
      /* We have pending front buffer rendering, but we aren't querying for a
       * front buffer. If the front buffer we have is a fake front buffer,
       * the X server is going to throw it away when it processes the query.
       * So before doing the query, make sure all the pending drawing has
       * landed in the real front buffer.
       */
      intel_batchbuffer_flush(brw);
      intel_flush_front(&brw->ctx);
   }

   if (back_rb) {
      attachments[i++] = __DRI_BUFFER_BACK_LEFT;
      attachments[i++] = intel_bits_per_pixel(back_rb);
   }

   assert(i <= ARRAY_SIZE(attachments));

   *buffers = screen->dri2.loader->getBuffersWithFormat(drawable,
                                                        &drawable->w,
                                                        &drawable->h,
                                                        attachments, i / 2,
                                                        buffer_count,
                                                        drawable->loaderPrivate);
}
/**
 * \brief Assign a DRI buffer's DRM region to a renderbuffer.
 *
 * This is called from intel_update_renderbuffers().
 *
 * \par Note:
 *    DRI buffers whose attachment point is DRI2BufferStencil or
 *    DRI2BufferDepthStencil are handled as special cases.
 *
 * \param buffer_name is a human readable name, such as "dri2 front buffer",
 *        that is passed to drm_intel_bo_gem_create_from_name().
 *
 * \see intel_update_renderbuffers()
 */
static void
intel_process_dri2_buffer(struct brw_context *brw,
                          __DRIdrawable *drawable,
                          __DRIbuffer *buffer,
                          struct intel_renderbuffer *rb,
                          const char *buffer_name)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;
   drm_intel_bo *bo;

   if (!rb)
      return;

   unsigned num_samples = rb->Base.Base.NumSamples;

   /* We try to avoid closing and reopening the same BO name, because the first
    * use of a mapping of the buffer involves a bunch of page faulting which is
    * moderately expensive.
    */
   struct intel_mipmap_tree *last_mt;
   if (num_samples == 0)
      last_mt = rb->mt;
   else
      last_mt = rb->singlesample_mt;

   uint32_t old_name = 0;
   if (last_mt) {
      /* The bo already has a name because the miptree was created by a
       * previous call to intel_process_dri2_buffer(). If a bo already has a
       * name, then drm_intel_bo_flink() is a low-cost getter. It does not
       * create a new name.
       */
      drm_intel_bo_flink(last_mt->bo, &old_name);
   }

   if (old_name == buffer->name)
      return;

   if (unlikely(INTEL_DEBUG & DEBUG_DRI)) {
      fprintf(stderr,
              "attaching buffer %d, at %d, cpp %d, pitch %d\n",
              buffer->name, buffer->attachment,
              buffer->cpp, buffer->pitch);
   }

   bo = drm_intel_bo_gem_create_from_name(brw->bufmgr, buffer_name,
                                          buffer->name);
   if (!bo) {
      fprintf(stderr,
              "Failed to open BO for returned DRI2 buffer "
              "(%dx%d, %s, named %d).\n"
              "This is likely a bug in the X Server that will lead to a "
              "crash soon.\n",
              drawable->w, drawable->h, buffer_name, buffer->name);
      return;
   }

   intel_update_winsys_renderbuffer_miptree(brw, rb, bo,
                                            drawable->w, drawable->h,
                                            buffer->pitch);

   if (_mesa_is_front_buffer_drawing(fb) &&
       (buffer->attachment == __DRI_BUFFER_FRONT_LEFT ||
        buffer->attachment == __DRI_BUFFER_FAKE_FRONT_LEFT) &&
       rb->Base.Base.NumSamples > 1) {
      intel_renderbuffer_upsample(brw, rb);
   }

   assert(rb->mt);

   drm_intel_bo_unreference(bo);
}
/**
 * \brief Query DRI image loader to obtain a DRIdrawable's buffers.
 *
 * To determine which DRI buffers to request, examine the renderbuffers
 * attached to the drawable's framebuffer. Then request the buffers from
 * the image loader.
 *
 * This is called from intel_update_renderbuffers().
 *
 * \param drawable      Drawable whose buffers are queried.
 * \param buffers       [out] List of buffers returned by DRI2 query.
 * \param buffer_count  [out] Number of buffers returned.
 *
 * \see intel_update_renderbuffers()
 */
static void
intel_update_image_buffer(struct brw_context *intel,
                          __DRIdrawable *drawable,
                          struct intel_renderbuffer *rb,
                          __DRIimage *buffer,
                          enum __DRIimageBufferMask buffer_type)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;

   if (!rb || !buffer->bo)
      return;

   unsigned num_samples = rb->Base.Base.NumSamples;

   /* Check and see if we're already bound to the right
    * buffer object
    */
   struct intel_mipmap_tree *last_mt;
   if (num_samples == 0)
      last_mt = rb->mt;
   else
      last_mt = rb->singlesample_mt;

   if (last_mt && last_mt->bo == buffer->bo)
      return;

   intel_update_winsys_renderbuffer_miptree(intel, rb, buffer->bo,
                                            buffer->width, buffer->height,
                                            buffer->pitch);

   if (_mesa_is_front_buffer_drawing(fb) &&
       buffer_type == __DRI_IMAGE_BUFFER_FRONT &&
       rb->Base.Base.NumSamples > 1) {
      intel_renderbuffer_upsample(intel, rb);
   }
}
static void
intel_update_image_buffers(struct brw_context *brw, __DRIdrawable *drawable)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;
   __DRIscreen *screen = brw->intelScreen->driScrnPriv;
   struct intel_renderbuffer *front_rb;
   struct intel_renderbuffer *back_rb;
   struct __DRIimageList images;
   unsigned int format;
   uint32_t buffer_mask = 0;

   front_rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
   back_rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);

   if (back_rb)
      format = intel_rb_format(back_rb);
   else if (front_rb)
      format = intel_rb_format(front_rb);
   else
      return;

   if (front_rb && (_mesa_is_front_buffer_drawing(fb) ||
                    _mesa_is_front_buffer_reading(fb) || !back_rb)) {
      buffer_mask |= __DRI_IMAGE_BUFFER_FRONT;
   }

   if (back_rb)
      buffer_mask |= __DRI_IMAGE_BUFFER_BACK;

   (*screen->image.loader->getBuffers) (drawable,
                                        driGLFormatToImageFormat(format),
                                        &drawable->dri2.stamp,
                                        drawable->loaderPrivate,
                                        buffer_mask,
                                        &images);

   if (images.image_mask & __DRI_IMAGE_BUFFER_FRONT) {
      drawable->w = images.front->width;
      drawable->h = images.front->height;
      intel_update_image_buffer(brw,
                                drawable,
                                front_rb,
                                images.front,
                                __DRI_IMAGE_BUFFER_FRONT);
   }
   if (images.image_mask & __DRI_IMAGE_BUFFER_BACK) {
      drawable->w = images.back->width;
      drawable->h = images.back->height;
      intel_update_image_buffer(brw,
                                drawable,
                                back_rb,
                                images.back,
                                __DRI_IMAGE_BUFFER_BACK);
   }
}