/*
 Copyright 2003 VMware, Inc.
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
 /*
  * Authors:
  *   Keith Whitwell <keithw@vmware.com>
  */
#include "compiler/nir/nir.h"
#include "main/api_exec.h"
#include "main/context.h"
#include "main/fbobject.h"
#include "main/extensions.h"
#include "main/imports.h"
#include "main/macros.h"
#include "main/points.h"
#include "main/version.h"
#include "main/vtxfmt.h"
#include "main/texobj.h"
#include "main/framebuffer.h"
#include "main/stencil.h"
#include "main/state.h"

#include "vbo/vbo_context.h"

#include "drivers/common/driverfuncs.h"
#include "drivers/common/meta.h"

#include "brw_context.h"
#include "brw_defines.h"
#include "brw_blorp.h"
#include "brw_state.h"

#include "intel_batchbuffer.h"
#include "intel_buffer_objects.h"
#include "intel_buffers.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
#include "intel_pixel.h"
#include "intel_image.h"
#include "intel_tex.h"
#include "intel_tex_obj.h"

#include "swrast_setup/swrast_setup.h"
#include "tnl/t_pipeline.h"
#include "util/ralloc.h"
#include "util/debug.h"
/***************************************
 * Mesa's Driver Functions
 ***************************************/

const char *const brw_vendor_string = "Intel Open Source Technology Center";
static const char *
get_bsw_model(const struct intel_screen *screen)
{
   switch (screen->eu_total) {
   case 16:
      return "405";
   case 12:
      return "400";
   default:
      return "   ";
   }
}
static const char *
brw_get_renderer_string(const struct intel_screen *screen)
{
   const char *chipset;
   static char buffer[128];
   char *bsw = NULL;

   switch (screen->deviceID) {
#undef CHIPSET
#define CHIPSET(id, symbol, str) case id: chipset = str; break;
#include "pci_ids/i965_pci_ids.h"
   default:
      chipset = "Unknown Intel Chipset";
      break;
   }

   /* Braswell branding is funny, so we have to fix it up here */
   if (screen->deviceID == 0x22B1) {
      bsw = strdup(chipset);
      char *needle = strstr(bsw, "XXX");
      if (needle) {
         memcpy(needle, get_bsw_model(screen), 3);
         chipset = bsw;
      }
   }

   (void) driGetRendererString(buffer, chipset, 0);
   free(bsw);
   return buffer;
}
static const GLubyte *
intel_get_string(struct gl_context * ctx, GLenum name)
{
   const struct brw_context *const brw = brw_context(ctx);

   switch (name) {
   case GL_VENDOR:
      return (GLubyte *) brw_vendor_string;

   case GL_RENDERER:
      return
         (GLubyte *) brw_get_renderer_string(brw->screen);

   default:
      return NULL;
   }
}
static void
intel_viewport(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);
   __DRIcontext *driContext = brw->driContext;

   if (_mesa_is_winsys_fbo(ctx->DrawBuffer)) {
      if (driContext->driDrawablePriv)
         dri2InvalidateDrawable(driContext->driDrawablePriv);
      if (driContext->driReadablePriv)
         dri2InvalidateDrawable(driContext->driReadablePriv);
   }
}
static void
intel_update_framebuffer(struct gl_context *ctx,
                         struct gl_framebuffer *fb)
{
   struct brw_context *brw = brw_context(ctx);

   /* Quantize the derived default number of samples */
   fb->DefaultGeometry._NumSamples =
      intel_quantize_num_samples(brw->screen,
                                 fb->DefaultGeometry.NumSamples);
}
static void
intel_update_state(struct gl_context * ctx)
{
   GLuint new_state = ctx->NewState;
   struct brw_context *brw = brw_context(ctx);

   if (ctx->swrast_context)
      _swrast_InvalidateState(ctx, new_state);

   brw->NewGLState |= new_state;

   if (new_state & (_NEW_SCISSOR | _NEW_BUFFERS | _NEW_VIEWPORT))
      _mesa_update_draw_buffer_bounds(ctx, ctx->DrawBuffer);

   if (new_state & (_NEW_STENCIL | _NEW_BUFFERS)) {
      brw->stencil_enabled = _mesa_stencil_is_enabled(ctx);
      brw->stencil_two_sided = _mesa_stencil_is_two_sided(ctx);
      brw->stencil_write_enabled =
         _mesa_stencil_is_write_enabled(ctx, brw->stencil_two_sided);
   }

   if (new_state & _NEW_POLYGON)
      brw->polygon_front_bit = _mesa_polygon_get_front_bit(ctx);

   if (new_state & _NEW_BUFFERS) {
      intel_update_framebuffer(ctx, ctx->DrawBuffer);
      if (ctx->DrawBuffer != ctx->ReadBuffer)
         intel_update_framebuffer(ctx, ctx->ReadBuffer);
   }
}
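/* Pick the right front-buffer flush hook for the screen: prefer the image
 * loader's flushFrontBuffer when the screen was set up with the image
 * loader, otherwise fall back to the DRI2 loader's hook.
 */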
#define flushFront(screen)      ((screen)->image.loader ? \
                                 (screen)->image.loader->flushFrontBuffer : \
                                 (screen)->dri2.loader->flushFrontBuffer)
static void
intel_flush_front(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);
   __DRIcontext *driContext = brw->driContext;
   __DRIdrawable *driDrawable = driContext->driDrawablePriv;
   __DRIscreen *const dri_screen = brw->screen->driScrnPriv;

   if (brw->front_buffer_dirty && _mesa_is_winsys_fbo(ctx->DrawBuffer)) {
      if (flushFront(dri_screen) && driDrawable &&
          driDrawable->loaderPrivate) {

         /* Resolve before flushing FAKE_FRONT_LEFT to FRONT_LEFT.
          *
          * This potentially resolves both front and back buffer. It
          * is unnecessary to resolve the back, but harms nothing except
          * performance. And no one cares about front-buffer render
          * performance.
          */
         intel_resolve_for_dri2_flush(brw, driDrawable);
         intel_batchbuffer_flush(brw);

         flushFront(dri_screen)(driDrawable, driDrawable->loaderPrivate);

         /* We set the dirty bit in intel_prepare_render() if we're
          * front buffer rendering once we get there.
          */
         brw->front_buffer_dirty = false;
      }
   }
}
static void
intel_glFlush(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);

   intel_batchbuffer_flush(brw);
   intel_flush_front(ctx);

   brw->need_flush_throttle = true;
}
static void
intel_finish(struct gl_context * ctx)
{
   struct brw_context *brw = brw_context(ctx);

   intel_glFlush(ctx);

   if (brw->batch.last_bo)
      brw_bo_wait_rendering(brw->batch.last_bo);
}
static void
brw_init_driver_functions(struct brw_context *brw,
                          struct dd_function_table *functions)
{
   _mesa_init_driver_functions(functions);

   /* GLX uses DRI2 invalidate events to handle window resizing.
    * Unfortunately, EGL does not - libEGL is written in XCB (not Xlib),
    * which doesn't provide a mechanism for snooping the event queues.
    *
    * So EGL still relies on viewport hacks to handle window resizing.
    * This should go away with DRI3000.
    */
   if (!brw->driContext->driScreenPriv->dri2.useInvalidate)
      functions->Viewport = intel_viewport;

   functions->Flush = intel_glFlush;
   functions->Finish = intel_finish;
   functions->GetString = intel_get_string;
   functions->UpdateState = intel_update_state;

   intelInitTextureFuncs(functions);
   intelInitTextureImageFuncs(functions);
   intelInitTextureSubImageFuncs(functions);
   intelInitTextureCopyImageFuncs(functions);
   intelInitCopyImageFuncs(functions);
   intelInitClearFuncs(functions);
   intelInitBufferFuncs(functions);
   intelInitPixelFuncs(functions);
   intelInitBufferObjectFuncs(functions);
   brw_init_syncobj_functions(functions);
   brw_init_object_purgeable_functions(functions);

   brwInitFragProgFuncs( functions );
   brw_init_common_queryobj_functions(functions);
   if (brw->gen >= 8 || brw->is_haswell)
      hsw_init_queryobj_functions(functions);
   else if (brw->gen >= 6)
      gen6_init_queryobj_functions(functions);
   else
      gen4_init_queryobj_functions(functions);
   brw_init_compute_functions(functions);
   brw_init_conditional_render_functions(functions);

   functions->QueryInternalFormat = brw_query_internal_format;

   functions->NewTransformFeedback = brw_new_transform_feedback;
   functions->DeleteTransformFeedback = brw_delete_transform_feedback;
   if (can_do_mi_math_and_lrr(brw->screen)) {
      functions->BeginTransformFeedback = hsw_begin_transform_feedback;
      functions->EndTransformFeedback = hsw_end_transform_feedback;
      functions->PauseTransformFeedback = hsw_pause_transform_feedback;
      functions->ResumeTransformFeedback = hsw_resume_transform_feedback;
   } else if (brw->gen >= 7) {
      functions->BeginTransformFeedback = gen7_begin_transform_feedback;
      functions->EndTransformFeedback = gen7_end_transform_feedback;
      functions->PauseTransformFeedback = gen7_pause_transform_feedback;
      functions->ResumeTransformFeedback = gen7_resume_transform_feedback;
      functions->GetTransformFeedbackVertexCount =
         brw_get_transform_feedback_vertex_count;
   } else {
      functions->BeginTransformFeedback = brw_begin_transform_feedback;
      functions->EndTransformFeedback = brw_end_transform_feedback;
      functions->PauseTransformFeedback = brw_pause_transform_feedback;
      functions->ResumeTransformFeedback = brw_resume_transform_feedback;
      functions->GetTransformFeedbackVertexCount =
         brw_get_transform_feedback_vertex_count;
   }

   if (brw->gen >= 6)
      functions->GetSamplePosition = gen6_get_sample_position;
}
static void
brw_initialize_context_constants(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   const struct brw_compiler *compiler = brw->screen->compiler;

   const bool stage_exists[MESA_SHADER_STAGES] = {
      [MESA_SHADER_VERTEX] = true,
      [MESA_SHADER_TESS_CTRL] = brw->gen >= 7,
      [MESA_SHADER_TESS_EVAL] = brw->gen >= 7,
      [MESA_SHADER_GEOMETRY] = brw->gen >= 6,
      [MESA_SHADER_FRAGMENT] = true,
      [MESA_SHADER_COMPUTE] =
         ((ctx->API == API_OPENGL_COMPAT || ctx->API == API_OPENGL_CORE) &&
          ctx->Const.MaxComputeWorkGroupSize[0] >= 1024) ||
         (ctx->API == API_OPENGLES2 &&
          ctx->Const.MaxComputeWorkGroupSize[0] >= 128) ||
         _mesa_extension_override_enables.ARB_compute_shader,
   };

   unsigned num_stages = 0;
   for (int i = 0; i < MESA_SHADER_STAGES; i++) {
      if (stage_exists[i])
         num_stages++;
   }

   unsigned max_samplers =
      brw->gen >= 8 || brw->is_haswell ? BRW_MAX_TEX_UNIT : 16;

   ctx->Const.MaxDualSourceDrawBuffers = 1;
   ctx->Const.MaxDrawBuffers = BRW_MAX_DRAW_BUFFERS;
   ctx->Const.MaxCombinedShaderOutputResources =
      MAX_IMAGE_UNITS + BRW_MAX_DRAW_BUFFERS;

   /* The timestamp register we can read for glGetTimestamp() is
    * sometimes only 32 bits, before scaling to nanoseconds (depending
    * on the kernel).
    *
    * Once scaled to nanoseconds the timestamp would roll over at a
    * non-power-of-two, so an application couldn't use
    * GL_QUERY_COUNTER_BITS to handle rollover correctly.  Instead, we
    * report 36 bits and truncate at that (rolling over 5 times as
    * often as the HW counter), and when the 32-bit counter rolls
    * over, it happens to also be at a rollover in the reported value
    * from near (1<<36) to 0.
    *
    * The low 32 bits roll over in ~343 seconds.  Our 36-bit result
    * rolls over every ~69 seconds.
    */
   ctx->Const.QueryCounterBits.Timestamp = 36;
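   /* Illustrative arithmetic for the numbers above (a sketch, assuming the
    * ~80 ns timestamp tick those figures imply): 2^32 ticks * 80 ns/tick is
    * roughly 343 s, while the 36-bit truncated nanosecond result rolls over
    * every 2^36 ns, or roughly 68.7 s.
    */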
   ctx->Const.MaxTextureCoordUnits = 8; /* Mesa limit */
   ctx->Const.MaxImageUnits = MAX_IMAGE_UNITS;
   if (brw->gen >= 7) {
      ctx->Const.MaxRenderbufferSize = 16384;
      ctx->Const.MaxTextureLevels = MIN2(15 /* 16384 */, MAX_TEXTURE_LEVELS);
      ctx->Const.MaxCubeTextureLevels = 15; /* 16384 */
   } else {
      ctx->Const.MaxRenderbufferSize = 8192;
      ctx->Const.MaxTextureLevels = MIN2(14 /* 8192 */, MAX_TEXTURE_LEVELS);
      ctx->Const.MaxCubeTextureLevels = 14; /* 8192 */
   }
   ctx->Const.Max3DTextureLevels = 12; /* 2048 */
   ctx->Const.MaxArrayTextureLayers = brw->gen >= 7 ? 2048 : 512;
   ctx->Const.MaxTextureMbytes = 1536;
   ctx->Const.MaxTextureRectSize = brw->gen >= 7 ? 16384 : 8192;
   ctx->Const.MaxTextureMaxAnisotropy = 16.0;
   ctx->Const.MaxTextureLodBias = 15.0;
   ctx->Const.StripTextureBorder = true;
   if (brw->gen >= 7) {
      ctx->Const.MaxProgramTextureGatherComponents = 4;
      ctx->Const.MinProgramTextureGatherOffset = -32;
      ctx->Const.MaxProgramTextureGatherOffset = 31;
   } else if (brw->gen == 6) {
      ctx->Const.MaxProgramTextureGatherComponents = 1;
      ctx->Const.MinProgramTextureGatherOffset = -8;
      ctx->Const.MaxProgramTextureGatherOffset = 7;
   }

   ctx->Const.MaxUniformBlockSize = 65536;

   for (int i = 0; i < MESA_SHADER_STAGES; i++) {
      struct gl_program_constants *prog = &ctx->Const.Program[i];

      if (!stage_exists[i])
         continue;

      prog->MaxTextureImageUnits = max_samplers;

      prog->MaxUniformBlocks = BRW_MAX_UBO;
      prog->MaxCombinedUniformComponents =
         prog->MaxUniformComponents +
         ctx->Const.MaxUniformBlockSize / 4 * prog->MaxUniformBlocks;

      prog->MaxAtomicCounters = MAX_ATOMIC_COUNTERS;
      prog->MaxAtomicBuffers = BRW_MAX_ABO;
      prog->MaxImageUniforms = compiler->scalar_stage[i] ? BRW_MAX_IMAGES : 0;
      prog->MaxShaderStorageBlocks = BRW_MAX_SSBO;
   }
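   /* A worked instance of the combined-components formula above (purely
    * illustrative): each UBO block contributes MaxUniformBlockSize / 4 =
    * 65536 / 4 = 16384 float components, so with N = prog->MaxUniformBlocks
    * blocks the combined total is MaxUniformComponents + N * 16384.
    */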
   ctx->Const.MaxTextureUnits =
      MIN2(ctx->Const.MaxTextureCoordUnits,
           ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits);

   ctx->Const.MaxUniformBufferBindings = num_stages * BRW_MAX_UBO;
   ctx->Const.MaxCombinedUniformBlocks = num_stages * BRW_MAX_UBO;
   ctx->Const.MaxCombinedAtomicBuffers = num_stages * BRW_MAX_ABO;
   ctx->Const.MaxCombinedShaderStorageBlocks = num_stages * BRW_MAX_SSBO;
   ctx->Const.MaxShaderStorageBufferBindings = num_stages * BRW_MAX_SSBO;
   ctx->Const.MaxCombinedTextureImageUnits = num_stages * max_samplers;
   ctx->Const.MaxCombinedImageUniforms = num_stages * BRW_MAX_IMAGES;

   /* Hardware only supports a limited number of transform feedback buffers.
    * So we need to override the Mesa default (which is based only on software
    * limits).
    */
   ctx->Const.MaxTransformFeedbackBuffers = BRW_MAX_SOL_BUFFERS;

   /* On Gen6, in the worst case, we use up one binding table entry per
    * transform feedback component (see comments above the definition of
    * BRW_MAX_SOL_BINDINGS, in brw_context.h), so we need to advertise a value
    * for MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS equal to
    * BRW_MAX_SOL_BINDINGS.
    *
    * In "separate components" mode, we need to divide this value by
    * BRW_MAX_SOL_BUFFERS, so that the total number of binding table entries
    * used up by all buffers will not exceed BRW_MAX_SOL_BINDINGS.
    */
   ctx->Const.MaxTransformFeedbackInterleavedComponents = BRW_MAX_SOL_BINDINGS;
   ctx->Const.MaxTransformFeedbackSeparateComponents =
      BRW_MAX_SOL_BINDINGS / BRW_MAX_SOL_BUFFERS;
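   /* Sketch of the resulting budget: with B = BRW_MAX_SOL_BUFFERS separate
    * buffers each limited to BRW_MAX_SOL_BINDINGS / B components, the sum
    * over all buffers is at most B * (BRW_MAX_SOL_BINDINGS / B) =
    * BRW_MAX_SOL_BINDINGS binding table entries, matching the interleaved
    * limit above.
    */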
   ctx->Const.AlwaysUseGetTransformFeedbackVertexCount =
      !can_do_mi_math_and_lrr(brw->screen);

   int max_samples;
   const int *msaa_modes = intel_supported_msaa_modes(brw->screen);
   const int clamp_max_samples =
      driQueryOptioni(&brw->optionCache, "clamp_max_samples");

   if (clamp_max_samples < 0) {
      max_samples = msaa_modes[0];
   } else {
      /* Select the largest supported MSAA mode that does not exceed
       * clamp_max_samples.
       */
      max_samples = 0;
      for (int i = 0; msaa_modes[i] != 0; ++i) {
         if (msaa_modes[i] <= clamp_max_samples) {
            max_samples = msaa_modes[i];
            break;
         }
      }
   }

   ctx->Const.MaxSamples = max_samples;
   ctx->Const.MaxColorTextureSamples = max_samples;
   ctx->Const.MaxDepthTextureSamples = max_samples;
   ctx->Const.MaxIntegerSamples = max_samples;
   ctx->Const.MaxImageSamples = 0;

   /* gen6_set_sample_maps() sets SampleMap{2,4,8}x variables which are used
    * to map indices of rectangular grid to sample numbers within a pixel.
    * These variables are used by GL_EXT_framebuffer_multisample_blit_scaled
    * extension implementation. For more details see the comment above
    * gen6_set_sample_maps() definition.
    */
   gen6_set_sample_maps(ctx);
   ctx->Const.MinLineWidth = 1.0;
   ctx->Const.MinLineWidthAA = 1.0;
   if (brw->gen >= 6) {
      ctx->Const.MaxLineWidth = 7.375;
      ctx->Const.MaxLineWidthAA = 7.375;
      ctx->Const.LineWidthGranularity = 0.125;
   } else {
      ctx->Const.MaxLineWidth = 7.0;
      ctx->Const.MaxLineWidthAA = 7.0;
      ctx->Const.LineWidthGranularity = 0.5;
   }

   /* For non-antialiased lines, we have to round the line width to the
    * nearest whole number. Make sure that we don't advertise a line
    * width that, when rounded, will be beyond the actual hardware
    * maximum.
    */
   assert(roundf(ctx->Const.MaxLineWidth) <= ctx->Const.MaxLineWidth);
   ctx->Const.MinPointSize = 1.0;
   ctx->Const.MinPointSizeAA = 1.0;
   ctx->Const.MaxPointSize = 255.0;
   ctx->Const.MaxPointSizeAA = 255.0;
   ctx->Const.PointSizeGranularity = 1.0;

   if (brw->gen >= 5 || brw->is_g4x)
      ctx->Const.MaxClipPlanes = 8;

   ctx->Const.GLSLTessLevelsAsInputs = true;
   ctx->Const.LowerTCSPatchVerticesIn = brw->gen >= 8;
   ctx->Const.LowerTESPatchVerticesIn = true;
   ctx->Const.PrimitiveRestartForPatches = true;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeInstructions = 16 * 1024;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxAluInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxTexInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxTexIndirections = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAluInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTexInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTexIndirections = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAttribs = 16;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTemps = 256;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAddressRegs = 1;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeParameters = 1024;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxEnvParams =
      MIN2(ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeParameters,
           ctx->Const.Program[MESA_SHADER_VERTEX].MaxEnvParams);

   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeInstructions = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAluInstructions = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTexInstructions = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTexIndirections = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAttribs = 12;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTemps = 256;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAddressRegs = 0;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeParameters = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxEnvParams =
      MIN2(ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeParameters,
           ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxEnvParams);
   /* Fragment shaders use real, 32-bit twos-complement integers for all
    * integer types.
    */
   ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.RangeMin = 31;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.RangeMax = 30;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.Precision = 0;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].HighInt = ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MediumInt = ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt;

   ctx->Const.Program[MESA_SHADER_VERTEX].LowInt.RangeMin = 31;
   ctx->Const.Program[MESA_SHADER_VERTEX].LowInt.RangeMax = 30;
   ctx->Const.Program[MESA_SHADER_VERTEX].LowInt.Precision = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].HighInt = ctx->Const.Program[MESA_SHADER_VERTEX].LowInt;
   ctx->Const.Program[MESA_SHADER_VERTEX].MediumInt = ctx->Const.Program[MESA_SHADER_VERTEX].LowInt;
   /* Gen6 converts quads to polygons at the beginning of the 3D pipeline,
    * but we're not sure how it's actually done for vertex order, which
    * affects the provoking vertex decision. Always use last vertex
    * convention for quad primitives, which works as expected for now.
    */
   if (brw->gen >= 6)
      ctx->Const.QuadsFollowProvokingVertexConvention = false;

   ctx->Const.NativeIntegers = true;
   ctx->Const.VertexID_is_zero_based = true;
   /* Regarding the CMP instruction, the Ivybridge PRM says:
    *
    *   "For each enabled channel 0b or 1b is assigned to the appropriate flag
    *    bit and 0/all zeros or all ones (e.g, byte 0xFF, word 0xFFFF, DWord
    *    0xFFFFFFFF) is assigned to dst."
    *
    * but PRMs for earlier generations say
    *
    *   "In dword format, one GRF may store up to 8 results. When the register
    *    is used later as a vector of Booleans, as only LSB at each channel
    *    contains meaning [sic] data, software should make sure all higher bits
    *    are masked out (e.g. by 'and-ing' an [sic] 0x01 constant)."
    *
    * We select the representation of a true boolean uniform to be ~0, and fix
    * the results of Gen <= 5 CMP instructions with -(result & 1).
    */
   ctx->Const.UniformBooleanTrue = ~0;
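   /* Illustrative arithmetic for the Gen <= 5 fixup above: masking with
    * (result & 1) yields 0 or 1, and two's-complement negation maps 1 to
    * 0xFFFFFFFF while leaving 0 alone, so every channel ends up as either
    * all zeros or the all-ones "true" value chosen here.
    */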
   /* From the gen4 PRM, volume 4 page 127:
    *
    *    "For SURFTYPE_BUFFER non-rendertarget surfaces, this field specifies
    *     the base address of the first element of the surface, computed in
    *     software by adding the surface base address to the byte offset of
    *     the element in the buffer."
    *
    * However, unaligned accesses are slower, so enforce buffer alignment.
    *
    * In order to push UBO data, 3DSTATE_CONSTANT_XS imposes an additional
    * restriction: the start of the buffer needs to be 32B aligned.
    */
   ctx->Const.UniformBufferOffsetAlignment = 32;
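   /* In practice this means an application that queries
    * GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT sees 32 here, and any offset it
    * passes to glBindBufferRange(GL_UNIFORM_BUFFER, ...) must be a multiple
    * of 32 bytes.
    */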
   /* ShaderStorageBufferOffsetAlignment should be a cacheline (64 bytes) so
    * that we can safely have the CPU and GPU writing the same SSBO on
    * non-cache-coherent systems (our Atom CPUs).  With UBOs, the GPU never
    * writes, so there's no problem.  For an SSBO, the GPU and the CPU can
    * be updating disjoint regions of the buffer simultaneously and that will
    * break if the regions overlap the same cacheline.
    */
   ctx->Const.ShaderStorageBufferOffsetAlignment = 64;
   ctx->Const.TextureBufferOffsetAlignment = 16;
   ctx->Const.MaxTextureBufferSize = 128 * 1024 * 1024;
   if (brw->gen >= 6) {
      ctx->Const.MaxVarying = 32;
      ctx->Const.Program[MESA_SHADER_VERTEX].MaxOutputComponents = 128;
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxInputComponents = 64;
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxOutputComponents = 128;
      ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxInputComponents = 128;
      ctx->Const.Program[MESA_SHADER_TESS_CTRL].MaxInputComponents = 128;
      ctx->Const.Program[MESA_SHADER_TESS_CTRL].MaxOutputComponents = 128;
      ctx->Const.Program[MESA_SHADER_TESS_EVAL].MaxInputComponents = 128;
      ctx->Const.Program[MESA_SHADER_TESS_EVAL].MaxOutputComponents = 128;
   }
   /* We want the GLSL compiler to emit code that uses condition codes */
   for (int i = 0; i < MESA_SHADER_STAGES; i++) {
      ctx->Const.ShaderCompilerOptions[i] =
         brw->screen->compiler->glsl_compiler_options[i];
   }
   if (brw->gen >= 7) {
      ctx->Const.MaxViewportWidth = 32768;
      ctx->Const.MaxViewportHeight = 32768;
   }

   /* ARB_viewport_array, OES_viewport_array */
   if (brw->gen >= 6) {
      ctx->Const.MaxViewports = GEN6_NUM_VIEWPORTS;
      ctx->Const.ViewportSubpixelBits = 0;

      /* Cast to float before negating because MaxViewportWidth is unsigned.
       */
      ctx->Const.ViewportBounds.Min = -(float)ctx->Const.MaxViewportWidth;
      ctx->Const.ViewportBounds.Max = ctx->Const.MaxViewportWidth;
   }

   /* ARB_gpu_shader5 */
   if (brw->gen >= 7)
      ctx->Const.MaxVertexStreams = MIN2(4, MAX_VERTEX_STREAMS);

   /* ARB_framebuffer_no_attachments */
   ctx->Const.MaxFramebufferWidth = 16384;
   ctx->Const.MaxFramebufferHeight = 16384;
   ctx->Const.MaxFramebufferLayers = ctx->Const.MaxArrayTextureLayers;
   ctx->Const.MaxFramebufferSamples = max_samples;

   /* OES_primitive_bounding_box */
   ctx->Const.NoPrimitiveBoundingBoxOutput = true;
}
static void
brw_initialize_cs_context_constants(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   const struct intel_screen *screen = brw->screen;
   struct gen_device_info *devinfo = &brw->screen->devinfo;

   /* FINISHME: Do this for all platforms that the kernel supports */
   if (brw->is_cherryview &&
       screen->subslice_total > 0 && screen->eu_total > 0) {
      /* Logical CS threads = EUs per subslice * 7 threads per EU */
      uint32_t max_cs_threads = screen->eu_total / screen->subslice_total * 7;

      /* Fuse configurations may give more threads than expected, never less. */
      if (max_cs_threads > devinfo->max_cs_threads)
         devinfo->max_cs_threads = max_cs_threads;
   }
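   /* Worked example for the formula above (a hypothetical fuse config, for
    * illustration only): a Cherryview part with eu_total = 16 and
    * subslice_total = 2 would compute 16 / 2 * 7 = 56 logical CS threads.
    */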
   /* Maximum number of scalar compute shader invocations that can be run in
    * parallel in the same subslice assuming SIMD32 dispatch.
    *
    * We don't advertise more than 64 threads, because we are limited to 64 by
    * our usage of thread_width_max in the gpgpu walker command. This only
    * currently impacts Haswell, which otherwise might be able to advertise 70
    * threads. With SIMD32 and 64 threads, Haswell still provides twice the
    * number of invocations required for ARB_compute_shader.
    */
   const unsigned max_threads = MIN2(64, devinfo->max_cs_threads);
   const uint32_t max_invocations = 32 * max_threads;
   ctx->Const.MaxComputeWorkGroupSize[0] = max_invocations;
   ctx->Const.MaxComputeWorkGroupSize[1] = max_invocations;
   ctx->Const.MaxComputeWorkGroupSize[2] = max_invocations;
   ctx->Const.MaxComputeWorkGroupInvocations = max_invocations;
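   /* Sanity check on the numbers above (illustrative): with the 64-thread
    * cap, max_invocations = 32 * 64 = 2048, comfortably above the minimum of
    * 1024 work group invocations that ARB_compute_shader requires.
    */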
   ctx->Const.MaxComputeSharedMemorySize = 64 * 1024;
}
/**
 * Process driconf (drirc) options, setting appropriate context flags.
 *
 * intelInitExtensions still pokes at optionCache directly, in order to
 * avoid advertising various extensions.  No flags are set, so it makes
 * sense to continue doing that there.
 */
static void
brw_process_driconf_options(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   driOptionCache *options = &brw->optionCache;
   driParseConfigFiles(options, &brw->screen->optionCache,
                       brw->driContext->driScreenPriv->myNum, "i965");

   int bo_reuse_mode = driQueryOptioni(options, "bo_reuse");
   switch (bo_reuse_mode) {
   case DRI_CONF_BO_REUSE_DISABLED:
      break;
   case DRI_CONF_BO_REUSE_ALL:
      brw_bufmgr_enable_reuse(brw->bufmgr);
      break;
   }

   if (INTEL_DEBUG & DEBUG_NO_HIZ) {
      brw->has_hiz = false;
      /* On gen6, you can only do separate stencil with HIZ. */
      if (brw->gen == 6)
         brw->has_separate_stencil = false;
   }

   if (driQueryOptionb(options, "always_flush_batch")) {
      fprintf(stderr, "flushing batchbuffer before/after each draw call\n");
      brw->always_flush_batch = true;
   }

   if (driQueryOptionb(options, "always_flush_cache")) {
      fprintf(stderr, "flushing GPU caches before/after each draw call\n");
      brw->always_flush_cache = true;
   }

   if (driQueryOptionb(options, "disable_throttling")) {
      fprintf(stderr, "disabling flush throttling\n");
      brw->disable_throttling = true;
   }

   brw->precompile = driQueryOptionb(&brw->optionCache, "shader_precompile");

   if (driQueryOptionb(&brw->optionCache, "precise_trig"))
      brw->screen->compiler->precise_trig = true;

   ctx->Const.ForceGLSLExtensionsWarn =
      driQueryOptionb(options, "force_glsl_extensions_warn");

   ctx->Const.ForceGLSLVersion =
      driQueryOptioni(options, "force_glsl_version");

   ctx->Const.DisableGLSLLineContinuations =
      driQueryOptionb(options, "disable_glsl_line_continuations");

   ctx->Const.AllowGLSLExtensionDirectiveMidShader =
      driQueryOptionb(options, "allow_glsl_extension_directive_midshader");

   ctx->Const.AllowGLSLBuiltinVariableRedeclaration =
      driQueryOptionb(options, "allow_glsl_builtin_variable_redeclaration");

   ctx->Const.AllowHigherCompatVersion =
      driQueryOptionb(options, "allow_higher_compat_version");

   ctx->Const.ForceGLSLAbsSqrt =
      driQueryOptionb(options, "force_glsl_abs_sqrt");

   ctx->Const.GLSLZeroInit = driQueryOptionb(options, "glsl_zero_init");

   brw->dual_color_blend_by_location =
      driQueryOptionb(options, "dual_color_blend_by_location");
}
GLboolean
brwCreateContext(gl_api api,
                 const struct gl_config *mesaVis,
                 __DRIcontext *driContextPriv,
                 unsigned major_version,
                 unsigned minor_version,
                 uint32_t flags,
                 bool notify_reset,
                 unsigned *dri_ctx_error,
                 void *sharedContextPrivate)
{
   struct gl_context *shareCtx = (struct gl_context *) sharedContextPrivate;
   struct intel_screen *screen = driContextPriv->driScreenPriv->driverPrivate;
   const struct gen_device_info *devinfo = &screen->devinfo;
   struct dd_function_table functions;

   /* Only allow the __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS flag if the kernel
    * provides us with context reset notifications.
    */
   uint32_t allowed_flags = __DRI_CTX_FLAG_DEBUG |
                            __DRI_CTX_FLAG_FORWARD_COMPATIBLE |
                            __DRI_CTX_FLAG_NO_ERROR;

   if (screen->has_context_reset_notification)
      allowed_flags |= __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS;

   if (flags & ~allowed_flags) {
      *dri_ctx_error = __DRI_CTX_ERROR_UNKNOWN_FLAG;
      return false;
   }

   struct brw_context *brw = rzalloc(NULL, struct brw_context);
   if (!brw) {
      fprintf(stderr, "%s: failed to alloc context\n", __func__);
      *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
      return false;
   }
   driContextPriv->driverPrivate = brw;
   brw->driContext = driContextPriv;
   brw->screen = screen;
   brw->bufmgr = screen->bufmgr;

   brw->gen = devinfo->gen;
   brw->gt = devinfo->gt;
   brw->is_g4x = devinfo->is_g4x;
   brw->is_baytrail = devinfo->is_baytrail;
   brw->is_haswell = devinfo->is_haswell;
   brw->is_cherryview = devinfo->is_cherryview;
   brw->is_broxton = devinfo->is_broxton || devinfo->is_geminilake;
   brw->has_llc = devinfo->has_llc;
   brw->has_hiz = devinfo->has_hiz_and_separate_stencil;
   brw->has_separate_stencil = devinfo->has_hiz_and_separate_stencil;
   brw->has_pln = devinfo->has_pln;
   brw->has_compr4 = devinfo->has_compr4;
   brw->has_surface_tile_offset = devinfo->has_surface_tile_offset;
   brw->has_negative_rhw_bug = devinfo->has_negative_rhw_bug;
   brw->needs_unlit_centroid_workaround =
      devinfo->needs_unlit_centroid_workaround;

   brw->must_use_separate_stencil = devinfo->must_use_separate_stencil;
   brw->has_swizzling = screen->hw_has_swizzling;

   brw->isl_dev = screen->isl_dev;

   brw->vs.base.stage = MESA_SHADER_VERTEX;
   brw->tcs.base.stage = MESA_SHADER_TESS_CTRL;
   brw->tes.base.stage = MESA_SHADER_TESS_EVAL;
   brw->gs.base.stage = MESA_SHADER_GEOMETRY;
   brw->wm.base.stage = MESA_SHADER_FRAGMENT;
   if (brw->gen >= 8) {
      gen8_init_vtable_surface_functions(brw);
      brw->vtbl.emit_depth_stencil_hiz = gen8_emit_depth_stencil_hiz;
   } else if (brw->gen >= 7) {
      gen7_init_vtable_surface_functions(brw);
      brw->vtbl.emit_depth_stencil_hiz = gen7_emit_depth_stencil_hiz;
   } else if (brw->gen >= 6) {
      gen6_init_vtable_surface_functions(brw);
      brw->vtbl.emit_depth_stencil_hiz = gen6_emit_depth_stencil_hiz;
   } else {
      gen4_init_vtable_surface_functions(brw);
      brw->vtbl.emit_depth_stencil_hiz = brw_emit_depth_stencil_hiz;
   }

   brw_init_driver_functions(brw, &functions);
   if (notify_reset)
      functions.GetGraphicsResetStatus = brw_get_graphics_reset_status;

   struct gl_context *ctx = &brw->ctx;

   if (!_mesa_initialize_context(ctx, api, mesaVis, shareCtx, &functions)) {
      *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
      fprintf(stderr, "%s: failed to init mesa context\n", __func__);
      intelDestroyContext(driContextPriv);
      return false;
   }

   driContextSetFlags(ctx, flags);

   /* Initialize the software rasterizer and helper modules.
    *
    * As of GL 3.1 core, the gen4+ driver doesn't need the swrast context for
    * software fallbacks (which we have to support on legacy GL to do weird
    * glDrawPixels(), glBitmap(), and other functions).
    */
   if (api != API_OPENGL_CORE && api != API_OPENGLES2) {
      _swrast_CreateContext(ctx);
   }

   _vbo_CreateContext(ctx);
   if (ctx->swrast_context) {
      _tnl_CreateContext(ctx);
      TNL_CONTEXT(ctx)->Driver.RunPipeline = _tnl_run_pipeline;
      _swsetup_CreateContext(ctx);

      /* Configure swrast to match hardware characteristics: */
      _swrast_allow_pixel_fog(ctx, false);
      _swrast_allow_vertex_fog(ctx, true);
   }

   _mesa_meta_init(ctx);

   brw_process_driconf_options(brw);

   if (INTEL_DEBUG & DEBUG_PERF)
      brw->perf_debug = true;

   brw_initialize_cs_context_constants(brw);
   brw_initialize_context_constants(brw);

   ctx->Const.ResetStrategy = notify_reset
      ? GL_LOSE_CONTEXT_ON_RESET_ARB : GL_NO_RESET_NOTIFICATION_ARB;

   /* Reinitialize the context point state.  It depends on ctx->Const values. */
   _mesa_init_point(ctx);
   intel_batchbuffer_init(&brw->batch, brw->bufmgr, brw->has_llc);

   if (brw->gen >= 6) {
      /* Create a new hardware context.  Using a hardware context means that
       * our GPU state will be saved/restored on context switch, allowing us
       * to assume that the GPU is in the same state we left it in.
       *
       * This is required for transform feedback buffer offsets, query objects,
       * and also allows us to reduce how much state we have to emit.
       */
      brw->hw_ctx = brw_create_hw_context(brw->bufmgr);

      if (!brw->hw_ctx) {
         fprintf(stderr, "Failed to create hardware context.\n");
         intelDestroyContext(driContextPriv);
         return false;
      }
   }

   if (brw_init_pipe_control(brw, devinfo)) {
      *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
      intelDestroyContext(driContextPriv);
      return false;
   }

   intelInitExtensions(ctx);

   brw_init_surface_formats(brw);

   brw_blorp_init(brw);

   brw->urb.size = devinfo->urb.size;

   if (brw->gen == 6)
      brw->urb.gs_present = false;

   brw->prim_restart.in_progress = false;
   brw->prim_restart.enable_cut_index = false;
   brw->gs.enabled = false;
   brw->clip.viewport_count = 1;

   brw->predicate.state = BRW_PREDICATE_STATE_RENDER;

   brw->max_gtt_map_object_size = screen->max_gtt_map_object_size;

   ctx->VertexProgram._MaintainTnlProgram = true;
   ctx->FragmentProgram._MaintainTexEnvProgram = true;

   brw_draw_init( brw );

   if ((flags & __DRI_CTX_FLAG_DEBUG) != 0) {
      /* Turn on some extra GL_ARB_debug_output generation. */
      brw->perf_debug = true;
   }

   if ((flags & __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS) != 0) {
      ctx->Const.ContextFlags |= GL_CONTEXT_FLAG_ROBUST_ACCESS_BIT_ARB;
      ctx->Const.RobustAccess = GL_TRUE;
   }

   if (INTEL_DEBUG & DEBUG_SHADER_TIME)
      brw_init_shader_time(brw);

   _mesa_compute_version(ctx);

   _mesa_initialize_dispatch_tables(ctx);
   _mesa_initialize_vbo_vtxfmt(ctx);

   if (ctx->Extensions.INTEL_performance_query)
      brw_init_performance_queries(brw);

   vbo_use_buffer_objects(ctx);
   vbo_always_unmap_buffers(ctx);

   return true;
}
void
intelDestroyContext(__DRIcontext * driContextPriv)
{
   struct brw_context *brw =
      (struct brw_context *) driContextPriv->driverPrivate;
   struct gl_context *ctx = &brw->ctx;

   _mesa_meta_free(&brw->ctx);

   if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
      /* Force a report. */
      brw->shader_time.report_time = 0;

      brw_collect_and_report_shader_time(brw);
      brw_destroy_shader_time(brw);
   }

   blorp_finish(&brw->blorp);

   brw_destroy_state(brw);
   brw_draw_destroy(brw);

   brw_bo_unreference(brw->curbe.curbe_bo);
   if (brw->vs.base.scratch_bo)
      brw_bo_unreference(brw->vs.base.scratch_bo);
   if (brw->tcs.base.scratch_bo)
      brw_bo_unreference(brw->tcs.base.scratch_bo);
   if (brw->tes.base.scratch_bo)
      brw_bo_unreference(brw->tes.base.scratch_bo);
   if (brw->gs.base.scratch_bo)
      brw_bo_unreference(brw->gs.base.scratch_bo);
   if (brw->wm.base.scratch_bo)
      brw_bo_unreference(brw->wm.base.scratch_bo);

   brw_destroy_hw_context(brw->bufmgr, brw->hw_ctx);

   if (ctx->swrast_context) {
      _swsetup_DestroyContext(&brw->ctx);
      _tnl_DestroyContext(&brw->ctx);
   }
   _vbo_DestroyContext(&brw->ctx);

   if (ctx->swrast_context)
      _swrast_DestroyContext(&brw->ctx);

   brw_fini_pipe_control(brw);
   intel_batchbuffer_free(&brw->batch);

   brw_bo_unreference(brw->throttle_batch[1]);
   brw_bo_unreference(brw->throttle_batch[0]);
   brw->throttle_batch[1] = NULL;
   brw->throttle_batch[0] = NULL;

   driDestroyOptionCache(&brw->optionCache);

   /* free the Mesa context */
   _mesa_free_context_data(&brw->ctx);

   ralloc_free(brw);
   driContextPriv->driverPrivate = NULL;
}
GLboolean
intelUnbindContext(__DRIcontext * driContextPriv)
{
   /* Unset current context and dispatch table */
   _mesa_make_current(NULL, NULL, NULL);

   return true;
}
/**
 * Fixes up the context for GLES2/3 with our default-to-sRGB-capable behavior
 * on window system framebuffers.
 *
 * Desktop GL is fairly reasonable in its handling of sRGB: You can ask if
 * your renderbuffer can do sRGB encode, and you can flip a switch that does
 * sRGB encode if the renderbuffer can handle it.  You can ask specifically
 * for a visual where you're guaranteed to be capable, but it turns out that
 * everyone just makes all their ARGB8888 visuals capable and doesn't offer
 * incapable ones, because there's no difference between the two in resources
 * used.  Applications thus get built that accidentally rely on the default
 * visual choice being sRGB, so we make ours sRGB capable.  Everything sounds
 * great.
 *
 * But for GLES2/3, they decided that it was silly to not turn on sRGB encode
 * for sRGB renderbuffers you made with the GL_EXT_texture_sRGB equivalent.
 * So they removed the enable knob and made it "if the renderbuffer is sRGB
 * capable, do sRGB encode".  Then, for your window system renderbuffers, you
 * can ask for sRGB visuals and get sRGB encode, or not ask for sRGB visuals
 * and get no sRGB encode (assuming that both kinds of visual are available).
 * Thus our choice to support sRGB by default on our visuals for desktop would
 * result in broken rendering of GLES apps that aren't expecting sRGB encode.
 *
 * Unfortunately, renderbuffer setup happens before a context is created.  So
 * in intel_screen.c we always set up sRGB, and here, if you're a GLES2/3
 * context (without an sRGB visual, though we don't have sRGB visuals exposed
 * yet), we go turn that back off before anyone finds out.
 */
static void
intel_gles3_srgb_workaround(struct brw_context *brw,
                            struct gl_framebuffer *fb)
{
   struct gl_context *ctx = &brw->ctx;

   if (_mesa_is_desktop_gl(ctx) || !fb->Visual.sRGBCapable)
      return;

   /* Some day when we support the sRGB capable bit on visuals available for
    * GLES, we'll need to respect that and not disable things here.
    */
   fb->Visual.sRGBCapable = false;
   for (int i = 0; i < BUFFER_COUNT; i++) {
      struct gl_renderbuffer *rb = fb->Attachment[i].Renderbuffer;
      if (rb)
         rb->Format = _mesa_get_srgb_format_linear(rb->Format);
   }
}
GLboolean
intelMakeCurrent(__DRIcontext * driContextPriv,
                 __DRIdrawable * driDrawPriv,
                 __DRIdrawable * driReadPriv)
{
   struct brw_context *brw;
   GET_CURRENT_CONTEXT(curCtx);

   if (driContextPriv)
      brw = (struct brw_context *) driContextPriv->driverPrivate;
   else
      brw = NULL;

   /* According to the glXMakeCurrent() man page: "Pending commands to
    * the previous context, if any, are flushed before it is released."
    * But only flush if we're actually changing contexts.
    */
   if (brw_context(curCtx) && brw_context(curCtx) != brw) {
      _mesa_flush(curCtx);
   }

   if (driContextPriv) {
      struct gl_context *ctx = &brw->ctx;
      struct gl_framebuffer *fb, *readFb;

      if (driDrawPriv == NULL) {
         fb = _mesa_get_incomplete_framebuffer();
      } else {
         fb = driDrawPriv->driverPrivate;
         driContextPriv->dri2.draw_stamp = driDrawPriv->dri2.stamp - 1;
      }

      if (driReadPriv == NULL) {
         readFb = _mesa_get_incomplete_framebuffer();
      } else {
         readFb = driReadPriv->driverPrivate;
         driContextPriv->dri2.read_stamp = driReadPriv->dri2.stamp - 1;
      }

      /* The sRGB workaround changes the renderbuffer's format. We must change
       * the format before the renderbuffer's miptree gets allocated, otherwise
       * the formats of the renderbuffer and its miptree will differ.
       */
      intel_gles3_srgb_workaround(brw, fb);
      intel_gles3_srgb_workaround(brw, readFb);

      /* If the context viewport hasn't been initialized, force a call out to
       * the loader to get buffers so we have a drawable size for the initial
       * viewport. */
      if (!brw->ctx.ViewportInitialized)
         intel_prepare_render(brw);

      _mesa_make_current(ctx, fb, readFb);
   } else {
      _mesa_make_current(NULL, NULL, NULL);
   }

   return true;
}
static void
intel_resolve_for_dri2_flush(struct brw_context *brw,
                             __DRIdrawable *drawable)
{
   if (brw->gen < 6) {
      /* MSAA and fast color clear are not supported, so don't waste time
       * checking whether a resolve is needed.
       */
      return;
   }

   struct gl_framebuffer *fb = drawable->driverPrivate;
   struct intel_renderbuffer *rb;

   /* Usually, only the back buffer will need to be downsampled. However,
    * the front buffer will also need it if the user has rendered into it.
    */
   static const gl_buffer_index buffers[2] = {
         BUFFER_BACK_LEFT,
         BUFFER_FRONT_LEFT,
   };

   for (int i = 0; i < 2; ++i) {
      rb = intel_get_renderbuffer(fb, buffers[i]);
      if (rb == NULL || rb->mt == NULL)
         continue;
      if (rb->mt->surf.samples == 1) {
         assert(rb->mt_layer == 0 && rb->mt_level == 0 &&
                rb->layer_count == 1);
         intel_miptree_prepare_access(brw, rb->mt, 0, 1, 0, 1, false, false);
      } else {
         intel_renderbuffer_downsample(brw, rb);
      }
   }
}
static unsigned
intel_bits_per_pixel(const struct intel_renderbuffer *rb)
{
   return _mesa_get_format_bytes(intel_rb_format(rb)) * 8;
}
static void
intel_query_dri2_buffers(struct brw_context *brw,
                         __DRIdrawable *drawable,
                         __DRIbuffer **buffers,
                         int *buffer_count);

static void
intel_process_dri2_buffer(struct brw_context *brw,
                          __DRIdrawable *drawable,
                          __DRIbuffer *buffer,
                          struct intel_renderbuffer *rb,
                          const char *buffer_name);

static void
intel_update_image_buffers(struct brw_context *brw, __DRIdrawable *drawable);
static void
intel_update_dri2_buffers(struct brw_context *brw, __DRIdrawable *drawable)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;
   struct intel_renderbuffer *rb;
   __DRIbuffer *buffers = NULL;
   int count;
   const char *region_name;

   /* Set this up front, so that in case our buffers get invalidated
    * while we're getting new buffers, we don't clobber the stamp and
    * thus ignore the invalidate. */
   drawable->lastStamp = drawable->dri2.stamp;

   if (unlikely(INTEL_DEBUG & DEBUG_DRI))
      fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);

   intel_query_dri2_buffers(brw, drawable, &buffers, &count);

   if (buffers == NULL)
      return;

   for (int i = 0; i < count; i++) {
      switch (buffers[i].attachment) {
      case __DRI_BUFFER_FRONT_LEFT:
         rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
         region_name = "dri2 front buffer";
         break;

      case __DRI_BUFFER_FAKE_FRONT_LEFT:
         rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
         region_name = "dri2 fake front buffer";
         break;

      case __DRI_BUFFER_BACK_LEFT:
         rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
         region_name = "dri2 back buffer";
         break;

      case __DRI_BUFFER_DEPTH:
      case __DRI_BUFFER_HIZ:
      case __DRI_BUFFER_DEPTH_STENCIL:
      case __DRI_BUFFER_STENCIL:
      case __DRI_BUFFER_ACCUM:
      default:
         fprintf(stderr,
                 "unhandled buffer attach event, attachment type %d\n",
                 buffers[i].attachment);
         return;
      }

      intel_process_dri2_buffer(brw, drawable, &buffers[i], rb, region_name);
   }
}
void
intel_update_renderbuffers(__DRIcontext *context, __DRIdrawable *drawable)
{
   struct brw_context *brw = context->driverPrivate;
   __DRIscreen *dri_screen = brw->screen->driScrnPriv;

   /* Set this up front, so that in case our buffers get invalidated
    * while we're getting new buffers, we don't clobber the stamp and
    * thus ignore the invalidate. */
   drawable->lastStamp = drawable->dri2.stamp;

   if (unlikely(INTEL_DEBUG & DEBUG_DRI))
      fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);

   if (dri_screen->image.loader)
      intel_update_image_buffers(brw, drawable);
   else
      intel_update_dri2_buffers(brw, drawable);

   driUpdateFramebufferSize(&brw->ctx, drawable);
}
/**
 * intel_prepare_render should be called anywhere that current read/drawbuffer
 * state is required.
 */
void
intel_prepare_render(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   __DRIcontext *driContext = brw->driContext;
   __DRIdrawable *drawable;

   drawable = driContext->driDrawablePriv;
   if (drawable && drawable->dri2.stamp != driContext->dri2.draw_stamp) {
      if (drawable->lastStamp != drawable->dri2.stamp)
         intel_update_renderbuffers(driContext, drawable);
      driContext->dri2.draw_stamp = drawable->dri2.stamp;
   }

   drawable = driContext->driReadablePriv;
   if (drawable && drawable->dri2.stamp != driContext->dri2.read_stamp) {
      if (drawable->lastStamp != drawable->dri2.stamp)
         intel_update_renderbuffers(driContext, drawable);
      driContext->dri2.read_stamp = drawable->dri2.stamp;
   }

   /* If we're currently rendering to the front buffer, the rendering
    * that will happen next will probably dirty the front buffer.  So
    * mark it as dirty here.
    */
   if (_mesa_is_front_buffer_drawing(ctx->DrawBuffer))
      brw->front_buffer_dirty = true;
}
/**
 * \brief Query DRI2 to obtain a DRIdrawable's buffers.
 *
 * To determine which DRI buffers to request, examine the renderbuffers
 * attached to the drawable's framebuffer. Then request the buffers with
 * DRI2GetBuffers() or DRI2GetBuffersWithFormat().
 *
 * This is called from intel_update_renderbuffers().
 *
 * \param drawable      Drawable whose buffers are queried.
 * \param buffers       [out] List of buffers returned by DRI2 query.
 * \param buffer_count  [out] Number of buffers returned.
 *
 * \see intel_update_renderbuffers()
 * \see DRI2GetBuffers()
 * \see DRI2GetBuffersWithFormat()
 */
static void
intel_query_dri2_buffers(struct brw_context *brw,
                         __DRIdrawable *drawable,
                         __DRIbuffer **buffers,
                         int *buffer_count)
{
   __DRIscreen *dri_screen = brw->screen->driScrnPriv;
   struct gl_framebuffer *fb = drawable->driverPrivate;
   int i = 0;
   unsigned attachments[8];

   struct intel_renderbuffer *front_rb;
   struct intel_renderbuffer *back_rb;

   front_rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
   back_rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);

   memset(attachments, 0, sizeof(attachments));
   if ((_mesa_is_front_buffer_drawing(fb) ||
        _mesa_is_front_buffer_reading(fb) ||
        !back_rb) && front_rb) {
      /* If a fake front buffer is in use, then querying for
       * __DRI_BUFFER_FRONT_LEFT will cause the server to copy the image from
       * the real front buffer to the fake front buffer.  So before doing the
       * query, we need to make sure all the pending drawing has landed in the
       * real front buffer.
       */
      intel_batchbuffer_flush(brw);
      intel_flush_front(&brw->ctx);

      attachments[i++] = __DRI_BUFFER_FRONT_LEFT;
      attachments[i++] = intel_bits_per_pixel(front_rb);
   } else if (front_rb && brw->front_buffer_dirty) {
      /* We have pending front buffer rendering, but we aren't querying for a
       * front buffer.  If the front buffer we have is a fake front buffer,
       * the X server is going to throw it away when it processes the query.
       * So before doing the query, make sure all the pending drawing has
       * landed in the real front buffer.
       */
      intel_batchbuffer_flush(brw);
      intel_flush_front(&brw->ctx);
   }

   if (back_rb) {
      attachments[i++] = __DRI_BUFFER_BACK_LEFT;
      attachments[i++] = intel_bits_per_pixel(back_rb);
   }

   assert(i <= ARRAY_SIZE(attachments));

   *buffers =
      dri_screen->dri2.loader->getBuffersWithFormat(drawable,
                                                    &drawable->w,
                                                    &drawable->h,
                                                    attachments, i / 2,
                                                    buffer_count,
                                                    drawable->loaderPrivate);
}
/**
 * \brief Assign a DRI buffer's DRM region to a renderbuffer.
 *
 * This is called from intel_update_renderbuffers().
 *
 * \par Note:
 *    DRI buffers whose attachment point is DRI2BufferStencil or
 *    DRI2BufferDepthStencil are handled as special cases.
 *
 * \param buffer_name is a human readable name, such as "dri2 front buffer",
 *        that is passed to brw_bo_gem_create_from_name().
 *
 * \see intel_update_renderbuffers()
 */
static void
intel_process_dri2_buffer(struct brw_context *brw,
                          __DRIdrawable *drawable,
                          __DRIbuffer *buffer,
                          struct intel_renderbuffer *rb,
                          const char *buffer_name)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;
   struct brw_bo *bo;

   if (!rb)
      return;

   unsigned num_samples = rb->Base.Base.NumSamples;

   /* We try to avoid closing and reopening the same BO name, because the first
    * use of a mapping of the buffer involves a bunch of page faulting which is
    * moderately expensive.
    */
   struct intel_mipmap_tree *last_mt;
   if (num_samples == 0)
      last_mt = rb->mt;
   else
      last_mt = rb->singlesample_mt;

   uint32_t old_name = 0;
   if (last_mt) {
      /* The bo already has a name because the miptree was created by a
       * previous call to intel_process_dri2_buffer(). If a bo already has a
       * name, then brw_bo_flink() is a low-cost getter.  It does not
       * create a new name.
       */
      brw_bo_flink(last_mt->bo, &old_name);
   }

   if (old_name == buffer->name)
      return;

   if (unlikely(INTEL_DEBUG & DEBUG_DRI)) {
      fprintf(stderr,
              "attaching buffer %d, at %d, cpp %d, pitch %d\n",
              buffer->name, buffer->attachment,
              buffer->cpp, buffer->pitch);
   }

   bo = brw_bo_gem_create_from_name(brw->bufmgr, buffer_name,
                                    buffer->name);
   if (!bo) {
      fprintf(stderr,
              "Failed to open BO for returned DRI2 buffer "
              "(%dx%d, %s, named %d).\n"
              "This is likely a bug in the X Server that will lead to a "
              "crash soon.\n",
              drawable->w, drawable->h, buffer_name, buffer->name);
      return;
   }

   struct intel_mipmap_tree *mt =
      intel_miptree_create_for_bo(brw,
                                  bo,
                                  intel_rb_format(rb),
                                  0,
                                  drawable->w,
                                  drawable->h,
                                  1,
                                  buffer->pitch,
                                  MIPTREE_LAYOUT_FOR_SCANOUT);
   if (!mt) {
      brw_bo_unreference(bo);
      return;
   }

   if (!intel_update_winsys_renderbuffer_miptree(brw, rb, mt,
                                                 drawable->w, drawable->h,
                                                 buffer->pitch)) {
      brw_bo_unreference(bo);
      intel_miptree_release(&mt);
      return;
   }

   if (_mesa_is_front_buffer_drawing(fb) &&
       (buffer->attachment == __DRI_BUFFER_FRONT_LEFT ||
        buffer->attachment == __DRI_BUFFER_FAKE_FRONT_LEFT) &&
       rb->Base.Base.NumSamples > 1) {
      intel_renderbuffer_upsample(brw, rb);
   }

   assert(rb->mt);

   brw_bo_unreference(bo);
}
/**
 * \brief Query DRI image loader to obtain a DRIdrawable's buffers.
 *
 * To determine which DRI buffers to request, examine the renderbuffers
 * attached to the drawable's framebuffer. Then request the buffers from
 * the image loader.
 *
 * This is called from intel_update_renderbuffers().
 *
 * \param drawable      Drawable whose buffers are queried.
 * \param buffers       [out] List of buffers returned by DRI2 query.
 * \param buffer_count  [out] Number of buffers returned.
 *
 * \see intel_update_renderbuffers()
 */
static void
intel_update_image_buffer(struct brw_context *intel,
                          __DRIdrawable *drawable,
                          struct intel_renderbuffer *rb,
                          __DRIimage *buffer,
                          enum __DRIimageBufferMask buffer_type)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;

   if (!rb || !buffer->bo)
      return;

   unsigned num_samples = rb->Base.Base.NumSamples;

   /* Check and see if we're already bound to the right
    * buffer object
    */
   struct intel_mipmap_tree *last_mt;
   if (num_samples == 0)
      last_mt = rb->mt;
   else
      last_mt = rb->singlesample_mt;

   if (last_mt && last_mt->bo == buffer->bo)
      return;

   enum isl_colorspace colorspace;
   switch (_mesa_get_format_color_encoding(intel_rb_format(rb))) {
   case GL_SRGB:
      colorspace = ISL_COLORSPACE_SRGB;
      break;
   case GL_LINEAR:
      colorspace = ISL_COLORSPACE_LINEAR;
      break;
   default:
      unreachable("Invalid color encoding");
   }

   struct intel_mipmap_tree *mt =
      intel_miptree_create_for_dri_image(intel, buffer, GL_TEXTURE_2D,
                                         colorspace, true);
   if (!mt)
      return;

   if (!intel_update_winsys_renderbuffer_miptree(intel, rb, mt,
                                                 buffer->width, buffer->height,
                                                 buffer->pitch)) {
      intel_miptree_release(&mt);
      return;
   }

   if (_mesa_is_front_buffer_drawing(fb) &&
       buffer_type == __DRI_IMAGE_BUFFER_FRONT &&
       rb->Base.Base.NumSamples > 1) {
      intel_renderbuffer_upsample(intel, rb);
   }
}
static void
intel_update_image_buffers(struct brw_context *brw, __DRIdrawable *drawable)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;
   __DRIscreen *dri_screen = brw->screen->driScrnPriv;
   struct intel_renderbuffer *front_rb;
   struct intel_renderbuffer *back_rb;
   struct __DRIimageList images;
   mesa_format format;
   uint32_t buffer_mask = 0;
   int ret;

   front_rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
   back_rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);

   if (back_rb)
      format = intel_rb_format(back_rb);
   else if (front_rb)
      format = intel_rb_format(front_rb);
   else
      return;

   if (front_rb && (_mesa_is_front_buffer_drawing(fb) ||
                    _mesa_is_front_buffer_reading(fb) || !back_rb)) {
      buffer_mask |= __DRI_IMAGE_BUFFER_FRONT;
   }

   if (back_rb)
      buffer_mask |= __DRI_IMAGE_BUFFER_BACK;

   ret = dri_screen->image.loader->getBuffers(drawable,
                                              driGLFormatToImageFormat(format),
                                              &drawable->dri2.stamp,
                                              drawable->loaderPrivate,
                                              buffer_mask,
                                              &images);
   if (!ret)
      return;

   if (images.image_mask & __DRI_IMAGE_BUFFER_FRONT) {
      drawable->w = images.front->width;
      drawable->h = images.front->height;
      intel_update_image_buffer(brw,
                                drawable,
                                front_rb,
                                images.front,
                                __DRI_IMAGE_BUFFER_FRONT);
   }

   if (images.image_mask & __DRI_IMAGE_BUFFER_BACK) {
      drawable->w = images.back->width;
      drawable->h = images.back->height;
      intel_update_image_buffer(brw,
                                drawable,
                                back_rb,
                                images.back,
                                __DRI_IMAGE_BUFFER_BACK);
   }
}