/*
 Copyright 2003 VMware, Inc.
 Copyright (C) Intel Corp. 2006.  All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */
#include "compiler/nir/nir.h"
#include "main/api_exec.h"
#include "main/context.h"
#include "main/fbobject.h"
#include "main/extensions.h"
#include "main/imports.h"
#include "main/macros.h"
#include "main/points.h"
#include "main/version.h"
#include "main/vtxfmt.h"
#include "main/texobj.h"
#include "main/framebuffer.h"
#include "main/stencil.h"
#include "main/state.h"

#include "vbo/vbo_context.h"

#include "drivers/common/driverfuncs.h"
#include "drivers/common/meta.h"

#include "brw_context.h"
#include "brw_defines.h"
#include "brw_blorp.h"
#include "brw_state.h"

#include "intel_batchbuffer.h"
#include "intel_buffer_objects.h"
#include "intel_buffers.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
#include "intel_pixel.h"
#include "intel_image.h"
#include "intel_tex.h"
#include "intel_tex_obj.h"

#include "swrast_setup/swrast_setup.h"
#include "tnl/t_pipeline.h"
#include "util/ralloc.h"
#include "util/debug.h"
/***************************************
 * Mesa's Driver Functions
 ***************************************/

const char *const brw_vendor_string = "Intel Open Source Technology Center";
static const char *
get_bsw_model(const struct intel_screen *screen)
{
   switch (screen->eu_total) {

static const char *
brw_get_renderer_string(const struct intel_screen *screen)
{
   static char buffer[128];

   switch (screen->deviceID) {
#define CHIPSET(id, symbol, str) case id: chipset = str; break;
#include "pci_ids/i965_pci_ids.h"
      chipset = "Unknown Intel Chipset";

   /* Braswell branding is funny, so we have to fix it up here */
   if (screen->deviceID == 0x22B1) {
      char *bsw = strdup(chipset);
      char *needle = strstr(bsw, "XXX");
      memcpy(needle, get_bsw_model(screen), 3);

   (void) driGetRendererString(buffer, chipset, 0);
static const GLubyte *
intel_get_string(struct gl_context * ctx, GLenum name)
{
   const struct brw_context *const brw = brw_context(ctx);

      return (GLubyte *) brw_vendor_string;

      return (GLubyte *) brw_get_renderer_string(brw->screen);
static void
intel_viewport(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);
   __DRIcontext *driContext = brw->driContext;

   if (_mesa_is_winsys_fbo(ctx->DrawBuffer)) {
      if (driContext->driDrawablePriv)
         dri2InvalidateDrawable(driContext->driDrawablePriv);
      if (driContext->driReadablePriv)
         dri2InvalidateDrawable(driContext->driReadablePriv);
   }
}
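/* Clamp the framebuffer's default sample count (ARB_framebuffer_no_attachments
 * state) to an MSAA mode the hardware actually supports.
 */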
static void
intel_update_framebuffer(struct gl_context *ctx,
                         struct gl_framebuffer *fb)
{
   struct brw_context *brw = brw_context(ctx);

   /* Quantize the derived default number of samples */
   fb->DefaultGeometry._NumSamples =
      intel_quantize_num_samples(brw->screen,
                                 fb->DefaultGeometry.NumSamples);
}
static void
intel_update_state(struct gl_context * ctx)
{
   GLuint new_state = ctx->NewState;
   struct brw_context *brw = brw_context(ctx);

   if (ctx->swrast_context)
      _swrast_InvalidateState(ctx, new_state);

   brw->NewGLState |= new_state;

   if (new_state & (_NEW_SCISSOR | _NEW_BUFFERS | _NEW_VIEWPORT))
      _mesa_update_draw_buffer_bounds(ctx, ctx->DrawBuffer);

   if (new_state & (_NEW_STENCIL | _NEW_BUFFERS)) {
      brw->stencil_enabled = _mesa_stencil_is_enabled(ctx);
      brw->stencil_two_sided = _mesa_stencil_is_two_sided(ctx);
      brw->stencil_write_enabled =
         _mesa_stencil_is_write_enabled(ctx, brw->stencil_two_sided);
   }

   if (new_state & _NEW_POLYGON)
      brw->polygon_front_bit = _mesa_polygon_get_front_bit(ctx);

   if (new_state & _NEW_BUFFERS) {
      intel_update_framebuffer(ctx, ctx->DrawBuffer);
      if (ctx->DrawBuffer != ctx->ReadBuffer)
         intel_update_framebuffer(ctx, ctx->ReadBuffer);
   }
}
#define flushFront(screen)      ((screen)->image.loader ? (screen)->image.loader->flushFrontBuffer : (screen)->dri2.loader->flushFrontBuffer)

static void
intel_flush_front(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);
   __DRIcontext *driContext = brw->driContext;
   __DRIdrawable *driDrawable = driContext->driDrawablePriv;
   __DRIscreen *const dri_screen = brw->screen->driScrnPriv;

   if (brw->front_buffer_dirty && _mesa_is_winsys_fbo(ctx->DrawBuffer)) {
      if (flushFront(dri_screen) && driDrawable &&
          driDrawable->loaderPrivate) {

         /* Resolve before flushing FAKE_FRONT_LEFT to FRONT_LEFT.
          *
          * This potentially resolves both front and back buffer. It
          * is unnecessary to resolve the back, but harms nothing except
          * performance. And no one cares about front-buffer render
          * performance.
          */
         intel_resolve_for_dri2_flush(brw, driDrawable);
         intel_batchbuffer_flush(brw);

         flushFront(dri_screen)(driDrawable, driDrawable->loaderPrivate);

         /* We set the dirty bit in intel_prepare_render() if we're
          * front buffer rendering once we get there.
          */
         brw->front_buffer_dirty = false;
      }
   }
}
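/* Called by the GL Flush entry point: submit any queued batchbuffer commands
 * and hand a dirty front buffer back to the window system.
 */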
static void
intel_glFlush(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);

   intel_batchbuffer_flush(brw);
   intel_flush_front(ctx);

   brw->need_flush_throttle = true;
}
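/* Called by the GL Finish entry point: block until the GPU has finished
 * executing the most recently submitted batchbuffer.
 */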
static void
intel_finish(struct gl_context * ctx)
{
   struct brw_context *brw = brw_context(ctx);

   if (brw->batch.last_bo)
      brw_bo_wait_rendering(brw->batch.last_bo);
}
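/* Plug the i965 implementations into Mesa's device-driver function table.
 * Anything not overridden here keeps the default installed by
 * _mesa_init_driver_functions().
 */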
static void
brw_init_driver_functions(struct brw_context *brw,
                          struct dd_function_table *functions)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   _mesa_init_driver_functions(functions);

   /* GLX uses DRI2 invalidate events to handle window resizing.
    * Unfortunately, EGL does not - libEGL is written in XCB (not Xlib),
    * which doesn't provide a mechanism for snooping the event queues.
    *
    * So EGL still relies on viewport hacks to handle window resizing.
    * This should go away with DRI3000.
    */
   if (!brw->driContext->driScreenPriv->dri2.useInvalidate)
      functions->Viewport = intel_viewport;

   functions->Flush = intel_glFlush;
   functions->Finish = intel_finish;
   functions->GetString = intel_get_string;
   functions->UpdateState = intel_update_state;

   intelInitTextureFuncs(functions);
   intelInitTextureImageFuncs(functions);
   intelInitTextureSubImageFuncs(functions);
   intelInitTextureCopyImageFuncs(functions);
   intelInitCopyImageFuncs(functions);
   intelInitClearFuncs(functions);
   intelInitBufferFuncs(functions);
   intelInitPixelFuncs(functions);
   intelInitBufferObjectFuncs(functions);
   brw_init_syncobj_functions(functions);
   brw_init_object_purgeable_functions(functions);

   brwInitFragProgFuncs( functions );
   brw_init_common_queryobj_functions(functions);
   if (devinfo->gen >= 8 || brw->is_haswell)
      hsw_init_queryobj_functions(functions);
   else if (devinfo->gen >= 6)
      gen6_init_queryobj_functions(functions);
   else
      gen4_init_queryobj_functions(functions);
   brw_init_compute_functions(functions);
   brw_init_conditional_render_functions(functions);

   functions->QueryInternalFormat = brw_query_internal_format;

   functions->NewTransformFeedback = brw_new_transform_feedback;
   functions->DeleteTransformFeedback = brw_delete_transform_feedback;
   if (can_do_mi_math_and_lrr(brw->screen)) {
      functions->BeginTransformFeedback = hsw_begin_transform_feedback;
      functions->EndTransformFeedback = hsw_end_transform_feedback;
      functions->PauseTransformFeedback = hsw_pause_transform_feedback;
      functions->ResumeTransformFeedback = hsw_resume_transform_feedback;
   } else if (devinfo->gen >= 7) {
      functions->BeginTransformFeedback = gen7_begin_transform_feedback;
      functions->EndTransformFeedback = gen7_end_transform_feedback;
      functions->PauseTransformFeedback = gen7_pause_transform_feedback;
      functions->ResumeTransformFeedback = gen7_resume_transform_feedback;
      functions->GetTransformFeedbackVertexCount =
         brw_get_transform_feedback_vertex_count;
   } else {
      functions->BeginTransformFeedback = brw_begin_transform_feedback;
      functions->EndTransformFeedback = brw_end_transform_feedback;
      functions->PauseTransformFeedback = brw_pause_transform_feedback;
      functions->ResumeTransformFeedback = brw_resume_transform_feedback;
      functions->GetTransformFeedbackVertexCount =
         brw_get_transform_feedback_vertex_count;
   }

   if (devinfo->gen >= 6)
      functions->GetSamplePosition = gen6_get_sample_position;
}
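/* Fill in ctx->Const with the limits and capabilities this generation of
 * hardware supports; called once from brwCreateContext().
 */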
static void
brw_initialize_context_constants(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct gl_context *ctx = &brw->ctx;
   const struct brw_compiler *compiler = brw->screen->compiler;

   const bool stage_exists[MESA_SHADER_STAGES] = {
      [MESA_SHADER_VERTEX] = true,
      [MESA_SHADER_TESS_CTRL] = devinfo->gen >= 7,
      [MESA_SHADER_TESS_EVAL] = devinfo->gen >= 7,
      [MESA_SHADER_GEOMETRY] = devinfo->gen >= 6,
      [MESA_SHADER_FRAGMENT] = true,
      [MESA_SHADER_COMPUTE] =
         ((ctx->API == API_OPENGL_COMPAT || ctx->API == API_OPENGL_CORE) &&
          ctx->Const.MaxComputeWorkGroupSize[0] >= 1024) ||
         (ctx->API == API_OPENGLES2 &&
          ctx->Const.MaxComputeWorkGroupSize[0] >= 128) ||
         _mesa_extension_override_enables.ARB_compute_shader,
   };

   unsigned num_stages = 0;
   for (int i = 0; i < MESA_SHADER_STAGES; i++) {
      if (stage_exists[i])
         num_stages++;
   }

   unsigned max_samplers =
      devinfo->gen >= 8 || brw->is_haswell ? BRW_MAX_TEX_UNIT : 16;

   ctx->Const.MaxDualSourceDrawBuffers = 1;
   ctx->Const.MaxDrawBuffers = BRW_MAX_DRAW_BUFFERS;
   ctx->Const.MaxCombinedShaderOutputResources =
      MAX_IMAGE_UNITS + BRW_MAX_DRAW_BUFFERS;

   /* The timestamp register we can read for glGetTimestamp() is
    * sometimes only 32 bits, before scaling to nanoseconds (depending
    * on kernel).
    *
    * Once scaled to nanoseconds the timestamp would roll over at a
    * non-power-of-two, so an application couldn't use
    * GL_QUERY_COUNTER_BITS to handle rollover correctly.  Instead, we
    * report 36 bits and truncate at that (rolling over 5 times as
    * often as the HW counter), and when the 32-bit counter rolls
    * over, it happens to also be at a rollover in the reported value
    * from near (1<<36) to 0.
    *
    * The low 32 bits rolls over in ~343 seconds.  Our 36-bit result
    * rolls over every ~69 seconds.
    */
   ctx->Const.QueryCounterBits.Timestamp = 36;
   ctx->Const.MaxTextureCoordUnits = 8; /* Mesa limit */
   ctx->Const.MaxImageUnits = MAX_IMAGE_UNITS;
   if (devinfo->gen >= 7) {
      ctx->Const.MaxRenderbufferSize = 16384;
      ctx->Const.MaxTextureLevels = MIN2(15 /* 16384 */, MAX_TEXTURE_LEVELS);
      ctx->Const.MaxCubeTextureLevels = 15; /* 16384 */
   } else {
      ctx->Const.MaxRenderbufferSize = 8192;
      ctx->Const.MaxTextureLevels = MIN2(14 /* 8192 */, MAX_TEXTURE_LEVELS);
      ctx->Const.MaxCubeTextureLevels = 14; /* 8192 */
   }
   ctx->Const.Max3DTextureLevels = 12; /* 2048 */
   ctx->Const.MaxArrayTextureLayers = devinfo->gen >= 7 ? 2048 : 512;
   ctx->Const.MaxTextureMbytes = 1536;
   ctx->Const.MaxTextureRectSize = devinfo->gen >= 7 ? 16384 : 8192;
   ctx->Const.MaxTextureMaxAnisotropy = 16.0;
   ctx->Const.MaxTextureLodBias = 15.0;
   ctx->Const.StripTextureBorder = true;
   if (devinfo->gen >= 7) {
      ctx->Const.MaxProgramTextureGatherComponents = 4;
      ctx->Const.MinProgramTextureGatherOffset = -32;
      ctx->Const.MaxProgramTextureGatherOffset = 31;
   } else if (devinfo->gen == 6) {
      ctx->Const.MaxProgramTextureGatherComponents = 1;
      ctx->Const.MinProgramTextureGatherOffset = -8;
      ctx->Const.MaxProgramTextureGatherOffset = 7;
   }

   ctx->Const.MaxUniformBlockSize = 65536;

   for (int i = 0; i < MESA_SHADER_STAGES; i++) {
      struct gl_program_constants *prog = &ctx->Const.Program[i];

      if (!stage_exists[i])
         continue;

      prog->MaxTextureImageUnits = max_samplers;

      prog->MaxUniformBlocks = BRW_MAX_UBO;
      prog->MaxCombinedUniformComponents =
         prog->MaxUniformComponents +
         ctx->Const.MaxUniformBlockSize / 4 * prog->MaxUniformBlocks;

      prog->MaxAtomicCounters = MAX_ATOMIC_COUNTERS;
      prog->MaxAtomicBuffers = BRW_MAX_ABO;
      prog->MaxImageUniforms = compiler->scalar_stage[i] ? BRW_MAX_IMAGES : 0;
      prog->MaxShaderStorageBlocks = BRW_MAX_SSBO;
   }

   ctx->Const.MaxTextureUnits =
      MIN2(ctx->Const.MaxTextureCoordUnits,
           ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits);

   ctx->Const.MaxUniformBufferBindings = num_stages * BRW_MAX_UBO;
   ctx->Const.MaxCombinedUniformBlocks = num_stages * BRW_MAX_UBO;
   ctx->Const.MaxCombinedAtomicBuffers = num_stages * BRW_MAX_ABO;
   ctx->Const.MaxCombinedShaderStorageBlocks = num_stages * BRW_MAX_SSBO;
   ctx->Const.MaxShaderStorageBufferBindings = num_stages * BRW_MAX_SSBO;
   ctx->Const.MaxCombinedTextureImageUnits = num_stages * max_samplers;
   ctx->Const.MaxCombinedImageUniforms = num_stages * BRW_MAX_IMAGES;
   /* Hardware only supports a limited number of transform feedback buffers.
    * So we need to override the Mesa default (which is based only on software
    * limits).
    */
   ctx->Const.MaxTransformFeedbackBuffers = BRW_MAX_SOL_BUFFERS;

   /* On Gen6, in the worst case, we use up one binding table entry per
    * transform feedback component (see comments above the definition of
    * BRW_MAX_SOL_BINDINGS, in brw_context.h), so we need to advertise a value
    * for MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS equal to
    * BRW_MAX_SOL_BINDINGS.
    *
    * In "separate components" mode, we need to divide this value by
    * BRW_MAX_SOL_BUFFERS, so that the total number of binding table entries
    * used up by all buffers will not exceed BRW_MAX_SOL_BINDINGS.
    */
   ctx->Const.MaxTransformFeedbackInterleavedComponents = BRW_MAX_SOL_BINDINGS;
   ctx->Const.MaxTransformFeedbackSeparateComponents =
      BRW_MAX_SOL_BINDINGS / BRW_MAX_SOL_BUFFERS;

   ctx->Const.AlwaysUseGetTransformFeedbackVertexCount =
      !can_do_mi_math_and_lrr(brw->screen);

   int max_samples;
   const int *msaa_modes = intel_supported_msaa_modes(brw->screen);
   const int clamp_max_samples =
      driQueryOptioni(&brw->optionCache, "clamp_max_samples");

   if (clamp_max_samples < 0) {
      max_samples = msaa_modes[0];
   } else {
      /* Select the largest supported MSAA mode that does not exceed
       * clamp_max_samples.
       */
      max_samples = 0;
      for (int i = 0; msaa_modes[i] != 0; ++i) {
         if (msaa_modes[i] <= clamp_max_samples) {
            max_samples = msaa_modes[i];
            break;
         }
      }
   }

   ctx->Const.MaxSamples = max_samples;
   ctx->Const.MaxColorTextureSamples = max_samples;
   ctx->Const.MaxDepthTextureSamples = max_samples;
   ctx->Const.MaxIntegerSamples = max_samples;
   ctx->Const.MaxImageSamples = 0;

   /* gen6_set_sample_maps() sets SampleMap{2,4,8}x variables which are used
    * to map indices of rectangular grid to sample numbers within a pixel.
    * These variables are used by GL_EXT_framebuffer_multisample_blit_scaled
    * extension implementation. For more details see the comment above
    * gen6_set_sample_maps() definition.
    */
   gen6_set_sample_maps(ctx);
   ctx->Const.MinLineWidth = 1.0;
   ctx->Const.MinLineWidthAA = 1.0;
   if (devinfo->gen >= 6) {
      ctx->Const.MaxLineWidth = 7.375;
      ctx->Const.MaxLineWidthAA = 7.375;
      ctx->Const.LineWidthGranularity = 0.125;
   } else {
      ctx->Const.MaxLineWidth = 7.0;
      ctx->Const.MaxLineWidthAA = 7.0;
      ctx->Const.LineWidthGranularity = 0.5;
   }

   /* For non-antialiased lines, we have to round the line width to the
    * nearest whole number. Make sure that we don't advertise a line
    * width that, when rounded, will be beyond the actual hardware
    * maximum.
    */
   assert(roundf(ctx->Const.MaxLineWidth) <= ctx->Const.MaxLineWidth);

   ctx->Const.MinPointSize = 1.0;
   ctx->Const.MinPointSizeAA = 1.0;
   ctx->Const.MaxPointSize = 255.0;
   ctx->Const.MaxPointSizeAA = 255.0;
   ctx->Const.PointSizeGranularity = 1.0;

   if (devinfo->gen >= 5 || brw->is_g4x)
      ctx->Const.MaxClipPlanes = 8;

   ctx->Const.GLSLTessLevelsAsInputs = true;
   ctx->Const.LowerTCSPatchVerticesIn = devinfo->gen >= 8;
   ctx->Const.LowerTESPatchVerticesIn = true;
   ctx->Const.PrimitiveRestartForPatches = true;

   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeInstructions = 16 * 1024;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxAluInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxTexInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxTexIndirections = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAluInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTexInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTexIndirections = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAttribs = 16;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTemps = 256;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAddressRegs = 1;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeParameters = 1024;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxEnvParams =
      MIN2(ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeParameters,
           ctx->Const.Program[MESA_SHADER_VERTEX].MaxEnvParams);

   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeInstructions = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAluInstructions = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTexInstructions = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTexIndirections = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAttribs = 12;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTemps = 256;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAddressRegs = 0;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeParameters = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxEnvParams =
      MIN2(ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeParameters,
           ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxEnvParams);
   /* Fragment shaders use real, 32-bit twos-complement integers for all
    * integer types.
    */
   ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.RangeMin = 31;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.RangeMax = 30;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.Precision = 0;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].HighInt =
      ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MediumInt =
      ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt;

   ctx->Const.Program[MESA_SHADER_VERTEX].LowInt.RangeMin = 31;
   ctx->Const.Program[MESA_SHADER_VERTEX].LowInt.RangeMax = 30;
   ctx->Const.Program[MESA_SHADER_VERTEX].LowInt.Precision = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].HighInt =
      ctx->Const.Program[MESA_SHADER_VERTEX].LowInt;
   ctx->Const.Program[MESA_SHADER_VERTEX].MediumInt =
      ctx->Const.Program[MESA_SHADER_VERTEX].LowInt;
   /* Gen6 converts quads to polygons at the beginning of the 3D pipeline,
    * but we're not sure how that affects the vertex order used for the
    * provoking-vertex decision. Always use the last-vertex convention for
    * quad primitives, which works as expected for now.
    */
   if (devinfo->gen >= 6)
      ctx->Const.QuadsFollowProvokingVertexConvention = false;

   ctx->Const.NativeIntegers = true;
   ctx->Const.VertexID_is_zero_based = true;

   /* Regarding the CMP instruction, the Ivybridge PRM says:
    *
    *   "For each enabled channel 0b or 1b is assigned to the appropriate flag
    *    bit and 0/all zeros or all ones (e.g, byte 0xFF, word 0xFFFF, DWord
    *    0xFFFFFFFF) is assigned to dst."
    *
    * but PRMs for earlier generations say
    *
    *   "In dword format, one GRF may store up to 8 results. When the register
    *    is used later as a vector of Booleans, as only LSB at each channel
    *    contains meaning [sic] data, software should make sure all higher bits
    *    are masked out (e.g. by 'and-ing' an [sic] 0x01 constant)."
    *
    * We select the representation of a true boolean uniform to be ~0, and fix
    * the results of Gen <= 5 CMP instructions with -(result & 1).
    */
   ctx->Const.UniformBooleanTrue = ~0;

   /* From the gen4 PRM, volume 4 page 127:
    *
    *    "For SURFTYPE_BUFFER non-rendertarget surfaces, this field specifies
    *     the base address of the first element of the surface, computed in
    *     software by adding the surface base address to the byte offset of
    *     the element in the buffer."
    *
    * However, unaligned accesses are slower, so enforce buffer alignment.
    *
    * In order to push UBO data, 3DSTATE_CONSTANT_XS imposes an additional
    * restriction: the start of the buffer needs to be 32B aligned.
    */
   ctx->Const.UniformBufferOffsetAlignment = 32;

   /* ShaderStorageBufferOffsetAlignment should be a cacheline (64 bytes) so
    * that we can safely have the CPU and GPU writing the same SSBO on
    * non-cachecoherent systems (our Atom CPUs). With UBOs, the GPU never
    * writes, so there's no problem. For an SSBO, the GPU and the CPU can
    * be updating disjoint regions of the buffer simultaneously and that will
    * break if the regions overlap the same cacheline.
    */
   ctx->Const.ShaderStorageBufferOffsetAlignment = 64;
   ctx->Const.TextureBufferOffsetAlignment = 16;
   ctx->Const.MaxTextureBufferSize = 128 * 1024 * 1024;

   if (devinfo->gen >= 6) {
      ctx->Const.MaxVarying = 32;
      ctx->Const.Program[MESA_SHADER_VERTEX].MaxOutputComponents = 128;
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxInputComponents = 64;
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxOutputComponents = 128;
      ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxInputComponents = 128;
      ctx->Const.Program[MESA_SHADER_TESS_CTRL].MaxInputComponents = 128;
      ctx->Const.Program[MESA_SHADER_TESS_CTRL].MaxOutputComponents = 128;
      ctx->Const.Program[MESA_SHADER_TESS_EVAL].MaxInputComponents = 128;
      ctx->Const.Program[MESA_SHADER_TESS_EVAL].MaxOutputComponents = 128;
   }

   /* We want the GLSL compiler to emit code that uses condition codes */
   for (int i = 0; i < MESA_SHADER_STAGES; i++) {
      ctx->Const.ShaderCompilerOptions[i] =
         brw->screen->compiler->glsl_compiler_options[i];
   }

   if (devinfo->gen >= 7) {
      ctx->Const.MaxViewportWidth = 32768;
      ctx->Const.MaxViewportHeight = 32768;
   }

   /* ARB_viewport_array, OES_viewport_array */
   if (devinfo->gen >= 6) {
      ctx->Const.MaxViewports = GEN6_NUM_VIEWPORTS;
      ctx->Const.ViewportSubpixelBits = 0;

      /* Cast to float before negating because MaxViewportWidth is unsigned.
       */
      ctx->Const.ViewportBounds.Min = -(float)ctx->Const.MaxViewportWidth;
      ctx->Const.ViewportBounds.Max = ctx->Const.MaxViewportWidth;
   }

   /* ARB_gpu_shader5 */
   if (devinfo->gen >= 7)
      ctx->Const.MaxVertexStreams = MIN2(4, MAX_VERTEX_STREAMS);

   /* ARB_framebuffer_no_attachments */
   ctx->Const.MaxFramebufferWidth = 16384;
   ctx->Const.MaxFramebufferHeight = 16384;
   ctx->Const.MaxFramebufferLayers = ctx->Const.MaxArrayTextureLayers;
   ctx->Const.MaxFramebufferSamples = max_samples;

   /* OES_primitive_bounding_box */
   ctx->Const.NoPrimitiveBoundingBoxOutput = true;

   /* TODO: We should be able to use STD430 packing by default on all hardware
    * but some piglit tests [1] currently fail on SNB when this is enabled.
    * The problem is the messages we're using for doing uniform pulls
    * in the vec4 back-end on SNB is the OWORD block load instruction, which
    * takes its offset in units of OWORDS (16 bytes). On IVB+, we use the
    * sampler which doesn't have these restrictions.
    *
    * In the scalar back-end, we use the sampler for dynamic uniform loads and
    * pull an entire cache line at a time for constant offset loads both of
    * which support almost any alignment.
    *
    * [1] glsl-1.40/uniform_buffer/vs-float-array-variable-index.shader_test
    */
   if (devinfo->gen >= 7)
      ctx->Const.UseSTD430AsDefaultPacking = true;
}
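/* Compute-shader limits depend on the device's EU and thread configuration,
 * so they are derived here separately from the general constants above.
 */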
static void
brw_initialize_cs_context_constants(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   const struct intel_screen *screen = brw->screen;
   struct gen_device_info *devinfo = &brw->screen->devinfo;

   /* FINISHME: Do this for all platforms that the kernel supports */
   if (brw->is_cherryview &&
       screen->subslice_total > 0 && screen->eu_total > 0) {
      /* Logical CS threads = EUs per subslice * 7 threads per EU */
      uint32_t max_cs_threads = screen->eu_total / screen->subslice_total * 7;

      /* Fuse configurations may give more threads than expected, never less. */
      if (max_cs_threads > devinfo->max_cs_threads)
         devinfo->max_cs_threads = max_cs_threads;
   }

   /* Maximum number of scalar compute shader invocations that can be run in
    * parallel in the same subslice assuming SIMD32 dispatch.
    *
    * We don't advertise more than 64 threads, because we are limited to 64 by
    * our usage of thread_width_max in the gpgpu walker command. This only
    * currently impacts Haswell, which otherwise might be able to advertise 70
    * threads. With SIMD32 and 64 threads, Haswell still provides twice the
    * number of invocations required by ARB_compute_shader.
    */
   const unsigned max_threads = MIN2(64, devinfo->max_cs_threads);
   const uint32_t max_invocations = 32 * max_threads;
   ctx->Const.MaxComputeWorkGroupSize[0] = max_invocations;
   ctx->Const.MaxComputeWorkGroupSize[1] = max_invocations;
   ctx->Const.MaxComputeWorkGroupSize[2] = max_invocations;
   ctx->Const.MaxComputeWorkGroupInvocations = max_invocations;
   ctx->Const.MaxComputeSharedMemorySize = 64 * 1024;
}
/**
 * Process driconf (drirc) options, setting appropriate context flags.
 *
 * intelInitExtensions still pokes at optionCache directly, in order to
 * avoid advertising various extensions.  No flags are set, so it makes
 * sense to continue doing that there.
 */
static void
brw_process_driconf_options(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct gl_context *ctx = &brw->ctx;

   driOptionCache *options = &brw->optionCache;
   driParseConfigFiles(options, &brw->screen->optionCache,
                       brw->driContext->driScreenPriv->myNum, "i965");

   int bo_reuse_mode = driQueryOptioni(options, "bo_reuse");
   switch (bo_reuse_mode) {
   case DRI_CONF_BO_REUSE_DISABLED:
      break;
   case DRI_CONF_BO_REUSE_ALL:
      brw_bufmgr_enable_reuse(brw->bufmgr);
      break;
   }

   if (INTEL_DEBUG & DEBUG_NO_HIZ) {
      brw->has_hiz = false;
      /* On gen6, you can only do separate stencil with HIZ. */
      if (devinfo->gen == 6)
         brw->has_separate_stencil = false;
   }

   if (driQueryOptionb(options, "mesa_no_error"))
      ctx->Const.ContextFlags |= GL_CONTEXT_FLAG_NO_ERROR_BIT_KHR;

   if (driQueryOptionb(options, "always_flush_batch")) {
      fprintf(stderr, "flushing batchbuffer before/after each draw call\n");
      brw->always_flush_batch = true;
   }

   if (driQueryOptionb(options, "always_flush_cache")) {
      fprintf(stderr, "flushing GPU caches before/after each draw call\n");
      brw->always_flush_cache = true;
   }

   if (driQueryOptionb(options, "disable_throttling")) {
      fprintf(stderr, "disabling flush throttling\n");
      brw->disable_throttling = true;
   }

   brw->precompile = driQueryOptionb(&brw->optionCache, "shader_precompile");

   if (driQueryOptionb(&brw->optionCache, "precise_trig"))
      brw->screen->compiler->precise_trig = true;

   ctx->Const.ForceGLSLExtensionsWarn =
      driQueryOptionb(options, "force_glsl_extensions_warn");

   ctx->Const.ForceGLSLVersion =
      driQueryOptioni(options, "force_glsl_version");

   ctx->Const.DisableGLSLLineContinuations =
      driQueryOptionb(options, "disable_glsl_line_continuations");

   ctx->Const.AllowGLSLExtensionDirectiveMidShader =
      driQueryOptionb(options, "allow_glsl_extension_directive_midshader");

   ctx->Const.AllowGLSLBuiltinVariableRedeclaration =
      driQueryOptionb(options, "allow_glsl_builtin_variable_redeclaration");

   ctx->Const.AllowHigherCompatVersion =
      driQueryOptionb(options, "allow_higher_compat_version");

   ctx->Const.ForceGLSLAbsSqrt =
      driQueryOptionb(options, "force_glsl_abs_sqrt");

   ctx->Const.GLSLZeroInit = driQueryOptionb(options, "glsl_zero_init");

   brw->dual_color_blend_by_location =
      driQueryOptionb(options, "dual_color_blend_by_location");
}
GLboolean
brwCreateContext(gl_api api,
                 const struct gl_config *mesaVis,
                 __DRIcontext *driContextPriv,
                 unsigned major_version,
                 unsigned minor_version,
                 uint32_t flags,
                 bool notify_reset,
                 unsigned *dri_ctx_error,
                 void *sharedContextPrivate)
{
   struct gl_context *shareCtx = (struct gl_context *) sharedContextPrivate;
   struct intel_screen *screen = driContextPriv->driScreenPriv->driverPrivate;
   const struct gen_device_info *devinfo = &screen->devinfo;
   struct dd_function_table functions;

   /* Only allow the __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS flag if the kernel
    * provides us with context reset notifications.
    */
   uint32_t allowed_flags = __DRI_CTX_FLAG_DEBUG |
                            __DRI_CTX_FLAG_FORWARD_COMPATIBLE |
                            __DRI_CTX_FLAG_NO_ERROR;

   if (screen->has_context_reset_notification)
      allowed_flags |= __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS;

   if (flags & ~allowed_flags) {
      *dri_ctx_error = __DRI_CTX_ERROR_UNKNOWN_FLAG;
      return false;
   }

   struct brw_context *brw = rzalloc(NULL, struct brw_context);
   if (!brw) {
      fprintf(stderr, "%s: failed to alloc context\n", __func__);
      *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
      return false;
   }

   driContextPriv->driverPrivate = brw;
   brw->driContext = driContextPriv;
   brw->screen = screen;
   brw->bufmgr = screen->bufmgr;

   brw->gt = devinfo->gt;
   brw->is_g4x = devinfo->is_g4x;
   brw->is_baytrail = devinfo->is_baytrail;
   brw->is_haswell = devinfo->is_haswell;
   brw->is_cherryview = devinfo->is_cherryview;
   brw->is_broxton = devinfo->is_broxton || devinfo->is_geminilake;
   brw->has_llc = devinfo->has_llc;
   brw->has_hiz = devinfo->has_hiz_and_separate_stencil;
   brw->has_separate_stencil = devinfo->has_hiz_and_separate_stencil;
   brw->has_pln = devinfo->has_pln;
   brw->has_compr4 = devinfo->has_compr4;
   brw->has_surface_tile_offset = devinfo->has_surface_tile_offset;
   brw->has_negative_rhw_bug = devinfo->has_negative_rhw_bug;
   brw->needs_unlit_centroid_workaround =
      devinfo->needs_unlit_centroid_workaround;

   brw->must_use_separate_stencil = devinfo->must_use_separate_stencil;
   brw->has_swizzling = screen->hw_has_swizzling;

   brw->isl_dev = screen->isl_dev;

   brw->vs.base.stage = MESA_SHADER_VERTEX;
   brw->tcs.base.stage = MESA_SHADER_TESS_CTRL;
   brw->tes.base.stage = MESA_SHADER_TESS_EVAL;
   brw->gs.base.stage = MESA_SHADER_GEOMETRY;
   brw->wm.base.stage = MESA_SHADER_FRAGMENT;
   if (devinfo->gen >= 8) {
      brw->vtbl.emit_depth_stencil_hiz = gen8_emit_depth_stencil_hiz;
   } else if (devinfo->gen >= 7) {
      brw->vtbl.emit_depth_stencil_hiz = gen7_emit_depth_stencil_hiz;
   } else if (devinfo->gen >= 6) {
      brw->vtbl.emit_depth_stencil_hiz = gen6_emit_depth_stencil_hiz;
   } else {
      brw->vtbl.emit_depth_stencil_hiz = brw_emit_depth_stencil_hiz;
   }

   brw_init_driver_functions(brw, &functions);
   if (notify_reset)
      functions.GetGraphicsResetStatus = brw_get_graphics_reset_status;

   struct gl_context *ctx = &brw->ctx;

   if (!_mesa_initialize_context(ctx, api, mesaVis, shareCtx, &functions)) {
      *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
      fprintf(stderr, "%s: failed to init mesa context\n", __func__);
      intelDestroyContext(driContextPriv);
      return false;
   }

   driContextSetFlags(ctx, flags);

   /* Initialize the software rasterizer and helper modules.
    *
    * As of GL 3.1 core, the gen4+ driver doesn't need the swrast context for
    * software fallbacks (which we have to support on legacy GL to do weird
    * glDrawPixels(), glBitmap(), and other functions).
    */
   if (api != API_OPENGL_CORE && api != API_OPENGLES2) {
      _swrast_CreateContext(ctx);
   }

   _vbo_CreateContext(ctx);
   if (ctx->swrast_context) {
      _tnl_CreateContext(ctx);
      TNL_CONTEXT(ctx)->Driver.RunPipeline = _tnl_run_pipeline;
      _swsetup_CreateContext(ctx);

      /* Configure swrast to match hardware characteristics: */
      _swrast_allow_pixel_fog(ctx, false);
      _swrast_allow_vertex_fog(ctx, true);
   }

   _mesa_meta_init(ctx);

   brw_process_driconf_options(brw);

   if (INTEL_DEBUG & DEBUG_PERF)
      brw->perf_debug = true;

   brw_initialize_cs_context_constants(brw);
   brw_initialize_context_constants(brw);

   ctx->Const.ResetStrategy = notify_reset
      ? GL_LOSE_CONTEXT_ON_RESET_ARB : GL_NO_RESET_NOTIFICATION_ARB;

   /* Reinitialize the context point state.  It depends on ctx->Const values. */
   _mesa_init_point(ctx);

   intel_batchbuffer_init(screen, &brw->batch);

   if (devinfo->gen >= 6) {
      /* Create a new hardware context.  Using a hardware context means that
       * our GPU state will be saved/restored on context switch, allowing us
       * to assume that the GPU is in the same state we left it in.
       *
       * This is required for transform feedback buffer offsets, query objects,
       * and also allows us to reduce how much state we have to emit.
       */
      brw->hw_ctx = brw_create_hw_context(brw->bufmgr);
      if (!brw->hw_ctx) {
         fprintf(stderr, "Failed to create hardware context.\n");
         intelDestroyContext(driContextPriv);
         return false;
      }
   }

   if (brw_init_pipe_control(brw, devinfo)) {
      *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
      intelDestroyContext(driContextPriv);
      return false;
   }

   intelInitExtensions(ctx);

   brw_init_surface_formats(brw);

   brw->urb.size = devinfo->urb.size;

   if (devinfo->gen == 6)
      brw->urb.gs_present = false;

   brw->prim_restart.in_progress = false;
   brw->prim_restart.enable_cut_index = false;
   brw->gs.enabled = false;
   brw->clip.viewport_count = 1;

   brw->predicate.state = BRW_PREDICATE_STATE_RENDER;

   brw->max_gtt_map_object_size = screen->max_gtt_map_object_size;

   ctx->VertexProgram._MaintainTnlProgram = true;
   ctx->FragmentProgram._MaintainTexEnvProgram = true;

   brw_draw_init( brw );

   if ((flags & __DRI_CTX_FLAG_DEBUG) != 0) {
      /* Turn on some extra GL_ARB_debug_output generation. */
      brw->perf_debug = true;
   }

   if ((flags & __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS) != 0) {
      ctx->Const.ContextFlags |= GL_CONTEXT_FLAG_ROBUST_ACCESS_BIT_ARB;
      ctx->Const.RobustAccess = GL_TRUE;
   }

   if (INTEL_DEBUG & DEBUG_SHADER_TIME)
      brw_init_shader_time(brw);

   _mesa_compute_version(ctx);

   _mesa_initialize_dispatch_tables(ctx);
   _mesa_initialize_vbo_vtxfmt(ctx);

   if (ctx->Extensions.INTEL_performance_query)
      brw_init_performance_queries(brw);

   vbo_use_buffer_objects(ctx);
   vbo_always_unmap_buffers(ctx);

   return true;
}
void
intelDestroyContext(__DRIcontext * driContextPriv)
{
   struct brw_context *brw =
      (struct brw_context *) driContextPriv->driverPrivate;
   struct gl_context *ctx = &brw->ctx;
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   _mesa_meta_free(&brw->ctx);

   if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
      /* Force a report. */
      brw->shader_time.report_time = 0;

      brw_collect_and_report_shader_time(brw);
      brw_destroy_shader_time(brw);
   }

   if (devinfo->gen >= 6)
      blorp_finish(&brw->blorp);

   brw_destroy_state(brw);
   brw_draw_destroy(brw);

   brw_bo_unreference(brw->curbe.curbe_bo);
   if (brw->vs.base.scratch_bo)
      brw_bo_unreference(brw->vs.base.scratch_bo);
   if (brw->tcs.base.scratch_bo)
      brw_bo_unreference(brw->tcs.base.scratch_bo);
   if (brw->tes.base.scratch_bo)
      brw_bo_unreference(brw->tes.base.scratch_bo);
   if (brw->gs.base.scratch_bo)
      brw_bo_unreference(brw->gs.base.scratch_bo);
   if (brw->wm.base.scratch_bo)
      brw_bo_unreference(brw->wm.base.scratch_bo);

   brw_destroy_hw_context(brw->bufmgr, brw->hw_ctx);

   if (ctx->swrast_context) {
      _swsetup_DestroyContext(&brw->ctx);
      _tnl_DestroyContext(&brw->ctx);
   }
   _vbo_DestroyContext(&brw->ctx);

   if (ctx->swrast_context)
      _swrast_DestroyContext(&brw->ctx);

   brw_fini_pipe_control(brw);
   intel_batchbuffer_free(&brw->batch);

   brw_bo_unreference(brw->throttle_batch[1]);
   brw_bo_unreference(brw->throttle_batch[0]);
   brw->throttle_batch[1] = NULL;
   brw->throttle_batch[0] = NULL;

   driDestroyOptionCache(&brw->optionCache);

   /* free the Mesa context */
   _mesa_free_context_data(&brw->ctx);

   driContextPriv->driverPrivate = NULL;
}
intelUnbindContext(__DRIcontext * driContextPriv)
{
   /* Unset current context and dispatch table */
   _mesa_make_current(NULL, NULL, NULL);
/**
 * Fixes up the context for GLES2/3 with our default-to-sRGB-capable behavior
 * on window system framebuffers.
 *
 * Desktop GL is fairly reasonable in its handling of sRGB: You can ask if
 * your renderbuffer can do sRGB encode, and you can flip a switch that does
 * sRGB encode if the renderbuffer can handle it.  You can ask specifically
 * for a visual where you're guaranteed to be capable, but it turns out that
 * everyone just makes all their ARGB8888 visuals capable and doesn't offer
 * incapable ones, because there's no difference between the two in resources
 * used.  Applications thus get built that accidentally rely on the default
 * visual choice being sRGB, so we make ours sRGB capable.  Everything sounds
 * great.
 *
 * But for GLES2/3, they decided that it was silly to not turn on sRGB encode
 * for sRGB renderbuffers you made with the GL_EXT_texture_sRGB equivalent.
 * So they removed the enable knob and made it "if the renderbuffer is sRGB
 * capable, do sRGB encode".  Then, for your window system renderbuffers, you
 * can ask for sRGB visuals and get sRGB encode, or not ask for sRGB visuals
 * and get no sRGB encode (assuming that both kinds of visual are available).
 * Thus our choice to support sRGB by default on our visuals for desktop would
 * result in broken rendering of GLES apps that aren't expecting sRGB encode.
 *
 * Unfortunately, renderbuffer setup happens before a context is created.  So
 * in intel_screen.c we always set up sRGB, and here, if you're a GLES2/3
 * context (without an sRGB visual, though we don't have sRGB visuals exposed
 * yet), we go turn that back off before anyone finds out.
 */
static void
intel_gles3_srgb_workaround(struct brw_context *brw,
                            struct gl_framebuffer *fb)
{
   struct gl_context *ctx = &brw->ctx;

   if (_mesa_is_desktop_gl(ctx) || !fb->Visual.sRGBCapable)
      return;

   /* Some day when we support the sRGB capable bit on visuals available for
    * GLES, we'll need to respect that and not disable things here.
    */
   fb->Visual.sRGBCapable = false;
   for (int i = 0; i < BUFFER_COUNT; i++) {
      struct gl_renderbuffer *rb = fb->Attachment[i].Renderbuffer;
      if (rb)
         rb->Format = _mesa_get_srgb_format_linear(rb->Format);
   }
}
GLboolean
intelMakeCurrent(__DRIcontext * driContextPriv,
                 __DRIdrawable * driDrawPriv,
                 __DRIdrawable * driReadPriv)
{
   struct brw_context *brw;
   GET_CURRENT_CONTEXT(curCtx);

   if (driContextPriv)
      brw = (struct brw_context *) driContextPriv->driverPrivate;
   else
      brw = NULL;

   /* According to the glXMakeCurrent() man page: "Pending commands to
    * the previous context, if any, are flushed before it is released."
    * But only flush if we're actually changing contexts.
    */
   if (brw_context(curCtx) && brw_context(curCtx) != brw) {
      _mesa_flush(curCtx);
   }

   if (driContextPriv) {
      struct gl_context *ctx = &brw->ctx;
      struct gl_framebuffer *fb, *readFb;

      if (driDrawPriv == NULL) {
         fb = _mesa_get_incomplete_framebuffer();
      } else {
         fb = driDrawPriv->driverPrivate;
         driContextPriv->dri2.draw_stamp = driDrawPriv->dri2.stamp - 1;
      }

      if (driReadPriv == NULL) {
         readFb = _mesa_get_incomplete_framebuffer();
      } else {
         readFb = driReadPriv->driverPrivate;
         driContextPriv->dri2.read_stamp = driReadPriv->dri2.stamp - 1;
      }

      /* The sRGB workaround changes the renderbuffer's format. We must change
       * the format before the renderbuffer's miptree gets allocated, otherwise
       * the formats of the renderbuffer and its miptree will differ.
       */
      intel_gles3_srgb_workaround(brw, fb);
      intel_gles3_srgb_workaround(brw, readFb);

      /* If the context viewport hasn't been initialized, force a call out to
       * the loader to get buffers so we have a drawable size for the initial
       * viewport. */
      if (!brw->ctx.ViewportInitialized)
         intel_prepare_render(brw);

      _mesa_make_current(ctx, fb, readFb);
   } else {
      _mesa_make_current(NULL, NULL, NULL);
   }
static void
intel_resolve_for_dri2_flush(struct brw_context *brw,
                             __DRIdrawable *drawable)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   if (devinfo->gen < 6) {
      /* MSAA and fast color clear are not supported, so don't waste time
       * checking whether a resolve is needed.
       */
      return;
   }

   struct gl_framebuffer *fb = drawable->driverPrivate;
   struct intel_renderbuffer *rb;

   /* Usually, only the back buffer will need to be downsampled. However,
    * the front buffer will also need it if the user has rendered into it.
    */
   static const gl_buffer_index buffers[2] = {
      BUFFER_BACK_LEFT,
      BUFFER_FRONT_LEFT,
   };

   for (int i = 0; i < 2; ++i) {
      rb = intel_get_renderbuffer(fb, buffers[i]);
      if (rb == NULL || rb->mt == NULL)
         continue;
      if (rb->mt->surf.samples == 1) {
         assert(rb->mt_layer == 0 && rb->mt_level == 0 &&
                rb->layer_count == 1);
         intel_miptree_prepare_external(brw, rb->mt);
      } else {
         intel_renderbuffer_downsample(brw, rb);
      }
   }
}

static unsigned
intel_bits_per_pixel(const struct intel_renderbuffer *rb)
{
   return _mesa_get_format_bytes(intel_rb_format(rb)) * 8;
}
static void
intel_query_dri2_buffers(struct brw_context *brw,
                         __DRIdrawable *drawable,
                         __DRIbuffer **buffers,
                         int *buffer_count);

static void
intel_process_dri2_buffer(struct brw_context *brw,
                          __DRIdrawable *drawable,
                          __DRIbuffer *buffer,
                          struct intel_renderbuffer *rb,
                          const char *buffer_name);

static void
intel_update_image_buffers(struct brw_context *brw, __DRIdrawable *drawable);
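/* Re-query and reattach a drawable's buffers via the classic DRI2
 * GetBuffers path; used when the screen has no image loader.
 */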
static void
intel_update_dri2_buffers(struct brw_context *brw, __DRIdrawable *drawable)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;
   struct intel_renderbuffer *rb;
   __DRIbuffer *buffers = NULL;
   int count;
   const char *region_name;

   /* Set this up front, so that in case our buffers get invalidated
    * while we're getting new buffers, we don't clobber the stamp and
    * thus ignore the invalidate. */
   drawable->lastStamp = drawable->dri2.stamp;

   if (unlikely(INTEL_DEBUG & DEBUG_DRI))
      fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);

   intel_query_dri2_buffers(brw, drawable, &buffers, &count);

   if (buffers == NULL)
      return;

   for (int i = 0; i < count; i++) {
      switch (buffers[i].attachment) {
      case __DRI_BUFFER_FRONT_LEFT:
         rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
         region_name = "dri2 front buffer";
         break;

      case __DRI_BUFFER_FAKE_FRONT_LEFT:
         rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
         region_name = "dri2 fake front buffer";
         break;

      case __DRI_BUFFER_BACK_LEFT:
         rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
         region_name = "dri2 back buffer";
         break;

      case __DRI_BUFFER_DEPTH:
      case __DRI_BUFFER_HIZ:
      case __DRI_BUFFER_DEPTH_STENCIL:
      case __DRI_BUFFER_STENCIL:
      case __DRI_BUFFER_ACCUM:
         fprintf(stderr,
                 "unhandled buffer attach event, attachment type %d\n",
                 buffers[i].attachment);
         return;
      }

      intel_process_dri2_buffer(brw, drawable, &buffers[i], rb, region_name);
   }
}
*context
, __DRIdrawable
*drawable
)
1331 struct brw_context
*brw
= context
->driverPrivate
;
1332 __DRIscreen
*dri_screen
= brw
->screen
->driScrnPriv
;
1334 /* Set this up front, so that in case our buffers get invalidated
1335 * while we're getting new buffers, we don't clobber the stamp and
1336 * thus ignore the invalidate. */
1337 drawable
->lastStamp
= drawable
->dri2
.stamp
;
1339 if (unlikely(INTEL_DEBUG
& DEBUG_DRI
))
1340 fprintf(stderr
, "enter %s, drawable %p\n", __func__
, drawable
);
1342 if (dri_screen
->image
.loader
)
1343 intel_update_image_buffers(brw
, drawable
);
1345 intel_update_dri2_buffers(brw
, drawable
);
1347 driUpdateFramebufferSize(&brw
->ctx
, drawable
);
/**
 * intel_prepare_render should be called anywhere that current read/drawbuffer
 * state is required.
 */
void
intel_prepare_render(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   __DRIcontext *driContext = brw->driContext;
   __DRIdrawable *drawable;

   drawable = driContext->driDrawablePriv;
   if (drawable && drawable->dri2.stamp != driContext->dri2.draw_stamp) {
      if (drawable->lastStamp != drawable->dri2.stamp)
         intel_update_renderbuffers(driContext, drawable);
      driContext->dri2.draw_stamp = drawable->dri2.stamp;
   }

   drawable = driContext->driReadablePriv;
   if (drawable && drawable->dri2.stamp != driContext->dri2.read_stamp) {
      if (drawable->lastStamp != drawable->dri2.stamp)
         intel_update_renderbuffers(driContext, drawable);
      driContext->dri2.read_stamp = drawable->dri2.stamp;
   }

   /* If we're currently rendering to the front buffer, the rendering
    * that will happen next will probably dirty the front buffer.  So
    * mark it as dirty here.
    */
   if (_mesa_is_front_buffer_drawing(ctx->DrawBuffer))
      brw->front_buffer_dirty = true;
}
/**
 * \brief Query DRI2 to obtain a DRIdrawable's buffers.
 *
 * To determine which DRI buffers to request, examine the renderbuffers
 * attached to the drawable's framebuffer. Then request the buffers with
 * DRI2GetBuffers() or DRI2GetBuffersWithFormat().
 *
 * This is called from intel_update_renderbuffers().
 *
 * \param drawable      Drawable whose buffers are queried.
 * \param buffers       [out] List of buffers returned by DRI2 query.
 * \param buffer_count  [out] Number of buffers returned.
 *
 * \see intel_update_renderbuffers()
 * \see DRI2GetBuffers()
 * \see DRI2GetBuffersWithFormat()
 */
static void
intel_query_dri2_buffers(struct brw_context *brw,
                         __DRIdrawable *drawable,
                         __DRIbuffer **buffers,
                         int *buffer_count)
{
   __DRIscreen *dri_screen = brw->screen->driScrnPriv;
   struct gl_framebuffer *fb = drawable->driverPrivate;
   int i = 0;
   unsigned attachments[8];

   struct intel_renderbuffer *front_rb;
   struct intel_renderbuffer *back_rb;

   front_rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
   back_rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);

   memset(attachments, 0, sizeof(attachments));
   if ((_mesa_is_front_buffer_drawing(fb) ||
        _mesa_is_front_buffer_reading(fb) ||
        !back_rb) && front_rb) {
      /* If a fake front buffer is in use, then querying for
       * __DRI_BUFFER_FRONT_LEFT will cause the server to copy the image from
       * the real front buffer to the fake front buffer.  So before doing the
       * query, we need to make sure all the pending drawing has landed in the
       * real front buffer.
       */
      intel_batchbuffer_flush(brw);
      intel_flush_front(&brw->ctx);

      attachments[i++] = __DRI_BUFFER_FRONT_LEFT;
      attachments[i++] = intel_bits_per_pixel(front_rb);
   } else if (front_rb && brw->front_buffer_dirty) {
      /* We have pending front buffer rendering, but we aren't querying for a
       * front buffer.  If the front buffer we have is a fake front buffer,
       * the X server is going to throw it away when it processes the query.
       * So before doing the query, make sure all the pending drawing has
       * landed in the real front buffer.
       */
      intel_batchbuffer_flush(brw);
      intel_flush_front(&brw->ctx);
   }

   if (back_rb) {
      attachments[i++] = __DRI_BUFFER_BACK_LEFT;
      attachments[i++] = intel_bits_per_pixel(back_rb);
   }

   assert(i <= ARRAY_SIZE(attachments));

   *buffers =
      dri_screen->dri2.loader->getBuffersWithFormat(drawable,
                                                    &drawable->w,
                                                    &drawable->h,
                                                    attachments, i / 2,
                                                    buffer_count,
                                                    drawable->loaderPrivate);
}
/**
 * \brief Assign a DRI buffer's DRM region to a renderbuffer.
 *
 * This is called from intel_update_renderbuffers().
 *
 * DRI buffers whose attachment point is DRI2BufferStencil or
 * DRI2BufferDepthStencil are handled as special cases.
 *
 * \param buffer_name is a human readable name, such as "dri2 front buffer",
 *        that is passed to brw_bo_gem_create_from_name().
 *
 * \see intel_update_renderbuffers()
 */
static void
intel_process_dri2_buffer(struct brw_context *brw,
                          __DRIdrawable *drawable,
                          __DRIbuffer *buffer,
                          struct intel_renderbuffer *rb,
                          const char *buffer_name)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;
   struct brw_bo *bo;

   unsigned num_samples = rb->Base.Base.NumSamples;

   /* We try to avoid closing and reopening the same BO name, because the first
    * use of a mapping of the buffer involves a bunch of page faulting which is
    * moderately expensive.
    */
   struct intel_mipmap_tree *last_mt;
   if (num_samples == 0)
      last_mt = rb->mt;
   else
      last_mt = rb->singlesample_mt;

   uint32_t old_name = 0;
   if (last_mt) {
      /* The bo already has a name because the miptree was created by a
       * previous call to intel_process_dri2_buffer(). If a bo already has a
       * name, then brw_bo_flink() is a low-cost getter.  It does not
       * create a new name.
       */
      brw_bo_flink(last_mt->bo, &old_name);
   }

   if (old_name == buffer->name)
      return;

   if (unlikely(INTEL_DEBUG & DEBUG_DRI)) {
      fprintf(stderr,
              "attaching buffer %d, at %d, cpp %d, pitch %d\n",
              buffer->name, buffer->attachment,
              buffer->cpp, buffer->pitch);
   }

   bo = brw_bo_gem_create_from_name(brw->bufmgr, buffer_name,
                                    buffer->name);
   if (!bo) {
      fprintf(stderr,
              "Failed to open BO for returned DRI2 buffer "
              "(%dx%d, %s, named %d).\n"
              "This is likely a bug in the X Server that will lead to a "
              "crash soon.\n",
              drawable->w, drawable->h, buffer_name, buffer->name);
      return;
   }

   struct intel_mipmap_tree *mt =
      intel_miptree_create_for_bo(brw,
                                  bo,
                                  intel_rb_format(rb),
                                  0,
                                  drawable->w,
                                  drawable->h,
                                  1,
                                  buffer->pitch,
                                  MIPTREE_CREATE_DEFAULT);
   if (!mt) {
      brw_bo_unreference(bo);
      return;
   }

   /* We got this BO from X11.  We can't assume that we have coherent texture
    * access because X may suddenly decide to use it for scan-out which would
    * destroy coherency.
    */
   bo->cache_coherent = false;

   if (!intel_update_winsys_renderbuffer_miptree(brw, rb, mt,
                                                 drawable->w, drawable->h,
                                                 buffer->pitch)) {
      brw_bo_unreference(bo);
      intel_miptree_release(&mt);
      return;
   }

   if (_mesa_is_front_buffer_drawing(fb) &&
       (buffer->attachment == __DRI_BUFFER_FRONT_LEFT ||
        buffer->attachment == __DRI_BUFFER_FAKE_FRONT_LEFT) &&
       rb->Base.Base.NumSamples > 1) {
      intel_renderbuffer_upsample(brw, rb);
   }

   brw_bo_unreference(bo);
}
/**
 * \brief Query DRI image loader to obtain a DRIdrawable's buffers.
 *
 * To determine which DRI buffers to request, examine the renderbuffers
 * attached to the drawable's framebuffer. Then request the buffers from
 * the image loader.
 *
 * This is called from intel_update_renderbuffers().
 *
 * \param drawable      Drawable whose buffers are queried.
 * \param buffers       [out] List of buffers returned by DRI2 query.
 * \param buffer_count  [out] Number of buffers returned.
 *
 * \see intel_update_renderbuffers()
 */
static void
intel_update_image_buffer(struct brw_context *intel,
                          __DRIdrawable *drawable,
                          struct intel_renderbuffer *rb,
                          __DRIimage *buffer,
                          enum __DRIimageBufferMask buffer_type)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;

   if (!rb || !buffer->bo)
      return;

   unsigned num_samples = rb->Base.Base.NumSamples;

   /* Check and see if we're already bound to the right
    * buffer object
    */
   struct intel_mipmap_tree *last_mt;
   if (num_samples == 0)
      last_mt = rb->mt;
   else
      last_mt = rb->singlesample_mt;

   if (last_mt && last_mt->bo == buffer->bo)
      return;

   enum isl_colorspace colorspace;
   switch (_mesa_get_format_color_encoding(intel_rb_format(rb))) {
   case GL_SRGB:
      colorspace = ISL_COLORSPACE_SRGB;
      break;
   case GL_LINEAR:
      colorspace = ISL_COLORSPACE_LINEAR;
      break;
   default:
      unreachable("Invalid color encoding");
   }

   struct intel_mipmap_tree *mt =
      intel_miptree_create_for_dri_image(intel, buffer, GL_TEXTURE_2D,
                                         colorspace, true);
   if (!mt)
      return;

   if (!intel_update_winsys_renderbuffer_miptree(intel, rb, mt,
                                                 buffer->width, buffer->height,
                                                 buffer->pitch)) {
      intel_miptree_release(&mt);
      return;
   }

   if (_mesa_is_front_buffer_drawing(fb) &&
       buffer_type == __DRI_IMAGE_BUFFER_FRONT &&
       rb->Base.Base.NumSamples > 1) {
      intel_renderbuffer_upsample(intel, rb);
   }
}
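/* Image-loader counterpart of intel_update_dri2_buffers(): ask the loader
 * for front/back __DRIimages and rewrap them in the winsys renderbuffers.
 */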
static void
intel_update_image_buffers(struct brw_context *brw, __DRIdrawable *drawable)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;
   __DRIscreen *dri_screen = brw->screen->driScrnPriv;
   struct intel_renderbuffer *front_rb;
   struct intel_renderbuffer *back_rb;
   struct __DRIimageList images;
   mesa_format format;
   uint32_t buffer_mask = 0;
   int ret;

   front_rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
   back_rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);

   if (back_rb)
      format = intel_rb_format(back_rb);
   else if (front_rb)
      format = intel_rb_format(front_rb);
   else
      return;

   if (front_rb && (_mesa_is_front_buffer_drawing(fb) ||
                    _mesa_is_front_buffer_reading(fb) || !back_rb)) {
      buffer_mask |= __DRI_IMAGE_BUFFER_FRONT;
   }

   if (back_rb)
      buffer_mask |= __DRI_IMAGE_BUFFER_BACK;

   ret = dri_screen->image.loader->getBuffers(drawable,
                                              driGLFormatToImageFormat(format),
                                              &drawable->dri2.stamp,
                                              drawable->loaderPrivate,
                                              buffer_mask,
                                              &images);
   if (!ret)
      return;

   if (images.image_mask & __DRI_IMAGE_BUFFER_FRONT) {
      drawable->w = images.front->width;
      drawable->h = images.front->height;
      intel_update_image_buffer(brw,
                                drawable,
                                front_rb,
                                images.front,
                                __DRI_IMAGE_BUFFER_FRONT);
   }

   if (images.image_mask & __DRI_IMAGE_BUFFER_BACK) {
      drawable->w = images.back->width;
      drawable->h = images.back->height;
      intel_update_image_buffer(brw,
                                drawable,
                                back_rb,
                                images.back,
                                __DRI_IMAGE_BUFFER_BACK);
   }
}