/*
 Copyright 2003 VMware, Inc.
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */
34 #include "main/api_exec.h"
35 #include "main/context.h"
36 #include "main/fbobject.h"
37 #include "main/extensions.h"
38 #include "main/imports.h"
39 #include "main/macros.h"
40 #include "main/points.h"
41 #include "main/version.h"
42 #include "main/vtxfmt.h"
43 #include "main/texobj.h"
45 #include "vbo/vbo_context.h"
47 #include "drivers/common/driverfuncs.h"
48 #include "drivers/common/meta.h"
51 #include "brw_context.h"
52 #include "brw_defines.h"
53 #include "brw_shader.h"
55 #include "brw_state.h"
57 #include "intel_batchbuffer.h"
58 #include "intel_buffer_objects.h"
59 #include "intel_buffers.h"
60 #include "intel_fbo.h"
61 #include "intel_mipmap_tree.h"
62 #include "intel_pixel.h"
63 #include "intel_image.h"
64 #include "intel_tex.h"
65 #include "intel_tex_obj.h"
67 #include "swrast_setup/swrast_setup.h"
69 #include "tnl/t_pipeline.h"
70 #include "util/ralloc.h"
/***************************************
 * Mesa's Driver Functions
 ***************************************/
static size_t
brw_query_samples_for_format(struct gl_context *ctx, GLenum target,
                             GLenum internalFormat, int samples[16])
{
   struct brw_context *brw = brw_context(ctx);

   assert(brw->gen < 6);
   samples[0] = 0;
   return 1;
}
const char *const brw_vendor_string = "Intel Open Source Technology Center";
const char *
brw_get_renderer_string(unsigned deviceID)
{
   const char *chipset;
   static char buffer[128];

   switch (deviceID) {
#define CHIPSET(id, symbol, str) case id: chipset = str; break;
#include "pci_ids/i965_pci_ids.h"
   default:
      chipset = "Unknown Intel Chipset";
      break;
   }

   (void) driGetRendererString(buffer, chipset, 0);
   return buffer;
}
static const GLubyte *
intel_get_string(struct gl_context * ctx, GLenum name)
{
   const struct brw_context *const brw = brw_context(ctx);

   switch (name) {
   case GL_VENDOR:
      return (GLubyte *) brw_vendor_string;

   case GL_RENDERER:
      return
         (GLubyte *) brw_get_renderer_string(brw->intelScreen->deviceID);

   default:
      return NULL;
   }
}
static void
intel_viewport(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);
   __DRIcontext *driContext = brw->driContext;

   if (_mesa_is_winsys_fbo(ctx->DrawBuffer)) {
      dri2InvalidateDrawable(driContext->driDrawablePriv);
      dri2InvalidateDrawable(driContext->driReadablePriv);
   }
}
static void
intel_update_state(struct gl_context * ctx, GLuint new_state)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_texture_object *tex_obj;
   struct intel_renderbuffer *depth_irb;

   if (ctx->swrast_context)
      _swrast_InvalidateState(ctx, new_state);
   _vbo_InvalidateState(ctx, new_state);

   brw->NewGLState |= new_state;

   _mesa_unlock_context_textures(ctx);

   /* Resolve the depth buffer's HiZ buffer. */
   depth_irb = intel_get_renderbuffer(ctx->DrawBuffer, BUFFER_DEPTH);
   if (depth_irb)
      intel_renderbuffer_resolve_hiz(brw, depth_irb);

   /* Resolve depth buffer and render cache of each enabled texture. */
   int maxEnabledUnit = ctx->Texture._MaxEnabledTexImageUnit;
   for (int i = 0; i <= maxEnabledUnit; i++) {
      if (!ctx->Texture.Unit[i]._Current)
         continue;
      tex_obj = intel_texture_object(ctx->Texture.Unit[i]._Current);
      if (!tex_obj || !tex_obj->mt)
         continue;
      intel_miptree_all_slices_resolve_depth(brw, tex_obj->mt);
      intel_miptree_resolve_color(brw, tex_obj->mt);
      brw_render_cache_set_check_flush(brw, tex_obj->mt->bo);
   }

   _mesa_lock_context_textures(ctx);
}
#define flushFront(screen)      ((screen)->image.loader ? (screen)->image.loader->flushFrontBuffer : (screen)->dri2.loader->flushFrontBuffer)
static void
intel_flush_front(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);
   __DRIcontext *driContext = brw->driContext;
   __DRIdrawable *driDrawable = driContext->driDrawablePriv;
   __DRIscreen *const screen = brw->intelScreen->driScrnPriv;

   if (brw->front_buffer_dirty && _mesa_is_winsys_fbo(ctx->DrawBuffer)) {
      if (flushFront(screen) && driDrawable &&
          driDrawable->loaderPrivate) {

         /* Resolve before flushing FAKE_FRONT_LEFT to FRONT_LEFT.
          *
          * This potentially resolves both front and back buffer. It
          * is unnecessary to resolve the back, but harms nothing except
          * performance. And no one cares about front-buffer render
          * performance.
          */
         intel_resolve_for_dri2_flush(brw, driDrawable);
         intel_batchbuffer_flush(brw);

         flushFront(screen)(driDrawable, driDrawable->loaderPrivate);

         /* We set the dirty bit in intel_prepare_render() if we're
          * front buffer rendering once we get there.
          */
         brw->front_buffer_dirty = false;
      }
   }
}
static void
intel_glFlush(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);

   intel_batchbuffer_flush(brw);
   intel_flush_front(ctx);

   brw->need_flush_throttle = true;
}
static void
intel_finish(struct gl_context * ctx)
{
   struct brw_context *brw = brw_context(ctx);

   intel_glFlush(ctx);

   if (brw->batch.last_bo)
      drm_intel_bo_wait_rendering(brw->batch.last_bo);
}
static void
brw_init_driver_functions(struct brw_context *brw,
                          struct dd_function_table *functions)
{
   _mesa_init_driver_functions(functions);

   /* GLX uses DRI2 invalidate events to handle window resizing.
    * Unfortunately, EGL does not - libEGL is written in XCB (not Xlib),
    * which doesn't provide a mechanism for snooping the event queues.
    *
    * So EGL still relies on viewport hacks to handle window resizing.
    * This should go away with DRI3000.
    */
   if (!brw->driContext->driScreenPriv->dri2.useInvalidate)
      functions->Viewport = intel_viewport;

   functions->Flush = intel_glFlush;
   functions->Finish = intel_finish;
   functions->GetString = intel_get_string;
   functions->UpdateState = intel_update_state;

   intelInitTextureFuncs(functions);
   intelInitTextureImageFuncs(functions);
   intelInitTextureSubImageFuncs(functions);
   intelInitTextureCopyImageFuncs(functions);
   intelInitCopyImageFuncs(functions);
   intelInitClearFuncs(functions);
   intelInitBufferFuncs(functions);
   intelInitPixelFuncs(functions);
   intelInitBufferObjectFuncs(functions);
   intel_init_syncobj_functions(functions);
   brw_init_object_purgeable_functions(functions);

   brwInitFragProgFuncs( functions );
   brw_init_common_queryobj_functions(functions);
   if (brw->gen >= 6)
      gen6_init_queryobj_functions(functions);
   else
      gen4_init_queryobj_functions(functions);
   brw_init_compute_functions(functions);
   if (brw->gen >= 7)
      brw_init_conditional_render_functions(functions);

   functions->QuerySamplesForFormat = brw_query_samples_for_format;

   functions->NewTransformFeedback = brw_new_transform_feedback;
   functions->DeleteTransformFeedback = brw_delete_transform_feedback;
   functions->GetTransformFeedbackVertexCount =
      brw_get_transform_feedback_vertex_count;
   if (brw->gen >= 7) {
      functions->BeginTransformFeedback = gen7_begin_transform_feedback;
      functions->EndTransformFeedback = gen7_end_transform_feedback;
      functions->PauseTransformFeedback = gen7_pause_transform_feedback;
      functions->ResumeTransformFeedback = gen7_resume_transform_feedback;
   } else {
      functions->BeginTransformFeedback = brw_begin_transform_feedback;
      functions->EndTransformFeedback = brw_end_transform_feedback;
   }

   if (brw->gen >= 6)
      functions->GetSamplePosition = gen6_get_sample_position;
}
static void
brw_initialize_context_constants(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   unsigned max_samplers =
      brw->gen >= 8 || brw->is_haswell ? BRW_MAX_TEX_UNIT : 16;

   ctx->Const.QueryCounterBits.Timestamp = 36;

   ctx->Const.StripTextureBorder = true;

   ctx->Const.MaxDualSourceDrawBuffers = 1;
   ctx->Const.MaxDrawBuffers = BRW_MAX_DRAW_BUFFERS;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits = max_samplers;
   ctx->Const.MaxTextureCoordUnits = 8; /* Mesa limit */
   ctx->Const.MaxTextureUnits =
      MIN2(ctx->Const.MaxTextureCoordUnits,
           ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits);
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxTextureImageUnits = max_samplers;
   if (brw->gen >= 6)
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxTextureImageUnits = max_samplers;
   else
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxTextureImageUnits = 0;
   if (_mesa_extension_override_enables.ARB_compute_shader) {
      ctx->Const.Program[MESA_SHADER_COMPUTE].MaxTextureImageUnits = BRW_MAX_TEX_UNIT;
      ctx->Const.MaxUniformBufferBindings += 12;
   } else {
      ctx->Const.Program[MESA_SHADER_COMPUTE].MaxTextureImageUnits = 0;
   }
   ctx->Const.MaxCombinedTextureImageUnits =
      ctx->Const.Program[MESA_SHADER_VERTEX].MaxTextureImageUnits +
      ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits +
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxTextureImageUnits +
      ctx->Const.Program[MESA_SHADER_COMPUTE].MaxTextureImageUnits;
   ctx->Const.MaxTextureLevels = 14; /* 8192 */
   if (ctx->Const.MaxTextureLevels > MAX_TEXTURE_LEVELS)
      ctx->Const.MaxTextureLevels = MAX_TEXTURE_LEVELS;
   ctx->Const.Max3DTextureLevels = 12; /* 2048 */
   ctx->Const.MaxCubeTextureLevels = 14; /* 8192 */
   ctx->Const.MaxTextureMbytes = 1536;

   if (brw->gen >= 7)
      ctx->Const.MaxArrayTextureLayers = 2048;
   else
      ctx->Const.MaxArrayTextureLayers = 512;

   ctx->Const.MaxTextureRectSize = 1 << 12;

   ctx->Const.MaxTextureMaxAnisotropy = 16.0;

   ctx->Const.MaxRenderbufferSize = 8192;

   /* Hardware only supports a limited number of transform feedback buffers.
    * So we need to override the Mesa default (which is based only on software
    * limits).
    */
   ctx->Const.MaxTransformFeedbackBuffers = BRW_MAX_SOL_BUFFERS;

   /* On Gen6, in the worst case, we use up one binding table entry per
    * transform feedback component (see comments above the definition of
    * BRW_MAX_SOL_BINDINGS, in brw_context.h), so we need to advertise a value
    * for MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS equal to
    * BRW_MAX_SOL_BINDINGS.
    *
    * In "separate components" mode, we need to divide this value by
    * BRW_MAX_SOL_BUFFERS, so that the total number of binding table entries
    * used up by all buffers will not exceed BRW_MAX_SOL_BINDINGS.
    */
   ctx->Const.MaxTransformFeedbackInterleavedComponents = BRW_MAX_SOL_BINDINGS;
   ctx->Const.MaxTransformFeedbackSeparateComponents =
      BRW_MAX_SOL_BINDINGS / BRW_MAX_SOL_BUFFERS;
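
   /* Worked example (illustrative; assumes the brw_context.h values of the
    * time, BRW_MAX_SOL_BUFFERS = 4 and BRW_MAX_SOL_BINDINGS = 64): interleaved
    * mode then advertises 64 components, while separate mode advertises
    * 64 / 4 = 16 components per buffer, so four fully packed separate buffers
    * still consume at most 64 binding table entries.
    */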
   ctx->Const.AlwaysUseGetTransformFeedbackVertexCount = true;

   int max_samples;
   const int *msaa_modes = intel_supported_msaa_modes(brw->intelScreen);
   const int clamp_max_samples =
      driQueryOptioni(&brw->optionCache, "clamp_max_samples");

   if (clamp_max_samples < 0) {
      max_samples = msaa_modes[0];
   } else {
      /* Select the largest supported MSAA mode that does not exceed
       * clamp_max_samples.
       */
      max_samples = 0;
      for (int i = 0; msaa_modes[i] != 0; ++i) {
         if (msaa_modes[i] <= clamp_max_samples) {
            max_samples = msaa_modes[i];
            break;
         }
      }
   }

   ctx->Const.MaxSamples = max_samples;
   ctx->Const.MaxColorTextureSamples = max_samples;
   ctx->Const.MaxDepthTextureSamples = max_samples;
   ctx->Const.MaxIntegerSamples = max_samples;
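
   /* Illustrative example of the clamp applied above (sample counts are
    * hypothetical): with msaa_modes = {8, 4, 0} and a drirc setting of
    * clamp_max_samples = 6, the loop skips 8 and selects 4, so every
    * Max*Samples constant above becomes 4; with clamp_max_samples < 0 the
    * first (largest) mode, 8, is used unchanged.
    */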
   /* gen6_set_sample_maps() sets SampleMap{2,4,8}x variables which are used
    * to map indices of rectangular grid to sample numbers within a pixel.
    * These variables are used by GL_EXT_framebuffer_multisample_blit_scaled
    * extension implementation. For more details see the comment above
    * gen6_set_sample_maps() definition.
    */
   gen6_set_sample_maps(ctx);

   if (brw->gen >= 7)
      ctx->Const.MaxProgramTextureGatherComponents = 4;
   else if (brw->gen == 6)
      ctx->Const.MaxProgramTextureGatherComponents = 1;

   ctx->Const.MinLineWidth = 1.0;
   ctx->Const.MinLineWidthAA = 1.0;
   if (brw->gen >= 6) {
      ctx->Const.MaxLineWidth = 7.375;
      ctx->Const.MaxLineWidthAA = 7.375;
      ctx->Const.LineWidthGranularity = 0.125;
   } else {
      ctx->Const.MaxLineWidth = 7.0;
      ctx->Const.MaxLineWidthAA = 7.0;
      ctx->Const.LineWidthGranularity = 0.5;
   }

   /* For non-antialiased lines, we have to round the line width to the
    * nearest whole number. Make sure that we don't advertise a line
    * width that, when rounded, will be beyond the actual hardware
    * maximum.
    */
   assert(roundf(ctx->Const.MaxLineWidth) <= ctx->Const.MaxLineWidth);

   ctx->Const.MinPointSize = 1.0;
   ctx->Const.MinPointSizeAA = 1.0;
   ctx->Const.MaxPointSize = 255.0;
   ctx->Const.MaxPointSizeAA = 255.0;
   ctx->Const.PointSizeGranularity = 1.0;
   if (brw->gen >= 5 || brw->is_g4x)
      ctx->Const.MaxClipPlanes = 8;

   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeInstructions = 16 * 1024;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxAluInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxTexInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxTexIndirections = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAluInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTexInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTexIndirections = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAttribs = 16;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTemps = 256;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAddressRegs = 1;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeParameters = 1024;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxEnvParams =
      MIN2(ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeParameters,
           ctx->Const.Program[MESA_SHADER_VERTEX].MaxEnvParams);

   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeInstructions = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAluInstructions = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTexInstructions = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTexIndirections = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAttribs = 12;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTemps = 256;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAddressRegs = 0;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeParameters = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxEnvParams =
      MIN2(ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeParameters,
           ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxEnvParams);
   /* Fragment shaders use real, 32-bit twos-complement integers for all
    * integer types.
    */
   ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.RangeMin = 31;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.RangeMax = 30;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.Precision = 0;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].HighInt = ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MediumInt = ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt;

   ctx->Const.Program[MESA_SHADER_VERTEX].LowInt.RangeMin = 31;
   ctx->Const.Program[MESA_SHADER_VERTEX].LowInt.RangeMax = 30;
   ctx->Const.Program[MESA_SHADER_VERTEX].LowInt.Precision = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].HighInt = ctx->Const.Program[MESA_SHADER_VERTEX].LowInt;
   ctx->Const.Program[MESA_SHADER_VERTEX].MediumInt = ctx->Const.Program[MESA_SHADER_VERTEX].LowInt;

   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxAtomicCounters = MAX_ATOMIC_COUNTERS;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxAtomicCounters = MAX_ATOMIC_COUNTERS;
   ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxAtomicCounters = MAX_ATOMIC_COUNTERS;
   ctx->Const.Program[MESA_SHADER_COMPUTE].MaxAtomicCounters = MAX_ATOMIC_COUNTERS;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxAtomicBuffers = BRW_MAX_ABO;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxAtomicBuffers = BRW_MAX_ABO;
   ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxAtomicBuffers = BRW_MAX_ABO;
   ctx->Const.Program[MESA_SHADER_COMPUTE].MaxAtomicBuffers = BRW_MAX_ABO;
   ctx->Const.MaxCombinedAtomicBuffers = 3 * BRW_MAX_ABO;
   /* Gen6 converts quads to polygon in beginning of 3D pipeline,
    * but we're not sure how it's actually done for vertex order,
    * that affect provoking vertex decision. Always use last vertex
    * convention for quad primitive which works as expected for now.
    */
   if (brw->gen >= 6)
      ctx->Const.QuadsFollowProvokingVertexConvention = false;

   ctx->Const.NativeIntegers = true;
   ctx->Const.VertexID_is_zero_based = true;

   /* Regarding the CMP instruction, the Ivybridge PRM says:
    *
    *   "For each enabled channel 0b or 1b is assigned to the appropriate flag
    *    bit and 0/all zeros or all ones (e.g, byte 0xFF, word 0xFFFF, DWord
    *    0xFFFFFFFF) is assigned to dst."
    *
    * but PRMs for earlier generations say
    *
    *   "In dword format, one GRF may store up to 8 results. When the register
    *    is used later as a vector of Booleans, as only LSB at each channel
    *    contains meaning [sic] data, software should make sure all higher bits
    *    are masked out (e.g. by 'and-ing' an [sic] 0x01 constant)."
    *
    * We select the representation of a true boolean uniform to be ~0, and fix
    * the results of Gen <= 5 CMP instruction's with -(result & 1).
    */
   ctx->Const.UniformBooleanTrue = ~0;
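
   /* Sketch of the Gen <= 5 fixup mentioned above (illustration only; the
    * actual fixup is emitted by the compiler backend): if a CMP result `r`
    * has only its LSB defined, then -(r & 1) is 0x00000000 for false and
    * 0xFFFFFFFF (~0) for true, matching UniformBooleanTrue.
    */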
   /* From the gen4 PRM, volume 4 page 127:
    *
    *    "For SURFTYPE_BUFFER non-rendertarget surfaces, this field specifies
    *     the base address of the first element of the surface, computed in
    *     software by adding the surface base address to the byte offset of
    *     the element in the buffer."
    *
    * However, unaligned accesses are slower, so enforce buffer alignment.
    */
   ctx->Const.UniformBufferOffsetAlignment = 16;
   ctx->Const.TextureBufferOffsetAlignment = 16;
   ctx->Const.MaxTextureBufferSize = 128 * 1024 * 1024;

   if (brw->gen >= 6) {
      ctx->Const.MaxVarying = 32;
      ctx->Const.Program[MESA_SHADER_VERTEX].MaxOutputComponents = 128;
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxInputComponents = 64;
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxOutputComponents = 128;
      ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxInputComponents = 128;
   }

   /* We want the GLSL compiler to emit code that uses condition codes */
   for (int i = 0; i < MESA_SHADER_STAGES; i++) {
      ctx->Const.ShaderCompilerOptions[i] =
         brw->intelScreen->compiler->glsl_compiler_options[i];
   }

   /* ARB_viewport_array */
   if (brw->gen >= 6 && ctx->API == API_OPENGL_CORE) {
      ctx->Const.MaxViewports = GEN6_NUM_VIEWPORTS;
      ctx->Const.ViewportSubpixelBits = 0;

      /* Cast to float before negating because MaxViewportWidth is unsigned.
       */
      ctx->Const.ViewportBounds.Min = -(float)ctx->Const.MaxViewportWidth;
      ctx->Const.ViewportBounds.Max = ctx->Const.MaxViewportWidth;
   }

   /* ARB_gpu_shader5 */
   if (brw->gen >= 7)
      ctx->Const.MaxVertexStreams = MIN2(4, MAX_VERTEX_STREAMS);

   /* ARB_framebuffer_no_attachments */
   ctx->Const.MaxFramebufferWidth = ctx->Const.MaxViewportWidth;
   ctx->Const.MaxFramebufferHeight = ctx->Const.MaxViewportHeight;
   ctx->Const.MaxFramebufferLayers = ctx->Const.MaxArrayTextureLayers;
   ctx->Const.MaxFramebufferSamples = max_samples;
}
static void
brw_adjust_cs_context_constants(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   /* For ES, we set these constants based on SIMD8.
    *
    * TODO: Once we can always generate SIMD16, we should update this.
    *
    * For GL, we assume we can generate a SIMD16 program, but this currently
    * is not always true. This allows us to run more test cases, and will be
    * required based on desktop GL compute shader requirements.
    */
   const int simd_size = ctx->API == API_OPENGL_CORE ? 16 : 8;

   const uint32_t max_invocations = simd_size * brw->max_cs_threads;
   ctx->Const.MaxComputeWorkGroupSize[0] = max_invocations;
   ctx->Const.MaxComputeWorkGroupSize[1] = max_invocations;
   ctx->Const.MaxComputeWorkGroupSize[2] = max_invocations;
   ctx->Const.MaxComputeWorkGroupInvocations = max_invocations;
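
   /* Worked example (numbers are illustrative, not from any particular SKU):
    * with brw->max_cs_threads = 64 and a core-profile context
    * (simd_size = 16), max_invocations = 16 * 64 = 1024, which is advertised
    * both as MaxComputeWorkGroupInvocations and as each dimension of
    * MaxComputeWorkGroupSize.
    */
}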
/**
 * Process driconf (drirc) options, setting appropriate context flags.
 *
 * intelInitExtensions still pokes at optionCache directly, in order to
 * avoid advertising various extensions.  No flags are set, so it makes
 * sense to continue doing that there.
 */
static void
brw_process_driconf_options(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   driOptionCache *options = &brw->optionCache;
   driParseConfigFiles(options, &brw->intelScreen->optionCache,
                       brw->driContext->driScreenPriv->myNum, "i965");

   int bo_reuse_mode = driQueryOptioni(options, "bo_reuse");
   switch (bo_reuse_mode) {
   case DRI_CONF_BO_REUSE_DISABLED:
      break;
   case DRI_CONF_BO_REUSE_ALL:
      intel_bufmgr_gem_enable_reuse(brw->bufmgr);
      break;
   }

   if (!driQueryOptionb(options, "hiz")) {
      brw->has_hiz = false;
      /* On gen6, you can only do separate stencil with HIZ. */
      if (brw->gen == 6)
         brw->has_separate_stencil = false;
   }

   if (driQueryOptionb(options, "always_flush_batch")) {
      fprintf(stderr, "flushing batchbuffer before/after each draw call\n");
      brw->always_flush_batch = true;
   }

   if (driQueryOptionb(options, "always_flush_cache")) {
      fprintf(stderr, "flushing GPU caches before/after each draw call\n");
      brw->always_flush_cache = true;
   }

   if (driQueryOptionb(options, "disable_throttling")) {
      fprintf(stderr, "disabling flush throttling\n");
      brw->disable_throttling = true;
   }

   brw->precompile = driQueryOptionb(&brw->optionCache, "shader_precompile");

   ctx->Const.ForceGLSLExtensionsWarn =
      driQueryOptionb(options, "force_glsl_extensions_warn");

   ctx->Const.DisableGLSLLineContinuations =
      driQueryOptionb(options, "disable_glsl_line_continuations");

   ctx->Const.AllowGLSLExtensionDirectiveMidShader =
      driQueryOptionb(options, "allow_glsl_extension_directive_midshader");
}
GLboolean
brwCreateContext(gl_api api,
                 const struct gl_config *mesaVis,
                 __DRIcontext *driContextPriv,
                 unsigned major_version,
                 unsigned minor_version,
                 uint32_t flags,
                 bool notify_reset,
                 unsigned *dri_ctx_error,
                 void *sharedContextPrivate)
{
   __DRIscreen *sPriv = driContextPriv->driScreenPriv;
   struct gl_context *shareCtx = (struct gl_context *) sharedContextPrivate;
   struct intel_screen *screen = sPriv->driverPrivate;
   const struct brw_device_info *devinfo = screen->devinfo;
   struct dd_function_table functions;

   /* Only allow the __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS flag if the kernel
    * provides us with context reset notifications.
    */
   uint32_t allowed_flags = __DRI_CTX_FLAG_DEBUG
      | __DRI_CTX_FLAG_FORWARD_COMPATIBLE;

   if (screen->has_context_reset_notification)
      allowed_flags |= __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS;

   if (flags & ~allowed_flags) {
      *dri_ctx_error = __DRI_CTX_ERROR_UNKNOWN_FLAG;
      return false;
   }

   struct brw_context *brw = rzalloc(NULL, struct brw_context);
   if (!brw) {
      fprintf(stderr, "%s: failed to alloc context\n", __func__);
      *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
      return false;
   }
   driContextPriv->driverPrivate = brw;
   brw->driContext = driContextPriv;
   brw->intelScreen = screen;
   brw->bufmgr = screen->bufmgr;

   brw->gen = devinfo->gen;
   brw->gt = devinfo->gt;
   brw->is_g4x = devinfo->is_g4x;
   brw->is_baytrail = devinfo->is_baytrail;
   brw->is_haswell = devinfo->is_haswell;
   brw->is_cherryview = devinfo->is_cherryview;
   brw->is_broxton = devinfo->is_broxton;
   brw->has_llc = devinfo->has_llc;
   brw->has_hiz = devinfo->has_hiz_and_separate_stencil;
   brw->has_separate_stencil = devinfo->has_hiz_and_separate_stencil;
   brw->has_pln = devinfo->has_pln;
   brw->has_compr4 = devinfo->has_compr4;
   brw->has_surface_tile_offset = devinfo->has_surface_tile_offset;
   brw->has_negative_rhw_bug = devinfo->has_negative_rhw_bug;
   brw->needs_unlit_centroid_workaround =
      devinfo->needs_unlit_centroid_workaround;

   brw->must_use_separate_stencil = screen->hw_must_use_separate_stencil;
   brw->has_swizzling = screen->hw_has_swizzling;

   brw->vs.base.stage = MESA_SHADER_VERTEX;
   brw->gs.base.stage = MESA_SHADER_GEOMETRY;
   brw->wm.base.stage = MESA_SHADER_FRAGMENT;
   if (brw->gen >= 8) {
      gen8_init_vtable_surface_functions(brw);
      brw->vtbl.emit_depth_stencil_hiz = gen8_emit_depth_stencil_hiz;
   } else if (brw->gen >= 7) {
      gen7_init_vtable_surface_functions(brw);
      brw->vtbl.emit_depth_stencil_hiz = gen7_emit_depth_stencil_hiz;
   } else if (brw->gen >= 6) {
      gen6_init_vtable_surface_functions(brw);
      brw->vtbl.emit_depth_stencil_hiz = gen6_emit_depth_stencil_hiz;
   } else {
      gen4_init_vtable_surface_functions(brw);
      brw->vtbl.emit_depth_stencil_hiz = brw_emit_depth_stencil_hiz;
   }
   brw_init_driver_functions(brw, &functions);

   if (notify_reset)
      functions.GetGraphicsResetStatus = brw_get_graphics_reset_status;

   struct gl_context *ctx = &brw->ctx;

   if (!_mesa_initialize_context(ctx, api, mesaVis, shareCtx, &functions)) {
      *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
      fprintf(stderr, "%s: failed to init mesa context\n", __func__);
      intelDestroyContext(driContextPriv);
      return false;
   }

   driContextSetFlags(ctx, flags);

   /* Initialize the software rasterizer and helper modules.
    *
    * As of GL 3.1 core, the gen4+ driver doesn't need the swrast context for
    * software fallbacks (which we have to support on legacy GL to do weird
    * glDrawPixels(), glBitmap(), and other functions).
    */
   if (api != API_OPENGL_CORE && api != API_OPENGLES2) {
      _swrast_CreateContext(ctx);
   }

   _vbo_CreateContext(ctx);
   if (ctx->swrast_context) {
      _tnl_CreateContext(ctx);
      TNL_CONTEXT(ctx)->Driver.RunPipeline = _tnl_run_pipeline;
      _swsetup_CreateContext(ctx);

      /* Configure swrast to match hardware characteristics: */
      _swrast_allow_pixel_fog(ctx, false);
      _swrast_allow_vertex_fog(ctx, true);
   }

   _mesa_meta_init(ctx);

   brw_process_driconf_options(brw);

   if (INTEL_DEBUG & DEBUG_PERF)
      brw->perf_debug = true;

   brw_initialize_context_constants(brw);

   ctx->Const.ResetStrategy = notify_reset
      ? GL_LOSE_CONTEXT_ON_RESET_ARB : GL_NO_RESET_NOTIFICATION_ARB;

   /* Reinitialize the context point state. It depends on ctx->Const values. */
   _mesa_init_point(ctx);

   intel_batchbuffer_init(brw);

   if (brw->gen >= 6) {
      /* Create a new hardware context.  Using a hardware context means that
       * our GPU state will be saved/restored on context switch, allowing us
       * to assume that the GPU is in the same state we left it in.
       *
       * This is required for transform feedback buffer offsets, query objects,
       * and also allows us to reduce how much state we have to emit.
       */
      brw->hw_ctx = drm_intel_gem_context_create(brw->bufmgr);
      if (!brw->hw_ctx) {
         fprintf(stderr, "Gen6+ requires Kernel 3.6 or later.\n");
         intelDestroyContext(driContextPriv);
         return false;
      }
   }

   if (brw_init_pipe_control(brw, devinfo)) {
      *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
      intelDestroyContext(driContextPriv);
      return false;
   }

   intelInitExtensions(ctx);

   brw_init_surface_formats(brw);

   brw->max_vs_threads = devinfo->max_vs_threads;
   brw->max_hs_threads = devinfo->max_hs_threads;
   brw->max_ds_threads = devinfo->max_ds_threads;
   brw->max_gs_threads = devinfo->max_gs_threads;
   brw->max_wm_threads = devinfo->max_wm_threads;
   brw->max_cs_threads = devinfo->max_cs_threads;
   brw->urb.size = devinfo->urb.size;
   brw->urb.min_vs_entries = devinfo->urb.min_vs_entries;
   brw->urb.max_vs_entries = devinfo->urb.max_vs_entries;
   brw->urb.max_hs_entries = devinfo->urb.max_hs_entries;
   brw->urb.max_ds_entries = devinfo->urb.max_ds_entries;
   brw->urb.max_gs_entries = devinfo->urb.max_gs_entries;

   brw_adjust_cs_context_constants(brw);

   /* Estimate the size of the mappable aperture into the GTT.  There's an
    * ioctl to get the whole GTT size, but not one to get the mappable subset.
    * It turns out it's basically always 256MB, though some ancient hardware
    * was smaller.
    */
   uint32_t gtt_size = 256 * 1024 * 1024;

   /* We don't want to map two objects such that a memcpy between them would
    * just fault one mapping in and then the other over and over forever.  So
    * we would need to divide the GTT size by 2.  Additionally, some GTT is
    * taken up by things like the framebuffer and the ringbuffer and such, so
    * be more conservative.
    */
   brw->max_gtt_map_object_size = gtt_size / 4;
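
   /* With the 256MB estimate above, this caps an individually mappable
    * object at 256MB / 4 = 64MB, leaving headroom for a second mapping of
    * similar size plus framebuffer/ringbuffer overhead.
    */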
   brw->urb.gs_present = false;

   brw->prim_restart.in_progress = false;
   brw->prim_restart.enable_cut_index = false;
   brw->gs.enabled = false;
   brw->sf.viewport_transform_enable = true;

   brw->predicate.state = BRW_PREDICATE_STATE_RENDER;

   brw->use_resource_streamer = screen->has_resource_streamer &&
      (brw_env_var_as_boolean("INTEL_USE_HW_BT", false) ||
       brw_env_var_as_boolean("INTEL_USE_GATHER", false));

   ctx->VertexProgram._MaintainTnlProgram = true;
   ctx->FragmentProgram._MaintainTexEnvProgram = true;

   brw_draw_init( brw );

   if ((flags & __DRI_CTX_FLAG_DEBUG) != 0) {
      /* Turn on some extra GL_ARB_debug_output generation. */
      brw->perf_debug = true;
   }

   if ((flags & __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS) != 0)
      ctx->Const.ContextFlags |= GL_CONTEXT_FLAG_ROBUST_ACCESS_BIT_ARB;

   if (INTEL_DEBUG & DEBUG_SHADER_TIME)
      brw_init_shader_time(brw);

   _mesa_compute_version(ctx);

   _mesa_initialize_dispatch_tables(ctx);
   _mesa_initialize_vbo_vtxfmt(ctx);

   if (ctx->Extensions.AMD_performance_monitor) {
      brw_init_performance_monitors(brw);
   }

   vbo_use_buffer_objects(ctx);
   vbo_always_unmap_buffers(ctx);

   return true;
}
void
intelDestroyContext(__DRIcontext * driContextPriv)
{
   struct brw_context *brw =
      (struct brw_context *) driContextPriv->driverPrivate;
   struct gl_context *ctx = &brw->ctx;

   /* Dump a final BMP in case the application doesn't call SwapBuffers */
   if (INTEL_DEBUG & DEBUG_AUB) {
      intel_batchbuffer_flush(brw);
      aub_dump_bmp(&brw->ctx);
   }

   _mesa_meta_free(&brw->ctx);
   brw_meta_fast_clear_free(brw);

   if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
      /* Force a report. */
      brw->shader_time.report_time = 0;

      brw_collect_and_report_shader_time(brw);
      brw_destroy_shader_time(brw);
   }

   brw_destroy_state(brw);
   brw_draw_destroy(brw);

   drm_intel_bo_unreference(brw->curbe.curbe_bo);
   if (brw->vs.base.scratch_bo)
      drm_intel_bo_unreference(brw->vs.base.scratch_bo);
   if (brw->gs.base.scratch_bo)
      drm_intel_bo_unreference(brw->gs.base.scratch_bo);
   if (brw->wm.base.scratch_bo)
      drm_intel_bo_unreference(brw->wm.base.scratch_bo);

   if (brw->use_resource_streamer) {
      gen7_reset_hw_bt_pool_offsets(brw);
      drm_intel_bo_unreference(brw->hw_bt_pool.bo);
      brw->hw_bt_pool.bo = NULL;
   }

   drm_intel_gem_context_destroy(brw->hw_ctx);

   if (ctx->swrast_context) {
      _swsetup_DestroyContext(&brw->ctx);
      _tnl_DestroyContext(&brw->ctx);
   }
   _vbo_DestroyContext(&brw->ctx);

   if (ctx->swrast_context)
      _swrast_DestroyContext(&brw->ctx);

   brw_fini_pipe_control(brw);
   intel_batchbuffer_free(brw);

   drm_intel_bo_unreference(brw->throttle_batch[1]);
   drm_intel_bo_unreference(brw->throttle_batch[0]);
   brw->throttle_batch[1] = NULL;
   brw->throttle_batch[0] = NULL;

   driDestroyOptionCache(&brw->optionCache);

   /* free the Mesa context */
   _mesa_free_context_data(&brw->ctx);

   ralloc_free(brw);
   driContextPriv->driverPrivate = NULL;
}
GLboolean
intelUnbindContext(__DRIcontext * driContextPriv)
{
   /* Unset current context and dispatch table */
   _mesa_make_current(NULL, NULL, NULL);

   return true;
}
/**
 * Fixes up the context for GLES23 with our default-to-sRGB-capable behavior
 * on window system framebuffers.
 *
 * Desktop GL is fairly reasonable in its handling of sRGB: You can ask if
 * your renderbuffer can do sRGB encode, and you can flip a switch that does
 * sRGB encode if the renderbuffer can handle it.  You can ask specifically
 * for a visual where you're guaranteed to be capable, but it turns out that
 * everyone just makes all their ARGB8888 visuals capable and doesn't offer
 * incapable ones, because there's no difference between the two in resources
 * used.  Applications thus get built that accidentally rely on the default
 * visual choice being sRGB, so we make ours sRGB capable.  Everything sounds
 * great.
 *
 * But for GLES2/3, they decided that it was silly to not turn on sRGB encode
 * for sRGB renderbuffers you made with the GL_EXT_texture_sRGB equivalent.
 * So they removed the enable knob and made it "if the renderbuffer is sRGB
 * capable, do sRGB encode".  Then, for your window system renderbuffers, you
 * can ask for sRGB visuals and get sRGB encode, or not ask for sRGB visuals
 * and get no sRGB encode (assuming that both kinds of visual are available).
 * Thus our choice to support sRGB by default on our visuals for desktop would
 * result in broken rendering of GLES apps that aren't expecting sRGB encode.
 *
 * Unfortunately, renderbuffer setup happens before a context is created.  So
 * in intel_screen.c we always set up sRGB, and here, if you're a GLES2/3
 * context (without an sRGB visual, though we don't have sRGB visuals exposed
 * yet), we go turn that back off before anyone finds out.
 */
static void
intel_gles3_srgb_workaround(struct brw_context *brw,
                            struct gl_framebuffer *fb)
{
   struct gl_context *ctx = &brw->ctx;

   if (_mesa_is_desktop_gl(ctx) || !fb->Visual.sRGBCapable)
      return;

   /* Some day when we support the sRGB capable bit on visuals available for
    * GLES, we'll need to respect that and not disable things here.
    */
   fb->Visual.sRGBCapable = false;
   for (int i = 0; i < BUFFER_COUNT; i++) {
      if (fb->Attachment[i].Renderbuffer &&
          fb->Attachment[i].Renderbuffer->Format == MESA_FORMAT_B8G8R8A8_SRGB) {
         fb->Attachment[i].Renderbuffer->Format = MESA_FORMAT_B8G8R8A8_UNORM;
      }
   }
}
GLboolean
intelMakeCurrent(__DRIcontext * driContextPriv,
                 __DRIdrawable * driDrawPriv,
                 __DRIdrawable * driReadPriv)
{
   struct brw_context *brw;
   GET_CURRENT_CONTEXT(curCtx);

   if (driContextPriv)
      brw = (struct brw_context *) driContextPriv->driverPrivate;
   else
      brw = NULL;

   /* According to the glXMakeCurrent() man page: "Pending commands to
    * the previous context, if any, are flushed before it is released."
    * But only flush if we're actually changing contexts.
    */
   if (brw_context(curCtx) && brw_context(curCtx) != brw) {
      _mesa_flush(curCtx);
   }

   if (driContextPriv) {
      struct gl_context *ctx = &brw->ctx;
      struct gl_framebuffer *fb, *readFb;

      if (driDrawPriv == NULL) {
         fb = _mesa_get_incomplete_framebuffer();
      } else {
         fb = driDrawPriv->driverPrivate;
         driContextPriv->dri2.draw_stamp = driDrawPriv->dri2.stamp - 1;
      }

      if (driReadPriv == NULL) {
         readFb = _mesa_get_incomplete_framebuffer();
      } else {
         readFb = driReadPriv->driverPrivate;
         driContextPriv->dri2.read_stamp = driReadPriv->dri2.stamp - 1;
      }

      /* The sRGB workaround changes the renderbuffer's format. We must change
       * the format before the renderbuffer's miptree gets allocated, otherwise
       * the formats of the renderbuffer and its miptree will differ.
       */
      intel_gles3_srgb_workaround(brw, fb);
      intel_gles3_srgb_workaround(brw, readFb);

      /* If the context viewport hasn't been initialized, force a call out to
       * the loader to get buffers so we have a drawable size for the initial
       * viewport. */
      if (!brw->ctx.ViewportInitialized)
         intel_prepare_render(brw);

      _mesa_make_current(ctx, fb, readFb);
   } else {
      _mesa_make_current(NULL, NULL, NULL);
   }

   return true;
}
void
intel_resolve_for_dri2_flush(struct brw_context *brw,
                             __DRIdrawable *drawable)
{
   if (brw->gen < 6) {
      /* MSAA and fast color clear are not supported, so don't waste time
       * checking whether a resolve is needed.
       */
      return;
   }

   struct gl_framebuffer *fb = drawable->driverPrivate;
   struct intel_renderbuffer *rb;

   /* Usually, only the back buffer will need to be downsampled. However,
    * the front buffer will also need it if the user has rendered into it.
    */
   static const gl_buffer_index buffers[2] = {
         BUFFER_BACK_LEFT,
         BUFFER_FRONT_LEFT,
   };

   for (int i = 0; i < 2; ++i) {
      rb = intel_get_renderbuffer(fb, buffers[i]);
      if (rb == NULL || rb->mt == NULL)
         continue;
      if (rb->mt->num_samples <= 1)
         intel_miptree_resolve_color(brw, rb->mt);
      else
         intel_renderbuffer_downsample(brw, rb);
   }
}
static unsigned
intel_bits_per_pixel(const struct intel_renderbuffer *rb)
{
   return _mesa_get_format_bytes(intel_rb_format(rb)) * 8;
}
static void
intel_query_dri2_buffers(struct brw_context *brw,
                         __DRIdrawable *drawable,
                         __DRIbuffer **buffers,
                         int *buffer_count);

static void
intel_process_dri2_buffer(struct brw_context *brw,
                          __DRIdrawable *drawable,
                          __DRIbuffer *buffer,
                          struct intel_renderbuffer *rb,
                          const char *buffer_name);

static void
intel_update_image_buffers(struct brw_context *brw, __DRIdrawable *drawable);
static void
intel_update_dri2_buffers(struct brw_context *brw, __DRIdrawable *drawable)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;
   struct intel_renderbuffer *rb;
   __DRIbuffer *buffers = NULL;
   int i, count;
   const char *region_name;

   /* Set this up front, so that in case our buffers get invalidated
    * while we're getting new buffers, we don't clobber the stamp and
    * thus ignore the invalidate. */
   drawable->lastStamp = drawable->dri2.stamp;

   if (unlikely(INTEL_DEBUG & DEBUG_DRI))
      fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);

   intel_query_dri2_buffers(brw, drawable, &buffers, &count);

   if (buffers == NULL)
      return;

   for (i = 0; i < count; i++) {
      switch (buffers[i].attachment) {
      case __DRI_BUFFER_FRONT_LEFT:
         rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
         region_name = "dri2 front buffer";
         break;

      case __DRI_BUFFER_FAKE_FRONT_LEFT:
         rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
         region_name = "dri2 fake front buffer";
         break;

      case __DRI_BUFFER_BACK_LEFT:
         rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
         region_name = "dri2 back buffer";
         break;

      case __DRI_BUFFER_DEPTH:
      case __DRI_BUFFER_HIZ:
      case __DRI_BUFFER_DEPTH_STENCIL:
      case __DRI_BUFFER_STENCIL:
      case __DRI_BUFFER_ACCUM:
         fprintf(stderr,
                 "unhandled buffer attach event, attachment type %d\n",
                 buffers[i].attachment);
         return;
      }

      intel_process_dri2_buffer(brw, drawable, &buffers[i], rb, region_name);
   }
}
void
intel_update_renderbuffers(__DRIcontext *context, __DRIdrawable *drawable)
{
   struct brw_context *brw = context->driverPrivate;
   __DRIscreen *screen = brw->intelScreen->driScrnPriv;

   /* Set this up front, so that in case our buffers get invalidated
    * while we're getting new buffers, we don't clobber the stamp and
    * thus ignore the invalidate. */
   drawable->lastStamp = drawable->dri2.stamp;

   if (unlikely(INTEL_DEBUG & DEBUG_DRI))
      fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);

   if (screen->image.loader)
      intel_update_image_buffers(brw, drawable);
   else
      intel_update_dri2_buffers(brw, drawable);

   driUpdateFramebufferSize(&brw->ctx, drawable);
}
/**
 * intel_prepare_render should be called anywhere that current read/drawbuffer
 * state is required.
 */
void
intel_prepare_render(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   __DRIcontext *driContext = brw->driContext;
   __DRIdrawable *drawable;

   drawable = driContext->driDrawablePriv;
   if (drawable && drawable->dri2.stamp != driContext->dri2.draw_stamp) {
      if (drawable->lastStamp != drawable->dri2.stamp)
         intel_update_renderbuffers(driContext, drawable);
      driContext->dri2.draw_stamp = drawable->dri2.stamp;
   }

   drawable = driContext->driReadablePriv;
   if (drawable && drawable->dri2.stamp != driContext->dri2.read_stamp) {
      if (drawable->lastStamp != drawable->dri2.stamp)
         intel_update_renderbuffers(driContext, drawable);
      driContext->dri2.read_stamp = drawable->dri2.stamp;
   }

   /* If we're currently rendering to the front buffer, the rendering
    * that will happen next will probably dirty the front buffer.  So
    * mark it as dirty here.
    */
   if (brw_is_front_buffer_drawing(ctx->DrawBuffer))
      brw->front_buffer_dirty = true;
}
/**
 * \brief Query DRI2 to obtain a DRIdrawable's buffers.
 *
 * To determine which DRI buffers to request, examine the renderbuffers
 * attached to the drawable's framebuffer. Then request the buffers with
 * DRI2GetBuffers() or DRI2GetBuffersWithFormat().
 *
 * This is called from intel_update_renderbuffers().
 *
 * \param drawable      Drawable whose buffers are queried.
 * \param buffers       [out] List of buffers returned by DRI2 query.
 * \param buffer_count  [out] Number of buffers returned.
 *
 * \see intel_update_renderbuffers()
 * \see DRI2GetBuffers()
 * \see DRI2GetBuffersWithFormat()
 */
static void
intel_query_dri2_buffers(struct brw_context *brw,
                         __DRIdrawable *drawable,
                         __DRIbuffer **buffers,
                         int *buffer_count)
{
   __DRIscreen *screen = brw->intelScreen->driScrnPriv;
   struct gl_framebuffer *fb = drawable->driverPrivate;
   int i = 0;
   unsigned attachments[8];

   struct intel_renderbuffer *front_rb;
   struct intel_renderbuffer *back_rb;

   front_rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
   back_rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);

   memset(attachments, 0, sizeof(attachments));
   if ((brw_is_front_buffer_drawing(fb) ||
        brw_is_front_buffer_reading(fb) ||
        !back_rb) && front_rb) {
      /* If a fake front buffer is in use, then querying for
       * __DRI_BUFFER_FRONT_LEFT will cause the server to copy the image from
       * the real front buffer to the fake front buffer.  So before doing the
       * query, we need to make sure all the pending drawing has landed in the
       * real front buffer.
       */
      intel_batchbuffer_flush(brw);
      intel_flush_front(&brw->ctx);

      attachments[i++] = __DRI_BUFFER_FRONT_LEFT;
      attachments[i++] = intel_bits_per_pixel(front_rb);
   } else if (front_rb && brw->front_buffer_dirty) {
      /* We have pending front buffer rendering, but we aren't querying for a
       * front buffer.  If the front buffer we have is a fake front buffer,
       * the X server is going to throw it away when it processes the query.
       * So before doing the query, make sure all the pending drawing has
       * landed in the real front buffer.
       */
      intel_batchbuffer_flush(brw);
      intel_flush_front(&brw->ctx);
   }

   if (back_rb) {
      attachments[i++] = __DRI_BUFFER_BACK_LEFT;
      attachments[i++] = intel_bits_per_pixel(back_rb);
   }

   assert(i <= ARRAY_SIZE(attachments));

   *buffers = screen->dri2.loader->getBuffersWithFormat(drawable,
                                                        &drawable->w,
                                                        &drawable->h,
                                                        attachments, i / 2,
                                                        buffer_count,
                                                        drawable->loaderPrivate);
}
/**
 * \brief Assign a DRI buffer's DRM region to a renderbuffer.
 *
 * This is called from intel_update_renderbuffers().
 *
 * \par Note:
 *    DRI buffers whose attachment point is DRI2BufferStencil or
 *    DRI2BufferDepthStencil are handled as special cases.
 *
 * \param buffer_name is a human readable name, such as "dri2 front buffer",
 *        that is passed to drm_intel_bo_gem_create_from_name().
 *
 * \see intel_update_renderbuffers()
 */
static void
intel_process_dri2_buffer(struct brw_context *brw,
                          __DRIdrawable *drawable,
                          __DRIbuffer *buffer,
                          struct intel_renderbuffer *rb,
                          const char *buffer_name)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;
   drm_intel_bo *bo;

   if (!rb)
      return;

   unsigned num_samples = rb->Base.Base.NumSamples;

   /* We try to avoid closing and reopening the same BO name, because the first
    * use of a mapping of the buffer involves a bunch of page faulting which is
    * moderately expensive.
    */
   struct intel_mipmap_tree *last_mt;
   if (num_samples == 0)
      last_mt = rb->mt;
   else
      last_mt = rb->singlesample_mt;

   uint32_t old_name = 0;
   if (last_mt) {
      /* The bo already has a name because the miptree was created by a
       * previous call to intel_process_dri2_buffer(). If a bo already has a
       * name, then drm_intel_bo_flink() is a low-cost getter.  It does not
       * create a new name.
       */
      drm_intel_bo_flink(last_mt->bo, &old_name);
   }

   if (old_name == buffer->name)
      return;

   if (unlikely(INTEL_DEBUG & DEBUG_DRI)) {
      fprintf(stderr,
              "attaching buffer %d, at %d, cpp %d, pitch %d\n",
              buffer->name, buffer->attachment,
              buffer->cpp, buffer->pitch);
   }

   intel_miptree_release(&rb->mt);
   bo = drm_intel_bo_gem_create_from_name(brw->bufmgr, buffer_name,
                                          buffer->name);
   if (!bo) {
      fprintf(stderr,
              "Failed to open BO for returned DRI2 buffer "
              "(%dx%d, %s, named %d).\n"
              "This is likely a bug in the X Server that will lead to a "
              "crash soon.\n",
              drawable->w, drawable->h, buffer_name, buffer->name);
      return;
   }

   intel_update_winsys_renderbuffer_miptree(brw, rb, bo,
                                            drawable->w, drawable->h,
                                            buffer->pitch);

   if (brw_is_front_buffer_drawing(fb) &&
       (buffer->attachment == __DRI_BUFFER_FRONT_LEFT ||
        buffer->attachment == __DRI_BUFFER_FAKE_FRONT_LEFT) &&
       rb->Base.Base.NumSamples > 1) {
      intel_renderbuffer_upsample(brw, rb);
   }

   drm_intel_bo_unreference(bo);
}
/**
 * \brief Query DRI image loader to obtain a DRIdrawable's buffers.
 *
 * To determine which DRI buffers to request, examine the renderbuffers
 * attached to the drawable's framebuffer. Then request the buffers from
 * the image loader.
 *
 * This is called from intel_update_renderbuffers().
 *
 * \param drawable      Drawable whose buffers are queried.
 * \param buffers       [out] List of buffers returned by DRI2 query.
 * \param buffer_count  [out] Number of buffers returned.
 *
 * \see intel_update_renderbuffers()
 */
static void
intel_update_image_buffer(struct brw_context *intel,
                          __DRIdrawable *drawable,
                          struct intel_renderbuffer *rb,
                          __DRIimage *buffer,
                          enum __DRIimageBufferMask buffer_type)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;

   if (!rb || !buffer->bo)
      return;

   unsigned num_samples = rb->Base.Base.NumSamples;

   /* Check and see if we're already bound to the right
    * buffer object
    */
   struct intel_mipmap_tree *last_mt;
   if (num_samples == 0)
      last_mt = rb->mt;
   else
      last_mt = rb->singlesample_mt;

   if (last_mt && last_mt->bo == buffer->bo)
      return;

   intel_update_winsys_renderbuffer_miptree(intel, rb, buffer->bo,
                                            buffer->width, buffer->height,
                                            buffer->pitch);

   if (brw_is_front_buffer_drawing(fb) &&
       buffer_type == __DRI_IMAGE_BUFFER_FRONT &&
       rb->Base.Base.NumSamples > 1) {
      intel_renderbuffer_upsample(intel, rb);
   }
}
static void
intel_update_image_buffers(struct brw_context *brw, __DRIdrawable *drawable)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;
   __DRIscreen *screen = brw->intelScreen->driScrnPriv;
   struct intel_renderbuffer *front_rb;
   struct intel_renderbuffer *back_rb;
   struct __DRIimageList images;
   unsigned int format;
   uint32_t buffer_mask = 0;

   front_rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
   back_rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);

   if (back_rb)
      format = intel_rb_format(back_rb);
   else if (front_rb)
      format = intel_rb_format(front_rb);
   else
      return;

   if (front_rb && (brw_is_front_buffer_drawing(fb) ||
                    brw_is_front_buffer_reading(fb) || !back_rb)) {
      buffer_mask |= __DRI_IMAGE_BUFFER_FRONT;
   }

   if (back_rb)
      buffer_mask |= __DRI_IMAGE_BUFFER_BACK;

   (*screen->image.loader->getBuffers) (drawable,
                                        driGLFormatToImageFormat(format),
                                        &drawable->dri2.stamp,
                                        drawable->loaderPrivate,
                                        buffer_mask,
                                        &images);

   if (images.image_mask & __DRI_IMAGE_BUFFER_FRONT) {
      drawable->w = images.front->width;
      drawable->h = images.front->height;
      intel_update_image_buffer(brw,
                                drawable,
                                front_rb,
                                images.front,
                                __DRI_IMAGE_BUFFER_FRONT);
   }

   if (images.image_mask & __DRI_IMAGE_BUFFER_BACK) {
      drawable->w = images.back->width;
      drawable->h = images.back->height;
      intel_update_image_buffer(brw,
                                drawable,
                                back_rb,
                                images.back,
                                __DRI_IMAGE_BUFFER_BACK);
   }
}