/*
 Copyright 2003 VMware, Inc.
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */
#include "main/api_exec.h"
#include "main/context.h"
#include "main/fbobject.h"
#include "main/extensions.h"
#include "main/imports.h"
#include "main/macros.h"
#include "main/points.h"
#include "main/version.h"
#include "main/vtxfmt.h"
#include "main/texobj.h"

#include "vbo/vbo_context.h"

#include "drivers/common/driverfuncs.h"
#include "drivers/common/meta.h"

#include "brw_context.h"
#include "brw_defines.h"
#include "brw_shader.h"
#include "brw_draw.h"
#include "brw_state.h"

#include "intel_batchbuffer.h"
#include "intel_buffer_objects.h"
#include "intel_buffers.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
#include "intel_pixel.h"
#include "intel_image.h"
#include "intel_tex.h"
#include "intel_tex_obj.h"

#include "swrast/swrast.h"
#include "swrast_setup/swrast_setup.h"
#include "tnl/tnl.h"
#include "tnl/t_pipeline.h"
#include "util/ralloc.h"
/***************************************
 * Mesa's Driver Functions
 ***************************************/
static size_t
brw_query_samples_for_format(struct gl_context *ctx, GLenum target,
                             GLenum internalFormat, int samples[16])
{
   struct brw_context *brw = brw_context(ctx);

   /* ... per-generation multisample mode cases ... */

   assert(brw->gen < 6);
   samples[0] = 1;
   return 1;
}
const char *const brw_vendor_string = "Intel Open Source Technology Center";
const char *
brw_get_renderer_string(unsigned deviceID)
{
   const char *chipset;
   static char buffer[128];

   switch (deviceID) {
#define CHIPSET(id, symbol, str) case id: chipset = str; break;
#include "pci_ids/i965_pci_ids.h"
   default:
      chipset = "Unknown Intel Chipset";
      break;
   }

   (void) driGetRendererString(buffer, chipset, 0);
   return buffer;
}
static const GLubyte *
intel_get_string(struct gl_context * ctx, GLenum name)
{
   const struct brw_context *const brw = brw_context(ctx);

   switch (name) {
   case GL_VENDOR:
      return (GLubyte *) brw_vendor_string;

   case GL_RENDERER:
      return
         (GLubyte *) brw_get_renderer_string(brw->intelScreen->deviceID);

   default:
      return NULL;
   }
}
static void
intel_viewport(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);
   __DRIcontext *driContext = brw->driContext;

   if (_mesa_is_winsys_fbo(ctx->DrawBuffer)) {
      dri2InvalidateDrawable(driContext->driDrawablePriv);
      dri2InvalidateDrawable(driContext->driReadablePriv);
   }
}
static void
intel_update_state(struct gl_context * ctx, GLuint new_state)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_texture_object *tex_obj;
   struct intel_renderbuffer *depth_irb;

   if (ctx->swrast_context)
      _swrast_InvalidateState(ctx, new_state);
   _vbo_InvalidateState(ctx, new_state);

   brw->NewGLState |= new_state;

   _mesa_unlock_context_textures(ctx);

   /* Resolve the depth buffer's HiZ buffer. */
   depth_irb = intel_get_renderbuffer(ctx->DrawBuffer, BUFFER_DEPTH);
   if (depth_irb)
      intel_renderbuffer_resolve_hiz(brw, depth_irb);

   /* Resolve depth buffer and render cache of each enabled texture. */
   int maxEnabledUnit = ctx->Texture._MaxEnabledTexImageUnit;
   for (int i = 0; i <= maxEnabledUnit; i++) {
      if (!ctx->Texture.Unit[i]._Current)
         continue;
      tex_obj = intel_texture_object(ctx->Texture.Unit[i]._Current);
      if (!tex_obj || !tex_obj->mt)
         continue;
      intel_miptree_all_slices_resolve_depth(brw, tex_obj->mt);
      intel_miptree_resolve_color(brw, tex_obj->mt);
      brw_render_cache_set_check_flush(brw, tex_obj->mt->bo);
   }

   _mesa_lock_context_textures(ctx);
}
#define flushFront(screen)      ((screen)->image.loader ? (screen)->image.loader->flushFrontBuffer : (screen)->dri2.loader->flushFrontBuffer)
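/* flushFront() above simply picks whichever loader extension the screen was
 * created with: the newer DRI image loader if present, otherwise the classic
 * DRI2 loader. Both extensions expose a flushFrontBuffer() entry point with
 * the same (drawable, loaderPrivate) calling convention used below in
 * intel_flush_front().
 */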
static void
intel_flush_front(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);
   __DRIcontext *driContext = brw->driContext;
   __DRIdrawable *driDrawable = driContext->driDrawablePriv;
   __DRIscreen *const screen = brw->intelScreen->driScrnPriv;

   if (brw->front_buffer_dirty && _mesa_is_winsys_fbo(ctx->DrawBuffer)) {
      if (flushFront(screen) && driDrawable &&
          driDrawable->loaderPrivate) {

         /* Resolve before flushing FAKE_FRONT_LEFT to FRONT_LEFT.
          *
          * This potentially resolves both front and back buffer. It
          * is unnecessary to resolve the back, but harms nothing except
          * performance. And no one cares about front-buffer render
          * performance.
          */
         intel_resolve_for_dri2_flush(brw, driDrawable);
         intel_batchbuffer_flush(brw);

         flushFront(screen)(driDrawable, driDrawable->loaderPrivate);

         /* We set the dirty bit in intel_prepare_render() if we're
          * front buffer rendering once we get there.
          */
         brw->front_buffer_dirty = false;
      }
   }
}
static void
intel_glFlush(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);

   intel_batchbuffer_flush(brw);
   intel_flush_front(ctx);

   brw->need_flush_throttle = true;
}
static void
intel_finish(struct gl_context * ctx)
{
   struct brw_context *brw = brw_context(ctx);

   intel_glFlush(ctx);

   if (brw->batch.last_bo)
      drm_intel_bo_wait_rendering(brw->batch.last_bo);
}
static void
brw_init_driver_functions(struct brw_context *brw,
                          struct dd_function_table *functions)
{
   _mesa_init_driver_functions(functions);

   /* GLX uses DRI2 invalidate events to handle window resizing.
    * Unfortunately, EGL does not - libEGL is written in XCB (not Xlib),
    * which doesn't provide a mechanism for snooping the event queues.
    *
    * So EGL still relies on viewport hacks to handle window resizing.
    * This should go away with DRI3000.
    */
   if (!brw->driContext->driScreenPriv->dri2.useInvalidate)
      functions->Viewport = intel_viewport;

   functions->Flush = intel_glFlush;
   functions->Finish = intel_finish;
   functions->GetString = intel_get_string;
   functions->UpdateState = intel_update_state;

   intelInitTextureFuncs(functions);
   intelInitTextureImageFuncs(functions);
   intelInitTextureSubImageFuncs(functions);
   intelInitTextureCopyImageFuncs(functions);
   intelInitCopyImageFuncs(functions);
   intelInitClearFuncs(functions);
   intelInitBufferFuncs(functions);
   intelInitPixelFuncs(functions);
   intelInitBufferObjectFuncs(functions);
   intel_init_syncobj_functions(functions);
   brw_init_object_purgeable_functions(functions);

   brwInitFragProgFuncs( functions );
   brw_init_common_queryobj_functions(functions);
   if (brw->gen >= 6)
      gen6_init_queryobj_functions(functions);
   else
      gen4_init_queryobj_functions(functions);
   brw_init_compute_functions(functions);
   if (brw->gen >= 7)
      brw_init_conditional_render_functions(functions);

   functions->QuerySamplesForFormat = brw_query_samples_for_format;

   functions->NewTransformFeedback = brw_new_transform_feedback;
   functions->DeleteTransformFeedback = brw_delete_transform_feedback;
   functions->GetTransformFeedbackVertexCount =
      brw_get_transform_feedback_vertex_count;
   if (brw->gen >= 7) {
      functions->BeginTransformFeedback = gen7_begin_transform_feedback;
      functions->EndTransformFeedback = gen7_end_transform_feedback;
      functions->PauseTransformFeedback = gen7_pause_transform_feedback;
      functions->ResumeTransformFeedback = gen7_resume_transform_feedback;
   } else {
      functions->BeginTransformFeedback = brw_begin_transform_feedback;
      functions->EndTransformFeedback = brw_end_transform_feedback;
   }

   if (brw->gen >= 6)
      functions->GetSamplePosition = gen6_get_sample_position;
}
static void
brw_initialize_context_constants(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   unsigned max_samplers =
      brw->gen >= 8 || brw->is_haswell ? BRW_MAX_TEX_UNIT : 16;

   ctx->Const.QueryCounterBits.Timestamp = 36;

   ctx->Const.StripTextureBorder = true;

   ctx->Const.MaxUniformBlockSize = 65536;
   for (int i = 0; i < MESA_SHADER_STAGES; i++) {
      struct gl_program_constants *prog = &ctx->Const.Program[i];
      prog->MaxUniformBlocks = BRW_MAX_UBO;
      prog->MaxCombinedUniformComponents =
         prog->MaxUniformComponents +
         ctx->Const.MaxUniformBlockSize / 4 * prog->MaxUniformBlocks;
   }

   ctx->Const.MaxDualSourceDrawBuffers = 1;
   ctx->Const.MaxDrawBuffers = BRW_MAX_DRAW_BUFFERS;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits = max_samplers;
   ctx->Const.MaxTextureCoordUnits = 8; /* Mesa limit */
   ctx->Const.MaxTextureUnits =
      MIN2(ctx->Const.MaxTextureCoordUnits,
           ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits);
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxTextureImageUnits = max_samplers;
   if (brw->gen >= 6)
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxTextureImageUnits = max_samplers;
   else
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxTextureImageUnits = 0;
   if (_mesa_extension_override_enables.ARB_compute_shader) {
      ctx->Const.Program[MESA_SHADER_COMPUTE].MaxTextureImageUnits = BRW_MAX_TEX_UNIT;
      ctx->Const.MaxUniformBufferBindings += BRW_MAX_UBO;
   } else {
      ctx->Const.Program[MESA_SHADER_COMPUTE].MaxTextureImageUnits = 0;
   }
   ctx->Const.MaxCombinedTextureImageUnits =
      ctx->Const.Program[MESA_SHADER_VERTEX].MaxTextureImageUnits +
      ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits +
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxTextureImageUnits +
      ctx->Const.Program[MESA_SHADER_COMPUTE].MaxTextureImageUnits;
   ctx->Const.MaxTextureLevels = 14; /* 8192 */
   if (ctx->Const.MaxTextureLevels > MAX_TEXTURE_LEVELS)
      ctx->Const.MaxTextureLevels = MAX_TEXTURE_LEVELS;
   ctx->Const.Max3DTextureLevels = 12; /* 2048 */
   ctx->Const.MaxCubeTextureLevels = 14; /* 8192 */
   ctx->Const.MaxTextureMbytes = 1536;

   if (brw->gen >= 7)
      ctx->Const.MaxArrayTextureLayers = 2048;
   else
      ctx->Const.MaxArrayTextureLayers = 512;

   ctx->Const.MaxTextureRectSize = 1 << 12;

   ctx->Const.MaxTextureMaxAnisotropy = 16.0;

   ctx->Const.MaxRenderbufferSize = 8192;

   /* Hardware only supports a limited number of transform feedback buffers.
    * So we need to override the Mesa default (which is based only on software
    * limits).
    */
   ctx->Const.MaxTransformFeedbackBuffers = BRW_MAX_SOL_BUFFERS;

   /* On Gen6, in the worst case, we use up one binding table entry per
    * transform feedback component (see comments above the definition of
    * BRW_MAX_SOL_BINDINGS, in brw_context.h), so we need to advertise a value
    * for MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS equal to
    * BRW_MAX_SOL_BINDINGS.
    *
    * In "separate components" mode, we need to divide this value by
    * BRW_MAX_SOL_BUFFERS, so that the total number of binding table entries
    * used up by all buffers will not exceed BRW_MAX_SOL_BINDINGS.
    */
   ctx->Const.MaxTransformFeedbackInterleavedComponents = BRW_MAX_SOL_BINDINGS;
   ctx->Const.MaxTransformFeedbackSeparateComponents =
      BRW_MAX_SOL_BINDINGS / BRW_MAX_SOL_BUFFERS;

   ctx->Const.AlwaysUseGetTransformFeedbackVertexCount = true;

   int max_samples;
   const int *msaa_modes = intel_supported_msaa_modes(brw->intelScreen);
   const int clamp_max_samples =
      driQueryOptioni(&brw->optionCache, "clamp_max_samples");

   if (clamp_max_samples < 0) {
      max_samples = msaa_modes[0];
   } else {
      /* Select the largest supported MSAA mode that does not exceed
       * clamp_max_samples.
       */
      max_samples = 0;
      for (int i = 0; msaa_modes[i] != 0; ++i) {
         if (msaa_modes[i] <= clamp_max_samples) {
            max_samples = msaa_modes[i];
            break;
         }
      }
   }

   ctx->Const.MaxSamples = max_samples;
   ctx->Const.MaxColorTextureSamples = max_samples;
   ctx->Const.MaxDepthTextureSamples = max_samples;
   ctx->Const.MaxIntegerSamples = max_samples;

   /* gen6_set_sample_maps() sets SampleMap{2,4,8}x variables which are used
    * to map indices of rectangular grid to sample numbers within a pixel.
    * These variables are used by GL_EXT_framebuffer_multisample_blit_scaled
    * extension implementation. For more details see the comment above
    * gen6_set_sample_maps() definition.
    */
   gen6_set_sample_maps(ctx);
   if (brw->gen >= 7)
      ctx->Const.MaxProgramTextureGatherComponents = 4;
   else if (brw->gen == 6)
      ctx->Const.MaxProgramTextureGatherComponents = 1;

   ctx->Const.MinLineWidth = 1.0;
   ctx->Const.MinLineWidthAA = 1.0;
   if (brw->gen >= 6) {
      ctx->Const.MaxLineWidth = 7.375;
      ctx->Const.MaxLineWidthAA = 7.375;
      ctx->Const.LineWidthGranularity = 0.125;
   } else {
      ctx->Const.MaxLineWidth = 7.0;
      ctx->Const.MaxLineWidthAA = 7.0;
      ctx->Const.LineWidthGranularity = 0.5;
   }

   /* For non-antialiased lines, we have to round the line width to the
    * nearest whole number. Make sure that we don't advertise a line
    * width that, when rounded, will be beyond the actual hardware
    * maximum.
    */
   assert(roundf(ctx->Const.MaxLineWidth) <= ctx->Const.MaxLineWidth);

   ctx->Const.MinPointSize = 1.0;
   ctx->Const.MinPointSizeAA = 1.0;
   ctx->Const.MaxPointSize = 255.0;
   ctx->Const.MaxPointSizeAA = 255.0;
   ctx->Const.PointSizeGranularity = 1.0;

   if (brw->gen >= 5 || brw->is_g4x)
      ctx->Const.MaxClipPlanes = 8;

   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeInstructions = 16 * 1024;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxAluInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxTexInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxTexIndirections = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAluInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTexInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTexIndirections = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAttribs = 16;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTemps = 256;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAddressRegs = 1;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeParameters = 1024;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxEnvParams =
      MIN2(ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeParameters,
           ctx->Const.Program[MESA_SHADER_VERTEX].MaxEnvParams);
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeInstructions = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAluInstructions = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTexInstructions = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTexIndirections = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAttribs = 12;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTemps = 256;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAddressRegs = 0;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeParameters = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxEnvParams =
      MIN2(ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeParameters,
           ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxEnvParams);

   /* Fragment shaders use real, 32-bit twos-complement integers for all
    * integer types.
    */
   ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.RangeMin = 31;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.RangeMax = 30;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.Precision = 0;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].HighInt = ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MediumInt = ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt;

   ctx->Const.Program[MESA_SHADER_VERTEX].LowInt.RangeMin = 31;
   ctx->Const.Program[MESA_SHADER_VERTEX].LowInt.RangeMax = 30;
   ctx->Const.Program[MESA_SHADER_VERTEX].LowInt.Precision = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].HighInt = ctx->Const.Program[MESA_SHADER_VERTEX].LowInt;
   ctx->Const.Program[MESA_SHADER_VERTEX].MediumInt = ctx->Const.Program[MESA_SHADER_VERTEX].LowInt;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxAtomicCounters = MAX_ATOMIC_COUNTERS;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxAtomicCounters = MAX_ATOMIC_COUNTERS;
   ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxAtomicCounters = MAX_ATOMIC_COUNTERS;
   ctx->Const.Program[MESA_SHADER_COMPUTE].MaxAtomicCounters = MAX_ATOMIC_COUNTERS;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxAtomicBuffers = BRW_MAX_ABO;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxAtomicBuffers = BRW_MAX_ABO;
   ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxAtomicBuffers = BRW_MAX_ABO;
   ctx->Const.Program[MESA_SHADER_COMPUTE].MaxAtomicBuffers = BRW_MAX_ABO;
   ctx->Const.MaxCombinedAtomicBuffers = 3 * BRW_MAX_ABO;

   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxImageUniforms =
      BRW_MAX_IMAGES;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxImageUniforms =
      (brw->intelScreen->compiler->scalar_vs ? BRW_MAX_IMAGES : 0);
   ctx->Const.Program[MESA_SHADER_COMPUTE].MaxImageUniforms =
      BRW_MAX_IMAGES;
   ctx->Const.MaxImageUnits = MAX_IMAGE_UNITS;
   ctx->Const.MaxCombinedShaderOutputResources =
      MAX_IMAGE_UNITS + BRW_MAX_DRAW_BUFFERS;
   ctx->Const.MaxImageSamples = 0;
   ctx->Const.MaxCombinedImageUniforms = 3 * BRW_MAX_IMAGES;

   /* Gen6 converts quads to polygon in beginning of 3D pipeline,
    * but we're not sure how it's actually done for vertex order,
    * which affects the provoking vertex decision. Always use last vertex
    * convention for quad primitive which works as expected for now.
    */
   if (brw->gen >= 6)
      ctx->Const.QuadsFollowProvokingVertexConvention = false;

   ctx->Const.NativeIntegers = true;
   ctx->Const.VertexID_is_zero_based = true;
   /* Regarding the CMP instruction, the Ivybridge PRM says:
    *
    *   "For each enabled channel 0b or 1b is assigned to the appropriate flag
    *    bit and 0/all zeros or all ones (e.g, byte 0xFF, word 0xFFFF, DWord
    *    0xFFFFFFFF) is assigned to dst."
    *
    * but PRMs for earlier generations say
    *
    *   "In dword format, one GRF may store up to 8 results. When the register
    *    is used later as a vector of Booleans, as only LSB at each channel
    *    contains meaning [sic] data, software should make sure all higher bits
    *    are masked out (e.g. by 'and-ing' an [sic] 0x01 constant)."
    *
    * We select the representation of a true boolean uniform to be ~0, and fix
    * the results of Gen <= 5 CMP instructions with -(result & 1).
    */
   ctx->Const.UniformBooleanTrue = ~0;
   /* From the gen4 PRM, volume 4 page 127:
    *
    *    "For SURFTYPE_BUFFER non-rendertarget surfaces, this field specifies
    *     the base address of the first element of the surface, computed in
    *     software by adding the surface base address to the byte offset of
    *     the element in the buffer."
    *
    * However, unaligned accesses are slower, so enforce buffer alignment.
    */
   ctx->Const.UniformBufferOffsetAlignment = 16;

   /* ShaderStorageBufferOffsetAlignment should be a cacheline (64 bytes) so
    * that we can safely have the CPU and GPU writing the same SSBO on
    * non-cachecoherent systems (our Atom CPUs). With UBOs, the GPU never
    * writes, so there's no problem. For an SSBO, the GPU and the CPU can
    * be updating disjoint regions of the buffer simultaneously and that will
    * break if the regions overlap the same cacheline.
    */
   ctx->Const.ShaderStorageBufferOffsetAlignment = 64;
   ctx->Const.TextureBufferOffsetAlignment = 16;
   ctx->Const.MaxTextureBufferSize = 128 * 1024 * 1024;

   /* FIXME: Tessellation stages are not yet supported in i965, so
    * MaxCombinedShaderStorageBlocks doesn't take them into account.
    */
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxShaderStorageBlocks = BRW_MAX_SSBO;
   ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxShaderStorageBlocks = BRW_MAX_SSBO;
   ctx->Const.Program[MESA_SHADER_TESS_EVAL].MaxShaderStorageBlocks = 0;
   ctx->Const.Program[MESA_SHADER_TESS_CTRL].MaxShaderStorageBlocks = 0;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxShaderStorageBlocks = BRW_MAX_SSBO;
   ctx->Const.Program[MESA_SHADER_COMPUTE].MaxShaderStorageBlocks = BRW_MAX_SSBO;
   ctx->Const.MaxCombinedShaderStorageBlocks = BRW_MAX_SSBO * 3;
   ctx->Const.MaxShaderStorageBufferBindings = BRW_MAX_SSBO * 3;

   if (_mesa_extension_override_enables.ARB_compute_shader)
      ctx->Const.MaxShaderStorageBufferBindings += BRW_MAX_SSBO;
   ctx->Const.MaxVarying = 32;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxOutputComponents = 128;
   ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxInputComponents = 64;
   ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxOutputComponents = 128;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxInputComponents = 128;

   /* We want the GLSL compiler to emit code that uses condition codes */
   for (int i = 0; i < MESA_SHADER_STAGES; i++) {
      ctx->Const.ShaderCompilerOptions[i] =
         brw->intelScreen->compiler->glsl_compiler_options[i];
   }

   /* ARB_viewport_array */
   if (brw->gen >= 6 && ctx->API == API_OPENGL_CORE) {
      ctx->Const.MaxViewports = GEN6_NUM_VIEWPORTS;
      ctx->Const.ViewportSubpixelBits = 0;

      /* Cast to float before negating because MaxViewportWidth is unsigned.
       */
      ctx->Const.ViewportBounds.Min = -(float)ctx->Const.MaxViewportWidth;
      ctx->Const.ViewportBounds.Max = ctx->Const.MaxViewportWidth;
   }

   /* ARB_gpu_shader5 */
   if (brw->gen >= 7)
      ctx->Const.MaxVertexStreams = MIN2(4, MAX_VERTEX_STREAMS);

   /* ARB_framebuffer_no_attachments */
   ctx->Const.MaxFramebufferWidth = ctx->Const.MaxViewportWidth;
   ctx->Const.MaxFramebufferHeight = ctx->Const.MaxViewportHeight;
   ctx->Const.MaxFramebufferLayers = ctx->Const.MaxArrayTextureLayers;
   ctx->Const.MaxFramebufferSamples = max_samples;
}
static void
brw_adjust_cs_context_constants(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   /* For ES, we set these constants based on SIMD8.
    *
    * TODO: Once we can always generate SIMD16, we should update this.
    *
    * For GL, we assume we can generate a SIMD16 program, but this currently
    * is not always true. This allows us to run more test cases, and will be
    * required based on desktop GL compute shader requirements.
    */
   const int simd_size = ctx->API == API_OPENGL_CORE ? 16 : 8;

   const uint32_t max_invocations = simd_size * brw->max_cs_threads;
   ctx->Const.MaxComputeWorkGroupSize[0] = max_invocations;
   ctx->Const.MaxComputeWorkGroupSize[1] = max_invocations;
   ctx->Const.MaxComputeWorkGroupSize[2] = max_invocations;
   ctx->Const.MaxComputeWorkGroupInvocations = max_invocations;
}
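/* Worked example (illustrative numbers only): assuming a part that exposes
 * max_cs_threads = 64, a desktop core-profile context, which the code above
 * treats as SIMD16, would advertise 16 * 64 = 1024 for
 * GL_MAX_COMPUTE_WORK_GROUP_INVOCATIONS and for each axis of
 * GL_MAX_COMPUTE_WORK_GROUP_SIZE, while an ES context, treated as SIMD8,
 * would advertise 8 * 64 = 512.
 */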
/**
 * Process driconf (drirc) options, setting appropriate context flags.
 *
 * intelInitExtensions still pokes at optionCache directly, in order to
 * avoid advertising various extensions. No flags are set, so it makes
 * sense to continue doing that there.
 */
static void
brw_process_driconf_options(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   driOptionCache *options = &brw->optionCache;
   driParseConfigFiles(options, &brw->intelScreen->optionCache,
                       brw->driContext->driScreenPriv->myNum, "i965");

   int bo_reuse_mode = driQueryOptioni(options, "bo_reuse");
   switch (bo_reuse_mode) {
   case DRI_CONF_BO_REUSE_DISABLED:
      break;
   case DRI_CONF_BO_REUSE_ALL:
      intel_bufmgr_gem_enable_reuse(brw->bufmgr);
      break;
   }

   if (!driQueryOptionb(options, "hiz")) {
      brw->has_hiz = false;
      /* On gen6, you can only do separate stencil with HIZ. */
      if (brw->gen == 6)
         brw->has_separate_stencil = false;
   }

   if (driQueryOptionb(options, "always_flush_batch")) {
      fprintf(stderr, "flushing batchbuffer before/after each draw call\n");
      brw->always_flush_batch = true;
   }

   if (driQueryOptionb(options, "always_flush_cache")) {
      fprintf(stderr, "flushing GPU caches before/after each draw call\n");
      brw->always_flush_cache = true;
   }

   if (driQueryOptionb(options, "disable_throttling")) {
      fprintf(stderr, "disabling flush throttling\n");
      brw->disable_throttling = true;
   }

   brw->precompile = driQueryOptionb(&brw->optionCache, "shader_precompile");

   ctx->Const.ForceGLSLExtensionsWarn =
      driQueryOptionb(options, "force_glsl_extensions_warn");

   ctx->Const.DisableGLSLLineContinuations =
      driQueryOptionb(options, "disable_glsl_line_continuations");

   ctx->Const.AllowGLSLExtensionDirectiveMidShader =
      driQueryOptionb(options, "allow_glsl_extension_directive_midshader");
}
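/* Illustrative sketch of how these options reach the driver: the drirc files
 * parsed by driParseConfigFiles() above are plain XML, so a user could, for
 * example, force buffer-object reuse and per-draw batch flushing for this
 * driver with something like the following in ~/.drirc (the screen number
 * and values here are assumptions for illustration, not shipped defaults):
 *
 *   <driconf>
 *     <device screen="0" driver="i965">
 *       <application name="Default">
 *         <option name="bo_reuse" value="1"/>
 *         <option name="always_flush_batch" value="true"/>
 *       </application>
 *     </device>
 *   </driconf>
 *
 * Only option names actually queried in brw_process_driconf_options() or
 * intelInitExtensions() have any effect on this driver.
 */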
GLboolean
brwCreateContext(gl_api api,
                 const struct gl_config *mesaVis,
                 __DRIcontext *driContextPriv,
                 unsigned major_version,
                 unsigned minor_version,
                 uint32_t flags,
                 bool notify_reset,
                 unsigned *dri_ctx_error,
                 void *sharedContextPrivate)
{
   __DRIscreen *sPriv = driContextPriv->driScreenPriv;
   struct gl_context *shareCtx = (struct gl_context *) sharedContextPrivate;
   struct intel_screen *screen = sPriv->driverPrivate;
   const struct brw_device_info *devinfo = screen->devinfo;
   struct dd_function_table functions;

   /* Only allow the __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS flag if the kernel
    * provides us with context reset notifications.
    */
   uint32_t allowed_flags = __DRI_CTX_FLAG_DEBUG
      | __DRI_CTX_FLAG_FORWARD_COMPATIBLE;

   if (screen->has_context_reset_notification)
      allowed_flags |= __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS;

   if (flags & ~allowed_flags) {
      *dri_ctx_error = __DRI_CTX_ERROR_UNKNOWN_FLAG;
      return false;
   }
   struct brw_context *brw = rzalloc(NULL, struct brw_context);
   if (!brw) {
      fprintf(stderr, "%s: failed to alloc context\n", __func__);
      *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
      return false;
   }

   driContextPriv->driverPrivate = brw;
   brw->driContext = driContextPriv;
   brw->intelScreen = screen;
   brw->bufmgr = screen->bufmgr;

   brw->gen = devinfo->gen;
   brw->gt = devinfo->gt;
   brw->is_g4x = devinfo->is_g4x;
   brw->is_baytrail = devinfo->is_baytrail;
   brw->is_haswell = devinfo->is_haswell;
   brw->is_cherryview = devinfo->is_cherryview;
   brw->is_broxton = devinfo->is_broxton;
   brw->has_llc = devinfo->has_llc;
   brw->has_hiz = devinfo->has_hiz_and_separate_stencil;
   brw->has_separate_stencil = devinfo->has_hiz_and_separate_stencil;
   brw->has_pln = devinfo->has_pln;
   brw->has_compr4 = devinfo->has_compr4;
   brw->has_surface_tile_offset = devinfo->has_surface_tile_offset;
   brw->has_negative_rhw_bug = devinfo->has_negative_rhw_bug;
   brw->needs_unlit_centroid_workaround =
      devinfo->needs_unlit_centroid_workaround;

   brw->must_use_separate_stencil = screen->hw_must_use_separate_stencil;
   brw->has_swizzling = screen->hw_has_swizzling;

   brw->vs.base.stage = MESA_SHADER_VERTEX;
   brw->gs.base.stage = MESA_SHADER_GEOMETRY;
   brw->wm.base.stage = MESA_SHADER_FRAGMENT;
   if (brw->gen >= 8) {
      gen8_init_vtable_surface_functions(brw);
      brw->vtbl.emit_depth_stencil_hiz = gen8_emit_depth_stencil_hiz;
   } else if (brw->gen >= 7) {
      gen7_init_vtable_surface_functions(brw);
      brw->vtbl.emit_depth_stencil_hiz = gen7_emit_depth_stencil_hiz;
   } else if (brw->gen >= 6) {
      gen6_init_vtable_surface_functions(brw);
      brw->vtbl.emit_depth_stencil_hiz = gen6_emit_depth_stencil_hiz;
   } else {
      gen4_init_vtable_surface_functions(brw);
      brw->vtbl.emit_depth_stencil_hiz = brw_emit_depth_stencil_hiz;
   }

   brw_init_driver_functions(brw, &functions);

   if (notify_reset)
      functions.GetGraphicsResetStatus = brw_get_graphics_reset_status;

   struct gl_context *ctx = &brw->ctx;

   if (!_mesa_initialize_context(ctx, api, mesaVis, shareCtx, &functions)) {
      *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
      fprintf(stderr, "%s: failed to init mesa context\n", __func__);
      intelDestroyContext(driContextPriv);
      return false;
   }

   driContextSetFlags(ctx, flags);
   /* Initialize the software rasterizer and helper modules.
    *
    * As of GL 3.1 core, the gen4+ driver doesn't need the swrast context for
    * software fallbacks (which we have to support on legacy GL to do weird
    * glDrawPixels(), glBitmap(), and other functions).
    */
   if (api != API_OPENGL_CORE && api != API_OPENGLES2) {
      _swrast_CreateContext(ctx);
   }

   _vbo_CreateContext(ctx);
   if (ctx->swrast_context) {
      _tnl_CreateContext(ctx);
      TNL_CONTEXT(ctx)->Driver.RunPipeline = _tnl_run_pipeline;
      _swsetup_CreateContext(ctx);

      /* Configure swrast to match hardware characteristics: */
      _swrast_allow_pixel_fog(ctx, false);
      _swrast_allow_vertex_fog(ctx, true);
   }

   _mesa_meta_init(ctx);

   brw_process_driconf_options(brw);

   if (INTEL_DEBUG & DEBUG_PERF)
      brw->perf_debug = true;

   brw_initialize_context_constants(brw);

   ctx->Const.ResetStrategy = notify_reset
      ? GL_LOSE_CONTEXT_ON_RESET_ARB : GL_NO_RESET_NOTIFICATION_ARB;

   /* Reinitialize the context point state. It depends on ctx->Const values. */
   _mesa_init_point(ctx);
   intel_batchbuffer_init(brw);

   if (brw->gen >= 6) {
      /* Create a new hardware context. Using a hardware context means that
       * our GPU state will be saved/restored on context switch, allowing us
       * to assume that the GPU is in the same state we left it in.
       *
       * This is required for transform feedback buffer offsets, query objects,
       * and also allows us to reduce how much state we have to emit.
       */
      brw->hw_ctx = drm_intel_gem_context_create(brw->bufmgr);

      if (!brw->hw_ctx) {
         fprintf(stderr, "Gen6+ requires Kernel 3.6 or later.\n");
         intelDestroyContext(driContextPriv);
         return false;
      }
   }

   if (brw_init_pipe_control(brw, devinfo)) {
      *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
      intelDestroyContext(driContextPriv);
      return false;
   }

   intelInitExtensions(ctx);

   brw_init_surface_formats(brw);

   brw->max_vs_threads = devinfo->max_vs_threads;
   brw->max_hs_threads = devinfo->max_hs_threads;
   brw->max_ds_threads = devinfo->max_ds_threads;
   brw->max_gs_threads = devinfo->max_gs_threads;
   brw->max_wm_threads = devinfo->max_wm_threads;
   brw->max_cs_threads = devinfo->max_cs_threads;
   brw->urb.size = devinfo->urb.size;
   brw->urb.min_vs_entries = devinfo->urb.min_vs_entries;
   brw->urb.max_vs_entries = devinfo->urb.max_vs_entries;
   brw->urb.max_hs_entries = devinfo->urb.max_hs_entries;
   brw->urb.max_ds_entries = devinfo->urb.max_ds_entries;
   brw->urb.max_gs_entries = devinfo->urb.max_gs_entries;
);
894 /* Estimate the size of the mappable aperture into the GTT. There's an
895 * ioctl to get the whole GTT size, but not one to get the mappable subset.
896 * It turns out it's basically always 256MB, though some ancient hardware
899 uint32_t gtt_size
= 256 * 1024 * 1024;
901 /* We don't want to map two objects such that a memcpy between them would
902 * just fault one mapping in and then the other over and over forever. So
903 * we would need to divide the GTT size by 2. Additionally, some GTT is
904 * taken up by things like the framebuffer and the ringbuffer and such, so
905 * be more conservative.
907 brw
->max_gtt_map_object_size
= gtt_size
/ 4;
910 brw
->urb
.gs_present
= false;
912 brw
->prim_restart
.in_progress
= false;
913 brw
->prim_restart
.enable_cut_index
= false;
914 brw
->gs
.enabled
= false;
915 brw
->sf
.viewport_transform_enable
= true;
917 brw
->predicate
.state
= BRW_PREDICATE_STATE_RENDER
;
919 brw
->use_resource_streamer
= screen
->has_resource_streamer
&&
920 (brw_env_var_as_boolean("INTEL_USE_HW_BT", false) ||
921 brw_env_var_as_boolean("INTEL_USE_GATHER", false));
923 ctx
->VertexProgram
._MaintainTnlProgram
= true;
924 ctx
->FragmentProgram
._MaintainTexEnvProgram
= true;
926 brw_draw_init( brw
);
928 if ((flags
& __DRI_CTX_FLAG_DEBUG
) != 0) {
929 /* Turn on some extra GL_ARB_debug_output generation. */
930 brw
->perf_debug
= true;
933 if ((flags
& __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS
) != 0)
934 ctx
->Const
.ContextFlags
|= GL_CONTEXT_FLAG_ROBUST_ACCESS_BIT_ARB
;
936 if (INTEL_DEBUG
& DEBUG_SHADER_TIME
)
937 brw_init_shader_time(brw
);
939 _mesa_compute_version(ctx
);
941 _mesa_initialize_dispatch_tables(ctx
);
942 _mesa_initialize_vbo_vtxfmt(ctx
);
944 if (ctx
->Extensions
.AMD_performance_monitor
) {
945 brw_init_performance_monitors(brw
);
948 vbo_use_buffer_objects(ctx
);
949 vbo_always_unmap_buffers(ctx
);
void
intelDestroyContext(__DRIcontext * driContextPriv)
{
   struct brw_context *brw =
      (struct brw_context *) driContextPriv->driverPrivate;
   struct gl_context *ctx = &brw->ctx;

   /* Dump a final BMP in case the application doesn't call SwapBuffers */
   if (INTEL_DEBUG & DEBUG_AUB) {
      intel_batchbuffer_flush(brw);
      aub_dump_bmp(&brw->ctx);
   }

   _mesa_meta_free(&brw->ctx);
   brw_meta_fast_clear_free(brw);

   if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
      /* Force a report. */
      brw->shader_time.report_time = 0;

      brw_collect_and_report_shader_time(brw);
      brw_destroy_shader_time(brw);
   }

   brw_destroy_state(brw);
   brw_draw_destroy(brw);

   drm_intel_bo_unreference(brw->curbe.curbe_bo);
   if (brw->vs.base.scratch_bo)
      drm_intel_bo_unreference(brw->vs.base.scratch_bo);
   if (brw->gs.base.scratch_bo)
      drm_intel_bo_unreference(brw->gs.base.scratch_bo);
   if (brw->wm.base.scratch_bo)
      drm_intel_bo_unreference(brw->wm.base.scratch_bo);

   gen7_reset_hw_bt_pool_offsets(brw);
   drm_intel_bo_unreference(brw->hw_bt_pool.bo);
   brw->hw_bt_pool.bo = NULL;

   drm_intel_gem_context_destroy(brw->hw_ctx);

   if (ctx->swrast_context) {
      _swsetup_DestroyContext(&brw->ctx);
      _tnl_DestroyContext(&brw->ctx);
   }
   _vbo_DestroyContext(&brw->ctx);

   if (ctx->swrast_context)
      _swrast_DestroyContext(&brw->ctx);

   brw_fini_pipe_control(brw);
   intel_batchbuffer_free(brw);

   drm_intel_bo_unreference(brw->throttle_batch[1]);
   drm_intel_bo_unreference(brw->throttle_batch[0]);
   brw->throttle_batch[1] = NULL;
   brw->throttle_batch[0] = NULL;

   driDestroyOptionCache(&brw->optionCache);

   /* free the Mesa context */
   _mesa_free_context_data(&brw->ctx);

   ralloc_free(brw);
   driContextPriv->driverPrivate = NULL;
}
GLboolean
intelUnbindContext(__DRIcontext * driContextPriv)
{
   /* Unset current context and dispatch table */
   _mesa_make_current(NULL, NULL, NULL);

   return true;
}
/**
 * Fixes up the context for GLES 2/3 with our default-to-sRGB-capable behavior
 * on window system framebuffers.
 *
 * Desktop GL is fairly reasonable in its handling of sRGB: You can ask if
 * your renderbuffer can do sRGB encode, and you can flip a switch that does
 * sRGB encode if the renderbuffer can handle it. You can ask specifically
 * for a visual where you're guaranteed to be capable, but it turns out that
 * everyone just makes all their ARGB8888 visuals capable and doesn't offer
 * incapable ones, because there's no difference between the two in resources
 * used. Applications thus get built that accidentally rely on the default
 * visual choice being sRGB, so we make ours sRGB capable. Everything sounds
 * great.
 *
 * But for GLES2/3, they decided that it was silly to not turn on sRGB encode
 * for sRGB renderbuffers you made with the GL_EXT_texture_sRGB equivalent.
 * So they removed the enable knob and made it "if the renderbuffer is sRGB
 * capable, do sRGB encode". Then, for your window system renderbuffers, you
 * can ask for sRGB visuals and get sRGB encode, or not ask for sRGB visuals
 * and get no sRGB encode (assuming that both kinds of visual are available).
 * Thus our choice to support sRGB by default on our visuals for desktop would
 * result in broken rendering of GLES apps that aren't expecting sRGB encode.
 *
 * Unfortunately, renderbuffer setup happens before a context is created. So
 * in intel_screen.c we always set up sRGB, and here, if you're a GLES2/3
 * context (without an sRGB visual, though we don't have sRGB visuals exposed
 * yet), we go turn that back off before anyone finds out.
 */
static void
intel_gles3_srgb_workaround(struct brw_context *brw,
                            struct gl_framebuffer *fb)
{
   struct gl_context *ctx = &brw->ctx;

   if (_mesa_is_desktop_gl(ctx) || !fb->Visual.sRGBCapable)
      return;

   /* Some day when we support the sRGB capable bit on visuals available for
    * GLES, we'll need to respect that and not disable things here.
    */
   fb->Visual.sRGBCapable = false;
   for (int i = 0; i < BUFFER_COUNT; i++) {
      if (fb->Attachment[i].Renderbuffer &&
          fb->Attachment[i].Renderbuffer->Format == MESA_FORMAT_B8G8R8A8_SRGB) {
         fb->Attachment[i].Renderbuffer->Format = MESA_FORMAT_B8G8R8A8_UNORM;
      }
   }
}
GLboolean
intelMakeCurrent(__DRIcontext * driContextPriv,
                 __DRIdrawable * driDrawPriv,
                 __DRIdrawable * driReadPriv)
{
   struct brw_context *brw;
   GET_CURRENT_CONTEXT(curCtx);

   if (driContextPriv)
      brw = (struct brw_context *) driContextPriv->driverPrivate;
   else
      brw = NULL;

   /* According to the glXMakeCurrent() man page: "Pending commands to
    * the previous context, if any, are flushed before it is released."
    * But only flush if we're actually changing contexts.
    */
   if (brw_context(curCtx) && brw_context(curCtx) != brw) {
      _mesa_flush(curCtx);
   }

   if (driContextPriv) {
      struct gl_context *ctx = &brw->ctx;
      struct gl_framebuffer *fb, *readFb;

      if (driDrawPriv == NULL) {
         fb = _mesa_get_incomplete_framebuffer();
      } else {
         fb = driDrawPriv->driverPrivate;
         driContextPriv->dri2.draw_stamp = driDrawPriv->dri2.stamp - 1;
      }

      if (driReadPriv == NULL) {
         readFb = _mesa_get_incomplete_framebuffer();
      } else {
         readFb = driReadPriv->driverPrivate;
         driContextPriv->dri2.read_stamp = driReadPriv->dri2.stamp - 1;
      }

      /* The sRGB workaround changes the renderbuffer's format. We must change
       * the format before the renderbuffer's miptree gets allocated, otherwise
       * the formats of the renderbuffer and its miptree will differ.
       */
      intel_gles3_srgb_workaround(brw, fb);
      intel_gles3_srgb_workaround(brw, readFb);

      /* If the context viewport hasn't been initialized, force a call out to
       * the loader to get buffers so we have a drawable size for the initial
       * viewport. */
      if (!brw->ctx.ViewportInitialized)
         intel_prepare_render(brw);

      _mesa_make_current(ctx, fb, readFb);
   } else {
      _mesa_make_current(NULL, NULL, NULL);
   }

   return true;
}
void
intel_resolve_for_dri2_flush(struct brw_context *brw,
                             __DRIdrawable *drawable)
{
   if (brw->gen < 6) {
      /* MSAA and fast color clear are not supported, so don't waste time
       * checking whether a resolve is needed.
       */
      return;
   }

   struct gl_framebuffer *fb = drawable->driverPrivate;
   struct intel_renderbuffer *rb;

   /* Usually, only the back buffer will need to be downsampled. However,
    * the front buffer will also need it if the user has rendered into it.
    */
   static const gl_buffer_index buffers[2] = {
         BUFFER_BACK_LEFT,
         BUFFER_FRONT_LEFT,
   };

   for (int i = 0; i < 2; ++i) {
      rb = intel_get_renderbuffer(fb, buffers[i]);
      if (rb == NULL || rb->mt == NULL)
         continue;
      if (rb->mt->num_samples <= 1)
         intel_miptree_resolve_color(brw, rb->mt);
      else
         intel_renderbuffer_downsample(brw, rb);
   }
}
static unsigned
intel_bits_per_pixel(const struct intel_renderbuffer *rb)
{
   return _mesa_get_format_bytes(intel_rb_format(rb)) * 8;
}
static void
intel_query_dri2_buffers(struct brw_context *brw,
                         __DRIdrawable *drawable,
                         __DRIbuffer **buffers,
                         int *buffer_count);

static void
intel_process_dri2_buffer(struct brw_context *brw,
                          __DRIdrawable *drawable,
                          __DRIbuffer *buffer,
                          struct intel_renderbuffer *rb,
                          const char *buffer_name);

static void
intel_update_image_buffers(struct brw_context *brw, __DRIdrawable *drawable);
static void
intel_update_dri2_buffers(struct brw_context *brw, __DRIdrawable *drawable)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;
   struct intel_renderbuffer *rb;
   __DRIbuffer *buffers = NULL;
   int i, count;
   const char *region_name;

   /* Set this up front, so that in case our buffers get invalidated
    * while we're getting new buffers, we don't clobber the stamp and
    * thus ignore the invalidate. */
   drawable->lastStamp = drawable->dri2.stamp;

   if (unlikely(INTEL_DEBUG & DEBUG_DRI))
      fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);

   intel_query_dri2_buffers(brw, drawable, &buffers, &count);

   if (buffers == NULL)
      return;

   for (i = 0; i < count; i++) {
      switch (buffers[i].attachment) {
      case __DRI_BUFFER_FRONT_LEFT:
         rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
         region_name = "dri2 front buffer";
         break;

      case __DRI_BUFFER_FAKE_FRONT_LEFT:
         rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
         region_name = "dri2 fake front buffer";
         break;

      case __DRI_BUFFER_BACK_LEFT:
         rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
         region_name = "dri2 back buffer";
         break;

      case __DRI_BUFFER_DEPTH:
      case __DRI_BUFFER_HIZ:
      case __DRI_BUFFER_DEPTH_STENCIL:
      case __DRI_BUFFER_STENCIL:
      case __DRI_BUFFER_ACCUM:
         fprintf(stderr,
                 "unhandled buffer attach event, attachment type %d\n",
                 buffers[i].attachment);
         return;
      }

      intel_process_dri2_buffer(brw, drawable, &buffers[i], rb, region_name);
   }
}
void
intel_update_renderbuffers(__DRIcontext *context, __DRIdrawable *drawable)
{
   struct brw_context *brw = context->driverPrivate;
   __DRIscreen *screen = brw->intelScreen->driScrnPriv;

   /* Set this up front, so that in case our buffers get invalidated
    * while we're getting new buffers, we don't clobber the stamp and
    * thus ignore the invalidate. */
   drawable->lastStamp = drawable->dri2.stamp;

   if (unlikely(INTEL_DEBUG & DEBUG_DRI))
      fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);

   if (screen->image.loader)
      intel_update_image_buffers(brw, drawable);
   else
      intel_update_dri2_buffers(brw, drawable);

   driUpdateFramebufferSize(&brw->ctx, drawable);
}
/**
 * intel_prepare_render should be called anywhere that current read/drawbuffer
 * state is required.
 */
void
intel_prepare_render(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   __DRIcontext *driContext = brw->driContext;
   __DRIdrawable *drawable;

   drawable = driContext->driDrawablePriv;
   if (drawable && drawable->dri2.stamp != driContext->dri2.draw_stamp) {
      if (drawable->lastStamp != drawable->dri2.stamp)
         intel_update_renderbuffers(driContext, drawable);
      driContext->dri2.draw_stamp = drawable->dri2.stamp;
   }

   drawable = driContext->driReadablePriv;
   if (drawable && drawable->dri2.stamp != driContext->dri2.read_stamp) {
      if (drawable->lastStamp != drawable->dri2.stamp)
         intel_update_renderbuffers(driContext, drawable);
      driContext->dri2.read_stamp = drawable->dri2.stamp;
   }

   /* If we're currently rendering to the front buffer, the rendering
    * that will happen next will probably dirty the front buffer. So
    * mark it as dirty here.
    */
   if (brw_is_front_buffer_drawing(ctx->DrawBuffer))
      brw->front_buffer_dirty = true;
}
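/* Typical usage (illustrative, not an exhaustive list of call sites): the
 * draw, clear, and blit paths call intel_prepare_render(brw) before they
 * examine ctx->DrawBuffer, so that window-system renderbuffers have been
 * refreshed from the DRI2 or image loader and drawable->w/h are current.
 */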
/**
 * \brief Query DRI2 to obtain a DRIdrawable's buffers.
 *
 * To determine which DRI buffers to request, examine the renderbuffers
 * attached to the drawable's framebuffer. Then request the buffers with
 * DRI2GetBuffers() or DRI2GetBuffersWithFormat().
 *
 * This is called from intel_update_renderbuffers().
 *
 * \param drawable      Drawable whose buffers are queried.
 * \param buffers       [out] List of buffers returned by DRI2 query.
 * \param buffer_count  [out] Number of buffers returned.
 *
 * \see intel_update_renderbuffers()
 * \see DRI2GetBuffers()
 * \see DRI2GetBuffersWithFormat()
 */
static void
intel_query_dri2_buffers(struct brw_context *brw,
                         __DRIdrawable *drawable,
                         __DRIbuffer **buffers,
                         int *buffer_count)
{
   __DRIscreen *screen = brw->intelScreen->driScrnPriv;
   struct gl_framebuffer *fb = drawable->driverPrivate;
   int i = 0;
   unsigned attachments[8];

   struct intel_renderbuffer *front_rb;
   struct intel_renderbuffer *back_rb;

   front_rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
   back_rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);

   memset(attachments, 0, sizeof(attachments));
   if ((brw_is_front_buffer_drawing(fb) ||
        brw_is_front_buffer_reading(fb) ||
        !back_rb) && front_rb) {
      /* If a fake front buffer is in use, then querying for
       * __DRI_BUFFER_FRONT_LEFT will cause the server to copy the image from
       * the real front buffer to the fake front buffer. So before doing the
       * query, we need to make sure all the pending drawing has landed in the
       * real front buffer.
       */
      intel_batchbuffer_flush(brw);
      intel_flush_front(&brw->ctx);

      attachments[i++] = __DRI_BUFFER_FRONT_LEFT;
      attachments[i++] = intel_bits_per_pixel(front_rb);
   } else if (front_rb && brw->front_buffer_dirty) {
      /* We have pending front buffer rendering, but we aren't querying for a
       * front buffer. If the front buffer we have is a fake front buffer,
       * the X server is going to throw it away when it processes the query.
       * So before doing the query, make sure all the pending drawing has
       * landed in the real front buffer.
       */
      intel_batchbuffer_flush(brw);
      intel_flush_front(&brw->ctx);
   }

   if (back_rb) {
      attachments[i++] = __DRI_BUFFER_BACK_LEFT;
      attachments[i++] = intel_bits_per_pixel(back_rb);
   }

   assert(i <= ARRAY_SIZE(attachments));

   *buffers = screen->dri2.loader->getBuffersWithFormat(drawable,
                                                        &drawable->w,
                                                        &drawable->h,
                                                        attachments, i / 2,
                                                        buffer_count,
                                                        drawable->loaderPrivate);
}
/**
 * \brief Assign a DRI buffer's DRM region to a renderbuffer.
 *
 * This is called from intel_update_renderbuffers().
 *
 * DRI buffers whose attachment point is DRI2BufferStencil or
 * DRI2BufferDepthStencil are handled as special cases.
 *
 * \param buffer_name is a human readable name, such as "dri2 front buffer",
 *        that is passed to drm_intel_bo_gem_create_from_name().
 *
 * \see intel_update_renderbuffers()
 */
static void
intel_process_dri2_buffer(struct brw_context *brw,
                          __DRIdrawable *drawable,
                          __DRIbuffer *buffer,
                          struct intel_renderbuffer *rb,
                          const char *buffer_name)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;
   drm_intel_bo *bo;

   if (!rb)
      return;

   unsigned num_samples = rb->Base.Base.NumSamples;

   /* We try to avoid closing and reopening the same BO name, because the first
    * use of a mapping of the buffer involves a bunch of page faulting which is
    * moderately expensive.
    */
   struct intel_mipmap_tree *last_mt;
   if (num_samples == 0)
      last_mt = rb->mt;
   else
      last_mt = rb->singlesample_mt;

   uint32_t old_name = 0;
   if (last_mt) {
      /* The bo already has a name because the miptree was created by a
       * previous call to intel_process_dri2_buffer(). If a bo already has a
       * name, then drm_intel_bo_flink() is a low-cost getter. It does not
       * create a new name.
       */
      drm_intel_bo_flink(last_mt->bo, &old_name);
   }

   if (old_name == buffer->name)
      return;

   if (unlikely(INTEL_DEBUG & DEBUG_DRI)) {
      fprintf(stderr,
              "attaching buffer %d, at %d, cpp %d, pitch %d\n",
              buffer->name, buffer->attachment,
              buffer->cpp, buffer->pitch);
   }

   bo = drm_intel_bo_gem_create_from_name(brw->bufmgr, buffer_name,
                                          buffer->name);
   if (!bo) {
      fprintf(stderr,
              "Failed to open BO for returned DRI2 buffer "
              "(%dx%d, %s, named %d).\n"
              "This is likely a bug in the X Server that will lead to a "
              "crash soon.\n",
              drawable->w, drawable->h, buffer_name, buffer->name);
      return;
   }

   intel_update_winsys_renderbuffer_miptree(brw, rb, bo,
                                            drawable->w, drawable->h,
                                            buffer->pitch);

   if (brw_is_front_buffer_drawing(fb) &&
       (buffer->attachment == __DRI_BUFFER_FRONT_LEFT ||
        buffer->attachment == __DRI_BUFFER_FAKE_FRONT_LEFT) &&
       rb->Base.Base.NumSamples > 1) {
      intel_renderbuffer_upsample(brw, rb);
   }

   drm_intel_bo_unreference(bo);
}
/**
 * \brief Query DRI image loader to obtain a DRIdrawable's buffers.
 *
 * To determine which DRI buffers to request, examine the renderbuffers
 * attached to the drawable's framebuffer. Then request the buffers from
 * the DRI image loader.
 *
 * This is called from intel_update_renderbuffers().
 *
 * \param drawable      Drawable whose buffers are queried.
 * \param buffers       [out] List of buffers returned by DRI2 query.
 * \param buffer_count  [out] Number of buffers returned.
 *
 * \see intel_update_renderbuffers()
 */
static void
intel_update_image_buffer(struct brw_context *intel,
                          __DRIdrawable *drawable,
                          struct intel_renderbuffer *rb,
                          __DRIimage *buffer,
                          enum __DRIimageBufferMask buffer_type)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;

   if (!rb || !buffer->bo)
      return;

   unsigned num_samples = rb->Base.Base.NumSamples;

   /* Check and see if we're already bound to the right
    * buffer object
    */
   struct intel_mipmap_tree *last_mt;
   if (num_samples == 0)
      last_mt = rb->mt;
   else
      last_mt = rb->singlesample_mt;

   if (last_mt && last_mt->bo == buffer->bo)
      return;

   intel_update_winsys_renderbuffer_miptree(intel, rb, buffer->bo,
                                            buffer->width, buffer->height,
                                            buffer->pitch);

   if (brw_is_front_buffer_drawing(fb) &&
       buffer_type == __DRI_IMAGE_BUFFER_FRONT &&
       rb->Base.Base.NumSamples > 1) {
      intel_renderbuffer_upsample(intel, rb);
   }
}
static void
intel_update_image_buffers(struct brw_context *brw, __DRIdrawable *drawable)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;
   __DRIscreen *screen = brw->intelScreen->driScrnPriv;
   struct intel_renderbuffer *front_rb;
   struct intel_renderbuffer *back_rb;
   struct __DRIimageList images;
   unsigned int format;
   uint32_t buffer_mask = 0;

   front_rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
   back_rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);

   if (back_rb)
      format = intel_rb_format(back_rb);
   else if (front_rb)
      format = intel_rb_format(front_rb);
   else
      return;

   if (front_rb && (brw_is_front_buffer_drawing(fb) ||
                    brw_is_front_buffer_reading(fb) || !back_rb)) {
      buffer_mask |= __DRI_IMAGE_BUFFER_FRONT;
   }

   if (back_rb)
      buffer_mask |= __DRI_IMAGE_BUFFER_BACK;

   (*screen->image.loader->getBuffers) (drawable,
                                        driGLFormatToImageFormat(format),
                                        &drawable->dri2.stamp,
                                        drawable->loaderPrivate,
                                        buffer_mask,
                                        &images);

   if (images.image_mask & __DRI_IMAGE_BUFFER_FRONT) {
      drawable->w = images.front->width;
      drawable->h = images.front->height;
      intel_update_image_buffer(brw,
                                drawable,
                                front_rb,
                                images.front,
                                __DRI_IMAGE_BUFFER_FRONT);
   }

   if (images.image_mask & __DRI_IMAGE_BUFFER_BACK) {
      drawable->w = images.back->width;
      drawable->h = images.back->height;
      intel_update_image_buffer(brw,
                                drawable,
                                back_rb,
                                images.back,
                                __DRI_IMAGE_BUFFER_BACK);
   }
}