/*
 Copyright 2003 VMware, Inc.
 Copyright (C) Intel Corp. 2006.  All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
 /*
  * Authors:
  *   Keith Whitwell <keithw@vmware.com>
  */

#include "compiler/nir/nir.h"
#include "main/api_exec.h"
#include "main/context.h"
#include "main/fbobject.h"
#include "main/extensions.h"
#include "main/imports.h"
#include "main/macros.h"
#include "main/points.h"
#include "main/version.h"
#include "main/vtxfmt.h"
#include "main/texobj.h"
#include "main/framebuffer.h"

#include "vbo/vbo_context.h"

#include "drivers/common/driverfuncs.h"
#include "drivers/common/meta.h"

#include "brw_context.h"
#include "brw_defines.h"
#include "brw_blorp.h"
#include "brw_state.h"

#include "intel_batchbuffer.h"
#include "intel_buffer_objects.h"
#include "intel_buffers.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
#include "intel_pixel.h"
#include "intel_image.h"
#include "intel_tex.h"
#include "intel_tex_obj.h"

#include "swrast_setup/swrast_setup.h"
#include "tnl/t_pipeline.h"
#include "util/ralloc.h"
#include "util/debug.h"

/***************************************
 * Mesa's Driver Functions
 ***************************************/

const char *const brw_vendor_string = "Intel Open Source Technology Center";

static const char *
get_bsw_model(const struct intel_screen *screen)
{
   switch (screen->eu_total) {
   case 16:
      return "405";
   case 12:
      return "400";
   default:
      return "   ";
   }
}

static const char *
brw_get_renderer_string(const struct intel_screen *screen)
{
   const char *chipset;
   static char buffer[128];
   char *bsw = NULL;

   switch (screen->deviceID) {
#define CHIPSET(id, symbol, str) case id: chipset = str; break;
#include "pci_ids/i965_pci_ids.h"
   default:
      chipset = "Unknown Intel Chipset";
      break;
   }

   /* Braswell branding is funny, so we have to fix it up here */
   if (screen->deviceID == 0x22B1) {
      bsw = strdup(chipset);
      char *needle = strstr(bsw, "XXX");
      if (needle) {
         memcpy(needle, get_bsw_model(screen), 3);
         chipset = bsw;
      }
   }

   (void) driGetRendererString(buffer, chipset, 0);
   return buffer;
}

static const GLubyte *
intel_get_string(struct gl_context * ctx, GLenum name)
{
   const struct brw_context *const brw = brw_context(ctx);

   switch (name) {
   case GL_VENDOR:
      return (GLubyte *) brw_vendor_string;

   case GL_RENDERER:
      return
         (GLubyte *) brw_get_renderer_string(brw->screen);

   default:
      return NULL;
   }
}

static void
intel_viewport(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);
   __DRIcontext *driContext = brw->driContext;

   if (_mesa_is_winsys_fbo(ctx->DrawBuffer)) {
      if (driContext->driDrawablePriv)
         dri2InvalidateDrawable(driContext->driDrawablePriv);
      if (driContext->driReadablePriv)
         dri2InvalidateDrawable(driContext->driReadablePriv);
   }
}

static void
intel_update_framebuffer(struct gl_context *ctx,
                         struct gl_framebuffer *fb)
{
   struct brw_context *brw = brw_context(ctx);

   /* Quantize the derived default number of samples. */
   fb->DefaultGeometry._NumSamples =
      intel_quantize_num_samples(brw->screen,
                                 fb->DefaultGeometry.NumSamples);
}

static bool
intel_disable_rb_aux_buffer(struct brw_context *brw, const drm_intel_bo *bo)
{
   const struct gl_framebuffer *fb = brw->ctx.DrawBuffer;
   bool found = false;

   for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
      const struct intel_renderbuffer *irb =
         intel_renderbuffer(fb->_ColorDrawBuffers[i]);

      if (irb && irb->mt->bo == bo) {
         found = brw->draw_aux_buffer_disabled[i] = true;
      }
   }

   return found;
}

/* On Gen9 color buffers may be compressed by the hardware (lossless
 * compression). There are, however, format restrictions and care needs to be
 * taken that the sampler engine is capable of re-interpreting a buffer with
 * a format different from the one the buffer was originally written with.
 *
 * For example, SRGB formats are not compressible and the sampler engine isn't
 * capable of treating RGBA_UNORM as SRGB_ALPHA. In such a case the underlying
 * color buffer needs to be resolved so that the sampling surface can be
 * sampled as non-compressed (i.e., without the auxiliary MCS buffer being
 * used).
 */
static bool
intel_texture_view_requires_resolve(struct brw_context *brw,
                                    struct intel_texture_object *intel_tex)
{
   if (brw->gen < 9 ||
       !intel_miptree_is_lossless_compressed(brw, intel_tex->mt))
      return false;

   const uint32_t brw_format = brw_format_for_mesa_format(intel_tex->_Format);

   if (isl_format_supports_ccs_e(&brw->screen->devinfo, brw_format))
      return false;

   perf_debug("Incompatible sampling format (%s) for rbc (%s)\n",
              _mesa_get_format_name(intel_tex->_Format),
              _mesa_get_format_name(intel_tex->mt->format));

   if (intel_disable_rb_aux_buffer(brw, intel_tex->mt->bo))
      perf_debug("Sampling renderbuffer with non-compressible format - "
                 "turning off compression");

   return true;
}

static void
intel_update_state(struct gl_context * ctx, GLuint new_state)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_texture_object *tex_obj;
   struct intel_renderbuffer *depth_irb;

   if (ctx->swrast_context)
      _swrast_InvalidateState(ctx, new_state);
   _vbo_InvalidateState(ctx, new_state);

   brw->NewGLState |= new_state;

   _mesa_unlock_context_textures(ctx);

   /* Resolve the depth buffer's HiZ buffer. */
   depth_irb = intel_get_renderbuffer(ctx->DrawBuffer, BUFFER_DEPTH);
   if (depth_irb)
      intel_renderbuffer_resolve_hiz(brw, depth_irb);

   memset(brw->draw_aux_buffer_disabled, 0,
          sizeof(brw->draw_aux_buffer_disabled));

   /* Resolve depth buffer and render cache of each enabled texture. */
   int maxEnabledUnit = ctx->Texture._MaxEnabledTexImageUnit;
   for (int i = 0; i <= maxEnabledUnit; i++) {
      if (!ctx->Texture.Unit[i]._Current)
         continue;
      tex_obj = intel_texture_object(ctx->Texture.Unit[i]._Current);
      if (!tex_obj || !tex_obj->mt)
         continue;
      if (intel_miptree_sample_with_hiz(brw, tex_obj->mt))
         intel_miptree_all_slices_resolve_hiz(brw, tex_obj->mt);
      else
         intel_miptree_all_slices_resolve_depth(brw, tex_obj->mt);
      /* The sampling engine understands lossless compression, so resolving
       * those surfaces should be skipped for performance reasons.
       */
      const int flags = intel_texture_view_requires_resolve(brw, tex_obj) ?
                           0 : INTEL_MIPTREE_IGNORE_CCS_E;
      intel_miptree_all_slices_resolve_color(brw, tex_obj->mt, flags);
      brw_render_cache_set_check_flush(brw, tex_obj->mt->bo);

      if (tex_obj->base.StencilSampling ||
          tex_obj->mt->format == MESA_FORMAT_S_UINT8) {
         intel_update_r8stencil(brw, tex_obj->mt);
      }
   }

   /* Resolve color for each active shader image. */
   for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
      const struct gl_program *prog = ctx->_Shader->CurrentProgram[i];

      if (unlikely(prog && prog->info.num_images)) {
         for (unsigned j = 0; j < prog->info.num_images; j++) {
            struct gl_image_unit *u =
               &ctx->ImageUnits[prog->sh.ImageUnits[j]];
            tex_obj = intel_texture_object(u->TexObj);

            if (tex_obj && tex_obj->mt) {
               /* Access to images is implemented using indirect messages
                * against the data port. Normal render target writes
                * understand lossless compression, but unfortunately the
                * typed/untyped read/write interface doesn't. Therefore even
                * lossless compressed surfaces need to be resolved prior to
                * accessing them. Hence skip setting INTEL_MIPTREE_IGNORE_CCS_E.
                */
               intel_miptree_all_slices_resolve_color(brw, tex_obj->mt, 0);

               if (intel_miptree_is_lossless_compressed(brw, tex_obj->mt) &&
                   intel_disable_rb_aux_buffer(brw, tex_obj->mt->bo)) {
                  perf_debug("Using renderbuffer as shader image - turning "
                             "off lossless compression");
               }

               brw_render_cache_set_check_flush(brw, tex_obj->mt->bo);
            }
         }
      }
   }

   /* Resolve color buffers for non-coherent framebuffer fetch. */
   if (!ctx->Extensions.MESA_shader_framebuffer_fetch &&
       ctx->FragmentProgram._Current &&
       ctx->FragmentProgram._Current->info.outputs_read) {
      const struct gl_framebuffer *fb = ctx->DrawBuffer;

      for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
         const struct intel_renderbuffer *irb =
            intel_renderbuffer(fb->_ColorDrawBuffers[i]);

         if (irb &&
             intel_miptree_resolve_color(
                brw, irb->mt, irb->mt_level, irb->mt_layer, irb->layer_count,
                INTEL_MIPTREE_IGNORE_CCS_E))
            brw_render_cache_set_check_flush(brw, irb->mt->bo);
      }
   }

   /* If FRAMEBUFFER_SRGB is used on Gen9+ then we need to resolve any of the
    * single-sampled color renderbuffers because the CCS buffer isn't
    * supported for SRGB formats. This only matters if FRAMEBUFFER_SRGB is
    * enabled because otherwise the surface state will be programmed with the
    * linear equivalent format anyway.
    */
   if (brw->gen >= 9 && ctx->Color.sRGBEnabled) {
      struct gl_framebuffer *fb = ctx->DrawBuffer;
      for (int i = 0; i < fb->_NumColorDrawBuffers; i++) {
         struct gl_renderbuffer *rb = fb->_ColorDrawBuffers[i];

         if (rb == NULL)
            continue;

         struct intel_renderbuffer *irb = intel_renderbuffer(rb);
         struct intel_mipmap_tree *mt = irb->mt;

         if (mt == NULL ||
             mt->num_samples > 1 ||
             _mesa_get_srgb_format_linear(mt->format) == mt->format)
            continue;

         /* Lossless compression is not supported for SRGB formats, so it
          * should be impossible to get here with such surfaces.
          */
         assert(!intel_miptree_is_lossless_compressed(brw, mt));
         intel_miptree_all_slices_resolve_color(brw, mt, 0);
         brw_render_cache_set_check_flush(brw, mt->bo);
      }
   }

   _mesa_lock_context_textures(ctx);

   if (new_state & _NEW_BUFFERS) {
      intel_update_framebuffer(ctx, ctx->DrawBuffer);
      if (ctx->DrawBuffer != ctx->ReadBuffer)
         intel_update_framebuffer(ctx, ctx->ReadBuffer);
   }
}

#define flushFront(screen)      ((screen)->image.loader ? (screen)->image.loader->flushFrontBuffer : (screen)->dri2.loader->flushFrontBuffer)

static void
intel_flush_front(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);
   __DRIcontext *driContext = brw->driContext;
   __DRIdrawable *driDrawable = driContext->driDrawablePriv;
   __DRIscreen *const dri_screen = brw->screen->driScrnPriv;

   if (brw->front_buffer_dirty && _mesa_is_winsys_fbo(ctx->DrawBuffer)) {
      if (flushFront(dri_screen) && driDrawable &&
          driDrawable->loaderPrivate) {

         /* Resolve before flushing FAKE_FRONT_LEFT to FRONT_LEFT.
          *
          * This potentially resolves both front and back buffer. It
          * is unnecessary to resolve the back, but harms nothing except
          * performance. And no one cares about front-buffer render
          * performance.
          */
         intel_resolve_for_dri2_flush(brw, driDrawable);
         intel_batchbuffer_flush(brw);

         flushFront(dri_screen)(driDrawable, driDrawable->loaderPrivate);

         /* We set the dirty bit in intel_prepare_render() if we're
          * front buffer rendering once we get there.
          */
         brw->front_buffer_dirty = false;
      }
   }
}

static void
intel_glFlush(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);

   intel_batchbuffer_flush(brw);
   intel_flush_front(ctx);

   brw->need_flush_throttle = true;
}

static void
intel_finish(struct gl_context * ctx)
{
   struct brw_context *brw = brw_context(ctx);

   intel_glFlush(ctx);

   if (brw->batch.last_bo)
      drm_intel_bo_wait_rendering(brw->batch.last_bo);
}

static void
brw_init_driver_functions(struct brw_context *brw,
                          struct dd_function_table *functions)
{
   _mesa_init_driver_functions(functions);

   /* GLX uses DRI2 invalidate events to handle window resizing.
    * Unfortunately, EGL does not - libEGL is written in XCB (not Xlib),
    * which doesn't provide a mechanism for snooping the event queues.
    *
    * So EGL still relies on viewport hacks to handle window resizing.
    * This should go away with DRI3000.
    */
   if (!brw->driContext->driScreenPriv->dri2.useInvalidate)
      functions->Viewport = intel_viewport;

   functions->Flush = intel_glFlush;
   functions->Finish = intel_finish;
   functions->GetString = intel_get_string;
   functions->UpdateState = intel_update_state;

   intelInitTextureFuncs(functions);
   intelInitTextureImageFuncs(functions);
   intelInitTextureSubImageFuncs(functions);
   intelInitTextureCopyImageFuncs(functions);
   intelInitCopyImageFuncs(functions);
   intelInitClearFuncs(functions);
   intelInitBufferFuncs(functions);
   intelInitPixelFuncs(functions);
   intelInitBufferObjectFuncs(functions);
   brw_init_syncobj_functions(functions);
   brw_init_object_purgeable_functions(functions);

   brwInitFragProgFuncs( functions );
   brw_init_common_queryobj_functions(functions);
   if (brw->gen >= 8 || brw->is_haswell)
      hsw_init_queryobj_functions(functions);
   else if (brw->gen >= 6)
      gen6_init_queryobj_functions(functions);
   else
      gen4_init_queryobj_functions(functions);
   brw_init_compute_functions(functions);
   brw_init_conditional_render_functions(functions);

   functions->QueryInternalFormat = brw_query_internal_format;

   functions->NewTransformFeedback = brw_new_transform_feedback;
   functions->DeleteTransformFeedback = brw_delete_transform_feedback;
   if (can_do_mi_math_and_lrr(brw->screen)) {
      functions->BeginTransformFeedback = hsw_begin_transform_feedback;
      functions->EndTransformFeedback = hsw_end_transform_feedback;
      functions->PauseTransformFeedback = hsw_pause_transform_feedback;
      functions->ResumeTransformFeedback = hsw_resume_transform_feedback;
   } else if (brw->gen >= 7) {
      functions->BeginTransformFeedback = gen7_begin_transform_feedback;
      functions->EndTransformFeedback = gen7_end_transform_feedback;
      functions->PauseTransformFeedback = gen7_pause_transform_feedback;
      functions->ResumeTransformFeedback = gen7_resume_transform_feedback;
      functions->GetTransformFeedbackVertexCount =
         brw_get_transform_feedback_vertex_count;
   } else {
      functions->BeginTransformFeedback = brw_begin_transform_feedback;
      functions->EndTransformFeedback = brw_end_transform_feedback;
      functions->PauseTransformFeedback = brw_pause_transform_feedback;
      functions->ResumeTransformFeedback = brw_resume_transform_feedback;
      functions->GetTransformFeedbackVertexCount =
         brw_get_transform_feedback_vertex_count;
   }

   if (brw->gen >= 6)
      functions->GetSamplePosition = gen6_get_sample_position;
}

static void
brw_initialize_context_constants(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   const struct brw_compiler *compiler = brw->screen->compiler;

   const bool stage_exists[MESA_SHADER_STAGES] = {
      [MESA_SHADER_VERTEX] = true,
      [MESA_SHADER_TESS_CTRL] = brw->gen >= 7,
      [MESA_SHADER_TESS_EVAL] = brw->gen >= 7,
      [MESA_SHADER_GEOMETRY] = brw->gen >= 6,
      [MESA_SHADER_FRAGMENT] = true,
      [MESA_SHADER_COMPUTE] =
         ((ctx->API == API_OPENGL_COMPAT || ctx->API == API_OPENGL_CORE) &&
          ctx->Const.MaxComputeWorkGroupSize[0] >= 1024) ||
         (ctx->API == API_OPENGLES2 &&
          ctx->Const.MaxComputeWorkGroupSize[0] >= 128) ||
         _mesa_extension_override_enables.ARB_compute_shader,
   };

   unsigned num_stages = 0;
   for (int i = 0; i < MESA_SHADER_STAGES; i++) {
      if (stage_exists[i])
         num_stages++;
   }

   unsigned max_samplers =
      brw->gen >= 8 || brw->is_haswell ? BRW_MAX_TEX_UNIT : 16;

   ctx->Const.MaxDualSourceDrawBuffers = 1;
   ctx->Const.MaxDrawBuffers = BRW_MAX_DRAW_BUFFERS;
   ctx->Const.MaxCombinedShaderOutputResources =
      MAX_IMAGE_UNITS + BRW_MAX_DRAW_BUFFERS;

   ctx->Const.QueryCounterBits.Timestamp = 36;

   ctx->Const.MaxTextureCoordUnits = 8; /* Mesa limit */
   ctx->Const.MaxImageUnits = MAX_IMAGE_UNITS;
   if (brw->gen >= 7) {
      ctx->Const.MaxRenderbufferSize = 16384;
      ctx->Const.MaxTextureLevels = MIN2(15 /* 16384 */, MAX_TEXTURE_LEVELS);
      ctx->Const.MaxCubeTextureLevels = 15; /* 16384 */
   } else {
      ctx->Const.MaxRenderbufferSize = 8192;
      ctx->Const.MaxTextureLevels = MIN2(14 /* 8192 */, MAX_TEXTURE_LEVELS);
      ctx->Const.MaxCubeTextureLevels = 14; /* 8192 */
   }
   ctx->Const.Max3DTextureLevels = 12; /* 2048 */
   ctx->Const.MaxArrayTextureLayers = brw->gen >= 7 ? 2048 : 512;
   ctx->Const.MaxTextureMbytes = 1536;
   ctx->Const.MaxTextureRectSize = 1 << 12;
   ctx->Const.MaxTextureMaxAnisotropy = 16.0;
   ctx->Const.MaxTextureLodBias = 15.0;
   ctx->Const.StripTextureBorder = true;
   if (brw->gen >= 7) {
      ctx->Const.MaxProgramTextureGatherComponents = 4;
      ctx->Const.MinProgramTextureGatherOffset = -32;
      ctx->Const.MaxProgramTextureGatherOffset = 31;
   } else if (brw->gen == 6) {
      ctx->Const.MaxProgramTextureGatherComponents = 1;
      ctx->Const.MinProgramTextureGatherOffset = -8;
      ctx->Const.MaxProgramTextureGatherOffset = 7;
   }

   ctx->Const.MaxUniformBlockSize = 65536;

   for (int i = 0; i < MESA_SHADER_STAGES; i++) {
      struct gl_program_constants *prog = &ctx->Const.Program[i];

      if (!stage_exists[i])
         continue;

      prog->MaxTextureImageUnits = max_samplers;

      prog->MaxUniformBlocks = BRW_MAX_UBO;
      prog->MaxCombinedUniformComponents =
         prog->MaxUniformComponents +
         ctx->Const.MaxUniformBlockSize / 4 * prog->MaxUniformBlocks;
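
      /* Worked example (hypothetical numbers, not the driver's actual
       * limits): with MaxUniformComponents = 1024, MaxUniformBlockSize =
       * 65536 and BRW_MAX_UBO = 12 blocks, the combined limit above would be
       * 1024 + 65536 / 4 * 12 = 197632 components.
       */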

      prog->MaxAtomicCounters = MAX_ATOMIC_COUNTERS;
      prog->MaxAtomicBuffers = BRW_MAX_ABO;
      prog->MaxImageUniforms = compiler->scalar_stage[i] ? BRW_MAX_IMAGES : 0;
      prog->MaxShaderStorageBlocks = BRW_MAX_SSBO;
   }

   ctx->Const.MaxTextureUnits =
      MIN2(ctx->Const.MaxTextureCoordUnits,
           ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits);

   ctx->Const.MaxUniformBufferBindings = num_stages * BRW_MAX_UBO;
   ctx->Const.MaxCombinedUniformBlocks = num_stages * BRW_MAX_UBO;
   ctx->Const.MaxCombinedAtomicBuffers = num_stages * BRW_MAX_ABO;
   ctx->Const.MaxCombinedShaderStorageBlocks = num_stages * BRW_MAX_SSBO;
   ctx->Const.MaxShaderStorageBufferBindings = num_stages * BRW_MAX_SSBO;
   ctx->Const.MaxCombinedTextureImageUnits = num_stages * max_samplers;
   ctx->Const.MaxCombinedImageUniforms = num_stages * BRW_MAX_IMAGES;

   /* Hardware only supports a limited number of transform feedback buffers.
    * So we need to override the Mesa default (which is based only on software
    * limits).
    */
   ctx->Const.MaxTransformFeedbackBuffers = BRW_MAX_SOL_BUFFERS;

   /* On Gen6, in the worst case, we use up one binding table entry per
    * transform feedback component (see comments above the definition of
    * BRW_MAX_SOL_BINDINGS, in brw_context.h), so we need to advertise a value
    * for MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS equal to
    * BRW_MAX_SOL_BINDINGS.
    *
    * In "separate components" mode, we need to divide this value by
    * BRW_MAX_SOL_BUFFERS, so that the total number of binding table entries
    * used up by all buffers will not exceed BRW_MAX_SOL_BINDINGS.
    */
   ctx->Const.MaxTransformFeedbackInterleavedComponents = BRW_MAX_SOL_BINDINGS;
   ctx->Const.MaxTransformFeedbackSeparateComponents =
      BRW_MAX_SOL_BINDINGS / BRW_MAX_SOL_BUFFERS;
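
   /* Illustrative arithmetic (hypothetical values, the real ones live in
    * brw_context.h): with 64 SOL bindings shared across 4 SOL buffers,
    * separate-components mode would advertise 64 / 4 = 16 components per
    * buffer, so all buffers together still fit within the binding table.
    */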

   ctx->Const.AlwaysUseGetTransformFeedbackVertexCount =
      !can_do_mi_math_and_lrr(brw->screen);

   int max_samples;
   const int *msaa_modes = intel_supported_msaa_modes(brw->screen);
   const int clamp_max_samples =
      driQueryOptioni(&brw->optionCache, "clamp_max_samples");

   if (clamp_max_samples < 0) {
      max_samples = msaa_modes[0];
   } else {
      /* Select the largest supported MSAA mode that does not exceed
       * clamp_max_samples.
       */
      max_samples = 0;
      for (int i = 0; msaa_modes[i] != 0; ++i) {
         if (msaa_modes[i] <= clamp_max_samples) {
            max_samples = msaa_modes[i];
            break;
         }
      }
   }

   ctx->Const.MaxSamples = max_samples;
   ctx->Const.MaxColorTextureSamples = max_samples;
   ctx->Const.MaxDepthTextureSamples = max_samples;
   ctx->Const.MaxIntegerSamples = max_samples;
   ctx->Const.MaxImageSamples = 0;

   /* gen6_set_sample_maps() sets SampleMap{2,4,8}x variables which are used
    * to map indices of rectangular grid to sample numbers within a pixel.
    * These variables are used by GL_EXT_framebuffer_multisample_blit_scaled
    * extension implementation. For more details see the comment above
    * gen6_set_sample_maps() definition.
    */
   gen6_set_sample_maps(ctx);

   ctx->Const.MinLineWidth = 1.0;
   ctx->Const.MinLineWidthAA = 1.0;
   if (brw->gen >= 6) {
      ctx->Const.MaxLineWidth = 7.375;
      ctx->Const.MaxLineWidthAA = 7.375;
      ctx->Const.LineWidthGranularity = 0.125;
   } else {
      ctx->Const.MaxLineWidth = 7.0;
      ctx->Const.MaxLineWidthAA = 7.0;
      ctx->Const.LineWidthGranularity = 0.5;
   }

   /* For non-antialiased lines, we have to round the line width to the
    * nearest whole number. Make sure that we don't advertise a line
    * width that, when rounded, will be beyond the actual hardware
    * capabilities.
    */
   assert(roundf(ctx->Const.MaxLineWidth) <= ctx->Const.MaxLineWidth);

   ctx->Const.MinPointSize = 1.0;
   ctx->Const.MinPointSizeAA = 1.0;
   ctx->Const.MaxPointSize = 255.0;
   ctx->Const.MaxPointSizeAA = 255.0;
   ctx->Const.PointSizeGranularity = 1.0;

   if (brw->gen >= 5 || brw->is_g4x)
      ctx->Const.MaxClipPlanes = 8;

   ctx->Const.GLSLTessLevelsAsInputs = true;
   ctx->Const.LowerTCSPatchVerticesIn = brw->gen >= 8;
   ctx->Const.LowerTESPatchVerticesIn = true;
   ctx->Const.PrimitiveRestartForPatches = true;

   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeInstructions = 16 * 1024;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxAluInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxTexInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxTexIndirections = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAluInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTexInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTexIndirections = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAttribs = 16;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTemps = 256;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAddressRegs = 1;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeParameters = 1024;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxEnvParams =
      MIN2(ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeParameters,
           ctx->Const.Program[MESA_SHADER_VERTEX].MaxEnvParams);

   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeInstructions = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAluInstructions = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTexInstructions = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTexIndirections = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAttribs = 12;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTemps = 256;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAddressRegs = 0;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeParameters = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxEnvParams =
      MIN2(ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeParameters,
           ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxEnvParams);

   /* Fragment shaders use real, 32-bit twos-complement integers for all
    * integer types.
    */
   ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.RangeMin = 31;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.RangeMax = 30;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.Precision = 0;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].HighInt = ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MediumInt = ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt;

   ctx->Const.Program[MESA_SHADER_VERTEX].LowInt.RangeMin = 31;
   ctx->Const.Program[MESA_SHADER_VERTEX].LowInt.RangeMax = 30;
   ctx->Const.Program[MESA_SHADER_VERTEX].LowInt.Precision = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].HighInt = ctx->Const.Program[MESA_SHADER_VERTEX].LowInt;
   ctx->Const.Program[MESA_SHADER_VERTEX].MediumInt = ctx->Const.Program[MESA_SHADER_VERTEX].LowInt;

   /* Gen6 converts quads to polygons at the beginning of the 3D pipeline,
    * but we're not sure how that is actually done for vertex order, which
    * affects the provoking-vertex decision. Always use the last-vertex
    * convention for quad primitives, which works as expected for now.
    */
   ctx->Const.QuadsFollowProvokingVertexConvention = false;

   ctx->Const.NativeIntegers = true;
   ctx->Const.VertexID_is_zero_based = true;

   /* Regarding the CMP instruction, the Ivybridge PRM says:
    *
    *   "For each enabled channel 0b or 1b is assigned to the appropriate flag
    *    bit and 0/all zeros or all ones (e.g, byte 0xFF, word 0xFFFF, DWord
    *    0xFFFFFFFF) is assigned to dst."
    *
    * but PRMs for earlier generations say
    *
    *   "In dword format, one GRF may store up to 8 results. When the register
    *    is used later as a vector of Booleans, as only LSB at each channel
    *    contains meaning [sic] data, software should make sure all higher bits
    *    are masked out (e.g. by 'and-ing' an [sic] 0x01 constant)."
    *
    * We select the representation of a true boolean uniform to be ~0, and fix
    * the results of Gen <= 5 CMP instructions with -(result & 1).
    */
   ctx->Const.UniformBooleanTrue = ~0;
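
   /* Illustrative sketch of the Gen <= 5 fixup mentioned above (not driver
    * code): with UniformBooleanTrue == ~0, a CMP result whose meaningful data
    * lives only in the LSB of each channel can be normalized to
    * all-zeros/all-ones like so:
    *
    *    uint32_t b = -(cmp_result & 1);   // 0x00000000 or 0xFFFFFFFF
    */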

   /* From the gen4 PRM, volume 4 page 127:
    *
    *   "For SURFTYPE_BUFFER non-rendertarget surfaces, this field specifies
    *    the base address of the first element of the surface, computed in
    *    software by adding the surface base address to the byte offset of
    *    the element in the buffer."
    *
    * However, unaligned accesses are slower, so enforce buffer alignment.
    */
   ctx->Const.UniformBufferOffsetAlignment = 16;

   /* ShaderStorageBufferOffsetAlignment should be a cacheline (64 bytes) so
    * that we can safely have the CPU and GPU writing the same SSBO on
    * non-cache-coherent systems (our Atom CPUs). With UBOs, the GPU never
    * writes, so there's no problem. For an SSBO, the GPU and the CPU can
    * be updating disjoint regions of the buffer simultaneously and that will
    * break if the regions overlap the same cacheline.
    */
   ctx->Const.ShaderStorageBufferOffsetAlignment = 64;
   ctx->Const.TextureBufferOffsetAlignment = 16;
   ctx->Const.MaxTextureBufferSize = 128 * 1024 * 1024;

   ctx->Const.MaxVarying = 32;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxOutputComponents = 128;
   ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxInputComponents = 64;
   ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxOutputComponents = 128;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxInputComponents = 128;
   ctx->Const.Program[MESA_SHADER_TESS_CTRL].MaxInputComponents = 128;
   ctx->Const.Program[MESA_SHADER_TESS_CTRL].MaxOutputComponents = 128;
   ctx->Const.Program[MESA_SHADER_TESS_EVAL].MaxInputComponents = 128;
   ctx->Const.Program[MESA_SHADER_TESS_EVAL].MaxOutputComponents = 128;

   /* We want the GLSL compiler to emit code that uses condition codes */
   for (int i = 0; i < MESA_SHADER_STAGES; i++) {
      ctx->Const.ShaderCompilerOptions[i] =
         brw->screen->compiler->glsl_compiler_options[i];
   }

   if (brw->gen >= 7) {
      ctx->Const.MaxViewportWidth = 32768;
      ctx->Const.MaxViewportHeight = 32768;
   }

   /* ARB_viewport_array, OES_viewport_array */
   if (brw->gen >= 6) {
      ctx->Const.MaxViewports = GEN6_NUM_VIEWPORTS;
      ctx->Const.ViewportSubpixelBits = 0;

      /* Cast to float before negating because MaxViewportWidth is unsigned.
       */
      ctx->Const.ViewportBounds.Min = -(float)ctx->Const.MaxViewportWidth;
      ctx->Const.ViewportBounds.Max = ctx->Const.MaxViewportWidth;
   }

   /* ARB_gpu_shader5 */
   if (brw->gen >= 7)
      ctx->Const.MaxVertexStreams = MIN2(4, MAX_VERTEX_STREAMS);

   /* ARB_framebuffer_no_attachments */
   ctx->Const.MaxFramebufferWidth = 16384;
   ctx->Const.MaxFramebufferHeight = 16384;
   ctx->Const.MaxFramebufferLayers = ctx->Const.MaxArrayTextureLayers;
   ctx->Const.MaxFramebufferSamples = max_samples;

   /* OES_primitive_bounding_box */
   ctx->Const.NoPrimitiveBoundingBoxOutput = true;
}

static void
brw_initialize_cs_context_constants(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   const struct intel_screen *screen = brw->screen;
   struct gen_device_info *devinfo = &brw->screen->devinfo;

   /* FINISHME: Do this for all platforms that the kernel supports */
   if (brw->is_cherryview &&
       screen->subslice_total > 0 && screen->eu_total > 0) {
      /* Logical CS threads = EUs per subslice * 7 threads per EU */
      uint32_t max_cs_threads = screen->eu_total / screen->subslice_total * 7;

      /* Fuse configurations may give more threads than expected, never less. */
      if (max_cs_threads > devinfo->max_cs_threads)
         devinfo->max_cs_threads = max_cs_threads;
   }
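
   /* Worked example (hypothetical fuse configuration, not a real SKU): a
    * part reporting 16 EUs spread over 2 enabled subslices would yield
    * 16 / 2 * 7 = 56 logical CS threads per subslice.
    */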

   /* Maximum number of scalar compute shader invocations that can be run in
    * parallel in the same subslice assuming SIMD32 dispatch.
    *
    * We don't advertise more than 64 threads, because we are limited to 64 by
    * our usage of thread_width_max in the gpgpu walker command. This only
    * currently impacts Haswell, which otherwise might be able to advertise 70
    * threads. With SIMD32 and 64 threads, Haswell still provides twice the
    * number of invocations required by ARB_compute_shader.
    */
   const unsigned max_threads = MIN2(64, devinfo->max_cs_threads);
   const uint32_t max_invocations = 32 * max_threads;
   ctx->Const.MaxComputeWorkGroupSize[0] = max_invocations;
   ctx->Const.MaxComputeWorkGroupSize[1] = max_invocations;
   ctx->Const.MaxComputeWorkGroupSize[2] = max_invocations;
   ctx->Const.MaxComputeWorkGroupInvocations = max_invocations;
   ctx->Const.MaxComputeSharedMemorySize = 64 * 1024;
}

/**
 * Process driconf (drirc) options, setting appropriate context flags.
 *
 * intelInitExtensions still pokes at optionCache directly, in order to
 * avoid advertising various extensions. No flags are set, so it makes
 * sense to continue doing that there.
 */
static void
brw_process_driconf_options(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   driOptionCache *options = &brw->optionCache;
   driParseConfigFiles(options, &brw->screen->optionCache,
                       brw->driContext->driScreenPriv->myNum, "i965");

   int bo_reuse_mode = driQueryOptioni(options, "bo_reuse");
   switch (bo_reuse_mode) {
   case DRI_CONF_BO_REUSE_DISABLED:
      break;
   case DRI_CONF_BO_REUSE_ALL:
      intel_bufmgr_gem_enable_reuse(brw->bufmgr);
      break;
   }

   if (!driQueryOptionb(options, "hiz")) {
      brw->has_hiz = false;
      /* On gen6, you can only do separate stencil with HIZ. */
      if (brw->gen == 6)
         brw->has_separate_stencil = false;
   }

   if (driQueryOptionb(options, "always_flush_batch")) {
      fprintf(stderr, "flushing batchbuffer before/after each draw call\n");
      brw->always_flush_batch = true;
   }

   if (driQueryOptionb(options, "always_flush_cache")) {
      fprintf(stderr, "flushing GPU caches before/after each draw call\n");
      brw->always_flush_cache = true;
   }

   if (driQueryOptionb(options, "disable_throttling")) {
      fprintf(stderr, "disabling flush throttling\n");
      brw->disable_throttling = true;
   }

   brw->precompile = driQueryOptionb(&brw->optionCache, "shader_precompile");

   if (driQueryOptionb(&brw->optionCache, "precise_trig"))
      brw->screen->compiler->precise_trig = true;

   ctx->Const.ForceGLSLExtensionsWarn =
      driQueryOptionb(options, "force_glsl_extensions_warn");

   ctx->Const.ForceGLSLVersion =
      driQueryOptioni(options, "force_glsl_version");

   ctx->Const.DisableGLSLLineContinuations =
      driQueryOptionb(options, "disable_glsl_line_continuations");

   ctx->Const.AllowGLSLExtensionDirectiveMidShader =
      driQueryOptionb(options, "allow_glsl_extension_directive_midshader");

   ctx->Const.AllowHigherCompatVersion =
      driQueryOptionb(options, "allow_higher_compat_version");

   ctx->Const.GLSLZeroInit = driQueryOptionb(options, "glsl_zero_init");

   brw->dual_color_blend_by_location =
      driQueryOptionb(options, "dual_color_blend_by_location");
}

GLboolean
brwCreateContext(gl_api api,
                 const struct gl_config *mesaVis,
                 __DRIcontext *driContextPriv,
                 unsigned major_version,
                 unsigned minor_version,
                 uint32_t flags,
                 bool notify_reset,
                 unsigned *dri_ctx_error,
                 void *sharedContextPrivate)
{
   struct gl_context *shareCtx = (struct gl_context *) sharedContextPrivate;
   struct intel_screen *screen = driContextPriv->driScreenPriv->driverPrivate;
   const struct gen_device_info *devinfo = &screen->devinfo;
   struct dd_function_table functions;

   /* Only allow the __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS flag if the kernel
    * provides us with context reset notifications.
    */
   uint32_t allowed_flags = __DRI_CTX_FLAG_DEBUG
      | __DRI_CTX_FLAG_FORWARD_COMPATIBLE;

   if (screen->has_context_reset_notification)
      allowed_flags |= __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS;

   if (flags & ~allowed_flags) {
      *dri_ctx_error = __DRI_CTX_ERROR_UNKNOWN_FLAG;
      return false;
   }

   struct brw_context *brw = rzalloc(NULL, struct brw_context);
   if (!brw) {
      fprintf(stderr, "%s: failed to alloc context\n", __func__);
      *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
      return false;
   }

   driContextPriv->driverPrivate = brw;
   brw->driContext = driContextPriv;
   brw->screen = screen;
   brw->bufmgr = screen->bufmgr;

   brw->gen = devinfo->gen;
   brw->gt = devinfo->gt;
   brw->is_g4x = devinfo->is_g4x;
   brw->is_baytrail = devinfo->is_baytrail;
   brw->is_haswell = devinfo->is_haswell;
   brw->is_cherryview = devinfo->is_cherryview;
   brw->is_broxton = devinfo->is_broxton;
   brw->has_llc = devinfo->has_llc;
   brw->has_hiz = devinfo->has_hiz_and_separate_stencil;
   brw->has_separate_stencil = devinfo->has_hiz_and_separate_stencil;
   brw->has_pln = devinfo->has_pln;
   brw->has_compr4 = devinfo->has_compr4;
   brw->has_surface_tile_offset = devinfo->has_surface_tile_offset;
   brw->has_negative_rhw_bug = devinfo->has_negative_rhw_bug;
   brw->needs_unlit_centroid_workaround =
      devinfo->needs_unlit_centroid_workaround;

   brw->must_use_separate_stencil = devinfo->must_use_separate_stencil;
   brw->has_swizzling = screen->hw_has_swizzling;

   isl_device_init(&brw->isl_dev, devinfo, screen->hw_has_swizzling);

   brw->vs.base.stage = MESA_SHADER_VERTEX;
   brw->tcs.base.stage = MESA_SHADER_TESS_CTRL;
   brw->tes.base.stage = MESA_SHADER_TESS_EVAL;
   brw->gs.base.stage = MESA_SHADER_GEOMETRY;
   brw->wm.base.stage = MESA_SHADER_FRAGMENT;
   if (brw->gen >= 8) {
      gen8_init_vtable_surface_functions(brw);
      brw->vtbl.emit_depth_stencil_hiz = gen8_emit_depth_stencil_hiz;
   } else if (brw->gen >= 7) {
      gen7_init_vtable_surface_functions(brw);
      brw->vtbl.emit_depth_stencil_hiz = gen7_emit_depth_stencil_hiz;
   } else if (brw->gen >= 6) {
      gen6_init_vtable_surface_functions(brw);
      brw->vtbl.emit_depth_stencil_hiz = gen6_emit_depth_stencil_hiz;
   } else {
      gen4_init_vtable_surface_functions(brw);
      brw->vtbl.emit_depth_stencil_hiz = brw_emit_depth_stencil_hiz;
   }

   brw_init_driver_functions(brw, &functions);

   if (notify_reset)
      functions.GetGraphicsResetStatus = brw_get_graphics_reset_status;

   struct gl_context *ctx = &brw->ctx;

   if (!_mesa_initialize_context(ctx, api, mesaVis, shareCtx, &functions)) {
      *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
      fprintf(stderr, "%s: failed to init mesa context\n", __func__);
      intelDestroyContext(driContextPriv);
      return false;
   }

   driContextSetFlags(ctx, flags);

   /* Initialize the software rasterizer and helper modules.
    *
    * As of GL 3.1 core, the gen4+ driver doesn't need the swrast context for
    * software fallbacks (which we have to support on legacy GL to do weird
    * glDrawPixels(), glBitmap(), and other functions).
    */
   if (api != API_OPENGL_CORE && api != API_OPENGLES2) {
      _swrast_CreateContext(ctx);
   }

   _vbo_CreateContext(ctx);
   if (ctx->swrast_context) {
      _tnl_CreateContext(ctx);
      TNL_CONTEXT(ctx)->Driver.RunPipeline = _tnl_run_pipeline;
      _swsetup_CreateContext(ctx);

      /* Configure swrast to match hardware characteristics: */
      _swrast_allow_pixel_fog(ctx, false);
      _swrast_allow_vertex_fog(ctx, true);
   }

   _mesa_meta_init(ctx);

   brw_process_driconf_options(brw);

   if (INTEL_DEBUG & DEBUG_PERF)
      brw->perf_debug = true;

   brw_initialize_cs_context_constants(brw);
   brw_initialize_context_constants(brw);

   ctx->Const.ResetStrategy = notify_reset
      ? GL_LOSE_CONTEXT_ON_RESET_ARB : GL_NO_RESET_NOTIFICATION_ARB;

   /* Reinitialize the context point state. It depends on ctx->Const values. */
   _mesa_init_point(ctx);

   intel_fbo_init(brw);

   intel_batchbuffer_init(&brw->batch, brw->bufmgr, brw->has_llc);

   if (brw->gen >= 6) {
      /* Create a new hardware context. Using a hardware context means that
       * our GPU state will be saved/restored on context switch, allowing us
       * to assume that the GPU is in the same state we left it in.
       *
       * This is required for transform feedback buffer offsets, query objects,
       * and also allows us to reduce how much state we have to emit.
       */
      brw->hw_ctx = drm_intel_gem_context_create(brw->bufmgr);

      if (!brw->hw_ctx) {
         fprintf(stderr, "Gen6+ requires Kernel 3.6 or later.\n");
         intelDestroyContext(driContextPriv);
         return false;
      }
   }

   if (brw_init_pipe_control(brw, devinfo)) {
      *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
      intelDestroyContext(driContextPriv);
      return false;
   }

   brw_init_state(brw);

   intelInitExtensions(ctx);

   brw_init_surface_formats(brw);

   brw_blorp_init(brw);

   brw->urb.size = devinfo->urb.size;

   if (brw->gen == 6)
      brw->urb.gs_present = false;

   brw->prim_restart.in_progress = false;
   brw->prim_restart.enable_cut_index = false;
   brw->gs.enabled = false;
   brw->sf.viewport_transform_enable = true;
   brw->clip.viewport_count = 1;

   brw->predicate.state = BRW_PREDICATE_STATE_RENDER;

   brw->max_gtt_map_object_size = screen->max_gtt_map_object_size;

   ctx->VertexProgram._MaintainTnlProgram = true;
   ctx->FragmentProgram._MaintainTexEnvProgram = true;

   brw_draw_init( brw );

   if ((flags & __DRI_CTX_FLAG_DEBUG) != 0) {
      /* Turn on some extra GL_ARB_debug_output generation. */
      brw->perf_debug = true;
   }

   if ((flags & __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS) != 0) {
      ctx->Const.ContextFlags |= GL_CONTEXT_FLAG_ROBUST_ACCESS_BIT_ARB;
      ctx->Const.RobustAccess = GL_TRUE;
   }

   if (INTEL_DEBUG & DEBUG_SHADER_TIME)
      brw_init_shader_time(brw);

   _mesa_compute_version(ctx);

   _mesa_initialize_dispatch_tables(ctx);
   _mesa_initialize_vbo_vtxfmt(ctx);

   if (ctx->Extensions.INTEL_performance_query)
      brw_init_performance_queries(brw);

   vbo_use_buffer_objects(ctx);
   vbo_always_unmap_buffers(ctx);

   return true;
}

void
intelDestroyContext(__DRIcontext * driContextPriv)
{
   struct brw_context *brw =
      (struct brw_context *) driContextPriv->driverPrivate;
   struct gl_context *ctx = &brw->ctx;

   /* Dump a final BMP in case the application doesn't call SwapBuffers */
   if (INTEL_DEBUG & DEBUG_AUB) {
      intel_batchbuffer_flush(brw);
      aub_dump_bmp(&brw->ctx);
   }

   _mesa_meta_free(&brw->ctx);

   if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
      /* Force a report. */
      brw->shader_time.report_time = 0;

      brw_collect_and_report_shader_time(brw);
      brw_destroy_shader_time(brw);
   }

   blorp_finish(&brw->blorp);

   brw_destroy_state(brw);
   brw_draw_destroy(brw);

   drm_intel_bo_unreference(brw->curbe.curbe_bo);
   if (brw->vs.base.scratch_bo)
      drm_intel_bo_unreference(brw->vs.base.scratch_bo);
   if (brw->tcs.base.scratch_bo)
      drm_intel_bo_unreference(brw->tcs.base.scratch_bo);
   if (brw->tes.base.scratch_bo)
      drm_intel_bo_unreference(brw->tes.base.scratch_bo);
   if (brw->gs.base.scratch_bo)
      drm_intel_bo_unreference(brw->gs.base.scratch_bo);
   if (brw->wm.base.scratch_bo)
      drm_intel_bo_unreference(brw->wm.base.scratch_bo);

   drm_intel_gem_context_destroy(brw->hw_ctx);

   if (ctx->swrast_context) {
      _swsetup_DestroyContext(&brw->ctx);
      _tnl_DestroyContext(&brw->ctx);
   }
   _vbo_DestroyContext(&brw->ctx);

   if (ctx->swrast_context)
      _swrast_DestroyContext(&brw->ctx);

   brw_fini_pipe_control(brw);
   intel_batchbuffer_free(&brw->batch);

   drm_intel_bo_unreference(brw->throttle_batch[1]);
   drm_intel_bo_unreference(brw->throttle_batch[0]);
   brw->throttle_batch[1] = NULL;
   brw->throttle_batch[0] = NULL;

   driDestroyOptionCache(&brw->optionCache);

   /* free the Mesa context */
   _mesa_free_context_data(&brw->ctx);

   ralloc_free(brw);
   driContextPriv->driverPrivate = NULL;
}

GLboolean
intelUnbindContext(__DRIcontext * driContextPriv)
{
   /* Unset current context and dispatch table */
   _mesa_make_current(NULL, NULL, NULL);

   return true;
}

/**
 * Fixes up the context for GLES2/3 with our default-to-sRGB-capable behavior
 * on window system framebuffers.
 *
 * Desktop GL is fairly reasonable in its handling of sRGB: You can ask if
 * your renderbuffer can do sRGB encode, and you can flip a switch that does
 * sRGB encode if the renderbuffer can handle it. You can ask specifically
 * for a visual where you're guaranteed to be capable, but it turns out that
 * everyone just makes all their ARGB8888 visuals capable and doesn't offer
 * incapable ones, because there's no difference between the two in resources
 * used. Applications thus get built that accidentally rely on the default
 * visual choice being sRGB, so we make ours sRGB capable. Everything sounds
 * great.
 *
 * But for GLES2/3, they decided that it was silly to not turn on sRGB encode
 * for sRGB renderbuffers you made with the GL_EXT_texture_sRGB equivalent.
 * So they removed the enable knob and made it "if the renderbuffer is sRGB
 * capable, do sRGB encode". Then, for your window system renderbuffers, you
 * can ask for sRGB visuals and get sRGB encode, or not ask for sRGB visuals
 * and get no sRGB encode (assuming that both kinds of visual are available).
 * Thus our choice to support sRGB by default on our visuals for desktop would
 * result in broken rendering of GLES apps that aren't expecting sRGB encode.
 *
 * Unfortunately, renderbuffer setup happens before a context is created. So
 * in intel_screen.c we always set up sRGB, and here, if you're a GLES2/3
 * context (without an sRGB visual, though we don't have sRGB visuals exposed
 * yet), we go turn that back off before anyone finds out.
 */
static void
intel_gles3_srgb_workaround(struct brw_context *brw,
                            struct gl_framebuffer *fb)
{
   struct gl_context *ctx = &brw->ctx;

   if (_mesa_is_desktop_gl(ctx) || !fb->Visual.sRGBCapable)
      return;

   /* Some day when we support the sRGB capable bit on visuals available for
    * GLES, we'll need to respect that and not disable things here.
    */
   fb->Visual.sRGBCapable = false;
   for (int i = 0; i < BUFFER_COUNT; i++) {
      struct gl_renderbuffer *rb = fb->Attachment[i].Renderbuffer;
      if (rb)
         rb->Format = _mesa_get_srgb_format_linear(rb->Format);
   }
}

GLboolean
intelMakeCurrent(__DRIcontext * driContextPriv,
                 __DRIdrawable * driDrawPriv,
                 __DRIdrawable * driReadPriv)
{
   struct brw_context *brw;
   GET_CURRENT_CONTEXT(curCtx);

   if (driContextPriv)
      brw = (struct brw_context *) driContextPriv->driverPrivate;
   else
      brw = NULL;

   /* According to the glXMakeCurrent() man page: "Pending commands to
    * the previous context, if any, are flushed before it is released."
    * But only flush if we're actually changing contexts.
    */
   if (brw_context(curCtx) && brw_context(curCtx) != brw) {
      _mesa_flush(curCtx);
   }

   if (driContextPriv) {
      struct gl_context *ctx = &brw->ctx;
      struct gl_framebuffer *fb, *readFb;

      if (driDrawPriv == NULL) {
         fb = _mesa_get_incomplete_framebuffer();
      } else {
         fb = driDrawPriv->driverPrivate;
         driContextPriv->dri2.draw_stamp = driDrawPriv->dri2.stamp - 1;
      }

      if (driReadPriv == NULL) {
         readFb = _mesa_get_incomplete_framebuffer();
      } else {
         readFb = driReadPriv->driverPrivate;
         driContextPriv->dri2.read_stamp = driReadPriv->dri2.stamp - 1;
      }

      /* The sRGB workaround changes the renderbuffer's format. We must change
       * the format before the renderbuffer's miptree gets allocated, otherwise
       * the formats of the renderbuffer and its miptree will differ.
       */
      intel_gles3_srgb_workaround(brw, fb);
      intel_gles3_srgb_workaround(brw, readFb);

      /* If the context viewport hasn't been initialized, force a call out to
       * the loader to get buffers so we have a drawable size for the initial
       * viewport.
       */
      if (!brw->ctx.ViewportInitialized)
         intel_prepare_render(brw);

      _mesa_make_current(ctx, fb, readFb);
   } else {
      _mesa_make_current(NULL, NULL, NULL);
   }

   return true;
}

void
intel_resolve_for_dri2_flush(struct brw_context *brw,
                             __DRIdrawable *drawable)
{
   if (brw->gen < 6) {
      /* MSAA and fast color clear are not supported, so don't waste time
       * checking whether a resolve is needed.
       */
      return;
   }

   struct gl_framebuffer *fb = drawable->driverPrivate;
   struct intel_renderbuffer *rb;

   /* Usually, only the back buffer will need to be downsampled. However,
    * the front buffer will also need it if the user has rendered into it.
    */
   static const gl_buffer_index buffers[2] = {
      BUFFER_BACK_LEFT,
      BUFFER_FRONT_LEFT,
   };

   for (int i = 0; i < 2; ++i) {
      rb = intel_get_renderbuffer(fb, buffers[i]);
      if (rb == NULL || rb->mt == NULL)
         continue;
      if (rb->mt->num_samples <= 1) {
         assert(rb->mt_layer == 0 && rb->mt_level == 0 &&
                rb->layer_count == 1);
         intel_miptree_resolve_color(brw, rb->mt, 0, 0, 1, 0);
      } else {
         intel_renderbuffer_downsample(brw, rb);
      }
   }
}

static unsigned
intel_bits_per_pixel(const struct intel_renderbuffer *rb)
{
   return _mesa_get_format_bytes(intel_rb_format(rb)) * 8;
}

static void
intel_query_dri2_buffers(struct brw_context *brw,
                         __DRIdrawable *drawable,
                         __DRIbuffer **buffers,
                         int *buffer_count);

static void
intel_process_dri2_buffer(struct brw_context *brw,
                          __DRIdrawable *drawable,
                          __DRIbuffer *buffer,
                          struct intel_renderbuffer *rb,
                          const char *buffer_name);

static void
intel_update_image_buffers(struct brw_context *brw, __DRIdrawable *drawable);

static void
intel_update_dri2_buffers(struct brw_context *brw, __DRIdrawable *drawable)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;
   struct intel_renderbuffer *rb;
   __DRIbuffer *buffers = NULL;
   int i, count;
   const char *region_name;

   /* Set this up front, so that in case our buffers get invalidated
    * while we're getting new buffers, we don't clobber the stamp and
    * thus ignore the invalidate. */
   drawable->lastStamp = drawable->dri2.stamp;

   if (unlikely(INTEL_DEBUG & DEBUG_DRI))
      fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);

   intel_query_dri2_buffers(brw, drawable, &buffers, &count);

   if (buffers == NULL)
      return;

   for (i = 0; i < count; i++) {
      switch (buffers[i].attachment) {
      case __DRI_BUFFER_FRONT_LEFT:
         rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
         region_name = "dri2 front buffer";
         break;

      case __DRI_BUFFER_FAKE_FRONT_LEFT:
         rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
         region_name = "dri2 fake front buffer";
         break;

      case __DRI_BUFFER_BACK_LEFT:
         rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
         region_name = "dri2 back buffer";
         break;

      case __DRI_BUFFER_DEPTH:
      case __DRI_BUFFER_HIZ:
      case __DRI_BUFFER_DEPTH_STENCIL:
      case __DRI_BUFFER_STENCIL:
      case __DRI_BUFFER_ACCUM:
      default:
         fprintf(stderr,
                 "unhandled buffer attach event, attachment type %d\n",
                 buffers[i].attachment);
         return;
      }

      intel_process_dri2_buffer(brw, drawable, &buffers[i], rb, region_name);
   }
}

void
intel_update_renderbuffers(__DRIcontext *context, __DRIdrawable *drawable)
{
   struct brw_context *brw = context->driverPrivate;
   __DRIscreen *dri_screen = brw->screen->driScrnPriv;

   /* Set this up front, so that in case our buffers get invalidated
    * while we're getting new buffers, we don't clobber the stamp and
    * thus ignore the invalidate. */
   drawable->lastStamp = drawable->dri2.stamp;

   if (unlikely(INTEL_DEBUG & DEBUG_DRI))
      fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);

   if (dri_screen->image.loader)
      intel_update_image_buffers(brw, drawable);
   else
      intel_update_dri2_buffers(brw, drawable);

   driUpdateFramebufferSize(&brw->ctx, drawable);
}

/**
 * intel_prepare_render should be called anywhere that current read/drawbuffer
 * state is required.
 */
void
intel_prepare_render(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   __DRIcontext *driContext = brw->driContext;
   __DRIdrawable *drawable;

   drawable = driContext->driDrawablePriv;
   if (drawable && drawable->dri2.stamp != driContext->dri2.draw_stamp) {
      if (drawable->lastStamp != drawable->dri2.stamp)
         intel_update_renderbuffers(driContext, drawable);
      driContext->dri2.draw_stamp = drawable->dri2.stamp;
   }

   drawable = driContext->driReadablePriv;
   if (drawable && drawable->dri2.stamp != driContext->dri2.read_stamp) {
      if (drawable->lastStamp != drawable->dri2.stamp)
         intel_update_renderbuffers(driContext, drawable);
      driContext->dri2.read_stamp = drawable->dri2.stamp;
   }

   /* If we're currently rendering to the front buffer, the rendering
    * that will happen next will probably dirty the front buffer. So
    * mark it as dirty here.
    */
   if (_mesa_is_front_buffer_drawing(ctx->DrawBuffer))
      brw->front_buffer_dirty = true;
}

/**
 * \brief Query DRI2 to obtain a DRIdrawable's buffers.
 *
 * To determine which DRI buffers to request, examine the renderbuffers
 * attached to the drawable's framebuffer. Then request the buffers with
 * DRI2GetBuffers() or DRI2GetBuffersWithFormat().
 *
 * This is called from intel_update_renderbuffers().
 *
 * \param drawable      Drawable whose buffers are queried.
 * \param buffers       [out] List of buffers returned by DRI2 query.
 * \param buffer_count  [out] Number of buffers returned.
 *
 * \see intel_update_renderbuffers()
 * \see DRI2GetBuffers()
 * \see DRI2GetBuffersWithFormat()
 */
static void
intel_query_dri2_buffers(struct brw_context *brw,
                         __DRIdrawable *drawable,
                         __DRIbuffer **buffers,
                         int *buffer_count)
{
   __DRIscreen *dri_screen = brw->screen->driScrnPriv;
   struct gl_framebuffer *fb = drawable->driverPrivate;
   int i = 0;
   unsigned attachments[8];

   struct intel_renderbuffer *front_rb;
   struct intel_renderbuffer *back_rb;

   front_rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
   back_rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);

   memset(attachments, 0, sizeof(attachments));
   if ((_mesa_is_front_buffer_drawing(fb) ||
        _mesa_is_front_buffer_reading(fb) ||
        !back_rb) && front_rb) {
      /* If a fake front buffer is in use, then querying for
       * __DRI_BUFFER_FRONT_LEFT will cause the server to copy the image from
       * the real front buffer to the fake front buffer. So before doing the
       * query, we need to make sure all the pending drawing has landed in the
       * real front buffer.
       */
      intel_batchbuffer_flush(brw);
      intel_flush_front(&brw->ctx);

      attachments[i++] = __DRI_BUFFER_FRONT_LEFT;
      attachments[i++] = intel_bits_per_pixel(front_rb);
   } else if (front_rb && brw->front_buffer_dirty) {
      /* We have pending front buffer rendering, but we aren't querying for a
       * front buffer. If the front buffer we have is a fake front buffer,
       * the X server is going to throw it away when it processes the query.
       * So before doing the query, make sure all the pending drawing has
       * landed in the real front buffer.
       */
      intel_batchbuffer_flush(brw);
      intel_flush_front(&brw->ctx);
   }

   if (back_rb) {
      attachments[i++] = __DRI_BUFFER_BACK_LEFT;
      attachments[i++] = intel_bits_per_pixel(back_rb);
   }

   assert(i <= ARRAY_SIZE(attachments));

   *buffers =
      dri_screen->dri2.loader->getBuffersWithFormat(drawable,
                                                    &drawable->w,
                                                    &drawable->h,
                                                    attachments, i / 2,
                                                    buffer_count,
                                                    drawable->loaderPrivate);
}

/**
 * \brief Assign a DRI buffer's DRM region to a renderbuffer.
 *
 * This is called from intel_update_renderbuffers().
 *
 * DRI buffers whose attachment point is DRI2BufferStencil or
 * DRI2BufferDepthStencil are handled as special cases.
 *
 * \param buffer_name is a human readable name, such as "dri2 front buffer",
 *        that is passed to drm_intel_bo_gem_create_from_name().
 *
 * \see intel_update_renderbuffers()
 */
static void
intel_process_dri2_buffer(struct brw_context *brw,
                          __DRIdrawable *drawable,
                          __DRIbuffer *buffer,
                          struct intel_renderbuffer *rb,
                          const char *buffer_name)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;
   drm_intel_bo *bo;

   if (!rb)
      return;

   unsigned num_samples = rb->Base.Base.NumSamples;

   /* We try to avoid closing and reopening the same BO name, because the first
    * use of a mapping of the buffer involves a bunch of page faulting which is
    * moderately expensive.
    */
   struct intel_mipmap_tree *last_mt;
   if (num_samples == 0)
      last_mt = rb->mt;
   else
      last_mt = rb->singlesample_mt;

   uint32_t old_name = 0;
   if (last_mt) {
      /* The bo already has a name because the miptree was created by a
       * previous call to intel_process_dri2_buffer(). If a bo already has a
       * name, then drm_intel_bo_flink() is a low-cost getter. It does not
       * create a new name.
       */
      drm_intel_bo_flink(last_mt->bo, &old_name);
   }

   if (old_name == buffer->name)
      return;

   if (unlikely(INTEL_DEBUG & DEBUG_DRI)) {
      fprintf(stderr,
              "attaching buffer %d, at %d, cpp %d, pitch %d\n",
              buffer->name, buffer->attachment,
              buffer->cpp, buffer->pitch);
   }

   bo = drm_intel_bo_gem_create_from_name(brw->bufmgr, buffer_name,
                                          buffer->name);
   if (!bo) {
      fprintf(stderr,
              "Failed to open BO for returned DRI2 buffer "
              "(%dx%d, %s, named %d).\n"
              "This is likely a bug in the X Server that will lead to a "
              "crash soon.\n",
              drawable->w, drawable->h, buffer_name, buffer->name);
      return;
   }

   intel_update_winsys_renderbuffer_miptree(brw, rb, bo,
                                            drawable->w, drawable->h,
                                            buffer->pitch);

   if (_mesa_is_front_buffer_drawing(fb) &&
       (buffer->attachment == __DRI_BUFFER_FRONT_LEFT ||
        buffer->attachment == __DRI_BUFFER_FAKE_FRONT_LEFT) &&
       rb->Base.Base.NumSamples > 1) {
      intel_renderbuffer_upsample(brw, rb);
   }

   drm_intel_bo_unreference(bo);
}

/**
 * \brief Query DRI image loader to obtain a DRIdrawable's buffers.
 *
 * To determine which DRI buffers to request, examine the renderbuffers
 * attached to the drawable's framebuffer. Then request the buffers from
 * the image loader.
 *
 * This is called from intel_update_renderbuffers().
 *
 * \param drawable      Drawable whose buffers are queried.
 * \param buffers       [out] List of buffers returned by DRI2 query.
 * \param buffer_count  [out] Number of buffers returned.
 *
 * \see intel_update_renderbuffers()
 */
static void
intel_update_image_buffer(struct brw_context *intel,
                          __DRIdrawable *drawable,
                          struct intel_renderbuffer *rb,
                          __DRIimage *buffer,
                          enum __DRIimageBufferMask buffer_type)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;

   if (!rb || !buffer->bo)
      return;

   unsigned num_samples = rb->Base.Base.NumSamples;

   /* Check and see if we're already bound to the right
    * buffer object.
    */
   struct intel_mipmap_tree *last_mt;
   if (num_samples == 0)
      last_mt = rb->mt;
   else
      last_mt = rb->singlesample_mt;

   if (last_mt && last_mt->bo == buffer->bo)
      return;

   intel_update_winsys_renderbuffer_miptree(intel, rb, buffer->bo,
                                            buffer->width, buffer->height,
                                            buffer->pitch);

   if (_mesa_is_front_buffer_drawing(fb) &&
       buffer_type == __DRI_IMAGE_BUFFER_FRONT &&
       rb->Base.Base.NumSamples > 1) {
      intel_renderbuffer_upsample(intel, rb);
   }
}
*brw
, __DRIdrawable
*drawable
)
1724 struct gl_framebuffer
*fb
= drawable
->driverPrivate
;
1725 __DRIscreen
*dri_screen
= brw
->screen
->driScrnPriv
;
1726 struct intel_renderbuffer
*front_rb
;
1727 struct intel_renderbuffer
*back_rb
;
1728 struct __DRIimageList images
;
1729 unsigned int format
;
1730 uint32_t buffer_mask
= 0;
1733 front_rb
= intel_get_renderbuffer(fb
, BUFFER_FRONT_LEFT
);
1734 back_rb
= intel_get_renderbuffer(fb
, BUFFER_BACK_LEFT
);
1737 format
= intel_rb_format(back_rb
);
1739 format
= intel_rb_format(front_rb
);
1743 if (front_rb
&& (_mesa_is_front_buffer_drawing(fb
) ||
1744 _mesa_is_front_buffer_reading(fb
) || !back_rb
)) {
1745 buffer_mask
|= __DRI_IMAGE_BUFFER_FRONT
;
1749 buffer_mask
|= __DRI_IMAGE_BUFFER_BACK
;
1751 ret
= dri_screen
->image
.loader
->getBuffers(drawable
,
1752 driGLFormatToImageFormat(format
),
1753 &drawable
->dri2
.stamp
,
1754 drawable
->loaderPrivate
,
1760 if (images
.image_mask
& __DRI_IMAGE_BUFFER_FRONT
) {
1761 drawable
->w
= images
.front
->width
;
1762 drawable
->h
= images
.front
->height
;
1763 intel_update_image_buffer(brw
,
1767 __DRI_IMAGE_BUFFER_FRONT
);
1769 if (images
.image_mask
& __DRI_IMAGE_BUFFER_BACK
) {
1770 drawable
->w
= images
.back
->width
;
1771 drawable
->h
= images
.back
->height
;
1772 intel_update_image_buffer(brw
,
1776 __DRI_IMAGE_BUFFER_BACK
);