/*
 Copyright 2003 VMware, Inc.
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */
#include "compiler/nir/nir.h"
#include "main/api_exec.h"
#include "main/context.h"
#include "main/fbobject.h"
#include "main/extensions.h"
#include "main/imports.h"
#include "main/macros.h"
#include "main/points.h"
#include "main/version.h"
#include "main/vtxfmt.h"
#include "main/texobj.h"
#include "main/framebuffer.h"

#include "vbo/vbo_context.h"

#include "drivers/common/driverfuncs.h"
#include "drivers/common/meta.h"

#include "brw_context.h"
#include "brw_defines.h"
#include "brw_blorp.h"
#include "brw_state.h"

#include "intel_batchbuffer.h"
#include "intel_buffer_objects.h"
#include "intel_buffers.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
#include "intel_pixel.h"
#include "intel_image.h"
#include "intel_tex.h"
#include "intel_tex_obj.h"

#include "swrast_setup/swrast_setup.h"
#include "tnl/tnl.h"
#include "tnl/t_pipeline.h"
#include "util/ralloc.h"
#include "util/debug.h"
/***************************************
 * Mesa's Driver Functions
 ***************************************/

const char *const brw_vendor_string = "Intel Open Source Technology Center";
static const char *
get_bsw_model(const struct intel_screen *screen)
{
   switch (screen->eu_total) {
   case 16:
      return "405";
   case 12:
      return "400";
   default:
      return "   ";
   }
}
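
/* Build the GL_RENDERER string from the PCI ID table.  Braswell table
 * entries carry an "XXX" placeholder that is patched with the model
 * number derived from the EU count above.
 */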
static const char *
brw_get_renderer_string(const struct intel_screen *screen)
{
   const char *chipset;
   static char buffer[128];
   char *bsw = NULL;

   switch (screen->deviceID) {
#undef CHIPSET
#define CHIPSET(id, symbol, str) case id: chipset = str; break;
#include "pci_ids/i965_pci_ids.h"
   default:
      chipset = "Unknown Intel Chipset";
      break;
   }

   /* Braswell branding is funny, so we have to fix it up here */
   if (screen->deviceID == 0x22B1) {
      bsw = strdup(chipset);
      char *needle = strstr(bsw, "XXX");
      if (needle) {
         memcpy(needle, get_bsw_model(screen), 3);
         chipset = bsw;
      }
   }

   (void) driGetRendererString(buffer, chipset, 0);
   free(bsw);
   return buffer;
}
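
/* GetString() driver hook: report our vendor and renderer strings. */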
static const GLubyte *
intel_get_string(struct gl_context * ctx, GLenum name)
{
   const struct brw_context *const brw = brw_context(ctx);

   switch (name) {
   case GL_VENDOR:
      return (GLubyte *) brw_vendor_string;

   case GL_RENDERER:
      return
         (GLubyte *) brw_get_renderer_string(brw->screen);

   default:
      return NULL;
   }
}
static void
intel_viewport(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);
   __DRIcontext *driContext = brw->driContext;

   if (_mesa_is_winsys_fbo(ctx->DrawBuffer)) {
      if (driContext->driDrawablePriv)
         dri2InvalidateDrawable(driContext->driDrawablePriv);
      if (driContext->driReadablePriv)
         dri2InvalidateDrawable(driContext->driReadablePriv);
   }
}
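
/* Re-derive per-framebuffer state.  Currently this just quantizes the
 * default sample count (ARB_framebuffer_no_attachments) to a value the
 * hardware supports.
 */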
static void
intel_update_framebuffer(struct gl_context *ctx,
                         struct gl_framebuffer *fb)
{
   struct brw_context *brw = brw_context(ctx);

   /* Quantize the derived default number of samples */
   fb->DefaultGeometry._NumSamples =
      intel_quantize_num_samples(brw->screen,
                                 fb->DefaultGeometry.NumSamples);
}
static bool
intel_disable_rb_aux_buffer(struct brw_context *brw, const struct brw_bo *bo)
{
   const struct gl_framebuffer *fb = brw->ctx.DrawBuffer;
   bool found = false;

   for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
      const struct intel_renderbuffer *irb =
         intel_renderbuffer(fb->_ColorDrawBuffers[i]);

      if (irb && irb->mt->bo == bo) {
         found = brw->draw_aux_buffer_disabled[i] = true;
      }
   }

   return found;
}
/* On Gen9 color buffers may be compressed by the hardware (lossless
 * compression). There are, however, format restrictions and care needs to be
 * taken that the sampler engine is capable of re-interpreting a buffer with
 * a format different from the one the buffer was originally written with.
 *
 * For example, SRGB formats are not compressible and the sampler engine isn't
 * capable of treating RGBA_UNORM as SRGB_ALPHA. In such a case the underlying
 * color buffer needs to be resolved so that the sampling surface can be
 * sampled as non-compressed (i.e., without the auxiliary MCS buffer being
 * set).
 */
static bool
intel_texture_view_requires_resolve(struct brw_context *brw,
                                    struct intel_texture_object *intel_tex)
{
   if (brw->gen < 9 ||
       !intel_miptree_is_lossless_compressed(brw, intel_tex->mt))
      return false;

   const enum isl_format isl_format =
      brw_isl_format_for_mesa_format(intel_tex->_Format);

   if (isl_format_supports_ccs_e(&brw->screen->devinfo, isl_format))
      return false;

   perf_debug("Incompatible sampling format (%s) for rbc (%s)\n",
              _mesa_get_format_name(intel_tex->_Format),
              _mesa_get_format_name(intel_tex->mt->format));

   if (intel_disable_rb_aux_buffer(brw, intel_tex->mt->bo))
      perf_debug("Sampling renderbuffer with non-compressible format - "
                 "turning off compression");

   return true;
}
static void
intel_update_state(struct gl_context * ctx, GLuint new_state)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_texture_object *tex_obj;
   struct intel_renderbuffer *depth_irb;

   if (ctx->swrast_context)
      _swrast_InvalidateState(ctx, new_state);
   _vbo_InvalidateState(ctx, new_state);

   brw->NewGLState |= new_state;

   _mesa_unlock_context_textures(ctx);

   /* Resolve the depth buffer's HiZ buffer. */
   depth_irb = intel_get_renderbuffer(ctx->DrawBuffer, BUFFER_DEPTH);
   if (depth_irb)
      intel_renderbuffer_resolve_hiz(brw, depth_irb);

   memset(brw->draw_aux_buffer_disabled, 0,
          sizeof(brw->draw_aux_buffer_disabled));

   /* Resolve depth buffer and render cache of each enabled texture. */
   int maxEnabledUnit = ctx->Texture._MaxEnabledTexImageUnit;
   for (int i = 0; i <= maxEnabledUnit; i++) {
      if (!ctx->Texture.Unit[i]._Current)
         continue;
      tex_obj = intel_texture_object(ctx->Texture.Unit[i]._Current);
      if (!tex_obj || !tex_obj->mt)
         continue;

      /* We need intel_texture_object::_Format to be valid */
      intel_finalize_mipmap_tree(brw, i);

      if (intel_miptree_sample_with_hiz(brw, tex_obj->mt))
         intel_miptree_all_slices_resolve_hiz(brw, tex_obj->mt);
      else
         intel_miptree_all_slices_resolve_depth(brw, tex_obj->mt);
      /* Sampling engine understands lossless compression and resolving
       * those surfaces should be skipped for performance reasons.
       */
      const int flags = intel_texture_view_requires_resolve(brw, tex_obj) ?
                           0 : INTEL_MIPTREE_IGNORE_CCS_E;
      intel_miptree_all_slices_resolve_color(brw, tex_obj->mt, flags);
      brw_render_cache_set_check_flush(brw, tex_obj->mt->bo);

      if (tex_obj->base.StencilSampling ||
          tex_obj->mt->format == MESA_FORMAT_S_UINT8) {
         intel_update_r8stencil(brw, tex_obj->mt);
      }
   }

   /* Resolve color for each active shader image. */
   for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
      const struct gl_program *prog = ctx->_Shader->CurrentProgram[i];

      if (unlikely(prog && prog->info.num_images)) {
         for (unsigned j = 0; j < prog->info.num_images; j++) {
            struct gl_image_unit *u =
               &ctx->ImageUnits[prog->sh.ImageUnits[j]];
            tex_obj = intel_texture_object(u->TexObj);

            if (tex_obj && tex_obj->mt) {
               /* Access to images is implemented using indirect messages
                * against data port. Normal render target write understands
                * lossless compression but unfortunately the typed/untyped
                * read/write interface doesn't. Therefore even lossless
                * compressed surfaces need to be resolved prior to accessing
                * them. Hence skip setting INTEL_MIPTREE_IGNORE_CCS_E.
                */
               intel_miptree_all_slices_resolve_color(brw, tex_obj->mt, 0);

               if (intel_miptree_is_lossless_compressed(brw, tex_obj->mt) &&
                   intel_disable_rb_aux_buffer(brw, tex_obj->mt->bo)) {
                  perf_debug("Using renderbuffer as shader image - turning "
                             "off lossless compression");
               }

               brw_render_cache_set_check_flush(brw, tex_obj->mt->bo);
            }
         }
      }
   }

   /* Resolve color buffers for non-coherent framebuffer fetch. */
   if (!ctx->Extensions.MESA_shader_framebuffer_fetch &&
       ctx->FragmentProgram._Current &&
       ctx->FragmentProgram._Current->info.outputs_read) {
      const struct gl_framebuffer *fb = ctx->DrawBuffer;

      for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
         const struct intel_renderbuffer *irb =
            intel_renderbuffer(fb->_ColorDrawBuffers[i]);

         if (irb &&
             intel_miptree_resolve_color(brw, irb->mt,
                                         irb->mt_level, 1,
                                         irb->mt_layer, irb->layer_count,
                                         INTEL_MIPTREE_IGNORE_CCS_E))
            brw_render_cache_set_check_flush(brw, irb->mt->bo);
      }
   }

   /* If FRAMEBUFFER_SRGB is used on Gen9+ then we need to resolve any of the
    * single-sampled color renderbuffers because the CCS buffer isn't
    * supported for SRGB formats. This only matters if FRAMEBUFFER_SRGB is
    * enabled because otherwise the surface state will be programmed with the
    * linear equivalent format anyway.
    */
   if (brw->gen >= 9 && ctx->Color.sRGBEnabled) {
      struct gl_framebuffer *fb = ctx->DrawBuffer;
      for (int i = 0; i < fb->_NumColorDrawBuffers; i++) {
         struct gl_renderbuffer *rb = fb->_ColorDrawBuffers[i];

         if (rb == NULL)
            continue;

         struct intel_renderbuffer *irb = intel_renderbuffer(rb);
         struct intel_mipmap_tree *mt = irb->mt;

         if (mt == NULL ||
             mt->num_samples > 1 ||
             _mesa_get_srgb_format_linear(mt->format) == mt->format)
            continue;

         /* Lossless compression is not supported for SRGB formats, it
          * should be impossible to get here with such surfaces.
          */
         assert(!intel_miptree_is_lossless_compressed(brw, mt));
         intel_miptree_all_slices_resolve_color(brw, mt, 0);
         brw_render_cache_set_check_flush(brw, mt->bo);
      }
   }

   _mesa_lock_context_textures(ctx);

   if (new_state & _NEW_BUFFERS) {
      intel_update_framebuffer(ctx, ctx->DrawBuffer);
      if (ctx->DrawBuffer != ctx->ReadBuffer)
         intel_update_framebuffer(ctx, ctx->ReadBuffer);
   }
}
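
/* Pick the flushFrontBuffer hook from whichever loader interface the
 * screen was set up with (image loader vs. classic DRI2 loader).
 */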
#define flushFront(screen)      ((screen)->image.loader ? (screen)->image.loader->flushFrontBuffer : (screen)->dri2.loader->flushFrontBuffer)
static void
intel_flush_front(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);
   __DRIcontext *driContext = brw->driContext;
   __DRIdrawable *driDrawable = driContext->driDrawablePriv;
   __DRIscreen *const dri_screen = brw->screen->driScrnPriv;

   if (brw->front_buffer_dirty && _mesa_is_winsys_fbo(ctx->DrawBuffer)) {
      if (flushFront(dri_screen) && driDrawable &&
          driDrawable->loaderPrivate) {

         /* Resolve before flushing FAKE_FRONT_LEFT to FRONT_LEFT.
          *
          * This potentially resolves both front and back buffer. It
          * is unnecessary to resolve the back, but harms nothing except
          * performance. And no one cares about front-buffer render
          * performance.
          */
         intel_resolve_for_dri2_flush(brw, driDrawable);
         intel_batchbuffer_flush(brw);

         flushFront(dri_screen)(driDrawable, driDrawable->loaderPrivate);

         /* We set the dirty bit in intel_prepare_render() if we're
          * front buffer rendering once we get there.
          */
         brw->front_buffer_dirty = false;
      }
   }
}
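
/* Flush() driver hook: submit the pending batch and, if we've done any
 * front-buffer rendering, push the fake front's contents to the real
 * front buffer.
 */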
static void
intel_glFlush(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);

   intel_batchbuffer_flush(brw);
   intel_flush_front(ctx);

   brw->need_flush_throttle = true;
}
static void
intel_finish(struct gl_context * ctx)
{
   struct brw_context *brw = brw_context(ctx);

   intel_glFlush(ctx);

   if (brw->batch.last_bo)
      brw_bo_wait_rendering(brw, brw->batch.last_bo);
}
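
/* Fill in the dd_function_table with our driver hooks; query-object and
 * transform-feedback entry points are selected per hardware generation.
 */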
static void
brw_init_driver_functions(struct brw_context *brw,
                          struct dd_function_table *functions)
{
   _mesa_init_driver_functions(functions);

   /* GLX uses DRI2 invalidate events to handle window resizing.
    * Unfortunately, EGL does not - libEGL is written in XCB (not Xlib),
    * which doesn't provide a mechanism for snooping the event queues.
    *
    * So EGL still relies on viewport hacks to handle window resizing.
    * This should go away with DRI3000.
    */
   if (!brw->driContext->driScreenPriv->dri2.useInvalidate)
      functions->Viewport = intel_viewport;

   functions->Flush = intel_glFlush;
   functions->Finish = intel_finish;
   functions->GetString = intel_get_string;
   functions->UpdateState = intel_update_state;

   intelInitTextureFuncs(functions);
   intelInitTextureImageFuncs(functions);
   intelInitTextureSubImageFuncs(functions);
   intelInitTextureCopyImageFuncs(functions);
   intelInitCopyImageFuncs(functions);
   intelInitClearFuncs(functions);
   intelInitBufferFuncs(functions);
   intelInitPixelFuncs(functions);
   intelInitBufferObjectFuncs(functions);
   brw_init_syncobj_functions(functions);
   brw_init_object_purgeable_functions(functions);

   brwInitFragProgFuncs( functions );
   brw_init_common_queryobj_functions(functions);
   if (brw->gen >= 8 || brw->is_haswell)
      hsw_init_queryobj_functions(functions);
   else if (brw->gen >= 6)
      gen6_init_queryobj_functions(functions);
   else
      gen4_init_queryobj_functions(functions);
   brw_init_compute_functions(functions);
   brw_init_conditional_render_functions(functions);

   functions->QueryInternalFormat = brw_query_internal_format;

   functions->NewTransformFeedback = brw_new_transform_feedback;
   functions->DeleteTransformFeedback = brw_delete_transform_feedback;
   if (can_do_mi_math_and_lrr(brw->screen)) {
      functions->BeginTransformFeedback = hsw_begin_transform_feedback;
      functions->EndTransformFeedback = hsw_end_transform_feedback;
      functions->PauseTransformFeedback = hsw_pause_transform_feedback;
      functions->ResumeTransformFeedback = hsw_resume_transform_feedback;
   } else if (brw->gen >= 7) {
      functions->BeginTransformFeedback = gen7_begin_transform_feedback;
      functions->EndTransformFeedback = gen7_end_transform_feedback;
      functions->PauseTransformFeedback = gen7_pause_transform_feedback;
      functions->ResumeTransformFeedback = gen7_resume_transform_feedback;
      functions->GetTransformFeedbackVertexCount =
         brw_get_transform_feedback_vertex_count;
   } else {
      functions->BeginTransformFeedback = brw_begin_transform_feedback;
      functions->EndTransformFeedback = brw_end_transform_feedback;
      functions->PauseTransformFeedback = brw_pause_transform_feedback;
      functions->ResumeTransformFeedback = brw_resume_transform_feedback;
      functions->GetTransformFeedbackVertexCount =
         brw_get_transform_feedback_vertex_count;
   }

   if (brw->gen >= 6)
      functions->GetSamplePosition = gen6_get_sample_position;
}
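
/* Establish ctx->Const limits; most values depend on the hardware
 * generation and on which shader stages exist.
 */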
static void
brw_initialize_context_constants(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   const struct brw_compiler *compiler = brw->screen->compiler;

   const bool stage_exists[MESA_SHADER_STAGES] = {
      [MESA_SHADER_VERTEX] = true,
      [MESA_SHADER_TESS_CTRL] = brw->gen >= 7,
      [MESA_SHADER_TESS_EVAL] = brw->gen >= 7,
      [MESA_SHADER_GEOMETRY] = brw->gen >= 6,
      [MESA_SHADER_FRAGMENT] = true,
      [MESA_SHADER_COMPUTE] =
         ((ctx->API == API_OPENGL_COMPAT || ctx->API == API_OPENGL_CORE) &&
          ctx->Const.MaxComputeWorkGroupSize[0] >= 1024) ||
         (ctx->API == API_OPENGLES2 &&
          ctx->Const.MaxComputeWorkGroupSize[0] >= 128) ||
         _mesa_extension_override_enables.ARB_compute_shader,
   };

   unsigned num_stages = 0;
   for (int i = 0; i < MESA_SHADER_STAGES; i++) {
      if (stage_exists[i])
         num_stages++;
   }

   unsigned max_samplers =
      brw->gen >= 8 || brw->is_haswell ? BRW_MAX_TEX_UNIT : 16;

   ctx->Const.MaxDualSourceDrawBuffers = 1;
   ctx->Const.MaxDrawBuffers = BRW_MAX_DRAW_BUFFERS;
   ctx->Const.MaxCombinedShaderOutputResources =
      MAX_IMAGE_UNITS + BRW_MAX_DRAW_BUFFERS;

   /* The timestamp register we can read for glGetTimestamp() is
    * sometimes only 32 bits, before scaling to nanoseconds (depending
    * on the kernel).
    *
    * Once scaled to nanoseconds the timestamp would roll over at a
    * non-power-of-two, so an application couldn't use
    * GL_QUERY_COUNTER_BITS to handle rollover correctly.  Instead, we
    * report 36 bits and truncate at that (rolling over 5 times as
    * often as the HW counter), and when the 32-bit counter rolls
    * over, it happens to also be at a rollover in the reported value
    * from near (1<<36) to 0.
    *
    * The low 32 bits roll over in ~343 seconds.  Our 36-bit result
    * rolls over every ~69 seconds.
    */
   ctx->Const.QueryCounterBits.Timestamp = 36;

   ctx->Const.MaxTextureCoordUnits = 8; /* Mesa limit */
   ctx->Const.MaxImageUnits = MAX_IMAGE_UNITS;
   if (brw->gen >= 7) {
      ctx->Const.MaxRenderbufferSize = 16384;
      ctx->Const.MaxTextureLevels = MIN2(15 /* 16384 */, MAX_TEXTURE_LEVELS);
      ctx->Const.MaxCubeTextureLevels = 15; /* 16384 */
   } else {
      ctx->Const.MaxRenderbufferSize = 8192;
      ctx->Const.MaxTextureLevels = MIN2(14 /* 8192 */, MAX_TEXTURE_LEVELS);
      ctx->Const.MaxCubeTextureLevels = 14; /* 8192 */
   }
   ctx->Const.Max3DTextureLevels = 12; /* 2048 */
   ctx->Const.MaxArrayTextureLayers = brw->gen >= 7 ? 2048 : 512;
   ctx->Const.MaxTextureMbytes = 1536;
   ctx->Const.MaxTextureRectSize = 1 << 12;
   ctx->Const.MaxTextureMaxAnisotropy = 16.0;
   ctx->Const.MaxTextureLodBias = 15.0;
   ctx->Const.StripTextureBorder = true;
   if (brw->gen >= 7) {
      ctx->Const.MaxProgramTextureGatherComponents = 4;
      ctx->Const.MinProgramTextureGatherOffset = -32;
      ctx->Const.MaxProgramTextureGatherOffset = 31;
   } else if (brw->gen == 6) {
      ctx->Const.MaxProgramTextureGatherComponents = 1;
      ctx->Const.MinProgramTextureGatherOffset = -8;
      ctx->Const.MaxProgramTextureGatherOffset = 7;
   }

   ctx->Const.MaxUniformBlockSize = 65536;

   for (int i = 0; i < MESA_SHADER_STAGES; i++) {
      struct gl_program_constants *prog = &ctx->Const.Program[i];

      if (!stage_exists[i])
         continue;

      prog->MaxTextureImageUnits = max_samplers;

      prog->MaxUniformBlocks = BRW_MAX_UBO;
      prog->MaxCombinedUniformComponents =
         prog->MaxUniformComponents +
         ctx->Const.MaxUniformBlockSize / 4 * prog->MaxUniformBlocks;

      prog->MaxAtomicCounters = MAX_ATOMIC_COUNTERS;
      prog->MaxAtomicBuffers = BRW_MAX_ABO;
      prog->MaxImageUniforms = compiler->scalar_stage[i] ? BRW_MAX_IMAGES : 0;
      prog->MaxShaderStorageBlocks = BRW_MAX_SSBO;
   }

   ctx->Const.MaxTextureUnits =
      MIN2(ctx->Const.MaxTextureCoordUnits,
           ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits);

   ctx->Const.MaxUniformBufferBindings = num_stages * BRW_MAX_UBO;
   ctx->Const.MaxCombinedUniformBlocks = num_stages * BRW_MAX_UBO;
   ctx->Const.MaxCombinedAtomicBuffers = num_stages * BRW_MAX_ABO;
   ctx->Const.MaxCombinedShaderStorageBlocks = num_stages * BRW_MAX_SSBO;
   ctx->Const.MaxShaderStorageBufferBindings = num_stages * BRW_MAX_SSBO;
   ctx->Const.MaxCombinedTextureImageUnits = num_stages * max_samplers;
   ctx->Const.MaxCombinedImageUniforms = num_stages * BRW_MAX_IMAGES;

   /* Hardware only supports a limited number of transform feedback buffers.
    * So we need to override the Mesa default (which is based only on software
    * limits).
    */
   ctx->Const.MaxTransformFeedbackBuffers = BRW_MAX_SOL_BUFFERS;

   /* On Gen6, in the worst case, we use up one binding table entry per
    * transform feedback component (see comments above the definition of
    * BRW_MAX_SOL_BINDINGS, in brw_context.h), so we need to advertise a value
    * for MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS equal to
    * BRW_MAX_SOL_BINDINGS.
    *
    * In "separate components" mode, we need to divide this value by
    * BRW_MAX_SOL_BUFFERS, so that the total number of binding table entries
    * used up by all buffers will not exceed BRW_MAX_SOL_BINDINGS.
    */
   ctx->Const.MaxTransformFeedbackInterleavedComponents = BRW_MAX_SOL_BINDINGS;
   ctx->Const.MaxTransformFeedbackSeparateComponents =
      BRW_MAX_SOL_BINDINGS / BRW_MAX_SOL_BUFFERS;

   ctx->Const.AlwaysUseGetTransformFeedbackVertexCount =
      !can_do_mi_math_and_lrr(brw->screen);

   int max_samples;
   const int *msaa_modes = intel_supported_msaa_modes(brw->screen);
   const int clamp_max_samples =
      driQueryOptioni(&brw->optionCache, "clamp_max_samples");

   if (clamp_max_samples < 0) {
      max_samples = msaa_modes[0];
   } else {
      /* Select the largest supported MSAA mode that does not exceed
       * clamp_max_samples.
       */
      max_samples = 0;
      for (int i = 0; msaa_modes[i] != 0; ++i) {
         if (msaa_modes[i] <= clamp_max_samples) {
            max_samples = msaa_modes[i];
            break;
         }
      }
   }

   ctx->Const.MaxSamples = max_samples;
   ctx->Const.MaxColorTextureSamples = max_samples;
   ctx->Const.MaxDepthTextureSamples = max_samples;
   ctx->Const.MaxIntegerSamples = max_samples;
   ctx->Const.MaxImageSamples = 0;

   /* gen6_set_sample_maps() sets SampleMap{2,4,8}x variables which are used
    * to map indices of rectangular grid to sample numbers within a pixel.
    * These variables are used by GL_EXT_framebuffer_multisample_blit_scaled
    * extension implementation. For more details see the comment above
    * gen6_set_sample_maps() definition.
    */
   gen6_set_sample_maps(ctx);

   ctx->Const.MinLineWidth = 1.0;
   ctx->Const.MinLineWidthAA = 1.0;
   if (brw->gen >= 6) {
      ctx->Const.MaxLineWidth = 7.375;
      ctx->Const.MaxLineWidthAA = 7.375;
      ctx->Const.LineWidthGranularity = 0.125;
   } else {
      ctx->Const.MaxLineWidth = 7.0;
      ctx->Const.MaxLineWidthAA = 7.0;
      ctx->Const.LineWidthGranularity = 0.5;
   }

   /* For non-antialiased lines, we have to round the line width to the
    * nearest whole number. Make sure that we don't advertise a line
    * width that, when rounded, will be beyond the actual hardware
    * maximum.
    */
   assert(roundf(ctx->Const.MaxLineWidth) <= ctx->Const.MaxLineWidth);

   ctx->Const.MinPointSize = 1.0;
   ctx->Const.MinPointSizeAA = 1.0;
   ctx->Const.MaxPointSize = 255.0;
   ctx->Const.MaxPointSizeAA = 255.0;
   ctx->Const.PointSizeGranularity = 1.0;

   if (brw->gen >= 5 || brw->is_g4x)
      ctx->Const.MaxClipPlanes = 8;

   ctx->Const.GLSLTessLevelsAsInputs = true;
   ctx->Const.LowerTCSPatchVerticesIn = brw->gen >= 8;
   ctx->Const.LowerTESPatchVerticesIn = true;
   ctx->Const.PrimitiveRestartForPatches = true;

   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeInstructions = 16 * 1024;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxAluInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxTexInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxTexIndirections = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAluInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTexInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTexIndirections = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAttribs = 16;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTemps = 256;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAddressRegs = 1;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeParameters = 1024;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxEnvParams =
      MIN2(ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeParameters,
           ctx->Const.Program[MESA_SHADER_VERTEX].MaxEnvParams);

   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeInstructions = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAluInstructions = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTexInstructions = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTexIndirections = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAttribs = 12;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTemps = 256;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAddressRegs = 0;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeParameters = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxEnvParams =
      MIN2(ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeParameters,
           ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxEnvParams);

   /* Fragment shaders use real, 32-bit twos-complement integers for all
    * integer types.
    */
   ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.RangeMin = 31;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.RangeMax = 30;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.Precision = 0;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].HighInt = ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MediumInt = ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt;

   ctx->Const.Program[MESA_SHADER_VERTEX].LowInt.RangeMin = 31;
   ctx->Const.Program[MESA_SHADER_VERTEX].LowInt.RangeMax = 30;
   ctx->Const.Program[MESA_SHADER_VERTEX].LowInt.Precision = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].HighInt = ctx->Const.Program[MESA_SHADER_VERTEX].LowInt;
   ctx->Const.Program[MESA_SHADER_VERTEX].MediumInt = ctx->Const.Program[MESA_SHADER_VERTEX].LowInt;

   /* Gen6 converts quads to polygon in beginning of 3D pipeline,
    * but we're not sure how it's actually done for vertex order,
    * which affects the provoking vertex decision. Always use last vertex
    * convention for quad primitives, which works as expected for now.
    */
   if (brw->gen >= 6)
      ctx->Const.QuadsFollowProvokingVertexConvention = false;

   ctx->Const.NativeIntegers = true;
   ctx->Const.VertexID_is_zero_based = true;

   /* Regarding the CMP instruction, the Ivybridge PRM says:
    *
    *   "For each enabled channel 0b or 1b is assigned to the appropriate flag
    *    bit and 0/all zeros or all ones (e.g, byte 0xFF, word 0xFFFF, DWord
    *    0xFFFFFFFF) is assigned to dst."
    *
    * but PRMs for earlier generations say
    *
    *   "In dword format, one GRF may store up to 8 results. When the register
    *    is used later as a vector of Booleans, as only LSB at each channel
    *    contains meaning [sic] data, software should make sure all higher bits
    *    are masked out (e.g. by 'and-ing' an [sic] 0x01 constant)."
    *
    * We select the representation of a true boolean uniform to be ~0, and fix
    * the results of Gen <= 5 CMP instructions with -(result & 1).
    */
   ctx->Const.UniformBooleanTrue = ~0;

   /* From the gen4 PRM, volume 4 page 127:
    *
    *    "For SURFTYPE_BUFFER non-rendertarget surfaces, this field specifies
    *     the base address of the first element of the surface, computed in
    *     software by adding the surface base address to the byte offset of
    *     the element in the buffer."
    *
    * However, unaligned accesses are slower, so enforce buffer alignment.
    */
   ctx->Const.UniformBufferOffsetAlignment = 16;

   /* ShaderStorageBufferOffsetAlignment should be a cacheline (64 bytes) so
    * that we can safely have the CPU and GPU writing the same SSBO on
    * non-cachecoherent systems (our Atom CPUs). With UBOs, the GPU never
    * writes, so there's no problem. For an SSBO, the GPU and the CPU can
    * be updating disjoint regions of the buffer simultaneously and that will
    * break if the regions overlap the same cacheline.
    */
   ctx->Const.ShaderStorageBufferOffsetAlignment = 64;
   ctx->Const.TextureBufferOffsetAlignment = 16;
   ctx->Const.MaxTextureBufferSize = 128 * 1024 * 1024;

   if (brw->gen >= 6) {
      ctx->Const.MaxVarying = 32;
      ctx->Const.Program[MESA_SHADER_VERTEX].MaxOutputComponents = 128;
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxInputComponents = 64;
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxOutputComponents = 128;
      ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxInputComponents = 128;
      ctx->Const.Program[MESA_SHADER_TESS_CTRL].MaxInputComponents = 128;
      ctx->Const.Program[MESA_SHADER_TESS_CTRL].MaxOutputComponents = 128;
      ctx->Const.Program[MESA_SHADER_TESS_EVAL].MaxInputComponents = 128;
      ctx->Const.Program[MESA_SHADER_TESS_EVAL].MaxOutputComponents = 128;
   }

   /* We want the GLSL compiler to emit code that uses condition codes */
   for (int i = 0; i < MESA_SHADER_STAGES; i++) {
      ctx->Const.ShaderCompilerOptions[i] =
         brw->screen->compiler->glsl_compiler_options[i];
   }

   if (brw->gen >= 7) {
      ctx->Const.MaxViewportWidth = 32768;
      ctx->Const.MaxViewportHeight = 32768;
   }

   /* ARB_viewport_array, OES_viewport_array */
   if (brw->gen >= 6) {
      ctx->Const.MaxViewports = GEN6_NUM_VIEWPORTS;
      ctx->Const.ViewportSubpixelBits = 0;

      /* Cast to float before negating because MaxViewportWidth is unsigned.
       */
      ctx->Const.ViewportBounds.Min = -(float)ctx->Const.MaxViewportWidth;
      ctx->Const.ViewportBounds.Max = ctx->Const.MaxViewportWidth;
   }

   /* ARB_gpu_shader5 */
   if (brw->gen >= 7)
      ctx->Const.MaxVertexStreams = MIN2(4, MAX_VERTEX_STREAMS);

   /* ARB_framebuffer_no_attachments */
   ctx->Const.MaxFramebufferWidth = 16384;
   ctx->Const.MaxFramebufferHeight = 16384;
   ctx->Const.MaxFramebufferLayers = ctx->Const.MaxArrayTextureLayers;
   ctx->Const.MaxFramebufferSamples = max_samples;

   /* OES_primitive_bounding_box */
   ctx->Const.NoPrimitiveBoundingBoxOutput = true;
}
static void
brw_initialize_cs_context_constants(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   const struct intel_screen *screen = brw->screen;
   struct gen_device_info *devinfo = &brw->screen->devinfo;

   /* FINISHME: Do this for all platforms that the kernel supports */
   if (brw->is_cherryview &&
       screen->subslice_total > 0 && screen->eu_total > 0) {
      /* Logical CS threads = EUs per subslice * 7 threads per EU */
      uint32_t max_cs_threads = screen->eu_total / screen->subslice_total * 7;

      /* Fuse configurations may give more threads than expected, never less. */
      if (max_cs_threads > devinfo->max_cs_threads)
         devinfo->max_cs_threads = max_cs_threads;
   }

   /* Maximum number of scalar compute shader invocations that can be run in
    * parallel in the same subslice assuming SIMD32 dispatch.
    *
    * We don't advertise more than 64 threads, because we are limited to 64 by
    * our usage of thread_width_max in the gpgpu walker command. This only
    * currently impacts Haswell, which otherwise might be able to advertise 70
    * threads. With SIMD32 and 64 threads, Haswell still provides twice the
    * number of invocations required by ARB_compute_shader.
    */
   const unsigned max_threads = MIN2(64, devinfo->max_cs_threads);
   const uint32_t max_invocations = 32 * max_threads;
   ctx->Const.MaxComputeWorkGroupSize[0] = max_invocations;
   ctx->Const.MaxComputeWorkGroupSize[1] = max_invocations;
   ctx->Const.MaxComputeWorkGroupSize[2] = max_invocations;
   ctx->Const.MaxComputeWorkGroupInvocations = max_invocations;
   ctx->Const.MaxComputeSharedMemorySize = 64 * 1024;
}
/**
 * Process driconf (drirc) options, setting appropriate context flags.
 *
 * intelInitExtensions still pokes at optionCache directly, in order to
 * avoid advertising various extensions.  No flags are set, so it makes
 * sense to continue doing that there.
 */
static void
brw_process_driconf_options(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   driOptionCache *options = &brw->optionCache;
   driParseConfigFiles(options, &brw->screen->optionCache,
                       brw->driContext->driScreenPriv->myNum, "i965");

   int bo_reuse_mode = driQueryOptioni(options, "bo_reuse");
   switch (bo_reuse_mode) {
   case DRI_CONF_BO_REUSE_DISABLED:
      break;
   case DRI_CONF_BO_REUSE_ALL:
      brw_bufmgr_enable_reuse(brw->bufmgr);
      break;
   }

   if (INTEL_DEBUG & DEBUG_NO_HIZ) {
      brw->has_hiz = false;
      /* On gen6, you can only do separate stencil with HIZ. */
      if (brw->gen == 6)
         brw->has_separate_stencil = false;
   }

   if (driQueryOptionb(options, "always_flush_batch")) {
      fprintf(stderr, "flushing batchbuffer before/after each draw call\n");
      brw->always_flush_batch = true;
   }

   if (driQueryOptionb(options, "always_flush_cache")) {
      fprintf(stderr, "flushing GPU caches before/after each draw call\n");
      brw->always_flush_cache = true;
   }

   if (driQueryOptionb(options, "disable_throttling")) {
      fprintf(stderr, "disabling flush throttling\n");
      brw->disable_throttling = true;
   }

   brw->precompile = driQueryOptionb(&brw->optionCache, "shader_precompile");

   if (driQueryOptionb(&brw->optionCache, "precise_trig"))
      brw->screen->compiler->precise_trig = true;

   ctx->Const.ForceGLSLExtensionsWarn =
      driQueryOptionb(options, "force_glsl_extensions_warn");

   ctx->Const.ForceGLSLVersion =
      driQueryOptioni(options, "force_glsl_version");

   ctx->Const.DisableGLSLLineContinuations =
      driQueryOptionb(options, "disable_glsl_line_continuations");

   ctx->Const.AllowGLSLExtensionDirectiveMidShader =
      driQueryOptionb(options, "allow_glsl_extension_directive_midshader");

   ctx->Const.AllowGLSLBuiltinVariableRedeclaration =
      driQueryOptionb(options, "allow_glsl_builtin_variable_redeclaration");

   ctx->Const.AllowHigherCompatVersion =
      driQueryOptionb(options, "allow_higher_compat_version");

   ctx->Const.ForceGLSLAbsSqrt =
      driQueryOptionb(options, "force_glsl_abs_sqrt");

   ctx->Const.GLSLZeroInit = driQueryOptionb(options, "glsl_zero_init");

   brw->dual_color_blend_by_location =
      driQueryOptionb(options, "dual_color_blend_by_location");
}
GLboolean
brwCreateContext(gl_api api,
                 const struct gl_config *mesaVis,
                 __DRIcontext *driContextPriv,
                 unsigned major_version,
                 unsigned minor_version,
                 uint32_t flags,
                 bool notify_reset,
                 unsigned *dri_ctx_error,
                 void *sharedContextPrivate)
{
   struct gl_context *shareCtx = (struct gl_context *) sharedContextPrivate;
   struct intel_screen *screen = driContextPriv->driScreenPriv->driverPrivate;
   const struct gen_device_info *devinfo = &screen->devinfo;
   struct dd_function_table functions;

   /* Only allow the __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS flag if the kernel
    * provides us with context reset notifications.
    */
   uint32_t allowed_flags = __DRI_CTX_FLAG_DEBUG
      | __DRI_CTX_FLAG_FORWARD_COMPATIBLE;

   if (screen->has_context_reset_notification)
      allowed_flags |= __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS;

   if (flags & ~allowed_flags) {
      *dri_ctx_error = __DRI_CTX_ERROR_UNKNOWN_FLAG;
      return false;
   }

   struct brw_context *brw = rzalloc(NULL, struct brw_context);
   if (!brw) {
      fprintf(stderr, "%s: failed to alloc context\n", __func__);
      *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
      return false;
   }

   driContextPriv->driverPrivate = brw;
   brw->driContext = driContextPriv;
   brw->screen = screen;
   brw->bufmgr = screen->bufmgr;

   brw->gen = devinfo->gen;
   brw->gt = devinfo->gt;
   brw->is_g4x = devinfo->is_g4x;
   brw->is_baytrail = devinfo->is_baytrail;
   brw->is_haswell = devinfo->is_haswell;
   brw->is_cherryview = devinfo->is_cherryview;
   brw->is_broxton = devinfo->is_broxton;
   brw->has_llc = devinfo->has_llc;
   brw->has_hiz = devinfo->has_hiz_and_separate_stencil;
   brw->has_separate_stencil = devinfo->has_hiz_and_separate_stencil;
   brw->has_pln = devinfo->has_pln;
   brw->has_compr4 = devinfo->has_compr4;
   brw->has_surface_tile_offset = devinfo->has_surface_tile_offset;
   brw->has_negative_rhw_bug = devinfo->has_negative_rhw_bug;
   brw->needs_unlit_centroid_workaround =
      devinfo->needs_unlit_centroid_workaround;

   brw->must_use_separate_stencil = devinfo->must_use_separate_stencil;
   brw->has_swizzling = screen->hw_has_swizzling;

   isl_device_init(&brw->isl_dev, devinfo, screen->hw_has_swizzling);

   brw->vs.base.stage = MESA_SHADER_VERTEX;
   brw->tcs.base.stage = MESA_SHADER_TESS_CTRL;
   brw->tes.base.stage = MESA_SHADER_TESS_EVAL;
   brw->gs.base.stage = MESA_SHADER_GEOMETRY;
   brw->wm.base.stage = MESA_SHADER_FRAGMENT;
   if (brw->gen >= 8) {
      gen8_init_vtable_surface_functions(brw);
      brw->vtbl.emit_depth_stencil_hiz = gen8_emit_depth_stencil_hiz;
   } else if (brw->gen >= 7) {
      gen7_init_vtable_surface_functions(brw);
      brw->vtbl.emit_depth_stencil_hiz = gen7_emit_depth_stencil_hiz;
   } else if (brw->gen >= 6) {
      gen6_init_vtable_surface_functions(brw);
      brw->vtbl.emit_depth_stencil_hiz = gen6_emit_depth_stencil_hiz;
   } else {
      gen4_init_vtable_surface_functions(brw);
      brw->vtbl.emit_depth_stencil_hiz = brw_emit_depth_stencil_hiz;
   }

   brw_init_driver_functions(brw, &functions);

   if (notify_reset)
      functions.GetGraphicsResetStatus = brw_get_graphics_reset_status;

   struct gl_context *ctx = &brw->ctx;

   if (!_mesa_initialize_context(ctx, api, mesaVis, shareCtx, &functions)) {
      *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
      fprintf(stderr, "%s: failed to init mesa context\n", __func__);
      intelDestroyContext(driContextPriv);
      return false;
   }

   driContextSetFlags(ctx, flags);

   /* Initialize the software rasterizer and helper modules.
    *
    * As of GL 3.1 core, the gen4+ driver doesn't need the swrast context for
    * software fallbacks (which we have to support on legacy GL to do weird
    * glDrawPixels(), glBitmap(), and other functions).
    */
   if (api != API_OPENGL_CORE && api != API_OPENGLES2) {
      _swrast_CreateContext(ctx);
   }

   _vbo_CreateContext(ctx);
   if (ctx->swrast_context) {
      _tnl_CreateContext(ctx);
      TNL_CONTEXT(ctx)->Driver.RunPipeline = _tnl_run_pipeline;
      _swsetup_CreateContext(ctx);

      /* Configure swrast to match hardware characteristics: */
      _swrast_allow_pixel_fog(ctx, false);
      _swrast_allow_vertex_fog(ctx, true);
   }

   _mesa_meta_init(ctx);

   brw_process_driconf_options(brw);

   if (INTEL_DEBUG & DEBUG_PERF)
      brw->perf_debug = true;

   brw_initialize_cs_context_constants(brw);
   brw_initialize_context_constants(brw);

   ctx->Const.ResetStrategy = notify_reset
      ? GL_LOSE_CONTEXT_ON_RESET_ARB : GL_NO_RESET_NOTIFICATION_ARB;

   /* Reinitialize the context point state.  It depends on ctx->Const values. */
   _mesa_init_point(ctx);

   intel_fbo_init(brw);

   intel_batchbuffer_init(&brw->batch, brw->bufmgr, brw->has_llc);

   if (brw->gen >= 6) {
      /* Create a new hardware context.  Using a hardware context means that
       * our GPU state will be saved/restored on context switch, allowing us
       * to assume that the GPU is in the same state we left it in.
       *
       * This is required for transform feedback buffer offsets, query objects,
       * and also allows us to reduce how much state we have to emit.
       */
      brw->hw_ctx = brw_create_hw_context(brw->bufmgr);

      if (!brw->hw_ctx) {
         fprintf(stderr, "Failed to create hardware context.\n");
         intelDestroyContext(driContextPriv);
         return false;
      }
   }

   if (brw_init_pipe_control(brw, devinfo)) {
      *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
      intelDestroyContext(driContextPriv);
      return false;
   }

   brw_init_state(brw);

   intelInitExtensions(ctx);

   brw_init_surface_formats(brw);

   brw_blorp_init(brw);

   brw->urb.size = devinfo->urb.size;

   if (brw->gen == 6)
      brw->urb.gs_present = false;

   brw->prim_restart.in_progress = false;
   brw->prim_restart.enable_cut_index = false;
   brw->gs.enabled = false;
   brw->clip.viewport_count = 1;

   brw->predicate.state = BRW_PREDICATE_STATE_RENDER;

   brw->max_gtt_map_object_size = screen->max_gtt_map_object_size;

   ctx->VertexProgram._MaintainTnlProgram = true;
   ctx->FragmentProgram._MaintainTexEnvProgram = true;

   brw_draw_init( brw );

   if ((flags & __DRI_CTX_FLAG_DEBUG) != 0) {
      /* Turn on some extra GL_ARB_debug_output generation. */
      brw->perf_debug = true;
   }

   if ((flags & __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS) != 0) {
      ctx->Const.ContextFlags |= GL_CONTEXT_FLAG_ROBUST_ACCESS_BIT_ARB;
      ctx->Const.RobustAccess = GL_TRUE;
   }

   if (INTEL_DEBUG & DEBUG_SHADER_TIME)
      brw_init_shader_time(brw);

   _mesa_compute_version(ctx);

   _mesa_initialize_dispatch_tables(ctx);
   _mesa_initialize_vbo_vtxfmt(ctx);

   if (ctx->Extensions.INTEL_performance_query)
      brw_init_performance_queries(brw);

   vbo_use_buffer_objects(ctx);
   vbo_always_unmap_buffers(ctx);

   return true;
}
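
/* Tear down everything brwCreateContext() set up, in roughly reverse
 * order.
 */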
void
intelDestroyContext(__DRIcontext * driContextPriv)
{
   struct brw_context *brw =
      (struct brw_context *) driContextPriv->driverPrivate;
   struct gl_context *ctx = &brw->ctx;

   _mesa_meta_free(&brw->ctx);

   if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
      /* Force a report. */
      brw->shader_time.report_time = 0;

      brw_collect_and_report_shader_time(brw);
      brw_destroy_shader_time(brw);
   }

   if (brw->gen >= 6)
      blorp_finish(&brw->blorp);

   brw_destroy_state(brw);
   brw_draw_destroy(brw);

   brw_bo_unreference(brw->curbe.curbe_bo);
   if (brw->vs.base.scratch_bo)
      brw_bo_unreference(brw->vs.base.scratch_bo);
   if (brw->tcs.base.scratch_bo)
      brw_bo_unreference(brw->tcs.base.scratch_bo);
   if (brw->tes.base.scratch_bo)
      brw_bo_unreference(brw->tes.base.scratch_bo);
   if (brw->gs.base.scratch_bo)
      brw_bo_unreference(brw->gs.base.scratch_bo);
   if (brw->wm.base.scratch_bo)
      brw_bo_unreference(brw->wm.base.scratch_bo);

   brw_destroy_hw_context(brw->bufmgr, brw->hw_ctx);

   if (ctx->swrast_context) {
      _swsetup_DestroyContext(&brw->ctx);
      _tnl_DestroyContext(&brw->ctx);
   }
   _vbo_DestroyContext(&brw->ctx);

   if (ctx->swrast_context)
      _swrast_DestroyContext(&brw->ctx);

   brw_fini_pipe_control(brw);
   intel_batchbuffer_free(&brw->batch);

   brw_bo_unreference(brw->throttle_batch[1]);
   brw_bo_unreference(brw->throttle_batch[0]);
   brw->throttle_batch[1] = NULL;
   brw->throttle_batch[0] = NULL;

   driDestroyOptionCache(&brw->optionCache);

   /* free the Mesa context */
   _mesa_free_context_data(&brw->ctx);

   ralloc_free(brw);
   driContextPriv->driverPrivate = NULL;
}
GLboolean
intelUnbindContext(__DRIcontext * driContextPriv)
{
   /* Unset current context and dispatch table */
   _mesa_make_current(NULL, NULL, NULL);

   return true;
}
/**
 * Fixes up the context for GLES2/3 with our default-to-sRGB-capable behavior
 * on window system framebuffers.
 *
 * Desktop GL is fairly reasonable in its handling of sRGB: You can ask if
 * your renderbuffer can do sRGB encode, and you can flip a switch that does
 * sRGB encode if the renderbuffer can handle it.  You can ask specifically
 * for a visual where you're guaranteed to be capable, but it turns out that
 * everyone just makes all their ARGB8888 visuals capable and doesn't offer
 * incapable ones, because there's no difference between the two in resources
 * used.  Applications thus get built that accidentally rely on the default
 * visual choice being sRGB, so we make ours sRGB capable.  Everything sounds
 * great.
 *
 * But for GLES2/3, they decided that it was silly to not turn on sRGB encode
 * for sRGB renderbuffers you made with the GL_EXT_texture_sRGB equivalent.
 * So they removed the enable knob and made it "if the renderbuffer is sRGB
 * capable, do sRGB encode".  Then, for your window system renderbuffers, you
 * can ask for sRGB visuals and get sRGB encode, or not ask for sRGB visuals
 * and get no sRGB encode (assuming that both kinds of visual are available).
 * Thus our choice to support sRGB by default on our visuals for desktop would
 * result in broken rendering of GLES apps that aren't expecting sRGB encode.
 *
 * Unfortunately, renderbuffer setup happens before a context is created.  So
 * in intel_screen.c we always set up sRGB, and here, if you're a GLES2/3
 * context (without an sRGB visual, though we don't have sRGB visuals exposed
 * yet), we go turn that back off before anyone finds out.
 */
static void
intel_gles3_srgb_workaround(struct brw_context *brw,
                            struct gl_framebuffer *fb)
{
   struct gl_context *ctx = &brw->ctx;

   if (_mesa_is_desktop_gl(ctx) || !fb->Visual.sRGBCapable)
      return;

   /* Some day when we support the sRGB capable bit on visuals available for
    * GLES, we'll need to respect that and not disable things here.
    */
   fb->Visual.sRGBCapable = false;
   for (int i = 0; i < BUFFER_COUNT; i++) {
      struct gl_renderbuffer *rb = fb->Attachment[i].Renderbuffer;
      if (rb)
         rb->Format = _mesa_get_srgb_format_linear(rb->Format);
   }
}
GLboolean
intelMakeCurrent(__DRIcontext * driContextPriv,
                 __DRIdrawable * driDrawPriv,
                 __DRIdrawable * driReadPriv)
{
   struct brw_context *brw;
   GET_CURRENT_CONTEXT(curCtx);

   if (driContextPriv)
      brw = (struct brw_context *) driContextPriv->driverPrivate;
   else
      brw = NULL;

   /* According to the glXMakeCurrent() man page: "Pending commands to
    * the previous context, if any, are flushed before it is released."
    * But only flush if we're actually changing contexts.
    */
   if (brw_context(curCtx) && brw_context(curCtx) != brw) {
      _mesa_flush(curCtx);
   }

   if (driContextPriv) {
      struct gl_context *ctx = &brw->ctx;
      struct gl_framebuffer *fb, *readFb;

      if (driDrawPriv == NULL) {
         fb = _mesa_get_incomplete_framebuffer();
      } else {
         fb = driDrawPriv->driverPrivate;
         driContextPriv->dri2.draw_stamp = driDrawPriv->dri2.stamp - 1;
      }

      if (driReadPriv == NULL) {
         readFb = _mesa_get_incomplete_framebuffer();
      } else {
         readFb = driReadPriv->driverPrivate;
         driContextPriv->dri2.read_stamp = driReadPriv->dri2.stamp - 1;
      }

      /* The sRGB workaround changes the renderbuffer's format. We must change
       * the format before the renderbuffer's miptree gets allocated, otherwise
       * the formats of the renderbuffer and its miptree will differ.
       */
      intel_gles3_srgb_workaround(brw, fb);
      intel_gles3_srgb_workaround(brw, readFb);

      /* If the context viewport hasn't been initialized, force a call out to
       * the loader to get buffers so we have a drawable size for the initial
       * viewport. */
      if (!brw->ctx.ViewportInitialized)
         intel_prepare_render(brw);

      _mesa_make_current(ctx, fb, readFb);
   } else {
      _mesa_make_current(NULL, NULL, NULL);
   }

   return true;
}
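
/* Make winsys buffers presentable before the server reads from them:
 * downsample MSAA surfaces and resolve fast-clear state on
 * single-sampled ones.
 */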
void
intel_resolve_for_dri2_flush(struct brw_context *brw,
                             __DRIdrawable *drawable)
{
   if (brw->gen < 6) {
      /* MSAA and fast color clear are not supported, so don't waste time
       * checking whether a resolve is needed.
       */
      return;
   }

   struct gl_framebuffer *fb = drawable->driverPrivate;
   struct intel_renderbuffer *rb;

   /* Usually, only the back buffer will need to be downsampled. However,
    * the front buffer will also need it if the user has rendered into it.
    */
   static const gl_buffer_index buffers[2] = {
         BUFFER_BACK_LEFT,
         BUFFER_FRONT_LEFT,
   };

   for (int i = 0; i < 2; ++i) {
      rb = intel_get_renderbuffer(fb, buffers[i]);
      if (rb == NULL || rb->mt == NULL)
         continue;
      if (rb->mt->num_samples <= 1) {
         assert(rb->mt_layer == 0 && rb->mt_level == 0 &&
                rb->layer_count == 1);
         intel_miptree_resolve_color(brw, rb->mt, 0, 1, 0, 1, 0);
      } else {
         intel_renderbuffer_downsample(brw, rb);
      }
   }
}
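
/* Bits per pixel of a winsys renderbuffer, as advertised to the DRI2
 * loader in the attachments list below.
 */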
static unsigned
intel_bits_per_pixel(const struct intel_renderbuffer *rb)
{
   return _mesa_get_format_bytes(intel_rb_format(rb)) * 8;
}
static void
intel_query_dri2_buffers(struct brw_context *brw,
                         __DRIdrawable *drawable,
                         __DRIbuffer **buffers,
                         int *count);

static void
intel_process_dri2_buffer(struct brw_context *brw,
                          __DRIdrawable *drawable,
                          __DRIbuffer *buffer,
                          struct intel_renderbuffer *rb,
                          const char *buffer_name);

static void
intel_update_image_buffers(struct brw_context *brw, __DRIdrawable *drawable);
static void
intel_update_dri2_buffers(struct brw_context *brw, __DRIdrawable *drawable)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;
   struct intel_renderbuffer *rb;
   __DRIbuffer *buffers = NULL;
   int count;
   const char *region_name;

   /* Set this up front, so that in case our buffers get invalidated
    * while we're getting new buffers, we don't clobber the stamp and
    * thus ignore the invalidate. */
   drawable->lastStamp = drawable->dri2.stamp;

   if (unlikely(INTEL_DEBUG & DEBUG_DRI))
      fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);

   intel_query_dri2_buffers(brw, drawable, &buffers, &count);

   if (buffers == NULL)
      return;

   for (int i = 0; i < count; i++) {
      switch (buffers[i].attachment) {
      case __DRI_BUFFER_FRONT_LEFT:
         rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
         region_name = "dri2 front buffer";
         break;

      case __DRI_BUFFER_FAKE_FRONT_LEFT:
         rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
         region_name = "dri2 fake front buffer";
         break;

      case __DRI_BUFFER_BACK_LEFT:
         rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
         region_name = "dri2 back buffer";
         break;

      case __DRI_BUFFER_DEPTH:
      case __DRI_BUFFER_HIZ:
      case __DRI_BUFFER_DEPTH_STENCIL:
      case __DRI_BUFFER_STENCIL:
      case __DRI_BUFFER_ACCUM:
      default:
         fprintf(stderr,
                 "unhandled buffer attach event, attachment type %d\n",
                 buffers[i].attachment);
         return;
      }

      intel_process_dri2_buffer(brw, drawable, &buffers[i], rb, region_name);
   }
}
void
intel_update_renderbuffers(__DRIcontext *context, __DRIdrawable *drawable)
{
   struct brw_context *brw = context->driverPrivate;
   __DRIscreen *dri_screen = brw->screen->driScrnPriv;

   /* Set this up front, so that in case our buffers get invalidated
    * while we're getting new buffers, we don't clobber the stamp and
    * thus ignore the invalidate. */
   drawable->lastStamp = drawable->dri2.stamp;

   if (unlikely(INTEL_DEBUG & DEBUG_DRI))
      fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);

   if (dri_screen->image.loader)
      intel_update_image_buffers(brw, drawable);
   else
      intel_update_dri2_buffers(brw, drawable);

   driUpdateFramebufferSize(&brw->ctx, drawable);
}
/**
 * intel_prepare_render should be called anywhere that current read/drawbuffer
 * state is required.
 */
void
intel_prepare_render(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   __DRIcontext *driContext = brw->driContext;
   __DRIdrawable *drawable;

   drawable = driContext->driDrawablePriv;
   if (drawable && drawable->dri2.stamp != driContext->dri2.draw_stamp) {
      if (drawable->lastStamp != drawable->dri2.stamp)
         intel_update_renderbuffers(driContext, drawable);
      driContext->dri2.draw_stamp = drawable->dri2.stamp;
   }

   drawable = driContext->driReadablePriv;
   if (drawable && drawable->dri2.stamp != driContext->dri2.read_stamp) {
      if (drawable->lastStamp != drawable->dri2.stamp)
         intel_update_renderbuffers(driContext, drawable);
      driContext->dri2.read_stamp = drawable->dri2.stamp;
   }

   /* If we're currently rendering to the front buffer, the rendering
    * that will happen next will probably dirty the front buffer.  So
    * mark it as dirty here.
    */
   if (_mesa_is_front_buffer_drawing(ctx->DrawBuffer))
      brw->front_buffer_dirty = true;
}
/**
 * \brief Query DRI2 to obtain a DRIdrawable's buffers.
 *
 * To determine which DRI buffers to request, examine the renderbuffers
 * attached to the drawable's framebuffer. Then request the buffers with
 * DRI2GetBuffers() or DRI2GetBuffersWithFormat().
 *
 * This is called from intel_update_renderbuffers().
 *
 * \param drawable      Drawable whose buffers are queried.
 * \param buffers       [out] List of buffers returned by DRI2 query.
 * \param buffer_count  [out] Number of buffers returned.
 *
 * \see intel_update_renderbuffers()
 * \see DRI2GetBuffers()
 * \see DRI2GetBuffersWithFormat()
 */
static void
intel_query_dri2_buffers(struct brw_context *brw,
                         __DRIdrawable *drawable,
                         __DRIbuffer **buffers,
                         int *buffer_count)
{
   __DRIscreen *dri_screen = brw->screen->driScrnPriv;
   struct gl_framebuffer *fb = drawable->driverPrivate;
   int i = 0;
   unsigned attachments[8];

   struct intel_renderbuffer *front_rb;
   struct intel_renderbuffer *back_rb;

   front_rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
   back_rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);

   memset(attachments, 0, sizeof(attachments));
   if ((_mesa_is_front_buffer_drawing(fb) ||
        _mesa_is_front_buffer_reading(fb) ||
        !back_rb) && front_rb) {
      /* If a fake front buffer is in use, then querying for
       * __DRI_BUFFER_FRONT_LEFT will cause the server to copy the image from
       * the real front buffer to the fake front buffer.  So before doing the
       * query, we need to make sure all the pending drawing has landed in the
       * real front buffer.
       */
      intel_batchbuffer_flush(brw);
      intel_flush_front(&brw->ctx);

      attachments[i++] = __DRI_BUFFER_FRONT_LEFT;
      attachments[i++] = intel_bits_per_pixel(front_rb);
   } else if (front_rb && brw->front_buffer_dirty) {
      /* We have pending front buffer rendering, but we aren't querying for a
       * front buffer.  If the front buffer we have is a fake front buffer,
       * the X server is going to throw it away when it processes the query.
       * So before doing the query, make sure all the pending drawing has
       * landed in the real front buffer.
       */
      intel_batchbuffer_flush(brw);
      intel_flush_front(&brw->ctx);
   }

   if (back_rb) {
      attachments[i++] = __DRI_BUFFER_BACK_LEFT;
      attachments[i++] = intel_bits_per_pixel(back_rb);
   }

   assert(i <= ARRAY_SIZE(attachments));

   *buffers =
      dri_screen->dri2.loader->getBuffersWithFormat(drawable,
                                                    &drawable->w,
                                                    &drawable->h,
                                                    attachments, i / 2,
                                                    buffer_count,
                                                    drawable->loaderPrivate);
}
/**
 * \brief Assign a DRI buffer's DRM region to a renderbuffer.
 *
 * This is called from intel_update_renderbuffers().
 *
 * \par Note:
 *    DRI buffers whose attachment point is DRI2BufferStencil or
 *    DRI2BufferDepthStencil are handled as special cases.
 *
 * \param buffer_name is a human readable name, such as "dri2 front buffer",
 *        that is passed to brw_bo_gem_create_from_name().
 *
 * \see intel_update_renderbuffers()
 */
static void
intel_process_dri2_buffer(struct brw_context *brw,
                          __DRIdrawable *drawable,
                          __DRIbuffer *buffer,
                          struct intel_renderbuffer *rb,
                          const char *buffer_name)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;
   struct brw_bo *bo;

   if (!rb)
      return;

   unsigned num_samples = rb->Base.Base.NumSamples;

   /* We try to avoid closing and reopening the same BO name, because the first
    * use of a mapping of the buffer involves a bunch of page faulting which is
    * moderately expensive.
    */
   struct intel_mipmap_tree *last_mt;
   if (num_samples == 0)
      last_mt = rb->mt;
   else
      last_mt = rb->singlesample_mt;

   uint32_t old_name = 0;
   if (last_mt) {
      /* The bo already has a name because the miptree was created by a
       * previous call to intel_process_dri2_buffer(). If a bo already has a
       * name, then brw_bo_flink() is a low-cost getter.  It does not
       * create a new name.
       */
      brw_bo_flink(last_mt->bo, &old_name);
   }

   if (old_name == buffer->name)
      return;

   if (unlikely(INTEL_DEBUG & DEBUG_DRI)) {
      fprintf(stderr,
              "attaching buffer %d, at %d, cpp %d, pitch %d\n",
              buffer->name, buffer->attachment,
              buffer->cpp, buffer->pitch);
   }

   bo = brw_bo_gem_create_from_name(brw->bufmgr, buffer_name,
                                    buffer->name);
   if (!bo) {
      fprintf(stderr,
              "Failed to open BO for returned DRI2 buffer "
              "(%dx%d, %s, named %d).\n"
              "This is likely a bug in the X Server that will lead to a "
              "crash soon.\n",
              drawable->w, drawable->h, buffer_name, buffer->name);
      return;
   }

   intel_update_winsys_renderbuffer_miptree(brw, rb, bo,
                                            drawable->w, drawable->h,
                                            buffer->pitch);

   if (_mesa_is_front_buffer_drawing(fb) &&
       (buffer->attachment == __DRI_BUFFER_FRONT_LEFT ||
        buffer->attachment == __DRI_BUFFER_FAKE_FRONT_LEFT) &&
       rb->Base.Base.NumSamples > 1) {
      intel_renderbuffer_upsample(brw, rb);
   }

   assert(rb->mt);

   brw_bo_unreference(bo);
}
/**
 * \brief Query DRI image loader to obtain a DRIdrawable's buffers.
 *
 * To determine which DRI buffers to request, examine the renderbuffers
 * attached to the drawable's framebuffer. Then request the buffers from
 * the image loader.
 *
 * This is called from intel_update_renderbuffers().
 *
 * \param drawable      Drawable whose buffers are queried.
 * \param buffers       [out] List of buffers returned by DRI2 query.
 * \param buffer_count  [out] Number of buffers returned.
 *
 * \see intel_update_renderbuffers()
 */
static void
intel_update_image_buffer(struct brw_context *intel,
                          __DRIdrawable *drawable,
                          struct intel_renderbuffer *rb,
                          __DRIimage *buffer,
                          enum __DRIimageBufferMask buffer_type)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;

   if (!rb || !buffer->bo)
      return;

   unsigned num_samples = rb->Base.Base.NumSamples;

   /* Check and see if we're already bound to the right
    * buffer object
    */
   struct intel_mipmap_tree *last_mt;
   if (num_samples == 0)
      last_mt = rb->mt;
   else
      last_mt = rb->singlesample_mt;

   if (last_mt && last_mt->bo == buffer->bo)
      return;

   intel_update_winsys_renderbuffer_miptree(intel, rb, buffer->bo,
                                            buffer->width, buffer->height,
                                            buffer->pitch);

   if (_mesa_is_front_buffer_drawing(fb) &&
       buffer_type == __DRI_IMAGE_BUFFER_FRONT &&
       rb->Base.Base.NumSamples > 1) {
      intel_renderbuffer_upsample(intel, rb);
   }
}
static void
intel_update_image_buffers(struct brw_context *brw, __DRIdrawable *drawable)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;
   __DRIscreen *dri_screen = brw->screen->driScrnPriv;
   struct intel_renderbuffer *front_rb;
   struct intel_renderbuffer *back_rb;
   struct __DRIimageList images;
   mesa_format format;
   uint32_t buffer_mask = 0;
   int ret;

   front_rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
   back_rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);

   if (back_rb)
      format = intel_rb_format(back_rb);
   else if (front_rb)
      format = intel_rb_format(front_rb);
   else
      return;

   if (front_rb && (_mesa_is_front_buffer_drawing(fb) ||
                    _mesa_is_front_buffer_reading(fb) || !back_rb)) {
      buffer_mask |= __DRI_IMAGE_BUFFER_FRONT;
   }

   if (back_rb)
      buffer_mask |= __DRI_IMAGE_BUFFER_BACK;

   ret = dri_screen->image.loader->getBuffers(drawable,
                                              driGLFormatToImageFormat(format),
                                              &drawable->dri2.stamp,
                                              drawable->loaderPrivate,
                                              buffer_mask,
                                              &images);
   if (!ret)
      return;

   if (images.image_mask & __DRI_IMAGE_BUFFER_FRONT) {
      drawable->w = images.front->width;
      drawable->h = images.front->height;
      intel_update_image_buffer(brw,
                                drawable,
                                front_rb,
                                images.front,
                                __DRI_IMAGE_BUFFER_FRONT);
   }

   if (images.image_mask & __DRI_IMAGE_BUFFER_BACK) {
      drawable->w = images.back->width;
      drawable->h = images.back->height;
      intel_update_image_buffer(brw,
                                drawable,
                                back_rb,
                                images.back,
                                __DRI_IMAGE_BUFFER_BACK);
   }
}