mesa: add AllowGLSLCrossStageInterpolationMismatch workaround
[mesa.git] / src / mesa / drivers / dri / i965 / brw_context.c
/*
 Copyright 2003 VMware, Inc.
 Copyright (C) Intel Corp. 2006. All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */


#include "compiler/nir/nir.h"
#include "main/api_exec.h"
#include "main/context.h"
#include "main/fbobject.h"
#include "main/extensions.h"
#include "main/imports.h"
#include "main/macros.h"
#include "main/points.h"
#include "main/version.h"
#include "main/vtxfmt.h"
#include "main/texobj.h"
#include "main/framebuffer.h"
#include "main/stencil.h"
#include "main/state.h"

#include "vbo/vbo_context.h"

#include "drivers/common/driverfuncs.h"
#include "drivers/common/meta.h"
#include "utils.h"

#include "brw_context.h"
#include "brw_defines.h"
#include "brw_blorp.h"
#include "brw_draw.h"
#include "brw_state.h"

#include "intel_batchbuffer.h"
#include "intel_buffer_objects.h"
#include "intel_buffers.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
#include "intel_pixel.h"
#include "intel_image.h"
#include "intel_tex.h"
#include "intel_tex_obj.h"

#include "swrast_setup/swrast_setup.h"
#include "tnl/tnl.h"
#include "tnl/t_pipeline.h"
#include "util/ralloc.h"
#include "util/debug.h"
#include "isl/isl.h"

/***************************************
 * Mesa's Driver Functions
 ***************************************/

const char *const brw_vendor_string = "Intel Open Source Technology Center";

static const char *
get_bsw_model(const struct intel_screen *screen)
{
   switch (screen->eu_total) {
   case 16:
      return "405";
   case 12:
      return "400";
   default:
      return "   "; /* three spaces, so the 3-byte memcpy() below stays in bounds */
   }
}

const char *
brw_get_renderer_string(const struct intel_screen *screen)
{
   const char *chipset;
   static char buffer[128];
   char *bsw = NULL;

   switch (screen->deviceID) {
#undef CHIPSET
#define CHIPSET(id, symbol, str) case id: chipset = str; break;
#include "pci_ids/i965_pci_ids.h"
   default:
      chipset = "Unknown Intel Chipset";
      break;
   }

   /* Braswell branding is funny, so we have to fix it up here */
   if (screen->deviceID == 0x22B1) {
      bsw = strdup(chipset);
      char *needle = strstr(bsw, "XXX");
      if (needle) {
         memcpy(needle, get_bsw_model(screen), 3);
         chipset = bsw;
      }
   }

   (void) driGetRendererString(buffer, chipset, 0);
   free(bsw);
   return buffer;
}
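
/* A minimal usage sketch of the renderer-string plumbing above (assumed
 * values, not part of the driver): on a hypothetical Braswell screen with
 * deviceID 0x22B1 and eu_total == 16, the "XXX" placeholder in the PCI ID
 * string is overwritten in place with "405".
 */
#if 0
static void
example_print_renderer(const struct intel_screen *screen)
{
   /* Would print e.g. "Intel(R) HD Graphics 405 (Braswell)". */
   fprintf(stderr, "GL_RENDERER: %s\n", brw_get_renderer_string(screen));
}
#endif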

static const GLubyte *
intel_get_string(struct gl_context * ctx, GLenum name)
{
   const struct brw_context *const brw = brw_context(ctx);

   switch (name) {
   case GL_VENDOR:
      return (GLubyte *) brw_vendor_string;

   case GL_RENDERER:
      return
         (GLubyte *) brw_get_renderer_string(brw->screen);

   default:
      return NULL;
   }
}

static void
intel_viewport(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);
   __DRIcontext *driContext = brw->driContext;

   if (_mesa_is_winsys_fbo(ctx->DrawBuffer)) {
      if (driContext->driDrawablePriv)
         dri2InvalidateDrawable(driContext->driDrawablePriv);
      if (driContext->driReadablePriv)
         dri2InvalidateDrawable(driContext->driReadablePriv);
   }
}

static void
intel_update_framebuffer(struct gl_context *ctx,
                         struct gl_framebuffer *fb)
{
   struct brw_context *brw = brw_context(ctx);

   /* Quantize the derived default number of samples
    */
   fb->DefaultGeometry._NumSamples =
      intel_quantize_num_samples(brw->screen,
                                 fb->DefaultGeometry.NumSamples);
}

static void
intel_update_state(struct gl_context * ctx)
{
   GLuint new_state = ctx->NewState;
   struct brw_context *brw = brw_context(ctx);

   if (ctx->swrast_context)
      _swrast_InvalidateState(ctx, new_state);

   brw->NewGLState |= new_state;

   if (new_state & (_NEW_SCISSOR | _NEW_BUFFERS | _NEW_VIEWPORT))
      _mesa_update_draw_buffer_bounds(ctx, ctx->DrawBuffer);

   if (new_state & (_NEW_STENCIL | _NEW_BUFFERS)) {
      brw->stencil_enabled = _mesa_stencil_is_enabled(ctx);
      brw->stencil_two_sided = _mesa_stencil_is_two_sided(ctx);
      brw->stencil_write_enabled =
         _mesa_stencil_is_write_enabled(ctx, brw->stencil_two_sided);
   }

   if (new_state & _NEW_POLYGON)
      brw->polygon_front_bit = _mesa_polygon_get_front_bit(ctx);

   if (new_state & _NEW_BUFFERS) {
      intel_update_framebuffer(ctx, ctx->DrawBuffer);
      if (ctx->DrawBuffer != ctx->ReadBuffer)
         intel_update_framebuffer(ctx, ctx->ReadBuffer);
   }
}

#define flushFront(screen) ((screen)->image.loader ? (screen)->image.loader->flushFrontBuffer : (screen)->dri2.loader->flushFrontBuffer)
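/* The macro evaluates to a function pointer, so calls below read as
 * flushFront(screen)(drawable, loaderPrivate): the first set of parens
 * picks the active loader's flushFrontBuffer hook, the second invokes it.
 */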

static void
intel_flush_front(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);
   __DRIcontext *driContext = brw->driContext;
   __DRIdrawable *driDrawable = driContext->driDrawablePriv;
   __DRIscreen *const dri_screen = brw->screen->driScrnPriv;

   if (brw->front_buffer_dirty && _mesa_is_winsys_fbo(ctx->DrawBuffer)) {
      if (flushFront(dri_screen) && driDrawable &&
          driDrawable->loaderPrivate) {

         /* Resolve before flushing FAKE_FRONT_LEFT to FRONT_LEFT.
          *
          * This potentially resolves both front and back buffer. It
          * is unnecessary to resolve the back, but harms nothing except
          * performance. And no one cares about front-buffer render
          * performance.
          */
         intel_resolve_for_dri2_flush(brw, driDrawable);
         intel_batchbuffer_flush(brw);

         flushFront(dri_screen)(driDrawable, driDrawable->loaderPrivate);

         /* We set the dirty bit in intel_prepare_render() if we're
          * front buffer rendering once we get there.
          */
         brw->front_buffer_dirty = false;
      }
   }
}

static void
intel_glFlush(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);

   intel_batchbuffer_flush(brw);
   intel_flush_front(ctx);

   brw->need_flush_throttle = true;
}

static void
intel_finish(struct gl_context * ctx)
{
   struct brw_context *brw = brw_context(ctx);

   intel_glFlush(ctx);

   if (brw->batch.last_bo)
      brw_bo_wait_rendering(brw->batch.last_bo);
}

static void
brw_init_driver_functions(struct brw_context *brw,
                          struct dd_function_table *functions)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   _mesa_init_driver_functions(functions);

   /* GLX uses DRI2 invalidate events to handle window resizing.
    * Unfortunately, EGL does not - libEGL is written in XCB (not Xlib),
    * which doesn't provide a mechanism for snooping the event queues.
    *
    * So EGL still relies on viewport hacks to handle window resizing.
    * This should go away with DRI3000.
    */
   if (!brw->driContext->driScreenPriv->dri2.useInvalidate)
      functions->Viewport = intel_viewport;

   functions->Flush = intel_glFlush;
   functions->Finish = intel_finish;
   functions->GetString = intel_get_string;
   functions->UpdateState = intel_update_state;

   intelInitTextureFuncs(functions);
   intelInitTextureImageFuncs(functions);
   intelInitTextureCopyImageFuncs(functions);
   intelInitCopyImageFuncs(functions);
   intelInitClearFuncs(functions);
   intelInitBufferFuncs(functions);
   intelInitPixelFuncs(functions);
   intelInitBufferObjectFuncs(functions);
   brw_init_syncobj_functions(functions);
   brw_init_object_purgeable_functions(functions);

   brwInitFragProgFuncs( functions );
   brw_init_common_queryobj_functions(functions);
   if (devinfo->gen >= 8 || devinfo->is_haswell)
      hsw_init_queryobj_functions(functions);
   else if (devinfo->gen >= 6)
      gen6_init_queryobj_functions(functions);
   else
      gen4_init_queryobj_functions(functions);
   brw_init_compute_functions(functions);
   brw_init_conditional_render_functions(functions);

   functions->QueryInternalFormat = brw_query_internal_format;

   functions->NewTransformFeedback = brw_new_transform_feedback;
   functions->DeleteTransformFeedback = brw_delete_transform_feedback;
   if (can_do_mi_math_and_lrr(brw->screen)) {
      functions->BeginTransformFeedback = hsw_begin_transform_feedback;
      functions->EndTransformFeedback = hsw_end_transform_feedback;
      functions->PauseTransformFeedback = hsw_pause_transform_feedback;
      functions->ResumeTransformFeedback = hsw_resume_transform_feedback;
   } else if (devinfo->gen >= 7) {
      functions->BeginTransformFeedback = gen7_begin_transform_feedback;
      functions->EndTransformFeedback = gen7_end_transform_feedback;
      functions->PauseTransformFeedback = gen7_pause_transform_feedback;
      functions->ResumeTransformFeedback = gen7_resume_transform_feedback;
      functions->GetTransformFeedbackVertexCount =
         brw_get_transform_feedback_vertex_count;
   } else {
      functions->BeginTransformFeedback = brw_begin_transform_feedback;
      functions->EndTransformFeedback = brw_end_transform_feedback;
      functions->PauseTransformFeedback = brw_pause_transform_feedback;
      functions->ResumeTransformFeedback = brw_resume_transform_feedback;
      functions->GetTransformFeedbackVertexCount =
         brw_get_transform_feedback_vertex_count;
   }

   if (devinfo->gen >= 6)
      functions->GetSamplePosition = gen6_get_sample_position;
}

static void
brw_initialize_context_constants(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct gl_context *ctx = &brw->ctx;
   const struct brw_compiler *compiler = brw->screen->compiler;

   const bool stage_exists[MESA_SHADER_STAGES] = {
      [MESA_SHADER_VERTEX] = true,
      [MESA_SHADER_TESS_CTRL] = devinfo->gen >= 7,
      [MESA_SHADER_TESS_EVAL] = devinfo->gen >= 7,
      [MESA_SHADER_GEOMETRY] = devinfo->gen >= 6,
      [MESA_SHADER_FRAGMENT] = true,
      [MESA_SHADER_COMPUTE] =
         (_mesa_is_desktop_gl(ctx) &&
          ctx->Const.MaxComputeWorkGroupSize[0] >= 1024) ||
         (ctx->API == API_OPENGLES2 &&
          ctx->Const.MaxComputeWorkGroupSize[0] >= 128),
   };

   unsigned num_stages = 0;
   for (int i = 0; i < MESA_SHADER_STAGES; i++) {
      if (stage_exists[i])
         num_stages++;
   }

   unsigned max_samplers =
      devinfo->gen >= 8 || devinfo->is_haswell ? BRW_MAX_TEX_UNIT : 16;

   ctx->Const.MaxDualSourceDrawBuffers = 1;
   ctx->Const.MaxDrawBuffers = BRW_MAX_DRAW_BUFFERS;
   ctx->Const.MaxCombinedShaderOutputResources =
      MAX_IMAGE_UNITS + BRW_MAX_DRAW_BUFFERS;

   /* The timestamp register we can read for glGetTimestamp() is
    * sometimes only 32 bits, before scaling to nanoseconds (depending
    * on kernel).
    *
    * Once scaled to nanoseconds the timestamp would roll over at a
    * non-power-of-two, so an application couldn't use
    * GL_QUERY_COUNTER_BITS to handle rollover correctly. Instead, we
    * report 36 bits and truncate at that (rolling over 5 times as
    * often as the HW counter), and when the 32-bit counter rolls
    * over, it happens to also be at a rollover in the reported value
    * from near (1<<36) to 0.
    *
    * The low 32 bits roll over in ~343 seconds. Our 36-bit result
    * rolls over every ~69 seconds.
    */
   ctx->Const.QueryCounterBits.Timestamp = 36;
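
   /* Worked example (illustrative): truncating the scaled timestamp with
    * ns & ((1ull << 36) - 1) wraps at 2^36 ns, i.e. roughly every ~69
    * seconds, matching the 36 bits reported above.
    */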

   ctx->Const.MaxTextureCoordUnits = 8; /* Mesa limit */
   ctx->Const.MaxImageUnits = MAX_IMAGE_UNITS;
   if (devinfo->gen >= 7) {
      ctx->Const.MaxRenderbufferSize = 16384;
      ctx->Const.MaxTextureLevels = MIN2(15 /* 16384 */, MAX_TEXTURE_LEVELS);
      ctx->Const.MaxCubeTextureLevels = 15; /* 16384 */
   } else {
      ctx->Const.MaxRenderbufferSize = 8192;
      ctx->Const.MaxTextureLevels = MIN2(14 /* 8192 */, MAX_TEXTURE_LEVELS);
      ctx->Const.MaxCubeTextureLevels = 14; /* 8192 */
   }
   ctx->Const.Max3DTextureLevels = 12; /* 2048 */
   ctx->Const.MaxArrayTextureLayers = devinfo->gen >= 7 ? 2048 : 512;
   ctx->Const.MaxTextureMbytes = 1536;
   ctx->Const.MaxTextureRectSize = devinfo->gen >= 7 ? 16384 : 8192;
   ctx->Const.MaxTextureMaxAnisotropy = 16.0;
   ctx->Const.MaxTextureLodBias = 15.0;
   ctx->Const.StripTextureBorder = true;
   if (devinfo->gen >= 7) {
      ctx->Const.MaxProgramTextureGatherComponents = 4;
      ctx->Const.MinProgramTextureGatherOffset = -32;
      ctx->Const.MaxProgramTextureGatherOffset = 31;
   } else if (devinfo->gen == 6) {
      ctx->Const.MaxProgramTextureGatherComponents = 1;
      ctx->Const.MinProgramTextureGatherOffset = -8;
      ctx->Const.MaxProgramTextureGatherOffset = 7;
   }

   ctx->Const.MaxUniformBlockSize = 65536;

   for (int i = 0; i < MESA_SHADER_STAGES; i++) {
      struct gl_program_constants *prog = &ctx->Const.Program[i];

      if (!stage_exists[i])
         continue;

      prog->MaxTextureImageUnits = max_samplers;

      prog->MaxUniformBlocks = BRW_MAX_UBO;
      prog->MaxCombinedUniformComponents =
         prog->MaxUniformComponents +
         ctx->Const.MaxUniformBlockSize / 4 * prog->MaxUniformBlocks;

      prog->MaxAtomicCounters = MAX_ATOMIC_COUNTERS;
      prog->MaxAtomicBuffers = BRW_MAX_ABO;
      prog->MaxImageUniforms = compiler->scalar_stage[i] ? BRW_MAX_IMAGES : 0;
      prog->MaxShaderStorageBlocks = BRW_MAX_SSBO;
   }
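
   /* Worked example of the combined-uniform arithmetic above: if
    * BRW_MAX_UBO were 12 (hypothetical value), each stage would get
    * 65536 / 4 * 12 == 196608 block components on top of its default
    * uniform components.
    */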

   ctx->Const.MaxTextureUnits =
      MIN2(ctx->Const.MaxTextureCoordUnits,
           ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits);

   ctx->Const.MaxUniformBufferBindings = num_stages * BRW_MAX_UBO;
   ctx->Const.MaxCombinedUniformBlocks = num_stages * BRW_MAX_UBO;
   ctx->Const.MaxCombinedAtomicBuffers = num_stages * BRW_MAX_ABO;
   ctx->Const.MaxCombinedShaderStorageBlocks = num_stages * BRW_MAX_SSBO;
   ctx->Const.MaxShaderStorageBufferBindings = num_stages * BRW_MAX_SSBO;
   ctx->Const.MaxCombinedTextureImageUnits = num_stages * max_samplers;
   ctx->Const.MaxCombinedImageUniforms = num_stages * BRW_MAX_IMAGES;


   /* Hardware only supports a limited number of transform feedback buffers.
    * So we need to override the Mesa default (which is based only on software
    * limits).
    */
   ctx->Const.MaxTransformFeedbackBuffers = BRW_MAX_SOL_BUFFERS;

   /* On Gen6, in the worst case, we use up one binding table entry per
    * transform feedback component (see comments above the definition of
    * BRW_MAX_SOL_BINDINGS, in brw_context.h), so we need to advertise a value
    * for MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS equal to
    * BRW_MAX_SOL_BINDINGS.
    *
    * In "separate components" mode, we need to divide this value by
    * BRW_MAX_SOL_BUFFERS, so that the total number of binding table entries
    * used up by all buffers will not exceed BRW_MAX_SOL_BINDINGS.
    */
   ctx->Const.MaxTransformFeedbackInterleavedComponents = BRW_MAX_SOL_BINDINGS;
   ctx->Const.MaxTransformFeedbackSeparateComponents =
      BRW_MAX_SOL_BINDINGS / BRW_MAX_SOL_BUFFERS;
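
   /* Worked example, assuming the usual i965 values of
    * BRW_MAX_SOL_BINDINGS == 64 and BRW_MAX_SOL_BUFFERS == 4: interleaved
    * mode advertises 64 components, while separate mode advertises
    * 64 / 4 == 16 components per buffer, so either mode tops out at 64
    * binding table entries.
    */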

   ctx->Const.AlwaysUseGetTransformFeedbackVertexCount =
      !can_do_mi_math_and_lrr(brw->screen);

   int max_samples;
   const int *msaa_modes = intel_supported_msaa_modes(brw->screen);
   const int clamp_max_samples =
      driQueryOptioni(&brw->optionCache, "clamp_max_samples");

   if (clamp_max_samples < 0) {
      max_samples = msaa_modes[0];
   } else {
      /* Select the largest supported MSAA mode that does not exceed
       * clamp_max_samples.
       */
      max_samples = 0;
      for (int i = 0; msaa_modes[i] != 0; ++i) {
         if (msaa_modes[i] <= clamp_max_samples) {
            max_samples = msaa_modes[i];
            break;
         }
      }
   }
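
   /* Example of the clamp above (hypothetical mode list): if
    * intel_supported_msaa_modes() returned {8, 4, 0} and the user set
    * clamp_max_samples=6 in drirc, the loop would pick 4, the largest
    * mode not exceeding the clamp; the default of -1 keeps 8.
    */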

   ctx->Const.MaxSamples = max_samples;
   ctx->Const.MaxColorTextureSamples = max_samples;
   ctx->Const.MaxDepthTextureSamples = max_samples;
   ctx->Const.MaxIntegerSamples = max_samples;
   ctx->Const.MaxImageSamples = 0;

   /* gen6_set_sample_maps() sets SampleMap{2,4,8}x variables which are used
    * to map indices of rectangular grid to sample numbers within a pixel.
    * These variables are used by GL_EXT_framebuffer_multisample_blit_scaled
    * extension implementation. For more details see the comment above
    * gen6_set_sample_maps() definition.
    */
   gen6_set_sample_maps(ctx);

   ctx->Const.MinLineWidth = 1.0;
   ctx->Const.MinLineWidthAA = 1.0;
   if (devinfo->gen >= 6) {
      ctx->Const.MaxLineWidth = 7.375;
      ctx->Const.MaxLineWidthAA = 7.375;
      ctx->Const.LineWidthGranularity = 0.125;
   } else {
      ctx->Const.MaxLineWidth = 7.0;
      ctx->Const.MaxLineWidthAA = 7.0;
      ctx->Const.LineWidthGranularity = 0.5;
   }

   /* For non-antialiased lines, we have to round the line width to the
    * nearest whole number. Make sure that we don't advertise a line
    * width that, when rounded, will be beyond the actual hardware
    * maximum.
    */
   assert(roundf(ctx->Const.MaxLineWidth) <= ctx->Const.MaxLineWidth);
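
   /* For example, 7.375 is safe to advertise because roundf(7.375) == 7,
    * which the hardware handles; 7.5 would not be, since it rounds to 8.
    */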

   ctx->Const.MinPointSize = 1.0;
   ctx->Const.MinPointSizeAA = 1.0;
   ctx->Const.MaxPointSize = 255.0;
   ctx->Const.MaxPointSizeAA = 255.0;
   ctx->Const.PointSizeGranularity = 1.0;

   if (devinfo->gen >= 5 || devinfo->is_g4x)
      ctx->Const.MaxClipPlanes = 8;

   ctx->Const.GLSLTessLevelsAsInputs = true;
   ctx->Const.LowerTCSPatchVerticesIn = devinfo->gen >= 8;
   ctx->Const.LowerTESPatchVerticesIn = true;
   ctx->Const.PrimitiveRestartForPatches = true;

   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeInstructions = 16 * 1024;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxAluInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxTexInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxTexIndirections = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAluInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTexInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTexIndirections = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAttribs = 16;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTemps = 256;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAddressRegs = 1;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeParameters = 1024;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxEnvParams =
      MIN2(ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeParameters,
           ctx->Const.Program[MESA_SHADER_VERTEX].MaxEnvParams);

   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeInstructions = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAluInstructions = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTexInstructions = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTexIndirections = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAttribs = 12;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTemps = 256;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAddressRegs = 0;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeParameters = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxEnvParams =
      MIN2(ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeParameters,
           ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxEnvParams);

   /* Fragment shaders use real, 32-bit twos-complement integers for all
    * integer types.
    */
   ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.RangeMin = 31;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.RangeMax = 30;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.Precision = 0;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].HighInt = ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MediumInt = ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt;

   ctx->Const.Program[MESA_SHADER_VERTEX].LowInt.RangeMin = 31;
   ctx->Const.Program[MESA_SHADER_VERTEX].LowInt.RangeMax = 30;
   ctx->Const.Program[MESA_SHADER_VERTEX].LowInt.Precision = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].HighInt = ctx->Const.Program[MESA_SHADER_VERTEX].LowInt;
   ctx->Const.Program[MESA_SHADER_VERTEX].MediumInt = ctx->Const.Program[MESA_SHADER_VERTEX].LowInt;

   /* Gen6 converts quads to polygons at the beginning of the 3D pipeline,
    * but we're not sure how the vertex order is actually handled, which
    * affects the provoking-vertex decision. Always use the last-vertex
    * convention for quad primitives, which works as expected for now.
    */
   if (devinfo->gen >= 6)
      ctx->Const.QuadsFollowProvokingVertexConvention = false;

   ctx->Const.NativeIntegers = true;
   ctx->Const.VertexID_is_zero_based = true;

   /* Regarding the CMP instruction, the Ivybridge PRM says:
    *
    *   "For each enabled channel 0b or 1b is assigned to the appropriate flag
    *    bit and 0/all zeros or all ones (e.g, byte 0xFF, word 0xFFFF, DWord
    *    0xFFFFFFFF) is assigned to dst."
    *
    * but PRMs for earlier generations say
    *
    *   "In dword format, one GRF may store up to 8 results. When the register
    *    is used later as a vector of Booleans, as only LSB at each channel
    *    contains meaning [sic] data, software should make sure all higher bits
    *    are masked out (e.g. by 'and-ing' an [sic] 0x01 constant)."
    *
    * We select the representation of a true boolean uniform to be ~0, and fix
    * the results of Gen <= 5 CMP instructions with -(result & 1).
    */
   ctx->Const.UniformBooleanTrue = ~0;
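
   /* Worked example of that fixup (illustrative): a Gen5 CMP result of
    * 0x00000001 becomes -(0x00000001 & 1) == 0xffffffff (~0, true), while
    * 0x00000000 stays false.
    */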

   /* From the gen4 PRM, volume 4 page 127:
    *
    *    "For SURFTYPE_BUFFER non-rendertarget surfaces, this field specifies
    *     the base address of the first element of the surface, computed in
    *     software by adding the surface base address to the byte offset of
    *     the element in the buffer."
    *
    * However, unaligned accesses are slower, so enforce buffer alignment.
    *
    * In order to push UBO data, 3DSTATE_CONSTANT_XS imposes an additional
    * restriction: the start of the buffer needs to be 32B aligned.
    */
   ctx->Const.UniformBufferOffsetAlignment = 32;

   /* ShaderStorageBufferOffsetAlignment should be a cacheline (64 bytes) so
    * that we can safely have the CPU and GPU writing the same SSBO on
    * non-cachecoherent systems (our Atom CPUs). With UBOs, the GPU never
    * writes, so there's no problem. For an SSBO, the GPU and the CPU can
    * be updating disjoint regions of the buffer simultaneously and that will
    * break if the regions overlap the same cacheline.
    */
   ctx->Const.ShaderStorageBufferOffsetAlignment = 64;
   ctx->Const.TextureBufferOffsetAlignment = 16;
   ctx->Const.MaxTextureBufferSize = 128 * 1024 * 1024;

   if (devinfo->gen >= 6) {
      ctx->Const.MaxVarying = 32;
      ctx->Const.Program[MESA_SHADER_VERTEX].MaxOutputComponents = 128;
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxInputComponents =
         compiler->scalar_stage[MESA_SHADER_GEOMETRY] ? 128 : 64;
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxOutputComponents = 128;
      ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxInputComponents = 128;
      ctx->Const.Program[MESA_SHADER_TESS_CTRL].MaxInputComponents = 128;
      ctx->Const.Program[MESA_SHADER_TESS_CTRL].MaxOutputComponents = 128;
      ctx->Const.Program[MESA_SHADER_TESS_EVAL].MaxInputComponents = 128;
      ctx->Const.Program[MESA_SHADER_TESS_EVAL].MaxOutputComponents = 128;
   }

   /* We want the GLSL compiler to emit code that uses condition codes */
   for (int i = 0; i < MESA_SHADER_STAGES; i++) {
      ctx->Const.ShaderCompilerOptions[i] =
         brw->screen->compiler->glsl_compiler_options[i];
   }

   if (devinfo->gen >= 7) {
      ctx->Const.MaxViewportWidth = 32768;
      ctx->Const.MaxViewportHeight = 32768;
   }

   /* ARB_viewport_array, OES_viewport_array */
   if (devinfo->gen >= 6) {
      ctx->Const.MaxViewports = GEN6_NUM_VIEWPORTS;
      ctx->Const.ViewportSubpixelBits = 0;

      /* Cast to float before negating because MaxViewportWidth is unsigned.
       */
      ctx->Const.ViewportBounds.Min = -(float)ctx->Const.MaxViewportWidth;
      ctx->Const.ViewportBounds.Max = ctx->Const.MaxViewportWidth;
   }

   /* ARB_gpu_shader5 */
   if (devinfo->gen >= 7)
      ctx->Const.MaxVertexStreams = MIN2(4, MAX_VERTEX_STREAMS);

   /* ARB_framebuffer_no_attachments */
   ctx->Const.MaxFramebufferWidth = 16384;
   ctx->Const.MaxFramebufferHeight = 16384;
   ctx->Const.MaxFramebufferLayers = ctx->Const.MaxArrayTextureLayers;
   ctx->Const.MaxFramebufferSamples = max_samples;

   /* OES_primitive_bounding_box */
   ctx->Const.NoPrimitiveBoundingBoxOutput = true;

   /* TODO: We should be able to use STD430 packing by default on all hardware
    * but some piglit tests [1] currently fail on SNB when this is enabled.
    * The problem is that the message we use for uniform pulls in the vec4
    * back-end on SNB is the OWORD block load instruction, which takes its
    * offset in units of OWORDs (16 bytes). On IVB+, we use the sampler,
    * which doesn't have these restrictions.
    *
    * In the scalar back-end, we use the sampler for dynamic uniform loads and
    * pull an entire cache line at a time for constant offset loads both of
    * which support almost any alignment.
    *
    * [1] glsl-1.40/uniform_buffer/vs-float-array-variable-index.shader_test
    */
   if (devinfo->gen >= 7)
      ctx->Const.UseSTD430AsDefaultPacking = true;

   if (!(ctx->Const.ContextFlags & GL_CONTEXT_FLAG_DEBUG_BIT))
      ctx->Const.AllowMappedBuffersDuringExecution = true;
}

static void
brw_initialize_cs_context_constants(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   const struct intel_screen *screen = brw->screen;
   struct gen_device_info *devinfo = &brw->screen->devinfo;

   /* FINISHME: Do this for all platforms that the kernel supports */
   if (devinfo->is_cherryview &&
       screen->subslice_total > 0 && screen->eu_total > 0) {
      /* Logical CS threads = EUs per subslice * 7 threads per EU */
      uint32_t max_cs_threads = screen->eu_total / screen->subslice_total * 7;

      /* Fuse configurations may give more threads than expected, never less. */
      if (max_cs_threads > devinfo->max_cs_threads)
         devinfo->max_cs_threads = max_cs_threads;
   }

   /* Maximum number of scalar compute shader invocations that can be run in
    * parallel in the same subslice assuming SIMD32 dispatch.
    *
    * We don't advertise more than 64 threads, because we are limited to 64 by
    * our usage of thread_width_max in the gpgpu walker command. This only
    * currently impacts Haswell, which otherwise might be able to advertise 70
    * threads. With SIMD32 and 64 threads, Haswell still provides twice the
    * number of invocations required by ARB_compute_shader.
    */
   const unsigned max_threads = MIN2(64, devinfo->max_cs_threads);
   const uint32_t max_invocations = 32 * max_threads;
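
   /* Worked example (hypothetical Cherryview fusing): eu_total == 16 and
    * subslice_total == 2 give 16 / 2 * 7 == 56 logical threads, so
    * max_threads == MIN2(64, 56) == 56 and max_invocations == 32 * 56 ==
    * 1792, well above the 1024 that ARB_compute_shader requires.
    */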
   ctx->Const.MaxComputeWorkGroupSize[0] = max_invocations;
   ctx->Const.MaxComputeWorkGroupSize[1] = max_invocations;
   ctx->Const.MaxComputeWorkGroupSize[2] = max_invocations;
   ctx->Const.MaxComputeWorkGroupInvocations = max_invocations;
   ctx->Const.MaxComputeSharedMemorySize = 64 * 1024;
}

/**
 * Process driconf (drirc) options, setting appropriate context flags.
 *
 * intelInitExtensions still pokes at optionCache directly, in order to
 * avoid advertising various extensions. No flags are set, so it makes
 * sense to continue doing that there.
 */
static void
brw_process_driconf_options(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct gl_context *ctx = &brw->ctx;

   driOptionCache *options = &brw->optionCache;
   driParseConfigFiles(options, &brw->screen->optionCache,
                       brw->driContext->driScreenPriv->myNum, "i965");

   int bo_reuse_mode = driQueryOptioni(options, "bo_reuse");
   switch (bo_reuse_mode) {
   case DRI_CONF_BO_REUSE_DISABLED:
      break;
   case DRI_CONF_BO_REUSE_ALL:
      brw_bufmgr_enable_reuse(brw->bufmgr);
      break;
   }

   if (INTEL_DEBUG & DEBUG_NO_HIZ) {
      brw->has_hiz = false;
      /* On gen6, you can only do separate stencil with HIZ. */
      if (devinfo->gen == 6)
         brw->has_separate_stencil = false;
   }

   if (driQueryOptionb(options, "mesa_no_error"))
      ctx->Const.ContextFlags |= GL_CONTEXT_FLAG_NO_ERROR_BIT_KHR;

   if (driQueryOptionb(options, "always_flush_batch")) {
      fprintf(stderr, "flushing batchbuffer before/after each draw call\n");
      brw->always_flush_batch = true;
   }

   if (driQueryOptionb(options, "always_flush_cache")) {
      fprintf(stderr, "flushing GPU caches before/after each draw call\n");
      brw->always_flush_cache = true;
   }

   if (driQueryOptionb(options, "disable_throttling")) {
      fprintf(stderr, "disabling flush throttling\n");
      brw->disable_throttling = true;
   }

   brw->precompile = driQueryOptionb(&brw->optionCache, "shader_precompile");

   if (driQueryOptionb(&brw->optionCache, "precise_trig"))
      brw->screen->compiler->precise_trig = true;

   ctx->Const.ForceGLSLExtensionsWarn =
      driQueryOptionb(options, "force_glsl_extensions_warn");

   ctx->Const.ForceGLSLVersion =
      driQueryOptioni(options, "force_glsl_version");

   ctx->Const.DisableGLSLLineContinuations =
      driQueryOptionb(options, "disable_glsl_line_continuations");

   ctx->Const.AllowGLSLExtensionDirectiveMidShader =
      driQueryOptionb(options, "allow_glsl_extension_directive_midshader");

   ctx->Const.AllowGLSLBuiltinVariableRedeclaration =
      driQueryOptionb(options, "allow_glsl_builtin_variable_redeclaration");

   ctx->Const.AllowHigherCompatVersion =
      driQueryOptionb(options, "allow_higher_compat_version");

   ctx->Const.ForceGLSLAbsSqrt =
      driQueryOptionb(options, "force_glsl_abs_sqrt");

   ctx->Const.GLSLZeroInit = driQueryOptionb(options, "glsl_zero_init");

   brw->dual_color_blend_by_location =
      driQueryOptionb(options, "dual_color_blend_by_location");

   ctx->Const.AllowGLSLCrossStageInterpolationMismatch =
      driQueryOptionb(options, "allow_glsl_cross_stage_interpolation_mismatch");

   ctx->Const.dri_config_options_sha1 = ralloc_array(brw, unsigned char, 20);
   driComputeOptionsSha1(&brw->screen->optionCache,
                         ctx->Const.dri_config_options_sha1);
}

GLboolean
brwCreateContext(gl_api api,
                 const struct gl_config *mesaVis,
                 __DRIcontext *driContextPriv,
                 const struct __DriverContextConfig *ctx_config,
                 unsigned *dri_ctx_error,
                 void *sharedContextPrivate)
{
   struct gl_context *shareCtx = (struct gl_context *) sharedContextPrivate;
   struct intel_screen *screen = driContextPriv->driScreenPriv->driverPrivate;
   const struct gen_device_info *devinfo = &screen->devinfo;
   struct dd_function_table functions;

   /* Only allow the __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS flag if the kernel
    * provides us with context reset notifications.
    */
   uint32_t allowed_flags = __DRI_CTX_FLAG_DEBUG |
                            __DRI_CTX_FLAG_FORWARD_COMPATIBLE |
                            __DRI_CTX_FLAG_NO_ERROR;

   if (screen->has_context_reset_notification)
      allowed_flags |= __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS;

   if (ctx_config->flags & ~allowed_flags) {
      *dri_ctx_error = __DRI_CTX_ERROR_UNKNOWN_FLAG;
      return false;
   }

   if (ctx_config->attribute_mask &
       ~(__DRIVER_CONTEXT_ATTRIB_RESET_STRATEGY |
         __DRIVER_CONTEXT_ATTRIB_RELEASE_BEHAVIOR)) {
      *dri_ctx_error = __DRI_CTX_ERROR_UNKNOWN_ATTRIBUTE;
      return false;
   }

   bool notify_reset =
      ((ctx_config->attribute_mask & __DRIVER_CONTEXT_ATTRIB_RESET_STRATEGY) &&
       ctx_config->reset_strategy != __DRI_CTX_RESET_NO_NOTIFICATION);

   GLenum release_behavior = GL_CONTEXT_RELEASE_BEHAVIOR_FLUSH;
   if (ctx_config->attribute_mask & __DRIVER_CONTEXT_ATTRIB_RELEASE_BEHAVIOR) {
      switch (ctx_config->release_behavior) {
      case __DRI_CTX_RELEASE_BEHAVIOR_NONE:
         release_behavior = GL_NONE;
         break;
      case __DRI_CTX_RELEASE_BEHAVIOR_FLUSH:
         break;
      default:
         *dri_ctx_error = __DRI_CTX_ERROR_UNKNOWN_ATTRIBUTE;
         return false;
      }
   }

   struct brw_context *brw = rzalloc(NULL, struct brw_context);
   if (!brw) {
      fprintf(stderr, "%s: failed to alloc context\n", __func__);
      *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
      return false;
   }

   driContextPriv->driverPrivate = brw;
   brw->driContext = driContextPriv;
   brw->screen = screen;
   brw->bufmgr = screen->bufmgr;

   brw->has_hiz = devinfo->has_hiz_and_separate_stencil;
   brw->has_separate_stencil = devinfo->has_hiz_and_separate_stencil;

   brw->has_swizzling = screen->hw_has_swizzling;

   brw->isl_dev = screen->isl_dev;

   brw->vs.base.stage = MESA_SHADER_VERTEX;
   brw->tcs.base.stage = MESA_SHADER_TESS_CTRL;
   brw->tes.base.stage = MESA_SHADER_TESS_EVAL;
   brw->gs.base.stage = MESA_SHADER_GEOMETRY;
   brw->wm.base.stage = MESA_SHADER_FRAGMENT;
   brw->cs.base.stage = MESA_SHADER_COMPUTE;
   if (devinfo->gen >= 8) {
      brw->vtbl.emit_depth_stencil_hiz = gen8_emit_depth_stencil_hiz;
   } else if (devinfo->gen >= 7) {
      brw->vtbl.emit_depth_stencil_hiz = gen7_emit_depth_stencil_hiz;
   } else if (devinfo->gen >= 6) {
      brw->vtbl.emit_depth_stencil_hiz = gen6_emit_depth_stencil_hiz;
   } else {
      brw->vtbl.emit_depth_stencil_hiz = brw_emit_depth_stencil_hiz;
   }

   brw_init_driver_functions(brw, &functions);

   if (notify_reset)
      functions.GetGraphicsResetStatus = brw_get_graphics_reset_status;

   struct gl_context *ctx = &brw->ctx;

   if (!_mesa_initialize_context(ctx, api, mesaVis, shareCtx, &functions)) {
      *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
      fprintf(stderr, "%s: failed to init mesa context\n", __func__);
      intelDestroyContext(driContextPriv);
      return false;
   }

   driContextSetFlags(ctx, ctx_config->flags);

   /* Initialize the software rasterizer and helper modules.
    *
    * As of GL 3.1 core, the gen4+ driver doesn't need the swrast context for
    * software fallbacks (which we have to support on legacy GL to do weird
    * glDrawPixels(), glBitmap(), and other functions).
    */
   if (api != API_OPENGL_CORE && api != API_OPENGLES2) {
      _swrast_CreateContext(ctx);
   }

   _vbo_CreateContext(ctx);
   if (ctx->swrast_context) {
      _tnl_CreateContext(ctx);
      TNL_CONTEXT(ctx)->Driver.RunPipeline = _tnl_run_pipeline;
      _swsetup_CreateContext(ctx);

      /* Configure swrast to match hardware characteristics: */
      _swrast_allow_pixel_fog(ctx, false);
      _swrast_allow_vertex_fog(ctx, true);
   }

   _mesa_meta_init(ctx);

   brw_process_driconf_options(brw);

   if (INTEL_DEBUG & DEBUG_PERF)
      brw->perf_debug = true;

   brw_initialize_cs_context_constants(brw);
   brw_initialize_context_constants(brw);

   ctx->Const.ResetStrategy = notify_reset
      ? GL_LOSE_CONTEXT_ON_RESET_ARB : GL_NO_RESET_NOTIFICATION_ARB;

   /* Reinitialize the context point state. It depends on ctx->Const values. */
   _mesa_init_point(ctx);

   intel_fbo_init(brw);

   intel_batchbuffer_init(brw);

   if (devinfo->gen >= 6) {
      /* Create a new hardware context. Using a hardware context means that
       * our GPU state will be saved/restored on context switch, allowing us
       * to assume that the GPU is in the same state we left it in.
       *
       * This is required for transform feedback buffer offsets, query objects,
       * and also allows us to reduce how much state we have to emit.
       */
      brw->hw_ctx = brw_create_hw_context(brw->bufmgr);

      if (!brw->hw_ctx) {
         fprintf(stderr, "Failed to create hardware context.\n");
         intelDestroyContext(driContextPriv);
         return false;
      }

      int hw_priority = BRW_CONTEXT_MEDIUM_PRIORITY;
      if (ctx_config->attribute_mask & __DRIVER_CONTEXT_ATTRIB_PRIORITY) {
         switch (ctx_config->priority) {
         case __DRI_CTX_PRIORITY_LOW:
            hw_priority = BRW_CONTEXT_LOW_PRIORITY;
            break;
         case __DRI_CTX_PRIORITY_HIGH:
            hw_priority = BRW_CONTEXT_HIGH_PRIORITY;
            break;
         }
      }
      if (hw_priority != I915_CONTEXT_DEFAULT_PRIORITY &&
          brw_hw_context_set_priority(brw->bufmgr, brw->hw_ctx, hw_priority)) {
         fprintf(stderr,
                 "Failed to set priority [%d:%d] for hardware context.\n",
                 ctx_config->priority, hw_priority);
         intelDestroyContext(driContextPriv);
         return false;
      }
   }

   if (brw_init_pipe_control(brw, devinfo)) {
      *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
      intelDestroyContext(driContextPriv);
      return false;
   }

   brw_init_state(brw);

   intelInitExtensions(ctx);

   brw_init_surface_formats(brw);

   brw_blorp_init(brw);

   brw->urb.size = devinfo->urb.size;

   if (devinfo->gen == 6)
      brw->urb.gs_present = false;

   brw->prim_restart.in_progress = false;
   brw->prim_restart.enable_cut_index = false;
   brw->gs.enabled = false;
   brw->clip.viewport_count = 1;

   brw->predicate.state = BRW_PREDICATE_STATE_RENDER;

   brw->max_gtt_map_object_size = screen->max_gtt_map_object_size;

   ctx->VertexProgram._MaintainTnlProgram = true;
   ctx->FragmentProgram._MaintainTexEnvProgram = true;

   brw_draw_init( brw );

   if ((ctx_config->flags & __DRI_CTX_FLAG_DEBUG) != 0) {
      /* Turn on some extra GL_ARB_debug_output generation. */
      brw->perf_debug = true;
   }

   if ((ctx_config->flags & __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS) != 0) {
      ctx->Const.ContextFlags |= GL_CONTEXT_FLAG_ROBUST_ACCESS_BIT_ARB;
      ctx->Const.RobustAccess = GL_TRUE;
   }

   ctx->Const.ContextReleaseBehavior = release_behavior;

   if (INTEL_DEBUG & DEBUG_SHADER_TIME)
      brw_init_shader_time(brw);

   _mesa_override_extensions(ctx);
   _mesa_compute_version(ctx);

   _mesa_initialize_dispatch_tables(ctx);
   _mesa_initialize_vbo_vtxfmt(ctx);

   if (ctx->Extensions.INTEL_performance_query)
      brw_init_performance_queries(brw);

   vbo_use_buffer_objects(ctx);
   vbo_always_unmap_buffers(ctx);

   brw_disk_cache_init(brw);

   return true;
}

void
intelDestroyContext(__DRIcontext * driContextPriv)
{
   struct brw_context *brw =
      (struct brw_context *) driContextPriv->driverPrivate;
   struct gl_context *ctx = &brw->ctx;
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   _mesa_meta_free(&brw->ctx);

   if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
      /* Force a report. */
      brw->shader_time.report_time = 0;

      brw_collect_and_report_shader_time(brw);
      brw_destroy_shader_time(brw);
   }

   if (devinfo->gen >= 6)
      blorp_finish(&brw->blorp);

   brw_destroy_state(brw);
   brw_draw_destroy(brw);

   brw_bo_unreference(brw->curbe.curbe_bo);

   brw_bo_unreference(brw->vs.base.scratch_bo);
   brw_bo_unreference(brw->tcs.base.scratch_bo);
   brw_bo_unreference(brw->tes.base.scratch_bo);
   brw_bo_unreference(brw->gs.base.scratch_bo);
   brw_bo_unreference(brw->wm.base.scratch_bo);

   brw_bo_unreference(brw->vs.base.push_const_bo);
   brw_bo_unreference(brw->tcs.base.push_const_bo);
   brw_bo_unreference(brw->tes.base.push_const_bo);
   brw_bo_unreference(brw->gs.base.push_const_bo);
   brw_bo_unreference(brw->wm.base.push_const_bo);

   brw_destroy_hw_context(brw->bufmgr, brw->hw_ctx);

   if (ctx->swrast_context) {
      _swsetup_DestroyContext(&brw->ctx);
      _tnl_DestroyContext(&brw->ctx);
   }
   _vbo_DestroyContext(&brw->ctx);

   if (ctx->swrast_context)
      _swrast_DestroyContext(&brw->ctx);

   brw_fini_pipe_control(brw);
   intel_batchbuffer_free(&brw->batch);

   brw_bo_unreference(brw->throttle_batch[1]);
   brw_bo_unreference(brw->throttle_batch[0]);
   brw->throttle_batch[1] = NULL;
   brw->throttle_batch[0] = NULL;

   driDestroyOptionCache(&brw->optionCache);

   /* free the Mesa context */
   _mesa_free_context_data(&brw->ctx);

   ralloc_free(brw);
   driContextPriv->driverPrivate = NULL;
}

GLboolean
intelUnbindContext(__DRIcontext * driContextPriv)
{
   /* Unset current context and dispatch table */
   _mesa_make_current(NULL, NULL, NULL);

   return true;
}

/**
 * Fixes up the context for GLES2/3 with our default-to-sRGB-capable behavior
 * on window system framebuffers.
 *
 * Desktop GL is fairly reasonable in its handling of sRGB: You can ask if
 * your renderbuffer can do sRGB encode, and you can flip a switch that does
 * sRGB encode if the renderbuffer can handle it. You can ask specifically
 * for a visual where you're guaranteed to be capable, but it turns out that
 * everyone just makes all their ARGB8888 visuals capable and doesn't offer
 * incapable ones, because there's no difference between the two in resources
 * used. Applications thus get built that accidentally rely on the default
 * visual choice being sRGB, so we make ours sRGB capable. Everything sounds
 * great...
 *
 * But for GLES2/3, they decided that it was silly to not turn on sRGB encode
 * for sRGB renderbuffers you made with the GL_EXT_texture_sRGB equivalent.
 * So they removed the enable knob and made it "if the renderbuffer is sRGB
 * capable, do sRGB encode". Then, for your window system renderbuffers, you
 * can ask for sRGB visuals and get sRGB encode, or not ask for sRGB visuals
 * and get no sRGB encode (assuming that both kinds of visual are available).
 * Thus our choice to support sRGB by default on our visuals for desktop would
 * result in broken rendering of GLES apps that aren't expecting sRGB encode.
 *
 * Unfortunately, renderbuffer setup happens before a context is created. So
 * in intel_screen.c we always set up sRGB, and here, if you're a GLES2/3
 * context (without an sRGB visual), we go turn that back off before anyone
 * finds out.
 */
static void
intel_gles3_srgb_workaround(struct brw_context *brw,
                            struct gl_framebuffer *fb)
{
   struct gl_context *ctx = &brw->ctx;

   if (_mesa_is_desktop_gl(ctx) || !fb->Visual.sRGBCapable)
      return;

   for (int i = 0; i < BUFFER_COUNT; i++) {
      struct gl_renderbuffer *rb = fb->Attachment[i].Renderbuffer;

      /* Check if sRGB was specifically asked for. */
      struct intel_renderbuffer *irb = intel_get_renderbuffer(fb, i);
      if (irb && irb->need_srgb)
         return;

      if (rb)
         rb->Format = _mesa_get_srgb_format_linear(rb->Format);
   }
   /* Disable sRGB from framebuffers that are not compatible. */
   fb->Visual.sRGBCapable = false;
}

GLboolean
intelMakeCurrent(__DRIcontext * driContextPriv,
                 __DRIdrawable * driDrawPriv,
                 __DRIdrawable * driReadPriv)
{
   struct brw_context *brw;

   if (driContextPriv)
      brw = (struct brw_context *) driContextPriv->driverPrivate;
   else
      brw = NULL;

   if (driContextPriv) {
      struct gl_context *ctx = &brw->ctx;
      struct gl_framebuffer *fb, *readFb;

      if (driDrawPriv == NULL) {
         fb = _mesa_get_incomplete_framebuffer();
      } else {
         fb = driDrawPriv->driverPrivate;
         driContextPriv->dri2.draw_stamp = driDrawPriv->dri2.stamp - 1;
      }

      if (driReadPriv == NULL) {
         readFb = _mesa_get_incomplete_framebuffer();
      } else {
         readFb = driReadPriv->driverPrivate;
         driContextPriv->dri2.read_stamp = driReadPriv->dri2.stamp - 1;
      }

      /* The sRGB workaround changes the renderbuffer's format. We must change
       * the format before the renderbuffer's miptree gets allocated, otherwise
       * the formats of the renderbuffer and its miptree will differ.
       */
      intel_gles3_srgb_workaround(brw, fb);
      intel_gles3_srgb_workaround(brw, readFb);

      /* If the context viewport hasn't been initialized, force a call out to
       * the loader to get buffers so we have a drawable size for the initial
       * viewport. */
      if (!brw->ctx.ViewportInitialized)
         intel_prepare_render(brw);

      _mesa_make_current(ctx, fb, readFb);
   } else {
      _mesa_make_current(NULL, NULL, NULL);
   }

   return true;
}

void
intel_resolve_for_dri2_flush(struct brw_context *brw,
                             __DRIdrawable *drawable)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   if (devinfo->gen < 6) {
      /* MSAA and fast color clear are not supported, so don't waste time
       * checking whether a resolve is needed.
       */
      return;
   }

   struct gl_framebuffer *fb = drawable->driverPrivate;
   struct intel_renderbuffer *rb;

   /* Usually, only the back buffer will need to be downsampled. However,
    * the front buffer will also need it if the user has rendered into it.
    */
   static const gl_buffer_index buffers[2] = {
      BUFFER_BACK_LEFT,
      BUFFER_FRONT_LEFT,
   };

   for (int i = 0; i < 2; ++i) {
      rb = intel_get_renderbuffer(fb, buffers[i]);
      if (rb == NULL || rb->mt == NULL)
         continue;
      if (rb->mt->surf.samples == 1) {
         assert(rb->mt_layer == 0 && rb->mt_level == 0 &&
                rb->layer_count == 1);
         intel_miptree_prepare_external(brw, rb->mt);
      } else {
         intel_renderbuffer_downsample(brw, rb);
      }
   }
}

static unsigned
intel_bits_per_pixel(const struct intel_renderbuffer *rb)
{
   return _mesa_get_format_bytes(intel_rb_format(rb)) * 8;
}

static void
intel_query_dri2_buffers(struct brw_context *brw,
                         __DRIdrawable *drawable,
                         __DRIbuffer **buffers,
                         int *count);

static void
intel_process_dri2_buffer(struct brw_context *brw,
                          __DRIdrawable *drawable,
                          __DRIbuffer *buffer,
                          struct intel_renderbuffer *rb,
                          const char *buffer_name);

static void
intel_update_image_buffers(struct brw_context *brw, __DRIdrawable *drawable);

static void
intel_update_dri2_buffers(struct brw_context *brw, __DRIdrawable *drawable)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;
   struct intel_renderbuffer *rb;
   __DRIbuffer *buffers = NULL;
   int count;
   const char *region_name;

   /* Set this up front, so that in case our buffers get invalidated
    * while we're getting new buffers, we don't clobber the stamp and
    * thus ignore the invalidate. */
   drawable->lastStamp = drawable->dri2.stamp;

   if (unlikely(INTEL_DEBUG & DEBUG_DRI))
      fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);

   intel_query_dri2_buffers(brw, drawable, &buffers, &count);

   if (buffers == NULL)
      return;

   for (int i = 0; i < count; i++) {
      switch (buffers[i].attachment) {
      case __DRI_BUFFER_FRONT_LEFT:
         rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
         region_name = "dri2 front buffer";
         break;

      case __DRI_BUFFER_FAKE_FRONT_LEFT:
         rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
         region_name = "dri2 fake front buffer";
         break;

      case __DRI_BUFFER_BACK_LEFT:
         rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
         region_name = "dri2 back buffer";
         break;

      case __DRI_BUFFER_DEPTH:
      case __DRI_BUFFER_HIZ:
      case __DRI_BUFFER_DEPTH_STENCIL:
      case __DRI_BUFFER_STENCIL:
      case __DRI_BUFFER_ACCUM:
      default:
         fprintf(stderr,
                 "unhandled buffer attach event, attachment type %d\n",
                 buffers[i].attachment);
         return;
      }

      intel_process_dri2_buffer(brw, drawable, &buffers[i], rb, region_name);
   }

}

void
intel_update_renderbuffers(__DRIcontext *context, __DRIdrawable *drawable)
{
   struct brw_context *brw = context->driverPrivate;
   __DRIscreen *dri_screen = brw->screen->driScrnPriv;

   /* Set this up front, so that in case our buffers get invalidated
    * while we're getting new buffers, we don't clobber the stamp and
    * thus ignore the invalidate. */
   drawable->lastStamp = drawable->dri2.stamp;

   if (unlikely(INTEL_DEBUG & DEBUG_DRI))
      fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);

   if (dri_screen->image.loader)
      intel_update_image_buffers(brw, drawable);
   else
      intel_update_dri2_buffers(brw, drawable);

   driUpdateFramebufferSize(&brw->ctx, drawable);
}

/**
 * intel_prepare_render should be called anywhere that current read/drawbuffer
 * state is required.
 */
void
intel_prepare_render(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   __DRIcontext *driContext = brw->driContext;
   __DRIdrawable *drawable;

   drawable = driContext->driDrawablePriv;
   if (drawable && drawable->dri2.stamp != driContext->dri2.draw_stamp) {
      if (drawable->lastStamp != drawable->dri2.stamp)
         intel_update_renderbuffers(driContext, drawable);
      driContext->dri2.draw_stamp = drawable->dri2.stamp;
   }

   drawable = driContext->driReadablePriv;
   if (drawable && drawable->dri2.stamp != driContext->dri2.read_stamp) {
      if (drawable->lastStamp != drawable->dri2.stamp)
         intel_update_renderbuffers(driContext, drawable);
      driContext->dri2.read_stamp = drawable->dri2.stamp;
   }

   /* If we're currently rendering to the front buffer, the rendering
    * that will happen next will probably dirty the front buffer. So
    * mark it as dirty here.
    */
   if (_mesa_is_front_buffer_drawing(ctx->DrawBuffer))
      brw->front_buffer_dirty = true;
}

/**
 * \brief Query DRI2 to obtain a DRIdrawable's buffers.
 *
 * To determine which DRI buffers to request, examine the renderbuffers
 * attached to the drawable's framebuffer. Then request the buffers with
 * DRI2GetBuffers() or DRI2GetBuffersWithFormat().
 *
 * This is called from intel_update_renderbuffers().
 *
 * \param drawable      Drawable whose buffers are queried.
 * \param buffers       [out] List of buffers returned by DRI2 query.
 * \param buffer_count  [out] Number of buffers returned.
 *
 * \see intel_update_renderbuffers()
 * \see DRI2GetBuffers()
 * \see DRI2GetBuffersWithFormat()
 */
static void
intel_query_dri2_buffers(struct brw_context *brw,
                         __DRIdrawable *drawable,
                         __DRIbuffer **buffers,
                         int *buffer_count)
{
   __DRIscreen *dri_screen = brw->screen->driScrnPriv;
   struct gl_framebuffer *fb = drawable->driverPrivate;
   int i = 0;
   unsigned attachments[8];

   struct intel_renderbuffer *front_rb;
   struct intel_renderbuffer *back_rb;

   front_rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
   back_rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);

   memset(attachments, 0, sizeof(attachments));
   if ((_mesa_is_front_buffer_drawing(fb) ||
        _mesa_is_front_buffer_reading(fb) ||
        !back_rb) && front_rb) {
      /* If a fake front buffer is in use, then querying for
       * __DRI_BUFFER_FRONT_LEFT will cause the server to copy the image from
       * the real front buffer to the fake front buffer. So before doing the
       * query, we need to make sure all the pending drawing has landed in the
       * real front buffer.
       */
      intel_batchbuffer_flush(brw);
      intel_flush_front(&brw->ctx);

      attachments[i++] = __DRI_BUFFER_FRONT_LEFT;
      attachments[i++] = intel_bits_per_pixel(front_rb);
   } else if (front_rb && brw->front_buffer_dirty) {
      /* We have pending front buffer rendering, but we aren't querying for a
       * front buffer. If the front buffer we have is a fake front buffer,
       * the X server is going to throw it away when it processes the query.
       * So before doing the query, make sure all the pending drawing has
       * landed in the real front buffer.
       */
      intel_batchbuffer_flush(brw);
      intel_flush_front(&brw->ctx);
   }

   if (back_rb) {
      attachments[i++] = __DRI_BUFFER_BACK_LEFT;
      attachments[i++] = intel_bits_per_pixel(back_rb);
   }

   assert(i <= ARRAY_SIZE(attachments));
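
   /* Example of the (attachment, bpp) pairing built above: for a typical
    * double-buffered ARGB8888 drawable with no front-buffer rendering,
    * attachments holds { __DRI_BUFFER_BACK_LEFT, 32 } and i == 2, so the
    * loader below is asked for i / 2 == 1 buffer.
    */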

   *buffers =
      dri_screen->dri2.loader->getBuffersWithFormat(drawable,
                                                    &drawable->w,
                                                    &drawable->h,
                                                    attachments, i / 2,
                                                    buffer_count,
                                                    drawable->loaderPrivate);
}

/**
 * \brief Assign a DRI buffer's DRM region to a renderbuffer.
 *
 * This is called from intel_update_renderbuffers().
 *
 * \par Note:
 *    DRI buffers whose attachment point is DRI2BufferStencil or
 *    DRI2BufferDepthStencil are handled as special cases.
 *
 * \param buffer_name is a human readable name, such as "dri2 front buffer",
 *        that is passed to brw_bo_gem_create_from_name().
 *
 * \see intel_update_renderbuffers()
 */
static void
intel_process_dri2_buffer(struct brw_context *brw,
                          __DRIdrawable *drawable,
                          __DRIbuffer *buffer,
                          struct intel_renderbuffer *rb,
                          const char *buffer_name)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;
   struct brw_bo *bo;

   if (!rb)
      return;

   unsigned num_samples = rb->Base.Base.NumSamples;

   /* We try to avoid closing and reopening the same BO name, because the first
    * use of a mapping of the buffer involves a bunch of page faulting which is
    * moderately expensive.
    */
   struct intel_mipmap_tree *last_mt;
   if (num_samples == 0)
      last_mt = rb->mt;
   else
      last_mt = rb->singlesample_mt;

   uint32_t old_name = 0;
   if (last_mt) {
      /* The bo already has a name because the miptree was created by a
       * previous call to intel_process_dri2_buffer(). If a bo already has a
       * name, then brw_bo_flink() is a low-cost getter. It does not
       * create a new name.
       */
      brw_bo_flink(last_mt->bo, &old_name);
   }

   if (old_name == buffer->name)
      return;

   if (unlikely(INTEL_DEBUG & DEBUG_DRI)) {
      fprintf(stderr,
              "attaching buffer %d, at %d, cpp %d, pitch %d\n",
              buffer->name, buffer->attachment,
              buffer->cpp, buffer->pitch);
   }

   bo = brw_bo_gem_create_from_name(brw->bufmgr, buffer_name,
                                    buffer->name);
   if (!bo) {
      fprintf(stderr,
              "Failed to open BO for returned DRI2 buffer "
              "(%dx%d, %s, named %d).\n"
              "This is likely a bug in the X Server that will lead to a "
              "crash soon.\n",
              drawable->w, drawable->h, buffer_name, buffer->name);
      return;
   }

   struct intel_mipmap_tree *mt =
      intel_miptree_create_for_bo(brw,
                                  bo,
                                  intel_rb_format(rb),
                                  0,
                                  drawable->w,
                                  drawable->h,
                                  1,
                                  buffer->pitch,
                                  MIPTREE_CREATE_DEFAULT);
   if (!mt) {
      brw_bo_unreference(bo);
      return;
   }

   /* We got this BO from X11. We can't assume that we have coherent texture
    * access because X may suddenly decide to use it for scan-out, which would
    * destroy coherency.
    */
   bo->cache_coherent = false;

   if (!intel_update_winsys_renderbuffer_miptree(brw, rb, mt,
                                                 drawable->w, drawable->h,
                                                 buffer->pitch)) {
      brw_bo_unreference(bo);
      intel_miptree_release(&mt);
      return;
   }

   if (_mesa_is_front_buffer_drawing(fb) &&
       (buffer->attachment == __DRI_BUFFER_FRONT_LEFT ||
        buffer->attachment == __DRI_BUFFER_FAKE_FRONT_LEFT) &&
       rb->Base.Base.NumSamples > 1) {
      intel_renderbuffer_upsample(brw, rb);
   }

   assert(rb->mt);

   brw_bo_unreference(bo);
}

/**
 * \brief Query DRI image loader to obtain a DRIdrawable's buffers.
 *
 * To determine which DRI buffers to request, examine the renderbuffers
 * attached to the drawable's framebuffer. Then request the buffers from
 * the image loader.
 *
 * This is called from intel_update_renderbuffers().
 *
 * \param drawable      Drawable whose buffers are queried.
 * \param buffers       [out] List of buffers returned by DRI2 query.
 * \param buffer_count  [out] Number of buffers returned.
 *
 * \see intel_update_renderbuffers()
 */

static void
intel_update_image_buffer(struct brw_context *intel,
                          __DRIdrawable *drawable,
                          struct intel_renderbuffer *rb,
                          __DRIimage *buffer,
                          enum __DRIimageBufferMask buffer_type)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;

   if (!rb || !buffer->bo)
      return;

   unsigned num_samples = rb->Base.Base.NumSamples;

   /* Check and see if we're already bound to the right
    * buffer object
    */
   struct intel_mipmap_tree *last_mt;
   if (num_samples == 0)
      last_mt = rb->mt;
   else
      last_mt = rb->singlesample_mt;

   if (last_mt && last_mt->bo == buffer->bo)
      return;

   struct intel_mipmap_tree *mt =
      intel_miptree_create_for_dri_image(intel, buffer, GL_TEXTURE_2D,
                                         intel_rb_format(rb), true);
   if (!mt)
      return;

   if (!intel_update_winsys_renderbuffer_miptree(intel, rb, mt,
                                                 buffer->width, buffer->height,
                                                 buffer->pitch)) {
      intel_miptree_release(&mt);
      return;
   }

   if (_mesa_is_front_buffer_drawing(fb) &&
       buffer_type == __DRI_IMAGE_BUFFER_FRONT &&
       rb->Base.Base.NumSamples > 1) {
      intel_renderbuffer_upsample(intel, rb);
   }
}

static void
intel_update_image_buffers(struct brw_context *brw, __DRIdrawable *drawable)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;
   __DRIscreen *dri_screen = brw->screen->driScrnPriv;
   struct intel_renderbuffer *front_rb;
   struct intel_renderbuffer *back_rb;
   struct __DRIimageList images;
   mesa_format format;
   uint32_t buffer_mask = 0;
   int ret;

   front_rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
   back_rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);

   if (back_rb)
      format = intel_rb_format(back_rb);
   else if (front_rb)
      format = intel_rb_format(front_rb);
   else
      return;

   if (front_rb && (_mesa_is_front_buffer_drawing(fb) ||
                    _mesa_is_front_buffer_reading(fb) || !back_rb)) {
      buffer_mask |= __DRI_IMAGE_BUFFER_FRONT;
   }

   if (back_rb)
      buffer_mask |= __DRI_IMAGE_BUFFER_BACK;

   ret = dri_screen->image.loader->getBuffers(drawable,
                                              driGLFormatToImageFormat(format),
                                              &drawable->dri2.stamp,
                                              drawable->loaderPrivate,
                                              buffer_mask,
                                              &images);
   if (!ret)
      return;

   if (images.image_mask & __DRI_IMAGE_BUFFER_FRONT) {
      drawable->w = images.front->width;
      drawable->h = images.front->height;
      intel_update_image_buffer(brw,
                                drawable,
                                front_rb,
                                images.front,
                                __DRI_IMAGE_BUFFER_FRONT);
   }

   if (images.image_mask & __DRI_IMAGE_BUFFER_BACK) {
      drawable->w = images.back->width;
      drawable->h = images.back->height;
      intel_update_image_buffer(brw,
                                drawable,
                                back_rb,
                                images.back,
                                __DRI_IMAGE_BUFFER_BACK);
   }
}