Revert "i965: Enable flush control"
[mesa.git] / src / mesa / drivers / dri / i965 / brw_context.c
/*
 Copyright 2003 VMware, Inc.
 Copyright (C) Intel Corp. 2006. All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */


#include "compiler/nir/nir.h"
#include "main/api_exec.h"
#include "main/context.h"
#include "main/fbobject.h"
#include "main/extensions.h"
#include "main/imports.h"
#include "main/macros.h"
#include "main/points.h"
#include "main/version.h"
#include "main/vtxfmt.h"
#include "main/texobj.h"
#include "main/framebuffer.h"
#include "main/stencil.h"
#include "main/state.h"

#include "vbo/vbo_context.h"

#include "drivers/common/driverfuncs.h"
#include "drivers/common/meta.h"
#include "utils.h"

#include "brw_context.h"
#include "brw_defines.h"
#include "brw_blorp.h"
#include "brw_draw.h"
#include "brw_state.h"

#include "intel_batchbuffer.h"
#include "intel_buffer_objects.h"
#include "intel_buffers.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
#include "intel_pixel.h"
#include "intel_image.h"
#include "intel_tex.h"
#include "intel_tex_obj.h"

#include "swrast_setup/swrast_setup.h"
#include "tnl/tnl.h"
#include "tnl/t_pipeline.h"
#include "util/ralloc.h"
#include "util/debug.h"
#include "isl/isl.h"

/***************************************
 * Mesa's Driver Functions
 ***************************************/

const char *const brw_vendor_string = "Intel Open Source Technology Center";

static const char *
get_bsw_model(const struct intel_screen *screen)
{
   switch (screen->eu_total) {
   case 16:
      return "405";
   case 12:
      return "400";
   default:
      return "   ";
   }
}
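/* A note on the strings above: the default case is three spaces, not an
 * empty string, because brw_get_renderer_string() below patches the model
 * in with a fixed 3-byte memcpy over an "XXX" placeholder. For example, a
 * renderer string of the form "... Graphics XXX (Braswell)" would become
 * "... Graphics 405 (Braswell)" on a 16-EU part. (The quoted string is
 * illustrative, not copied from the PCI ID table.)
 */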

const char *
brw_get_renderer_string(const struct intel_screen *screen)
{
   const char *chipset;
   static char buffer[128];
   char *bsw = NULL;

   switch (screen->deviceID) {
#undef CHIPSET
#define CHIPSET(id, symbol, str) case id: chipset = str; break;
#include "pci_ids/i965_pci_ids.h"
   default:
      chipset = "Unknown Intel Chipset";
      break;
   }

   /* Braswell branding is funny, so we have to fix it up here */
   if (screen->deviceID == 0x22B1) {
      bsw = strdup(chipset);
      char *needle = strstr(bsw, "XXX");
      if (needle) {
         memcpy(needle, get_bsw_model(screen), 3);
         chipset = bsw;
      }
   }

   (void) driGetRendererString(buffer, chipset, 0);
   free(bsw);
   return buffer;
}

static const GLubyte *
intel_get_string(struct gl_context * ctx, GLenum name)
{
   const struct brw_context *const brw = brw_context(ctx);

   switch (name) {
   case GL_VENDOR:
      return (GLubyte *) brw_vendor_string;

   case GL_RENDERER:
      return
         (GLubyte *) brw_get_renderer_string(brw->screen);

   default:
      return NULL;
   }
}

static void
intel_viewport(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);
   __DRIcontext *driContext = brw->driContext;

   if (_mesa_is_winsys_fbo(ctx->DrawBuffer)) {
      if (driContext->driDrawablePriv)
         dri2InvalidateDrawable(driContext->driDrawablePriv);
      if (driContext->driReadablePriv)
         dri2InvalidateDrawable(driContext->driReadablePriv);
   }
}

static void
intel_update_framebuffer(struct gl_context *ctx,
                         struct gl_framebuffer *fb)
{
   struct brw_context *brw = brw_context(ctx);

   /* Quantize the derived default number of samples
    */
   fb->DefaultGeometry._NumSamples =
      intel_quantize_num_samples(brw->screen,
                                 fb->DefaultGeometry.NumSamples);
}
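/* A sketch of what the quantization above does, assuming
 * intel_quantize_num_samples() rounds a request up to the nearest sample
 * count the screen supports: with supported modes {2, 4, 8}, a
 * framebuffer default of 6 samples would be quantized to 8.
 */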

static void
intel_update_state(struct gl_context * ctx)
{
   GLuint new_state = ctx->NewState;
   struct brw_context *brw = brw_context(ctx);

   if (ctx->swrast_context)
      _swrast_InvalidateState(ctx, new_state);

   brw->NewGLState |= new_state;

   if (new_state & (_NEW_SCISSOR | _NEW_BUFFERS | _NEW_VIEWPORT))
      _mesa_update_draw_buffer_bounds(ctx, ctx->DrawBuffer);

   if (new_state & (_NEW_STENCIL | _NEW_BUFFERS)) {
      brw->stencil_enabled = _mesa_stencil_is_enabled(ctx);
      brw->stencil_two_sided = _mesa_stencil_is_two_sided(ctx);
      brw->stencil_write_enabled =
         _mesa_stencil_is_write_enabled(ctx, brw->stencil_two_sided);
   }

   if (new_state & _NEW_POLYGON)
      brw->polygon_front_bit = _mesa_polygon_get_front_bit(ctx);

   if (new_state & _NEW_BUFFERS) {
      intel_update_framebuffer(ctx, ctx->DrawBuffer);
      if (ctx->DrawBuffer != ctx->ReadBuffer)
         intel_update_framebuffer(ctx, ctx->ReadBuffer);
   }
}

#define flushFront(screen)      ((screen)->image.loader ? (screen)->image.loader->flushFrontBuffer : (screen)->dri2.loader->flushFrontBuffer)
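/* The flushFront() macro above selects whichever loader interface the
 * screen was created with: the image loader's flushFrontBuffer when
 * present, otherwise the DRI2 loader's. Call sites below either test the
 * resulting pointer or invoke it directly.
 */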

static void
intel_flush_front(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);
   __DRIcontext *driContext = brw->driContext;
   __DRIdrawable *driDrawable = driContext->driDrawablePriv;
   __DRIscreen *const dri_screen = brw->screen->driScrnPriv;

   if (brw->front_buffer_dirty && _mesa_is_winsys_fbo(ctx->DrawBuffer)) {
      if (flushFront(dri_screen) && driDrawable &&
          driDrawable->loaderPrivate) {

         /* Resolve before flushing FAKE_FRONT_LEFT to FRONT_LEFT.
          *
          * This potentially resolves both front and back buffer. It
          * is unnecessary to resolve the back, but harms nothing except
          * performance. And no one cares about front-buffer render
          * performance.
          */
         intel_resolve_for_dri2_flush(brw, driDrawable);
         intel_batchbuffer_flush(brw);

         flushFront(dri_screen)(driDrawable, driDrawable->loaderPrivate);

         /* We set the dirty bit in intel_prepare_render() if we're
          * front buffer rendering once we get there.
          */
         brw->front_buffer_dirty = false;
      }
   }
}

static void
intel_glFlush(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);

   intel_batchbuffer_flush(brw);
   intel_flush_front(ctx);

   brw->need_flush_throttle = true;
}

static void
intel_finish(struct gl_context * ctx)
{
   struct brw_context *brw = brw_context(ctx);

   intel_glFlush(ctx);

   if (brw->batch.last_bo)
      brw_bo_wait_rendering(brw->batch.last_bo);
}

static void
brw_init_driver_functions(struct brw_context *brw,
                          struct dd_function_table *functions)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   _mesa_init_driver_functions(functions);

   /* GLX uses DRI2 invalidate events to handle window resizing.
    * Unfortunately, EGL does not - libEGL is written in XCB (not Xlib),
    * which doesn't provide a mechanism for snooping the event queues.
    *
    * So EGL still relies on viewport hacks to handle window resizing.
    * This should go away with DRI3000.
    */
   if (!brw->driContext->driScreenPriv->dri2.useInvalidate)
      functions->Viewport = intel_viewport;

   functions->Flush = intel_glFlush;
   functions->Finish = intel_finish;
   functions->GetString = intel_get_string;
   functions->UpdateState = intel_update_state;

   intelInitTextureFuncs(functions);
   intelInitTextureImageFuncs(functions);
   intelInitTextureCopyImageFuncs(functions);
   intelInitCopyImageFuncs(functions);
   intelInitClearFuncs(functions);
   intelInitBufferFuncs(functions);
   intelInitPixelFuncs(functions);
   intelInitBufferObjectFuncs(functions);
   brw_init_syncobj_functions(functions);
   brw_init_object_purgeable_functions(functions);

   brwInitFragProgFuncs( functions );
   brw_init_common_queryobj_functions(functions);
   if (devinfo->gen >= 8 || devinfo->is_haswell)
      hsw_init_queryobj_functions(functions);
   else if (devinfo->gen >= 6)
      gen6_init_queryobj_functions(functions);
   else
      gen4_init_queryobj_functions(functions);
   brw_init_compute_functions(functions);
   brw_init_conditional_render_functions(functions);

   functions->QueryInternalFormat = brw_query_internal_format;

   functions->NewTransformFeedback = brw_new_transform_feedback;
   functions->DeleteTransformFeedback = brw_delete_transform_feedback;
   if (can_do_mi_math_and_lrr(brw->screen)) {
      functions->BeginTransformFeedback = hsw_begin_transform_feedback;
      functions->EndTransformFeedback = hsw_end_transform_feedback;
      functions->PauseTransformFeedback = hsw_pause_transform_feedback;
      functions->ResumeTransformFeedback = hsw_resume_transform_feedback;
   } else if (devinfo->gen >= 7) {
      functions->BeginTransformFeedback = gen7_begin_transform_feedback;
      functions->EndTransformFeedback = gen7_end_transform_feedback;
      functions->PauseTransformFeedback = gen7_pause_transform_feedback;
      functions->ResumeTransformFeedback = gen7_resume_transform_feedback;
      functions->GetTransformFeedbackVertexCount =
         brw_get_transform_feedback_vertex_count;
   } else {
      functions->BeginTransformFeedback = brw_begin_transform_feedback;
      functions->EndTransformFeedback = brw_end_transform_feedback;
      functions->PauseTransformFeedback = brw_pause_transform_feedback;
      functions->ResumeTransformFeedback = brw_resume_transform_feedback;
      functions->GetTransformFeedbackVertexCount =
         brw_get_transform_feedback_vertex_count;
   }
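   /* Three tiers above: screens where MI_MATH and MI_LOAD_REGISTER_REG
    * are usable take the HSW paths, remaining Gen7 parts take the gen7
    * paths plus a software-side vertex count, and older hardware falls
    * back to the pure software implementation.
    */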

   if (devinfo->gen >= 6)
      functions->GetSamplePosition = gen6_get_sample_position;

   /* GL_ARB_get_program_binary */
   brw_program_binary_init(brw->screen->deviceID);
   functions->GetProgramBinaryDriverSHA1 = brw_get_program_binary_driver_sha1;
   functions->ProgramBinarySerializeDriverBlob = brw_program_serialize_nir;
   functions->ProgramBinaryDeserializeDriverBlob =
      brw_deserialize_program_binary;
}

static void
brw_initialize_context_constants(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct gl_context *ctx = &brw->ctx;
   const struct brw_compiler *compiler = brw->screen->compiler;

   const bool stage_exists[MESA_SHADER_STAGES] = {
      [MESA_SHADER_VERTEX] = true,
      [MESA_SHADER_TESS_CTRL] = devinfo->gen >= 7,
      [MESA_SHADER_TESS_EVAL] = devinfo->gen >= 7,
      [MESA_SHADER_GEOMETRY] = devinfo->gen >= 6,
      [MESA_SHADER_FRAGMENT] = true,
      [MESA_SHADER_COMPUTE] =
         (_mesa_is_desktop_gl(ctx) &&
          ctx->Const.MaxComputeWorkGroupSize[0] >= 1024) ||
         (ctx->API == API_OPENGLES2 &&
          ctx->Const.MaxComputeWorkGroupSize[0] >= 128),
   };

   unsigned num_stages = 0;
   for (int i = 0; i < MESA_SHADER_STAGES; i++) {
      if (stage_exists[i])
         num_stages++;
   }

   unsigned max_samplers =
      devinfo->gen >= 8 || devinfo->is_haswell ? BRW_MAX_TEX_UNIT : 16;

   ctx->Const.MaxDualSourceDrawBuffers = 1;
   ctx->Const.MaxDrawBuffers = BRW_MAX_DRAW_BUFFERS;
   ctx->Const.MaxCombinedShaderOutputResources =
      MAX_IMAGE_UNITS + BRW_MAX_DRAW_BUFFERS;

   /* The timestamp register we can read for glGetTimestamp() is
    * sometimes only 32 bits, before scaling to nanoseconds (depending
    * on kernel).
    *
    * Once scaled to nanoseconds the timestamp would roll over at a
    * non-power-of-two, so an application couldn't use
    * GL_QUERY_COUNTER_BITS to handle rollover correctly. Instead, we
    * report 36 bits and truncate at that (rolling over 5 times as
    * often as the HW counter), and when the 32-bit counter rolls
    * over, it happens to also be at a rollover in the reported value
    * from near (1<<36) to 0.
    *
    * The low 32 bits roll over in ~343 seconds. Our 36-bit result
    * rolls over every ~69 seconds.
    */
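   /* Checking the arithmetic above (assuming the 80 ns timestamp tick
    * that the ~343 s figure implies): the raw counter wraps after
    * 2^32 ticks * 80 ns = ~343.6 s, while the 36-bit nanosecond value
    * wraps after 2^36 ns = ~68.7 s. Five reported rollovers fit exactly
    * in one hardware wrap, since 5 * 2^36 = 2^32 * 80.
    */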
   ctx->Const.QueryCounterBits.Timestamp = 36;

   ctx->Const.MaxTextureCoordUnits = 8; /* Mesa limit */
   ctx->Const.MaxImageUnits = MAX_IMAGE_UNITS;
   if (devinfo->gen >= 7) {
      ctx->Const.MaxRenderbufferSize = 16384;
      ctx->Const.MaxTextureLevels = MIN2(15 /* 16384 */, MAX_TEXTURE_LEVELS);
      ctx->Const.MaxCubeTextureLevels = 15; /* 16384 */
   } else {
      ctx->Const.MaxRenderbufferSize = 8192;
      ctx->Const.MaxTextureLevels = MIN2(14 /* 8192 */, MAX_TEXTURE_LEVELS);
      ctx->Const.MaxCubeTextureLevels = 14; /* 8192 */
   }
   ctx->Const.Max3DTextureLevels = 12; /* 2048 */
   ctx->Const.MaxArrayTextureLayers = devinfo->gen >= 7 ? 2048 : 512;
   ctx->Const.MaxTextureMbytes = 1536;
   ctx->Const.MaxTextureRectSize = devinfo->gen >= 7 ? 16384 : 8192;
   ctx->Const.MaxTextureMaxAnisotropy = 16.0;
   ctx->Const.MaxTextureLodBias = 15.0;
   ctx->Const.StripTextureBorder = true;
   if (devinfo->gen >= 7) {
      ctx->Const.MaxProgramTextureGatherComponents = 4;
      ctx->Const.MinProgramTextureGatherOffset = -32;
      ctx->Const.MaxProgramTextureGatherOffset = 31;
   } else if (devinfo->gen == 6) {
      ctx->Const.MaxProgramTextureGatherComponents = 1;
      ctx->Const.MinProgramTextureGatherOffset = -8;
      ctx->Const.MaxProgramTextureGatherOffset = 7;
   }

   ctx->Const.MaxUniformBlockSize = 65536;

   for (int i = 0; i < MESA_SHADER_STAGES; i++) {
      struct gl_program_constants *prog = &ctx->Const.Program[i];

      if (!stage_exists[i])
         continue;

      prog->MaxTextureImageUnits = max_samplers;

      prog->MaxUniformBlocks = BRW_MAX_UBO;
      prog->MaxCombinedUniformComponents =
         prog->MaxUniformComponents +
         ctx->Const.MaxUniformBlockSize / 4 * prog->MaxUniformBlocks;
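      /* With the 64 kB MaxUniformBlockSize set above, each UBO
       * contributes 65536 / 4 = 16384 components, so the combined limit
       * is the stage's default-block components plus 16384 per block.
       */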

      prog->MaxAtomicCounters = MAX_ATOMIC_COUNTERS;
      prog->MaxAtomicBuffers = BRW_MAX_ABO;
      prog->MaxImageUniforms = compiler->scalar_stage[i] ? BRW_MAX_IMAGES : 0;
      prog->MaxShaderStorageBlocks = BRW_MAX_SSBO;
   }

   ctx->Const.MaxTextureUnits =
      MIN2(ctx->Const.MaxTextureCoordUnits,
           ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits);

   ctx->Const.MaxUniformBufferBindings = num_stages * BRW_MAX_UBO;
   ctx->Const.MaxCombinedUniformBlocks = num_stages * BRW_MAX_UBO;
   ctx->Const.MaxCombinedAtomicBuffers = num_stages * BRW_MAX_ABO;
   ctx->Const.MaxCombinedShaderStorageBlocks = num_stages * BRW_MAX_SSBO;
   ctx->Const.MaxShaderStorageBufferBindings = num_stages * BRW_MAX_SSBO;
   ctx->Const.MaxCombinedTextureImageUnits = num_stages * max_samplers;
   ctx->Const.MaxCombinedImageUniforms = num_stages * BRW_MAX_IMAGES;


   /* Hardware only supports a limited number of transform feedback buffers.
    * So we need to override the Mesa default (which is based only on software
    * limits).
    */
   ctx->Const.MaxTransformFeedbackBuffers = BRW_MAX_SOL_BUFFERS;

   /* On Gen6, in the worst case, we use up one binding table entry per
    * transform feedback component (see comments above the definition of
    * BRW_MAX_SOL_BINDINGS, in brw_context.h), so we need to advertise a value
    * for MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS equal to
    * BRW_MAX_SOL_BINDINGS.
    *
    * In "separate components" mode, we need to divide this value by
    * BRW_MAX_SOL_BUFFERS, so that the total number of binding table entries
    * used up by all buffers will not exceed BRW_MAX_SOL_BINDINGS.
    */
   ctx->Const.MaxTransformFeedbackInterleavedComponents = BRW_MAX_SOL_BINDINGS;
   ctx->Const.MaxTransformFeedbackSeparateComponents =
      BRW_MAX_SOL_BINDINGS / BRW_MAX_SOL_BUFFERS;
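   /* E.g. assuming the brw_context.h values BRW_MAX_SOL_BINDINGS = 64 and
    * BRW_MAX_SOL_BUFFERS = 4, this advertises 64 interleaved components
    * but only 64 / 4 = 16 per buffer in separate mode, so the worst-case
    * binding table usage is the same either way.
    */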

   ctx->Const.AlwaysUseGetTransformFeedbackVertexCount =
      !can_do_mi_math_and_lrr(brw->screen);

   int max_samples;
   const int *msaa_modes = intel_supported_msaa_modes(brw->screen);
   const int clamp_max_samples =
      driQueryOptioni(&brw->optionCache, "clamp_max_samples");

   if (clamp_max_samples < 0) {
      max_samples = msaa_modes[0];
   } else {
      /* Select the largest supported MSAA mode that does not exceed
       * clamp_max_samples.
       */
      max_samples = 0;
      for (int i = 0; msaa_modes[i] != 0; ++i) {
         if (msaa_modes[i] <= clamp_max_samples) {
            max_samples = msaa_modes[i];
            break;
         }
      }
   }
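   /* Example: with msaa_modes = {8, 4, 2, 0} (sorted high to low) and
    * clamp_max_samples=6 in drirc, the loop above picks 4; a clamp below
    * every supported mode leaves max_samples at 0, disabling MSAA.
    */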

   ctx->Const.MaxSamples = max_samples;
   ctx->Const.MaxColorTextureSamples = max_samples;
   ctx->Const.MaxDepthTextureSamples = max_samples;
   ctx->Const.MaxIntegerSamples = max_samples;
   ctx->Const.MaxImageSamples = 0;

   /* gen6_set_sample_maps() sets SampleMap{2,4,8}x variables which are used
    * to map indices of rectangular grid to sample numbers within a pixel.
    * These variables are used by GL_EXT_framebuffer_multisample_blit_scaled
    * extension implementation. For more details see the comment above
    * gen6_set_sample_maps() definition.
    */
   gen6_set_sample_maps(ctx);

   ctx->Const.MinLineWidth = 1.0;
   ctx->Const.MinLineWidthAA = 1.0;
   if (devinfo->gen >= 6) {
      ctx->Const.MaxLineWidth = 7.375;
      ctx->Const.MaxLineWidthAA = 7.375;
      ctx->Const.LineWidthGranularity = 0.125;
   } else {
      ctx->Const.MaxLineWidth = 7.0;
      ctx->Const.MaxLineWidthAA = 7.0;
      ctx->Const.LineWidthGranularity = 0.5;
   }

   /* For non-antialiased lines, we have to round the line width to the
    * nearest whole number. Make sure that we don't advertise a line
    * width that, when rounded, will be beyond the actual hardware
    * maximum.
    */
   assert(roundf(ctx->Const.MaxLineWidth) <= ctx->Const.MaxLineWidth);

   ctx->Const.MinPointSize = 1.0;
   ctx->Const.MinPointSizeAA = 1.0;
   ctx->Const.MaxPointSize = 255.0;
   ctx->Const.MaxPointSizeAA = 255.0;
   ctx->Const.PointSizeGranularity = 1.0;

   if (devinfo->gen >= 5 || devinfo->is_g4x)
      ctx->Const.MaxClipPlanes = 8;

   ctx->Const.GLSLTessLevelsAsInputs = true;
   ctx->Const.PrimitiveRestartForPatches = true;

   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeInstructions = 16 * 1024;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxAluInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxTexInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxTexIndirections = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAluInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTexInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTexIndirections = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAttribs = 16;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTemps = 256;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAddressRegs = 1;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeParameters = 1024;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxEnvParams =
      MIN2(ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeParameters,
           ctx->Const.Program[MESA_SHADER_VERTEX].MaxEnvParams);

   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeInstructions = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAluInstructions = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTexInstructions = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTexIndirections = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAttribs = 12;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTemps = 256;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAddressRegs = 0;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeParameters = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxEnvParams =
      MIN2(ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeParameters,
           ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxEnvParams);

   /* Fragment shaders use real, 32-bit twos-complement integers for all
    * integer types.
    */
   ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.RangeMin = 31;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.RangeMax = 30;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.Precision = 0;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].HighInt = ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MediumInt = ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt;

   ctx->Const.Program[MESA_SHADER_VERTEX].LowInt.RangeMin = 31;
   ctx->Const.Program[MESA_SHADER_VERTEX].LowInt.RangeMax = 30;
   ctx->Const.Program[MESA_SHADER_VERTEX].LowInt.Precision = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].HighInt = ctx->Const.Program[MESA_SHADER_VERTEX].LowInt;
   ctx->Const.Program[MESA_SHADER_VERTEX].MediumInt = ctx->Const.Program[MESA_SHADER_VERTEX].LowInt;

   /* Gen6 converts quads to polygons at the beginning of the 3D pipeline,
    * but we're not sure how it's actually done for vertex order, which
    * affects the provoking vertex decision. Always use the last-vertex
    * convention for quad primitives, which works as expected for now.
    */
   if (devinfo->gen >= 6)
      ctx->Const.QuadsFollowProvokingVertexConvention = false;

   ctx->Const.NativeIntegers = true;
   ctx->Const.VertexID_is_zero_based = true;

   /* Regarding the CMP instruction, the Ivybridge PRM says:
    *
    *   "For each enabled channel 0b or 1b is assigned to the appropriate flag
    *    bit and 0/all zeros or all ones (e.g, byte 0xFF, word 0xFFFF, DWord
    *    0xFFFFFFFF) is assigned to dst."
    *
    * but PRMs for earlier generations say
    *
    *   "In dword format, one GRF may store up to 8 results. When the register
    *    is used later as a vector of Booleans, as only LSB at each channel
    *    contains meaning [sic] data, software should make sure all higher bits
    *    are masked out (e.g. by 'and-ing' an [sic] 0x01 constant)."
    *
    * We select the representation of a true boolean uniform to be ~0, and fix
    * the results of Gen <= 5 CMP instructions with -(result & 1).
    */
   ctx->Const.UniformBooleanTrue = ~0;
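   /* Worked through: if CMP leaves only the LSB meaningful, a "true" of
    * 0x00000001 is normalized by -(result & 1) = -(1) = 0xFFFFFFFF (all
    * ones, i.e. ~0), while a "false" of 0 stays 0. Later generations
    * already write all ones for true, per the Ivybridge PRM quote above.
    */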

   /* From the gen4 PRM, volume 4 page 127:
    *
    *    "For SURFTYPE_BUFFER non-rendertarget surfaces, this field specifies
    *     the base address of the first element of the surface, computed in
    *     software by adding the surface base address to the byte offset of
    *     the element in the buffer."
    *
    * However, unaligned accesses are slower, so enforce buffer alignment.
    *
    * In order to push UBO data, 3DSTATE_CONSTANT_XS imposes an additional
    * restriction: the start of the buffer needs to be 32B aligned.
    */
   ctx->Const.UniformBufferOffsetAlignment = 32;

   /* ShaderStorageBufferOffsetAlignment should be a cacheline (64 bytes) so
    * that we can safely have the CPU and GPU writing the same SSBO on
    * non-cachecoherent systems (our Atom CPUs). With UBOs, the GPU never
    * writes, so there's no problem. For an SSBO, the GPU and the CPU can
    * be updating disjoint regions of the buffer simultaneously and that will
    * break if the regions overlap the same cacheline.
    */
   ctx->Const.ShaderStorageBufferOffsetAlignment = 64;
   ctx->Const.TextureBufferOffsetAlignment = 16;
   ctx->Const.MaxTextureBufferSize = 128 * 1024 * 1024;

   if (devinfo->gen >= 6) {
      ctx->Const.MaxVarying = 32;
      ctx->Const.Program[MESA_SHADER_VERTEX].MaxOutputComponents = 128;
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxInputComponents =
         compiler->scalar_stage[MESA_SHADER_GEOMETRY] ? 128 : 64;
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxOutputComponents = 128;
      ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxInputComponents = 128;
      ctx->Const.Program[MESA_SHADER_TESS_CTRL].MaxInputComponents = 128;
      ctx->Const.Program[MESA_SHADER_TESS_CTRL].MaxOutputComponents = 128;
      ctx->Const.Program[MESA_SHADER_TESS_EVAL].MaxInputComponents = 128;
      ctx->Const.Program[MESA_SHADER_TESS_EVAL].MaxOutputComponents = 128;
   }

   /* We want the GLSL compiler to emit code that uses condition codes */
   for (int i = 0; i < MESA_SHADER_STAGES; i++) {
      ctx->Const.ShaderCompilerOptions[i] =
         brw->screen->compiler->glsl_compiler_options[i];
   }

   if (devinfo->gen >= 7) {
      ctx->Const.MaxViewportWidth = 32768;
      ctx->Const.MaxViewportHeight = 32768;
   }

   /* ARB_viewport_array, OES_viewport_array */
   if (devinfo->gen >= 6) {
      ctx->Const.MaxViewports = GEN6_NUM_VIEWPORTS;
      ctx->Const.ViewportSubpixelBits = 0;

      /* Cast to float before negating because MaxViewportWidth is unsigned.
       */
      ctx->Const.ViewportBounds.Min = -(float)ctx->Const.MaxViewportWidth;
      ctx->Const.ViewportBounds.Max = ctx->Const.MaxViewportWidth;
   }

   /* ARB_gpu_shader5 */
   if (devinfo->gen >= 7)
      ctx->Const.MaxVertexStreams = MIN2(4, MAX_VERTEX_STREAMS);

   /* ARB_framebuffer_no_attachments */
   ctx->Const.MaxFramebufferWidth = 16384;
   ctx->Const.MaxFramebufferHeight = 16384;
   ctx->Const.MaxFramebufferLayers = ctx->Const.MaxArrayTextureLayers;
   ctx->Const.MaxFramebufferSamples = max_samples;

   /* OES_primitive_bounding_box */
   ctx->Const.NoPrimitiveBoundingBoxOutput = true;

   /* TODO: We should be able to use STD430 packing by default on all hardware
    * but some piglit tests [1] currently fail on SNB when this is enabled.
    * The problem is that the message we use for uniform pulls in the vec4
    * back-end on SNB is the OWORD block load instruction, which takes its
    * offset in units of OWORDs (16 bytes). On IVB+, we use the sampler,
    * which doesn't have these restrictions.
    *
    * In the scalar back-end, we use the sampler for dynamic uniform loads and
    * pull an entire cache line at a time for constant offset loads, both of
    * which support almost any alignment.
    *
    * [1] glsl-1.40/uniform_buffer/vs-float-array-variable-index.shader_test
    */
   if (devinfo->gen >= 7)
      ctx->Const.UseSTD430AsDefaultPacking = true;

   if (!(ctx->Const.ContextFlags & GL_CONTEXT_FLAG_DEBUG_BIT))
      ctx->Const.AllowMappedBuffersDuringExecution = true;

   /* GL_ARB_get_program_binary */
   ctx->Const.NumProgramBinaryFormats = 1;
}

static void
brw_initialize_cs_context_constants(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   const struct intel_screen *screen = brw->screen;
   struct gen_device_info *devinfo = &brw->screen->devinfo;

   /* FINISHME: Do this for all platforms that the kernel supports */
   if (devinfo->is_cherryview &&
       screen->subslice_total > 0 && screen->eu_total > 0) {
      /* Logical CS threads = EUs per subslice * 7 threads per EU */
      uint32_t max_cs_threads = screen->eu_total / screen->subslice_total * 7;

      /* Fuse configurations may give more threads than expected, never less. */
      if (max_cs_threads > devinfo->max_cs_threads)
         devinfo->max_cs_threads = max_cs_threads;
   }
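   /* Example: a fully enabled Cherryview with eu_total = 16 and
    * subslice_total = 2 gives (16 / 2) * 7 = 56 logical threads, which
    * replaces the devinfo default only if that default was lower.
    */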

   /* Maximum number of scalar compute shader invocations that can be run in
    * parallel in the same subslice assuming SIMD32 dispatch.
    *
    * We don't advertise more than 64 threads, because we are limited to 64 by
    * our usage of thread_width_max in the gpgpu walker command. This only
    * currently impacts Haswell, which otherwise might be able to advertise 70
    * threads. With SIMD32 and 64 threads, Haswell still provides twice the
    * number of invocations required by ARB_compute_shader.
    */
   const unsigned max_threads = MIN2(64, devinfo->max_cs_threads);
   const uint32_t max_invocations = 32 * max_threads;
   ctx->Const.MaxComputeWorkGroupSize[0] = max_invocations;
   ctx->Const.MaxComputeWorkGroupSize[1] = max_invocations;
   ctx->Const.MaxComputeWorkGroupSize[2] = max_invocations;
   ctx->Const.MaxComputeWorkGroupInvocations = max_invocations;
   ctx->Const.MaxComputeSharedMemorySize = 64 * 1024;
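   /* Checking the arithmetic: even clamped to 64 threads, SIMD32 gives
    * 32 * 64 = 2048 invocations, double the 1024 minimum that
    * ARB_compute_shader requires for MAX_COMPUTE_WORK_GROUP_INVOCATIONS.
    */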
}

/**
 * Process driconf (drirc) options, setting appropriate context flags.
 *
 * intelInitExtensions still pokes at optionCache directly, in order to
 * avoid advertising various extensions. No flags are set, so it makes
 * sense to continue doing that there.
 */
static void
brw_process_driconf_options(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct gl_context *ctx = &brw->ctx;

   driOptionCache *options = &brw->optionCache;
   driParseConfigFiles(options, &brw->screen->optionCache,
                       brw->driContext->driScreenPriv->myNum, "i965");

   int bo_reuse_mode = driQueryOptioni(options, "bo_reuse");
   switch (bo_reuse_mode) {
   case DRI_CONF_BO_REUSE_DISABLED:
      break;
   case DRI_CONF_BO_REUSE_ALL:
      brw_bufmgr_enable_reuse(brw->bufmgr);
      break;
   }

   if (INTEL_DEBUG & DEBUG_NO_HIZ) {
      brw->has_hiz = false;
      /* On gen6, you can only do separate stencil with HIZ. */
      if (devinfo->gen == 6)
         brw->has_separate_stencil = false;
   }

   if (driQueryOptionb(options, "mesa_no_error"))
      ctx->Const.ContextFlags |= GL_CONTEXT_FLAG_NO_ERROR_BIT_KHR;

   if (driQueryOptionb(options, "always_flush_batch")) {
      fprintf(stderr, "flushing batchbuffer before/after each draw call\n");
      brw->always_flush_batch = true;
   }

   if (driQueryOptionb(options, "always_flush_cache")) {
      fprintf(stderr, "flushing GPU caches before/after each draw call\n");
      brw->always_flush_cache = true;
   }

   if (driQueryOptionb(options, "disable_throttling")) {
      fprintf(stderr, "disabling flush throttling\n");
      brw->disable_throttling = true;
   }

   brw->precompile = driQueryOptionb(&brw->optionCache, "shader_precompile");

   if (driQueryOptionb(&brw->optionCache, "precise_trig"))
      brw->screen->compiler->precise_trig = true;

   ctx->Const.ForceGLSLExtensionsWarn =
      driQueryOptionb(options, "force_glsl_extensions_warn");

   ctx->Const.ForceGLSLVersion =
      driQueryOptioni(options, "force_glsl_version");

   ctx->Const.DisableGLSLLineContinuations =
      driQueryOptionb(options, "disable_glsl_line_continuations");

   ctx->Const.AllowGLSLExtensionDirectiveMidShader =
      driQueryOptionb(options, "allow_glsl_extension_directive_midshader");

   ctx->Const.AllowGLSLBuiltinVariableRedeclaration =
      driQueryOptionb(options, "allow_glsl_builtin_variable_redeclaration");

   ctx->Const.AllowHigherCompatVersion =
      driQueryOptionb(options, "allow_higher_compat_version");

   ctx->Const.ForceGLSLAbsSqrt =
      driQueryOptionb(options, "force_glsl_abs_sqrt");

   ctx->Const.GLSLZeroInit = driQueryOptionb(options, "glsl_zero_init");

   brw->dual_color_blend_by_location =
      driQueryOptionb(options, "dual_color_blend_by_location");

   ctx->Const.AllowGLSLCrossStageInterpolationMismatch =
      driQueryOptionb(options, "allow_glsl_cross_stage_interpolation_mismatch");

   ctx->Const.dri_config_options_sha1 = ralloc_array(brw, unsigned char, 20);
   driComputeOptionsSha1(&brw->screen->optionCache,
                         ctx->Const.dri_config_options_sha1);
}

GLboolean
brwCreateContext(gl_api api,
                 const struct gl_config *mesaVis,
                 __DRIcontext *driContextPriv,
                 const struct __DriverContextConfig *ctx_config,
                 unsigned *dri_ctx_error,
                 void *sharedContextPrivate)
{
   struct gl_context *shareCtx = (struct gl_context *) sharedContextPrivate;
   struct intel_screen *screen = driContextPriv->driScreenPriv->driverPrivate;
   const struct gen_device_info *devinfo = &screen->devinfo;
   struct dd_function_table functions;

   /* Only allow the __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS flag if the kernel
    * provides us with context reset notifications.
    */
   uint32_t allowed_flags = __DRI_CTX_FLAG_DEBUG |
                            __DRI_CTX_FLAG_FORWARD_COMPATIBLE |
                            __DRI_CTX_FLAG_NO_ERROR;

   if (screen->has_context_reset_notification)
      allowed_flags |= __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS;

   if (ctx_config->flags & ~allowed_flags) {
      *dri_ctx_error = __DRI_CTX_ERROR_UNKNOWN_FLAG;
      return false;
   }

   if (ctx_config->attribute_mask & ~__DRIVER_CONTEXT_ATTRIB_RESET_STRATEGY) {
      *dri_ctx_error = __DRI_CTX_ERROR_UNKNOWN_ATTRIBUTE;
      return false;
   }

   bool notify_reset =
      ((ctx_config->attribute_mask & __DRIVER_CONTEXT_ATTRIB_RESET_STRATEGY) &&
       ctx_config->reset_strategy != __DRI_CTX_RESET_NO_NOTIFICATION);

   struct brw_context *brw = rzalloc(NULL, struct brw_context);
   if (!brw) {
      fprintf(stderr, "%s: failed to alloc context\n", __func__);
      *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
      return false;
   }

   driContextPriv->driverPrivate = brw;
   brw->driContext = driContextPriv;
   brw->screen = screen;
   brw->bufmgr = screen->bufmgr;

   brw->has_hiz = devinfo->has_hiz_and_separate_stencil;
   brw->has_separate_stencil = devinfo->has_hiz_and_separate_stencil;

   brw->has_swizzling = screen->hw_has_swizzling;

   brw->isl_dev = screen->isl_dev;

   brw->vs.base.stage = MESA_SHADER_VERTEX;
   brw->tcs.base.stage = MESA_SHADER_TESS_CTRL;
   brw->tes.base.stage = MESA_SHADER_TESS_EVAL;
   brw->gs.base.stage = MESA_SHADER_GEOMETRY;
   brw->wm.base.stage = MESA_SHADER_FRAGMENT;
   brw->cs.base.stage = MESA_SHADER_COMPUTE;
   if (devinfo->gen >= 8) {
      brw->vtbl.emit_depth_stencil_hiz = gen8_emit_depth_stencil_hiz;
   } else if (devinfo->gen >= 7) {
      brw->vtbl.emit_depth_stencil_hiz = gen7_emit_depth_stencil_hiz;
   } else if (devinfo->gen >= 6) {
      brw->vtbl.emit_depth_stencil_hiz = gen6_emit_depth_stencil_hiz;
   } else {
      brw->vtbl.emit_depth_stencil_hiz = brw_emit_depth_stencil_hiz;
   }

   brw_init_driver_functions(brw, &functions);

   if (notify_reset)
      functions.GetGraphicsResetStatus = brw_get_graphics_reset_status;

   struct gl_context *ctx = &brw->ctx;

   if (!_mesa_initialize_context(ctx, api, mesaVis, shareCtx, &functions)) {
      *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
      fprintf(stderr, "%s: failed to init mesa context\n", __func__);
      intelDestroyContext(driContextPriv);
      return false;
   }

   driContextSetFlags(ctx, ctx_config->flags);

   /* Initialize the software rasterizer and helper modules.
    *
    * As of GL 3.1 core, the gen4+ driver doesn't need the swrast context for
    * software fallbacks (which we have to support on legacy GL to do weird
    * glDrawPixels(), glBitmap(), and other functions).
    */
   if (api != API_OPENGL_CORE && api != API_OPENGLES2) {
      _swrast_CreateContext(ctx);
   }

   _vbo_CreateContext(ctx);
   if (ctx->swrast_context) {
      _tnl_CreateContext(ctx);
      TNL_CONTEXT(ctx)->Driver.RunPipeline = _tnl_run_pipeline;
      _swsetup_CreateContext(ctx);

      /* Configure swrast to match hardware characteristics: */
      _swrast_allow_pixel_fog(ctx, false);
      _swrast_allow_vertex_fog(ctx, true);
   }

   _mesa_meta_init(ctx);

   brw_process_driconf_options(brw);

   if (INTEL_DEBUG & DEBUG_PERF)
      brw->perf_debug = true;

   brw_initialize_cs_context_constants(brw);
   brw_initialize_context_constants(brw);

   ctx->Const.ResetStrategy = notify_reset
      ? GL_LOSE_CONTEXT_ON_RESET_ARB : GL_NO_RESET_NOTIFICATION_ARB;

   /* Reinitialize the context point state. It depends on ctx->Const values. */
   _mesa_init_point(ctx);

   intel_fbo_init(brw);

   intel_batchbuffer_init(brw);

   if (devinfo->gen >= 6) {
      /* Create a new hardware context. Using a hardware context means that
       * our GPU state will be saved/restored on context switch, allowing us
       * to assume that the GPU is in the same state we left it in.
       *
       * This is required for transform feedback buffer offsets, query objects,
       * and also allows us to reduce how much state we have to emit.
       */
      brw->hw_ctx = brw_create_hw_context(brw->bufmgr);

      if (!brw->hw_ctx) {
         fprintf(stderr, "Failed to create hardware context.\n");
         intelDestroyContext(driContextPriv);
         return false;
      }

      int hw_priority = BRW_CONTEXT_MEDIUM_PRIORITY;
      if (ctx_config->attribute_mask & __DRIVER_CONTEXT_ATTRIB_PRIORITY) {
         switch (ctx_config->priority) {
         case __DRI_CTX_PRIORITY_LOW:
            hw_priority = BRW_CONTEXT_LOW_PRIORITY;
            break;
         case __DRI_CTX_PRIORITY_HIGH:
            hw_priority = BRW_CONTEXT_HIGH_PRIORITY;
            break;
         }
      }
      if (hw_priority != I915_CONTEXT_DEFAULT_PRIORITY &&
          brw_hw_context_set_priority(brw->bufmgr, brw->hw_ctx, hw_priority)) {
         fprintf(stderr,
                 "Failed to set priority [%d:%d] for hardware context.\n",
                 ctx_config->priority, hw_priority);
         intelDestroyContext(driContextPriv);
         return false;
      }
   }

   if (brw_init_pipe_control(brw, devinfo)) {
      *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
      intelDestroyContext(driContextPriv);
      return false;
   }

   if (devinfo->gen == 10) {
      fprintf(stderr,
              "WARNING: i965 does not fully support Gen10 yet.\n"
              "Instability or lower performance might occur.\n");
   }

   brw_init_state(brw);

   intelInitExtensions(ctx);

   brw_init_surface_formats(brw);

   brw_blorp_init(brw);

   brw->urb.size = devinfo->urb.size;

   if (devinfo->gen == 6)
      brw->urb.gs_present = false;

   brw->prim_restart.in_progress = false;
   brw->prim_restart.enable_cut_index = false;
   brw->gs.enabled = false;
   brw->clip.viewport_count = 1;

   brw->predicate.state = BRW_PREDICATE_STATE_RENDER;

   brw->max_gtt_map_object_size = screen->max_gtt_map_object_size;

   ctx->VertexProgram._MaintainTnlProgram = true;
   ctx->FragmentProgram._MaintainTexEnvProgram = true;

   brw_draw_init( brw );

   if ((ctx_config->flags & __DRI_CTX_FLAG_DEBUG) != 0) {
      /* Turn on some extra GL_ARB_debug_output generation. */
      brw->perf_debug = true;
   }

   if ((ctx_config->flags & __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS) != 0) {
      ctx->Const.ContextFlags |= GL_CONTEXT_FLAG_ROBUST_ACCESS_BIT_ARB;
      ctx->Const.RobustAccess = GL_TRUE;
   }

   if (INTEL_DEBUG & DEBUG_SHADER_TIME)
      brw_init_shader_time(brw);

   _mesa_override_extensions(ctx);
   _mesa_compute_version(ctx);

   _mesa_initialize_dispatch_tables(ctx);
   _mesa_initialize_vbo_vtxfmt(ctx);

   if (ctx->Extensions.INTEL_performance_query)
      brw_init_performance_queries(brw);

   vbo_use_buffer_objects(ctx);
   vbo_always_unmap_buffers(ctx);

   brw_disk_cache_init(brw);

   return true;
}

void
intelDestroyContext(__DRIcontext * driContextPriv)
{
   struct brw_context *brw =
      (struct brw_context *) driContextPriv->driverPrivate;
   struct gl_context *ctx = &brw->ctx;
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   _mesa_meta_free(&brw->ctx);

   if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
      /* Force a report. */
      brw->shader_time.report_time = 0;

      brw_collect_and_report_shader_time(brw);
      brw_destroy_shader_time(brw);
   }

   if (devinfo->gen >= 6)
      blorp_finish(&brw->blorp);

   brw_destroy_state(brw);
   brw_draw_destroy(brw);

   brw_bo_unreference(brw->curbe.curbe_bo);

   brw_bo_unreference(brw->vs.base.scratch_bo);
   brw_bo_unreference(brw->tcs.base.scratch_bo);
   brw_bo_unreference(brw->tes.base.scratch_bo);
   brw_bo_unreference(brw->gs.base.scratch_bo);
   brw_bo_unreference(brw->wm.base.scratch_bo);

   brw_bo_unreference(brw->vs.base.push_const_bo);
   brw_bo_unreference(brw->tcs.base.push_const_bo);
   brw_bo_unreference(brw->tes.base.push_const_bo);
   brw_bo_unreference(brw->gs.base.push_const_bo);
   brw_bo_unreference(brw->wm.base.push_const_bo);

   brw_destroy_hw_context(brw->bufmgr, brw->hw_ctx);

   if (ctx->swrast_context) {
      _swsetup_DestroyContext(&brw->ctx);
      _tnl_DestroyContext(&brw->ctx);
   }
   _vbo_DestroyContext(&brw->ctx);

   if (ctx->swrast_context)
      _swrast_DestroyContext(&brw->ctx);

   brw_fini_pipe_control(brw);
   intel_batchbuffer_free(&brw->batch);

   brw_bo_unreference(brw->throttle_batch[1]);
   brw_bo_unreference(brw->throttle_batch[0]);
   brw->throttle_batch[1] = NULL;
   brw->throttle_batch[0] = NULL;

   driDestroyOptionCache(&brw->optionCache);

   /* free the Mesa context */
   _mesa_free_context_data(&brw->ctx);

   ralloc_free(brw);
   driContextPriv->driverPrivate = NULL;
}

GLboolean
intelUnbindContext(__DRIcontext * driContextPriv)
{
   /* Unset current context and dispatch table */
   _mesa_make_current(NULL, NULL, NULL);

   return true;
}

/**
 * Fixes up the context for GLES2/3 with our default-to-sRGB-capable behavior
 * on window system framebuffers.
 *
 * Desktop GL is fairly reasonable in its handling of sRGB: You can ask if
 * your renderbuffer can do sRGB encode, and you can flip a switch that does
 * sRGB encode if the renderbuffer can handle it. You can ask specifically
 * for a visual where you're guaranteed to be capable, but it turns out that
 * everyone just makes all their ARGB8888 visuals capable and doesn't offer
 * incapable ones, because there's no difference between the two in resources
 * used. Applications thus get built that accidentally rely on the default
 * visual choice being sRGB, so we make ours sRGB capable. Everything sounds
 * great...
 *
 * But for GLES2/3, they decided that it was silly to not turn on sRGB encode
 * for sRGB renderbuffers you made with the GL_EXT_texture_sRGB equivalent.
 * So they removed the enable knob and made it "if the renderbuffer is sRGB
 * capable, do sRGB encode". Then, for your window system renderbuffers, you
 * can ask for sRGB visuals and get sRGB encode, or not ask for sRGB visuals
 * and get no sRGB encode (assuming that both kinds of visual are available).
 * Thus our choice to support sRGB by default on our visuals for desktop would
 * result in broken rendering of GLES apps that aren't expecting sRGB encode.
 *
 * Unfortunately, renderbuffer setup happens before a context is created. So
 * in intel_screen.c we always set up sRGB, and here, if you're a GLES2/3
 * context (without an sRGB visual), we go turn that back off before anyone
 * finds out.
 */
static void
intel_gles3_srgb_workaround(struct brw_context *brw,
                            struct gl_framebuffer *fb)
{
   struct gl_context *ctx = &brw->ctx;

   if (_mesa_is_desktop_gl(ctx) || !fb->Visual.sRGBCapable)
      return;

   for (int i = 0; i < BUFFER_COUNT; i++) {
      struct gl_renderbuffer *rb = fb->Attachment[i].Renderbuffer;

      /* Check if sRGB was specifically asked for. */
      struct intel_renderbuffer *irb = intel_get_renderbuffer(fb, i);
      if (irb && irb->need_srgb)
         return;

      if (rb)
         rb->Format = _mesa_get_srgb_format_linear(rb->Format);
   }
   /* Disable sRGB from framebuffers that are not compatible. */
   fb->Visual.sRGBCapable = false;
}

GLboolean
intelMakeCurrent(__DRIcontext * driContextPriv,
                 __DRIdrawable * driDrawPriv,
                 __DRIdrawable * driReadPriv)
{
   struct brw_context *brw;

   if (driContextPriv)
      brw = (struct brw_context *) driContextPriv->driverPrivate;
   else
      brw = NULL;

   if (driContextPriv) {
      struct gl_context *ctx = &brw->ctx;
      struct gl_framebuffer *fb, *readFb;

      if (driDrawPriv == NULL) {
         fb = _mesa_get_incomplete_framebuffer();
      } else {
         fb = driDrawPriv->driverPrivate;
         driContextPriv->dri2.draw_stamp = driDrawPriv->dri2.stamp - 1;
      }

      if (driReadPriv == NULL) {
         readFb = _mesa_get_incomplete_framebuffer();
      } else {
         readFb = driReadPriv->driverPrivate;
         driContextPriv->dri2.read_stamp = driReadPriv->dri2.stamp - 1;
      }

      /* The sRGB workaround changes the renderbuffer's format. We must change
       * the format before the renderbuffer's miptree gets allocated, otherwise
       * the formats of the renderbuffer and its miptree will differ.
       */
      intel_gles3_srgb_workaround(brw, fb);
      intel_gles3_srgb_workaround(brw, readFb);

      /* If the context viewport hasn't been initialized, force a call out to
       * the loader to get buffers so we have a drawable size for the initial
       * viewport.
       */
      if (!brw->ctx.ViewportInitialized)
         intel_prepare_render(brw);

      _mesa_make_current(ctx, fb, readFb);
   } else {
      _mesa_make_current(NULL, NULL, NULL);
   }

   return true;
}

void
intel_resolve_for_dri2_flush(struct brw_context *brw,
                             __DRIdrawable *drawable)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   if (devinfo->gen < 6) {
      /* MSAA and fast color clear are not supported, so don't waste time
       * checking whether a resolve is needed.
       */
      return;
   }

   struct gl_framebuffer *fb = drawable->driverPrivate;
   struct intel_renderbuffer *rb;

   /* Usually, only the back buffer will need to be downsampled. However,
    * the front buffer will also need it if the user has rendered into it.
    */
   static const gl_buffer_index buffers[2] = {
      BUFFER_BACK_LEFT,
      BUFFER_FRONT_LEFT,
   };

   for (int i = 0; i < 2; ++i) {
      rb = intel_get_renderbuffer(fb, buffers[i]);
      if (rb == NULL || rb->mt == NULL)
         continue;
      if (rb->mt->surf.samples == 1) {
         assert(rb->mt_layer == 0 && rb->mt_level == 0 &&
                rb->layer_count == 1);
         intel_miptree_prepare_external(brw, rb->mt);
      } else {
         intel_renderbuffer_downsample(brw, rb);
      }
   }
}

static unsigned
intel_bits_per_pixel(const struct intel_renderbuffer *rb)
{
   return _mesa_get_format_bytes(intel_rb_format(rb)) * 8;
}

static void
intel_query_dri2_buffers(struct brw_context *brw,
                         __DRIdrawable *drawable,
                         __DRIbuffer **buffers,
                         int *count);

static void
intel_process_dri2_buffer(struct brw_context *brw,
                          __DRIdrawable *drawable,
                          __DRIbuffer *buffer,
                          struct intel_renderbuffer *rb,
                          const char *buffer_name);

static void
intel_update_image_buffers(struct brw_context *brw, __DRIdrawable *drawable);

static void
intel_update_dri2_buffers(struct brw_context *brw, __DRIdrawable *drawable)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;
   struct intel_renderbuffer *rb;
   __DRIbuffer *buffers = NULL;
   int count;
   const char *region_name;

   /* Set this up front, so that in case our buffers get invalidated
    * while we're getting new buffers, we don't clobber the stamp and
    * thus ignore the invalidate.
    */
   drawable->lastStamp = drawable->dri2.stamp;

   if (unlikely(INTEL_DEBUG & DEBUG_DRI))
      fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);

   intel_query_dri2_buffers(brw, drawable, &buffers, &count);

   if (buffers == NULL)
      return;

   for (int i = 0; i < count; i++) {
      switch (buffers[i].attachment) {
      case __DRI_BUFFER_FRONT_LEFT:
         rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
         region_name = "dri2 front buffer";
         break;

      case __DRI_BUFFER_FAKE_FRONT_LEFT:
         rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
         region_name = "dri2 fake front buffer";
         break;

      case __DRI_BUFFER_BACK_LEFT:
         rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
         region_name = "dri2 back buffer";
         break;

      case __DRI_BUFFER_DEPTH:
      case __DRI_BUFFER_HIZ:
      case __DRI_BUFFER_DEPTH_STENCIL:
      case __DRI_BUFFER_STENCIL:
      case __DRI_BUFFER_ACCUM:
      default:
         fprintf(stderr,
                 "unhandled buffer attach event, attachment type %d\n",
                 buffers[i].attachment);
         return;
      }

      intel_process_dri2_buffer(brw, drawable, &buffers[i], rb, region_name);
   }
}

void
intel_update_renderbuffers(__DRIcontext *context, __DRIdrawable *drawable)
{
   struct brw_context *brw = context->driverPrivate;
   __DRIscreen *dri_screen = brw->screen->driScrnPriv;

   /* Set this up front, so that in case our buffers get invalidated
    * while we're getting new buffers, we don't clobber the stamp and
    * thus ignore the invalidate.
    */
   drawable->lastStamp = drawable->dri2.stamp;

   if (unlikely(INTEL_DEBUG & DEBUG_DRI))
      fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);

   if (dri_screen->image.loader)
      intel_update_image_buffers(brw, drawable);
   else
      intel_update_dri2_buffers(brw, drawable);

   driUpdateFramebufferSize(&brw->ctx, drawable);
}

/**
 * intel_prepare_render should be called anywhere that current read/drawbuffer
 * state is required.
 */
void
intel_prepare_render(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   __DRIcontext *driContext = brw->driContext;
   __DRIdrawable *drawable;

   drawable = driContext->driDrawablePriv;
   if (drawable && drawable->dri2.stamp != driContext->dri2.draw_stamp) {
      if (drawable->lastStamp != drawable->dri2.stamp)
         intel_update_renderbuffers(driContext, drawable);
      driContext->dri2.draw_stamp = drawable->dri2.stamp;
   }

   drawable = driContext->driReadablePriv;
   if (drawable && drawable->dri2.stamp != driContext->dri2.read_stamp) {
      if (drawable->lastStamp != drawable->dri2.stamp)
         intel_update_renderbuffers(driContext, drawable);
      driContext->dri2.read_stamp = drawable->dri2.stamp;
   }

   /* If we're currently rendering to the front buffer, the rendering
    * that will happen next will probably dirty the front buffer. So
    * mark it as dirty here.
    */
   if (_mesa_is_front_buffer_drawing(ctx->DrawBuffer))
      brw->front_buffer_dirty = true;
}

/**
 * \brief Query DRI2 to obtain a DRIdrawable's buffers.
 *
 * To determine which DRI buffers to request, examine the renderbuffers
 * attached to the drawable's framebuffer. Then request the buffers with
 * DRI2GetBuffers() or DRI2GetBuffersWithFormat().
 *
 * This is called from intel_update_renderbuffers().
 *
 * \param drawable      Drawable whose buffers are queried.
 * \param buffers       [out] List of buffers returned by DRI2 query.
 * \param buffer_count  [out] Number of buffers returned.
 *
 * \see intel_update_renderbuffers()
 * \see DRI2GetBuffers()
 * \see DRI2GetBuffersWithFormat()
 */
static void
intel_query_dri2_buffers(struct brw_context *brw,
                         __DRIdrawable *drawable,
                         __DRIbuffer **buffers,
                         int *buffer_count)
{
   __DRIscreen *dri_screen = brw->screen->driScrnPriv;
   struct gl_framebuffer *fb = drawable->driverPrivate;
   int i = 0;
   unsigned attachments[8];

   struct intel_renderbuffer *front_rb;
   struct intel_renderbuffer *back_rb;

   front_rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
   back_rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);

   memset(attachments, 0, sizeof(attachments));
   if ((_mesa_is_front_buffer_drawing(fb) ||
        _mesa_is_front_buffer_reading(fb) ||
        !back_rb) && front_rb) {
      /* If a fake front buffer is in use, then querying for
       * __DRI_BUFFER_FRONT_LEFT will cause the server to copy the image from
       * the real front buffer to the fake front buffer. So before doing the
       * query, we need to make sure all the pending drawing has landed in the
       * real front buffer.
       */
      intel_batchbuffer_flush(brw);
      intel_flush_front(&brw->ctx);

      attachments[i++] = __DRI_BUFFER_FRONT_LEFT;
      attachments[i++] = intel_bits_per_pixel(front_rb);
   } else if (front_rb && brw->front_buffer_dirty) {
      /* We have pending front buffer rendering, but we aren't querying for a
       * front buffer. If the front buffer we have is a fake front buffer,
       * the X server is going to throw it away when it processes the query.
       * So before doing the query, make sure all the pending drawing has
       * landed in the real front buffer.
       */
      intel_batchbuffer_flush(brw);
      intel_flush_front(&brw->ctx);
   }

   if (back_rb) {
      attachments[i++] = __DRI_BUFFER_BACK_LEFT;
      attachments[i++] = intel_bits_per_pixel(back_rb);
   }

   assert(i <= ARRAY_SIZE(attachments));

   *buffers =
      dri_screen->dri2.loader->getBuffersWithFormat(drawable,
                                                    &drawable->w,
                                                    &drawable->h,
                                                    attachments, i / 2,
                                                    buffer_count,
                                                    drawable->loaderPrivate);
}
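/* The attachments array above is laid out as {attachment, bpp} pairs,
 * hence the i / 2 count passed to getBuffersWithFormat(). E.g. querying
 * front and back buffers of an ARGB8888 drawable sends
 * {__DRI_BUFFER_FRONT_LEFT, 32, __DRI_BUFFER_BACK_LEFT, 32} with a count
 * of 2.
 */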

/**
 * \brief Assign a DRI buffer's DRM region to a renderbuffer.
 *
 * This is called from intel_update_renderbuffers().
 *
 * \par Note:
 *    DRI buffers whose attachment point is DRI2BufferStencil or
 *    DRI2BufferDepthStencil are handled as special cases.
 *
 * \param buffer_name is a human readable name, such as "dri2 front buffer",
 *        that is passed to brw_bo_gem_create_from_name().
 *
 * \see intel_update_renderbuffers()
 */
static void
intel_process_dri2_buffer(struct brw_context *brw,
                          __DRIdrawable *drawable,
                          __DRIbuffer *buffer,
                          struct intel_renderbuffer *rb,
                          const char *buffer_name)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;
   struct brw_bo *bo;

   if (!rb)
      return;

   unsigned num_samples = rb->Base.Base.NumSamples;

   /* We try to avoid closing and reopening the same BO name, because the first
    * use of a mapping of the buffer involves a bunch of page faulting which is
    * moderately expensive.
    */
   struct intel_mipmap_tree *last_mt;
   if (num_samples == 0)
      last_mt = rb->mt;
   else
      last_mt = rb->singlesample_mt;

   uint32_t old_name = 0;
   if (last_mt) {
      /* The bo already has a name because the miptree was created by a
       * previous call to intel_process_dri2_buffer(). If a bo already has a
       * name, then brw_bo_flink() is a low-cost getter. It does not
       * create a new name.
       */
      brw_bo_flink(last_mt->bo, &old_name);
   }

   if (old_name == buffer->name)
      return;

   if (unlikely(INTEL_DEBUG & DEBUG_DRI)) {
      fprintf(stderr,
              "attaching buffer %d, at %d, cpp %d, pitch %d\n",
              buffer->name, buffer->attachment,
              buffer->cpp, buffer->pitch);
   }

   bo = brw_bo_gem_create_from_name(brw->bufmgr, buffer_name,
                                    buffer->name);
   if (!bo) {
      fprintf(stderr,
              "Failed to open BO for returned DRI2 buffer "
              "(%dx%d, %s, named %d).\n"
              "This is likely a bug in the X Server that will lead to a "
              "crash soon.\n",
              drawable->w, drawable->h, buffer_name, buffer->name);
      return;
   }

   struct intel_mipmap_tree *mt =
      intel_miptree_create_for_bo(brw,
                                  bo,
                                  intel_rb_format(rb),
                                  0,
                                  drawable->w,
                                  drawable->h,
                                  1,
                                  buffer->pitch,
                                  MIPTREE_CREATE_DEFAULT);
   if (!mt) {
      brw_bo_unreference(bo);
      return;
   }

   /* We got this BO from X11. We can't assume that we have coherent texture
    * access because X may suddenly decide to use it for scan-out which would
    * destroy coherency.
    */
   bo->cache_coherent = false;

   if (!intel_update_winsys_renderbuffer_miptree(brw, rb, mt,
                                                 drawable->w, drawable->h,
                                                 buffer->pitch)) {
      brw_bo_unreference(bo);
      intel_miptree_release(&mt);
      return;
   }

   if (_mesa_is_front_buffer_drawing(fb) &&
       (buffer->attachment == __DRI_BUFFER_FRONT_LEFT ||
        buffer->attachment == __DRI_BUFFER_FAKE_FRONT_LEFT) &&
       rb->Base.Base.NumSamples > 1) {
      intel_renderbuffer_upsample(brw, rb);
   }

   assert(rb->mt);

   brw_bo_unreference(bo);
}

/**
 * \brief Query the DRI image loader to obtain a DRIdrawable's buffers.
 *
 * To determine which DRI buffers to request, examine the renderbuffers
 * attached to the drawable's framebuffer. Then request the buffers from
 * the image loader.
 *
 * This is called from intel_update_renderbuffers().
 *
 * \param drawable      Drawable whose buffers are queried.
 * \param buffers       [out] List of buffers returned by DRI2 query.
 * \param buffer_count  [out] Number of buffers returned.
 *
 * \see intel_update_renderbuffers()
 */

static void
intel_update_image_buffer(struct brw_context *intel,
                          __DRIdrawable *drawable,
                          struct intel_renderbuffer *rb,
                          __DRIimage *buffer,
                          enum __DRIimageBufferMask buffer_type)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;

   if (!rb || !buffer->bo)
      return;

   unsigned num_samples = rb->Base.Base.NumSamples;

   /* Check and see if we're already bound to the right
    * buffer object
    */
   struct intel_mipmap_tree *last_mt;
   if (num_samples == 0)
      last_mt = rb->mt;
   else
      last_mt = rb->singlesample_mt;

   if (last_mt && last_mt->bo == buffer->bo)
      return;

   struct intel_mipmap_tree *mt =
      intel_miptree_create_for_dri_image(intel, buffer, GL_TEXTURE_2D,
                                         intel_rb_format(rb), true);
   if (!mt)
      return;

   if (!intel_update_winsys_renderbuffer_miptree(intel, rb, mt,
                                                 buffer->width, buffer->height,
                                                 buffer->pitch)) {
      intel_miptree_release(&mt);
      return;
   }

   if (_mesa_is_front_buffer_drawing(fb) &&
       buffer_type == __DRI_IMAGE_BUFFER_FRONT &&
       rb->Base.Base.NumSamples > 1) {
      intel_renderbuffer_upsample(intel, rb);
   }
}

static void
intel_update_image_buffers(struct brw_context *brw, __DRIdrawable *drawable)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;
   __DRIscreen *dri_screen = brw->screen->driScrnPriv;
   struct intel_renderbuffer *front_rb;
   struct intel_renderbuffer *back_rb;
   struct __DRIimageList images;
   mesa_format format;
   uint32_t buffer_mask = 0;
   int ret;

   front_rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
   back_rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);

   if (back_rb)
      format = intel_rb_format(back_rb);
   else if (front_rb)
      format = intel_rb_format(front_rb);
   else
      return;

   if (front_rb && (_mesa_is_front_buffer_drawing(fb) ||
                    _mesa_is_front_buffer_reading(fb) || !back_rb)) {
      buffer_mask |= __DRI_IMAGE_BUFFER_FRONT;
   }

   if (back_rb)
      buffer_mask |= __DRI_IMAGE_BUFFER_BACK;

   ret = dri_screen->image.loader->getBuffers(drawable,
                                              driGLFormatToImageFormat(format),
                                              &drawable->dri2.stamp,
                                              drawable->loaderPrivate,
                                              buffer_mask,
                                              &images);
   if (!ret)
      return;

   if (images.image_mask & __DRI_IMAGE_BUFFER_FRONT) {
      drawable->w = images.front->width;
      drawable->h = images.front->height;
      intel_update_image_buffer(brw,
                                drawable,
                                front_rb,
                                images.front,
                                __DRI_IMAGE_BUFFER_FRONT);
   }

   if (images.image_mask & __DRI_IMAGE_BUFFER_BACK) {
      drawable->w = images.back->width;
      drawable->h = images.back->height;
      intel_update_image_buffer(brw,
                                drawable,
                                back_rb,
                                images.back,
                                __DRI_IMAGE_BUFFER_BACK);
   }
}