i965: Improve conditional rendering in fallback paths.
[mesa.git] / src / mesa / drivers / dri / i965 / brw_context.c
1 /*
2 Copyright 2003 VMware, Inc.
3 Copyright (C) Intel Corp. 2006. All Rights Reserved.
4 Intel funded Tungsten Graphics to
5 develop this 3D driver.
6
7 Permission is hereby granted, free of charge, to any person obtaining
8 a copy of this software and associated documentation files (the
9 "Software"), to deal in the Software without restriction, including
10 without limitation the rights to use, copy, modify, merge, publish,
11 distribute, sublicense, and/or sell copies of the Software, and to
12 permit persons to whom the Software is furnished to do so, subject to
13 the following conditions:
14
15 The above copyright notice and this permission notice (including the
16 next paragraph) shall be included in all copies or substantial
17 portions of the Software.
18
19 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
20 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
22 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
23 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
24 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
25 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26
27 **********************************************************************/
28 /*
29 * Authors:
30 * Keith Whitwell <keithw@vmware.com>
31 */
32
33
34 #include "compiler/nir/nir.h"
35 #include "main/api_exec.h"
36 #include "main/context.h"
37 #include "main/fbobject.h"
38 #include "main/extensions.h"
39 #include "main/imports.h"
40 #include "main/macros.h"
41 #include "main/points.h"
42 #include "main/version.h"
43 #include "main/vtxfmt.h"
44 #include "main/texobj.h"
45 #include "main/framebuffer.h"
46
47 #include "vbo/vbo_context.h"
48
49 #include "drivers/common/driverfuncs.h"
50 #include "drivers/common/meta.h"
51 #include "utils.h"
52
53 #include "brw_context.h"
54 #include "brw_defines.h"
55 #include "brw_blorp.h"
56 #include "brw_draw.h"
57 #include "brw_state.h"
58
59 #include "intel_batchbuffer.h"
60 #include "intel_buffer_objects.h"
61 #include "intel_buffers.h"
62 #include "intel_fbo.h"
63 #include "intel_mipmap_tree.h"
64 #include "intel_pixel.h"
65 #include "intel_image.h"
66 #include "intel_tex.h"
67 #include "intel_tex_obj.h"
68
69 #include "swrast_setup/swrast_setup.h"
70 #include "tnl/tnl.h"
71 #include "tnl/t_pipeline.h"
72 #include "util/ralloc.h"
73 #include "util/debug.h"
74 #include "isl/isl.h"
75
76 /***************************************
77 * Mesa's Driver Functions
78 ***************************************/
79
80 const char *const brw_vendor_string = "Intel Open Source Technology Center";
81
82 static const char *
83 get_bsw_model(const struct intel_screen *screen)
84 {
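   /* Note: brw_get_renderer_string() memcpy()s exactly 3 bytes from the
    * string returned here over the "XXX" placeholder, so every value
    * returned here must be at least 3 characters long (hence the 3-space
    * default below).
    */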
85 switch (screen->eu_total) {
86 case 16:
87 return "405";
88 case 12:
89 return "400";
90 default:
91          return "   ";
92 }
93 }
94
95 const char *
96 brw_get_renderer_string(const struct intel_screen *screen)
97 {
98 const char *chipset;
99 static char buffer[128];
100 char *bsw = NULL;
101
102 switch (screen->deviceID) {
103 #undef CHIPSET
104 #define CHIPSET(id, symbol, str) case id: chipset = str; break;
105 #include "pci_ids/i965_pci_ids.h"
106 default:
107 chipset = "Unknown Intel Chipset";
108 break;
109 }
110
111 /* Braswell branding is funny, so we have to fix it up here */
112 if (screen->deviceID == 0x22B1) {
113 bsw = strdup(chipset);
114       char *needle = bsw ? strstr(bsw, "XXX") : NULL;
115 if (needle) {
116 memcpy(needle, get_bsw_model(screen), 3);
117 chipset = bsw;
118 }
119 }
120
121 (void) driGetRendererString(buffer, chipset, 0);
122 free(bsw);
123 return buffer;
124 }
125
126 static const GLubyte *
127 intel_get_string(struct gl_context * ctx, GLenum name)
128 {
129 const struct brw_context *const brw = brw_context(ctx);
130
131 switch (name) {
132 case GL_VENDOR:
133 return (GLubyte *) brw_vendor_string;
134
135 case GL_RENDERER:
136 return
137 (GLubyte *) brw_get_renderer_string(brw->screen);
138
139 default:
140 return NULL;
141 }
142 }
143
144 static void
145 intel_viewport(struct gl_context *ctx)
146 {
147 struct brw_context *brw = brw_context(ctx);
148 __DRIcontext *driContext = brw->driContext;
149
150 if (_mesa_is_winsys_fbo(ctx->DrawBuffer)) {
151 if (driContext->driDrawablePriv)
152 dri2InvalidateDrawable(driContext->driDrawablePriv);
153 if (driContext->driReadablePriv)
154 dri2InvalidateDrawable(driContext->driReadablePriv);
155 }
156 }
157
158 static void
159 intel_update_framebuffer(struct gl_context *ctx,
160 struct gl_framebuffer *fb)
161 {
162 struct brw_context *brw = brw_context(ctx);
163
164 /* Quantize the derived default number of samples
165 */
166 fb->DefaultGeometry._NumSamples =
167 intel_quantize_num_samples(brw->screen,
168 fb->DefaultGeometry.NumSamples);
169 }
170
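/* Mark auxiliary (e.g. color compression) usage as disabled at draw time
 * for any color draw buffer sharing the given BO, and report whether any
 * matched. Called below when the same BO is bound for sampling or as a
 * shader image and the aux data cannot be used in that path.
 */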
171 static bool
172 intel_disable_rb_aux_buffer(struct brw_context *brw, const struct brw_bo *bo)
173 {
174 const struct gl_framebuffer *fb = brw->ctx.DrawBuffer;
175 bool found = false;
176
177 for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
178 const struct intel_renderbuffer *irb =
179 intel_renderbuffer(fb->_ColorDrawBuffers[i]);
180
181 if (irb && irb->mt->bo == bo) {
182 found = brw->draw_aux_buffer_disabled[i] = true;
183 }
184 }
185
186 return found;
187 }
188
189 static void
190 intel_update_state(struct gl_context * ctx)
191 {
192 GLuint new_state = ctx->NewState;
193 struct brw_context *brw = brw_context(ctx);
194 struct intel_texture_object *tex_obj;
195 struct intel_renderbuffer *depth_irb;
196
197 if (ctx->swrast_context)
198 _swrast_InvalidateState(ctx, new_state);
199
200 brw->NewGLState |= new_state;
201
202 _mesa_unlock_context_textures(ctx);
203
204 intel_prepare_render(brw);
205
206 /* Resolve the depth buffer's HiZ buffer. */
207 depth_irb = intel_get_renderbuffer(ctx->DrawBuffer, BUFFER_DEPTH);
208 if (depth_irb && depth_irb->mt) {
209 intel_miptree_prepare_depth(brw, depth_irb->mt,
210 depth_irb->mt_level,
211 depth_irb->mt_layer,
212 depth_irb->layer_count);
213 }
214
215 memset(brw->draw_aux_buffer_disabled, 0,
216 sizeof(brw->draw_aux_buffer_disabled));
217
218 /* Resolve depth buffer and render cache of each enabled texture. */
219 int maxEnabledUnit = ctx->Texture._MaxEnabledTexImageUnit;
220 for (int i = 0; i <= maxEnabledUnit; i++) {
221 if (!ctx->Texture.Unit[i]._Current)
222 continue;
223 tex_obj = intel_texture_object(ctx->Texture.Unit[i]._Current);
224 if (!tex_obj || !tex_obj->mt)
225 continue;
226
227          /* We need intel_texture_object::_Format to be valid */
228 intel_finalize_mipmap_tree(brw, i);
229
230 bool aux_supported;
231 intel_miptree_prepare_texture(brw, tex_obj->mt, tex_obj->_Format,
232 &aux_supported);
233
234 if (!aux_supported && brw->gen >= 9 &&
235 intel_disable_rb_aux_buffer(brw, tex_obj->mt->bo)) {
236 perf_debug("Sampling renderbuffer with non-compressible format - "
237 "turning off compression");
238 }
239
240 brw_render_cache_set_check_flush(brw, tex_obj->mt->bo);
241
242 if (tex_obj->base.StencilSampling ||
243 tex_obj->mt->format == MESA_FORMAT_S_UINT8) {
244 intel_update_r8stencil(brw, tex_obj->mt);
245 }
246 }
247
248 /* Resolve color for each active shader image. */
249 for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
250 const struct gl_program *prog = ctx->_Shader->CurrentProgram[i];
251
252 if (unlikely(prog && prog->info.num_images)) {
253 for (unsigned j = 0; j < prog->info.num_images; j++) {
254 struct gl_image_unit *u =
255 &ctx->ImageUnits[prog->sh.ImageUnits[j]];
256 tex_obj = intel_texture_object(u->TexObj);
257
258 if (tex_obj && tex_obj->mt) {
259 intel_miptree_prepare_image(brw, tex_obj->mt);
260
261 if (intel_miptree_is_lossless_compressed(brw, tex_obj->mt) &&
262 intel_disable_rb_aux_buffer(brw, tex_obj->mt->bo)) {
263 perf_debug("Using renderbuffer as shader image - turning "
264 "off lossless compression");
265 }
266
267 brw_render_cache_set_check_flush(brw, tex_obj->mt->bo);
268 }
269 }
270 }
271 }
272
273 /* Resolve color buffers for non-coherent framebuffer fetch. */
274 if (!ctx->Extensions.MESA_shader_framebuffer_fetch &&
275 ctx->FragmentProgram._Current &&
276 ctx->FragmentProgram._Current->info.outputs_read) {
277 const struct gl_framebuffer *fb = ctx->DrawBuffer;
278
279 for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
280 const struct intel_renderbuffer *irb =
281 intel_renderbuffer(fb->_ColorDrawBuffers[i]);
282
283 if (irb) {
284 intel_miptree_prepare_fb_fetch(brw, irb->mt, irb->mt_level,
285 irb->mt_layer, irb->layer_count);
286 }
287 }
288 }
289
290 struct gl_framebuffer *fb = ctx->DrawBuffer;
291 for (int i = 0; i < fb->_NumColorDrawBuffers; i++) {
292 struct intel_renderbuffer *irb =
293 intel_renderbuffer(fb->_ColorDrawBuffers[i]);
294
295 if (irb == NULL || irb->mt == NULL)
296 continue;
297
298 intel_miptree_prepare_render(brw, irb->mt, irb->mt_level,
299 irb->mt_layer, irb->layer_count,
300 ctx->Color.sRGBEnabled);
301 }
302
303 _mesa_lock_context_textures(ctx);
304
305 if (new_state & _NEW_BUFFERS) {
306 intel_update_framebuffer(ctx, ctx->DrawBuffer);
307 if (ctx->DrawBuffer != ctx->ReadBuffer)
308 intel_update_framebuffer(ctx, ctx->ReadBuffer);
309 }
310 }
311
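/* Resolve to whichever loader interface (image loader or DRI2 loader)
 * provides the flushFrontBuffer hook for this screen.
 */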
312 #define flushFront(screen) ((screen)->image.loader ? (screen)->image.loader->flushFrontBuffer : (screen)->dri2.loader->flushFrontBuffer)
313
314 static void
315 intel_flush_front(struct gl_context *ctx)
316 {
317 struct brw_context *brw = brw_context(ctx);
318 __DRIcontext *driContext = brw->driContext;
319 __DRIdrawable *driDrawable = driContext->driDrawablePriv;
320 __DRIscreen *const dri_screen = brw->screen->driScrnPriv;
321
322 if (brw->front_buffer_dirty && _mesa_is_winsys_fbo(ctx->DrawBuffer)) {
323 if (flushFront(dri_screen) && driDrawable &&
324 driDrawable->loaderPrivate) {
325
326 /* Resolve before flushing FAKE_FRONT_LEFT to FRONT_LEFT.
327 *
328 * This potentially resolves both front and back buffer. It
329 * is unnecessary to resolve the back, but harms nothing except
330 * performance. And no one cares about front-buffer render
331 * performance.
332 */
333 intel_resolve_for_dri2_flush(brw, driDrawable);
334 intel_batchbuffer_flush(brw);
335
336 flushFront(dri_screen)(driDrawable, driDrawable->loaderPrivate);
337
338 /* We set the dirty bit in intel_prepare_render() if we're
339 * front buffer rendering once we get there.
340 */
341 brw->front_buffer_dirty = false;
342 }
343 }
344 }
345
346 static void
347 intel_glFlush(struct gl_context *ctx)
348 {
349 struct brw_context *brw = brw_context(ctx);
350
351 intel_batchbuffer_flush(brw);
352 intel_flush_front(ctx);
353
354 brw->need_flush_throttle = true;
355 }
356
357 static void
358 intel_finish(struct gl_context * ctx)
359 {
360 struct brw_context *brw = brw_context(ctx);
361
362 intel_glFlush(ctx);
363
364 if (brw->batch.last_bo)
365 brw_bo_wait_rendering(brw, brw->batch.last_bo);
366 }
367
368 static void
369 brw_init_driver_functions(struct brw_context *brw,
370 struct dd_function_table *functions)
371 {
372 _mesa_init_driver_functions(functions);
373
374 /* GLX uses DRI2 invalidate events to handle window resizing.
375 * Unfortunately, EGL does not - libEGL is written in XCB (not Xlib),
376 * which doesn't provide a mechanism for snooping the event queues.
377 *
378 * So EGL still relies on viewport hacks to handle window resizing.
379 * This should go away with DRI3000.
380 */
381 if (!brw->driContext->driScreenPriv->dri2.useInvalidate)
382 functions->Viewport = intel_viewport;
383
384 functions->Flush = intel_glFlush;
385 functions->Finish = intel_finish;
386 functions->GetString = intel_get_string;
387 functions->UpdateState = intel_update_state;
388
389 intelInitTextureFuncs(functions);
390 intelInitTextureImageFuncs(functions);
391 intelInitTextureSubImageFuncs(functions);
392 intelInitTextureCopyImageFuncs(functions);
393 intelInitCopyImageFuncs(functions);
394 intelInitClearFuncs(functions);
395 intelInitBufferFuncs(functions);
396 intelInitPixelFuncs(functions);
397 intelInitBufferObjectFuncs(functions);
398 brw_init_syncobj_functions(functions);
399 brw_init_object_purgeable_functions(functions);
400
401 brwInitFragProgFuncs( functions );
402 brw_init_common_queryobj_functions(functions);
403 if (brw->gen >= 8 || brw->is_haswell)
404 hsw_init_queryobj_functions(functions);
405 else if (brw->gen >= 6)
406 gen6_init_queryobj_functions(functions);
407 else
408 gen4_init_queryobj_functions(functions);
409 brw_init_compute_functions(functions);
410 brw_init_conditional_render_functions(functions);
411
412 functions->QueryInternalFormat = brw_query_internal_format;
413
414 functions->NewTransformFeedback = brw_new_transform_feedback;
415 functions->DeleteTransformFeedback = brw_delete_transform_feedback;
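   /* Where MI_MATH and MI_LOAD_REGISTER_REG are usable, the hsw_* paths
    * below track transform feedback offsets entirely on the GPU; otherwise
    * we fall back to counting vertices on the CPU through
    * GetTransformFeedbackVertexCount (see also the matching
    * AlwaysUseGetTransformFeedbackVertexCount setting in
    * brw_initialize_context_constants()).
    */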
416 if (can_do_mi_math_and_lrr(brw->screen)) {
417 functions->BeginTransformFeedback = hsw_begin_transform_feedback;
418 functions->EndTransformFeedback = hsw_end_transform_feedback;
419 functions->PauseTransformFeedback = hsw_pause_transform_feedback;
420 functions->ResumeTransformFeedback = hsw_resume_transform_feedback;
421 } else if (brw->gen >= 7) {
422 functions->BeginTransformFeedback = gen7_begin_transform_feedback;
423 functions->EndTransformFeedback = gen7_end_transform_feedback;
424 functions->PauseTransformFeedback = gen7_pause_transform_feedback;
425 functions->ResumeTransformFeedback = gen7_resume_transform_feedback;
426 functions->GetTransformFeedbackVertexCount =
427 brw_get_transform_feedback_vertex_count;
428 } else {
429 functions->BeginTransformFeedback = brw_begin_transform_feedback;
430 functions->EndTransformFeedback = brw_end_transform_feedback;
431 functions->PauseTransformFeedback = brw_pause_transform_feedback;
432 functions->ResumeTransformFeedback = brw_resume_transform_feedback;
433 functions->GetTransformFeedbackVertexCount =
434 brw_get_transform_feedback_vertex_count;
435 }
436
437 if (brw->gen >= 6)
438 functions->GetSamplePosition = gen6_get_sample_position;
439 }
440
441 static void
442 brw_initialize_context_constants(struct brw_context *brw)
443 {
444 struct gl_context *ctx = &brw->ctx;
445 const struct brw_compiler *compiler = brw->screen->compiler;
446
447 const bool stage_exists[MESA_SHADER_STAGES] = {
448 [MESA_SHADER_VERTEX] = true,
449 [MESA_SHADER_TESS_CTRL] = brw->gen >= 7,
450 [MESA_SHADER_TESS_EVAL] = brw->gen >= 7,
451 [MESA_SHADER_GEOMETRY] = brw->gen >= 6,
452 [MESA_SHADER_FRAGMENT] = true,
453 [MESA_SHADER_COMPUTE] =
454 ((ctx->API == API_OPENGL_COMPAT || ctx->API == API_OPENGL_CORE) &&
455 ctx->Const.MaxComputeWorkGroupSize[0] >= 1024) ||
456 (ctx->API == API_OPENGLES2 &&
457 ctx->Const.MaxComputeWorkGroupSize[0] >= 128) ||
458 _mesa_extension_override_enables.ARB_compute_shader,
459 };
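   /* Note: the MaxComputeWorkGroupSize checks above rely on
    * brw_initialize_cs_context_constants() having been called first (see
    * brwCreateContext()), since that is where those limits are filled in.
    */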
460
461 unsigned num_stages = 0;
462 for (int i = 0; i < MESA_SHADER_STAGES; i++) {
463 if (stage_exists[i])
464 num_stages++;
465 }
466
467 unsigned max_samplers =
468 brw->gen >= 8 || brw->is_haswell ? BRW_MAX_TEX_UNIT : 16;
469
470 ctx->Const.MaxDualSourceDrawBuffers = 1;
471 ctx->Const.MaxDrawBuffers = BRW_MAX_DRAW_BUFFERS;
472 ctx->Const.MaxCombinedShaderOutputResources =
473 MAX_IMAGE_UNITS + BRW_MAX_DRAW_BUFFERS;
474
475 /* The timestamp register we can read for glGetTimestamp() is
476 * sometimes only 32 bits, before scaling to nanoseconds (depending
477 * on kernel).
478 *
479 * Once scaled to nanoseconds the timestamp would roll over at a
480 * non-power-of-two, so an application couldn't use
481 * GL_QUERY_COUNTER_BITS to handle rollover correctly. Instead, we
482 * report 36 bits and truncate at that (rolling over 5 times as
483 * often as the HW counter), and when the 32-bit counter rolls
484 * over, it happens to also be at a rollover in the reported value
485 * from near (1<<36) to 0.
486 *
487     * The low 32 bits roll over in ~343 seconds. Our 36-bit result
488     * rolls over every ~69 seconds.
489 */
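   /* (Sanity check of the arithmetic above, assuming the usual 80 ns
    * hardware timestamp tick: 2^32 ticks * 80 ns/tick ~= 343.6 s, and
    * 2^36 ns ~= 68.7 s, which is exactly 343.6 s / 5.)
    */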
490 ctx->Const.QueryCounterBits.Timestamp = 36;
491
492 ctx->Const.MaxTextureCoordUnits = 8; /* Mesa limit */
493 ctx->Const.MaxImageUnits = MAX_IMAGE_UNITS;
494 if (brw->gen >= 7) {
495 ctx->Const.MaxRenderbufferSize = 16384;
496 ctx->Const.MaxTextureLevels = MIN2(15 /* 16384 */, MAX_TEXTURE_LEVELS);
497 ctx->Const.MaxCubeTextureLevels = 15; /* 16384 */
498 } else {
499 ctx->Const.MaxRenderbufferSize = 8192;
500 ctx->Const.MaxTextureLevels = MIN2(14 /* 8192 */, MAX_TEXTURE_LEVELS);
501 ctx->Const.MaxCubeTextureLevels = 14; /* 8192 */
502 }
503 ctx->Const.Max3DTextureLevels = 12; /* 2048 */
504 ctx->Const.MaxArrayTextureLayers = brw->gen >= 7 ? 2048 : 512;
505 ctx->Const.MaxTextureMbytes = 1536;
506 ctx->Const.MaxTextureRectSize = 1 << 12;
507 ctx->Const.MaxTextureMaxAnisotropy = 16.0;
508 ctx->Const.MaxTextureLodBias = 15.0;
509 ctx->Const.StripTextureBorder = true;
510 if (brw->gen >= 7) {
511 ctx->Const.MaxProgramTextureGatherComponents = 4;
512 ctx->Const.MinProgramTextureGatherOffset = -32;
513 ctx->Const.MaxProgramTextureGatherOffset = 31;
514 } else if (brw->gen == 6) {
515 ctx->Const.MaxProgramTextureGatherComponents = 1;
516 ctx->Const.MinProgramTextureGatherOffset = -8;
517 ctx->Const.MaxProgramTextureGatherOffset = 7;
518 }
519
520 ctx->Const.MaxUniformBlockSize = 65536;
521
522 for (int i = 0; i < MESA_SHADER_STAGES; i++) {
523 struct gl_program_constants *prog = &ctx->Const.Program[i];
524
525 if (!stage_exists[i])
526 continue;
527
528 prog->MaxTextureImageUnits = max_samplers;
529
530 prog->MaxUniformBlocks = BRW_MAX_UBO;
531 prog->MaxCombinedUniformComponents =
532 prog->MaxUniformComponents +
533 ctx->Const.MaxUniformBlockSize / 4 * prog->MaxUniformBlocks;
534
535 prog->MaxAtomicCounters = MAX_ATOMIC_COUNTERS;
536 prog->MaxAtomicBuffers = BRW_MAX_ABO;
537 prog->MaxImageUniforms = compiler->scalar_stage[i] ? BRW_MAX_IMAGES : 0;
538 prog->MaxShaderStorageBlocks = BRW_MAX_SSBO;
539 }
540
541 ctx->Const.MaxTextureUnits =
542 MIN2(ctx->Const.MaxTextureCoordUnits,
543 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits);
544
545 ctx->Const.MaxUniformBufferBindings = num_stages * BRW_MAX_UBO;
546 ctx->Const.MaxCombinedUniformBlocks = num_stages * BRW_MAX_UBO;
547 ctx->Const.MaxCombinedAtomicBuffers = num_stages * BRW_MAX_ABO;
548 ctx->Const.MaxCombinedShaderStorageBlocks = num_stages * BRW_MAX_SSBO;
549 ctx->Const.MaxShaderStorageBufferBindings = num_stages * BRW_MAX_SSBO;
550 ctx->Const.MaxCombinedTextureImageUnits = num_stages * max_samplers;
551 ctx->Const.MaxCombinedImageUniforms = num_stages * BRW_MAX_IMAGES;
552
553
554 /* Hardware only supports a limited number of transform feedback buffers.
555 * So we need to override the Mesa default (which is based only on software
556 * limits).
557 */
558 ctx->Const.MaxTransformFeedbackBuffers = BRW_MAX_SOL_BUFFERS;
559
560 /* On Gen6, in the worst case, we use up one binding table entry per
561 * transform feedback component (see comments above the definition of
562 * BRW_MAX_SOL_BINDINGS, in brw_context.h), so we need to advertise a value
563 * for MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS equal to
564 * BRW_MAX_SOL_BINDINGS.
565 *
566 * In "separate components" mode, we need to divide this value by
567 * BRW_MAX_SOL_BUFFERS, so that the total number of binding table entries
568 * used up by all buffers will not exceed BRW_MAX_SOL_BINDINGS.
569 */
570 ctx->Const.MaxTransformFeedbackInterleavedComponents = BRW_MAX_SOL_BINDINGS;
571 ctx->Const.MaxTransformFeedbackSeparateComponents =
572 BRW_MAX_SOL_BINDINGS / BRW_MAX_SOL_BUFFERS;
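   /* With the values in brw_context.h at the time of writing
    * (BRW_MAX_SOL_BINDINGS = 64, BRW_MAX_SOL_BUFFERS = 4), this works out
    * to 64 interleaved components and 64 / 4 = 16 separate components per
    * buffer.
    */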
573
574 ctx->Const.AlwaysUseGetTransformFeedbackVertexCount =
575 !can_do_mi_math_and_lrr(brw->screen);
576
577 int max_samples;
578 const int *msaa_modes = intel_supported_msaa_modes(brw->screen);
579 const int clamp_max_samples =
580 driQueryOptioni(&brw->optionCache, "clamp_max_samples");
581
582 if (clamp_max_samples < 0) {
583 max_samples = msaa_modes[0];
584 } else {
585 /* Select the largest supported MSAA mode that does not exceed
586 * clamp_max_samples.
587 */
588 max_samples = 0;
589 for (int i = 0; msaa_modes[i] != 0; ++i) {
590 if (msaa_modes[i] <= clamp_max_samples) {
591 max_samples = msaa_modes[i];
592 break;
593 }
594 }
595 }
596
597 ctx->Const.MaxSamples = max_samples;
598 ctx->Const.MaxColorTextureSamples = max_samples;
599 ctx->Const.MaxDepthTextureSamples = max_samples;
600 ctx->Const.MaxIntegerSamples = max_samples;
601 ctx->Const.MaxImageSamples = 0;
602
603 /* gen6_set_sample_maps() sets SampleMap{2,4,8}x variables which are used
604 * to map indices of rectangular grid to sample numbers within a pixel.
605 * These variables are used by GL_EXT_framebuffer_multisample_blit_scaled
606 * extension implementation. For more details see the comment above
607 * gen6_set_sample_maps() definition.
608 */
609 gen6_set_sample_maps(ctx);
610
611 ctx->Const.MinLineWidth = 1.0;
612 ctx->Const.MinLineWidthAA = 1.0;
613 if (brw->gen >= 6) {
614 ctx->Const.MaxLineWidth = 7.375;
615 ctx->Const.MaxLineWidthAA = 7.375;
616 ctx->Const.LineWidthGranularity = 0.125;
617 } else {
618 ctx->Const.MaxLineWidth = 7.0;
619 ctx->Const.MaxLineWidthAA = 7.0;
620 ctx->Const.LineWidthGranularity = 0.5;
621 }
622
623 /* For non-antialiased lines, we have to round the line width to the
624 * nearest whole number. Make sure that we don't advertise a line
625 * width that, when rounded, will be beyond the actual hardware
626 * maximum.
627 */
628 assert(roundf(ctx->Const.MaxLineWidth) <= ctx->Const.MaxLineWidth);
629
630 ctx->Const.MinPointSize = 1.0;
631 ctx->Const.MinPointSizeAA = 1.0;
632 ctx->Const.MaxPointSize = 255.0;
633 ctx->Const.MaxPointSizeAA = 255.0;
634 ctx->Const.PointSizeGranularity = 1.0;
635
636 if (brw->gen >= 5 || brw->is_g4x)
637 ctx->Const.MaxClipPlanes = 8;
638
639 ctx->Const.GLSLTessLevelsAsInputs = true;
640 ctx->Const.LowerTCSPatchVerticesIn = brw->gen >= 8;
641 ctx->Const.LowerTESPatchVerticesIn = true;
642 ctx->Const.PrimitiveRestartForPatches = true;
643
644 ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeInstructions = 16 * 1024;
645 ctx->Const.Program[MESA_SHADER_VERTEX].MaxAluInstructions = 0;
646 ctx->Const.Program[MESA_SHADER_VERTEX].MaxTexInstructions = 0;
647 ctx->Const.Program[MESA_SHADER_VERTEX].MaxTexIndirections = 0;
648 ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAluInstructions = 0;
649 ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTexInstructions = 0;
650 ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTexIndirections = 0;
651 ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAttribs = 16;
652 ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTemps = 256;
653 ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAddressRegs = 1;
654 ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeParameters = 1024;
655 ctx->Const.Program[MESA_SHADER_VERTEX].MaxEnvParams =
656 MIN2(ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeParameters,
657 ctx->Const.Program[MESA_SHADER_VERTEX].MaxEnvParams);
658
659 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeInstructions = 1024;
660 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAluInstructions = 1024;
661 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTexInstructions = 1024;
662 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTexIndirections = 1024;
663 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAttribs = 12;
664 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTemps = 256;
665 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAddressRegs = 0;
666 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeParameters = 1024;
667 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxEnvParams =
668 MIN2(ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeParameters,
669 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxEnvParams);
670
671 /* Fragment shaders use real, 32-bit twos-complement integers for all
672 * integer types.
673 */
674 ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.RangeMin = 31;
675 ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.RangeMax = 30;
676 ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.Precision = 0;
677 ctx->Const.Program[MESA_SHADER_FRAGMENT].HighInt = ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt;
678 ctx->Const.Program[MESA_SHADER_FRAGMENT].MediumInt = ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt;
679
680 ctx->Const.Program[MESA_SHADER_VERTEX].LowInt.RangeMin = 31;
681 ctx->Const.Program[MESA_SHADER_VERTEX].LowInt.RangeMax = 30;
682 ctx->Const.Program[MESA_SHADER_VERTEX].LowInt.Precision = 0;
683 ctx->Const.Program[MESA_SHADER_VERTEX].HighInt = ctx->Const.Program[MESA_SHADER_VERTEX].LowInt;
684 ctx->Const.Program[MESA_SHADER_VERTEX].MediumInt = ctx->Const.Program[MESA_SHADER_VERTEX].LowInt;
685
686    /* Gen6 converts quads to polygons at the beginning of the 3D pipeline,
687     * but we're not sure how the conversion orders the vertices, which
688     * affects the provoking-vertex decision. Always use the last-vertex
689     * convention for quad primitives; it works as expected for now.
690 */
691 if (brw->gen >= 6)
692 ctx->Const.QuadsFollowProvokingVertexConvention = false;
693
694 ctx->Const.NativeIntegers = true;
695 ctx->Const.VertexID_is_zero_based = true;
696
697 /* Regarding the CMP instruction, the Ivybridge PRM says:
698 *
699 * "For each enabled channel 0b or 1b is assigned to the appropriate flag
700 * bit and 0/all zeros or all ones (e.g, byte 0xFF, word 0xFFFF, DWord
701 * 0xFFFFFFFF) is assigned to dst."
702 *
703 * but PRMs for earlier generations say
704 *
705 * "In dword format, one GRF may store up to 8 results. When the register
706 * is used later as a vector of Booleans, as only LSB at each channel
707 * contains meaning [sic] data, software should make sure all higher bits
708 * are masked out (e.g. by 'and-ing' an [sic] 0x01 constant)."
709 *
710 * We select the representation of a true boolean uniform to be ~0, and fix
711     * the results of Gen <= 5 CMP instructions with -(result & 1).
712 */
713 ctx->Const.UniformBooleanTrue = ~0;
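   /* For example, a Gen5 CMP result may carry garbage above the LSB, say
    * 0x7A3F0001; -(0x7A3F0001 & 1) == -1 == 0xFFFFFFFF, the canonical ~0
    * "true" chosen above.
    */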
714
715 /* From the gen4 PRM, volume 4 page 127:
716 *
717 * "For SURFTYPE_BUFFER non-rendertarget surfaces, this field specifies
718 * the base address of the first element of the surface, computed in
719 * software by adding the surface base address to the byte offset of
720 * the element in the buffer."
721 *
722 * However, unaligned accesses are slower, so enforce buffer alignment.
723 */
724 ctx->Const.UniformBufferOffsetAlignment = 16;
725
726 /* ShaderStorageBufferOffsetAlignment should be a cacheline (64 bytes) so
727 * that we can safely have the CPU and GPU writing the same SSBO on
728 * non-cachecoherent systems (our Atom CPUs). With UBOs, the GPU never
729 * writes, so there's no problem. For an SSBO, the GPU and the CPU can
730 * be updating disjoint regions of the buffer simultaneously and that will
731 * break if the regions overlap the same cacheline.
732 */
733 ctx->Const.ShaderStorageBufferOffsetAlignment = 64;
734 ctx->Const.TextureBufferOffsetAlignment = 16;
735 ctx->Const.MaxTextureBufferSize = 128 * 1024 * 1024;
736
737 if (brw->gen >= 6) {
738 ctx->Const.MaxVarying = 32;
739 ctx->Const.Program[MESA_SHADER_VERTEX].MaxOutputComponents = 128;
740 ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxInputComponents = 64;
741 ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxOutputComponents = 128;
742 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxInputComponents = 128;
743 ctx->Const.Program[MESA_SHADER_TESS_CTRL].MaxInputComponents = 128;
744 ctx->Const.Program[MESA_SHADER_TESS_CTRL].MaxOutputComponents = 128;
745 ctx->Const.Program[MESA_SHADER_TESS_EVAL].MaxInputComponents = 128;
746 ctx->Const.Program[MESA_SHADER_TESS_EVAL].MaxOutputComponents = 128;
747 }
748
749 /* We want the GLSL compiler to emit code that uses condition codes */
750 for (int i = 0; i < MESA_SHADER_STAGES; i++) {
751 ctx->Const.ShaderCompilerOptions[i] =
752 brw->screen->compiler->glsl_compiler_options[i];
753 }
754
755 if (brw->gen >= 7) {
756 ctx->Const.MaxViewportWidth = 32768;
757 ctx->Const.MaxViewportHeight = 32768;
758 }
759
760 /* ARB_viewport_array, OES_viewport_array */
761 if (brw->gen >= 6) {
762 ctx->Const.MaxViewports = GEN6_NUM_VIEWPORTS;
763 ctx->Const.ViewportSubpixelBits = 0;
764
765 /* Cast to float before negating because MaxViewportWidth is unsigned.
766 */
767 ctx->Const.ViewportBounds.Min = -(float)ctx->Const.MaxViewportWidth;
768 ctx->Const.ViewportBounds.Max = ctx->Const.MaxViewportWidth;
769 }
770
771 /* ARB_gpu_shader5 */
772 if (brw->gen >= 7)
773 ctx->Const.MaxVertexStreams = MIN2(4, MAX_VERTEX_STREAMS);
774
775 /* ARB_framebuffer_no_attachments */
776 ctx->Const.MaxFramebufferWidth = 16384;
777 ctx->Const.MaxFramebufferHeight = 16384;
778 ctx->Const.MaxFramebufferLayers = ctx->Const.MaxArrayTextureLayers;
779 ctx->Const.MaxFramebufferSamples = max_samples;
780
781 /* OES_primitive_bounding_box */
782 ctx->Const.NoPrimitiveBoundingBoxOutput = true;
783 }
784
785 static void
786 brw_initialize_cs_context_constants(struct brw_context *brw)
787 {
788 struct gl_context *ctx = &brw->ctx;
789 const struct intel_screen *screen = brw->screen;
790 struct gen_device_info *devinfo = &brw->screen->devinfo;
791
792 /* FINISHME: Do this for all platforms that the kernel supports */
793 if (brw->is_cherryview &&
794 screen->subslice_total > 0 && screen->eu_total > 0) {
795 /* Logical CS threads = EUs per subslice * 7 threads per EU */
796 uint32_t max_cs_threads = screen->eu_total / screen->subslice_total * 7;
797
798 /* Fuse configurations may give more threads than expected, never less. */
799 if (max_cs_threads > devinfo->max_cs_threads)
800 devinfo->max_cs_threads = max_cs_threads;
801 }
802
803 /* Maximum number of scalar compute shader invocations that can be run in
804 * parallel in the same subslice assuming SIMD32 dispatch.
805 *
806 * We don't advertise more than 64 threads, because we are limited to 64 by
807 * our usage of thread_width_max in the gpgpu walker command. This only
808 * currently impacts Haswell, which otherwise might be able to advertise 70
809 * threads. With SIMD32 and 64 threads, Haswell still provides twice the
810     * number of invocations required by ARB_compute_shader.
811 */
812 const unsigned max_threads = MIN2(64, devinfo->max_cs_threads);
813 const uint32_t max_invocations = 32 * max_threads;
814 ctx->Const.MaxComputeWorkGroupSize[0] = max_invocations;
815 ctx->Const.MaxComputeWorkGroupSize[1] = max_invocations;
816 ctx->Const.MaxComputeWorkGroupSize[2] = max_invocations;
817 ctx->Const.MaxComputeWorkGroupInvocations = max_invocations;
818 ctx->Const.MaxComputeSharedMemorySize = 64 * 1024;
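   /* For example, on a Haswell part whose fusing would otherwise allow 70
    * threads, this clamps to 64 threads * 32 SIMD channels = 2048
    * invocations -- still twice the 1024 that ARB_compute_shader requires.
    */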
819 }
820
821 /**
822 * Process driconf (drirc) options, setting appropriate context flags.
823 *
824 * intelInitExtensions still pokes at optionCache directly, in order to
825 * avoid advertising various extensions. No flags are set, so it makes
826 * sense to continue doing that there.
827 */
828 static void
829 brw_process_driconf_options(struct brw_context *brw)
830 {
831 struct gl_context *ctx = &brw->ctx;
832
833 driOptionCache *options = &brw->optionCache;
834 driParseConfigFiles(options, &brw->screen->optionCache,
835 brw->driContext->driScreenPriv->myNum, "i965");
836
837 int bo_reuse_mode = driQueryOptioni(options, "bo_reuse");
838 switch (bo_reuse_mode) {
839 case DRI_CONF_BO_REUSE_DISABLED:
840 break;
841 case DRI_CONF_BO_REUSE_ALL:
842 brw_bufmgr_enable_reuse(brw->bufmgr);
843 break;
844 }
845
846 if (INTEL_DEBUG & DEBUG_NO_HIZ) {
847 brw->has_hiz = false;
848 /* On gen6, you can only do separate stencil with HIZ. */
849 if (brw->gen == 6)
850 brw->has_separate_stencil = false;
851 }
852
853 if (driQueryOptionb(options, "always_flush_batch")) {
854 fprintf(stderr, "flushing batchbuffer before/after each draw call\n");
855 brw->always_flush_batch = true;
856 }
857
858 if (driQueryOptionb(options, "always_flush_cache")) {
859 fprintf(stderr, "flushing GPU caches before/after each draw call\n");
860 brw->always_flush_cache = true;
861 }
862
863 if (driQueryOptionb(options, "disable_throttling")) {
864 fprintf(stderr, "disabling flush throttling\n");
865 brw->disable_throttling = true;
866 }
867
868 brw->precompile = driQueryOptionb(&brw->optionCache, "shader_precompile");
869
870 if (driQueryOptionb(&brw->optionCache, "precise_trig"))
871 brw->screen->compiler->precise_trig = true;
872
873 ctx->Const.ForceGLSLExtensionsWarn =
874 driQueryOptionb(options, "force_glsl_extensions_warn");
875
876 ctx->Const.ForceGLSLVersion =
877 driQueryOptioni(options, "force_glsl_version");
878
879 ctx->Const.DisableGLSLLineContinuations =
880 driQueryOptionb(options, "disable_glsl_line_continuations");
881
882 ctx->Const.AllowGLSLExtensionDirectiveMidShader =
883 driQueryOptionb(options, "allow_glsl_extension_directive_midshader");
884
885 ctx->Const.AllowGLSLBuiltinVariableRedeclaration =
886 driQueryOptionb(options, "allow_glsl_builtin_variable_redeclaration");
887
888 ctx->Const.AllowHigherCompatVersion =
889 driQueryOptionb(options, "allow_higher_compat_version");
890
891 ctx->Const.ForceGLSLAbsSqrt =
892 driQueryOptionb(options, "force_glsl_abs_sqrt");
893
894 ctx->Const.GLSLZeroInit = driQueryOptionb(options, "glsl_zero_init");
895
896 brw->dual_color_blend_by_location =
897 driQueryOptionb(options, "dual_color_blend_by_location");
898 }
899
900 GLboolean
901 brwCreateContext(gl_api api,
902 const struct gl_config *mesaVis,
903 __DRIcontext *driContextPriv,
904 unsigned major_version,
905 unsigned minor_version,
906 uint32_t flags,
907 bool notify_reset,
908 unsigned *dri_ctx_error,
909 void *sharedContextPrivate)
910 {
911 struct gl_context *shareCtx = (struct gl_context *) sharedContextPrivate;
912 struct intel_screen *screen = driContextPriv->driScreenPriv->driverPrivate;
913 const struct gen_device_info *devinfo = &screen->devinfo;
914 struct dd_function_table functions;
915
916 /* Only allow the __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS flag if the kernel
917 * provides us with context reset notifications.
918 */
919 uint32_t allowed_flags = __DRI_CTX_FLAG_DEBUG
920 | __DRI_CTX_FLAG_FORWARD_COMPATIBLE;
921
922 if (screen->has_context_reset_notification)
923 allowed_flags |= __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS;
924
925 if (flags & ~allowed_flags) {
926 *dri_ctx_error = __DRI_CTX_ERROR_UNKNOWN_FLAG;
927 return false;
928 }
929
930 struct brw_context *brw = rzalloc(NULL, struct brw_context);
931 if (!brw) {
932 fprintf(stderr, "%s: failed to alloc context\n", __func__);
933 *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
934 return false;
935 }
936
937 driContextPriv->driverPrivate = brw;
938 brw->driContext = driContextPriv;
939 brw->screen = screen;
940 brw->bufmgr = screen->bufmgr;
941
942 brw->gen = devinfo->gen;
943 brw->gt = devinfo->gt;
944 brw->is_g4x = devinfo->is_g4x;
945 brw->is_baytrail = devinfo->is_baytrail;
946 brw->is_haswell = devinfo->is_haswell;
947 brw->is_cherryview = devinfo->is_cherryview;
948 brw->is_broxton = devinfo->is_broxton;
949 brw->has_llc = devinfo->has_llc;
950 brw->has_hiz = devinfo->has_hiz_and_separate_stencil;
951 brw->has_separate_stencil = devinfo->has_hiz_and_separate_stencil;
952 brw->has_pln = devinfo->has_pln;
953 brw->has_compr4 = devinfo->has_compr4;
954 brw->has_surface_tile_offset = devinfo->has_surface_tile_offset;
955 brw->has_negative_rhw_bug = devinfo->has_negative_rhw_bug;
956 brw->needs_unlit_centroid_workaround =
957 devinfo->needs_unlit_centroid_workaround;
958
959 brw->must_use_separate_stencil = devinfo->must_use_separate_stencil;
960 brw->has_swizzling = screen->hw_has_swizzling;
961
962 isl_device_init(&brw->isl_dev, devinfo, screen->hw_has_swizzling);
963
964 brw->vs.base.stage = MESA_SHADER_VERTEX;
965 brw->tcs.base.stage = MESA_SHADER_TESS_CTRL;
966 brw->tes.base.stage = MESA_SHADER_TESS_EVAL;
967 brw->gs.base.stage = MESA_SHADER_GEOMETRY;
968 brw->wm.base.stage = MESA_SHADER_FRAGMENT;
969 if (brw->gen >= 8) {
970 gen8_init_vtable_surface_functions(brw);
971 brw->vtbl.emit_depth_stencil_hiz = gen8_emit_depth_stencil_hiz;
972 } else if (brw->gen >= 7) {
973 gen7_init_vtable_surface_functions(brw);
974 brw->vtbl.emit_depth_stencil_hiz = gen7_emit_depth_stencil_hiz;
975 } else if (brw->gen >= 6) {
976 gen6_init_vtable_surface_functions(brw);
977 brw->vtbl.emit_depth_stencil_hiz = gen6_emit_depth_stencil_hiz;
978 } else {
979 gen4_init_vtable_surface_functions(brw);
980 brw->vtbl.emit_depth_stencil_hiz = brw_emit_depth_stencil_hiz;
981 }
982
983 brw_init_driver_functions(brw, &functions);
984
985 if (notify_reset)
986 functions.GetGraphicsResetStatus = brw_get_graphics_reset_status;
987
988 struct gl_context *ctx = &brw->ctx;
989
990 if (!_mesa_initialize_context(ctx, api, mesaVis, shareCtx, &functions)) {
991 *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
992 fprintf(stderr, "%s: failed to init mesa context\n", __func__);
993 intelDestroyContext(driContextPriv);
994 return false;
995 }
996
997 driContextSetFlags(ctx, flags);
998
999 /* Initialize the software rasterizer and helper modules.
1000 *
1001 * As of GL 3.1 core, the gen4+ driver doesn't need the swrast context for
1002 * software fallbacks (which we have to support on legacy GL to do weird
1003 * glDrawPixels(), glBitmap(), and other functions).
1004 */
1005 if (api != API_OPENGL_CORE && api != API_OPENGLES2) {
1006 _swrast_CreateContext(ctx);
1007 }
1008
1009 _vbo_CreateContext(ctx);
1010 if (ctx->swrast_context) {
1011 _tnl_CreateContext(ctx);
1012 TNL_CONTEXT(ctx)->Driver.RunPipeline = _tnl_run_pipeline;
1013 _swsetup_CreateContext(ctx);
1014
1015 /* Configure swrast to match hardware characteristics: */
1016 _swrast_allow_pixel_fog(ctx, false);
1017 _swrast_allow_vertex_fog(ctx, true);
1018 }
1019
1020 _mesa_meta_init(ctx);
1021
1022 brw_process_driconf_options(brw);
1023
1024 if (INTEL_DEBUG & DEBUG_PERF)
1025 brw->perf_debug = true;
1026
1027 brw_initialize_cs_context_constants(brw);
1028 brw_initialize_context_constants(brw);
1029
1030 ctx->Const.ResetStrategy = notify_reset
1031 ? GL_LOSE_CONTEXT_ON_RESET_ARB : GL_NO_RESET_NOTIFICATION_ARB;
1032
1033 /* Reinitialize the context point state. It depends on ctx->Const values. */
1034 _mesa_init_point(ctx);
1035
1036 intel_fbo_init(brw);
1037
1038 intel_batchbuffer_init(&brw->batch, brw->bufmgr, brw->has_llc);
1039
1040 if (brw->gen >= 6) {
1041 /* Create a new hardware context. Using a hardware context means that
1042 * our GPU state will be saved/restored on context switch, allowing us
1043 * to assume that the GPU is in the same state we left it in.
1044 *
1045 * This is required for transform feedback buffer offsets, query objects,
1046 * and also allows us to reduce how much state we have to emit.
1047 */
1048 brw->hw_ctx = brw_create_hw_context(brw->bufmgr);
1049
1050 if (!brw->hw_ctx) {
1051 fprintf(stderr, "Failed to create hardware context.\n");
1052 intelDestroyContext(driContextPriv);
1053 return false;
1054 }
1055 }
1056
1057 if (brw_init_pipe_control(brw, devinfo)) {
1058 *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
1059 intelDestroyContext(driContextPriv);
1060 return false;
1061 }
1062
1063 brw_init_state(brw);
1064
1065 intelInitExtensions(ctx);
1066
1067 brw_init_surface_formats(brw);
1068
1069 brw_blorp_init(brw);
1070
1071 brw->urb.size = devinfo->urb.size;
1072
1073 if (brw->gen == 6)
1074 brw->urb.gs_present = false;
1075
1076 brw->prim_restart.in_progress = false;
1077 brw->prim_restart.enable_cut_index = false;
1078 brw->gs.enabled = false;
1079 brw->clip.viewport_count = 1;
1080
1081 brw->predicate.state = BRW_PREDICATE_STATE_RENDER;
1082
1083 brw->max_gtt_map_object_size = screen->max_gtt_map_object_size;
1084
1085 ctx->VertexProgram._MaintainTnlProgram = true;
1086 ctx->FragmentProgram._MaintainTexEnvProgram = true;
1087
1088 brw_draw_init( brw );
1089
1090 if ((flags & __DRI_CTX_FLAG_DEBUG) != 0) {
1091 /* Turn on some extra GL_ARB_debug_output generation. */
1092 brw->perf_debug = true;
1093 }
1094
1095 if ((flags & __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS) != 0) {
1096 ctx->Const.ContextFlags |= GL_CONTEXT_FLAG_ROBUST_ACCESS_BIT_ARB;
1097 ctx->Const.RobustAccess = GL_TRUE;
1098 }
1099
1100 if (INTEL_DEBUG & DEBUG_SHADER_TIME)
1101 brw_init_shader_time(brw);
1102
1103 _mesa_compute_version(ctx);
1104
1105 _mesa_initialize_dispatch_tables(ctx);
1106 _mesa_initialize_vbo_vtxfmt(ctx);
1107
1108 if (ctx->Extensions.INTEL_performance_query)
1109 brw_init_performance_queries(brw);
1110
1111 vbo_use_buffer_objects(ctx);
1112 vbo_always_unmap_buffers(ctx);
1113
1114 return true;
1115 }
1116
1117 void
1118 intelDestroyContext(__DRIcontext * driContextPriv)
1119 {
1120 struct brw_context *brw =
1121 (struct brw_context *) driContextPriv->driverPrivate;
1122 struct gl_context *ctx = &brw->ctx;
1123
1124 _mesa_meta_free(&brw->ctx);
1125
1126 if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
1127 /* Force a report. */
1128 brw->shader_time.report_time = 0;
1129
1130 brw_collect_and_report_shader_time(brw);
1131 brw_destroy_shader_time(brw);
1132 }
1133
1134 if (brw->gen >= 6)
1135 blorp_finish(&brw->blorp);
1136
1137 brw_destroy_state(brw);
1138 brw_draw_destroy(brw);
1139
1140 brw_bo_unreference(brw->curbe.curbe_bo);
1141 if (brw->vs.base.scratch_bo)
1142 brw_bo_unreference(brw->vs.base.scratch_bo);
1143 if (brw->tcs.base.scratch_bo)
1144 brw_bo_unreference(brw->tcs.base.scratch_bo);
1145 if (brw->tes.base.scratch_bo)
1146 brw_bo_unreference(brw->tes.base.scratch_bo);
1147 if (brw->gs.base.scratch_bo)
1148 brw_bo_unreference(brw->gs.base.scratch_bo);
1149 if (brw->wm.base.scratch_bo)
1150 brw_bo_unreference(brw->wm.base.scratch_bo);
1151
1152 brw_destroy_hw_context(brw->bufmgr, brw->hw_ctx);
1153
1154 if (ctx->swrast_context) {
1155 _swsetup_DestroyContext(&brw->ctx);
1156 _tnl_DestroyContext(&brw->ctx);
1157 }
1158 _vbo_DestroyContext(&brw->ctx);
1159
1160 if (ctx->swrast_context)
1161 _swrast_DestroyContext(&brw->ctx);
1162
1163 brw_fini_pipe_control(brw);
1164 intel_batchbuffer_free(&brw->batch);
1165
1166 brw_bo_unreference(brw->throttle_batch[1]);
1167 brw_bo_unreference(brw->throttle_batch[0]);
1168 brw->throttle_batch[1] = NULL;
1169 brw->throttle_batch[0] = NULL;
1170
1171 driDestroyOptionCache(&brw->optionCache);
1172
1173 /* free the Mesa context */
1174 _mesa_free_context_data(&brw->ctx);
1175
1176 ralloc_free(brw);
1177 driContextPriv->driverPrivate = NULL;
1178 }
1179
1180 GLboolean
1181 intelUnbindContext(__DRIcontext * driContextPriv)
1182 {
1183    /* Unset current context and dispatch table */
1184 _mesa_make_current(NULL, NULL, NULL);
1185
1186 return true;
1187 }
1188
1189 /**
1190  * Fixes up the context for GLES2/3 with our default-to-sRGB-capable behavior
1191 * on window system framebuffers.
1192 *
1193 * Desktop GL is fairly reasonable in its handling of sRGB: You can ask if
1194 * your renderbuffer can do sRGB encode, and you can flip a switch that does
1195 * sRGB encode if the renderbuffer can handle it. You can ask specifically
1196 * for a visual where you're guaranteed to be capable, but it turns out that
1197 * everyone just makes all their ARGB8888 visuals capable and doesn't offer
1198 * incapable ones, because there's no difference between the two in resources
1199 * used. Applications thus get built that accidentally rely on the default
1200 * visual choice being sRGB, so we make ours sRGB capable. Everything sounds
1201 * great...
1202 *
1203 * But for GLES2/3, they decided that it was silly to not turn on sRGB encode
1204 * for sRGB renderbuffers you made with the GL_EXT_texture_sRGB equivalent.
1205 * So they removed the enable knob and made it "if the renderbuffer is sRGB
1206 * capable, do sRGB encode". Then, for your window system renderbuffers, you
1207 * can ask for sRGB visuals and get sRGB encode, or not ask for sRGB visuals
1208 * and get no sRGB encode (assuming that both kinds of visual are available).
1209 * Thus our choice to support sRGB by default on our visuals for desktop would
1210 * result in broken rendering of GLES apps that aren't expecting sRGB encode.
1211 *
1212 * Unfortunately, renderbuffer setup happens before a context is created. So
1213 * in intel_screen.c we always set up sRGB, and here, if you're a GLES2/3
1214 * context (without an sRGB visual, though we don't have sRGB visuals exposed
1215 * yet), we go turn that back off before anyone finds out.
1216 */
1217 static void
1218 intel_gles3_srgb_workaround(struct brw_context *brw,
1219 struct gl_framebuffer *fb)
1220 {
1221 struct gl_context *ctx = &brw->ctx;
1222
1223 if (_mesa_is_desktop_gl(ctx) || !fb->Visual.sRGBCapable)
1224 return;
1225
1226 /* Some day when we support the sRGB capable bit on visuals available for
1227 * GLES, we'll need to respect that and not disable things here.
1228 */
1229 fb->Visual.sRGBCapable = false;
1230 for (int i = 0; i < BUFFER_COUNT; i++) {
1231 struct gl_renderbuffer *rb = fb->Attachment[i].Renderbuffer;
1232 if (rb)
1233 rb->Format = _mesa_get_srgb_format_linear(rb->Format);
1234 }
1235 }
1236
1237 GLboolean
1238 intelMakeCurrent(__DRIcontext * driContextPriv,
1239 __DRIdrawable * driDrawPriv,
1240 __DRIdrawable * driReadPriv)
1241 {
1242 struct brw_context *brw;
1243 GET_CURRENT_CONTEXT(curCtx);
1244
1245 if (driContextPriv)
1246 brw = (struct brw_context *) driContextPriv->driverPrivate;
1247 else
1248 brw = NULL;
1249
1250 /* According to the glXMakeCurrent() man page: "Pending commands to
1251 * the previous context, if any, are flushed before it is released."
1252 * But only flush if we're actually changing contexts.
1253 */
1254 if (brw_context(curCtx) && brw_context(curCtx) != brw) {
1255 _mesa_flush(curCtx);
1256 }
1257
1258 if (driContextPriv) {
1259 struct gl_context *ctx = &brw->ctx;
1260 struct gl_framebuffer *fb, *readFb;
1261
1262 if (driDrawPriv == NULL) {
1263 fb = _mesa_get_incomplete_framebuffer();
1264 } else {
1265 fb = driDrawPriv->driverPrivate;
1266 driContextPriv->dri2.draw_stamp = driDrawPriv->dri2.stamp - 1;
1267 }
1268
1269 if (driReadPriv == NULL) {
1270 readFb = _mesa_get_incomplete_framebuffer();
1271 } else {
1272 readFb = driReadPriv->driverPrivate;
1273 driContextPriv->dri2.read_stamp = driReadPriv->dri2.stamp - 1;
1274 }
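      /* Note: caching stamp - 1 above guarantees a mismatch on the next
       * intel_prepare_render(), forcing the buffers of the newly bound
       * drawables to be updated.
       */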
1275
1276 /* The sRGB workaround changes the renderbuffer's format. We must change
1277        * the format before the renderbuffer's miptree gets allocated, otherwise
1278 * the formats of the renderbuffer and its miptree will differ.
1279 */
1280 intel_gles3_srgb_workaround(brw, fb);
1281 intel_gles3_srgb_workaround(brw, readFb);
1282
1283 /* If the context viewport hasn't been initialized, force a call out to
1284 * the loader to get buffers so we have a drawable size for the initial
1285 * viewport. */
1286 if (!brw->ctx.ViewportInitialized)
1287 intel_prepare_render(brw);
1288
1289 _mesa_make_current(ctx, fb, readFb);
1290 } else {
1291 _mesa_make_current(NULL, NULL, NULL);
1292 }
1293
1294 return true;
1295 }
1296
1297 void
1298 intel_resolve_for_dri2_flush(struct brw_context *brw,
1299 __DRIdrawable *drawable)
1300 {
1301 if (brw->gen < 6) {
1302 /* MSAA and fast color clear are not supported, so don't waste time
1303 * checking whether a resolve is needed.
1304 */
1305 return;
1306 }
1307
1308 struct gl_framebuffer *fb = drawable->driverPrivate;
1309 struct intel_renderbuffer *rb;
1310
1311 /* Usually, only the back buffer will need to be downsampled. However,
1312 * the front buffer will also need it if the user has rendered into it.
1313 */
1314 static const gl_buffer_index buffers[2] = {
1315 BUFFER_BACK_LEFT,
1316 BUFFER_FRONT_LEFT,
1317 };
1318
1319 for (int i = 0; i < 2; ++i) {
1320 rb = intel_get_renderbuffer(fb, buffers[i]);
1321 if (rb == NULL || rb->mt == NULL)
1322 continue;
1323 if (rb->mt->num_samples <= 1) {
1324 assert(rb->mt_layer == 0 && rb->mt_level == 0 &&
1325 rb->layer_count == 1);
1326 intel_miptree_prepare_access(brw, rb->mt, 0, 1, 0, 1, false, false);
1327 } else {
1328 intel_renderbuffer_downsample(brw, rb);
1329 }
1330 }
1331 }
1332
1333 static unsigned
1334 intel_bits_per_pixel(const struct intel_renderbuffer *rb)
1335 {
1336 return _mesa_get_format_bytes(intel_rb_format(rb)) * 8;
1337 }
1338
1339 static void
1340 intel_query_dri2_buffers(struct brw_context *brw,
1341 __DRIdrawable *drawable,
1342 __DRIbuffer **buffers,
1343 int *count);
1344
1345 static void
1346 intel_process_dri2_buffer(struct brw_context *brw,
1347 __DRIdrawable *drawable,
1348 __DRIbuffer *buffer,
1349 struct intel_renderbuffer *rb,
1350 const char *buffer_name);
1351
1352 static void
1353 intel_update_image_buffers(struct brw_context *brw, __DRIdrawable *drawable);
1354
1355 static void
1356 intel_update_dri2_buffers(struct brw_context *brw, __DRIdrawable *drawable)
1357 {
1358 struct gl_framebuffer *fb = drawable->driverPrivate;
1359 struct intel_renderbuffer *rb;
1360 __DRIbuffer *buffers = NULL;
1361 int count;
1362 const char *region_name;
1363
1364 /* Set this up front, so that in case our buffers get invalidated
1365 * while we're getting new buffers, we don't clobber the stamp and
1366 * thus ignore the invalidate. */
1367 drawable->lastStamp = drawable->dri2.stamp;
1368
1369 if (unlikely(INTEL_DEBUG & DEBUG_DRI))
1370 fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);
1371
1372 intel_query_dri2_buffers(brw, drawable, &buffers, &count);
1373
1374 if (buffers == NULL)
1375 return;
1376
1377 for (int i = 0; i < count; i++) {
1378 switch (buffers[i].attachment) {
1379 case __DRI_BUFFER_FRONT_LEFT:
1380 rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
1381 region_name = "dri2 front buffer";
1382 break;
1383
1384 case __DRI_BUFFER_FAKE_FRONT_LEFT:
1385 rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
1386 region_name = "dri2 fake front buffer";
1387 break;
1388
1389 case __DRI_BUFFER_BACK_LEFT:
1390 rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
1391 region_name = "dri2 back buffer";
1392 break;
1393
1394 case __DRI_BUFFER_DEPTH:
1395 case __DRI_BUFFER_HIZ:
1396 case __DRI_BUFFER_DEPTH_STENCIL:
1397 case __DRI_BUFFER_STENCIL:
1398 case __DRI_BUFFER_ACCUM:
1399 default:
1400 fprintf(stderr,
1401 "unhandled buffer attach event, attachment type %d\n",
1402 buffers[i].attachment);
1403 return;
1404 }
1405
1406 intel_process_dri2_buffer(brw, drawable, &buffers[i], rb, region_name);
1407 }
1408
1409 }
1410
1411 void
1412 intel_update_renderbuffers(__DRIcontext *context, __DRIdrawable *drawable)
1413 {
1414 struct brw_context *brw = context->driverPrivate;
1415 __DRIscreen *dri_screen = brw->screen->driScrnPriv;
1416
1417 /* Set this up front, so that in case our buffers get invalidated
1418 * while we're getting new buffers, we don't clobber the stamp and
1419 * thus ignore the invalidate. */
1420 drawable->lastStamp = drawable->dri2.stamp;
1421
1422 if (unlikely(INTEL_DEBUG & DEBUG_DRI))
1423 fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);
1424
1425 if (dri_screen->image.loader)
1426 intel_update_image_buffers(brw, drawable);
1427 else
1428 intel_update_dri2_buffers(brw, drawable);
1429
1430 driUpdateFramebufferSize(&brw->ctx, drawable);
1431 }
1432
1433 /**
1434  * intel_prepare_render should be called anywhere that current read/drawbuffer
1435 * state is required.
1436 */
1437 void
1438 intel_prepare_render(struct brw_context *brw)
1439 {
1440 struct gl_context *ctx = &brw->ctx;
1441 __DRIcontext *driContext = brw->driContext;
1442 __DRIdrawable *drawable;
1443
1444 drawable = driContext->driDrawablePriv;
1445 if (drawable && drawable->dri2.stamp != driContext->dri2.draw_stamp) {
1446 if (drawable->lastStamp != drawable->dri2.stamp)
1447 intel_update_renderbuffers(driContext, drawable);
1448 driContext->dri2.draw_stamp = drawable->dri2.stamp;
1449 }
1450
1451 drawable = driContext->driReadablePriv;
1452 if (drawable && drawable->dri2.stamp != driContext->dri2.read_stamp) {
1453 if (drawable->lastStamp != drawable->dri2.stamp)
1454 intel_update_renderbuffers(driContext, drawable);
1455 driContext->dri2.read_stamp = drawable->dri2.stamp;
1456 }
1457
1458 /* If we're currently rendering to the front buffer, the rendering
1459 * that will happen next will probably dirty the front buffer. So
1460 * mark it as dirty here.
1461 */
1462 if (_mesa_is_front_buffer_drawing(ctx->DrawBuffer))
1463 brw->front_buffer_dirty = true;
1464 }
1465
1466 /**
1467 * \brief Query DRI2 to obtain a DRIdrawable's buffers.
1468 *
1469 * To determine which DRI buffers to request, examine the renderbuffers
1470 * attached to the drawable's framebuffer. Then request the buffers with
1471 * DRI2GetBuffers() or DRI2GetBuffersWithFormat().
1472 *
1473 * This is called from intel_update_renderbuffers().
1474 *
1475 * \param drawable Drawable whose buffers are queried.
1476 * \param buffers [out] List of buffers returned by DRI2 query.
1477 * \param buffer_count [out] Number of buffers returned.
1478 *
1479 * \see intel_update_renderbuffers()
1480 * \see DRI2GetBuffers()
1481 * \see DRI2GetBuffersWithFormat()
1482 */
1483 static void
1484 intel_query_dri2_buffers(struct brw_context *brw,
1485 __DRIdrawable *drawable,
1486 __DRIbuffer **buffers,
1487 int *buffer_count)
1488 {
1489 __DRIscreen *dri_screen = brw->screen->driScrnPriv;
1490 struct gl_framebuffer *fb = drawable->driverPrivate;
1491 int i = 0;
1492 unsigned attachments[8];
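   /* attachments[] is filled below with (attachment, bits-per-pixel) pairs,
    * which is why i / 2 is passed as the attachment count to
    * getBuffersWithFormat().
    */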
1493
1494 struct intel_renderbuffer *front_rb;
1495 struct intel_renderbuffer *back_rb;
1496
1497 front_rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
1498 back_rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
1499
1500 memset(attachments, 0, sizeof(attachments));
1501 if ((_mesa_is_front_buffer_drawing(fb) ||
1502 _mesa_is_front_buffer_reading(fb) ||
1503 !back_rb) && front_rb) {
1504 /* If a fake front buffer is in use, then querying for
1505 * __DRI_BUFFER_FRONT_LEFT will cause the server to copy the image from
1506 * the real front buffer to the fake front buffer. So before doing the
1507 * query, we need to make sure all the pending drawing has landed in the
1508 * real front buffer.
1509 */
1510 intel_batchbuffer_flush(brw);
1511 intel_flush_front(&brw->ctx);
1512
1513 attachments[i++] = __DRI_BUFFER_FRONT_LEFT;
1514 attachments[i++] = intel_bits_per_pixel(front_rb);
1515 } else if (front_rb && brw->front_buffer_dirty) {
1516 /* We have pending front buffer rendering, but we aren't querying for a
1517 * front buffer. If the front buffer we have is a fake front buffer,
1518 * the X server is going to throw it away when it processes the query.
1519 * So before doing the query, make sure all the pending drawing has
1520 * landed in the real front buffer.
1521 */
1522 intel_batchbuffer_flush(brw);
1523 intel_flush_front(&brw->ctx);
1524 }
1525
1526 if (back_rb) {
1527 attachments[i++] = __DRI_BUFFER_BACK_LEFT;
1528 attachments[i++] = intel_bits_per_pixel(back_rb);
1529 }
1530
1531 assert(i <= ARRAY_SIZE(attachments));
1532
1533 *buffers =
1534 dri_screen->dri2.loader->getBuffersWithFormat(drawable,
1535 &drawable->w,
1536 &drawable->h,
1537 attachments, i / 2,
1538 buffer_count,
1539 drawable->loaderPrivate);
1540 }
1541
1542 /**
1543 * \brief Assign a DRI buffer's DRM region to a renderbuffer.
1544 *
1545 * This is called from intel_update_renderbuffers().
1546 *
1547 * \par Note:
1548 * DRI buffers whose attachment point is DRI2BufferStencil or
1549 * DRI2BufferDepthStencil are handled as special cases.
1550 *
1551 * \param buffer_name is a human readable name, such as "dri2 front buffer",
1552 * that is passed to brw_bo_gem_create_from_name().
1553 *
1554 * \see intel_update_renderbuffers()
1555 */
1556 static void
1557 intel_process_dri2_buffer(struct brw_context *brw,
1558 __DRIdrawable *drawable,
1559 __DRIbuffer *buffer,
1560 struct intel_renderbuffer *rb,
1561 const char *buffer_name)
1562 {
1563 struct gl_framebuffer *fb = drawable->driverPrivate;
1564 struct brw_bo *bo;
1565
1566 if (!rb)
1567 return;
1568
1569 unsigned num_samples = rb->Base.Base.NumSamples;
1570
1571 /* We try to avoid closing and reopening the same BO name, because the first
1572 * use of a mapping of the buffer involves a bunch of page faulting which is
1573 * moderately expensive.
1574 */
1575 struct intel_mipmap_tree *last_mt;
1576 if (num_samples == 0)
1577 last_mt = rb->mt;
1578 else
1579 last_mt = rb->singlesample_mt;
1580
1581 uint32_t old_name = 0;
1582 if (last_mt) {
1583 /* The bo already has a name because the miptree was created by a
1584 * previous call to intel_process_dri2_buffer(). If a bo already has a
1585 * name, then brw_bo_flink() is a low-cost getter. It does not
1586 * create a new name.
1587 */
1588 brw_bo_flink(last_mt->bo, &old_name);
1589 }
1590
1591 if (old_name == buffer->name)
1592 return;
1593
1594 if (unlikely(INTEL_DEBUG & DEBUG_DRI)) {
1595 fprintf(stderr,
1596 "attaching buffer %d, at %d, cpp %d, pitch %d\n",
1597 buffer->name, buffer->attachment,
1598 buffer->cpp, buffer->pitch);
1599 }
1600
1601 bo = brw_bo_gem_create_from_name(brw->bufmgr, buffer_name,
1602 buffer->name);
1603 if (!bo) {
1604 fprintf(stderr,
1605 "Failed to open BO for returned DRI2 buffer "
1606 "(%dx%d, %s, named %d).\n"
1607 "This is likely a bug in the X Server that will lead to a "
1608 "crash soon.\n",
1609 drawable->w, drawable->h, buffer_name, buffer->name);
1610 return;
1611 }
1612
1613 intel_update_winsys_renderbuffer_miptree(brw, rb, bo,
1614 drawable->w, drawable->h,
1615 buffer->pitch);
1616
1617 if (_mesa_is_front_buffer_drawing(fb) &&
1618 (buffer->attachment == __DRI_BUFFER_FRONT_LEFT ||
1619 buffer->attachment == __DRI_BUFFER_FAKE_FRONT_LEFT) &&
1620 rb->Base.Base.NumSamples > 1) {
1621 intel_renderbuffer_upsample(brw, rb);
1622 }
1623
1624 assert(rb->mt);
1625
1626 brw_bo_unreference(bo);
1627 }
1628
1629 /**
1630 * \brief Query DRI image loader to obtain a DRIdrawable's buffers.
1631 *
1632 * To determine which DRI buffers to request, examine the renderbuffers
1633 * attached to the drawable's framebuffer. Then request the buffers from
1634  * the image loader.
1635 *
1636 * This is called from intel_update_renderbuffers().
1637 *
1638 * \param drawable Drawable whose buffers are queried.
1641 *
1642 * \see intel_update_renderbuffers()
1643 */
1644
1645 static void
1646 intel_update_image_buffer(struct brw_context *intel,
1647 __DRIdrawable *drawable,
1648 struct intel_renderbuffer *rb,
1649 __DRIimage *buffer,
1650 enum __DRIimageBufferMask buffer_type)
1651 {
1652 struct gl_framebuffer *fb = drawable->driverPrivate;
1653
1654 if (!rb || !buffer->bo)
1655 return;
1656
1657 unsigned num_samples = rb->Base.Base.NumSamples;
1658
1659 /* Check and see if we're already bound to the right
1660 * buffer object
1661 */
1662 struct intel_mipmap_tree *last_mt;
1663 if (num_samples == 0)
1664 last_mt = rb->mt;
1665 else
1666 last_mt = rb->singlesample_mt;
1667
1668 if (last_mt && last_mt->bo == buffer->bo)
1669 return;
1670
1671 intel_update_winsys_renderbuffer_miptree(intel, rb, buffer->bo,
1672 buffer->width, buffer->height,
1673 buffer->pitch);
1674
1675 if (_mesa_is_front_buffer_drawing(fb) &&
1676 buffer_type == __DRI_IMAGE_BUFFER_FRONT &&
1677 rb->Base.Base.NumSamples > 1) {
1678 intel_renderbuffer_upsample(intel, rb);
1679 }
1680 }
1681
1682 static void
1683 intel_update_image_buffers(struct brw_context *brw, __DRIdrawable *drawable)
1684 {
1685 struct gl_framebuffer *fb = drawable->driverPrivate;
1686 __DRIscreen *dri_screen = brw->screen->driScrnPriv;
1687 struct intel_renderbuffer *front_rb;
1688 struct intel_renderbuffer *back_rb;
1689 struct __DRIimageList images;
1690 mesa_format format;
1691 uint32_t buffer_mask = 0;
1692 int ret;
1693
1694 front_rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
1695 back_rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
1696
1697 if (back_rb)
1698 format = intel_rb_format(back_rb);
1699 else if (front_rb)
1700 format = intel_rb_format(front_rb);
1701 else
1702 return;
1703
1704 if (front_rb && (_mesa_is_front_buffer_drawing(fb) ||
1705 _mesa_is_front_buffer_reading(fb) || !back_rb)) {
1706 buffer_mask |= __DRI_IMAGE_BUFFER_FRONT;
1707 }
1708
1709 if (back_rb)
1710 buffer_mask |= __DRI_IMAGE_BUFFER_BACK;
1711
1712 ret = dri_screen->image.loader->getBuffers(drawable,
1713 driGLFormatToImageFormat(format),
1714 &drawable->dri2.stamp,
1715 drawable->loaderPrivate,
1716 buffer_mask,
1717 &images);
1718 if (!ret)
1719 return;
1720
1721 if (images.image_mask & __DRI_IMAGE_BUFFER_FRONT) {
1722 drawable->w = images.front->width;
1723 drawable->h = images.front->height;
1724 intel_update_image_buffer(brw,
1725 drawable,
1726 front_rb,
1727 images.front,
1728 __DRI_IMAGE_BUFFER_FRONT);
1729 }
1730
1731 if (images.image_mask & __DRI_IMAGE_BUFFER_BACK) {
1732 drawable->w = images.back->width;
1733 drawable->h = images.back->height;
1734 intel_update_image_buffer(brw,
1735 drawable,
1736 back_rb,
1737 images.back,
1738 __DRI_IMAGE_BUFFER_BACK);
1739 }
1740 }