i965: Move depth to the new resolve functions
[mesa.git] / src / mesa / drivers / dri / i965 / brw_context.c
1 /*
2 Copyright 2003 VMware, Inc.
3 Copyright (C) Intel Corp. 2006. All Rights Reserved.
4 Intel funded Tungsten Graphics to
5 develop this 3D driver.
6
7 Permission is hereby granted, free of charge, to any person obtaining
8 a copy of this software and associated documentation files (the
9 "Software"), to deal in the Software without restriction, including
10 without limitation the rights to use, copy, modify, merge, publish,
11 distribute, sublicense, and/or sell copies of the Software, and to
12 permit persons to whom the Software is furnished to do so, subject to
13 the following conditions:
14
15 The above copyright notice and this permission notice (including the
16 next paragraph) shall be included in all copies or substantial
17 portions of the Software.
18
19 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
20 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
22 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
23 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
24 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
25 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26
27 **********************************************************************/
28 /*
29 * Authors:
30 * Keith Whitwell <keithw@vmware.com>
31 */
32
33
34 #include "compiler/nir/nir.h"
35 #include "main/api_exec.h"
36 #include "main/context.h"
37 #include "main/fbobject.h"
38 #include "main/extensions.h"
39 #include "main/imports.h"
40 #include "main/macros.h"
41 #include "main/points.h"
42 #include "main/version.h"
43 #include "main/vtxfmt.h"
44 #include "main/texobj.h"
45 #include "main/framebuffer.h"
46
47 #include "vbo/vbo_context.h"
48
49 #include "drivers/common/driverfuncs.h"
50 #include "drivers/common/meta.h"
51 #include "utils.h"
52
53 #include "brw_context.h"
54 #include "brw_defines.h"
55 #include "brw_blorp.h"
56 #include "brw_draw.h"
57 #include "brw_state.h"
58
59 #include "intel_batchbuffer.h"
60 #include "intel_buffer_objects.h"
61 #include "intel_buffers.h"
62 #include "intel_fbo.h"
63 #include "intel_mipmap_tree.h"
64 #include "intel_pixel.h"
65 #include "intel_image.h"
66 #include "intel_tex.h"
67 #include "intel_tex_obj.h"
68
69 #include "swrast_setup/swrast_setup.h"
70 #include "tnl/tnl.h"
71 #include "tnl/t_pipeline.h"
72 #include "util/ralloc.h"
73 #include "util/debug.h"
74 #include "isl/isl.h"
75
76 /***************************************
77 * Mesa's Driver Functions
78 ***************************************/
79
80 const char *const brw_vendor_string = "Intel Open Source Technology Center";
81
82 static const char *
83 get_bsw_model(const struct intel_screen *screen)
84 {
85 switch (screen->eu_total) {
86 case 16:
87 return "405";
88 case 12:
89 return "400";
90 default:
91 return " ";
92 }
93 }
94
95 const char *
96 brw_get_renderer_string(const struct intel_screen *screen)
97 {
98 const char *chipset;
99 static char buffer[128];
100 char *bsw = NULL;
101
102 switch (screen->deviceID) {
103 #undef CHIPSET
104 #define CHIPSET(id, symbol, str) case id: chipset = str; break;
105 #include "pci_ids/i965_pci_ids.h"
106 default:
107 chipset = "Unknown Intel Chipset";
108 break;
109 }
110
111 /* Braswell branding is funny, so we have to fix it up here */
112 if (screen->deviceID == 0x22B1) {
113 bsw = strdup(chipset);
114 char *needle = strstr(bsw, "XXX");
115 if (needle) {
116 memcpy(needle, get_bsw_model(screen), 3);
117 chipset = bsw;
118 }
119 }
120
121 (void) driGetRendererString(buffer, chipset, 0);
122 free(bsw);
123 return buffer;
124 }
125
126 static const GLubyte *
127 intel_get_string(struct gl_context * ctx, GLenum name)
128 {
129 const struct brw_context *const brw = brw_context(ctx);
130
131 switch (name) {
132 case GL_VENDOR:
133 return (GLubyte *) brw_vendor_string;
134
135 case GL_RENDERER:
136 return
137 (GLubyte *) brw_get_renderer_string(brw->screen);
138
139 default:
140 return NULL;
141 }
142 }
143
144 static void
145 intel_viewport(struct gl_context *ctx)
146 {
147 struct brw_context *brw = brw_context(ctx);
148 __DRIcontext *driContext = brw->driContext;
149
150 if (_mesa_is_winsys_fbo(ctx->DrawBuffer)) {
151 if (driContext->driDrawablePriv)
152 dri2InvalidateDrawable(driContext->driDrawablePriv);
153 if (driContext->driReadablePriv)
154 dri2InvalidateDrawable(driContext->driReadablePriv);
155 }
156 }
157
158 static void
159 intel_update_framebuffer(struct gl_context *ctx,
160 struct gl_framebuffer *fb)
161 {
162 struct brw_context *brw = brw_context(ctx);
163
164 /* Quantize the derived default number of samples
165 */
166 fb->DefaultGeometry._NumSamples =
167 intel_quantize_num_samples(brw->screen,
168 fb->DefaultGeometry.NumSamples);
169 }
170
171 static bool
172 intel_disable_rb_aux_buffer(struct brw_context *brw, const struct brw_bo *bo)
173 {
174 const struct gl_framebuffer *fb = brw->ctx.DrawBuffer;
175 bool found = false;
176
177 for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
178 const struct intel_renderbuffer *irb =
179 intel_renderbuffer(fb->_ColorDrawBuffers[i]);
180
181 if (irb && irb->mt->bo == bo) {
182 found = brw->draw_aux_buffer_disabled[i] = true;
183 }
184 }
185
186 return found;
187 }
188
189 static void
190 intel_update_state(struct gl_context * ctx, GLuint new_state)
191 {
192 struct brw_context *brw = brw_context(ctx);
193 struct intel_texture_object *tex_obj;
194 struct intel_renderbuffer *depth_irb;
195
196 if (ctx->swrast_context)
197 _swrast_InvalidateState(ctx, new_state);
198 _vbo_InvalidateState(ctx, new_state);
199
200 brw->NewGLState |= new_state;
201
202 _mesa_unlock_context_textures(ctx);
203
204 /* Resolve the depth buffer's HiZ buffer. */
205 depth_irb = intel_get_renderbuffer(ctx->DrawBuffer, BUFFER_DEPTH);
206 if (depth_irb && depth_irb->mt) {
207 intel_miptree_prepare_depth(brw, depth_irb->mt,
208 depth_irb->mt_level,
209 depth_irb->mt_layer,
210 depth_irb->layer_count);
211 }
212
213 memset(brw->draw_aux_buffer_disabled, 0,
214 sizeof(brw->draw_aux_buffer_disabled));
215
216 /* Resolve depth buffer and render cache of each enabled texture. */
217 int maxEnabledUnit = ctx->Texture._MaxEnabledTexImageUnit;
218 for (int i = 0; i <= maxEnabledUnit; i++) {
219 if (!ctx->Texture.Unit[i]._Current)
220 continue;
221 tex_obj = intel_texture_object(ctx->Texture.Unit[i]._Current);
222 if (!tex_obj || !tex_obj->mt)
223 continue;
224
225       /* We need intel_texture_object::_Format to be valid */
226 intel_finalize_mipmap_tree(brw, i);
227
228 bool aux_supported;
229 intel_miptree_prepare_texture(brw, tex_obj->mt, tex_obj->_Format,
230 &aux_supported);
231
232 if (!aux_supported && brw->gen >= 9 &&
233 intel_disable_rb_aux_buffer(brw, tex_obj->mt->bo)) {
234 perf_debug("Sampling renderbuffer with non-compressible format - "
235 "turning off compression");
236 }
237
238 brw_render_cache_set_check_flush(brw, tex_obj->mt->bo);
239
240 if (tex_obj->base.StencilSampling ||
241 tex_obj->mt->format == MESA_FORMAT_S_UINT8) {
242 intel_update_r8stencil(brw, tex_obj->mt);
243 }
244 }
245
246 /* Resolve color for each active shader image. */
247 for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
248 const struct gl_program *prog = ctx->_Shader->CurrentProgram[i];
249
250 if (unlikely(prog && prog->info.num_images)) {
251 for (unsigned j = 0; j < prog->info.num_images; j++) {
252 struct gl_image_unit *u =
253 &ctx->ImageUnits[prog->sh.ImageUnits[j]];
254 tex_obj = intel_texture_object(u->TexObj);
255
256 if (tex_obj && tex_obj->mt) {
257 intel_miptree_prepare_image(brw, tex_obj->mt);
258
259 if (intel_miptree_is_lossless_compressed(brw, tex_obj->mt) &&
260 intel_disable_rb_aux_buffer(brw, tex_obj->mt->bo)) {
261 perf_debug("Using renderbuffer as shader image - turning "
262 "off lossless compression");
263 }
264
265 brw_render_cache_set_check_flush(brw, tex_obj->mt->bo);
266 }
267 }
268 }
269 }
270
271 /* Resolve color buffers for non-coherent framebuffer fetch. */
272 if (!ctx->Extensions.MESA_shader_framebuffer_fetch &&
273 ctx->FragmentProgram._Current &&
274 ctx->FragmentProgram._Current->info.outputs_read) {
275 const struct gl_framebuffer *fb = ctx->DrawBuffer;
276
277 for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
278 const struct intel_renderbuffer *irb =
279 intel_renderbuffer(fb->_ColorDrawBuffers[i]);
280
281 if (irb) {
282 intel_miptree_prepare_fb_fetch(brw, irb->mt, irb->mt_level,
283 irb->mt_layer, irb->layer_count);
284 }
285 }
286 }
287
288 struct gl_framebuffer *fb = ctx->DrawBuffer;
289 for (int i = 0; i < fb->_NumColorDrawBuffers; i++) {
290 struct intel_renderbuffer *irb =
291 intel_renderbuffer(fb->_ColorDrawBuffers[i]);
292
293 if (irb == NULL || irb->mt == NULL)
294 continue;
295
296 intel_miptree_prepare_render(brw, irb->mt, irb->mt_level,
297 irb->mt_layer, irb->layer_count,
298 ctx->Color.sRGBEnabled);
299 }
300
301 _mesa_lock_context_textures(ctx);
302
303 if (new_state & _NEW_BUFFERS) {
304 intel_update_framebuffer(ctx, ctx->DrawBuffer);
305 if (ctx->DrawBuffer != ctx->ReadBuffer)
306 intel_update_framebuffer(ctx, ctx->ReadBuffer);
307 }
308 }
309
310 #define flushFront(screen) ((screen)->image.loader ? (screen)->image.loader->flushFrontBuffer : (screen)->dri2.loader->flushFrontBuffer)
311
312 static void
313 intel_flush_front(struct gl_context *ctx)
314 {
315 struct brw_context *brw = brw_context(ctx);
316 __DRIcontext *driContext = brw->driContext;
317 __DRIdrawable *driDrawable = driContext->driDrawablePriv;
318 __DRIscreen *const dri_screen = brw->screen->driScrnPriv;
319
320 if (brw->front_buffer_dirty && _mesa_is_winsys_fbo(ctx->DrawBuffer)) {
321 if (flushFront(dri_screen) && driDrawable &&
322 driDrawable->loaderPrivate) {
323
324 /* Resolve before flushing FAKE_FRONT_LEFT to FRONT_LEFT.
325 *
326          * This potentially resolves both front and back buffers. It
327 * is unnecessary to resolve the back, but harms nothing except
328 * performance. And no one cares about front-buffer render
329 * performance.
330 */
331 intel_resolve_for_dri2_flush(brw, driDrawable);
332 intel_batchbuffer_flush(brw);
333
334 flushFront(dri_screen)(driDrawable, driDrawable->loaderPrivate);
335
336 /* We set the dirty bit in intel_prepare_render() if we're
337 * front buffer rendering once we get there.
338 */
339 brw->front_buffer_dirty = false;
340 }
341 }
342 }
343
344 static void
345 intel_glFlush(struct gl_context *ctx)
346 {
347 struct brw_context *brw = brw_context(ctx);
348
349 intel_batchbuffer_flush(brw);
350 intel_flush_front(ctx);
351
352 brw->need_flush_throttle = true;
353 }
354
355 static void
356 intel_finish(struct gl_context * ctx)
357 {
358 struct brw_context *brw = brw_context(ctx);
359
360 intel_glFlush(ctx);
361
362 if (brw->batch.last_bo)
363 brw_bo_wait_rendering(brw, brw->batch.last_bo);
364 }
365
366 static void
367 brw_init_driver_functions(struct brw_context *brw,
368 struct dd_function_table *functions)
369 {
370 _mesa_init_driver_functions(functions);
371
372 /* GLX uses DRI2 invalidate events to handle window resizing.
373 * Unfortunately, EGL does not - libEGL is written in XCB (not Xlib),
374 * which doesn't provide a mechanism for snooping the event queues.
375 *
376 * So EGL still relies on viewport hacks to handle window resizing.
377 * This should go away with DRI3000.
378 */
379 if (!brw->driContext->driScreenPriv->dri2.useInvalidate)
380 functions->Viewport = intel_viewport;
381
382 functions->Flush = intel_glFlush;
383 functions->Finish = intel_finish;
384 functions->GetString = intel_get_string;
385 functions->UpdateState = intel_update_state;
386
387 intelInitTextureFuncs(functions);
388 intelInitTextureImageFuncs(functions);
389 intelInitTextureSubImageFuncs(functions);
390 intelInitTextureCopyImageFuncs(functions);
391 intelInitCopyImageFuncs(functions);
392 intelInitClearFuncs(functions);
393 intelInitBufferFuncs(functions);
394 intelInitPixelFuncs(functions);
395 intelInitBufferObjectFuncs(functions);
396 brw_init_syncobj_functions(functions);
397 brw_init_object_purgeable_functions(functions);
398
399 brwInitFragProgFuncs( functions );
400 brw_init_common_queryobj_functions(functions);
401 if (brw->gen >= 8 || brw->is_haswell)
402 hsw_init_queryobj_functions(functions);
403 else if (brw->gen >= 6)
404 gen6_init_queryobj_functions(functions);
405 else
406 gen4_init_queryobj_functions(functions);
407 brw_init_compute_functions(functions);
408 if (brw->gen >= 7)
409 brw_init_conditional_render_functions(functions);
410
411 functions->QueryInternalFormat = brw_query_internal_format;
412
413 functions->NewTransformFeedback = brw_new_transform_feedback;
414 functions->DeleteTransformFeedback = brw_delete_transform_feedback;
415 if (can_do_mi_math_and_lrr(brw->screen)) {
416 functions->BeginTransformFeedback = hsw_begin_transform_feedback;
417 functions->EndTransformFeedback = hsw_end_transform_feedback;
418 functions->PauseTransformFeedback = hsw_pause_transform_feedback;
419 functions->ResumeTransformFeedback = hsw_resume_transform_feedback;
420 } else if (brw->gen >= 7) {
421 functions->BeginTransformFeedback = gen7_begin_transform_feedback;
422 functions->EndTransformFeedback = gen7_end_transform_feedback;
423 functions->PauseTransformFeedback = gen7_pause_transform_feedback;
424 functions->ResumeTransformFeedback = gen7_resume_transform_feedback;
425 functions->GetTransformFeedbackVertexCount =
426 brw_get_transform_feedback_vertex_count;
427 } else {
428 functions->BeginTransformFeedback = brw_begin_transform_feedback;
429 functions->EndTransformFeedback = brw_end_transform_feedback;
430 functions->PauseTransformFeedback = brw_pause_transform_feedback;
431 functions->ResumeTransformFeedback = brw_resume_transform_feedback;
432 functions->GetTransformFeedbackVertexCount =
433 brw_get_transform_feedback_vertex_count;
434 }
435
436 if (brw->gen >= 6)
437 functions->GetSamplePosition = gen6_get_sample_position;
438 }
439
440 static void
441 brw_initialize_context_constants(struct brw_context *brw)
442 {
443 struct gl_context *ctx = &brw->ctx;
444 const struct brw_compiler *compiler = brw->screen->compiler;
445
446 const bool stage_exists[MESA_SHADER_STAGES] = {
447 [MESA_SHADER_VERTEX] = true,
448 [MESA_SHADER_TESS_CTRL] = brw->gen >= 7,
449 [MESA_SHADER_TESS_EVAL] = brw->gen >= 7,
450 [MESA_SHADER_GEOMETRY] = brw->gen >= 6,
451 [MESA_SHADER_FRAGMENT] = true,
452 [MESA_SHADER_COMPUTE] =
453 ((ctx->API == API_OPENGL_COMPAT || ctx->API == API_OPENGL_CORE) &&
454 ctx->Const.MaxComputeWorkGroupSize[0] >= 1024) ||
455 (ctx->API == API_OPENGLES2 &&
456 ctx->Const.MaxComputeWorkGroupSize[0] >= 128) ||
457 _mesa_extension_override_enables.ARB_compute_shader,
458 };
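   /* Worked example of the compute condition above (illustrative numbers,
    * not from this file): desktop GL asks for at least 1024 invocations per
    * workgroup and ES 3.1 for at least 128, so a part whose
    * brw_initialize_cs_context_constants() produced, say, 1792 invocations
    * exposes MESA_SHADER_COMPUTE under either API, while a part below 128
    * exposes it only via the ARB_compute_shader driconf override.
    */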
459
460 unsigned num_stages = 0;
461 for (int i = 0; i < MESA_SHADER_STAGES; i++) {
462 if (stage_exists[i])
463 num_stages++;
464 }
465
466 unsigned max_samplers =
467 brw->gen >= 8 || brw->is_haswell ? BRW_MAX_TEX_UNIT : 16;
468
469 ctx->Const.MaxDualSourceDrawBuffers = 1;
470 ctx->Const.MaxDrawBuffers = BRW_MAX_DRAW_BUFFERS;
471 ctx->Const.MaxCombinedShaderOutputResources =
472 MAX_IMAGE_UNITS + BRW_MAX_DRAW_BUFFERS;
473
474 /* The timestamp register we can read for glGetTimestamp() is
475 * sometimes only 32 bits, before scaling to nanoseconds (depending
476 * on kernel).
477 *
478 * Once scaled to nanoseconds the timestamp would roll over at a
479 * non-power-of-two, so an application couldn't use
480 * GL_QUERY_COUNTER_BITS to handle rollover correctly. Instead, we
481 * report 36 bits and truncate at that (rolling over 5 times as
482 * often as the HW counter), and when the 32-bit counter rolls
483 * over, it happens to also be at a rollover in the reported value
484 * from near (1<<36) to 0.
485 *
486 * The low 32 bits rolls over in ~343 seconds. Our 36-bit result
487 * rolls over every ~69 seconds.
488 */
489 ctx->Const.QueryCounterBits.Timestamp = 36;
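   /* Sanity check of the figures above (illustrative; the ~343 s figure
    * implies an 80 ns timestamp tick):
    *
    *    2^32 ticks * 80 ns/tick ~= 343.6 seconds   (hardware counter wrap)
    *    2^36 ns               ~=  68.7 seconds   (reported value wrap)
    *
    * That is five 36-bit rollovers per hardware wrap, and the wrap points
    * coincide exactly, since 5 * 2^36 ns = 2^32 * 80 ns.
    */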
490
491 ctx->Const.MaxTextureCoordUnits = 8; /* Mesa limit */
492 ctx->Const.MaxImageUnits = MAX_IMAGE_UNITS;
493 if (brw->gen >= 7) {
494 ctx->Const.MaxRenderbufferSize = 16384;
495 ctx->Const.MaxTextureLevels = MIN2(15 /* 16384 */, MAX_TEXTURE_LEVELS);
496 ctx->Const.MaxCubeTextureLevels = 15; /* 16384 */
497 } else {
498 ctx->Const.MaxRenderbufferSize = 8192;
499 ctx->Const.MaxTextureLevels = MIN2(14 /* 8192 */, MAX_TEXTURE_LEVELS);
500 ctx->Const.MaxCubeTextureLevels = 14; /* 8192 */
501 }
502 ctx->Const.Max3DTextureLevels = 12; /* 2048 */
503 ctx->Const.MaxArrayTextureLayers = brw->gen >= 7 ? 2048 : 512;
504 ctx->Const.MaxTextureMbytes = 1536;
505 ctx->Const.MaxTextureRectSize = 1 << 12;
506 ctx->Const.MaxTextureMaxAnisotropy = 16.0;
507 ctx->Const.MaxTextureLodBias = 15.0;
508 ctx->Const.StripTextureBorder = true;
509 if (brw->gen >= 7) {
510 ctx->Const.MaxProgramTextureGatherComponents = 4;
511 ctx->Const.MinProgramTextureGatherOffset = -32;
512 ctx->Const.MaxProgramTextureGatherOffset = 31;
513 } else if (brw->gen == 6) {
514 ctx->Const.MaxProgramTextureGatherComponents = 1;
515 ctx->Const.MinProgramTextureGatherOffset = -8;
516 ctx->Const.MaxProgramTextureGatherOffset = 7;
517 }
518
519 ctx->Const.MaxUniformBlockSize = 65536;
520
521 for (int i = 0; i < MESA_SHADER_STAGES; i++) {
522 struct gl_program_constants *prog = &ctx->Const.Program[i];
523
524 if (!stage_exists[i])
525 continue;
526
527 prog->MaxTextureImageUnits = max_samplers;
528
529 prog->MaxUniformBlocks = BRW_MAX_UBO;
530 prog->MaxCombinedUniformComponents =
531 prog->MaxUniformComponents +
532 ctx->Const.MaxUniformBlockSize / 4 * prog->MaxUniformBlocks;
533
534 prog->MaxAtomicCounters = MAX_ATOMIC_COUNTERS;
535 prog->MaxAtomicBuffers = BRW_MAX_ABO;
536 prog->MaxImageUniforms = compiler->scalar_stage[i] ? BRW_MAX_IMAGES : 0;
537 prog->MaxShaderStorageBlocks = BRW_MAX_SSBO;
538 }
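   /* Worked example of the combined-uniform math above (illustrative): with
    * MaxUniformBlockSize = 65536 bytes, each UBO contributes 65536 / 4 =
    * 16384 four-byte components, so a stage with N default-block uniform
    * components and BRW_MAX_UBO blocks advertises
    * N + 16384 * BRW_MAX_UBO combined components.
    */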
539
540 ctx->Const.MaxTextureUnits =
541 MIN2(ctx->Const.MaxTextureCoordUnits,
542 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits);
543
544 ctx->Const.MaxUniformBufferBindings = num_stages * BRW_MAX_UBO;
545 ctx->Const.MaxCombinedUniformBlocks = num_stages * BRW_MAX_UBO;
546 ctx->Const.MaxCombinedAtomicBuffers = num_stages * BRW_MAX_ABO;
547 ctx->Const.MaxCombinedShaderStorageBlocks = num_stages * BRW_MAX_SSBO;
548 ctx->Const.MaxShaderStorageBufferBindings = num_stages * BRW_MAX_SSBO;
549 ctx->Const.MaxCombinedTextureImageUnits = num_stages * max_samplers;
550 ctx->Const.MaxCombinedImageUniforms = num_stages * BRW_MAX_IMAGES;
551
552
553 /* Hardware only supports a limited number of transform feedback buffers.
554 * So we need to override the Mesa default (which is based only on software
555 * limits).
556 */
557 ctx->Const.MaxTransformFeedbackBuffers = BRW_MAX_SOL_BUFFERS;
558
559 /* On Gen6, in the worst case, we use up one binding table entry per
560 * transform feedback component (see comments above the definition of
561 * BRW_MAX_SOL_BINDINGS, in brw_context.h), so we need to advertise a value
562 * for MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS equal to
563 * BRW_MAX_SOL_BINDINGS.
564 *
565 * In "separate components" mode, we need to divide this value by
566 * BRW_MAX_SOL_BUFFERS, so that the total number of binding table entries
567 * used up by all buffers will not exceed BRW_MAX_SOL_BINDINGS.
568 */
569 ctx->Const.MaxTransformFeedbackInterleavedComponents = BRW_MAX_SOL_BINDINGS;
570 ctx->Const.MaxTransformFeedbackSeparateComponents =
571 BRW_MAX_SOL_BINDINGS / BRW_MAX_SOL_BUFFERS;
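   /* For instance, assuming the brw_context.h values at the time of writing
    * (BRW_MAX_SOL_BINDINGS = 64, BRW_MAX_SOL_BUFFERS = 4), interleaved mode
    * advertises 64 components and separate mode 64 / 4 = 16 per buffer, so
    * even four fully packed separate buffers cannot exceed 64 binding table
    * entries.
    */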
572
573 ctx->Const.AlwaysUseGetTransformFeedbackVertexCount =
574 !can_do_mi_math_and_lrr(brw->screen);
575
576 int max_samples;
577 const int *msaa_modes = intel_supported_msaa_modes(brw->screen);
578 const int clamp_max_samples =
579 driQueryOptioni(&brw->optionCache, "clamp_max_samples");
580
581 if (clamp_max_samples < 0) {
582 max_samples = msaa_modes[0];
583 } else {
584 /* Select the largest supported MSAA mode that does not exceed
585 * clamp_max_samples.
586 */
587 max_samples = 0;
588 for (int i = 0; msaa_modes[i] != 0; ++i) {
589 if (msaa_modes[i] <= clamp_max_samples) {
590 max_samples = msaa_modes[i];
591 break;
592 }
593 }
594 }
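   /* Example (illustrative): intel_supported_msaa_modes() returns modes in
    * decreasing order -- the unclamped path above simply takes
    * msaa_modes[0] -- so for a hypothetical list {8, 4, 0} with
    * clamp_max_samples = 6, the loop selects 4, the largest mode not
    * exceeding the clamp.
    */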
595
596 ctx->Const.MaxSamples = max_samples;
597 ctx->Const.MaxColorTextureSamples = max_samples;
598 ctx->Const.MaxDepthTextureSamples = max_samples;
599 ctx->Const.MaxIntegerSamples = max_samples;
600 ctx->Const.MaxImageSamples = 0;
601
602 /* gen6_set_sample_maps() sets SampleMap{2,4,8}x variables which are used
603 * to map indices of rectangular grid to sample numbers within a pixel.
604 * These variables are used by GL_EXT_framebuffer_multisample_blit_scaled
605 * extension implementation. For more details see the comment above
606 * gen6_set_sample_maps() definition.
607 */
608 gen6_set_sample_maps(ctx);
609
610 ctx->Const.MinLineWidth = 1.0;
611 ctx->Const.MinLineWidthAA = 1.0;
612 if (brw->gen >= 6) {
613 ctx->Const.MaxLineWidth = 7.375;
614 ctx->Const.MaxLineWidthAA = 7.375;
615 ctx->Const.LineWidthGranularity = 0.125;
616 } else {
617 ctx->Const.MaxLineWidth = 7.0;
618 ctx->Const.MaxLineWidthAA = 7.0;
619 ctx->Const.LineWidthGranularity = 0.5;
620 }
621
622 /* For non-antialiased lines, we have to round the line width to the
623 * nearest whole number. Make sure that we don't advertise a line
624 * width that, when rounded, will be beyond the actual hardware
625 * maximum.
626 */
627 assert(roundf(ctx->Const.MaxLineWidth) <= ctx->Const.MaxLineWidth);
628
629 ctx->Const.MinPointSize = 1.0;
630 ctx->Const.MinPointSizeAA = 1.0;
631 ctx->Const.MaxPointSize = 255.0;
632 ctx->Const.MaxPointSizeAA = 255.0;
633 ctx->Const.PointSizeGranularity = 1.0;
634
635 if (brw->gen >= 5 || brw->is_g4x)
636 ctx->Const.MaxClipPlanes = 8;
637
638 ctx->Const.GLSLTessLevelsAsInputs = true;
639 ctx->Const.LowerTCSPatchVerticesIn = brw->gen >= 8;
640 ctx->Const.LowerTESPatchVerticesIn = true;
641 ctx->Const.PrimitiveRestartForPatches = true;
642
643 ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeInstructions = 16 * 1024;
644 ctx->Const.Program[MESA_SHADER_VERTEX].MaxAluInstructions = 0;
645 ctx->Const.Program[MESA_SHADER_VERTEX].MaxTexInstructions = 0;
646 ctx->Const.Program[MESA_SHADER_VERTEX].MaxTexIndirections = 0;
647 ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAluInstructions = 0;
648 ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTexInstructions = 0;
649 ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTexIndirections = 0;
650 ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAttribs = 16;
651 ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTemps = 256;
652 ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAddressRegs = 1;
653 ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeParameters = 1024;
654 ctx->Const.Program[MESA_SHADER_VERTEX].MaxEnvParams =
655 MIN2(ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeParameters,
656 ctx->Const.Program[MESA_SHADER_VERTEX].MaxEnvParams);
657
658 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeInstructions = 1024;
659 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAluInstructions = 1024;
660 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTexInstructions = 1024;
661 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTexIndirections = 1024;
662 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAttribs = 12;
663 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTemps = 256;
664 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAddressRegs = 0;
665 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeParameters = 1024;
666 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxEnvParams =
667 MIN2(ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeParameters,
668 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxEnvParams);
669
670 /* Fragment shaders use real, 32-bit twos-complement integers for all
671 * integer types.
672 */
673 ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.RangeMin = 31;
674 ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.RangeMax = 30;
675 ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.Precision = 0;
676 ctx->Const.Program[MESA_SHADER_FRAGMENT].HighInt = ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt;
677 ctx->Const.Program[MESA_SHADER_FRAGMENT].MediumInt = ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt;
678
679 ctx->Const.Program[MESA_SHADER_VERTEX].LowInt.RangeMin = 31;
680 ctx->Const.Program[MESA_SHADER_VERTEX].LowInt.RangeMax = 30;
681 ctx->Const.Program[MESA_SHADER_VERTEX].LowInt.Precision = 0;
682 ctx->Const.Program[MESA_SHADER_VERTEX].HighInt = ctx->Const.Program[MESA_SHADER_VERTEX].LowInt;
683 ctx->Const.Program[MESA_SHADER_VERTEX].MediumInt = ctx->Const.Program[MESA_SHADER_VERTEX].LowInt;
684
685    /* Gen6 converts quads to polygons at the beginning of the 3D pipeline,
686     * but we're not sure how the conversion orders the vertices, which
687     * affects the provoking vertex decision. Always use the last-vertex
688     * convention for quad primitives, which works as expected for now.
689     */
690 if (brw->gen >= 6)
691 ctx->Const.QuadsFollowProvokingVertexConvention = false;
692
693 ctx->Const.NativeIntegers = true;
694 ctx->Const.VertexID_is_zero_based = true;
695
696 /* Regarding the CMP instruction, the Ivybridge PRM says:
697 *
698 * "For each enabled channel 0b or 1b is assigned to the appropriate flag
699 * bit and 0/all zeros or all ones (e.g, byte 0xFF, word 0xFFFF, DWord
700 * 0xFFFFFFFF) is assigned to dst."
701 *
702 * but PRMs for earlier generations say
703 *
704 * "In dword format, one GRF may store up to 8 results. When the register
705 * is used later as a vector of Booleans, as only LSB at each channel
706 * contains meaning [sic] data, software should make sure all higher bits
707 * are masked out (e.g. by 'and-ing' an [sic] 0x01 constant)."
708 *
709 * We select the representation of a true boolean uniform to be ~0, and fix
710 * the results of Gen <= 5 CMP instruction's with -(result & 1).
711 */
712 ctx->Const.UniformBooleanTrue = ~0;
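   /* Worked example of the Gen <= 5 fixup (illustrative): a CMP result of
    * 0x00000001 for "true" becomes -(0x00000001 & 1) = 0xFFFFFFFF = ~0,
    * while a false 0x00000000 stays -(0 & 1) = 0, matching the Gen6+
    * all-ones convention quoted above.
    */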
713
714 /* From the gen4 PRM, volume 4 page 127:
715 *
716 * "For SURFTYPE_BUFFER non-rendertarget surfaces, this field specifies
717 * the base address of the first element of the surface, computed in
718 * software by adding the surface base address to the byte offset of
719 * the element in the buffer."
720 *
721 * However, unaligned accesses are slower, so enforce buffer alignment.
722 */
723 ctx->Const.UniformBufferOffsetAlignment = 16;
724
725 /* ShaderStorageBufferOffsetAlignment should be a cacheline (64 bytes) so
726 * that we can safely have the CPU and GPU writing the same SSBO on
727     * non-cache-coherent systems (our Atom CPUs). With UBOs, the GPU never
728 * writes, so there's no problem. For an SSBO, the GPU and the CPU can
729 * be updating disjoint regions of the buffer simultaneously and that will
730 * break if the regions overlap the same cacheline.
731 */
732 ctx->Const.ShaderStorageBufferOffsetAlignment = 64;
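   /* Illustrative failure mode that the 64-byte alignment prevents: if two
    * SSBO bindings shared a single cacheline, a CPU write-back of its half
    * of the line could clobber bytes the GPU had just written to the other
    * half on a non-cache-coherent system.
    */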
733 ctx->Const.TextureBufferOffsetAlignment = 16;
734 ctx->Const.MaxTextureBufferSize = 128 * 1024 * 1024;
735
736 if (brw->gen >= 6) {
737 ctx->Const.MaxVarying = 32;
738 ctx->Const.Program[MESA_SHADER_VERTEX].MaxOutputComponents = 128;
739 ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxInputComponents = 64;
740 ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxOutputComponents = 128;
741 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxInputComponents = 128;
742 ctx->Const.Program[MESA_SHADER_TESS_CTRL].MaxInputComponents = 128;
743 ctx->Const.Program[MESA_SHADER_TESS_CTRL].MaxOutputComponents = 128;
744 ctx->Const.Program[MESA_SHADER_TESS_EVAL].MaxInputComponents = 128;
745 ctx->Const.Program[MESA_SHADER_TESS_EVAL].MaxOutputComponents = 128;
746 }
747
748 /* We want the GLSL compiler to emit code that uses condition codes */
749 for (int i = 0; i < MESA_SHADER_STAGES; i++) {
750 ctx->Const.ShaderCompilerOptions[i] =
751 brw->screen->compiler->glsl_compiler_options[i];
752 }
753
754 if (brw->gen >= 7) {
755 ctx->Const.MaxViewportWidth = 32768;
756 ctx->Const.MaxViewportHeight = 32768;
757 }
758
759 /* ARB_viewport_array, OES_viewport_array */
760 if (brw->gen >= 6) {
761 ctx->Const.MaxViewports = GEN6_NUM_VIEWPORTS;
762 ctx->Const.ViewportSubpixelBits = 0;
763
764 /* Cast to float before negating because MaxViewportWidth is unsigned.
765 */
766 ctx->Const.ViewportBounds.Min = -(float)ctx->Const.MaxViewportWidth;
767 ctx->Const.ViewportBounds.Max = ctx->Const.MaxViewportWidth;
768 }
769
770 /* ARB_gpu_shader5 */
771 if (brw->gen >= 7)
772 ctx->Const.MaxVertexStreams = MIN2(4, MAX_VERTEX_STREAMS);
773
774 /* ARB_framebuffer_no_attachments */
775 ctx->Const.MaxFramebufferWidth = 16384;
776 ctx->Const.MaxFramebufferHeight = 16384;
777 ctx->Const.MaxFramebufferLayers = ctx->Const.MaxArrayTextureLayers;
778 ctx->Const.MaxFramebufferSamples = max_samples;
779
780 /* OES_primitive_bounding_box */
781 ctx->Const.NoPrimitiveBoundingBoxOutput = true;
782 }
783
784 static void
785 brw_initialize_cs_context_constants(struct brw_context *brw)
786 {
787 struct gl_context *ctx = &brw->ctx;
788 const struct intel_screen *screen = brw->screen;
789 struct gen_device_info *devinfo = &brw->screen->devinfo;
790
791 /* FINISHME: Do this for all platforms that the kernel supports */
792 if (brw->is_cherryview &&
793 screen->subslice_total > 0 && screen->eu_total > 0) {
794 /* Logical CS threads = EUs per subslice * 7 threads per EU */
795 uint32_t max_cs_threads = screen->eu_total / screen->subslice_total * 7;
796
797 /* Fuse configurations may give more threads than expected, never less. */
798 if (max_cs_threads > devinfo->max_cs_threads)
799 devinfo->max_cs_threads = max_cs_threads;
800 }
801
802 /* Maximum number of scalar compute shader invocations that can be run in
803 * parallel in the same subslice assuming SIMD32 dispatch.
804 *
805 * We don't advertise more than 64 threads, because we are limited to 64 by
806 * our usage of thread_width_max in the gpgpu walker command. This only
807 * currently impacts Haswell, which otherwise might be able to advertise 70
808     * threads. With SIMD32 and 64 threads, Haswell still provides twice the
809     * number of invocations required by ARB_compute_shader.
810 */
811 const unsigned max_threads = MIN2(64, devinfo->max_cs_threads);
812 const uint32_t max_invocations = 32 * max_threads;
813 ctx->Const.MaxComputeWorkGroupSize[0] = max_invocations;
814 ctx->Const.MaxComputeWorkGroupSize[1] = max_invocations;
815 ctx->Const.MaxComputeWorkGroupSize[2] = max_invocations;
816 ctx->Const.MaxComputeWorkGroupInvocations = max_invocations;
817 ctx->Const.MaxComputeSharedMemorySize = 64 * 1024;
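   /* Worked example (hypothetical fuse configuration): a Cherryview part
    * with eu_total = 16 and subslice_total = 2 yields 16 / 2 * 7 = 56
    * logical threads, so max_threads = MIN2(64, 56) = 56 and the limits
    * above become 32 * 56 = 1792 invocations per workgroup dimension.
    */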
818 }
819
820 /**
821 * Process driconf (drirc) options, setting appropriate context flags.
822 *
823 * intelInitExtensions still pokes at optionCache directly, in order to
824 * avoid advertising various extensions. No flags are set, so it makes
825 * sense to continue doing that there.
826 */
827 static void
828 brw_process_driconf_options(struct brw_context *brw)
829 {
830 struct gl_context *ctx = &brw->ctx;
831
832 driOptionCache *options = &brw->optionCache;
833 driParseConfigFiles(options, &brw->screen->optionCache,
834 brw->driContext->driScreenPriv->myNum, "i965");
835
836 int bo_reuse_mode = driQueryOptioni(options, "bo_reuse");
837 switch (bo_reuse_mode) {
838 case DRI_CONF_BO_REUSE_DISABLED:
839 break;
840 case DRI_CONF_BO_REUSE_ALL:
841 brw_bufmgr_enable_reuse(brw->bufmgr);
842 break;
843 }
844
845 if (INTEL_DEBUG & DEBUG_NO_HIZ) {
846 brw->has_hiz = false;
847       /* On gen6, you can only do separate stencil with HiZ. */
848 if (brw->gen == 6)
849 brw->has_separate_stencil = false;
850 }
851
852 if (driQueryOptionb(options, "always_flush_batch")) {
853 fprintf(stderr, "flushing batchbuffer before/after each draw call\n");
854 brw->always_flush_batch = true;
855 }
856
857 if (driQueryOptionb(options, "always_flush_cache")) {
858 fprintf(stderr, "flushing GPU caches before/after each draw call\n");
859 brw->always_flush_cache = true;
860 }
861
862 if (driQueryOptionb(options, "disable_throttling")) {
863 fprintf(stderr, "disabling flush throttling\n");
864 brw->disable_throttling = true;
865 }
866
867 brw->precompile = driQueryOptionb(&brw->optionCache, "shader_precompile");
868
869 if (driQueryOptionb(&brw->optionCache, "precise_trig"))
870 brw->screen->compiler->precise_trig = true;
871
872 ctx->Const.ForceGLSLExtensionsWarn =
873 driQueryOptionb(options, "force_glsl_extensions_warn");
874
875 ctx->Const.ForceGLSLVersion =
876 driQueryOptioni(options, "force_glsl_version");
877
878 ctx->Const.DisableGLSLLineContinuations =
879 driQueryOptionb(options, "disable_glsl_line_continuations");
880
881 ctx->Const.AllowGLSLExtensionDirectiveMidShader =
882 driQueryOptionb(options, "allow_glsl_extension_directive_midshader");
883
884 ctx->Const.AllowGLSLBuiltinVariableRedeclaration =
885 driQueryOptionb(options, "allow_glsl_builtin_variable_redeclaration");
886
887 ctx->Const.AllowHigherCompatVersion =
888 driQueryOptionb(options, "allow_higher_compat_version");
889
890 ctx->Const.ForceGLSLAbsSqrt =
891 driQueryOptionb(options, "force_glsl_abs_sqrt");
892
893 ctx->Const.GLSLZeroInit = driQueryOptionb(options, "glsl_zero_init");
894
895 brw->dual_color_blend_by_location =
896 driQueryOptionb(options, "dual_color_blend_by_location");
897 }
898
899 GLboolean
900 brwCreateContext(gl_api api,
901 const struct gl_config *mesaVis,
902 __DRIcontext *driContextPriv,
903 unsigned major_version,
904 unsigned minor_version,
905 uint32_t flags,
906 bool notify_reset,
907 unsigned *dri_ctx_error,
908 void *sharedContextPrivate)
909 {
910 struct gl_context *shareCtx = (struct gl_context *) sharedContextPrivate;
911 struct intel_screen *screen = driContextPriv->driScreenPriv->driverPrivate;
912 const struct gen_device_info *devinfo = &screen->devinfo;
913 struct dd_function_table functions;
914
915 /* Only allow the __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS flag if the kernel
916 * provides us with context reset notifications.
917 */
918 uint32_t allowed_flags = __DRI_CTX_FLAG_DEBUG
919 | __DRI_CTX_FLAG_FORWARD_COMPATIBLE;
920
921 if (screen->has_context_reset_notification)
922 allowed_flags |= __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS;
923
924 if (flags & ~allowed_flags) {
925 *dri_ctx_error = __DRI_CTX_ERROR_UNKNOWN_FLAG;
926 return false;
927 }
928
929 struct brw_context *brw = rzalloc(NULL, struct brw_context);
930 if (!brw) {
931 fprintf(stderr, "%s: failed to alloc context\n", __func__);
932 *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
933 return false;
934 }
935
936 driContextPriv->driverPrivate = brw;
937 brw->driContext = driContextPriv;
938 brw->screen = screen;
939 brw->bufmgr = screen->bufmgr;
940
941 brw->gen = devinfo->gen;
942 brw->gt = devinfo->gt;
943 brw->is_g4x = devinfo->is_g4x;
944 brw->is_baytrail = devinfo->is_baytrail;
945 brw->is_haswell = devinfo->is_haswell;
946 brw->is_cherryview = devinfo->is_cherryview;
947 brw->is_broxton = devinfo->is_broxton;
948 brw->has_llc = devinfo->has_llc;
949 brw->has_hiz = devinfo->has_hiz_and_separate_stencil;
950 brw->has_separate_stencil = devinfo->has_hiz_and_separate_stencil;
951 brw->has_pln = devinfo->has_pln;
952 brw->has_compr4 = devinfo->has_compr4;
953 brw->has_surface_tile_offset = devinfo->has_surface_tile_offset;
954 brw->has_negative_rhw_bug = devinfo->has_negative_rhw_bug;
955 brw->needs_unlit_centroid_workaround =
956 devinfo->needs_unlit_centroid_workaround;
957
958 brw->must_use_separate_stencil = devinfo->must_use_separate_stencil;
959 brw->has_swizzling = screen->hw_has_swizzling;
960
961 isl_device_init(&brw->isl_dev, devinfo, screen->hw_has_swizzling);
962
963 brw->vs.base.stage = MESA_SHADER_VERTEX;
964 brw->tcs.base.stage = MESA_SHADER_TESS_CTRL;
965 brw->tes.base.stage = MESA_SHADER_TESS_EVAL;
966 brw->gs.base.stage = MESA_SHADER_GEOMETRY;
967 brw->wm.base.stage = MESA_SHADER_FRAGMENT;
968 if (brw->gen >= 8) {
969 gen8_init_vtable_surface_functions(brw);
970 brw->vtbl.emit_depth_stencil_hiz = gen8_emit_depth_stencil_hiz;
971 } else if (brw->gen >= 7) {
972 gen7_init_vtable_surface_functions(brw);
973 brw->vtbl.emit_depth_stencil_hiz = gen7_emit_depth_stencil_hiz;
974 } else if (brw->gen >= 6) {
975 gen6_init_vtable_surface_functions(brw);
976 brw->vtbl.emit_depth_stencil_hiz = gen6_emit_depth_stencil_hiz;
977 } else {
978 gen4_init_vtable_surface_functions(brw);
979 brw->vtbl.emit_depth_stencil_hiz = brw_emit_depth_stencil_hiz;
980 }
981
982 brw_init_driver_functions(brw, &functions);
983
984 if (notify_reset)
985 functions.GetGraphicsResetStatus = brw_get_graphics_reset_status;
986
987 struct gl_context *ctx = &brw->ctx;
988
989 if (!_mesa_initialize_context(ctx, api, mesaVis, shareCtx, &functions)) {
990 *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
991 fprintf(stderr, "%s: failed to init mesa context\n", __func__);
992 intelDestroyContext(driContextPriv);
993 return false;
994 }
995
996 driContextSetFlags(ctx, flags);
997
998 /* Initialize the software rasterizer and helper modules.
999 *
1000 * As of GL 3.1 core, the gen4+ driver doesn't need the swrast context for
1001 * software fallbacks (which we have to support on legacy GL to do weird
1002 * glDrawPixels(), glBitmap(), and other functions).
1003 */
1004 if (api != API_OPENGL_CORE && api != API_OPENGLES2) {
1005 _swrast_CreateContext(ctx);
1006 }
1007
1008 _vbo_CreateContext(ctx);
1009 if (ctx->swrast_context) {
1010 _tnl_CreateContext(ctx);
1011 TNL_CONTEXT(ctx)->Driver.RunPipeline = _tnl_run_pipeline;
1012 _swsetup_CreateContext(ctx);
1013
1014 /* Configure swrast to match hardware characteristics: */
1015 _swrast_allow_pixel_fog(ctx, false);
1016 _swrast_allow_vertex_fog(ctx, true);
1017 }
1018
1019 _mesa_meta_init(ctx);
1020
1021 brw_process_driconf_options(brw);
1022
1023 if (INTEL_DEBUG & DEBUG_PERF)
1024 brw->perf_debug = true;
1025
1026 brw_initialize_cs_context_constants(brw);
1027 brw_initialize_context_constants(brw);
1028
1029 ctx->Const.ResetStrategy = notify_reset
1030 ? GL_LOSE_CONTEXT_ON_RESET_ARB : GL_NO_RESET_NOTIFICATION_ARB;
1031
1032 /* Reinitialize the context point state. It depends on ctx->Const values. */
1033 _mesa_init_point(ctx);
1034
1035 intel_fbo_init(brw);
1036
1037 intel_batchbuffer_init(&brw->batch, brw->bufmgr, brw->has_llc);
1038
1039 if (brw->gen >= 6) {
1040 /* Create a new hardware context. Using a hardware context means that
1041 * our GPU state will be saved/restored on context switch, allowing us
1042 * to assume that the GPU is in the same state we left it in.
1043 *
1044 * This is required for transform feedback buffer offsets, query objects,
1045 * and also allows us to reduce how much state we have to emit.
1046 */
1047 brw->hw_ctx = brw_create_hw_context(brw->bufmgr);
1048
1049 if (!brw->hw_ctx) {
1050 fprintf(stderr, "Failed to create hardware context.\n");
1051 intelDestroyContext(driContextPriv);
1052 return false;
1053 }
1054 }
1055
1056 if (brw_init_pipe_control(brw, devinfo)) {
1057 *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
1058 intelDestroyContext(driContextPriv);
1059 return false;
1060 }
1061
1062 brw_init_state(brw);
1063
1064 intelInitExtensions(ctx);
1065
1066 brw_init_surface_formats(brw);
1067
1068 brw_blorp_init(brw);
1069
1070 brw->urb.size = devinfo->urb.size;
1071
1072 if (brw->gen == 6)
1073 brw->urb.gs_present = false;
1074
1075 brw->prim_restart.in_progress = false;
1076 brw->prim_restart.enable_cut_index = false;
1077 brw->gs.enabled = false;
1078 brw->clip.viewport_count = 1;
1079
1080 brw->predicate.state = BRW_PREDICATE_STATE_RENDER;
1081
1082 brw->max_gtt_map_object_size = screen->max_gtt_map_object_size;
1083
1084 ctx->VertexProgram._MaintainTnlProgram = true;
1085 ctx->FragmentProgram._MaintainTexEnvProgram = true;
1086
1087 brw_draw_init( brw );
1088
1089 if ((flags & __DRI_CTX_FLAG_DEBUG) != 0) {
1090 /* Turn on some extra GL_ARB_debug_output generation. */
1091 brw->perf_debug = true;
1092 }
1093
1094 if ((flags & __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS) != 0) {
1095 ctx->Const.ContextFlags |= GL_CONTEXT_FLAG_ROBUST_ACCESS_BIT_ARB;
1096 ctx->Const.RobustAccess = GL_TRUE;
1097 }
1098
1099 if (INTEL_DEBUG & DEBUG_SHADER_TIME)
1100 brw_init_shader_time(brw);
1101
1102 _mesa_compute_version(ctx);
1103
1104 _mesa_initialize_dispatch_tables(ctx);
1105 _mesa_initialize_vbo_vtxfmt(ctx);
1106
1107 if (ctx->Extensions.INTEL_performance_query)
1108 brw_init_performance_queries(brw);
1109
1110 vbo_use_buffer_objects(ctx);
1111 vbo_always_unmap_buffers(ctx);
1112
1113 return true;
1114 }
1115
1116 void
1117 intelDestroyContext(__DRIcontext * driContextPriv)
1118 {
1119 struct brw_context *brw =
1120 (struct brw_context *) driContextPriv->driverPrivate;
1121 struct gl_context *ctx = &brw->ctx;
1122
1123 _mesa_meta_free(&brw->ctx);
1124
1125 if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
1126 /* Force a report. */
1127 brw->shader_time.report_time = 0;
1128
1129 brw_collect_and_report_shader_time(brw);
1130 brw_destroy_shader_time(brw);
1131 }
1132
1133 if (brw->gen >= 6)
1134 blorp_finish(&brw->blorp);
1135
1136 brw_destroy_state(brw);
1137 brw_draw_destroy(brw);
1138
1139 brw_bo_unreference(brw->curbe.curbe_bo);
1140 if (brw->vs.base.scratch_bo)
1141 brw_bo_unreference(brw->vs.base.scratch_bo);
1142 if (brw->tcs.base.scratch_bo)
1143 brw_bo_unreference(brw->tcs.base.scratch_bo);
1144 if (brw->tes.base.scratch_bo)
1145 brw_bo_unreference(brw->tes.base.scratch_bo);
1146 if (brw->gs.base.scratch_bo)
1147 brw_bo_unreference(brw->gs.base.scratch_bo);
1148 if (brw->wm.base.scratch_bo)
1149 brw_bo_unreference(brw->wm.base.scratch_bo);
1150
1151 brw_destroy_hw_context(brw->bufmgr, brw->hw_ctx);
1152
1153 if (ctx->swrast_context) {
1154 _swsetup_DestroyContext(&brw->ctx);
1155 _tnl_DestroyContext(&brw->ctx);
1156 }
1157 _vbo_DestroyContext(&brw->ctx);
1158
1159 if (ctx->swrast_context)
1160 _swrast_DestroyContext(&brw->ctx);
1161
1162 brw_fini_pipe_control(brw);
1163 intel_batchbuffer_free(&brw->batch);
1164
1165 brw_bo_unreference(brw->throttle_batch[1]);
1166 brw_bo_unreference(brw->throttle_batch[0]);
1167 brw->throttle_batch[1] = NULL;
1168 brw->throttle_batch[0] = NULL;
1169
1170 driDestroyOptionCache(&brw->optionCache);
1171
1172 /* free the Mesa context */
1173 _mesa_free_context_data(&brw->ctx);
1174
1175 ralloc_free(brw);
1176 driContextPriv->driverPrivate = NULL;
1177 }
1178
1179 GLboolean
1180 intelUnbindContext(__DRIcontext * driContextPriv)
1181 {
1182    /* Unset current context and dispatch table */
1183 _mesa_make_current(NULL, NULL, NULL);
1184
1185 return true;
1186 }
1187
1188 /**
1189  * Fixes up the context for GLES 2/3 with our default-to-sRGB-capable behavior
1190 * on window system framebuffers.
1191 *
1192 * Desktop GL is fairly reasonable in its handling of sRGB: You can ask if
1193 * your renderbuffer can do sRGB encode, and you can flip a switch that does
1194 * sRGB encode if the renderbuffer can handle it. You can ask specifically
1195 * for a visual where you're guaranteed to be capable, but it turns out that
1196 * everyone just makes all their ARGB8888 visuals capable and doesn't offer
1197 * incapable ones, because there's no difference between the two in resources
1198 * used. Applications thus get built that accidentally rely on the default
1199 * visual choice being sRGB, so we make ours sRGB capable. Everything sounds
1200 * great...
1201 *
1202 * But for GLES2/3, they decided that it was silly to not turn on sRGB encode
1203 * for sRGB renderbuffers you made with the GL_EXT_texture_sRGB equivalent.
1204 * So they removed the enable knob and made it "if the renderbuffer is sRGB
1205 * capable, do sRGB encode". Then, for your window system renderbuffers, you
1206 * can ask for sRGB visuals and get sRGB encode, or not ask for sRGB visuals
1207 * and get no sRGB encode (assuming that both kinds of visual are available).
1208 * Thus our choice to support sRGB by default on our visuals for desktop would
1209 * result in broken rendering of GLES apps that aren't expecting sRGB encode.
1210 *
1211 * Unfortunately, renderbuffer setup happens before a context is created. So
1212 * in intel_screen.c we always set up sRGB, and here, if you're a GLES2/3
1213 * context (without an sRGB visual, though we don't have sRGB visuals exposed
1214 * yet), we go turn that back off before anyone finds out.
1215 */
1216 static void
1217 intel_gles3_srgb_workaround(struct brw_context *brw,
1218 struct gl_framebuffer *fb)
1219 {
1220 struct gl_context *ctx = &brw->ctx;
1221
1222 if (_mesa_is_desktop_gl(ctx) || !fb->Visual.sRGBCapable)
1223 return;
1224
1225 /* Some day when we support the sRGB capable bit on visuals available for
1226 * GLES, we'll need to respect that and not disable things here.
1227 */
1228 fb->Visual.sRGBCapable = false;
1229 for (int i = 0; i < BUFFER_COUNT; i++) {
1230 struct gl_renderbuffer *rb = fb->Attachment[i].Renderbuffer;
1231 if (rb)
1232 rb->Format = _mesa_get_srgb_format_linear(rb->Format);
1233 }
1234 }
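/* For example (illustrative), a winsys renderbuffer that intel_screen.c set
 * up as MESA_FORMAT_B8G8R8A8_SRGB comes out of the loop above as
 * MESA_FORMAT_B8G8R8A8_UNORM, since _mesa_get_srgb_format_linear() maps each
 * sRGB format to its linear counterpart and leaves linear formats alone.
 */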
1235
1236 GLboolean
1237 intelMakeCurrent(__DRIcontext * driContextPriv,
1238 __DRIdrawable * driDrawPriv,
1239 __DRIdrawable * driReadPriv)
1240 {
1241 struct brw_context *brw;
1242 GET_CURRENT_CONTEXT(curCtx);
1243
1244 if (driContextPriv)
1245 brw = (struct brw_context *) driContextPriv->driverPrivate;
1246 else
1247 brw = NULL;
1248
1249 /* According to the glXMakeCurrent() man page: "Pending commands to
1250 * the previous context, if any, are flushed before it is released."
1251 * But only flush if we're actually changing contexts.
1252 */
1253 if (brw_context(curCtx) && brw_context(curCtx) != brw) {
1254 _mesa_flush(curCtx);
1255 }
1256
1257 if (driContextPriv) {
1258 struct gl_context *ctx = &brw->ctx;
1259 struct gl_framebuffer *fb, *readFb;
1260
1261 if (driDrawPriv == NULL) {
1262 fb = _mesa_get_incomplete_framebuffer();
1263 } else {
1264 fb = driDrawPriv->driverPrivate;
1265 driContextPriv->dri2.draw_stamp = driDrawPriv->dri2.stamp - 1;
1266 }
1267
1268 if (driReadPriv == NULL) {
1269 readFb = _mesa_get_incomplete_framebuffer();
1270 } else {
1271 readFb = driReadPriv->driverPrivate;
1272 driContextPriv->dri2.read_stamp = driReadPriv->dri2.stamp - 1;
1273 }
1274
1275 /* The sRGB workaround changes the renderbuffer's format. We must change
1276        * the format before the renderbuffer's miptree gets allocated, otherwise
1277 * the formats of the renderbuffer and its miptree will differ.
1278 */
1279 intel_gles3_srgb_workaround(brw, fb);
1280 intel_gles3_srgb_workaround(brw, readFb);
1281
1282 /* If the context viewport hasn't been initialized, force a call out to
1283 * the loader to get buffers so we have a drawable size for the initial
1284 * viewport. */
1285 if (!brw->ctx.ViewportInitialized)
1286 intel_prepare_render(brw);
1287
1288 _mesa_make_current(ctx, fb, readFb);
1289 } else {
1290 _mesa_make_current(NULL, NULL, NULL);
1291 }
1292
1293 return true;
1294 }
1295
1296 void
1297 intel_resolve_for_dri2_flush(struct brw_context *brw,
1298 __DRIdrawable *drawable)
1299 {
1300 if (brw->gen < 6) {
1301 /* MSAA and fast color clear are not supported, so don't waste time
1302 * checking whether a resolve is needed.
1303 */
1304 return;
1305 }
1306
1307 struct gl_framebuffer *fb = drawable->driverPrivate;
1308 struct intel_renderbuffer *rb;
1309
1310 /* Usually, only the back buffer will need to be downsampled. However,
1311 * the front buffer will also need it if the user has rendered into it.
1312 */
1313 static const gl_buffer_index buffers[2] = {
1314 BUFFER_BACK_LEFT,
1315 BUFFER_FRONT_LEFT,
1316 };
1317
1318 for (int i = 0; i < 2; ++i) {
1319 rb = intel_get_renderbuffer(fb, buffers[i]);
1320 if (rb == NULL || rb->mt == NULL)
1321 continue;
1322 if (rb->mt->num_samples <= 1) {
1323 assert(rb->mt_layer == 0 && rb->mt_level == 0 &&
1324 rb->layer_count == 1);
1325 intel_miptree_prepare_access(brw, rb->mt, 0, 1, 0, 1, false, false);
1326 } else {
1327 intel_renderbuffer_downsample(brw, rb);
1328 }
1329 }
1330 }
1331
1332 static unsigned
1333 intel_bits_per_pixel(const struct intel_renderbuffer *rb)
1334 {
1335 return _mesa_get_format_bytes(intel_rb_format(rb)) * 8;
1336 }
1337
1338 static void
1339 intel_query_dri2_buffers(struct brw_context *brw,
1340 __DRIdrawable *drawable,
1341 __DRIbuffer **buffers,
1342 int *count);
1343
1344 static void
1345 intel_process_dri2_buffer(struct brw_context *brw,
1346 __DRIdrawable *drawable,
1347 __DRIbuffer *buffer,
1348 struct intel_renderbuffer *rb,
1349 const char *buffer_name);
1350
1351 static void
1352 intel_update_image_buffers(struct brw_context *brw, __DRIdrawable *drawable);
1353
1354 static void
1355 intel_update_dri2_buffers(struct brw_context *brw, __DRIdrawable *drawable)
1356 {
1357 struct gl_framebuffer *fb = drawable->driverPrivate;
1358 struct intel_renderbuffer *rb;
1359 __DRIbuffer *buffers = NULL;
1360 int count;
1361 const char *region_name;
1362
1363 /* Set this up front, so that in case our buffers get invalidated
1364 * while we're getting new buffers, we don't clobber the stamp and
1365 * thus ignore the invalidate. */
1366 drawable->lastStamp = drawable->dri2.stamp;
1367
1368 if (unlikely(INTEL_DEBUG & DEBUG_DRI))
1369 fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);
1370
1371 intel_query_dri2_buffers(brw, drawable, &buffers, &count);
1372
1373 if (buffers == NULL)
1374 return;
1375
1376 for (int i = 0; i < count; i++) {
1377 switch (buffers[i].attachment) {
1378 case __DRI_BUFFER_FRONT_LEFT:
1379 rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
1380 region_name = "dri2 front buffer";
1381 break;
1382
1383 case __DRI_BUFFER_FAKE_FRONT_LEFT:
1384 rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
1385 region_name = "dri2 fake front buffer";
1386 break;
1387
1388 case __DRI_BUFFER_BACK_LEFT:
1389 rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
1390 region_name = "dri2 back buffer";
1391 break;
1392
1393 case __DRI_BUFFER_DEPTH:
1394 case __DRI_BUFFER_HIZ:
1395 case __DRI_BUFFER_DEPTH_STENCIL:
1396 case __DRI_BUFFER_STENCIL:
1397 case __DRI_BUFFER_ACCUM:
1398 default:
1399 fprintf(stderr,
1400 "unhandled buffer attach event, attachment type %d\n",
1401 buffers[i].attachment);
1402 return;
1403 }
1404
1405 intel_process_dri2_buffer(brw, drawable, &buffers[i], rb, region_name);
1406 }
1407
1408 }
1409
1410 void
1411 intel_update_renderbuffers(__DRIcontext *context, __DRIdrawable *drawable)
1412 {
1413 struct brw_context *brw = context->driverPrivate;
1414 __DRIscreen *dri_screen = brw->screen->driScrnPriv;
1415
1416 /* Set this up front, so that in case our buffers get invalidated
1417 * while we're getting new buffers, we don't clobber the stamp and
1418 * thus ignore the invalidate. */
1419 drawable->lastStamp = drawable->dri2.stamp;
1420
1421 if (unlikely(INTEL_DEBUG & DEBUG_DRI))
1422 fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);
1423
1424 if (dri_screen->image.loader)
1425 intel_update_image_buffers(brw, drawable);
1426 else
1427 intel_update_dri2_buffers(brw, drawable);
1428
1429 driUpdateFramebufferSize(&brw->ctx, drawable);
1430 }
1431
1432 /**
1433  * intel_prepare_render should be called anywhere that current read/drawbuffer
1434 * state is required.
1435 */
1436 void
1437 intel_prepare_render(struct brw_context *brw)
1438 {
1439 struct gl_context *ctx = &brw->ctx;
1440 __DRIcontext *driContext = brw->driContext;
1441 __DRIdrawable *drawable;
1442
1443 drawable = driContext->driDrawablePriv;
1444 if (drawable && drawable->dri2.stamp != driContext->dri2.draw_stamp) {
1445 if (drawable->lastStamp != drawable->dri2.stamp)
1446 intel_update_renderbuffers(driContext, drawable);
1447 driContext->dri2.draw_stamp = drawable->dri2.stamp;
1448 }
1449
1450 drawable = driContext->driReadablePriv;
1451 if (drawable && drawable->dri2.stamp != driContext->dri2.read_stamp) {
1452 if (drawable->lastStamp != drawable->dri2.stamp)
1453 intel_update_renderbuffers(driContext, drawable);
1454 driContext->dri2.read_stamp = drawable->dri2.stamp;
1455 }
1456
1457 /* If we're currently rendering to the front buffer, the rendering
1458 * that will happen next will probably dirty the front buffer. So
1459 * mark it as dirty here.
1460 */
1461 if (_mesa_is_front_buffer_drawing(ctx->DrawBuffer))
1462 brw->front_buffer_dirty = true;
1463 }
1464
1465 /**
1466 * \brief Query DRI2 to obtain a DRIdrawable's buffers.
1467 *
1468 * To determine which DRI buffers to request, examine the renderbuffers
1469 * attached to the drawable's framebuffer. Then request the buffers with
1470 * DRI2GetBuffers() or DRI2GetBuffersWithFormat().
1471 *
1472 * This is called from intel_update_renderbuffers().
1473 *
1474 * \param drawable Drawable whose buffers are queried.
1475 * \param buffers [out] List of buffers returned by DRI2 query.
1476 * \param buffer_count [out] Number of buffers returned.
1477 *
1478 * \see intel_update_renderbuffers()
1479 * \see DRI2GetBuffers()
1480 * \see DRI2GetBuffersWithFormat()
1481 */
1482 static void
1483 intel_query_dri2_buffers(struct brw_context *brw,
1484 __DRIdrawable *drawable,
1485 __DRIbuffer **buffers,
1486 int *buffer_count)
1487 {
1488 __DRIscreen *dri_screen = brw->screen->driScrnPriv;
1489 struct gl_framebuffer *fb = drawable->driverPrivate;
1490 int i = 0;
1491 unsigned attachments[8];
1492
1493 struct intel_renderbuffer *front_rb;
1494 struct intel_renderbuffer *back_rb;
1495
1496 front_rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
1497 back_rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
1498
1499 memset(attachments, 0, sizeof(attachments));
1500 if ((_mesa_is_front_buffer_drawing(fb) ||
1501 _mesa_is_front_buffer_reading(fb) ||
1502 !back_rb) && front_rb) {
1503 /* If a fake front buffer is in use, then querying for
1504 * __DRI_BUFFER_FRONT_LEFT will cause the server to copy the image from
1505 * the real front buffer to the fake front buffer. So before doing the
1506 * query, we need to make sure all the pending drawing has landed in the
1507 * real front buffer.
1508 */
1509 intel_batchbuffer_flush(brw);
1510 intel_flush_front(&brw->ctx);
1511
1512 attachments[i++] = __DRI_BUFFER_FRONT_LEFT;
1513 attachments[i++] = intel_bits_per_pixel(front_rb);
1514 } else if (front_rb && brw->front_buffer_dirty) {
1515 /* We have pending front buffer rendering, but we aren't querying for a
1516 * front buffer. If the front buffer we have is a fake front buffer,
1517 * the X server is going to throw it away when it processes the query.
1518 * So before doing the query, make sure all the pending drawing has
1519 * landed in the real front buffer.
1520 */
1521 intel_batchbuffer_flush(brw);
1522 intel_flush_front(&brw->ctx);
1523 }
1524
1525 if (back_rb) {
1526 attachments[i++] = __DRI_BUFFER_BACK_LEFT;
1527 attachments[i++] = intel_bits_per_pixel(back_rb);
1528 }
1529
1530 assert(i <= ARRAY_SIZE(attachments));
1531
1532 *buffers =
1533 dri_screen->dri2.loader->getBuffersWithFormat(drawable,
1534 &drawable->w,
1535 &drawable->h,
1536 attachments, i / 2,
1537 buffer_count,
1538 drawable->loaderPrivate);
1539 }
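/* Illustrative layout of the attachments array built above: entries come in
 * (attachment, bits-per-pixel) pairs, so a drawable with 32 bpp front and
 * back buffers would send {__DRI_BUFFER_FRONT_LEFT, 32,
 * __DRI_BUFFER_BACK_LEFT, 32}, with i / 2 = 2 pairs requested.
 */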
1540
1541 /**
1542 * \brief Assign a DRI buffer's DRM region to a renderbuffer.
1543 *
1544 * This is called from intel_update_renderbuffers().
1545 *
1546 * \par Note:
1547 * DRI buffers whose attachment point is DRI2BufferStencil or
1548 * DRI2BufferDepthStencil are handled as special cases.
1549 *
1550  * \param buffer_name is a human-readable name, such as "dri2 front buffer",
1551 * that is passed to brw_bo_gem_create_from_name().
1552 *
1553 * \see intel_update_renderbuffers()
1554 */
1555 static void
1556 intel_process_dri2_buffer(struct brw_context *brw,
1557 __DRIdrawable *drawable,
1558 __DRIbuffer *buffer,
1559 struct intel_renderbuffer *rb,
1560 const char *buffer_name)
1561 {
1562 struct gl_framebuffer *fb = drawable->driverPrivate;
1563 struct brw_bo *bo;
1564
1565 if (!rb)
1566 return;
1567
1568 unsigned num_samples = rb->Base.Base.NumSamples;
1569
1570 /* We try to avoid closing and reopening the same BO name, because the first
1571 * use of a mapping of the buffer involves a bunch of page faulting which is
1572 * moderately expensive.
1573 */
1574 struct intel_mipmap_tree *last_mt;
1575 if (num_samples == 0)
1576 last_mt = rb->mt;
1577 else
1578 last_mt = rb->singlesample_mt;
1579
1580 uint32_t old_name = 0;
1581 if (last_mt) {
1582 /* The bo already has a name because the miptree was created by a
1583 * previous call to intel_process_dri2_buffer(). If a bo already has a
1584 * name, then brw_bo_flink() is a low-cost getter. It does not
1585 * create a new name.
1586 */
1587 brw_bo_flink(last_mt->bo, &old_name);
1588 }
1589
1590 if (old_name == buffer->name)
1591 return;
1592
1593 if (unlikely(INTEL_DEBUG & DEBUG_DRI)) {
1594 fprintf(stderr,
1595 "attaching buffer %d, at %d, cpp %d, pitch %d\n",
1596 buffer->name, buffer->attachment,
1597 buffer->cpp, buffer->pitch);
1598 }
1599
1600 bo = brw_bo_gem_create_from_name(brw->bufmgr, buffer_name,
1601 buffer->name);
1602 if (!bo) {
1603 fprintf(stderr,
1604 "Failed to open BO for returned DRI2 buffer "
1605 "(%dx%d, %s, named %d).\n"
1606 "This is likely a bug in the X Server that will lead to a "
1607 "crash soon.\n",
1608 drawable->w, drawable->h, buffer_name, buffer->name);
1609 return;
1610 }
1611
1612 intel_update_winsys_renderbuffer_miptree(brw, rb, bo,
1613 drawable->w, drawable->h,
1614 buffer->pitch);
1615
1616 if (_mesa_is_front_buffer_drawing(fb) &&
1617 (buffer->attachment == __DRI_BUFFER_FRONT_LEFT ||
1618 buffer->attachment == __DRI_BUFFER_FAKE_FRONT_LEFT) &&
1619 rb->Base.Base.NumSamples > 1) {
1620 intel_renderbuffer_upsample(brw, rb);
1621 }
1622
1623 assert(rb->mt);
1624
1625 brw_bo_unreference(bo);
1626 }
1627
1628 /**
1629 * \brief Query DRI image loader to obtain a DRIdrawable's buffers.
1630 *
1631 * To determine which DRI buffers to request, examine the renderbuffers
1632 * attached to the drawable's framebuffer. Then request the buffers from
1633  * the image loader.
1634 *
1635 * This is called from intel_update_renderbuffers().
1636 *
1637 * \param drawable Drawable whose buffers are queried.
1638  * \param buffers [out] List of buffers returned by the query.
1639 * \param buffer_count [out] Number of buffers returned.
1640 *
1641 * \see intel_update_renderbuffers()
1642 */
1643
1644 static void
1645 intel_update_image_buffer(struct brw_context *intel,
1646 __DRIdrawable *drawable,
1647 struct intel_renderbuffer *rb,
1648 __DRIimage *buffer,
1649 enum __DRIimageBufferMask buffer_type)
1650 {
1651 struct gl_framebuffer *fb = drawable->driverPrivate;
1652
1653 if (!rb || !buffer->bo)
1654 return;
1655
1656 unsigned num_samples = rb->Base.Base.NumSamples;
1657
1658 /* Check and see if we're already bound to the right
1659 * buffer object
1660 */
1661 struct intel_mipmap_tree *last_mt;
1662 if (num_samples == 0)
1663 last_mt = rb->mt;
1664 else
1665 last_mt = rb->singlesample_mt;
1666
1667 if (last_mt && last_mt->bo == buffer->bo)
1668 return;
1669
1670 intel_update_winsys_renderbuffer_miptree(intel, rb, buffer->bo,
1671 buffer->width, buffer->height,
1672 buffer->pitch);
1673
1674 if (_mesa_is_front_buffer_drawing(fb) &&
1675 buffer_type == __DRI_IMAGE_BUFFER_FRONT &&
1676 rb->Base.Base.NumSamples > 1) {
1677 intel_renderbuffer_upsample(intel, rb);
1678 }
1679 }
1680
1681 static void
1682 intel_update_image_buffers(struct brw_context *brw, __DRIdrawable *drawable)
1683 {
1684 struct gl_framebuffer *fb = drawable->driverPrivate;
1685 __DRIscreen *dri_screen = brw->screen->driScrnPriv;
1686 struct intel_renderbuffer *front_rb;
1687 struct intel_renderbuffer *back_rb;
1688 struct __DRIimageList images;
1689 mesa_format format;
1690 uint32_t buffer_mask = 0;
1691 int ret;
1692
1693 front_rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
1694 back_rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
1695
1696 if (back_rb)
1697 format = intel_rb_format(back_rb);
1698 else if (front_rb)
1699 format = intel_rb_format(front_rb);
1700 else
1701 return;
1702
1703 if (front_rb && (_mesa_is_front_buffer_drawing(fb) ||
1704 _mesa_is_front_buffer_reading(fb) || !back_rb)) {
1705 buffer_mask |= __DRI_IMAGE_BUFFER_FRONT;
1706 }
1707
1708 if (back_rb)
1709 buffer_mask |= __DRI_IMAGE_BUFFER_BACK;
1710
1711 ret = dri_screen->image.loader->getBuffers(drawable,
1712 driGLFormatToImageFormat(format),
1713 &drawable->dri2.stamp,
1714 drawable->loaderPrivate,
1715 buffer_mask,
1716 &images);
1717 if (!ret)
1718 return;
1719
1720 if (images.image_mask & __DRI_IMAGE_BUFFER_FRONT) {
1721 drawable->w = images.front->width;
1722 drawable->h = images.front->height;
1723 intel_update_image_buffer(brw,
1724 drawable,
1725 front_rb,
1726 images.front,
1727 __DRI_IMAGE_BUFFER_FRONT);
1728 }
1729
1730 if (images.image_mask & __DRI_IMAGE_BUFFER_BACK) {
1731 drawable->w = images.back->width;
1732 drawable->h = images.back->height;
1733 intel_update_image_buffer(brw,
1734 drawable,
1735 back_rb,
1736 images.back,
1737 __DRI_IMAGE_BUFFER_BACK);
1738 }
1739 }