mesa: replace ctx->Polygon._FrontBit with a helper function
[mesa.git] / src / mesa / drivers / dri / i965 / brw_context.c
1 /*
2 Copyright 2003 VMware, Inc.
3 Copyright (C) Intel Corp. 2006. All Rights Reserved.
4 Intel funded Tungsten Graphics to
5 develop this 3D driver.
6
7 Permission is hereby granted, free of charge, to any person obtaining
8 a copy of this software and associated documentation files (the
9 "Software"), to deal in the Software without restriction, including
10 without limitation the rights to use, copy, modify, merge, publish,
11 distribute, sublicense, and/or sell copies of the Software, and to
12 permit persons to whom the Software is furnished to do so, subject to
13 the following conditions:
14
15 The above copyright notice and this permission notice (including the
16 next paragraph) shall be included in all copies or substantial
17 portions of the Software.
18
19 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
20 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
22 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
23 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
24 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
25 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26
27 **********************************************************************/
28 /*
29 * Authors:
30 * Keith Whitwell <keithw@vmware.com>
31 */
32
33
34 #include "compiler/nir/nir.h"
35 #include "main/api_exec.h"
36 #include "main/context.h"
37 #include "main/fbobject.h"
38 #include "main/extensions.h"
39 #include "main/imports.h"
40 #include "main/macros.h"
41 #include "main/points.h"
42 #include "main/version.h"
43 #include "main/vtxfmt.h"
44 #include "main/texobj.h"
45 #include "main/framebuffer.h"
46 #include "main/stencil.h"
47 #include "main/state.h"
48
49 #include "vbo/vbo_context.h"
50
51 #include "drivers/common/driverfuncs.h"
52 #include "drivers/common/meta.h"
53 #include "utils.h"
54
55 #include "brw_context.h"
56 #include "brw_defines.h"
57 #include "brw_blorp.h"
58 #include "brw_draw.h"
59 #include "brw_state.h"
60
61 #include "intel_batchbuffer.h"
62 #include "intel_buffer_objects.h"
63 #include "intel_buffers.h"
64 #include "intel_fbo.h"
65 #include "intel_mipmap_tree.h"
66 #include "intel_pixel.h"
67 #include "intel_image.h"
68 #include "intel_tex.h"
69 #include "intel_tex_obj.h"
70
71 #include "swrast_setup/swrast_setup.h"
72 #include "tnl/tnl.h"
73 #include "tnl/t_pipeline.h"
74 #include "util/ralloc.h"
75 #include "util/debug.h"
76 #include "isl/isl.h"
77
78 /***************************************
79 * Mesa's Driver Functions
80 ***************************************/
81
82 const char *const brw_vendor_string = "Intel Open Source Technology Center";
83
84 static const char *
85 get_bsw_model(const struct intel_screen *screen)
86 {
87 switch (screen->eu_total) {
88 case 16:
89 return "405";
90 case 12:
91 return "400";
92 default:
  93       return "   ";
94 }
95 }
96
97 const char *
98 brw_get_renderer_string(const struct intel_screen *screen)
99 {
100 const char *chipset;
101 static char buffer[128];
102 char *bsw = NULL;
103
104 switch (screen->deviceID) {
105 #undef CHIPSET
106 #define CHIPSET(id, symbol, str) case id: chipset = str; break;
107 #include "pci_ids/i965_pci_ids.h"
108 default:
109 chipset = "Unknown Intel Chipset";
110 break;
111 }
112
113 /* Braswell branding is funny, so we have to fix it up here */
114 if (screen->deviceID == 0x22B1) {
115 bsw = strdup(chipset);
116 char *needle = strstr(bsw, "XXX");
117 if (needle) {
118 memcpy(needle, get_bsw_model(screen), 3);
119 chipset = bsw;
120 }
121 }
122
123 (void) driGetRendererString(buffer, chipset, 0);
124 free(bsw);
125 return buffer;
126 }
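/* For reference: pci_ids/i965_pci_ids.h is an X-macro table, so the
 * #include above expands every CHIPSET(id, symbol, str) entry into a case
 * label of the switch, per the CHIPSET macro defined just before it. A
 * hypothetical Braswell entry such as
 *
 *    CHIPSET(0x22B1, chv, "Intel(R) HD Graphics XXX")
 *
 * then gets its three-character "XXX" placeholder patched in place by the
 * memcpy() above -- e.g. to "405" when eu_total == 16. This is also why
 * get_bsw_model() must return exactly three characters in every case.
 */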
127
128 static const GLubyte *
129 intel_get_string(struct gl_context * ctx, GLenum name)
130 {
131 const struct brw_context *const brw = brw_context(ctx);
132
133 switch (name) {
134 case GL_VENDOR:
135 return (GLubyte *) brw_vendor_string;
136
137 case GL_RENDERER:
138 return
139 (GLubyte *) brw_get_renderer_string(brw->screen);
140
141 default:
142 return NULL;
143 }
144 }
145
146 static void
147 intel_viewport(struct gl_context *ctx)
148 {
149 struct brw_context *brw = brw_context(ctx);
150 __DRIcontext *driContext = brw->driContext;
151
152 if (_mesa_is_winsys_fbo(ctx->DrawBuffer)) {
153 if (driContext->driDrawablePriv)
154 dri2InvalidateDrawable(driContext->driDrawablePriv);
155 if (driContext->driReadablePriv)
156 dri2InvalidateDrawable(driContext->driReadablePriv);
157 }
158 }
159
160 static void
161 intel_update_framebuffer(struct gl_context *ctx,
162 struct gl_framebuffer *fb)
163 {
164 struct brw_context *brw = brw_context(ctx);
165
166 /* Quantize the derived default number of samples
167 */
168 fb->DefaultGeometry._NumSamples =
169 intel_quantize_num_samples(brw->screen,
170 fb->DefaultGeometry.NumSamples);
171 }
172
173 static bool
174 intel_disable_rb_aux_buffer(struct brw_context *brw, const struct brw_bo *bo)
175 {
176 const struct gl_framebuffer *fb = brw->ctx.DrawBuffer;
177 bool found = false;
178
179 for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
180 const struct intel_renderbuffer *irb =
181 intel_renderbuffer(fb->_ColorDrawBuffers[i]);
182
183 if (irb && irb->mt->bo == bo) {
184 found = brw->draw_aux_buffer_disabled[i] = true;
185 }
186 }
187
188 return found;
189 }
190
191 static void
192 intel_update_state(struct gl_context * ctx)
193 {
194 GLuint new_state = ctx->NewState;
195 struct brw_context *brw = brw_context(ctx);
196 struct intel_texture_object *tex_obj;
197 struct intel_renderbuffer *depth_irb;
198
199 if (ctx->swrast_context)
200 _swrast_InvalidateState(ctx, new_state);
201
202 brw->NewGLState |= new_state;
203
204 _mesa_unlock_context_textures(ctx);
205
206 if (new_state & (_NEW_STENCIL | _NEW_BUFFERS)) {
207 brw->stencil_enabled = _mesa_stencil_is_enabled(ctx);
208 brw->stencil_two_sided = _mesa_stencil_is_two_sided(ctx);
209 brw->stencil_write_enabled =
210 _mesa_stencil_is_write_enabled(ctx, brw->stencil_two_sided);
211 }
212
213 if (new_state & _NEW_POLYGON)
214 brw->polygon_front_bit = _mesa_polygon_get_front_bit(ctx);
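   /* A minimal sketch of what _mesa_polygon_get_front_bit() (main/state.h)
    * presumably derives, assuming it mirrors the old update_frontbit()
    * computation that used to populate ctx->Polygon._FrontBit:
    *
    *    if (ctx->Transform.ClipOrigin == GL_LOWER_LEFT)
    *       return ctx->Polygon.FrontFace == GL_CW;
    *    else
    *       return ctx->Polygon.FrontFace == GL_CCW;
    */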
215
216 intel_prepare_render(brw);
217
218 /* Resolve the depth buffer's HiZ buffer. */
219 depth_irb = intel_get_renderbuffer(ctx->DrawBuffer, BUFFER_DEPTH);
220 if (depth_irb && depth_irb->mt) {
221 intel_miptree_prepare_depth(brw, depth_irb->mt,
222 depth_irb->mt_level,
223 depth_irb->mt_layer,
224 depth_irb->layer_count);
225 }
226
227 memset(brw->draw_aux_buffer_disabled, 0,
228 sizeof(brw->draw_aux_buffer_disabled));
229
230 /* Resolve depth buffer and render cache of each enabled texture. */
231 int maxEnabledUnit = ctx->Texture._MaxEnabledTexImageUnit;
232 for (int i = 0; i <= maxEnabledUnit; i++) {
233 if (!ctx->Texture.Unit[i]._Current)
234 continue;
235 tex_obj = intel_texture_object(ctx->Texture.Unit[i]._Current);
236 if (!tex_obj || !tex_obj->mt)
237 continue;
238
 239       /* We need intel_texture_object::_Format to be valid */
240 intel_finalize_mipmap_tree(brw, i);
241
242 bool aux_supported;
243 intel_miptree_prepare_texture(brw, tex_obj->mt, tex_obj->_Format,
244 &aux_supported);
245
246 if (!aux_supported && brw->gen >= 9 &&
247 intel_disable_rb_aux_buffer(brw, tex_obj->mt->bo)) {
248 perf_debug("Sampling renderbuffer with non-compressible format - "
249 "turning off compression");
250 }
251
252 brw_render_cache_set_check_flush(brw, tex_obj->mt->bo);
253
254 if (tex_obj->base.StencilSampling ||
255 tex_obj->mt->format == MESA_FORMAT_S_UINT8) {
256 intel_update_r8stencil(brw, tex_obj->mt);
257 }
258 }
259
260 /* Resolve color for each active shader image. */
261 for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
262 const struct gl_program *prog = ctx->_Shader->CurrentProgram[i];
263
264 if (unlikely(prog && prog->info.num_images)) {
265 for (unsigned j = 0; j < prog->info.num_images; j++) {
266 struct gl_image_unit *u =
267 &ctx->ImageUnits[prog->sh.ImageUnits[j]];
268 tex_obj = intel_texture_object(u->TexObj);
269
270 if (tex_obj && tex_obj->mt) {
271 intel_miptree_prepare_image(brw, tex_obj->mt);
272
273 if (intel_miptree_is_lossless_compressed(brw, tex_obj->mt) &&
274 intel_disable_rb_aux_buffer(brw, tex_obj->mt->bo)) {
275 perf_debug("Using renderbuffer as shader image - turning "
276 "off lossless compression");
277 }
278
279 brw_render_cache_set_check_flush(brw, tex_obj->mt->bo);
280 }
281 }
282 }
283 }
284
285 /* Resolve color buffers for non-coherent framebuffer fetch. */
286 if (!ctx->Extensions.MESA_shader_framebuffer_fetch &&
287 ctx->FragmentProgram._Current &&
288 ctx->FragmentProgram._Current->info.outputs_read) {
289 const struct gl_framebuffer *fb = ctx->DrawBuffer;
290
291 for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
292 const struct intel_renderbuffer *irb =
293 intel_renderbuffer(fb->_ColorDrawBuffers[i]);
294
295 if (irb) {
296 intel_miptree_prepare_fb_fetch(brw, irb->mt, irb->mt_level,
297 irb->mt_layer, irb->layer_count);
298 }
299 }
300 }
301
302 struct gl_framebuffer *fb = ctx->DrawBuffer;
303 for (int i = 0; i < fb->_NumColorDrawBuffers; i++) {
304 struct intel_renderbuffer *irb =
305 intel_renderbuffer(fb->_ColorDrawBuffers[i]);
306
307 if (irb == NULL || irb->mt == NULL)
308 continue;
309
310 intel_miptree_prepare_render(brw, irb->mt, irb->mt_level,
311 irb->mt_layer, irb->layer_count,
312 ctx->Color.sRGBEnabled);
313 }
314
315 _mesa_lock_context_textures(ctx);
316
317 if (new_state & _NEW_BUFFERS) {
318 intel_update_framebuffer(ctx, ctx->DrawBuffer);
319 if (ctx->DrawBuffer != ctx->ReadBuffer)
320 intel_update_framebuffer(ctx, ctx->ReadBuffer);
321 }
322 }
323
324 #define flushFront(screen) ((screen)->image.loader ? (screen)->image.loader->flushFrontBuffer : (screen)->dri2.loader->flushFrontBuffer)
325
326 static void
327 intel_flush_front(struct gl_context *ctx)
328 {
329 struct brw_context *brw = brw_context(ctx);
330 __DRIcontext *driContext = brw->driContext;
331 __DRIdrawable *driDrawable = driContext->driDrawablePriv;
332 __DRIscreen *const dri_screen = brw->screen->driScrnPriv;
333
334 if (brw->front_buffer_dirty && _mesa_is_winsys_fbo(ctx->DrawBuffer)) {
335 if (flushFront(dri_screen) && driDrawable &&
336 driDrawable->loaderPrivate) {
337
338 /* Resolve before flushing FAKE_FRONT_LEFT to FRONT_LEFT.
339 *
340 * This potentially resolves both front and back buffer. It
341 * is unnecessary to resolve the back, but harms nothing except
342 * performance. And no one cares about front-buffer render
343 * performance.
344 */
345 intel_resolve_for_dri2_flush(brw, driDrawable);
346 intel_batchbuffer_flush(brw);
347
348 flushFront(dri_screen)(driDrawable, driDrawable->loaderPrivate);
349
350 /* We set the dirty bit in intel_prepare_render() if we're
351 * front buffer rendering once we get there.
352 */
353 brw->front_buffer_dirty = false;
354 }
355 }
356 }
357
358 static void
359 intel_glFlush(struct gl_context *ctx)
360 {
361 struct brw_context *brw = brw_context(ctx);
362
363 intel_batchbuffer_flush(brw);
364 intel_flush_front(ctx);
365
366 brw->need_flush_throttle = true;
367 }
368
369 static void
370 intel_finish(struct gl_context * ctx)
371 {
372 struct brw_context *brw = brw_context(ctx);
373
374 intel_glFlush(ctx);
375
376 if (brw->batch.last_bo)
377 brw_bo_wait_rendering(brw, brw->batch.last_bo);
378 }
379
380 static void
381 brw_init_driver_functions(struct brw_context *brw,
382 struct dd_function_table *functions)
383 {
384 _mesa_init_driver_functions(functions);
385
386 /* GLX uses DRI2 invalidate events to handle window resizing.
387 * Unfortunately, EGL does not - libEGL is written in XCB (not Xlib),
388 * which doesn't provide a mechanism for snooping the event queues.
389 *
390 * So EGL still relies on viewport hacks to handle window resizing.
391 * This should go away with DRI3000.
392 */
393 if (!brw->driContext->driScreenPriv->dri2.useInvalidate)
394 functions->Viewport = intel_viewport;
395
396 functions->Flush = intel_glFlush;
397 functions->Finish = intel_finish;
398 functions->GetString = intel_get_string;
399 functions->UpdateState = intel_update_state;
400
401 intelInitTextureFuncs(functions);
402 intelInitTextureImageFuncs(functions);
403 intelInitTextureSubImageFuncs(functions);
404 intelInitTextureCopyImageFuncs(functions);
405 intelInitCopyImageFuncs(functions);
406 intelInitClearFuncs(functions);
407 intelInitBufferFuncs(functions);
408 intelInitPixelFuncs(functions);
409 intelInitBufferObjectFuncs(functions);
410 brw_init_syncobj_functions(functions);
411 brw_init_object_purgeable_functions(functions);
412
413 brwInitFragProgFuncs( functions );
414 brw_init_common_queryobj_functions(functions);
415 if (brw->gen >= 8 || brw->is_haswell)
416 hsw_init_queryobj_functions(functions);
417 else if (brw->gen >= 6)
418 gen6_init_queryobj_functions(functions);
419 else
420 gen4_init_queryobj_functions(functions);
421 brw_init_compute_functions(functions);
422 brw_init_conditional_render_functions(functions);
423
424 functions->QueryInternalFormat = brw_query_internal_format;
425
426 functions->NewTransformFeedback = brw_new_transform_feedback;
427 functions->DeleteTransformFeedback = brw_delete_transform_feedback;
428 if (can_do_mi_math_and_lrr(brw->screen)) {
429 functions->BeginTransformFeedback = hsw_begin_transform_feedback;
430 functions->EndTransformFeedback = hsw_end_transform_feedback;
431 functions->PauseTransformFeedback = hsw_pause_transform_feedback;
432 functions->ResumeTransformFeedback = hsw_resume_transform_feedback;
433 } else if (brw->gen >= 7) {
434 functions->BeginTransformFeedback = gen7_begin_transform_feedback;
435 functions->EndTransformFeedback = gen7_end_transform_feedback;
436 functions->PauseTransformFeedback = gen7_pause_transform_feedback;
437 functions->ResumeTransformFeedback = gen7_resume_transform_feedback;
438 functions->GetTransformFeedbackVertexCount =
439 brw_get_transform_feedback_vertex_count;
440 } else {
441 functions->BeginTransformFeedback = brw_begin_transform_feedback;
442 functions->EndTransformFeedback = brw_end_transform_feedback;
443 functions->PauseTransformFeedback = brw_pause_transform_feedback;
444 functions->ResumeTransformFeedback = brw_resume_transform_feedback;
445 functions->GetTransformFeedbackVertexCount =
446 brw_get_transform_feedback_vertex_count;
447 }
448
449 if (brw->gen >= 6)
450 functions->GetSamplePosition = gen6_get_sample_position;
451 }
452
453 static void
454 brw_initialize_context_constants(struct brw_context *brw)
455 {
456 struct gl_context *ctx = &brw->ctx;
457 const struct brw_compiler *compiler = brw->screen->compiler;
458
459 const bool stage_exists[MESA_SHADER_STAGES] = {
460 [MESA_SHADER_VERTEX] = true,
461 [MESA_SHADER_TESS_CTRL] = brw->gen >= 7,
462 [MESA_SHADER_TESS_EVAL] = brw->gen >= 7,
463 [MESA_SHADER_GEOMETRY] = brw->gen >= 6,
464 [MESA_SHADER_FRAGMENT] = true,
465 [MESA_SHADER_COMPUTE] =
466 ((ctx->API == API_OPENGL_COMPAT || ctx->API == API_OPENGL_CORE) &&
467 ctx->Const.MaxComputeWorkGroupSize[0] >= 1024) ||
468 (ctx->API == API_OPENGLES2 &&
469 ctx->Const.MaxComputeWorkGroupSize[0] >= 128) ||
470 _mesa_extension_override_enables.ARB_compute_shader,
471 };
472
473 unsigned num_stages = 0;
474 for (int i = 0; i < MESA_SHADER_STAGES; i++) {
475 if (stage_exists[i])
476 num_stages++;
477 }
478
479 unsigned max_samplers =
480 brw->gen >= 8 || brw->is_haswell ? BRW_MAX_TEX_UNIT : 16;
481
482 ctx->Const.MaxDualSourceDrawBuffers = 1;
483 ctx->Const.MaxDrawBuffers = BRW_MAX_DRAW_BUFFERS;
484 ctx->Const.MaxCombinedShaderOutputResources =
485 MAX_IMAGE_UNITS + BRW_MAX_DRAW_BUFFERS;
486
487 /* The timestamp register we can read for glGetTimestamp() is
488 * sometimes only 32 bits, before scaling to nanoseconds (depending
489 * on kernel).
490 *
491 * Once scaled to nanoseconds the timestamp would roll over at a
492 * non-power-of-two, so an application couldn't use
493 * GL_QUERY_COUNTER_BITS to handle rollover correctly. Instead, we
494 * report 36 bits and truncate at that (rolling over 5 times as
495 * often as the HW counter), and when the 32-bit counter rolls
496 * over, it happens to also be at a rollover in the reported value
497 * from near (1<<36) to 0.
498 *
499 * The low 32 bits rolls over in ~343 seconds. Our 36-bit result
500 * rolls over every ~69 seconds.
501 */
502 ctx->Const.QueryCounterBits.Timestamp = 36;
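   /* Worked example, assuming the common 80 ns hardware timestamp tick: the
    * raw 32-bit counter wraps after 2^32 * 80 ns ~= 343.6 s, while the
    * reported 36-bit nanosecond value wraps after 2^36 ns ~= 68.7 s -- five
    * times as often, as described above.
    */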
503
504 ctx->Const.MaxTextureCoordUnits = 8; /* Mesa limit */
505 ctx->Const.MaxImageUnits = MAX_IMAGE_UNITS;
506 if (brw->gen >= 7) {
507 ctx->Const.MaxRenderbufferSize = 16384;
508 ctx->Const.MaxTextureLevels = MIN2(15 /* 16384 */, MAX_TEXTURE_LEVELS);
509 ctx->Const.MaxCubeTextureLevels = 15; /* 16384 */
510 } else {
511 ctx->Const.MaxRenderbufferSize = 8192;
512 ctx->Const.MaxTextureLevels = MIN2(14 /* 8192 */, MAX_TEXTURE_LEVELS);
513 ctx->Const.MaxCubeTextureLevels = 14; /* 8192 */
514 }
515 ctx->Const.Max3DTextureLevels = 12; /* 2048 */
516 ctx->Const.MaxArrayTextureLayers = brw->gen >= 7 ? 2048 : 512;
517 ctx->Const.MaxTextureMbytes = 1536;
518 ctx->Const.MaxTextureRectSize = brw->gen >= 7 ? 16384 : 8192;
519 ctx->Const.MaxTextureMaxAnisotropy = 16.0;
520 ctx->Const.MaxTextureLodBias = 15.0;
521 ctx->Const.StripTextureBorder = true;
522 if (brw->gen >= 7) {
523 ctx->Const.MaxProgramTextureGatherComponents = 4;
524 ctx->Const.MinProgramTextureGatherOffset = -32;
525 ctx->Const.MaxProgramTextureGatherOffset = 31;
526 } else if (brw->gen == 6) {
527 ctx->Const.MaxProgramTextureGatherComponents = 1;
528 ctx->Const.MinProgramTextureGatherOffset = -8;
529 ctx->Const.MaxProgramTextureGatherOffset = 7;
530 }
531
532 ctx->Const.MaxUniformBlockSize = 65536;
533
534 for (int i = 0; i < MESA_SHADER_STAGES; i++) {
535 struct gl_program_constants *prog = &ctx->Const.Program[i];
536
537 if (!stage_exists[i])
538 continue;
539
540 prog->MaxTextureImageUnits = max_samplers;
541
542 prog->MaxUniformBlocks = BRW_MAX_UBO;
543 prog->MaxCombinedUniformComponents =
544 prog->MaxUniformComponents +
545 ctx->Const.MaxUniformBlockSize / 4 * prog->MaxUniformBlocks;
546
547 prog->MaxAtomicCounters = MAX_ATOMIC_COUNTERS;
548 prog->MaxAtomicBuffers = BRW_MAX_ABO;
549 prog->MaxImageUniforms = compiler->scalar_stage[i] ? BRW_MAX_IMAGES : 0;
550 prog->MaxShaderStorageBlocks = BRW_MAX_SSBO;
551 }
552
553 ctx->Const.MaxTextureUnits =
554 MIN2(ctx->Const.MaxTextureCoordUnits,
555 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits);
556
557 ctx->Const.MaxUniformBufferBindings = num_stages * BRW_MAX_UBO;
558 ctx->Const.MaxCombinedUniformBlocks = num_stages * BRW_MAX_UBO;
559 ctx->Const.MaxCombinedAtomicBuffers = num_stages * BRW_MAX_ABO;
560 ctx->Const.MaxCombinedShaderStorageBlocks = num_stages * BRW_MAX_SSBO;
561 ctx->Const.MaxShaderStorageBufferBindings = num_stages * BRW_MAX_SSBO;
562 ctx->Const.MaxCombinedTextureImageUnits = num_stages * max_samplers;
563 ctx->Const.MaxCombinedImageUniforms = num_stages * BRW_MAX_IMAGES;
564
565
566 /* Hardware only supports a limited number of transform feedback buffers.
567 * So we need to override the Mesa default (which is based only on software
568 * limits).
569 */
570 ctx->Const.MaxTransformFeedbackBuffers = BRW_MAX_SOL_BUFFERS;
571
572 /* On Gen6, in the worst case, we use up one binding table entry per
573 * transform feedback component (see comments above the definition of
574 * BRW_MAX_SOL_BINDINGS, in brw_context.h), so we need to advertise a value
575 * for MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS equal to
576 * BRW_MAX_SOL_BINDINGS.
577 *
578 * In "separate components" mode, we need to divide this value by
579 * BRW_MAX_SOL_BUFFERS, so that the total number of binding table entries
580 * used up by all buffers will not exceed BRW_MAX_SOL_BINDINGS.
581 */
582 ctx->Const.MaxTransformFeedbackInterleavedComponents = BRW_MAX_SOL_BINDINGS;
583 ctx->Const.MaxTransformFeedbackSeparateComponents =
584 BRW_MAX_SOL_BINDINGS / BRW_MAX_SOL_BUFFERS;
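   /* Worked example, assuming the usual definitions BRW_MAX_SOL_BINDINGS ==
    * 64 and BRW_MAX_SOL_BUFFERS == 4: interleaved mode may put all 64
    * components on one buffer, while separate mode advertises 64 / 4 == 16
    * components per buffer, so four fully loaded buffers still fit in the
    * binding table.
    */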
585
586 ctx->Const.AlwaysUseGetTransformFeedbackVertexCount =
587 !can_do_mi_math_and_lrr(brw->screen);
588
589 int max_samples;
590 const int *msaa_modes = intel_supported_msaa_modes(brw->screen);
591 const int clamp_max_samples =
592 driQueryOptioni(&brw->optionCache, "clamp_max_samples");
593
594 if (clamp_max_samples < 0) {
595 max_samples = msaa_modes[0];
596 } else {
597 /* Select the largest supported MSAA mode that does not exceed
598 * clamp_max_samples.
599 */
600 max_samples = 0;
601 for (int i = 0; msaa_modes[i] != 0; ++i) {
602 if (msaa_modes[i] <= clamp_max_samples) {
603 max_samples = msaa_modes[i];
604 break;
605 }
606 }
607 }
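   /* Worked example: intel_supported_msaa_modes() reports modes in
    * descending order, e.g. { 8, 4, 2, 0 } on a hypothetical part, so with
    * clamp_max_samples == 6 the loop above skips 8 and selects 4, the
    * largest mode not exceeding the clamp.
    */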
608
609 ctx->Const.MaxSamples = max_samples;
610 ctx->Const.MaxColorTextureSamples = max_samples;
611 ctx->Const.MaxDepthTextureSamples = max_samples;
612 ctx->Const.MaxIntegerSamples = max_samples;
613 ctx->Const.MaxImageSamples = 0;
614
615 /* gen6_set_sample_maps() sets SampleMap{2,4,8}x variables which are used
616 * to map indices of rectangular grid to sample numbers within a pixel.
617 * These variables are used by GL_EXT_framebuffer_multisample_blit_scaled
618 * extension implementation. For more details see the comment above
619 * gen6_set_sample_maps() definition.
620 */
621 gen6_set_sample_maps(ctx);
622
623 ctx->Const.MinLineWidth = 1.0;
624 ctx->Const.MinLineWidthAA = 1.0;
625 if (brw->gen >= 6) {
626 ctx->Const.MaxLineWidth = 7.375;
627 ctx->Const.MaxLineWidthAA = 7.375;
628 ctx->Const.LineWidthGranularity = 0.125;
629 } else {
630 ctx->Const.MaxLineWidth = 7.0;
631 ctx->Const.MaxLineWidthAA = 7.0;
632 ctx->Const.LineWidthGranularity = 0.5;
633 }
634
635 /* For non-antialiased lines, we have to round the line width to the
636 * nearest whole number. Make sure that we don't advertise a line
637 * width that, when rounded, will be beyond the actual hardware
638 * maximum.
639 */
640 assert(roundf(ctx->Const.MaxLineWidth) <= ctx->Const.MaxLineWidth);
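   /* E.g. with MaxLineWidth == 7.375, roundf() yields 7.0 and the assertion
    * holds; advertising 7.5 would round to 8.0, beyond the hardware limit,
    * and trip it.
    */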
641
642 ctx->Const.MinPointSize = 1.0;
643 ctx->Const.MinPointSizeAA = 1.0;
644 ctx->Const.MaxPointSize = 255.0;
645 ctx->Const.MaxPointSizeAA = 255.0;
646 ctx->Const.PointSizeGranularity = 1.0;
647
648 if (brw->gen >= 5 || brw->is_g4x)
649 ctx->Const.MaxClipPlanes = 8;
650
651 ctx->Const.GLSLTessLevelsAsInputs = true;
652 ctx->Const.LowerTCSPatchVerticesIn = brw->gen >= 8;
653 ctx->Const.LowerTESPatchVerticesIn = true;
654 ctx->Const.PrimitiveRestartForPatches = true;
655
656 ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeInstructions = 16 * 1024;
657 ctx->Const.Program[MESA_SHADER_VERTEX].MaxAluInstructions = 0;
658 ctx->Const.Program[MESA_SHADER_VERTEX].MaxTexInstructions = 0;
659 ctx->Const.Program[MESA_SHADER_VERTEX].MaxTexIndirections = 0;
660 ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAluInstructions = 0;
661 ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTexInstructions = 0;
662 ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTexIndirections = 0;
663 ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAttribs = 16;
664 ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTemps = 256;
665 ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAddressRegs = 1;
666 ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeParameters = 1024;
667 ctx->Const.Program[MESA_SHADER_VERTEX].MaxEnvParams =
668 MIN2(ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeParameters,
669 ctx->Const.Program[MESA_SHADER_VERTEX].MaxEnvParams);
670
671 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeInstructions = 1024;
672 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAluInstructions = 1024;
673 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTexInstructions = 1024;
674 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTexIndirections = 1024;
675 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAttribs = 12;
676 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTemps = 256;
677 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAddressRegs = 0;
678 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeParameters = 1024;
679 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxEnvParams =
680 MIN2(ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeParameters,
681 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxEnvParams);
682
683 /* Fragment shaders use real, 32-bit twos-complement integers for all
684 * integer types.
685 */
686 ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.RangeMin = 31;
687 ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.RangeMax = 30;
688 ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.Precision = 0;
689 ctx->Const.Program[MESA_SHADER_FRAGMENT].HighInt = ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt;
690 ctx->Const.Program[MESA_SHADER_FRAGMENT].MediumInt = ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt;
691
692 ctx->Const.Program[MESA_SHADER_VERTEX].LowInt.RangeMin = 31;
693 ctx->Const.Program[MESA_SHADER_VERTEX].LowInt.RangeMax = 30;
694 ctx->Const.Program[MESA_SHADER_VERTEX].LowInt.Precision = 0;
695 ctx->Const.Program[MESA_SHADER_VERTEX].HighInt = ctx->Const.Program[MESA_SHADER_VERTEX].LowInt;
696 ctx->Const.Program[MESA_SHADER_VERTEX].MediumInt = ctx->Const.Program[MESA_SHADER_VERTEX].LowInt;
697
 698    /* Gen6 converts quads to polygons at the beginning of the 3D pipeline,
 699     * but we're not sure how it's actually done for the vertex order, which
 700     * affects the provoking vertex decision. Always use the last-vertex
 701     * convention for quad primitives, which works as expected for now.
 702     */
703 if (brw->gen >= 6)
704 ctx->Const.QuadsFollowProvokingVertexConvention = false;
705
706 ctx->Const.NativeIntegers = true;
707 ctx->Const.VertexID_is_zero_based = true;
708
709 /* Regarding the CMP instruction, the Ivybridge PRM says:
710 *
711 * "For each enabled channel 0b or 1b is assigned to the appropriate flag
712 * bit and 0/all zeros or all ones (e.g, byte 0xFF, word 0xFFFF, DWord
713 * 0xFFFFFFFF) is assigned to dst."
714 *
715 * but PRMs for earlier generations say
716 *
717 * "In dword format, one GRF may store up to 8 results. When the register
718 * is used later as a vector of Booleans, as only LSB at each channel
719 * contains meaning [sic] data, software should make sure all higher bits
720 * are masked out (e.g. by 'and-ing' an [sic] 0x01 constant)."
721 *
722 * We select the representation of a true boolean uniform to be ~0, and fix
723 * the results of Gen <= 5 CMP instruction's with -(result & 1).
724 */
725 ctx->Const.UniformBooleanTrue = ~0;
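   /* Worked example of the Gen <= 5 fixup mentioned above: a CMP result of
    * 0x00000001 ("true") becomes -(0x00000001 & 1) == 0xFFFFFFFF == ~0,
    * matching UniformBooleanTrue, while 0x0 ("false") stays 0.
    */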
726
727 /* From the gen4 PRM, volume 4 page 127:
728 *
729 * "For SURFTYPE_BUFFER non-rendertarget surfaces, this field specifies
730 * the base address of the first element of the surface, computed in
731 * software by adding the surface base address to the byte offset of
732 * the element in the buffer."
733 *
734 * However, unaligned accesses are slower, so enforce buffer alignment.
735 */
736 ctx->Const.UniformBufferOffsetAlignment = 16;
737
738 /* ShaderStorageBufferOffsetAlignment should be a cacheline (64 bytes) so
739 * that we can safely have the CPU and GPU writing the same SSBO on
740 * non-cachecoherent systems (our Atom CPUs). With UBOs, the GPU never
741 * writes, so there's no problem. For an SSBO, the GPU and the CPU can
742 * be updating disjoint regions of the buffer simultaneously and that will
743 * break if the regions overlap the same cacheline.
744 */
745 ctx->Const.ShaderStorageBufferOffsetAlignment = 64;
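   /* E.g. if two SSBO bindings were allowed at offsets 0 and 32, a CPU write
    * to the first range and a GPU write to the second would land in the same
    * 64-byte cacheline and corrupt each other on a non-coherent Atom part;
    * 64-byte alignment rules that out.
    */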
746 ctx->Const.TextureBufferOffsetAlignment = 16;
747 ctx->Const.MaxTextureBufferSize = 128 * 1024 * 1024;
748
749 if (brw->gen >= 6) {
750 ctx->Const.MaxVarying = 32;
751 ctx->Const.Program[MESA_SHADER_VERTEX].MaxOutputComponents = 128;
752 ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxInputComponents = 64;
753 ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxOutputComponents = 128;
754 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxInputComponents = 128;
755 ctx->Const.Program[MESA_SHADER_TESS_CTRL].MaxInputComponents = 128;
756 ctx->Const.Program[MESA_SHADER_TESS_CTRL].MaxOutputComponents = 128;
757 ctx->Const.Program[MESA_SHADER_TESS_EVAL].MaxInputComponents = 128;
758 ctx->Const.Program[MESA_SHADER_TESS_EVAL].MaxOutputComponents = 128;
759 }
760
761 /* We want the GLSL compiler to emit code that uses condition codes */
762 for (int i = 0; i < MESA_SHADER_STAGES; i++) {
763 ctx->Const.ShaderCompilerOptions[i] =
764 brw->screen->compiler->glsl_compiler_options[i];
765 }
766
767 if (brw->gen >= 7) {
768 ctx->Const.MaxViewportWidth = 32768;
769 ctx->Const.MaxViewportHeight = 32768;
770 }
771
772 /* ARB_viewport_array, OES_viewport_array */
773 if (brw->gen >= 6) {
774 ctx->Const.MaxViewports = GEN6_NUM_VIEWPORTS;
775 ctx->Const.ViewportSubpixelBits = 0;
776
777 /* Cast to float before negating because MaxViewportWidth is unsigned.
778 */
779 ctx->Const.ViewportBounds.Min = -(float)ctx->Const.MaxViewportWidth;
780 ctx->Const.ViewportBounds.Max = ctx->Const.MaxViewportWidth;
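      /* E.g. with MaxViewportWidth == 32768: without the cast, the negation
       * would be computed in unsigned arithmetic, so -(uint32_t)32768 wraps
       * to 4294934528 before the implicit conversion to float, rather than
       * yielding the intended -32768.0f.
       */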
781 }
782
783 /* ARB_gpu_shader5 */
784 if (brw->gen >= 7)
785 ctx->Const.MaxVertexStreams = MIN2(4, MAX_VERTEX_STREAMS);
786
787 /* ARB_framebuffer_no_attachments */
788 ctx->Const.MaxFramebufferWidth = 16384;
789 ctx->Const.MaxFramebufferHeight = 16384;
790 ctx->Const.MaxFramebufferLayers = ctx->Const.MaxArrayTextureLayers;
791 ctx->Const.MaxFramebufferSamples = max_samples;
792
793 /* OES_primitive_bounding_box */
794 ctx->Const.NoPrimitiveBoundingBoxOutput = true;
795 }
796
797 static void
798 brw_initialize_cs_context_constants(struct brw_context *brw)
799 {
800 struct gl_context *ctx = &brw->ctx;
801 const struct intel_screen *screen = brw->screen;
802 struct gen_device_info *devinfo = &brw->screen->devinfo;
803
804 /* FINISHME: Do this for all platforms that the kernel supports */
805 if (brw->is_cherryview &&
806 screen->subslice_total > 0 && screen->eu_total > 0) {
807 /* Logical CS threads = EUs per subslice * 7 threads per EU */
808 uint32_t max_cs_threads = screen->eu_total / screen->subslice_total * 7;
809
810 /* Fuse configurations may give more threads than expected, never less. */
811 if (max_cs_threads > devinfo->max_cs_threads)
812 devinfo->max_cs_threads = max_cs_threads;
813 }
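   /* Worked example on a hypothetical Cherryview fusing: eu_total == 16 and
    * subslice_total == 2 give 16 / 2 * 7 == 56 logical threads, which only
    * replaces devinfo->max_cs_threads when it is larger.
    */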
814
815 /* Maximum number of scalar compute shader invocations that can be run in
816 * parallel in the same subslice assuming SIMD32 dispatch.
817 *
818 * We don't advertise more than 64 threads, because we are limited to 64 by
819 * our usage of thread_width_max in the gpgpu walker command. This only
820 * currently impacts Haswell, which otherwise might be able to advertise 70
821 * threads. With SIMD32 and 64 threads, Haswell still provides twice the
 822     * number of invocations required by ARB_compute_shader.
823 */
824 const unsigned max_threads = MIN2(64, devinfo->max_cs_threads);
825 const uint32_t max_invocations = 32 * max_threads;
826 ctx->Const.MaxComputeWorkGroupSize[0] = max_invocations;
827 ctx->Const.MaxComputeWorkGroupSize[1] = max_invocations;
828 ctx->Const.MaxComputeWorkGroupSize[2] = max_invocations;
829 ctx->Const.MaxComputeWorkGroupInvocations = max_invocations;
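   /* E.g. for the Haswell case described above: MIN2(64, 70) == 64 threads,
    * so max_invocations == 32 * 64 == 2048 -- twice the 1024 invocations
    * that ARB_compute_shader requires.
    */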
830 ctx->Const.MaxComputeSharedMemorySize = 64 * 1024;
831 }
832
833 /**
834 * Process driconf (drirc) options, setting appropriate context flags.
835 *
836 * intelInitExtensions still pokes at optionCache directly, in order to
837 * avoid advertising various extensions. No flags are set, so it makes
838 * sense to continue doing that there.
839 */
840 static void
841 brw_process_driconf_options(struct brw_context *brw)
842 {
843 struct gl_context *ctx = &brw->ctx;
844
845 driOptionCache *options = &brw->optionCache;
846 driParseConfigFiles(options, &brw->screen->optionCache,
847 brw->driContext->driScreenPriv->myNum, "i965");
848
849 int bo_reuse_mode = driQueryOptioni(options, "bo_reuse");
850 switch (bo_reuse_mode) {
851 case DRI_CONF_BO_REUSE_DISABLED:
852 break;
853 case DRI_CONF_BO_REUSE_ALL:
854 brw_bufmgr_enable_reuse(brw->bufmgr);
855 break;
856 }
857
858 if (INTEL_DEBUG & DEBUG_NO_HIZ) {
859 brw->has_hiz = false;
860 /* On gen6, you can only do separate stencil with HIZ. */
861 if (brw->gen == 6)
862 brw->has_separate_stencil = false;
863 }
864
865 if (driQueryOptionb(options, "always_flush_batch")) {
866 fprintf(stderr, "flushing batchbuffer before/after each draw call\n");
867 brw->always_flush_batch = true;
868 }
869
870 if (driQueryOptionb(options, "always_flush_cache")) {
871 fprintf(stderr, "flushing GPU caches before/after each draw call\n");
872 brw->always_flush_cache = true;
873 }
874
875 if (driQueryOptionb(options, "disable_throttling")) {
876 fprintf(stderr, "disabling flush throttling\n");
877 brw->disable_throttling = true;
878 }
879
880 brw->precompile = driQueryOptionb(&brw->optionCache, "shader_precompile");
881
882 if (driQueryOptionb(&brw->optionCache, "precise_trig"))
883 brw->screen->compiler->precise_trig = true;
884
885 ctx->Const.ForceGLSLExtensionsWarn =
886 driQueryOptionb(options, "force_glsl_extensions_warn");
887
888 ctx->Const.ForceGLSLVersion =
889 driQueryOptioni(options, "force_glsl_version");
890
891 ctx->Const.DisableGLSLLineContinuations =
892 driQueryOptionb(options, "disable_glsl_line_continuations");
893
894 ctx->Const.AllowGLSLExtensionDirectiveMidShader =
895 driQueryOptionb(options, "allow_glsl_extension_directive_midshader");
896
897 ctx->Const.AllowGLSLBuiltinVariableRedeclaration =
898 driQueryOptionb(options, "allow_glsl_builtin_variable_redeclaration");
899
900 ctx->Const.AllowHigherCompatVersion =
901 driQueryOptionb(options, "allow_higher_compat_version");
902
903 ctx->Const.ForceGLSLAbsSqrt =
904 driQueryOptionb(options, "force_glsl_abs_sqrt");
905
906 ctx->Const.GLSLZeroInit = driQueryOptionb(options, "glsl_zero_init");
907
908 brw->dual_color_blend_by_location =
909 driQueryOptionb(options, "dual_color_blend_by_location");
910 }
911
912 GLboolean
913 brwCreateContext(gl_api api,
914 const struct gl_config *mesaVis,
915 __DRIcontext *driContextPriv,
916 unsigned major_version,
917 unsigned minor_version,
918 uint32_t flags,
919 bool notify_reset,
920 unsigned *dri_ctx_error,
921 void *sharedContextPrivate)
922 {
923 struct gl_context *shareCtx = (struct gl_context *) sharedContextPrivate;
924 struct intel_screen *screen = driContextPriv->driScreenPriv->driverPrivate;
925 const struct gen_device_info *devinfo = &screen->devinfo;
926 struct dd_function_table functions;
927
928 /* Only allow the __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS flag if the kernel
929 * provides us with context reset notifications.
930 */
931 uint32_t allowed_flags = __DRI_CTX_FLAG_DEBUG
932 | __DRI_CTX_FLAG_FORWARD_COMPATIBLE;
933
934 if (screen->has_context_reset_notification)
935 allowed_flags |= __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS;
936
937 if (flags & ~allowed_flags) {
938 *dri_ctx_error = __DRI_CTX_ERROR_UNKNOWN_FLAG;
939 return false;
940 }
941
942 struct brw_context *brw = rzalloc(NULL, struct brw_context);
943 if (!brw) {
944 fprintf(stderr, "%s: failed to alloc context\n", __func__);
945 *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
946 return false;
947 }
948
949 driContextPriv->driverPrivate = brw;
950 brw->driContext = driContextPriv;
951 brw->screen = screen;
952 brw->bufmgr = screen->bufmgr;
953
954 brw->gen = devinfo->gen;
955 brw->gt = devinfo->gt;
956 brw->is_g4x = devinfo->is_g4x;
957 brw->is_baytrail = devinfo->is_baytrail;
958 brw->is_haswell = devinfo->is_haswell;
959 brw->is_cherryview = devinfo->is_cherryview;
960 brw->is_broxton = devinfo->is_broxton || devinfo->is_geminilake;
961 brw->has_llc = devinfo->has_llc;
962 brw->has_hiz = devinfo->has_hiz_and_separate_stencil;
963 brw->has_separate_stencil = devinfo->has_hiz_and_separate_stencil;
964 brw->has_pln = devinfo->has_pln;
965 brw->has_compr4 = devinfo->has_compr4;
966 brw->has_surface_tile_offset = devinfo->has_surface_tile_offset;
967 brw->has_negative_rhw_bug = devinfo->has_negative_rhw_bug;
968 brw->needs_unlit_centroid_workaround =
969 devinfo->needs_unlit_centroid_workaround;
970
971 brw->must_use_separate_stencil = devinfo->must_use_separate_stencil;
972 brw->has_swizzling = screen->hw_has_swizzling;
973
974 isl_device_init(&brw->isl_dev, devinfo, screen->hw_has_swizzling);
975
976 brw->vs.base.stage = MESA_SHADER_VERTEX;
977 brw->tcs.base.stage = MESA_SHADER_TESS_CTRL;
978 brw->tes.base.stage = MESA_SHADER_TESS_EVAL;
979 brw->gs.base.stage = MESA_SHADER_GEOMETRY;
980 brw->wm.base.stage = MESA_SHADER_FRAGMENT;
981 if (brw->gen >= 8) {
982 gen8_init_vtable_surface_functions(brw);
983 brw->vtbl.emit_depth_stencil_hiz = gen8_emit_depth_stencil_hiz;
984 } else if (brw->gen >= 7) {
985 gen7_init_vtable_surface_functions(brw);
986 brw->vtbl.emit_depth_stencil_hiz = gen7_emit_depth_stencil_hiz;
987 } else if (brw->gen >= 6) {
988 gen6_init_vtable_surface_functions(brw);
989 brw->vtbl.emit_depth_stencil_hiz = gen6_emit_depth_stencil_hiz;
990 } else {
991 gen4_init_vtable_surface_functions(brw);
992 brw->vtbl.emit_depth_stencil_hiz = brw_emit_depth_stencil_hiz;
993 }
994
995 brw_init_driver_functions(brw, &functions);
996
997 if (notify_reset)
998 functions.GetGraphicsResetStatus = brw_get_graphics_reset_status;
999
1000 struct gl_context *ctx = &brw->ctx;
1001
1002 if (!_mesa_initialize_context(ctx, api, mesaVis, shareCtx, &functions)) {
1003 *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
1004 fprintf(stderr, "%s: failed to init mesa context\n", __func__);
1005 intelDestroyContext(driContextPriv);
1006 return false;
1007 }
1008
1009 driContextSetFlags(ctx, flags);
1010
1011 /* Initialize the software rasterizer and helper modules.
1012 *
1013 * As of GL 3.1 core, the gen4+ driver doesn't need the swrast context for
1014 * software fallbacks (which we have to support on legacy GL to do weird
1015 * glDrawPixels(), glBitmap(), and other functions).
1016 */
1017 if (api != API_OPENGL_CORE && api != API_OPENGLES2) {
1018 _swrast_CreateContext(ctx);
1019 }
1020
1021 _vbo_CreateContext(ctx);
1022 if (ctx->swrast_context) {
1023 _tnl_CreateContext(ctx);
1024 TNL_CONTEXT(ctx)->Driver.RunPipeline = _tnl_run_pipeline;
1025 _swsetup_CreateContext(ctx);
1026
1027 /* Configure swrast to match hardware characteristics: */
1028 _swrast_allow_pixel_fog(ctx, false);
1029 _swrast_allow_vertex_fog(ctx, true);
1030 }
1031
1032 _mesa_meta_init(ctx);
1033
1034 brw_process_driconf_options(brw);
1035
1036 if (INTEL_DEBUG & DEBUG_PERF)
1037 brw->perf_debug = true;
1038
1039 brw_initialize_cs_context_constants(brw);
1040 brw_initialize_context_constants(brw);
1041
1042 ctx->Const.ResetStrategy = notify_reset
1043 ? GL_LOSE_CONTEXT_ON_RESET_ARB : GL_NO_RESET_NOTIFICATION_ARB;
1044
1045 /* Reinitialize the context point state. It depends on ctx->Const values. */
1046 _mesa_init_point(ctx);
1047
1048 intel_fbo_init(brw);
1049
1050 intel_batchbuffer_init(&brw->batch, brw->bufmgr, brw->has_llc);
1051
1052 if (brw->gen >= 6) {
1053 /* Create a new hardware context. Using a hardware context means that
1054 * our GPU state will be saved/restored on context switch, allowing us
1055 * to assume that the GPU is in the same state we left it in.
1056 *
1057 * This is required for transform feedback buffer offsets, query objects,
1058 * and also allows us to reduce how much state we have to emit.
1059 */
1060 brw->hw_ctx = brw_create_hw_context(brw->bufmgr);
1061
1062 if (!brw->hw_ctx) {
1063 fprintf(stderr, "Failed to create hardware context.\n");
1064 intelDestroyContext(driContextPriv);
1065 return false;
1066 }
1067 }
1068
1069 if (brw_init_pipe_control(brw, devinfo)) {
1070 *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
1071 intelDestroyContext(driContextPriv);
1072 return false;
1073 }
1074
1075 brw_init_state(brw);
1076
1077 intelInitExtensions(ctx);
1078
1079 brw_init_surface_formats(brw);
1080
1081 brw_blorp_init(brw);
1082
1083 brw->urb.size = devinfo->urb.size;
1084
1085 if (brw->gen == 6)
1086 brw->urb.gs_present = false;
1087
1088 brw->prim_restart.in_progress = false;
1089 brw->prim_restart.enable_cut_index = false;
1090 brw->gs.enabled = false;
1091 brw->clip.viewport_count = 1;
1092
1093 brw->predicate.state = BRW_PREDICATE_STATE_RENDER;
1094
1095 brw->max_gtt_map_object_size = screen->max_gtt_map_object_size;
1096
1097 ctx->VertexProgram._MaintainTnlProgram = true;
1098 ctx->FragmentProgram._MaintainTexEnvProgram = true;
1099
1100 brw_draw_init( brw );
1101
1102 if ((flags & __DRI_CTX_FLAG_DEBUG) != 0) {
1103 /* Turn on some extra GL_ARB_debug_output generation. */
1104 brw->perf_debug = true;
1105 }
1106
1107 if ((flags & __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS) != 0) {
1108 ctx->Const.ContextFlags |= GL_CONTEXT_FLAG_ROBUST_ACCESS_BIT_ARB;
1109 ctx->Const.RobustAccess = GL_TRUE;
1110 }
1111
1112 if (INTEL_DEBUG & DEBUG_SHADER_TIME)
1113 brw_init_shader_time(brw);
1114
1115 _mesa_compute_version(ctx);
1116
1117 _mesa_initialize_dispatch_tables(ctx);
1118 _mesa_initialize_vbo_vtxfmt(ctx);
1119
1120 if (ctx->Extensions.INTEL_performance_query)
1121 brw_init_performance_queries(brw);
1122
1123 vbo_use_buffer_objects(ctx);
1124 vbo_always_unmap_buffers(ctx);
1125
1126 return true;
1127 }
1128
1129 void
1130 intelDestroyContext(__DRIcontext * driContextPriv)
1131 {
1132 struct brw_context *brw =
1133 (struct brw_context *) driContextPriv->driverPrivate;
1134 struct gl_context *ctx = &brw->ctx;
1135
1136 _mesa_meta_free(&brw->ctx);
1137
1138 if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
1139 /* Force a report. */
1140 brw->shader_time.report_time = 0;
1141
1142 brw_collect_and_report_shader_time(brw);
1143 brw_destroy_shader_time(brw);
1144 }
1145
1146 if (brw->gen >= 6)
1147 blorp_finish(&brw->blorp);
1148
1149 brw_destroy_state(brw);
1150 brw_draw_destroy(brw);
1151
1152 brw_bo_unreference(brw->curbe.curbe_bo);
1153 if (brw->vs.base.scratch_bo)
1154 brw_bo_unreference(brw->vs.base.scratch_bo);
1155 if (brw->tcs.base.scratch_bo)
1156 brw_bo_unreference(brw->tcs.base.scratch_bo);
1157 if (brw->tes.base.scratch_bo)
1158 brw_bo_unreference(brw->tes.base.scratch_bo);
1159 if (brw->gs.base.scratch_bo)
1160 brw_bo_unreference(brw->gs.base.scratch_bo);
1161 if (brw->wm.base.scratch_bo)
1162 brw_bo_unreference(brw->wm.base.scratch_bo);
1163
1164 brw_destroy_hw_context(brw->bufmgr, brw->hw_ctx);
1165
1166 if (ctx->swrast_context) {
1167 _swsetup_DestroyContext(&brw->ctx);
1168 _tnl_DestroyContext(&brw->ctx);
1169 }
1170 _vbo_DestroyContext(&brw->ctx);
1171
1172 if (ctx->swrast_context)
1173 _swrast_DestroyContext(&brw->ctx);
1174
1175 brw_fini_pipe_control(brw);
1176 intel_batchbuffer_free(&brw->batch);
1177
1178 brw_bo_unreference(brw->throttle_batch[1]);
1179 brw_bo_unreference(brw->throttle_batch[0]);
1180 brw->throttle_batch[1] = NULL;
1181 brw->throttle_batch[0] = NULL;
1182
1183 driDestroyOptionCache(&brw->optionCache);
1184
1185 /* free the Mesa context */
1186 _mesa_free_context_data(&brw->ctx);
1187
1188 ralloc_free(brw);
1189 driContextPriv->driverPrivate = NULL;
1190 }
1191
1192 GLboolean
1193 intelUnbindContext(__DRIcontext * driContextPriv)
1194 {
 1195    /* Unset current context and dispatch table */
1196 _mesa_make_current(NULL, NULL, NULL);
1197
1198 return true;
1199 }
1200
1201 /**
 1202  * Fixes up the context for GLES2/3 with our default-to-sRGB-capable behavior
1203 * on window system framebuffers.
1204 *
1205 * Desktop GL is fairly reasonable in its handling of sRGB: You can ask if
1206 * your renderbuffer can do sRGB encode, and you can flip a switch that does
1207 * sRGB encode if the renderbuffer can handle it. You can ask specifically
1208 * for a visual where you're guaranteed to be capable, but it turns out that
1209 * everyone just makes all their ARGB8888 visuals capable and doesn't offer
1210 * incapable ones, because there's no difference between the two in resources
1211 * used. Applications thus get built that accidentally rely on the default
1212 * visual choice being sRGB, so we make ours sRGB capable. Everything sounds
1213 * great...
1214 *
1215 * But for GLES2/3, they decided that it was silly to not turn on sRGB encode
1216 * for sRGB renderbuffers you made with the GL_EXT_texture_sRGB equivalent.
1217 * So they removed the enable knob and made it "if the renderbuffer is sRGB
1218 * capable, do sRGB encode". Then, for your window system renderbuffers, you
1219 * can ask for sRGB visuals and get sRGB encode, or not ask for sRGB visuals
1220 * and get no sRGB encode (assuming that both kinds of visual are available).
1221 * Thus our choice to support sRGB by default on our visuals for desktop would
1222 * result in broken rendering of GLES apps that aren't expecting sRGB encode.
1223 *
1224 * Unfortunately, renderbuffer setup happens before a context is created. So
1225 * in intel_screen.c we always set up sRGB, and here, if you're a GLES2/3
1226 * context (without an sRGB visual, though we don't have sRGB visuals exposed
1227 * yet), we go turn that back off before anyone finds out.
1228 */
1229 static void
1230 intel_gles3_srgb_workaround(struct brw_context *brw,
1231 struct gl_framebuffer *fb)
1232 {
1233 struct gl_context *ctx = &brw->ctx;
1234
1235 if (_mesa_is_desktop_gl(ctx) || !fb->Visual.sRGBCapable)
1236 return;
1237
1238 /* Some day when we support the sRGB capable bit on visuals available for
1239 * GLES, we'll need to respect that and not disable things here.
1240 */
1241 fb->Visual.sRGBCapable = false;
1242 for (int i = 0; i < BUFFER_COUNT; i++) {
1243 struct gl_renderbuffer *rb = fb->Attachment[i].Renderbuffer;
1244 if (rb)
1245 rb->Format = _mesa_get_srgb_format_linear(rb->Format);
1246 }
1247 }
1248
1249 GLboolean
1250 intelMakeCurrent(__DRIcontext * driContextPriv,
1251 __DRIdrawable * driDrawPriv,
1252 __DRIdrawable * driReadPriv)
1253 {
1254 struct brw_context *brw;
1255 GET_CURRENT_CONTEXT(curCtx);
1256
1257 if (driContextPriv)
1258 brw = (struct brw_context *) driContextPriv->driverPrivate;
1259 else
1260 brw = NULL;
1261
1262 /* According to the glXMakeCurrent() man page: "Pending commands to
1263 * the previous context, if any, are flushed before it is released."
1264 * But only flush if we're actually changing contexts.
1265 */
1266 if (brw_context(curCtx) && brw_context(curCtx) != brw) {
1267 _mesa_flush(curCtx);
1268 }
1269
1270 if (driContextPriv) {
1271 struct gl_context *ctx = &brw->ctx;
1272 struct gl_framebuffer *fb, *readFb;
1273
1274 if (driDrawPriv == NULL) {
1275 fb = _mesa_get_incomplete_framebuffer();
1276 } else {
1277 fb = driDrawPriv->driverPrivate;
1278 driContextPriv->dri2.draw_stamp = driDrawPriv->dri2.stamp - 1;
1279 }
1280
1281 if (driReadPriv == NULL) {
1282 readFb = _mesa_get_incomplete_framebuffer();
1283 } else {
1284 readFb = driReadPriv->driverPrivate;
1285 driContextPriv->dri2.read_stamp = driReadPriv->dri2.stamp - 1;
1286 }
1287
1288 /* The sRGB workaround changes the renderbuffer's format. We must change
 1289       * the format before the renderbuffer's miptree gets allocated, otherwise
1290 * the formats of the renderbuffer and its miptree will differ.
1291 */
1292 intel_gles3_srgb_workaround(brw, fb);
1293 intel_gles3_srgb_workaround(brw, readFb);
1294
1295 /* If the context viewport hasn't been initialized, force a call out to
1296 * the loader to get buffers so we have a drawable size for the initial
1297 * viewport. */
1298 if (!brw->ctx.ViewportInitialized)
1299 intel_prepare_render(brw);
1300
1301 _mesa_make_current(ctx, fb, readFb);
1302 } else {
1303 _mesa_make_current(NULL, NULL, NULL);
1304 }
1305
1306 return true;
1307 }
1308
1309 void
1310 intel_resolve_for_dri2_flush(struct brw_context *brw,
1311 __DRIdrawable *drawable)
1312 {
1313 if (brw->gen < 6) {
1314 /* MSAA and fast color clear are not supported, so don't waste time
1315 * checking whether a resolve is needed.
1316 */
1317 return;
1318 }
1319
1320 struct gl_framebuffer *fb = drawable->driverPrivate;
1321 struct intel_renderbuffer *rb;
1322
1323 /* Usually, only the back buffer will need to be downsampled. However,
1324 * the front buffer will also need it if the user has rendered into it.
1325 */
1326 static const gl_buffer_index buffers[2] = {
1327 BUFFER_BACK_LEFT,
1328 BUFFER_FRONT_LEFT,
1329 };
1330
1331 for (int i = 0; i < 2; ++i) {
1332 rb = intel_get_renderbuffer(fb, buffers[i]);
1333 if (rb == NULL || rb->mt == NULL)
1334 continue;
1335 if (rb->mt->num_samples <= 1) {
1336 assert(rb->mt_layer == 0 && rb->mt_level == 0 &&
1337 rb->layer_count == 1);
1338 intel_miptree_prepare_access(brw, rb->mt, 0, 1, 0, 1, false, false);
1339 } else {
1340 intel_renderbuffer_downsample(brw, rb);
1341 }
1342 }
1343 }
1344
1345 static unsigned
1346 intel_bits_per_pixel(const struct intel_renderbuffer *rb)
1347 {
1348 return _mesa_get_format_bytes(intel_rb_format(rb)) * 8;
1349 }
1350
1351 static void
1352 intel_query_dri2_buffers(struct brw_context *brw,
1353 __DRIdrawable *drawable,
1354 __DRIbuffer **buffers,
1355 int *count);
1356
1357 static void
1358 intel_process_dri2_buffer(struct brw_context *brw,
1359 __DRIdrawable *drawable,
1360 __DRIbuffer *buffer,
1361 struct intel_renderbuffer *rb,
1362 const char *buffer_name);
1363
1364 static void
1365 intel_update_image_buffers(struct brw_context *brw, __DRIdrawable *drawable);
1366
1367 static void
1368 intel_update_dri2_buffers(struct brw_context *brw, __DRIdrawable *drawable)
1369 {
1370 struct gl_framebuffer *fb = drawable->driverPrivate;
1371 struct intel_renderbuffer *rb;
1372 __DRIbuffer *buffers = NULL;
1373 int count;
1374 const char *region_name;
1375
1376 /* Set this up front, so that in case our buffers get invalidated
1377 * while we're getting new buffers, we don't clobber the stamp and
1378 * thus ignore the invalidate. */
1379 drawable->lastStamp = drawable->dri2.stamp;
1380
1381 if (unlikely(INTEL_DEBUG & DEBUG_DRI))
1382 fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);
1383
1384 intel_query_dri2_buffers(brw, drawable, &buffers, &count);
1385
1386 if (buffers == NULL)
1387 return;
1388
1389 for (int i = 0; i < count; i++) {
1390 switch (buffers[i].attachment) {
1391 case __DRI_BUFFER_FRONT_LEFT:
1392 rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
1393 region_name = "dri2 front buffer";
1394 break;
1395
1396 case __DRI_BUFFER_FAKE_FRONT_LEFT:
1397 rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
1398 region_name = "dri2 fake front buffer";
1399 break;
1400
1401 case __DRI_BUFFER_BACK_LEFT:
1402 rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
1403 region_name = "dri2 back buffer";
1404 break;
1405
1406 case __DRI_BUFFER_DEPTH:
1407 case __DRI_BUFFER_HIZ:
1408 case __DRI_BUFFER_DEPTH_STENCIL:
1409 case __DRI_BUFFER_STENCIL:
1410 case __DRI_BUFFER_ACCUM:
1411 default:
1412 fprintf(stderr,
1413 "unhandled buffer attach event, attachment type %d\n",
1414 buffers[i].attachment);
1415 return;
1416 }
1417
1418 intel_process_dri2_buffer(brw, drawable, &buffers[i], rb, region_name);
1419 }
1420
1421 }
1422
1423 void
1424 intel_update_renderbuffers(__DRIcontext *context, __DRIdrawable *drawable)
1425 {
1426 struct brw_context *brw = context->driverPrivate;
1427 __DRIscreen *dri_screen = brw->screen->driScrnPriv;
1428
1429 /* Set this up front, so that in case our buffers get invalidated
1430 * while we're getting new buffers, we don't clobber the stamp and
1431 * thus ignore the invalidate. */
1432 drawable->lastStamp = drawable->dri2.stamp;
1433
1434 if (unlikely(INTEL_DEBUG & DEBUG_DRI))
1435 fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);
1436
1437 if (dri_screen->image.loader)
1438 intel_update_image_buffers(brw, drawable);
1439 else
1440 intel_update_dri2_buffers(brw, drawable);
1441
1442 driUpdateFramebufferSize(&brw->ctx, drawable);
1443 }
1444
1445 /**
 1446  * intel_prepare_render should be called anywhere that current read/drawbuffer
1447 * state is required.
1448 */
1449 void
1450 intel_prepare_render(struct brw_context *brw)
1451 {
1452 struct gl_context *ctx = &brw->ctx;
1453 __DRIcontext *driContext = brw->driContext;
1454 __DRIdrawable *drawable;
1455
1456 drawable = driContext->driDrawablePriv;
1457 if (drawable && drawable->dri2.stamp != driContext->dri2.draw_stamp) {
1458 if (drawable->lastStamp != drawable->dri2.stamp)
1459 intel_update_renderbuffers(driContext, drawable);
1460 driContext->dri2.draw_stamp = drawable->dri2.stamp;
1461 }
1462
1463 drawable = driContext->driReadablePriv;
1464 if (drawable && drawable->dri2.stamp != driContext->dri2.read_stamp) {
1465 if (drawable->lastStamp != drawable->dri2.stamp)
1466 intel_update_renderbuffers(driContext, drawable);
1467 driContext->dri2.read_stamp = drawable->dri2.stamp;
1468 }
1469
1470 /* If we're currently rendering to the front buffer, the rendering
1471 * that will happen next will probably dirty the front buffer. So
1472 * mark it as dirty here.
1473 */
1474 if (_mesa_is_front_buffer_drawing(ctx->DrawBuffer))
1475 brw->front_buffer_dirty = true;
1476 }
1477
1478 /**
1479 * \brief Query DRI2 to obtain a DRIdrawable's buffers.
1480 *
1481 * To determine which DRI buffers to request, examine the renderbuffers
1482 * attached to the drawable's framebuffer. Then request the buffers with
1483 * DRI2GetBuffers() or DRI2GetBuffersWithFormat().
1484 *
1485 * This is called from intel_update_renderbuffers().
1486 *
1487 * \param drawable Drawable whose buffers are queried.
1488 * \param buffers [out] List of buffers returned by DRI2 query.
1489 * \param buffer_count [out] Number of buffers returned.
1490 *
1491 * \see intel_update_renderbuffers()
1492 * \see DRI2GetBuffers()
1493 * \see DRI2GetBuffersWithFormat()
1494 */
1495 static void
1496 intel_query_dri2_buffers(struct brw_context *brw,
1497 __DRIdrawable *drawable,
1498 __DRIbuffer **buffers,
1499 int *buffer_count)
1500 {
1501 __DRIscreen *dri_screen = brw->screen->driScrnPriv;
1502 struct gl_framebuffer *fb = drawable->driverPrivate;
1503 int i = 0;
1504 unsigned attachments[8];
1505
1506 struct intel_renderbuffer *front_rb;
1507 struct intel_renderbuffer *back_rb;
1508
1509 front_rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
1510 back_rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
1511
1512 memset(attachments, 0, sizeof(attachments));
1513 if ((_mesa_is_front_buffer_drawing(fb) ||
1514 _mesa_is_front_buffer_reading(fb) ||
1515 !back_rb) && front_rb) {
1516 /* If a fake front buffer is in use, then querying for
1517 * __DRI_BUFFER_FRONT_LEFT will cause the server to copy the image from
1518 * the real front buffer to the fake front buffer. So before doing the
1519 * query, we need to make sure all the pending drawing has landed in the
1520 * real front buffer.
1521 */
1522 intel_batchbuffer_flush(brw);
1523 intel_flush_front(&brw->ctx);
1524
1525 attachments[i++] = __DRI_BUFFER_FRONT_LEFT;
1526 attachments[i++] = intel_bits_per_pixel(front_rb);
1527 } else if (front_rb && brw->front_buffer_dirty) {
1528 /* We have pending front buffer rendering, but we aren't querying for a
1529 * front buffer. If the front buffer we have is a fake front buffer,
1530 * the X server is going to throw it away when it processes the query.
1531 * So before doing the query, make sure all the pending drawing has
1532 * landed in the real front buffer.
1533 */
1534 intel_batchbuffer_flush(brw);
1535 intel_flush_front(&brw->ctx);
1536 }
1537
1538 if (back_rb) {
1539 attachments[i++] = __DRI_BUFFER_BACK_LEFT;
1540 attachments[i++] = intel_bits_per_pixel(back_rb);
1541 }
1542
1543 assert(i <= ARRAY_SIZE(attachments));
1544
1545 *buffers =
1546 dri_screen->dri2.loader->getBuffersWithFormat(drawable,
1547 &drawable->w,
1548 &drawable->h,
1549 attachments, i / 2,
1550 buffer_count,
1551 drawable->loaderPrivate);
1552 }
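/* For reference: attachments[] above is filled with (attachment token,
 * bits-per-pixel) pairs, which is why the request count passed to the
 * loader is i / 2. A typical double-buffered ARGB8888 drawable with no
 * front-buffer access would request just { __DRI_BUFFER_BACK_LEFT, 32 },
 * i.e. one attachment pair.
 */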
1553
1554 /**
1555 * \brief Assign a DRI buffer's DRM region to a renderbuffer.
1556 *
1557 * This is called from intel_update_renderbuffers().
1558 *
1559 * \par Note:
1560 * DRI buffers whose attachment point is DRI2BufferStencil or
1561 * DRI2BufferDepthStencil are handled as special cases.
1562 *
1563 * \param buffer_name is a human readable name, such as "dri2 front buffer",
1564 * that is passed to brw_bo_gem_create_from_name().
1565 *
1566 * \see intel_update_renderbuffers()
1567 */
1568 static void
1569 intel_process_dri2_buffer(struct brw_context *brw,
1570 __DRIdrawable *drawable,
1571 __DRIbuffer *buffer,
1572 struct intel_renderbuffer *rb,
1573 const char *buffer_name)
1574 {
1575 struct gl_framebuffer *fb = drawable->driverPrivate;
1576 struct brw_bo *bo;
1577
1578 if (!rb)
1579 return;
1580
1581 unsigned num_samples = rb->Base.Base.NumSamples;
1582
1583 /* We try to avoid closing and reopening the same BO name, because the first
1584 * use of a mapping of the buffer involves a bunch of page faulting which is
1585 * moderately expensive.
1586 */
1587 struct intel_mipmap_tree *last_mt;
1588 if (num_samples == 0)
1589 last_mt = rb->mt;
1590 else
1591 last_mt = rb->singlesample_mt;
1592
1593 uint32_t old_name = 0;
1594 if (last_mt) {
1595 /* The bo already has a name because the miptree was created by a
1596 * previous call to intel_process_dri2_buffer(). If a bo already has a
1597 * name, then brw_bo_flink() is a low-cost getter. It does not
1598 * create a new name.
1599 */
1600 brw_bo_flink(last_mt->bo, &old_name);
1601 }
1602
1603 if (old_name == buffer->name)
1604 return;
1605
1606 if (unlikely(INTEL_DEBUG & DEBUG_DRI)) {
1607 fprintf(stderr,
1608 "attaching buffer %d, at %d, cpp %d, pitch %d\n",
1609 buffer->name, buffer->attachment,
1610 buffer->cpp, buffer->pitch);
1611 }
1612
1613 bo = brw_bo_gem_create_from_name(brw->bufmgr, buffer_name,
1614 buffer->name);
1615 if (!bo) {
1616 fprintf(stderr,
1617 "Failed to open BO for returned DRI2 buffer "
1618 "(%dx%d, %s, named %d).\n"
1619 "This is likely a bug in the X Server that will lead to a "
1620 "crash soon.\n",
1621 drawable->w, drawable->h, buffer_name, buffer->name);
1622 return;
1623 }
1624
1625 intel_update_winsys_renderbuffer_miptree(brw, rb, bo,
1626 drawable->w, drawable->h,
1627 buffer->pitch);
1628
1629 if (_mesa_is_front_buffer_drawing(fb) &&
1630 (buffer->attachment == __DRI_BUFFER_FRONT_LEFT ||
1631 buffer->attachment == __DRI_BUFFER_FAKE_FRONT_LEFT) &&
1632 rb->Base.Base.NumSamples > 1) {
1633 intel_renderbuffer_upsample(brw, rb);
1634 }
1635
1636 assert(rb->mt);
1637
1638 brw_bo_unreference(bo);
1639 }
1640
1641 /**
1642 * \brief Query DRI image loader to obtain a DRIdrawable's buffers.
1643 *
1644 * To determine which DRI buffers to request, examine the renderbuffers
1645 * attached to the drawable's framebuffer. Then request the buffers from
1646 * the image loader
1647 *
1648 * This is called from intel_update_renderbuffers().
1649 *
1650 * \param drawable Drawable whose buffers are queried.
1651 * \param buffers [out] List of buffers returned by DRI2 query.
1652 * \param buffer_count [out] Number of buffers returned.
1653 *
1654 * \see intel_update_renderbuffers()
1655 */
1656
1657 static void
1658 intel_update_image_buffer(struct brw_context *intel,
1659 __DRIdrawable *drawable,
1660 struct intel_renderbuffer *rb,
1661 __DRIimage *buffer,
1662 enum __DRIimageBufferMask buffer_type)
1663 {
1664 struct gl_framebuffer *fb = drawable->driverPrivate;
1665
1666 if (!rb || !buffer->bo)
1667 return;
1668
1669 unsigned num_samples = rb->Base.Base.NumSamples;
1670
1671 /* Check and see if we're already bound to the right
1672 * buffer object
1673 */
1674 struct intel_mipmap_tree *last_mt;
1675 if (num_samples == 0)
1676 last_mt = rb->mt;
1677 else
1678 last_mt = rb->singlesample_mt;
1679
1680 if (last_mt && last_mt->bo == buffer->bo)
1681 return;
1682
1683 intel_update_winsys_renderbuffer_miptree(intel, rb, buffer->bo,
1684 buffer->width, buffer->height,
1685 buffer->pitch);
1686
1687 if (_mesa_is_front_buffer_drawing(fb) &&
1688 buffer_type == __DRI_IMAGE_BUFFER_FRONT &&
1689 rb->Base.Base.NumSamples > 1) {
1690 intel_renderbuffer_upsample(intel, rb);
1691 }
1692 }
1693
1694 static void
1695 intel_update_image_buffers(struct brw_context *brw, __DRIdrawable *drawable)
1696 {
1697 struct gl_framebuffer *fb = drawable->driverPrivate;
1698 __DRIscreen *dri_screen = brw->screen->driScrnPriv;
1699 struct intel_renderbuffer *front_rb;
1700 struct intel_renderbuffer *back_rb;
1701 struct __DRIimageList images;
1702 mesa_format format;
1703 uint32_t buffer_mask = 0;
1704 int ret;
1705
1706 front_rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
1707 back_rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
1708
1709 if (back_rb)
1710 format = intel_rb_format(back_rb);
1711 else if (front_rb)
1712 format = intel_rb_format(front_rb);
1713 else
1714 return;
1715
1716 if (front_rb && (_mesa_is_front_buffer_drawing(fb) ||
1717 _mesa_is_front_buffer_reading(fb) || !back_rb)) {
1718 buffer_mask |= __DRI_IMAGE_BUFFER_FRONT;
1719 }
1720
1721 if (back_rb)
1722 buffer_mask |= __DRI_IMAGE_BUFFER_BACK;
1723
1724 ret = dri_screen->image.loader->getBuffers(drawable,
1725 driGLFormatToImageFormat(format),
1726 &drawable->dri2.stamp,
1727 drawable->loaderPrivate,
1728 buffer_mask,
1729 &images);
1730 if (!ret)
1731 return;
1732
1733 if (images.image_mask & __DRI_IMAGE_BUFFER_FRONT) {
1734 drawable->w = images.front->width;
1735 drawable->h = images.front->height;
1736 intel_update_image_buffer(brw,
1737 drawable,
1738 front_rb,
1739 images.front,
1740 __DRI_IMAGE_BUFFER_FRONT);
1741 }
1742
1743 if (images.image_mask & __DRI_IMAGE_BUFFER_BACK) {
1744 drawable->w = images.back->width;
1745 drawable->h = images.back->height;
1746 intel_update_image_buffer(brw,
1747 drawable,
1748 back_rb,
1749 images.back,
1750 __DRI_IMAGE_BUFFER_BACK);
1751 }
1752 }