i965: Remove an unneeded render_cache_set_check_flush
[mesa.git] / src / mesa / drivers / dri / i965 / brw_context.c
1 /*
2 Copyright 2003 VMware, Inc.
3 Copyright (C) Intel Corp. 2006. All Rights Reserved.
4 Intel funded Tungsten Graphics to
5 develop this 3D driver.
6
7 Permission is hereby granted, free of charge, to any person obtaining
8 a copy of this software and associated documentation files (the
9 "Software"), to deal in the Software without restriction, including
10 without limitation the rights to use, copy, modify, merge, publish,
11 distribute, sublicense, and/or sell copies of the Software, and to
12 permit persons to whom the Software is furnished to do so, subject to
13 the following conditions:
14
15 The above copyright notice and this permission notice (including the
16 next paragraph) shall be included in all copies or substantial
17 portions of the Software.
18
19 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
20 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
22 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
23 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
24 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
25 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26
27 **********************************************************************/
28 /*
29 * Authors:
30 * Keith Whitwell <keithw@vmware.com>
31 */
32
33
34 #include "compiler/nir/nir.h"
35 #include "main/api_exec.h"
36 #include "main/context.h"
37 #include "main/fbobject.h"
38 #include "main/extensions.h"
39 #include "main/imports.h"
40 #include "main/macros.h"
41 #include "main/points.h"
42 #include "main/version.h"
43 #include "main/vtxfmt.h"
44 #include "main/texobj.h"
45 #include "main/framebuffer.h"
46
47 #include "vbo/vbo_context.h"
48
49 #include "drivers/common/driverfuncs.h"
50 #include "drivers/common/meta.h"
51 #include "utils.h"
52
53 #include "brw_context.h"
54 #include "brw_defines.h"
55 #include "brw_blorp.h"
56 #include "brw_draw.h"
57 #include "brw_state.h"
58
59 #include "intel_batchbuffer.h"
60 #include "intel_buffer_objects.h"
61 #include "intel_buffers.h"
62 #include "intel_fbo.h"
63 #include "intel_mipmap_tree.h"
64 #include "intel_pixel.h"
65 #include "intel_image.h"
66 #include "intel_tex.h"
67 #include "intel_tex_obj.h"
68
69 #include "swrast_setup/swrast_setup.h"
70 #include "tnl/tnl.h"
71 #include "tnl/t_pipeline.h"
72 #include "util/ralloc.h"
73 #include "util/debug.h"
74 #include "isl/isl.h"
75
76 /***************************************
77 * Mesa's Driver Functions
78 ***************************************/
79
80 const char *const brw_vendor_string = "Intel Open Source Technology Center";
81
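/* Braswell is branded by EU count rather than PCI ID alone: 16 EUs map to
 * a "405" model number and 12 EUs to "400"; anything else gets a blank
 * model string.
 */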
82 static const char *
83 get_bsw_model(const struct intel_screen *screen)
84 {
85 switch (screen->eu_total) {
86 case 16:
87 return "405";
88 case 12:
89 return "400";
90 default:
91 return " ";
92 }
93 }
94
95 const char *
96 brw_get_renderer_string(const struct intel_screen *screen)
97 {
98 const char *chipset;
99 static char buffer[128];
100 char *bsw = NULL;
101
102 switch (screen->deviceID) {
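/* pci_ids/i965_pci_ids.h invokes CHIPSET(id, symbol, str) once per known
 * device, so redefining CHIPSET here generates one case label per PCI ID.
 */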
103 #undef CHIPSET
104 #define CHIPSET(id, symbol, str) case id: chipset = str; break;
105 #include "pci_ids/i965_pci_ids.h"
106 default:
107 chipset = "Unknown Intel Chipset";
108 break;
109 }
110
111 /* Braswell branding is funny, so we have to fix it up here */
112 if (screen->deviceID == 0x22B1) {
113 bsw = strdup(chipset);
114 char *needle = strstr(bsw, "XXX");
115 if (needle) {
116 memcpy(needle, get_bsw_model(screen), 3);
117 chipset = bsw;
118 }
119 }
120
121 (void) driGetRendererString(buffer, chipset, 0);
122 free(bsw);
123 return buffer;
124 }
125
126 static const GLubyte *
127 intel_get_string(struct gl_context * ctx, GLenum name)
128 {
129 const struct brw_context *const brw = brw_context(ctx);
130
131 switch (name) {
132 case GL_VENDOR:
133 return (GLubyte *) brw_vendor_string;
134
135 case GL_RENDERER:
136 return
137 (GLubyte *) brw_get_renderer_string(brw->screen);
138
139 default:
140 return NULL;
141 }
142 }
143
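/* Viewport hook: for window-system framebuffers, invalidate the drawables
 * so that the next intel_prepare_render() fetches new buffers after a
 * resize. Only installed when the loader lacks DRI2 invalidate support;
 * see brw_init_driver_functions().
 */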
144 static void
145 intel_viewport(struct gl_context *ctx)
146 {
147 struct brw_context *brw = brw_context(ctx);
148 __DRIcontext *driContext = brw->driContext;
149
150 if (_mesa_is_winsys_fbo(ctx->DrawBuffer)) {
151 if (driContext->driDrawablePriv)
152 dri2InvalidateDrawable(driContext->driDrawablePriv);
153 if (driContext->driReadablePriv)
154 dri2InvalidateDrawable(driContext->driReadablePriv);
155 }
156 }
157
158 static void
159 intel_update_framebuffer(struct gl_context *ctx,
160 struct gl_framebuffer *fb)
161 {
162 struct brw_context *brw = brw_context(ctx);
163
164 /* Quantize the derived default number of samples
165 */
166 fb->DefaultGeometry._NumSamples =
167 intel_quantize_num_samples(brw->screen,
168 fb->DefaultGeometry.NumSamples);
169 }
170
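/* Mark the aux (color compression) buffer disabled for every currently
 * bound color draw buffer whose miptree shares this BO, and report whether
 * any was found. Callers use this to avoid sampling a compressed surface
 * that is simultaneously bound for rendering.
 */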
171 static bool
172 intel_disable_rb_aux_buffer(struct brw_context *brw, const struct brw_bo *bo)
173 {
174 const struct gl_framebuffer *fb = brw->ctx.DrawBuffer;
175 bool found = false;
176
177 for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
178 const struct intel_renderbuffer *irb =
179 intel_renderbuffer(fb->_ColorDrawBuffers[i]);
180
181 if (irb && irb->mt->bo == bo) {
182 found = brw->draw_aux_buffer_disabled[i] = true;
183 }
184 }
185
186 return found;
187 }
188
189 static void
190 intel_update_state(struct gl_context * ctx, GLuint new_state)
191 {
192 struct brw_context *brw = brw_context(ctx);
193 struct intel_texture_object *tex_obj;
194 struct intel_renderbuffer *depth_irb;
195
196 if (ctx->swrast_context)
197 _swrast_InvalidateState(ctx, new_state);
198 _vbo_InvalidateState(ctx, new_state);
199
200 brw->NewGLState |= new_state;
201
202 _mesa_unlock_context_textures(ctx);
203
204 /* Resolve the depth buffer's HiZ buffer. */
205 depth_irb = intel_get_renderbuffer(ctx->DrawBuffer, BUFFER_DEPTH);
206 if (depth_irb && depth_irb->mt) {
207 intel_miptree_slice_resolve_hiz(brw, depth_irb->mt,
208 depth_irb->mt_level,
209 depth_irb->mt_layer);
210 }
211
212 memset(brw->draw_aux_buffer_disabled, 0,
213 sizeof(brw->draw_aux_buffer_disabled));
214
215 /* Resolve depth buffer and render cache of each enabled texture. */
216 int maxEnabledUnit = ctx->Texture._MaxEnabledTexImageUnit;
217 for (int i = 0; i <= maxEnabledUnit; i++) {
218 if (!ctx->Texture.Unit[i]._Current)
219 continue;
220 tex_obj = intel_texture_object(ctx->Texture.Unit[i]._Current);
221 if (!tex_obj || !tex_obj->mt)
222 continue;
223
224          /* We need intel_texture_object::_Format to be valid */
225 intel_finalize_mipmap_tree(brw, i);
226
227 bool aux_supported;
228 intel_miptree_prepare_texture(brw, tex_obj->mt, tex_obj->_Format,
229 &aux_supported);
230
231 if (!aux_supported && brw->gen >= 9 &&
232 intel_disable_rb_aux_buffer(brw, tex_obj->mt->bo)) {
233 perf_debug("Sampling renderbuffer with non-compressible format - "
234 "turning off compression");
235 }
236
237 brw_render_cache_set_check_flush(brw, tex_obj->mt->bo);
238
239 if (tex_obj->base.StencilSampling ||
240 tex_obj->mt->format == MESA_FORMAT_S_UINT8) {
241 intel_update_r8stencil(brw, tex_obj->mt);
242 }
243 }
244
245 /* Resolve color for each active shader image. */
246 for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
247 const struct gl_program *prog = ctx->_Shader->CurrentProgram[i];
248
249 if (unlikely(prog && prog->info.num_images)) {
250 for (unsigned j = 0; j < prog->info.num_images; j++) {
251 struct gl_image_unit *u =
252 &ctx->ImageUnits[prog->sh.ImageUnits[j]];
253 tex_obj = intel_texture_object(u->TexObj);
254
255 if (tex_obj && tex_obj->mt) {
256 /* Access to images is implemented using indirect messages
257              * against the data port. Normal render target writes understand
258              * lossless compression, but unfortunately the typed/untyped
259              * read/write interface doesn't. Therefore even losslessly
260              * compressed surfaces need to be resolved prior to accessing
261 * them. Hence skip setting INTEL_MIPTREE_IGNORE_CCS_E.
262 */
263 intel_miptree_all_slices_resolve_color(brw, tex_obj->mt, 0);
264
265 if (intel_miptree_is_lossless_compressed(brw, tex_obj->mt) &&
266 intel_disable_rb_aux_buffer(brw, tex_obj->mt->bo)) {
267 perf_debug("Using renderbuffer as shader image - turning "
268 "off lossless compression");
269 }
270
271 brw_render_cache_set_check_flush(brw, tex_obj->mt->bo);
272 }
273 }
274 }
275 }
276
277 /* Resolve color buffers for non-coherent framebuffer fetch. */
278 if (!ctx->Extensions.MESA_shader_framebuffer_fetch &&
279 ctx->FragmentProgram._Current &&
280 ctx->FragmentProgram._Current->info.outputs_read) {
281 const struct gl_framebuffer *fb = ctx->DrawBuffer;
282
283 for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
284 const struct intel_renderbuffer *irb =
285 intel_renderbuffer(fb->_ColorDrawBuffers[i]);
286
287 if (irb) {
288 intel_miptree_resolve_color(brw, irb->mt,
289 irb->mt_level, 1,
290 irb->mt_layer, irb->layer_count,
291 INTEL_MIPTREE_IGNORE_CCS_E);
292 }
293 }
294 }
295
296 struct gl_framebuffer *fb = ctx->DrawBuffer;
297 for (int i = 0; i < fb->_NumColorDrawBuffers; i++) {
298 struct intel_renderbuffer *irb =
299 intel_renderbuffer(fb->_ColorDrawBuffers[i]);
300
301 if (irb == NULL || irb->mt == NULL)
302 continue;
303
304 intel_miptree_prepare_render(brw, irb->mt, irb->mt_level,
305 irb->mt_layer, irb->layer_count,
306 ctx->Color.sRGBEnabled);
307 }
308
309 _mesa_lock_context_textures(ctx);
310
311 if (new_state & _NEW_BUFFERS) {
312 intel_update_framebuffer(ctx, ctx->DrawBuffer);
313 if (ctx->DrawBuffer != ctx->ReadBuffer)
314 intel_update_framebuffer(ctx, ctx->ReadBuffer);
315 }
316 }
317
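/* Resolve the loader's flushFrontBuffer hook: prefer the image loader when
 * the screen was set up with one, otherwise fall back to the DRI2 loader.
 */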
318 #define flushFront(screen) ((screen)->image.loader ? (screen)->image.loader->flushFrontBuffer : (screen)->dri2.loader->flushFrontBuffer)
319
320 static void
321 intel_flush_front(struct gl_context *ctx)
322 {
323 struct brw_context *brw = brw_context(ctx);
324 __DRIcontext *driContext = brw->driContext;
325 __DRIdrawable *driDrawable = driContext->driDrawablePriv;
326 __DRIscreen *const dri_screen = brw->screen->driScrnPriv;
327
328 if (brw->front_buffer_dirty && _mesa_is_winsys_fbo(ctx->DrawBuffer)) {
329 if (flushFront(dri_screen) && driDrawable &&
330 driDrawable->loaderPrivate) {
331
332 /* Resolve before flushing FAKE_FRONT_LEFT to FRONT_LEFT.
333 *
334 * This potentially resolves both front and back buffer. It
335 * is unnecessary to resolve the back, but harms nothing except
336 * performance. And no one cares about front-buffer render
337 * performance.
338 */
339 intel_resolve_for_dri2_flush(brw, driDrawable);
340 intel_batchbuffer_flush(brw);
341
342 flushFront(dri_screen)(driDrawable, driDrawable->loaderPrivate);
343
344 /* We set the dirty bit in intel_prepare_render() if we're
345 * front buffer rendering once we get there.
346 */
347 brw->front_buffer_dirty = false;
348 }
349 }
350 }
351
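/* glFlush() hook: submit the current batch, push any front-buffer
 * rendering out to the window system, and note that flush-based throttling
 * may be needed before the next batch.
 */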
352 static void
353 intel_glFlush(struct gl_context *ctx)
354 {
355 struct brw_context *brw = brw_context(ctx);
356
357 intel_batchbuffer_flush(brw);
358 intel_flush_front(ctx);
359
360 brw->need_flush_throttle = true;
361 }
362
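/* glFinish() hook: flush everything, then block until the most recently
 * submitted batch buffer has finished executing on the GPU.
 */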
363 static void
364 intel_finish(struct gl_context * ctx)
365 {
366 struct brw_context *brw = brw_context(ctx);
367
368 intel_glFlush(ctx);
369
370 if (brw->batch.last_bo)
371 brw_bo_wait_rendering(brw, brw->batch.last_bo);
372 }
373
374 static void
375 brw_init_driver_functions(struct brw_context *brw,
376 struct dd_function_table *functions)
377 {
378 _mesa_init_driver_functions(functions);
379
380 /* GLX uses DRI2 invalidate events to handle window resizing.
381 * Unfortunately, EGL does not - libEGL is written in XCB (not Xlib),
382 * which doesn't provide a mechanism for snooping the event queues.
383 *
384 * So EGL still relies on viewport hacks to handle window resizing.
385 * This should go away with DRI3000.
386 */
387 if (!brw->driContext->driScreenPriv->dri2.useInvalidate)
388 functions->Viewport = intel_viewport;
389
390 functions->Flush = intel_glFlush;
391 functions->Finish = intel_finish;
392 functions->GetString = intel_get_string;
393 functions->UpdateState = intel_update_state;
394
395 intelInitTextureFuncs(functions);
396 intelInitTextureImageFuncs(functions);
397 intelInitTextureSubImageFuncs(functions);
398 intelInitTextureCopyImageFuncs(functions);
399 intelInitCopyImageFuncs(functions);
400 intelInitClearFuncs(functions);
401 intelInitBufferFuncs(functions);
402 intelInitPixelFuncs(functions);
403 intelInitBufferObjectFuncs(functions);
404 brw_init_syncobj_functions(functions);
405 brw_init_object_purgeable_functions(functions);
406
407 brwInitFragProgFuncs( functions );
408 brw_init_common_queryobj_functions(functions);
409 if (brw->gen >= 8 || brw->is_haswell)
410 hsw_init_queryobj_functions(functions);
411 else if (brw->gen >= 6)
412 gen6_init_queryobj_functions(functions);
413 else
414 gen4_init_queryobj_functions(functions);
415 brw_init_compute_functions(functions);
416 if (brw->gen >= 7)
417 brw_init_conditional_render_functions(functions);
418
419 functions->QueryInternalFormat = brw_query_internal_format;
420
421 functions->NewTransformFeedback = brw_new_transform_feedback;
422 functions->DeleteTransformFeedback = brw_delete_transform_feedback;
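   /* Select transform feedback hooks by capability: parts that support
    * MI_MATH and MI_LOAD_REGISTER_REG can maintain the vertex count on the
    * GPU; Gen7 and older instead expose a software
    * GetTransformFeedbackVertexCount fallback.
    */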
423 if (can_do_mi_math_and_lrr(brw->screen)) {
424 functions->BeginTransformFeedback = hsw_begin_transform_feedback;
425 functions->EndTransformFeedback = hsw_end_transform_feedback;
426 functions->PauseTransformFeedback = hsw_pause_transform_feedback;
427 functions->ResumeTransformFeedback = hsw_resume_transform_feedback;
428 } else if (brw->gen >= 7) {
429 functions->BeginTransformFeedback = gen7_begin_transform_feedback;
430 functions->EndTransformFeedback = gen7_end_transform_feedback;
431 functions->PauseTransformFeedback = gen7_pause_transform_feedback;
432 functions->ResumeTransformFeedback = gen7_resume_transform_feedback;
433 functions->GetTransformFeedbackVertexCount =
434 brw_get_transform_feedback_vertex_count;
435 } else {
436 functions->BeginTransformFeedback = brw_begin_transform_feedback;
437 functions->EndTransformFeedback = brw_end_transform_feedback;
438 functions->PauseTransformFeedback = brw_pause_transform_feedback;
439 functions->ResumeTransformFeedback = brw_resume_transform_feedback;
440 functions->GetTransformFeedbackVertexCount =
441 brw_get_transform_feedback_vertex_count;
442 }
443
444 if (brw->gen >= 6)
445 functions->GetSamplePosition = gen6_get_sample_position;
446 }
447
448 static void
449 brw_initialize_context_constants(struct brw_context *brw)
450 {
451 struct gl_context *ctx = &brw->ctx;
452 const struct brw_compiler *compiler = brw->screen->compiler;
453
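   /* Which shader stages this device/API combination actually exposes.
    * Compute is gated on the work group size limits computed earlier in
    * brw_initialize_cs_context_constants() meeting the GL (1024) or ES (128)
    * minimums, unless ARB_compute_shader is force-enabled.
    */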
454 const bool stage_exists[MESA_SHADER_STAGES] = {
455 [MESA_SHADER_VERTEX] = true,
456 [MESA_SHADER_TESS_CTRL] = brw->gen >= 7,
457 [MESA_SHADER_TESS_EVAL] = brw->gen >= 7,
458 [MESA_SHADER_GEOMETRY] = brw->gen >= 6,
459 [MESA_SHADER_FRAGMENT] = true,
460 [MESA_SHADER_COMPUTE] =
461 ((ctx->API == API_OPENGL_COMPAT || ctx->API == API_OPENGL_CORE) &&
462 ctx->Const.MaxComputeWorkGroupSize[0] >= 1024) ||
463 (ctx->API == API_OPENGLES2 &&
464 ctx->Const.MaxComputeWorkGroupSize[0] >= 128) ||
465 _mesa_extension_override_enables.ARB_compute_shader,
466 };
467
468 unsigned num_stages = 0;
469 for (int i = 0; i < MESA_SHADER_STAGES; i++) {
470 if (stage_exists[i])
471 num_stages++;
472 }
473
474 unsigned max_samplers =
475 brw->gen >= 8 || brw->is_haswell ? BRW_MAX_TEX_UNIT : 16;
476
477 ctx->Const.MaxDualSourceDrawBuffers = 1;
478 ctx->Const.MaxDrawBuffers = BRW_MAX_DRAW_BUFFERS;
479 ctx->Const.MaxCombinedShaderOutputResources =
480 MAX_IMAGE_UNITS + BRW_MAX_DRAW_BUFFERS;
481
482 /* The timestamp register we can read for glGetTimestamp() is
483 * sometimes only 32 bits, before scaling to nanoseconds (depending
484 * on kernel).
485 *
486 * Once scaled to nanoseconds the timestamp would roll over at a
487 * non-power-of-two, so an application couldn't use
488 * GL_QUERY_COUNTER_BITS to handle rollover correctly. Instead, we
489 * report 36 bits and truncate at that (rolling over 5 times as
490 * often as the HW counter), and when the 32-bit counter rolls
491 * over, it happens to also be at a rollover in the reported value
492 * from near (1<<36) to 0.
493 *
494     * The low 32 bits roll over in ~343 seconds.  Our 36-bit result
495 * rolls over every ~69 seconds.
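    *
    * (Assuming the 80 ns timestamp tick of these parts: 2^32 ticks * 80 ns
    * is ~343 s, and 2^36 ns is ~68.7 s.)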
496 */
497 ctx->Const.QueryCounterBits.Timestamp = 36;
498
499 ctx->Const.MaxTextureCoordUnits = 8; /* Mesa limit */
500 ctx->Const.MaxImageUnits = MAX_IMAGE_UNITS;
501 if (brw->gen >= 7) {
502 ctx->Const.MaxRenderbufferSize = 16384;
503 ctx->Const.MaxTextureLevels = MIN2(15 /* 16384 */, MAX_TEXTURE_LEVELS);
504 ctx->Const.MaxCubeTextureLevels = 15; /* 16384 */
505 } else {
506 ctx->Const.MaxRenderbufferSize = 8192;
507 ctx->Const.MaxTextureLevels = MIN2(14 /* 8192 */, MAX_TEXTURE_LEVELS);
508 ctx->Const.MaxCubeTextureLevels = 14; /* 8192 */
509 }
510 ctx->Const.Max3DTextureLevels = 12; /* 2048 */
511 ctx->Const.MaxArrayTextureLayers = brw->gen >= 7 ? 2048 : 512;
512 ctx->Const.MaxTextureMbytes = 1536;
513 ctx->Const.MaxTextureRectSize = 1 << 12;
514 ctx->Const.MaxTextureMaxAnisotropy = 16.0;
515 ctx->Const.MaxTextureLodBias = 15.0;
516 ctx->Const.StripTextureBorder = true;
517 if (brw->gen >= 7) {
518 ctx->Const.MaxProgramTextureGatherComponents = 4;
519 ctx->Const.MinProgramTextureGatherOffset = -32;
520 ctx->Const.MaxProgramTextureGatherOffset = 31;
521 } else if (brw->gen == 6) {
522 ctx->Const.MaxProgramTextureGatherComponents = 1;
523 ctx->Const.MinProgramTextureGatherOffset = -8;
524 ctx->Const.MaxProgramTextureGatherOffset = 7;
525 }
526
527 ctx->Const.MaxUniformBlockSize = 65536;
528
529 for (int i = 0; i < MESA_SHADER_STAGES; i++) {
530 struct gl_program_constants *prog = &ctx->Const.Program[i];
531
532 if (!stage_exists[i])
533 continue;
534
535 prog->MaxTextureImageUnits = max_samplers;
536
537 prog->MaxUniformBlocks = BRW_MAX_UBO;
538 prog->MaxCombinedUniformComponents =
539 prog->MaxUniformComponents +
540 ctx->Const.MaxUniformBlockSize / 4 * prog->MaxUniformBlocks;
541
542 prog->MaxAtomicCounters = MAX_ATOMIC_COUNTERS;
543 prog->MaxAtomicBuffers = BRW_MAX_ABO;
544 prog->MaxImageUniforms = compiler->scalar_stage[i] ? BRW_MAX_IMAGES : 0;
545 prog->MaxShaderStorageBlocks = BRW_MAX_SSBO;
546 }
547
548 ctx->Const.MaxTextureUnits =
549 MIN2(ctx->Const.MaxTextureCoordUnits,
550 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits);
551
552 ctx->Const.MaxUniformBufferBindings = num_stages * BRW_MAX_UBO;
553 ctx->Const.MaxCombinedUniformBlocks = num_stages * BRW_MAX_UBO;
554 ctx->Const.MaxCombinedAtomicBuffers = num_stages * BRW_MAX_ABO;
555 ctx->Const.MaxCombinedShaderStorageBlocks = num_stages * BRW_MAX_SSBO;
556 ctx->Const.MaxShaderStorageBufferBindings = num_stages * BRW_MAX_SSBO;
557 ctx->Const.MaxCombinedTextureImageUnits = num_stages * max_samplers;
558 ctx->Const.MaxCombinedImageUniforms = num_stages * BRW_MAX_IMAGES;
559
560
561 /* Hardware only supports a limited number of transform feedback buffers.
562 * So we need to override the Mesa default (which is based only on software
563 * limits).
564 */
565 ctx->Const.MaxTransformFeedbackBuffers = BRW_MAX_SOL_BUFFERS;
566
567 /* On Gen6, in the worst case, we use up one binding table entry per
568 * transform feedback component (see comments above the definition of
569 * BRW_MAX_SOL_BINDINGS, in brw_context.h), so we need to advertise a value
570 * for MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS equal to
571 * BRW_MAX_SOL_BINDINGS.
572 *
573 * In "separate components" mode, we need to divide this value by
574 * BRW_MAX_SOL_BUFFERS, so that the total number of binding table entries
575 * used up by all buffers will not exceed BRW_MAX_SOL_BINDINGS.
576 */
577 ctx->Const.MaxTransformFeedbackInterleavedComponents = BRW_MAX_SOL_BINDINGS;
578 ctx->Const.MaxTransformFeedbackSeparateComponents =
579 BRW_MAX_SOL_BINDINGS / BRW_MAX_SOL_BUFFERS;
580
581 ctx->Const.AlwaysUseGetTransformFeedbackVertexCount =
582 !can_do_mi_math_and_lrr(brw->screen);
583
584 int max_samples;
585 const int *msaa_modes = intel_supported_msaa_modes(brw->screen);
586 const int clamp_max_samples =
587 driQueryOptioni(&brw->optionCache, "clamp_max_samples");
588
589 if (clamp_max_samples < 0) {
590 max_samples = msaa_modes[0];
591 } else {
592 /* Select the largest supported MSAA mode that does not exceed
593 * clamp_max_samples.
594 */
595 max_samples = 0;
596 for (int i = 0; msaa_modes[i] != 0; ++i) {
597 if (msaa_modes[i] <= clamp_max_samples) {
598 max_samples = msaa_modes[i];
599 break;
600 }
601 }
602 }
603
604 ctx->Const.MaxSamples = max_samples;
605 ctx->Const.MaxColorTextureSamples = max_samples;
606 ctx->Const.MaxDepthTextureSamples = max_samples;
607 ctx->Const.MaxIntegerSamples = max_samples;
608 ctx->Const.MaxImageSamples = 0;
609
610 /* gen6_set_sample_maps() sets SampleMap{2,4,8}x variables which are used
611 * to map indices of rectangular grid to sample numbers within a pixel.
612 * These variables are used by GL_EXT_framebuffer_multisample_blit_scaled
613 * extension implementation. For more details see the comment above
614 * gen6_set_sample_maps() definition.
615 */
616 gen6_set_sample_maps(ctx);
617
618 ctx->Const.MinLineWidth = 1.0;
619 ctx->Const.MinLineWidthAA = 1.0;
620 if (brw->gen >= 6) {
621 ctx->Const.MaxLineWidth = 7.375;
622 ctx->Const.MaxLineWidthAA = 7.375;
623 ctx->Const.LineWidthGranularity = 0.125;
624 } else {
625 ctx->Const.MaxLineWidth = 7.0;
626 ctx->Const.MaxLineWidthAA = 7.0;
627 ctx->Const.LineWidthGranularity = 0.5;
628 }
629
630 /* For non-antialiased lines, we have to round the line width to the
631 * nearest whole number. Make sure that we don't advertise a line
632 * width that, when rounded, will be beyond the actual hardware
633 * maximum.
634 */
635 assert(roundf(ctx->Const.MaxLineWidth) <= ctx->Const.MaxLineWidth);
636
637 ctx->Const.MinPointSize = 1.0;
638 ctx->Const.MinPointSizeAA = 1.0;
639 ctx->Const.MaxPointSize = 255.0;
640 ctx->Const.MaxPointSizeAA = 255.0;
641 ctx->Const.PointSizeGranularity = 1.0;
642
643 if (brw->gen >= 5 || brw->is_g4x)
644 ctx->Const.MaxClipPlanes = 8;
645
646 ctx->Const.GLSLTessLevelsAsInputs = true;
647 ctx->Const.LowerTCSPatchVerticesIn = brw->gen >= 8;
648 ctx->Const.LowerTESPatchVerticesIn = true;
649 ctx->Const.PrimitiveRestartForPatches = true;
650
651 ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeInstructions = 16 * 1024;
652 ctx->Const.Program[MESA_SHADER_VERTEX].MaxAluInstructions = 0;
653 ctx->Const.Program[MESA_SHADER_VERTEX].MaxTexInstructions = 0;
654 ctx->Const.Program[MESA_SHADER_VERTEX].MaxTexIndirections = 0;
655 ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAluInstructions = 0;
656 ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTexInstructions = 0;
657 ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTexIndirections = 0;
658 ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAttribs = 16;
659 ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTemps = 256;
660 ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAddressRegs = 1;
661 ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeParameters = 1024;
662 ctx->Const.Program[MESA_SHADER_VERTEX].MaxEnvParams =
663 MIN2(ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeParameters,
664 ctx->Const.Program[MESA_SHADER_VERTEX].MaxEnvParams);
665
666 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeInstructions = 1024;
667 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAluInstructions = 1024;
668 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTexInstructions = 1024;
669 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTexIndirections = 1024;
670 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAttribs = 12;
671 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTemps = 256;
672 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAddressRegs = 0;
673 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeParameters = 1024;
674 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxEnvParams =
675 MIN2(ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeParameters,
676 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxEnvParams);
677
678 /* Fragment shaders use real, 32-bit twos-complement integers for all
679 * integer types.
680 */
681 ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.RangeMin = 31;
682 ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.RangeMax = 30;
683 ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.Precision = 0;
684 ctx->Const.Program[MESA_SHADER_FRAGMENT].HighInt = ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt;
685 ctx->Const.Program[MESA_SHADER_FRAGMENT].MediumInt = ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt;
686
687 ctx->Const.Program[MESA_SHADER_VERTEX].LowInt.RangeMin = 31;
688 ctx->Const.Program[MESA_SHADER_VERTEX].LowInt.RangeMax = 30;
689 ctx->Const.Program[MESA_SHADER_VERTEX].LowInt.Precision = 0;
690 ctx->Const.Program[MESA_SHADER_VERTEX].HighInt = ctx->Const.Program[MESA_SHADER_VERTEX].LowInt;
691 ctx->Const.Program[MESA_SHADER_VERTEX].MediumInt = ctx->Const.Program[MESA_SHADER_VERTEX].LowInt;
692
693    /* Gen6 converts quads to polygons at the beginning of the 3D pipeline,
694     * but we're not sure how the conversion orders the vertices, which
695     * affects the provoking vertex decision. Always use the last-vertex
696     * convention for quad primitives, which works as expected for now.
697 */
698 if (brw->gen >= 6)
699 ctx->Const.QuadsFollowProvokingVertexConvention = false;
700
701 ctx->Const.NativeIntegers = true;
702 ctx->Const.VertexID_is_zero_based = true;
703
704 /* Regarding the CMP instruction, the Ivybridge PRM says:
705 *
706 * "For each enabled channel 0b or 1b is assigned to the appropriate flag
707 * bit and 0/all zeros or all ones (e.g, byte 0xFF, word 0xFFFF, DWord
708 * 0xFFFFFFFF) is assigned to dst."
709 *
710 * but PRMs for earlier generations say
711 *
712 * "In dword format, one GRF may store up to 8 results. When the register
713 * is used later as a vector of Booleans, as only LSB at each channel
714 * contains meaning [sic] data, software should make sure all higher bits
715 * are masked out (e.g. by 'and-ing' an [sic] 0x01 constant)."
716 *
717 * We select the representation of a true boolean uniform to be ~0, and fix
718     * the results of Gen <= 5 CMP instructions with -(result & 1).
719 */
720 ctx->Const.UniformBooleanTrue = ~0;
721
722 /* From the gen4 PRM, volume 4 page 127:
723 *
724 * "For SURFTYPE_BUFFER non-rendertarget surfaces, this field specifies
725 * the base address of the first element of the surface, computed in
726 * software by adding the surface base address to the byte offset of
727 * the element in the buffer."
728 *
729 * However, unaligned accesses are slower, so enforce buffer alignment.
730 */
731 ctx->Const.UniformBufferOffsetAlignment = 16;
732
733 /* ShaderStorageBufferOffsetAlignment should be a cacheline (64 bytes) so
734 * that we can safely have the CPU and GPU writing the same SSBO on
735     * non-cache-coherent systems (our Atom CPUs). With UBOs, the GPU never
736 * writes, so there's no problem. For an SSBO, the GPU and the CPU can
737 * be updating disjoint regions of the buffer simultaneously and that will
738 * break if the regions overlap the same cacheline.
739 */
740 ctx->Const.ShaderStorageBufferOffsetAlignment = 64;
741 ctx->Const.TextureBufferOffsetAlignment = 16;
742 ctx->Const.MaxTextureBufferSize = 128 * 1024 * 1024;
743
744 if (brw->gen >= 6) {
745 ctx->Const.MaxVarying = 32;
746 ctx->Const.Program[MESA_SHADER_VERTEX].MaxOutputComponents = 128;
747 ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxInputComponents = 64;
748 ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxOutputComponents = 128;
749 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxInputComponents = 128;
750 ctx->Const.Program[MESA_SHADER_TESS_CTRL].MaxInputComponents = 128;
751 ctx->Const.Program[MESA_SHADER_TESS_CTRL].MaxOutputComponents = 128;
752 ctx->Const.Program[MESA_SHADER_TESS_EVAL].MaxInputComponents = 128;
753 ctx->Const.Program[MESA_SHADER_TESS_EVAL].MaxOutputComponents = 128;
754 }
755
756 /* We want the GLSL compiler to emit code that uses condition codes */
757 for (int i = 0; i < MESA_SHADER_STAGES; i++) {
758 ctx->Const.ShaderCompilerOptions[i] =
759 brw->screen->compiler->glsl_compiler_options[i];
760 }
761
762 if (brw->gen >= 7) {
763 ctx->Const.MaxViewportWidth = 32768;
764 ctx->Const.MaxViewportHeight = 32768;
765 }
766
767 /* ARB_viewport_array, OES_viewport_array */
768 if (brw->gen >= 6) {
769 ctx->Const.MaxViewports = GEN6_NUM_VIEWPORTS;
770 ctx->Const.ViewportSubpixelBits = 0;
771
772 /* Cast to float before negating because MaxViewportWidth is unsigned.
773 */
774 ctx->Const.ViewportBounds.Min = -(float)ctx->Const.MaxViewportWidth;
775 ctx->Const.ViewportBounds.Max = ctx->Const.MaxViewportWidth;
776 }
777
778 /* ARB_gpu_shader5 */
779 if (brw->gen >= 7)
780 ctx->Const.MaxVertexStreams = MIN2(4, MAX_VERTEX_STREAMS);
781
782 /* ARB_framebuffer_no_attachments */
783 ctx->Const.MaxFramebufferWidth = 16384;
784 ctx->Const.MaxFramebufferHeight = 16384;
785 ctx->Const.MaxFramebufferLayers = ctx->Const.MaxArrayTextureLayers;
786 ctx->Const.MaxFramebufferSamples = max_samples;
787
788 /* OES_primitive_bounding_box */
789 ctx->Const.NoPrimitiveBoundingBoxOutput = true;
790 }
791
792 static void
793 brw_initialize_cs_context_constants(struct brw_context *brw)
794 {
795 struct gl_context *ctx = &brw->ctx;
796 const struct intel_screen *screen = brw->screen;
797 struct gen_device_info *devinfo = &brw->screen->devinfo;
798
799 /* FINISHME: Do this for all platforms that the kernel supports */
800 if (brw->is_cherryview &&
801 screen->subslice_total > 0 && screen->eu_total > 0) {
802 /* Logical CS threads = EUs per subslice * 7 threads per EU */
803 uint32_t max_cs_threads = screen->eu_total / screen->subslice_total * 7;
804
805 /* Fuse configurations may give more threads than expected, never less. */
806 if (max_cs_threads > devinfo->max_cs_threads)
807 devinfo->max_cs_threads = max_cs_threads;
808 }
809
810 /* Maximum number of scalar compute shader invocations that can be run in
811 * parallel in the same subslice assuming SIMD32 dispatch.
812 *
813 * We don't advertise more than 64 threads, because we are limited to 64 by
814 * our usage of thread_width_max in the gpgpu walker command. This only
815 * currently impacts Haswell, which otherwise might be able to advertise 70
816 * threads. With SIMD32 and 64 threads, Haswell still provides twice the
817     * number of invocations required for ARB_compute_shader.
818 */
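   /* For example, a part reporting max_cs_threads = 64 advertises
    * 32 * 64 = 2048 invocations per work group in each dimension below.
    */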
819 const unsigned max_threads = MIN2(64, devinfo->max_cs_threads);
820 const uint32_t max_invocations = 32 * max_threads;
821 ctx->Const.MaxComputeWorkGroupSize[0] = max_invocations;
822 ctx->Const.MaxComputeWorkGroupSize[1] = max_invocations;
823 ctx->Const.MaxComputeWorkGroupSize[2] = max_invocations;
824 ctx->Const.MaxComputeWorkGroupInvocations = max_invocations;
825 ctx->Const.MaxComputeSharedMemorySize = 64 * 1024;
826 }
827
828 /**
829 * Process driconf (drirc) options, setting appropriate context flags.
830 *
831 * intelInitExtensions still pokes at optionCache directly, in order to
832 * avoid advertising various extensions. No flags are set, so it makes
833 * sense to continue doing that there.
834 */
835 static void
836 brw_process_driconf_options(struct brw_context *brw)
837 {
838 struct gl_context *ctx = &brw->ctx;
839
840 driOptionCache *options = &brw->optionCache;
841 driParseConfigFiles(options, &brw->screen->optionCache,
842 brw->driContext->driScreenPriv->myNum, "i965");
843
844 int bo_reuse_mode = driQueryOptioni(options, "bo_reuse");
845 switch (bo_reuse_mode) {
846 case DRI_CONF_BO_REUSE_DISABLED:
847 break;
848 case DRI_CONF_BO_REUSE_ALL:
849 brw_bufmgr_enable_reuse(brw->bufmgr);
850 break;
851 }
852
853 if (INTEL_DEBUG & DEBUG_NO_HIZ) {
854 brw->has_hiz = false;
855 /* On gen6, you can only do separate stencil with HIZ. */
856 if (brw->gen == 6)
857 brw->has_separate_stencil = false;
858 }
859
860 if (driQueryOptionb(options, "always_flush_batch")) {
861 fprintf(stderr, "flushing batchbuffer before/after each draw call\n");
862 brw->always_flush_batch = true;
863 }
864
865 if (driQueryOptionb(options, "always_flush_cache")) {
866 fprintf(stderr, "flushing GPU caches before/after each draw call\n");
867 brw->always_flush_cache = true;
868 }
869
870 if (driQueryOptionb(options, "disable_throttling")) {
871 fprintf(stderr, "disabling flush throttling\n");
872 brw->disable_throttling = true;
873 }
874
875 brw->precompile = driQueryOptionb(&brw->optionCache, "shader_precompile");
876
877 if (driQueryOptionb(&brw->optionCache, "precise_trig"))
878 brw->screen->compiler->precise_trig = true;
879
880 ctx->Const.ForceGLSLExtensionsWarn =
881 driQueryOptionb(options, "force_glsl_extensions_warn");
882
883 ctx->Const.ForceGLSLVersion =
884 driQueryOptioni(options, "force_glsl_version");
885
886 ctx->Const.DisableGLSLLineContinuations =
887 driQueryOptionb(options, "disable_glsl_line_continuations");
888
889 ctx->Const.AllowGLSLExtensionDirectiveMidShader =
890 driQueryOptionb(options, "allow_glsl_extension_directive_midshader");
891
892 ctx->Const.AllowGLSLBuiltinVariableRedeclaration =
893 driQueryOptionb(options, "allow_glsl_builtin_variable_redeclaration");
894
895 ctx->Const.AllowHigherCompatVersion =
896 driQueryOptionb(options, "allow_higher_compat_version");
897
898 ctx->Const.ForceGLSLAbsSqrt =
899 driQueryOptionb(options, "force_glsl_abs_sqrt");
900
901 ctx->Const.GLSLZeroInit = driQueryOptionb(options, "glsl_zero_init");
902
903 brw->dual_color_blend_by_location =
904 driQueryOptionb(options, "dual_color_blend_by_location");
905 }
906
907 GLboolean
908 brwCreateContext(gl_api api,
909 const struct gl_config *mesaVis,
910 __DRIcontext *driContextPriv,
911 unsigned major_version,
912 unsigned minor_version,
913 uint32_t flags,
914 bool notify_reset,
915 unsigned *dri_ctx_error,
916 void *sharedContextPrivate)
917 {
918 struct gl_context *shareCtx = (struct gl_context *) sharedContextPrivate;
919 struct intel_screen *screen = driContextPriv->driScreenPriv->driverPrivate;
920 const struct gen_device_info *devinfo = &screen->devinfo;
921 struct dd_function_table functions;
922
923 /* Only allow the __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS flag if the kernel
924 * provides us with context reset notifications.
925 */
926 uint32_t allowed_flags = __DRI_CTX_FLAG_DEBUG
927 | __DRI_CTX_FLAG_FORWARD_COMPATIBLE;
928
929 if (screen->has_context_reset_notification)
930 allowed_flags |= __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS;
931
932 if (flags & ~allowed_flags) {
933 *dri_ctx_error = __DRI_CTX_ERROR_UNKNOWN_FLAG;
934 return false;
935 }
936
937 struct brw_context *brw = rzalloc(NULL, struct brw_context);
938 if (!brw) {
939 fprintf(stderr, "%s: failed to alloc context\n", __func__);
940 *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
941 return false;
942 }
943
944 driContextPriv->driverPrivate = brw;
945 brw->driContext = driContextPriv;
946 brw->screen = screen;
947 brw->bufmgr = screen->bufmgr;
948
949 brw->gen = devinfo->gen;
950 brw->gt = devinfo->gt;
951 brw->is_g4x = devinfo->is_g4x;
952 brw->is_baytrail = devinfo->is_baytrail;
953 brw->is_haswell = devinfo->is_haswell;
954 brw->is_cherryview = devinfo->is_cherryview;
955 brw->is_broxton = devinfo->is_broxton;
956 brw->has_llc = devinfo->has_llc;
957 brw->has_hiz = devinfo->has_hiz_and_separate_stencil;
958 brw->has_separate_stencil = devinfo->has_hiz_and_separate_stencil;
959 brw->has_pln = devinfo->has_pln;
960 brw->has_compr4 = devinfo->has_compr4;
961 brw->has_surface_tile_offset = devinfo->has_surface_tile_offset;
962 brw->has_negative_rhw_bug = devinfo->has_negative_rhw_bug;
963 brw->needs_unlit_centroid_workaround =
964 devinfo->needs_unlit_centroid_workaround;
965
966 brw->must_use_separate_stencil = devinfo->must_use_separate_stencil;
967 brw->has_swizzling = screen->hw_has_swizzling;
968
969 isl_device_init(&brw->isl_dev, devinfo, screen->hw_has_swizzling);
970
971 brw->vs.base.stage = MESA_SHADER_VERTEX;
972 brw->tcs.base.stage = MESA_SHADER_TESS_CTRL;
973 brw->tes.base.stage = MESA_SHADER_TESS_EVAL;
974 brw->gs.base.stage = MESA_SHADER_GEOMETRY;
975 brw->wm.base.stage = MESA_SHADER_FRAGMENT;
976 if (brw->gen >= 8) {
977 gen8_init_vtable_surface_functions(brw);
978 brw->vtbl.emit_depth_stencil_hiz = gen8_emit_depth_stencil_hiz;
979 } else if (brw->gen >= 7) {
980 gen7_init_vtable_surface_functions(brw);
981 brw->vtbl.emit_depth_stencil_hiz = gen7_emit_depth_stencil_hiz;
982 } else if (brw->gen >= 6) {
983 gen6_init_vtable_surface_functions(brw);
984 brw->vtbl.emit_depth_stencil_hiz = gen6_emit_depth_stencil_hiz;
985 } else {
986 gen4_init_vtable_surface_functions(brw);
987 brw->vtbl.emit_depth_stencil_hiz = brw_emit_depth_stencil_hiz;
988 }
989
990 brw_init_driver_functions(brw, &functions);
991
992 if (notify_reset)
993 functions.GetGraphicsResetStatus = brw_get_graphics_reset_status;
994
995 struct gl_context *ctx = &brw->ctx;
996
997 if (!_mesa_initialize_context(ctx, api, mesaVis, shareCtx, &functions)) {
998 *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
999 fprintf(stderr, "%s: failed to init mesa context\n", __func__);
1000 intelDestroyContext(driContextPriv);
1001 return false;
1002 }
1003
1004 driContextSetFlags(ctx, flags);
1005
1006 /* Initialize the software rasterizer and helper modules.
1007 *
1008 * As of GL 3.1 core, the gen4+ driver doesn't need the swrast context for
1009 * software fallbacks (which we have to support on legacy GL to do weird
1010 * glDrawPixels(), glBitmap(), and other functions).
1011 */
1012 if (api != API_OPENGL_CORE && api != API_OPENGLES2) {
1013 _swrast_CreateContext(ctx);
1014 }
1015
1016 _vbo_CreateContext(ctx);
1017 if (ctx->swrast_context) {
1018 _tnl_CreateContext(ctx);
1019 TNL_CONTEXT(ctx)->Driver.RunPipeline = _tnl_run_pipeline;
1020 _swsetup_CreateContext(ctx);
1021
1022 /* Configure swrast to match hardware characteristics: */
1023 _swrast_allow_pixel_fog(ctx, false);
1024 _swrast_allow_vertex_fog(ctx, true);
1025 }
1026
1027 _mesa_meta_init(ctx);
1028
1029 brw_process_driconf_options(brw);
1030
1031 if (INTEL_DEBUG & DEBUG_PERF)
1032 brw->perf_debug = true;
1033
1034 brw_initialize_cs_context_constants(brw);
1035 brw_initialize_context_constants(brw);
1036
1037 ctx->Const.ResetStrategy = notify_reset
1038 ? GL_LOSE_CONTEXT_ON_RESET_ARB : GL_NO_RESET_NOTIFICATION_ARB;
1039
1040 /* Reinitialize the context point state. It depends on ctx->Const values. */
1041 _mesa_init_point(ctx);
1042
1043 intel_fbo_init(brw);
1044
1045 intel_batchbuffer_init(&brw->batch, brw->bufmgr, brw->has_llc);
1046
1047 if (brw->gen >= 6) {
1048 /* Create a new hardware context. Using a hardware context means that
1049 * our GPU state will be saved/restored on context switch, allowing us
1050 * to assume that the GPU is in the same state we left it in.
1051 *
1052 * This is required for transform feedback buffer offsets, query objects,
1053 * and also allows us to reduce how much state we have to emit.
1054 */
1055 brw->hw_ctx = brw_create_hw_context(brw->bufmgr);
1056
1057 if (!brw->hw_ctx) {
1058 fprintf(stderr, "Failed to create hardware context.\n");
1059 intelDestroyContext(driContextPriv);
1060 return false;
1061 }
1062 }
1063
1064 if (brw_init_pipe_control(brw, devinfo)) {
1065 *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
1066 intelDestroyContext(driContextPriv);
1067 return false;
1068 }
1069
1070 brw_init_state(brw);
1071
1072 intelInitExtensions(ctx);
1073
1074 brw_init_surface_formats(brw);
1075
1076 brw_blorp_init(brw);
1077
1078 brw->urb.size = devinfo->urb.size;
1079
1080 if (brw->gen == 6)
1081 brw->urb.gs_present = false;
1082
1083 brw->prim_restart.in_progress = false;
1084 brw->prim_restart.enable_cut_index = false;
1085 brw->gs.enabled = false;
1086 brw->clip.viewport_count = 1;
1087
1088 brw->predicate.state = BRW_PREDICATE_STATE_RENDER;
1089
1090 brw->max_gtt_map_object_size = screen->max_gtt_map_object_size;
1091
1092 ctx->VertexProgram._MaintainTnlProgram = true;
1093 ctx->FragmentProgram._MaintainTexEnvProgram = true;
1094
1095 brw_draw_init( brw );
1096
1097 if ((flags & __DRI_CTX_FLAG_DEBUG) != 0) {
1098 /* Turn on some extra GL_ARB_debug_output generation. */
1099 brw->perf_debug = true;
1100 }
1101
1102 if ((flags & __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS) != 0) {
1103 ctx->Const.ContextFlags |= GL_CONTEXT_FLAG_ROBUST_ACCESS_BIT_ARB;
1104 ctx->Const.RobustAccess = GL_TRUE;
1105 }
1106
1107 if (INTEL_DEBUG & DEBUG_SHADER_TIME)
1108 brw_init_shader_time(brw);
1109
1110 _mesa_compute_version(ctx);
1111
1112 _mesa_initialize_dispatch_tables(ctx);
1113 _mesa_initialize_vbo_vtxfmt(ctx);
1114
1115 if (ctx->Extensions.INTEL_performance_query)
1116 brw_init_performance_queries(brw);
1117
1118 vbo_use_buffer_objects(ctx);
1119 vbo_always_unmap_buffers(ctx);
1120
1121 return true;
1122 }
1123
1124 void
1125 intelDestroyContext(__DRIcontext * driContextPriv)
1126 {
1127 struct brw_context *brw =
1128 (struct brw_context *) driContextPriv->driverPrivate;
1129 struct gl_context *ctx = &brw->ctx;
1130
1131 _mesa_meta_free(&brw->ctx);
1132
1133 if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
1134 /* Force a report. */
1135 brw->shader_time.report_time = 0;
1136
1137 brw_collect_and_report_shader_time(brw);
1138 brw_destroy_shader_time(brw);
1139 }
1140
1141 if (brw->gen >= 6)
1142 blorp_finish(&brw->blorp);
1143
1144 brw_destroy_state(brw);
1145 brw_draw_destroy(brw);
1146
1147 brw_bo_unreference(brw->curbe.curbe_bo);
1148 if (brw->vs.base.scratch_bo)
1149 brw_bo_unreference(brw->vs.base.scratch_bo);
1150 if (brw->tcs.base.scratch_bo)
1151 brw_bo_unreference(brw->tcs.base.scratch_bo);
1152 if (brw->tes.base.scratch_bo)
1153 brw_bo_unreference(brw->tes.base.scratch_bo);
1154 if (brw->gs.base.scratch_bo)
1155 brw_bo_unreference(brw->gs.base.scratch_bo);
1156 if (brw->wm.base.scratch_bo)
1157 brw_bo_unreference(brw->wm.base.scratch_bo);
1158
1159 brw_destroy_hw_context(brw->bufmgr, brw->hw_ctx);
1160
1161 if (ctx->swrast_context) {
1162 _swsetup_DestroyContext(&brw->ctx);
1163 _tnl_DestroyContext(&brw->ctx);
1164 }
1165 _vbo_DestroyContext(&brw->ctx);
1166
1167 if (ctx->swrast_context)
1168 _swrast_DestroyContext(&brw->ctx);
1169
1170 brw_fini_pipe_control(brw);
1171 intel_batchbuffer_free(&brw->batch);
1172
1173 brw_bo_unreference(brw->throttle_batch[1]);
1174 brw_bo_unreference(brw->throttle_batch[0]);
1175 brw->throttle_batch[1] = NULL;
1176 brw->throttle_batch[0] = NULL;
1177
1178 driDestroyOptionCache(&brw->optionCache);
1179
1180 /* free the Mesa context */
1181 _mesa_free_context_data(&brw->ctx);
1182
1183 ralloc_free(brw);
1184 driContextPriv->driverPrivate = NULL;
1185 }
1186
1187 GLboolean
1188 intelUnbindContext(__DRIcontext * driContextPriv)
1189 {
1190    /* Unset current context and dispatch table */
1191 _mesa_make_current(NULL, NULL, NULL);
1192
1193 return true;
1194 }
1195
1196 /**
1197  * Fixes up the context for GLES2/3 with our default-to-sRGB-capable behavior
1198 * on window system framebuffers.
1199 *
1200 * Desktop GL is fairly reasonable in its handling of sRGB: You can ask if
1201 * your renderbuffer can do sRGB encode, and you can flip a switch that does
1202 * sRGB encode if the renderbuffer can handle it. You can ask specifically
1203 * for a visual where you're guaranteed to be capable, but it turns out that
1204 * everyone just makes all their ARGB8888 visuals capable and doesn't offer
1205 * incapable ones, because there's no difference between the two in resources
1206 * used. Applications thus get built that accidentally rely on the default
1207 * visual choice being sRGB, so we make ours sRGB capable. Everything sounds
1208 * great...
1209 *
1210 * But for GLES2/3, they decided that it was silly to not turn on sRGB encode
1211 * for sRGB renderbuffers you made with the GL_EXT_texture_sRGB equivalent.
1212 * So they removed the enable knob and made it "if the renderbuffer is sRGB
1213 * capable, do sRGB encode". Then, for your window system renderbuffers, you
1214 * can ask for sRGB visuals and get sRGB encode, or not ask for sRGB visuals
1215 * and get no sRGB encode (assuming that both kinds of visual are available).
1216 * Thus our choice to support sRGB by default on our visuals for desktop would
1217 * result in broken rendering of GLES apps that aren't expecting sRGB encode.
1218 *
1219 * Unfortunately, renderbuffer setup happens before a context is created. So
1220 * in intel_screen.c we always set up sRGB, and here, if you're a GLES2/3
1221 * context (without an sRGB visual, though we don't have sRGB visuals exposed
1222 * yet), we go turn that back off before anyone finds out.
1223 */
1224 static void
1225 intel_gles3_srgb_workaround(struct brw_context *brw,
1226 struct gl_framebuffer *fb)
1227 {
1228 struct gl_context *ctx = &brw->ctx;
1229
1230 if (_mesa_is_desktop_gl(ctx) || !fb->Visual.sRGBCapable)
1231 return;
1232
1233 /* Some day when we support the sRGB capable bit on visuals available for
1234 * GLES, we'll need to respect that and not disable things here.
1235 */
1236 fb->Visual.sRGBCapable = false;
1237 for (int i = 0; i < BUFFER_COUNT; i++) {
1238 struct gl_renderbuffer *rb = fb->Attachment[i].Renderbuffer;
1239 if (rb)
1240 rb->Format = _mesa_get_srgb_format_linear(rb->Format);
1241 }
1242 }
1243
1244 GLboolean
1245 intelMakeCurrent(__DRIcontext * driContextPriv,
1246 __DRIdrawable * driDrawPriv,
1247 __DRIdrawable * driReadPriv)
1248 {
1249 struct brw_context *brw;
1250 GET_CURRENT_CONTEXT(curCtx);
1251
1252 if (driContextPriv)
1253 brw = (struct brw_context *) driContextPriv->driverPrivate;
1254 else
1255 brw = NULL;
1256
1257 /* According to the glXMakeCurrent() man page: "Pending commands to
1258 * the previous context, if any, are flushed before it is released."
1259 * But only flush if we're actually changing contexts.
1260 */
1261 if (brw_context(curCtx) && brw_context(curCtx) != brw) {
1262 _mesa_flush(curCtx);
1263 }
1264
1265 if (driContextPriv) {
1266 struct gl_context *ctx = &brw->ctx;
1267 struct gl_framebuffer *fb, *readFb;
1268
1269 if (driDrawPriv == NULL) {
1270 fb = _mesa_get_incomplete_framebuffer();
1271 } else {
1272 fb = driDrawPriv->driverPrivate;
1273 driContextPriv->dri2.draw_stamp = driDrawPriv->dri2.stamp - 1;
1274 }
1275
1276 if (driReadPriv == NULL) {
1277 readFb = _mesa_get_incomplete_framebuffer();
1278 } else {
1279 readFb = driReadPriv->driverPrivate;
1280 driContextPriv->dri2.read_stamp = driReadPriv->dri2.stamp - 1;
1281 }
1282
1283 /* The sRGB workaround changes the renderbuffer's format. We must change
1284        * the format before the renderbuffer's miptree gets allocated, otherwise
1285 * the formats of the renderbuffer and its miptree will differ.
1286 */
1287 intel_gles3_srgb_workaround(brw, fb);
1288 intel_gles3_srgb_workaround(brw, readFb);
1289
1290 /* If the context viewport hasn't been initialized, force a call out to
1291 * the loader to get buffers so we have a drawable size for the initial
1292 * viewport. */
1293 if (!brw->ctx.ViewportInitialized)
1294 intel_prepare_render(brw);
1295
1296 _mesa_make_current(ctx, fb, readFb);
1297 } else {
1298 _mesa_make_current(NULL, NULL, NULL);
1299 }
1300
1301 return true;
1302 }
1303
1304 void
1305 intel_resolve_for_dri2_flush(struct brw_context *brw,
1306 __DRIdrawable *drawable)
1307 {
1308 if (brw->gen < 6) {
1309 /* MSAA and fast color clear are not supported, so don't waste time
1310 * checking whether a resolve is needed.
1311 */
1312 return;
1313 }
1314
1315 struct gl_framebuffer *fb = drawable->driverPrivate;
1316 struct intel_renderbuffer *rb;
1317
1318 /* Usually, only the back buffer will need to be downsampled. However,
1319 * the front buffer will also need it if the user has rendered into it.
1320 */
1321 static const gl_buffer_index buffers[2] = {
1322 BUFFER_BACK_LEFT,
1323 BUFFER_FRONT_LEFT,
1324 };
1325
1326 for (int i = 0; i < 2; ++i) {
1327 rb = intel_get_renderbuffer(fb, buffers[i]);
1328 if (rb == NULL || rb->mt == NULL)
1329 continue;
1330 if (rb->mt->num_samples <= 1) {
1331 assert(rb->mt_layer == 0 && rb->mt_level == 0 &&
1332 rb->layer_count == 1);
1333 intel_miptree_prepare_access(brw, rb->mt, 0, 1, 0, 1, false, false);
1334 } else {
1335 intel_renderbuffer_downsample(brw, rb);
1336 }
1337 }
1338 }
1339
1340 static unsigned
1341 intel_bits_per_pixel(const struct intel_renderbuffer *rb)
1342 {
1343 return _mesa_get_format_bytes(intel_rb_format(rb)) * 8;
1344 }
1345
1346 static void
1347 intel_query_dri2_buffers(struct brw_context *brw,
1348 __DRIdrawable *drawable,
1349 __DRIbuffer **buffers,
1350 int *count);
1351
1352 static void
1353 intel_process_dri2_buffer(struct brw_context *brw,
1354 __DRIdrawable *drawable,
1355 __DRIbuffer *buffer,
1356 struct intel_renderbuffer *rb,
1357 const char *buffer_name);
1358
1359 static void
1360 intel_update_image_buffers(struct brw_context *brw, __DRIdrawable *drawable);
1361
1362 static void
1363 intel_update_dri2_buffers(struct brw_context *brw, __DRIdrawable *drawable)
1364 {
1365 struct gl_framebuffer *fb = drawable->driverPrivate;
1366 struct intel_renderbuffer *rb;
1367 __DRIbuffer *buffers = NULL;
1368 int count;
1369 const char *region_name;
1370
1371 /* Set this up front, so that in case our buffers get invalidated
1372 * while we're getting new buffers, we don't clobber the stamp and
1373 * thus ignore the invalidate. */
1374 drawable->lastStamp = drawable->dri2.stamp;
1375
1376 if (unlikely(INTEL_DEBUG & DEBUG_DRI))
1377 fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);
1378
1379 intel_query_dri2_buffers(brw, drawable, &buffers, &count);
1380
1381 if (buffers == NULL)
1382 return;
1383
1384 for (int i = 0; i < count; i++) {
1385 switch (buffers[i].attachment) {
1386 case __DRI_BUFFER_FRONT_LEFT:
1387 rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
1388 region_name = "dri2 front buffer";
1389 break;
1390
1391 case __DRI_BUFFER_FAKE_FRONT_LEFT:
1392 rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
1393 region_name = "dri2 fake front buffer";
1394 break;
1395
1396 case __DRI_BUFFER_BACK_LEFT:
1397 rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
1398 region_name = "dri2 back buffer";
1399 break;
1400
1401 case __DRI_BUFFER_DEPTH:
1402 case __DRI_BUFFER_HIZ:
1403 case __DRI_BUFFER_DEPTH_STENCIL:
1404 case __DRI_BUFFER_STENCIL:
1405 case __DRI_BUFFER_ACCUM:
1406 default:
1407 fprintf(stderr,
1408 "unhandled buffer attach event, attachment type %d\n",
1409 buffers[i].attachment);
1410 return;
1411 }
1412
1413 intel_process_dri2_buffer(brw, drawable, &buffers[i], rb, region_name);
1414 }
1415
1416 }
1417
1418 void
1419 intel_update_renderbuffers(__DRIcontext *context, __DRIdrawable *drawable)
1420 {
1421 struct brw_context *brw = context->driverPrivate;
1422 __DRIscreen *dri_screen = brw->screen->driScrnPriv;
1423
1424 /* Set this up front, so that in case our buffers get invalidated
1425 * while we're getting new buffers, we don't clobber the stamp and
1426 * thus ignore the invalidate. */
1427 drawable->lastStamp = drawable->dri2.stamp;
1428
1429 if (unlikely(INTEL_DEBUG & DEBUG_DRI))
1430 fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);
1431
1432 if (dri_screen->image.loader)
1433 intel_update_image_buffers(brw, drawable);
1434 else
1435 intel_update_dri2_buffers(brw, drawable);
1436
1437 driUpdateFramebufferSize(&brw->ctx, drawable);
1438 }
1439
1440 /**
1441  * intel_prepare_render should be called anywhere that current read/drawbuffer
1442 * state is required.
1443 */
1444 void
1445 intel_prepare_render(struct brw_context *brw)
1446 {
1447 struct gl_context *ctx = &brw->ctx;
1448 __DRIcontext *driContext = brw->driContext;
1449 __DRIdrawable *drawable;
1450
1451 drawable = driContext->driDrawablePriv;
1452 if (drawable && drawable->dri2.stamp != driContext->dri2.draw_stamp) {
1453 if (drawable->lastStamp != drawable->dri2.stamp)
1454 intel_update_renderbuffers(driContext, drawable);
1455 driContext->dri2.draw_stamp = drawable->dri2.stamp;
1456 }
1457
1458 drawable = driContext->driReadablePriv;
1459 if (drawable && drawable->dri2.stamp != driContext->dri2.read_stamp) {
1460 if (drawable->lastStamp != drawable->dri2.stamp)
1461 intel_update_renderbuffers(driContext, drawable);
1462 driContext->dri2.read_stamp = drawable->dri2.stamp;
1463 }
1464
1465 /* If we're currently rendering to the front buffer, the rendering
1466 * that will happen next will probably dirty the front buffer. So
1467 * mark it as dirty here.
1468 */
1469 if (_mesa_is_front_buffer_drawing(ctx->DrawBuffer))
1470 brw->front_buffer_dirty = true;
1471 }
1472
1473 /**
1474 * \brief Query DRI2 to obtain a DRIdrawable's buffers.
1475 *
1476 * To determine which DRI buffers to request, examine the renderbuffers
1477 * attached to the drawable's framebuffer. Then request the buffers with
1478 * DRI2GetBuffers() or DRI2GetBuffersWithFormat().
1479 *
1480 * This is called from intel_update_renderbuffers().
1481 *
1482 * \param drawable Drawable whose buffers are queried.
1483 * \param buffers [out] List of buffers returned by DRI2 query.
1484 * \param buffer_count [out] Number of buffers returned.
1485 *
1486 * \see intel_update_renderbuffers()
1487 * \see DRI2GetBuffers()
1488 * \see DRI2GetBuffersWithFormat()
1489 */
1490 static void
1491 intel_query_dri2_buffers(struct brw_context *brw,
1492 __DRIdrawable *drawable,
1493 __DRIbuffer **buffers,
1494 int *buffer_count)
1495 {
1496 __DRIscreen *dri_screen = brw->screen->driScrnPriv;
1497 struct gl_framebuffer *fb = drawable->driverPrivate;
1498 int i = 0;
1499 unsigned attachments[8];
1500
1501 struct intel_renderbuffer *front_rb;
1502 struct intel_renderbuffer *back_rb;
1503
1504 front_rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
1505 back_rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
1506
1507 memset(attachments, 0, sizeof(attachments));
1508 if ((_mesa_is_front_buffer_drawing(fb) ||
1509 _mesa_is_front_buffer_reading(fb) ||
1510 !back_rb) && front_rb) {
1511 /* If a fake front buffer is in use, then querying for
1512 * __DRI_BUFFER_FRONT_LEFT will cause the server to copy the image from
1513 * the real front buffer to the fake front buffer. So before doing the
1514 * query, we need to make sure all the pending drawing has landed in the
1515 * real front buffer.
1516 */
1517 intel_batchbuffer_flush(brw);
1518 intel_flush_front(&brw->ctx);
1519
1520 attachments[i++] = __DRI_BUFFER_FRONT_LEFT;
1521 attachments[i++] = intel_bits_per_pixel(front_rb);
1522 } else if (front_rb && brw->front_buffer_dirty) {
1523 /* We have pending front buffer rendering, but we aren't querying for a
1524 * front buffer. If the front buffer we have is a fake front buffer,
1525 * the X server is going to throw it away when it processes the query.
1526 * So before doing the query, make sure all the pending drawing has
1527 * landed in the real front buffer.
1528 */
1529 intel_batchbuffer_flush(brw);
1530 intel_flush_front(&brw->ctx);
1531 }
1532
1533 if (back_rb) {
1534 attachments[i++] = __DRI_BUFFER_BACK_LEFT;
1535 attachments[i++] = intel_bits_per_pixel(back_rb);
1536 }
1537
1538 assert(i <= ARRAY_SIZE(attachments));
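   /* attachments[] was filled with (attachment, bits-per-pixel) pairs, so the
    * loader is asked for i / 2 buffers below.
    */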
1539
1540 *buffers =
1541 dri_screen->dri2.loader->getBuffersWithFormat(drawable,
1542 &drawable->w,
1543 &drawable->h,
1544 attachments, i / 2,
1545 buffer_count,
1546 drawable->loaderPrivate);
1547 }
1548
1549 /**
1550 * \brief Assign a DRI buffer's DRM region to a renderbuffer.
1551 *
1552 * This is called from intel_update_renderbuffers().
1553 *
1554 * \par Note:
1555 * DRI buffers whose attachment point is DRI2BufferStencil or
1556 * DRI2BufferDepthStencil are handled as special cases.
1557 *
1558 * \param buffer_name is a human readable name, such as "dri2 front buffer",
1559 * that is passed to brw_bo_gem_create_from_name().
1560 *
1561 * \see intel_update_renderbuffers()
1562 */
1563 static void
1564 intel_process_dri2_buffer(struct brw_context *brw,
1565 __DRIdrawable *drawable,
1566 __DRIbuffer *buffer,
1567 struct intel_renderbuffer *rb,
1568 const char *buffer_name)
1569 {
1570 struct gl_framebuffer *fb = drawable->driverPrivate;
1571 struct brw_bo *bo;
1572
1573 if (!rb)
1574 return;
1575
1576 unsigned num_samples = rb->Base.Base.NumSamples;
1577
1578 /* We try to avoid closing and reopening the same BO name, because the first
1579 * use of a mapping of the buffer involves a bunch of page faulting which is
1580 * moderately expensive.
1581 */
1582 struct intel_mipmap_tree *last_mt;
1583 if (num_samples == 0)
1584 last_mt = rb->mt;
1585 else
1586 last_mt = rb->singlesample_mt;
1587
1588 uint32_t old_name = 0;
1589 if (last_mt) {
1590 /* The bo already has a name because the miptree was created by a
1591 * previous call to intel_process_dri2_buffer(). If a bo already has a
1592 * name, then brw_bo_flink() is a low-cost getter. It does not
1593 * create a new name.
1594 */
1595 brw_bo_flink(last_mt->bo, &old_name);
1596 }
1597
1598 if (old_name == buffer->name)
1599 return;
1600
1601 if (unlikely(INTEL_DEBUG & DEBUG_DRI)) {
1602 fprintf(stderr,
1603 "attaching buffer %d, at %d, cpp %d, pitch %d\n",
1604 buffer->name, buffer->attachment,
1605 buffer->cpp, buffer->pitch);
1606 }
1607
1608 bo = brw_bo_gem_create_from_name(brw->bufmgr, buffer_name,
1609 buffer->name);
1610 if (!bo) {
1611 fprintf(stderr,
1612 "Failed to open BO for returned DRI2 buffer "
1613 "(%dx%d, %s, named %d).\n"
1614 "This is likely a bug in the X Server that will lead to a "
1615 "crash soon.\n",
1616 drawable->w, drawable->h, buffer_name, buffer->name);
1617 return;
1618 }
1619
1620 intel_update_winsys_renderbuffer_miptree(brw, rb, bo,
1621 drawable->w, drawable->h,
1622 buffer->pitch);
1623
1624 if (_mesa_is_front_buffer_drawing(fb) &&
1625 (buffer->attachment == __DRI_BUFFER_FRONT_LEFT ||
1626 buffer->attachment == __DRI_BUFFER_FAKE_FRONT_LEFT) &&
1627 rb->Base.Base.NumSamples > 1) {
1628 intel_renderbuffer_upsample(brw, rb);
1629 }
1630
1631 assert(rb->mt);
1632
1633 brw_bo_unreference(bo);
1634 }
1635
1636 /**
1637 * \brief Query DRI image loader to obtain a DRIdrawable's buffers.
1638 *
1639 * To determine which DRI buffers to request, examine the renderbuffers
1640 * attached to the drawable's framebuffer. Then request the buffers from
1641  * the image loader.
1642 *
1643 * This is called from intel_update_renderbuffers().
1644 *
1645 * \param drawable Drawable whose buffers are queried.
1646 * \param buffers [out] List of buffers returned by DRI2 query.
1647 * \param buffer_count [out] Number of buffers returned.
1648 *
1649 * \see intel_update_renderbuffers()
1650 */
1651
1652 static void
1653 intel_update_image_buffer(struct brw_context *intel,
1654 __DRIdrawable *drawable,
1655 struct intel_renderbuffer *rb,
1656 __DRIimage *buffer,
1657 enum __DRIimageBufferMask buffer_type)
1658 {
1659 struct gl_framebuffer *fb = drawable->driverPrivate;
1660
1661 if (!rb || !buffer->bo)
1662 return;
1663
1664 unsigned num_samples = rb->Base.Base.NumSamples;
1665
1666 /* Check and see if we're already bound to the right
1667 * buffer object
1668 */
1669 struct intel_mipmap_tree *last_mt;
1670 if (num_samples == 0)
1671 last_mt = rb->mt;
1672 else
1673 last_mt = rb->singlesample_mt;
1674
1675 if (last_mt && last_mt->bo == buffer->bo)
1676 return;
1677
1678 intel_update_winsys_renderbuffer_miptree(intel, rb, buffer->bo,
1679 buffer->width, buffer->height,
1680 buffer->pitch);
1681
1682 if (_mesa_is_front_buffer_drawing(fb) &&
1683 buffer_type == __DRI_IMAGE_BUFFER_FRONT &&
1684 rb->Base.Base.NumSamples > 1) {
1685 intel_renderbuffer_upsample(intel, rb);
1686 }
1687 }
1688
1689 static void
1690 intel_update_image_buffers(struct brw_context *brw, __DRIdrawable *drawable)
1691 {
1692 struct gl_framebuffer *fb = drawable->driverPrivate;
1693 __DRIscreen *dri_screen = brw->screen->driScrnPriv;
1694 struct intel_renderbuffer *front_rb;
1695 struct intel_renderbuffer *back_rb;
1696 struct __DRIimageList images;
1697 mesa_format format;
1698 uint32_t buffer_mask = 0;
1699 int ret;
1700
1701 front_rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
1702 back_rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
1703
1704 if (back_rb)
1705 format = intel_rb_format(back_rb);
1706 else if (front_rb)
1707 format = intel_rb_format(front_rb);
1708 else
1709 return;
1710
1711 if (front_rb && (_mesa_is_front_buffer_drawing(fb) ||
1712 _mesa_is_front_buffer_reading(fb) || !back_rb)) {
1713 buffer_mask |= __DRI_IMAGE_BUFFER_FRONT;
1714 }
1715
1716 if (back_rb)
1717 buffer_mask |= __DRI_IMAGE_BUFFER_BACK;
1718
1719 ret = dri_screen->image.loader->getBuffers(drawable,
1720 driGLFormatToImageFormat(format),
1721 &drawable->dri2.stamp,
1722 drawable->loaderPrivate,
1723 buffer_mask,
1724 &images);
1725 if (!ret)
1726 return;
1727
1728 if (images.image_mask & __DRI_IMAGE_BUFFER_FRONT) {
1729 drawable->w = images.front->width;
1730 drawable->h = images.front->height;
1731 intel_update_image_buffer(brw,
1732 drawable,
1733 front_rb,
1734 images.front,
1735 __DRI_IMAGE_BUFFER_FRONT);
1736 }
1737
1738 if (images.image_mask & __DRI_IMAGE_BUFFER_BACK) {
1739 drawable->w = images.back->width;
1740 drawable->h = images.back->height;
1741 intel_update_image_buffer(brw,
1742 drawable,
1743 back_rb,
1744 images.back,
1745 __DRI_IMAGE_BUFFER_BACK);
1746 }
1747 }