i965/miptree: Refactor intel_miptree_resolve_color
src/mesa/drivers/dri/i965/brw_context.c (mesa.git)
1 /*
2 Copyright 2003 VMware, Inc.
3 Copyright (C) Intel Corp. 2006. All Rights Reserved.
4 Intel funded Tungsten Graphics to
5 develop this 3D driver.
6
7 Permission is hereby granted, free of charge, to any person obtaining
8 a copy of this software and associated documentation files (the
9 "Software"), to deal in the Software without restriction, including
10 without limitation the rights to use, copy, modify, merge, publish,
11 distribute, sublicense, and/or sell copies of the Software, and to
12 permit persons to whom the Software is furnished to do so, subject to
13 the following conditions:
14
15 The above copyright notice and this permission notice (including the
16 next paragraph) shall be included in all copies or substantial
17 portions of the Software.
18
19 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
20 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
22 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
23 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
24 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
25 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26
27 **********************************************************************/
28 /*
29 * Authors:
30 * Keith Whitwell <keithw@vmware.com>
31 */
32
33
34 #include "compiler/nir/nir.h"
35 #include "main/api_exec.h"
36 #include "main/context.h"
37 #include "main/fbobject.h"
38 #include "main/extensions.h"
39 #include "main/imports.h"
40 #include "main/macros.h"
41 #include "main/points.h"
42 #include "main/version.h"
43 #include "main/vtxfmt.h"
44 #include "main/texobj.h"
45 #include "main/framebuffer.h"
46
47 #include "vbo/vbo_context.h"
48
49 #include "drivers/common/driverfuncs.h"
50 #include "drivers/common/meta.h"
51 #include "utils.h"
52
53 #include "brw_context.h"
54 #include "brw_defines.h"
55 #include "brw_blorp.h"
56 #include "brw_draw.h"
57 #include "brw_state.h"
58
59 #include "intel_batchbuffer.h"
60 #include "intel_buffer_objects.h"
61 #include "intel_buffers.h"
62 #include "intel_fbo.h"
63 #include "intel_mipmap_tree.h"
64 #include "intel_pixel.h"
65 #include "intel_image.h"
66 #include "intel_tex.h"
67 #include "intel_tex_obj.h"
68
69 #include "swrast_setup/swrast_setup.h"
70 #include "tnl/tnl.h"
71 #include "tnl/t_pipeline.h"
72 #include "util/ralloc.h"
73 #include "util/debug.h"
74 #include "isl/isl.h"
75
76 /***************************************
77 * Mesa's Driver Functions
78 ***************************************/
79
80 const char *const brw_vendor_string = "Intel Open Source Technology Center";
81
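/* Braswell parts share a PCI ID, so derive the marketing model number from
 * the EU count. The returned string must be exactly three characters long,
 * since it replaces the "XXX" placeholder in the renderer string below.
 */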
82 static const char *
83 get_bsw_model(const struct intel_screen *screen)
84 {
85 switch (screen->eu_total) {
86 case 16:
87 return "405";
88 case 12:
89 return "400";
90 default:
91       return "   ";
92 }
93 }
94
95 const char *
96 brw_get_renderer_string(const struct intel_screen *screen)
97 {
98 const char *chipset;
99 static char buffer[128];
100 char *bsw = NULL;
101
102 switch (screen->deviceID) {
103 #undef CHIPSET
104 #define CHIPSET(id, symbol, str) case id: chipset = str; break;
105 #include "pci_ids/i965_pci_ids.h"
106 default:
107 chipset = "Unknown Intel Chipset";
108 break;
109 }
110
111 /* Braswell branding is funny, so we have to fix it up here */
112 if (screen->deviceID == 0x22B1) {
113 bsw = strdup(chipset);
114 char *needle = strstr(bsw, "XXX");
115 if (needle) {
116 memcpy(needle, get_bsw_model(screen), 3);
117 chipset = bsw;
118 }
119 }
120
121 (void) driGetRendererString(buffer, chipset, 0);
122 free(bsw);
123 return buffer;
124 }
125
126 static const GLubyte *
127 intel_get_string(struct gl_context * ctx, GLenum name)
128 {
129 const struct brw_context *const brw = brw_context(ctx);
130
131 switch (name) {
132 case GL_VENDOR:
133 return (GLubyte *) brw_vendor_string;
134
135 case GL_RENDERER:
136 return
137 (GLubyte *) brw_get_renderer_string(brw->screen);
138
139 default:
140 return NULL;
141 }
142 }
143
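/* Viewport hook: when drawing to a window-system framebuffer, invalidate the
 * DRI drawables so their buffers (and sizes) are re-queried from the loader.
 * Only installed when the loader cannot deliver invalidate events itself
 * (see brw_init_driver_functions).
 */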
144 static void
145 intel_viewport(struct gl_context *ctx)
146 {
147 struct brw_context *brw = brw_context(ctx);
148 __DRIcontext *driContext = brw->driContext;
149
150 if (_mesa_is_winsys_fbo(ctx->DrawBuffer)) {
151 if (driContext->driDrawablePriv)
152 dri2InvalidateDrawable(driContext->driDrawablePriv);
153 if (driContext->driReadablePriv)
154 dri2InvalidateDrawable(driContext->driReadablePriv);
155 }
156 }
157
158 static void
159 intel_update_framebuffer(struct gl_context *ctx,
160 struct gl_framebuffer *fb)
161 {
162 struct brw_context *brw = brw_context(ctx);
163
164 /* Quantize the derived default number of samples
165 */
166 fb->DefaultGeometry._NumSamples =
167 intel_quantize_num_samples(brw->screen,
168 fb->DefaultGeometry.NumSamples);
169 }
170
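/* Mark any color attachments of the draw framebuffer that share the given BO
 * so that their auxiliary (CCS/MCS) buffers are not used for subsequent
 * rendering. Returns true if at least one attachment was flagged.
 */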
171 static bool
172 intel_disable_rb_aux_buffer(struct brw_context *brw, const struct brw_bo *bo)
173 {
174 const struct gl_framebuffer *fb = brw->ctx.DrawBuffer;
175 bool found = false;
176
177 for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
178 const struct intel_renderbuffer *irb =
179 intel_renderbuffer(fb->_ColorDrawBuffers[i]);
180
181 if (irb && irb->mt->bo == bo) {
182 found = brw->draw_aux_buffer_disabled[i] = true;
183 }
184 }
185
186 return found;
187 }
188
189 /* On Gen9, color buffers may be compressed by the hardware (lossless
190  * compression). There are, however, format restrictions and care needs to be
191  * taken that the sampler engine is capable of re-interpreting a buffer with
192  * a format different from the one the buffer was originally written with.
193 *
194 * For example, SRGB formats are not compressible and the sampler engine isn't
195 * capable of treating RGBA_UNORM as SRGB_ALPHA. In such a case the underlying
196 * color buffer needs to be resolved so that the sampling surface can be
197 * sampled as non-compressed (i.e., without the auxiliary MCS buffer being
198 * set).
199 */
200 static bool
201 intel_texture_view_requires_resolve(struct brw_context *brw,
202 struct intel_texture_object *intel_tex)
203 {
204 if (brw->gen < 9 ||
205 !intel_miptree_is_lossless_compressed(brw, intel_tex->mt))
206 return false;
207
208 const enum isl_format isl_format =
209 brw_isl_format_for_mesa_format(intel_tex->_Format);
210
211 if (isl_format_supports_ccs_e(&brw->screen->devinfo, isl_format))
212 return false;
213
214 perf_debug("Incompatible sampling format (%s) for rbc (%s)\n",
215 _mesa_get_format_name(intel_tex->_Format),
216 _mesa_get_format_name(intel_tex->mt->format));
217
218 if (intel_disable_rb_aux_buffer(brw, intel_tex->mt->bo))
219 perf_debug("Sampling renderbuffer with non-compressible format - "
220 "turning off compression");
221
222 return true;
223 }
224
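/* UpdateState hook: record the dirty GL state and perform any HiZ, depth and
 * color resolves needed so that texturing, image loads/stores and framebuffer
 * fetch observe coherent data before the next draw.
 */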
225 static void
226 intel_update_state(struct gl_context * ctx, GLuint new_state)
227 {
228 struct brw_context *brw = brw_context(ctx);
229 struct intel_texture_object *tex_obj;
230 struct intel_renderbuffer *depth_irb;
231
232 if (ctx->swrast_context)
233 _swrast_InvalidateState(ctx, new_state);
234 _vbo_InvalidateState(ctx, new_state);
235
236 brw->NewGLState |= new_state;
237
238 _mesa_unlock_context_textures(ctx);
239
240 /* Resolve the depth buffer's HiZ buffer. */
241 depth_irb = intel_get_renderbuffer(ctx->DrawBuffer, BUFFER_DEPTH);
242 if (depth_irb)
243 intel_renderbuffer_resolve_hiz(brw, depth_irb);
244
245 memset(brw->draw_aux_buffer_disabled, 0,
246 sizeof(brw->draw_aux_buffer_disabled));
247
248    /* Resolve the depth and color buffers and flush the render cache for each
249     * enabled texture. */
249 int maxEnabledUnit = ctx->Texture._MaxEnabledTexImageUnit;
250 for (int i = 0; i <= maxEnabledUnit; i++) {
251 if (!ctx->Texture.Unit[i]._Current)
252 continue;
253 tex_obj = intel_texture_object(ctx->Texture.Unit[i]._Current);
254 if (!tex_obj || !tex_obj->mt)
255 continue;
256
257          /* We need intel_texture_object::_Format to be valid */
258 intel_finalize_mipmap_tree(brw, i);
259
260 if (intel_miptree_sample_with_hiz(brw, tex_obj->mt))
261 intel_miptree_all_slices_resolve_hiz(brw, tex_obj->mt);
262 else
263 intel_miptree_all_slices_resolve_depth(brw, tex_obj->mt);
264          /* The sampling engine understands lossless compression, so resolving
265           * those surfaces should be skipped for performance reasons.
266 */
267 const int flags = intel_texture_view_requires_resolve(brw, tex_obj) ?
268 0 : INTEL_MIPTREE_IGNORE_CCS_E;
269 intel_miptree_all_slices_resolve_color(brw, tex_obj->mt, flags);
270 brw_render_cache_set_check_flush(brw, tex_obj->mt->bo);
271
272 if (tex_obj->base.StencilSampling ||
273 tex_obj->mt->format == MESA_FORMAT_S_UINT8) {
274 intel_update_r8stencil(brw, tex_obj->mt);
275 }
276 }
277
278 /* Resolve color for each active shader image. */
279 for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
280 const struct gl_program *prog = ctx->_Shader->CurrentProgram[i];
281
282 if (unlikely(prog && prog->info.num_images)) {
283 for (unsigned j = 0; j < prog->info.num_images; j++) {
284 struct gl_image_unit *u =
285 &ctx->ImageUnits[prog->sh.ImageUnits[j]];
286 tex_obj = intel_texture_object(u->TexObj);
287
288 if (tex_obj && tex_obj->mt) {
289 /* Access to images is implemented using indirect messages
290              * against the data port. Normal render target writes understand
291              * lossless compression, but unfortunately the typed/untyped
292              * read/write interface doesn't. Therefore even losslessly
293              * compressed surfaces need to be resolved prior to accessing
294              * them. Hence we skip setting INTEL_MIPTREE_IGNORE_CCS_E.
295 */
296 intel_miptree_all_slices_resolve_color(brw, tex_obj->mt, 0);
297
298 if (intel_miptree_is_lossless_compressed(brw, tex_obj->mt) &&
299 intel_disable_rb_aux_buffer(brw, tex_obj->mt->bo)) {
300 perf_debug("Using renderbuffer as shader image - turning "
301 "off lossless compression");
302 }
303
304 brw_render_cache_set_check_flush(brw, tex_obj->mt->bo);
305 }
306 }
307 }
308 }
309
310 /* Resolve color buffers for non-coherent framebuffer fetch. */
311 if (!ctx->Extensions.MESA_shader_framebuffer_fetch &&
312 ctx->FragmentProgram._Current &&
313 ctx->FragmentProgram._Current->info.outputs_read) {
314 const struct gl_framebuffer *fb = ctx->DrawBuffer;
315
316 for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
317 const struct intel_renderbuffer *irb =
318 intel_renderbuffer(fb->_ColorDrawBuffers[i]);
319
320 if (irb &&
321 intel_miptree_resolve_color(brw, irb->mt,
322 irb->mt_level, 1,
323 irb->mt_layer, irb->layer_count,
324 INTEL_MIPTREE_IGNORE_CCS_E))
325 brw_render_cache_set_check_flush(brw, irb->mt->bo);
326 }
327 }
328
329 /* If FRAMEBUFFER_SRGB is used on Gen9+ then we need to resolve any of the
330 * single-sampled color renderbuffers because the CCS buffer isn't
331 * supported for SRGB formats. This only matters if FRAMEBUFFER_SRGB is
332 * enabled because otherwise the surface state will be programmed with the
333 * linear equivalent format anyway.
334 */
335 if (brw->gen >= 9 && ctx->Color.sRGBEnabled) {
336 struct gl_framebuffer *fb = ctx->DrawBuffer;
337 for (int i = 0; i < fb->_NumColorDrawBuffers; i++) {
338 struct gl_renderbuffer *rb = fb->_ColorDrawBuffers[i];
339
340 if (rb == NULL)
341 continue;
342
343 struct intel_renderbuffer *irb = intel_renderbuffer(rb);
344 struct intel_mipmap_tree *mt = irb->mt;
345
346 if (mt == NULL ||
347 mt->num_samples > 1 ||
348 _mesa_get_srgb_format_linear(mt->format) == mt->format)
349 continue;
350
351          /* Lossless compression is not supported for SRGB formats, so it
352 * should be impossible to get here with such surfaces.
353 */
354 assert(!intel_miptree_is_lossless_compressed(brw, mt));
355 intel_miptree_all_slices_resolve_color(brw, mt, 0);
356 brw_render_cache_set_check_flush(brw, mt->bo);
357 }
358 }
359
360 _mesa_lock_context_textures(ctx);
361
362 if (new_state & _NEW_BUFFERS) {
363 intel_update_framebuffer(ctx, ctx->DrawBuffer);
364 if (ctx->DrawBuffer != ctx->ReadBuffer)
365 intel_update_framebuffer(ctx, ctx->ReadBuffer);
366 }
367 }
368
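/* Pick whichever front-buffer flush hook the screen's loader provides: the
 * image loader's if present, otherwise the DRI2 loader's.
 */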
369 #define flushFront(screen) ((screen)->image.loader ? (screen)->image.loader->flushFrontBuffer : (screen)->dri2.loader->flushFrontBuffer)
370
371 static void
372 intel_flush_front(struct gl_context *ctx)
373 {
374 struct brw_context *brw = brw_context(ctx);
375 __DRIcontext *driContext = brw->driContext;
376 __DRIdrawable *driDrawable = driContext->driDrawablePriv;
377 __DRIscreen *const dri_screen = brw->screen->driScrnPriv;
378
379 if (brw->front_buffer_dirty && _mesa_is_winsys_fbo(ctx->DrawBuffer)) {
380 if (flushFront(dri_screen) && driDrawable &&
381 driDrawable->loaderPrivate) {
382
383 /* Resolve before flushing FAKE_FRONT_LEFT to FRONT_LEFT.
384 *
385           * This potentially resolves both the front and back buffers. Resolving
386           * the back buffer is unnecessary, but it harms nothing except
387 * performance. And no one cares about front-buffer render
388 * performance.
389 */
390 intel_resolve_for_dri2_flush(brw, driDrawable);
391 intel_batchbuffer_flush(brw);
392
393 flushFront(dri_screen)(driDrawable, driDrawable->loaderPrivate);
394
395 /* We set the dirty bit in intel_prepare_render() if we're
396 * front buffer rendering once we get there.
397 */
398 brw->front_buffer_dirty = false;
399 }
400 }
401 }
402
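/* Called via ctx->Driver.Flush() / glFlush(): submit the pending batch, flush
 * any front-buffer rendering out to the window system, and note that flush
 * throttling should be applied.
 */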
403 static void
404 intel_glFlush(struct gl_context *ctx)
405 {
406 struct brw_context *brw = brw_context(ctx);
407
408 intel_batchbuffer_flush(brw);
409 intel_flush_front(ctx);
410
411 brw->need_flush_throttle = true;
412 }
413
414 static void
415 intel_finish(struct gl_context * ctx)
416 {
417 struct brw_context *brw = brw_context(ctx);
418
419 intel_glFlush(ctx);
420
421 if (brw->batch.last_bo)
422 brw_bo_wait_rendering(brw, brw->batch.last_bo);
423 }
424
425 static void
426 brw_init_driver_functions(struct brw_context *brw,
427 struct dd_function_table *functions)
428 {
429 _mesa_init_driver_functions(functions);
430
431 /* GLX uses DRI2 invalidate events to handle window resizing.
432 * Unfortunately, EGL does not - libEGL is written in XCB (not Xlib),
433 * which doesn't provide a mechanism for snooping the event queues.
434 *
435 * So EGL still relies on viewport hacks to handle window resizing.
436 * This should go away with DRI3000.
437 */
438 if (!brw->driContext->driScreenPriv->dri2.useInvalidate)
439 functions->Viewport = intel_viewport;
440
441 functions->Flush = intel_glFlush;
442 functions->Finish = intel_finish;
443 functions->GetString = intel_get_string;
444 functions->UpdateState = intel_update_state;
445
446 intelInitTextureFuncs(functions);
447 intelInitTextureImageFuncs(functions);
448 intelInitTextureSubImageFuncs(functions);
449 intelInitTextureCopyImageFuncs(functions);
450 intelInitCopyImageFuncs(functions);
451 intelInitClearFuncs(functions);
452 intelInitBufferFuncs(functions);
453 intelInitPixelFuncs(functions);
454 intelInitBufferObjectFuncs(functions);
455 brw_init_syncobj_functions(functions);
456 brw_init_object_purgeable_functions(functions);
457
458 brwInitFragProgFuncs( functions );
459 brw_init_common_queryobj_functions(functions);
460 if (brw->gen >= 8 || brw->is_haswell)
461 hsw_init_queryobj_functions(functions);
462 else if (brw->gen >= 6)
463 gen6_init_queryobj_functions(functions);
464 else
465 gen4_init_queryobj_functions(functions);
466 brw_init_compute_functions(functions);
467 if (brw->gen >= 7)
468 brw_init_conditional_render_functions(functions);
469
470 functions->QueryInternalFormat = brw_query_internal_format;
471
472 functions->NewTransformFeedback = brw_new_transform_feedback;
473 functions->DeleteTransformFeedback = brw_delete_transform_feedback;
474 if (can_do_mi_math_and_lrr(brw->screen)) {
475 functions->BeginTransformFeedback = hsw_begin_transform_feedback;
476 functions->EndTransformFeedback = hsw_end_transform_feedback;
477 functions->PauseTransformFeedback = hsw_pause_transform_feedback;
478 functions->ResumeTransformFeedback = hsw_resume_transform_feedback;
479 } else if (brw->gen >= 7) {
480 functions->BeginTransformFeedback = gen7_begin_transform_feedback;
481 functions->EndTransformFeedback = gen7_end_transform_feedback;
482 functions->PauseTransformFeedback = gen7_pause_transform_feedback;
483 functions->ResumeTransformFeedback = gen7_resume_transform_feedback;
484 functions->GetTransformFeedbackVertexCount =
485 brw_get_transform_feedback_vertex_count;
486 } else {
487 functions->BeginTransformFeedback = brw_begin_transform_feedback;
488 functions->EndTransformFeedback = brw_end_transform_feedback;
489 functions->PauseTransformFeedback = brw_pause_transform_feedback;
490 functions->ResumeTransformFeedback = brw_resume_transform_feedback;
491 functions->GetTransformFeedbackVertexCount =
492 brw_get_transform_feedback_vertex_count;
493 }
494
495 if (brw->gen >= 6)
496 functions->GetSamplePosition = gen6_get_sample_position;
497 }
498
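/* Fill in the gl_constants limits that depend on the hardware generation.
 * Called once at context creation, after brw_initialize_cs_context_constants()
 * so that the compute work group limits are already in place.
 */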
499 static void
500 brw_initialize_context_constants(struct brw_context *brw)
501 {
502 struct gl_context *ctx = &brw->ctx;
503 const struct brw_compiler *compiler = brw->screen->compiler;
504
505 const bool stage_exists[MESA_SHADER_STAGES] = {
506 [MESA_SHADER_VERTEX] = true,
507 [MESA_SHADER_TESS_CTRL] = brw->gen >= 7,
508 [MESA_SHADER_TESS_EVAL] = brw->gen >= 7,
509 [MESA_SHADER_GEOMETRY] = brw->gen >= 6,
510 [MESA_SHADER_FRAGMENT] = true,
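      /* Expose compute only when the work group limits filled in by
       * brw_initialize_cs_context_constants() meet the API minimums (a work
       * group size of at least 1024 for desktop GL, 128 for ES 3.1), or when
       * ARB_compute_shader is forced on.
       */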
511 [MESA_SHADER_COMPUTE] =
512 ((ctx->API == API_OPENGL_COMPAT || ctx->API == API_OPENGL_CORE) &&
513 ctx->Const.MaxComputeWorkGroupSize[0] >= 1024) ||
514 (ctx->API == API_OPENGLES2 &&
515 ctx->Const.MaxComputeWorkGroupSize[0] >= 128) ||
516 _mesa_extension_override_enables.ARB_compute_shader,
517 };
518
519 unsigned num_stages = 0;
520 for (int i = 0; i < MESA_SHADER_STAGES; i++) {
521 if (stage_exists[i])
522 num_stages++;
523 }
524
525 unsigned max_samplers =
526 brw->gen >= 8 || brw->is_haswell ? BRW_MAX_TEX_UNIT : 16;
527
528 ctx->Const.MaxDualSourceDrawBuffers = 1;
529 ctx->Const.MaxDrawBuffers = BRW_MAX_DRAW_BUFFERS;
530 ctx->Const.MaxCombinedShaderOutputResources =
531 MAX_IMAGE_UNITS + BRW_MAX_DRAW_BUFFERS;
532
533 /* The timestamp register we can read for glGetTimestamp() is
534 * sometimes only 32 bits, before scaling to nanoseconds (depending
535 * on kernel).
536 *
537 * Once scaled to nanoseconds the timestamp would roll over at a
538 * non-power-of-two, so an application couldn't use
539 * GL_QUERY_COUNTER_BITS to handle rollover correctly. Instead, we
540 * report 36 bits and truncate at that (rolling over 5 times as
541 * often as the HW counter), and when the 32-bit counter rolls
542 * over, it happens to also be at a rollover in the reported value
543 * from near (1<<36) to 0.
544 *
545    * The low 32 bits roll over in ~343 seconds. Our 36-bit result
546 * rolls over every ~69 seconds.
547 */
548 ctx->Const.QueryCounterBits.Timestamp = 36;
549
550 ctx->Const.MaxTextureCoordUnits = 8; /* Mesa limit */
551 ctx->Const.MaxImageUnits = MAX_IMAGE_UNITS;
552 if (brw->gen >= 7) {
553 ctx->Const.MaxRenderbufferSize = 16384;
554 ctx->Const.MaxTextureLevels = MIN2(15 /* 16384 */, MAX_TEXTURE_LEVELS);
555 ctx->Const.MaxCubeTextureLevels = 15; /* 16384 */
556 } else {
557 ctx->Const.MaxRenderbufferSize = 8192;
558 ctx->Const.MaxTextureLevels = MIN2(14 /* 8192 */, MAX_TEXTURE_LEVELS);
559 ctx->Const.MaxCubeTextureLevels = 14; /* 8192 */
560 }
561 ctx->Const.Max3DTextureLevels = 12; /* 2048 */
562 ctx->Const.MaxArrayTextureLayers = brw->gen >= 7 ? 2048 : 512;
563 ctx->Const.MaxTextureMbytes = 1536;
564 ctx->Const.MaxTextureRectSize = 1 << 12;
565 ctx->Const.MaxTextureMaxAnisotropy = 16.0;
566 ctx->Const.MaxTextureLodBias = 15.0;
567 ctx->Const.StripTextureBorder = true;
568 if (brw->gen >= 7) {
569 ctx->Const.MaxProgramTextureGatherComponents = 4;
570 ctx->Const.MinProgramTextureGatherOffset = -32;
571 ctx->Const.MaxProgramTextureGatherOffset = 31;
572 } else if (brw->gen == 6) {
573 ctx->Const.MaxProgramTextureGatherComponents = 1;
574 ctx->Const.MinProgramTextureGatherOffset = -8;
575 ctx->Const.MaxProgramTextureGatherOffset = 7;
576 }
577
578 ctx->Const.MaxUniformBlockSize = 65536;
579
580 for (int i = 0; i < MESA_SHADER_STAGES; i++) {
581 struct gl_program_constants *prog = &ctx->Const.Program[i];
582
583 if (!stage_exists[i])
584 continue;
585
586 prog->MaxTextureImageUnits = max_samplers;
587
588 prog->MaxUniformBlocks = BRW_MAX_UBO;
589 prog->MaxCombinedUniformComponents =
590 prog->MaxUniformComponents +
591 ctx->Const.MaxUniformBlockSize / 4 * prog->MaxUniformBlocks;
592
593 prog->MaxAtomicCounters = MAX_ATOMIC_COUNTERS;
594 prog->MaxAtomicBuffers = BRW_MAX_ABO;
595 prog->MaxImageUniforms = compiler->scalar_stage[i] ? BRW_MAX_IMAGES : 0;
596 prog->MaxShaderStorageBlocks = BRW_MAX_SSBO;
597 }
598
599 ctx->Const.MaxTextureUnits =
600 MIN2(ctx->Const.MaxTextureCoordUnits,
601 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits);
602
603 ctx->Const.MaxUniformBufferBindings = num_stages * BRW_MAX_UBO;
604 ctx->Const.MaxCombinedUniformBlocks = num_stages * BRW_MAX_UBO;
605 ctx->Const.MaxCombinedAtomicBuffers = num_stages * BRW_MAX_ABO;
606 ctx->Const.MaxCombinedShaderStorageBlocks = num_stages * BRW_MAX_SSBO;
607 ctx->Const.MaxShaderStorageBufferBindings = num_stages * BRW_MAX_SSBO;
608 ctx->Const.MaxCombinedTextureImageUnits = num_stages * max_samplers;
609 ctx->Const.MaxCombinedImageUniforms = num_stages * BRW_MAX_IMAGES;
610
611
612 /* Hardware only supports a limited number of transform feedback buffers.
613 * So we need to override the Mesa default (which is based only on software
614 * limits).
615 */
616 ctx->Const.MaxTransformFeedbackBuffers = BRW_MAX_SOL_BUFFERS;
617
618 /* On Gen6, in the worst case, we use up one binding table entry per
619 * transform feedback component (see comments above the definition of
620 * BRW_MAX_SOL_BINDINGS, in brw_context.h), so we need to advertise a value
621 * for MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS equal to
622 * BRW_MAX_SOL_BINDINGS.
623 *
624 * In "separate components" mode, we need to divide this value by
625 * BRW_MAX_SOL_BUFFERS, so that the total number of binding table entries
626 * used up by all buffers will not exceed BRW_MAX_SOL_BINDINGS.
627 */
628 ctx->Const.MaxTransformFeedbackInterleavedComponents = BRW_MAX_SOL_BINDINGS;
629 ctx->Const.MaxTransformFeedbackSeparateComponents =
630 BRW_MAX_SOL_BINDINGS / BRW_MAX_SOL_BUFFERS;
631
632 ctx->Const.AlwaysUseGetTransformFeedbackVertexCount =
633 !can_do_mi_math_and_lrr(brw->screen);
634
635 int max_samples;
636 const int *msaa_modes = intel_supported_msaa_modes(brw->screen);
637 const int clamp_max_samples =
638 driQueryOptioni(&brw->optionCache, "clamp_max_samples");
639
640 if (clamp_max_samples < 0) {
641 max_samples = msaa_modes[0];
642 } else {
643 /* Select the largest supported MSAA mode that does not exceed
644 * clamp_max_samples.
645 */
646 max_samples = 0;
647 for (int i = 0; msaa_modes[i] != 0; ++i) {
648 if (msaa_modes[i] <= clamp_max_samples) {
649 max_samples = msaa_modes[i];
650 break;
651 }
652 }
653 }
654
655 ctx->Const.MaxSamples = max_samples;
656 ctx->Const.MaxColorTextureSamples = max_samples;
657 ctx->Const.MaxDepthTextureSamples = max_samples;
658 ctx->Const.MaxIntegerSamples = max_samples;
659 ctx->Const.MaxImageSamples = 0;
660
661 /* gen6_set_sample_maps() sets SampleMap{2,4,8}x variables which are used
662 * to map indices of rectangular grid to sample numbers within a pixel.
663 * These variables are used by GL_EXT_framebuffer_multisample_blit_scaled
664 * extension implementation. For more details see the comment above
665 * gen6_set_sample_maps() definition.
666 */
667 gen6_set_sample_maps(ctx);
668
669 ctx->Const.MinLineWidth = 1.0;
670 ctx->Const.MinLineWidthAA = 1.0;
671 if (brw->gen >= 6) {
672 ctx->Const.MaxLineWidth = 7.375;
673 ctx->Const.MaxLineWidthAA = 7.375;
674 ctx->Const.LineWidthGranularity = 0.125;
675 } else {
676 ctx->Const.MaxLineWidth = 7.0;
677 ctx->Const.MaxLineWidthAA = 7.0;
678 ctx->Const.LineWidthGranularity = 0.5;
679 }
680
681 /* For non-antialiased lines, we have to round the line width to the
682 * nearest whole number. Make sure that we don't advertise a line
683 * width that, when rounded, will be beyond the actual hardware
684 * maximum.
685 */
686 assert(roundf(ctx->Const.MaxLineWidth) <= ctx->Const.MaxLineWidth);
687
688 ctx->Const.MinPointSize = 1.0;
689 ctx->Const.MinPointSizeAA = 1.0;
690 ctx->Const.MaxPointSize = 255.0;
691 ctx->Const.MaxPointSizeAA = 255.0;
692 ctx->Const.PointSizeGranularity = 1.0;
693
694 if (brw->gen >= 5 || brw->is_g4x)
695 ctx->Const.MaxClipPlanes = 8;
696
697 ctx->Const.GLSLTessLevelsAsInputs = true;
698 ctx->Const.LowerTCSPatchVerticesIn = brw->gen >= 8;
699 ctx->Const.LowerTESPatchVerticesIn = true;
700 ctx->Const.PrimitiveRestartForPatches = true;
701
702 ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeInstructions = 16 * 1024;
703 ctx->Const.Program[MESA_SHADER_VERTEX].MaxAluInstructions = 0;
704 ctx->Const.Program[MESA_SHADER_VERTEX].MaxTexInstructions = 0;
705 ctx->Const.Program[MESA_SHADER_VERTEX].MaxTexIndirections = 0;
706 ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAluInstructions = 0;
707 ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTexInstructions = 0;
708 ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTexIndirections = 0;
709 ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAttribs = 16;
710 ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTemps = 256;
711 ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAddressRegs = 1;
712 ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeParameters = 1024;
713 ctx->Const.Program[MESA_SHADER_VERTEX].MaxEnvParams =
714 MIN2(ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeParameters,
715 ctx->Const.Program[MESA_SHADER_VERTEX].MaxEnvParams);
716
717 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeInstructions = 1024;
718 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAluInstructions = 1024;
719 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTexInstructions = 1024;
720 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTexIndirections = 1024;
721 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAttribs = 12;
722 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTemps = 256;
723 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAddressRegs = 0;
724 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeParameters = 1024;
725 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxEnvParams =
726 MIN2(ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeParameters,
727 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxEnvParams);
728
729 /* Fragment shaders use real, 32-bit twos-complement integers for all
730 * integer types.
731 */
732 ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.RangeMin = 31;
733 ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.RangeMax = 30;
734 ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.Precision = 0;
735 ctx->Const.Program[MESA_SHADER_FRAGMENT].HighInt = ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt;
736 ctx->Const.Program[MESA_SHADER_FRAGMENT].MediumInt = ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt;
737
738 ctx->Const.Program[MESA_SHADER_VERTEX].LowInt.RangeMin = 31;
739 ctx->Const.Program[MESA_SHADER_VERTEX].LowInt.RangeMax = 30;
740 ctx->Const.Program[MESA_SHADER_VERTEX].LowInt.Precision = 0;
741 ctx->Const.Program[MESA_SHADER_VERTEX].HighInt = ctx->Const.Program[MESA_SHADER_VERTEX].LowInt;
742 ctx->Const.Program[MESA_SHADER_VERTEX].MediumInt = ctx->Const.Program[MESA_SHADER_VERTEX].LowInt;
743
744    /* Gen6 converts quads to polygons at the beginning of the 3D pipeline,
745     * but we're not sure how it handles the vertex ordering that affects the
746     * provoking vertex decision. Always use the last-vertex convention for
747     * quad primitives, which works as expected for now.
748 */
749 if (brw->gen >= 6)
750 ctx->Const.QuadsFollowProvokingVertexConvention = false;
751
752 ctx->Const.NativeIntegers = true;
753 ctx->Const.VertexID_is_zero_based = true;
754
755 /* Regarding the CMP instruction, the Ivybridge PRM says:
756 *
757 * "For each enabled channel 0b or 1b is assigned to the appropriate flag
758 * bit and 0/all zeros or all ones (e.g, byte 0xFF, word 0xFFFF, DWord
759 * 0xFFFFFFFF) is assigned to dst."
760 *
761 * but PRMs for earlier generations say
762 *
763 * "In dword format, one GRF may store up to 8 results. When the register
764 * is used later as a vector of Booleans, as only LSB at each channel
765 * contains meaning [sic] data, software should make sure all higher bits
766 * are masked out (e.g. by 'and-ing' an [sic] 0x01 constant)."
767 *
768 * We select the representation of a true boolean uniform to be ~0, and fix
769    * the results of Gen <= 5 CMP instructions with -(result & 1).
770 */
771 ctx->Const.UniformBooleanTrue = ~0;
772
773 /* From the gen4 PRM, volume 4 page 127:
774 *
775 * "For SURFTYPE_BUFFER non-rendertarget surfaces, this field specifies
776 * the base address of the first element of the surface, computed in
777 * software by adding the surface base address to the byte offset of
778 * the element in the buffer."
779 *
780 * However, unaligned accesses are slower, so enforce buffer alignment.
781 */
782 ctx->Const.UniformBufferOffsetAlignment = 16;
783
784 /* ShaderStorageBufferOffsetAlignment should be a cacheline (64 bytes) so
785 * that we can safely have the CPU and GPU writing the same SSBO on
786  * non-cache-coherent systems (our Atom CPUs). With UBOs, the GPU never
787 * writes, so there's no problem. For an SSBO, the GPU and the CPU can
788 * be updating disjoint regions of the buffer simultaneously and that will
789 * break if the regions overlap the same cacheline.
790 */
791 ctx->Const.ShaderStorageBufferOffsetAlignment = 64;
792 ctx->Const.TextureBufferOffsetAlignment = 16;
793 ctx->Const.MaxTextureBufferSize = 128 * 1024 * 1024;
794
795 if (brw->gen >= 6) {
796 ctx->Const.MaxVarying = 32;
797 ctx->Const.Program[MESA_SHADER_VERTEX].MaxOutputComponents = 128;
798 ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxInputComponents = 64;
799 ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxOutputComponents = 128;
800 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxInputComponents = 128;
801 ctx->Const.Program[MESA_SHADER_TESS_CTRL].MaxInputComponents = 128;
802 ctx->Const.Program[MESA_SHADER_TESS_CTRL].MaxOutputComponents = 128;
803 ctx->Const.Program[MESA_SHADER_TESS_EVAL].MaxInputComponents = 128;
804 ctx->Const.Program[MESA_SHADER_TESS_EVAL].MaxOutputComponents = 128;
805 }
806
807 /* We want the GLSL compiler to emit code that uses condition codes */
808 for (int i = 0; i < MESA_SHADER_STAGES; i++) {
809 ctx->Const.ShaderCompilerOptions[i] =
810 brw->screen->compiler->glsl_compiler_options[i];
811 }
812
813 if (brw->gen >= 7) {
814 ctx->Const.MaxViewportWidth = 32768;
815 ctx->Const.MaxViewportHeight = 32768;
816 }
817
818 /* ARB_viewport_array, OES_viewport_array */
819 if (brw->gen >= 6) {
820 ctx->Const.MaxViewports = GEN6_NUM_VIEWPORTS;
821 ctx->Const.ViewportSubpixelBits = 0;
822
823 /* Cast to float before negating because MaxViewportWidth is unsigned.
824 */
825 ctx->Const.ViewportBounds.Min = -(float)ctx->Const.MaxViewportWidth;
826 ctx->Const.ViewportBounds.Max = ctx->Const.MaxViewportWidth;
827 }
828
829 /* ARB_gpu_shader5 */
830 if (brw->gen >= 7)
831 ctx->Const.MaxVertexStreams = MIN2(4, MAX_VERTEX_STREAMS);
832
833 /* ARB_framebuffer_no_attachments */
834 ctx->Const.MaxFramebufferWidth = 16384;
835 ctx->Const.MaxFramebufferHeight = 16384;
836 ctx->Const.MaxFramebufferLayers = ctx->Const.MaxArrayTextureLayers;
837 ctx->Const.MaxFramebufferSamples = max_samples;
838
839 /* OES_primitive_bounding_box */
840 ctx->Const.NoPrimitiveBoundingBoxOutput = true;
841 }
842
843 static void
844 brw_initialize_cs_context_constants(struct brw_context *brw)
845 {
846 struct gl_context *ctx = &brw->ctx;
847 const struct intel_screen *screen = brw->screen;
848 struct gen_device_info *devinfo = &brw->screen->devinfo;
849
850 /* FINISHME: Do this for all platforms that the kernel supports */
851 if (brw->is_cherryview &&
852 screen->subslice_total > 0 && screen->eu_total > 0) {
853 /* Logical CS threads = EUs per subslice * 7 threads per EU */
854 uint32_t max_cs_threads = screen->eu_total / screen->subslice_total * 7;
855
856 /* Fuse configurations may give more threads than expected, never less. */
857 if (max_cs_threads > devinfo->max_cs_threads)
858 devinfo->max_cs_threads = max_cs_threads;
859 }
860
861 /* Maximum number of scalar compute shader invocations that can be run in
862 * parallel in the same subslice assuming SIMD32 dispatch.
863 *
864 * We don't advertise more than 64 threads, because we are limited to 64 by
865 * our usage of thread_width_max in the gpgpu walker command. This only
866 * currently impacts Haswell, which otherwise might be able to advertise 70
867 * threads. With SIMD32 and 64 threads, Haswell still provides twice the
868    * threads. With SIMD32 and 64 threads, Haswell still provides twice the
869    * number of invocations required by ARB_compute_shader.
870 const unsigned max_threads = MIN2(64, devinfo->max_cs_threads);
871 const uint32_t max_invocations = 32 * max_threads;
872 ctx->Const.MaxComputeWorkGroupSize[0] = max_invocations;
873 ctx->Const.MaxComputeWorkGroupSize[1] = max_invocations;
874 ctx->Const.MaxComputeWorkGroupSize[2] = max_invocations;
875 ctx->Const.MaxComputeWorkGroupInvocations = max_invocations;
876 ctx->Const.MaxComputeSharedMemorySize = 64 * 1024;
877 }
878
879 /**
880 * Process driconf (drirc) options, setting appropriate context flags.
881 *
882 * intelInitExtensions still pokes at optionCache directly, in order to
883 * avoid advertising various extensions. No flags are set, so it makes
884 * sense to continue doing that there.
885 */
886 static void
887 brw_process_driconf_options(struct brw_context *brw)
888 {
889 struct gl_context *ctx = &brw->ctx;
890
891 driOptionCache *options = &brw->optionCache;
892 driParseConfigFiles(options, &brw->screen->optionCache,
893 brw->driContext->driScreenPriv->myNum, "i965");
894
895 int bo_reuse_mode = driQueryOptioni(options, "bo_reuse");
896 switch (bo_reuse_mode) {
897 case DRI_CONF_BO_REUSE_DISABLED:
898 break;
899 case DRI_CONF_BO_REUSE_ALL:
900 brw_bufmgr_enable_reuse(brw->bufmgr);
901 break;
902 }
903
904 if (INTEL_DEBUG & DEBUG_NO_HIZ) {
905 brw->has_hiz = false;
906 /* On gen6, you can only do separate stencil with HIZ. */
907 if (brw->gen == 6)
908 brw->has_separate_stencil = false;
909 }
910
911 if (driQueryOptionb(options, "always_flush_batch")) {
912 fprintf(stderr, "flushing batchbuffer before/after each draw call\n");
913 brw->always_flush_batch = true;
914 }
915
916 if (driQueryOptionb(options, "always_flush_cache")) {
917 fprintf(stderr, "flushing GPU caches before/after each draw call\n");
918 brw->always_flush_cache = true;
919 }
920
921 if (driQueryOptionb(options, "disable_throttling")) {
922 fprintf(stderr, "disabling flush throttling\n");
923 brw->disable_throttling = true;
924 }
925
926 brw->precompile = driQueryOptionb(&brw->optionCache, "shader_precompile");
927
928 if (driQueryOptionb(&brw->optionCache, "precise_trig"))
929 brw->screen->compiler->precise_trig = true;
930
931 ctx->Const.ForceGLSLExtensionsWarn =
932 driQueryOptionb(options, "force_glsl_extensions_warn");
933
934 ctx->Const.ForceGLSLVersion =
935 driQueryOptioni(options, "force_glsl_version");
936
937 ctx->Const.DisableGLSLLineContinuations =
938 driQueryOptionb(options, "disable_glsl_line_continuations");
939
940 ctx->Const.AllowGLSLExtensionDirectiveMidShader =
941 driQueryOptionb(options, "allow_glsl_extension_directive_midshader");
942
943 ctx->Const.AllowGLSLBuiltinVariableRedeclaration =
944 driQueryOptionb(options, "allow_glsl_builtin_variable_redeclaration");
945
946 ctx->Const.AllowHigherCompatVersion =
947 driQueryOptionb(options, "allow_higher_compat_version");
948
949 ctx->Const.ForceGLSLAbsSqrt =
950 driQueryOptionb(options, "force_glsl_abs_sqrt");
951
952 ctx->Const.GLSLZeroInit = driQueryOptionb(options, "glsl_zero_init");
953
954 brw->dual_color_blend_by_location =
955 driQueryOptionb(options, "dual_color_blend_by_location");
956 }
957
958 GLboolean
959 brwCreateContext(gl_api api,
960 const struct gl_config *mesaVis,
961 __DRIcontext *driContextPriv,
962 unsigned major_version,
963 unsigned minor_version,
964 uint32_t flags,
965 bool notify_reset,
966 unsigned *dri_ctx_error,
967 void *sharedContextPrivate)
968 {
969 struct gl_context *shareCtx = (struct gl_context *) sharedContextPrivate;
970 struct intel_screen *screen = driContextPriv->driScreenPriv->driverPrivate;
971 const struct gen_device_info *devinfo = &screen->devinfo;
972 struct dd_function_table functions;
973
974 /* Only allow the __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS flag if the kernel
975 * provides us with context reset notifications.
976 */
977 uint32_t allowed_flags = __DRI_CTX_FLAG_DEBUG
978 | __DRI_CTX_FLAG_FORWARD_COMPATIBLE;
979
980 if (screen->has_context_reset_notification)
981 allowed_flags |= __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS;
982
983 if (flags & ~allowed_flags) {
984 *dri_ctx_error = __DRI_CTX_ERROR_UNKNOWN_FLAG;
985 return false;
986 }
987
988 struct brw_context *brw = rzalloc(NULL, struct brw_context);
989 if (!brw) {
990 fprintf(stderr, "%s: failed to alloc context\n", __func__);
991 *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
992 return false;
993 }
994
995 driContextPriv->driverPrivate = brw;
996 brw->driContext = driContextPriv;
997 brw->screen = screen;
998 brw->bufmgr = screen->bufmgr;
999
1000 brw->gen = devinfo->gen;
1001 brw->gt = devinfo->gt;
1002 brw->is_g4x = devinfo->is_g4x;
1003 brw->is_baytrail = devinfo->is_baytrail;
1004 brw->is_haswell = devinfo->is_haswell;
1005 brw->is_cherryview = devinfo->is_cherryview;
1006 brw->is_broxton = devinfo->is_broxton;
1007 brw->has_llc = devinfo->has_llc;
1008 brw->has_hiz = devinfo->has_hiz_and_separate_stencil;
1009 brw->has_separate_stencil = devinfo->has_hiz_and_separate_stencil;
1010 brw->has_pln = devinfo->has_pln;
1011 brw->has_compr4 = devinfo->has_compr4;
1012 brw->has_surface_tile_offset = devinfo->has_surface_tile_offset;
1013 brw->has_negative_rhw_bug = devinfo->has_negative_rhw_bug;
1014 brw->needs_unlit_centroid_workaround =
1015 devinfo->needs_unlit_centroid_workaround;
1016
1017 brw->must_use_separate_stencil = devinfo->must_use_separate_stencil;
1018 brw->has_swizzling = screen->hw_has_swizzling;
1019
1020 isl_device_init(&brw->isl_dev, devinfo, screen->hw_has_swizzling);
1021
1022 brw->vs.base.stage = MESA_SHADER_VERTEX;
1023 brw->tcs.base.stage = MESA_SHADER_TESS_CTRL;
1024 brw->tes.base.stage = MESA_SHADER_TESS_EVAL;
1025 brw->gs.base.stage = MESA_SHADER_GEOMETRY;
1026 brw->wm.base.stage = MESA_SHADER_FRAGMENT;
1027 if (brw->gen >= 8) {
1028 gen8_init_vtable_surface_functions(brw);
1029 brw->vtbl.emit_depth_stencil_hiz = gen8_emit_depth_stencil_hiz;
1030 } else if (brw->gen >= 7) {
1031 gen7_init_vtable_surface_functions(brw);
1032 brw->vtbl.emit_depth_stencil_hiz = gen7_emit_depth_stencil_hiz;
1033 } else if (brw->gen >= 6) {
1034 gen6_init_vtable_surface_functions(brw);
1035 brw->vtbl.emit_depth_stencil_hiz = gen6_emit_depth_stencil_hiz;
1036 } else {
1037 gen4_init_vtable_surface_functions(brw);
1038 brw->vtbl.emit_depth_stencil_hiz = brw_emit_depth_stencil_hiz;
1039 }
1040
1041 brw_init_driver_functions(brw, &functions);
1042
1043 if (notify_reset)
1044 functions.GetGraphicsResetStatus = brw_get_graphics_reset_status;
1045
1046 struct gl_context *ctx = &brw->ctx;
1047
1048 if (!_mesa_initialize_context(ctx, api, mesaVis, shareCtx, &functions)) {
1049 *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
1050 fprintf(stderr, "%s: failed to init mesa context\n", __func__);
1051 intelDestroyContext(driContextPriv);
1052 return false;
1053 }
1054
1055 driContextSetFlags(ctx, flags);
1056
1057 /* Initialize the software rasterizer and helper modules.
1058 *
1059 * As of GL 3.1 core, the gen4+ driver doesn't need the swrast context for
1060 * software fallbacks (which we have to support on legacy GL to do weird
1061 * glDrawPixels(), glBitmap(), and other functions).
1062 */
1063 if (api != API_OPENGL_CORE && api != API_OPENGLES2) {
1064 _swrast_CreateContext(ctx);
1065 }
1066
1067 _vbo_CreateContext(ctx);
1068 if (ctx->swrast_context) {
1069 _tnl_CreateContext(ctx);
1070 TNL_CONTEXT(ctx)->Driver.RunPipeline = _tnl_run_pipeline;
1071 _swsetup_CreateContext(ctx);
1072
1073 /* Configure swrast to match hardware characteristics: */
1074 _swrast_allow_pixel_fog(ctx, false);
1075 _swrast_allow_vertex_fog(ctx, true);
1076 }
1077
1078 _mesa_meta_init(ctx);
1079
1080 brw_process_driconf_options(brw);
1081
1082 if (INTEL_DEBUG & DEBUG_PERF)
1083 brw->perf_debug = true;
1084
1085 brw_initialize_cs_context_constants(brw);
1086 brw_initialize_context_constants(brw);
1087
1088 ctx->Const.ResetStrategy = notify_reset
1089 ? GL_LOSE_CONTEXT_ON_RESET_ARB : GL_NO_RESET_NOTIFICATION_ARB;
1090
1091 /* Reinitialize the context point state. It depends on ctx->Const values. */
1092 _mesa_init_point(ctx);
1093
1094 intel_fbo_init(brw);
1095
1096 intel_batchbuffer_init(&brw->batch, brw->bufmgr, brw->has_llc);
1097
1098 if (brw->gen >= 6) {
1099 /* Create a new hardware context. Using a hardware context means that
1100 * our GPU state will be saved/restored on context switch, allowing us
1101 * to assume that the GPU is in the same state we left it in.
1102 *
1103 * This is required for transform feedback buffer offsets, query objects,
1104 * and also allows us to reduce how much state we have to emit.
1105 */
1106 brw->hw_ctx = brw_create_hw_context(brw->bufmgr);
1107
1108 if (!brw->hw_ctx) {
1109 fprintf(stderr, "Failed to create hardware context.\n");
1110 intelDestroyContext(driContextPriv);
1111 return false;
1112 }
1113 }
1114
1115 if (brw_init_pipe_control(brw, devinfo)) {
1116 *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
1117 intelDestroyContext(driContextPriv);
1118 return false;
1119 }
1120
1121 brw_init_state(brw);
1122
1123 intelInitExtensions(ctx);
1124
1125 brw_init_surface_formats(brw);
1126
1127 brw_blorp_init(brw);
1128
1129 brw->urb.size = devinfo->urb.size;
1130
1131 if (brw->gen == 6)
1132 brw->urb.gs_present = false;
1133
1134 brw->prim_restart.in_progress = false;
1135 brw->prim_restart.enable_cut_index = false;
1136 brw->gs.enabled = false;
1137 brw->clip.viewport_count = 1;
1138
1139 brw->predicate.state = BRW_PREDICATE_STATE_RENDER;
1140
1141 brw->max_gtt_map_object_size = screen->max_gtt_map_object_size;
1142
1143 ctx->VertexProgram._MaintainTnlProgram = true;
1144 ctx->FragmentProgram._MaintainTexEnvProgram = true;
1145
1146 brw_draw_init( brw );
1147
1148 if ((flags & __DRI_CTX_FLAG_DEBUG) != 0) {
1149 /* Turn on some extra GL_ARB_debug_output generation. */
1150 brw->perf_debug = true;
1151 }
1152
1153 if ((flags & __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS) != 0) {
1154 ctx->Const.ContextFlags |= GL_CONTEXT_FLAG_ROBUST_ACCESS_BIT_ARB;
1155 ctx->Const.RobustAccess = GL_TRUE;
1156 }
1157
1158 if (INTEL_DEBUG & DEBUG_SHADER_TIME)
1159 brw_init_shader_time(brw);
1160
1161 _mesa_compute_version(ctx);
1162
1163 _mesa_initialize_dispatch_tables(ctx);
1164 _mesa_initialize_vbo_vtxfmt(ctx);
1165
1166 if (ctx->Extensions.INTEL_performance_query)
1167 brw_init_performance_queries(brw);
1168
1169 vbo_use_buffer_objects(ctx);
1170 vbo_always_unmap_buffers(ctx);
1171
1172 return true;
1173 }
1174
1175 void
1176 intelDestroyContext(__DRIcontext * driContextPriv)
1177 {
1178 struct brw_context *brw =
1179 (struct brw_context *) driContextPriv->driverPrivate;
1180 struct gl_context *ctx = &brw->ctx;
1181
1182 _mesa_meta_free(&brw->ctx);
1183
1184 if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
1185 /* Force a report. */
1186 brw->shader_time.report_time = 0;
1187
1188 brw_collect_and_report_shader_time(brw);
1189 brw_destroy_shader_time(brw);
1190 }
1191
1192 if (brw->gen >= 6)
1193 blorp_finish(&brw->blorp);
1194
1195 brw_destroy_state(brw);
1196 brw_draw_destroy(brw);
1197
1198 brw_bo_unreference(brw->curbe.curbe_bo);
1199 if (brw->vs.base.scratch_bo)
1200 brw_bo_unreference(brw->vs.base.scratch_bo);
1201 if (brw->tcs.base.scratch_bo)
1202 brw_bo_unreference(brw->tcs.base.scratch_bo);
1203 if (brw->tes.base.scratch_bo)
1204 brw_bo_unreference(brw->tes.base.scratch_bo);
1205 if (brw->gs.base.scratch_bo)
1206 brw_bo_unreference(brw->gs.base.scratch_bo);
1207 if (brw->wm.base.scratch_bo)
1208 brw_bo_unreference(brw->wm.base.scratch_bo);
1209
1210 brw_destroy_hw_context(brw->bufmgr, brw->hw_ctx);
1211
1212 if (ctx->swrast_context) {
1213 _swsetup_DestroyContext(&brw->ctx);
1214 _tnl_DestroyContext(&brw->ctx);
1215 }
1216 _vbo_DestroyContext(&brw->ctx);
1217
1218 if (ctx->swrast_context)
1219 _swrast_DestroyContext(&brw->ctx);
1220
1221 brw_fini_pipe_control(brw);
1222 intel_batchbuffer_free(&brw->batch);
1223
1224 brw_bo_unreference(brw->throttle_batch[1]);
1225 brw_bo_unreference(brw->throttle_batch[0]);
1226 brw->throttle_batch[1] = NULL;
1227 brw->throttle_batch[0] = NULL;
1228
1229 driDestroyOptionCache(&brw->optionCache);
1230
1231 /* free the Mesa context */
1232 _mesa_free_context_data(&brw->ctx);
1233
1234 ralloc_free(brw);
1235 driContextPriv->driverPrivate = NULL;
1236 }
1237
1238 GLboolean
1239 intelUnbindContext(__DRIcontext * driContextPriv)
1240 {
1241    /* Unset current context and dispatch table */
1242 _mesa_make_current(NULL, NULL, NULL);
1243
1244 return true;
1245 }
1246
1247 /**
1248  * Fixes up the context for GLES2/3 with our default-to-sRGB-capable behavior
1249 * on window system framebuffers.
1250 *
1251 * Desktop GL is fairly reasonable in its handling of sRGB: You can ask if
1252 * your renderbuffer can do sRGB encode, and you can flip a switch that does
1253 * sRGB encode if the renderbuffer can handle it. You can ask specifically
1254 * for a visual where you're guaranteed to be capable, but it turns out that
1255 * everyone just makes all their ARGB8888 visuals capable and doesn't offer
1256 * incapable ones, because there's no difference between the two in resources
1257 * used. Applications thus get built that accidentally rely on the default
1258 * visual choice being sRGB, so we make ours sRGB capable. Everything sounds
1259 * great...
1260 *
1261 * But for GLES2/3, they decided that it was silly to not turn on sRGB encode
1262 * for sRGB renderbuffers you made with the GL_EXT_texture_sRGB equivalent.
1263 * So they removed the enable knob and made it "if the renderbuffer is sRGB
1264 * capable, do sRGB encode". Then, for your window system renderbuffers, you
1265 * can ask for sRGB visuals and get sRGB encode, or not ask for sRGB visuals
1266 * and get no sRGB encode (assuming that both kinds of visual are available).
1267 * Thus our choice to support sRGB by default on our visuals for desktop would
1268 * result in broken rendering of GLES apps that aren't expecting sRGB encode.
1269 *
1270 * Unfortunately, renderbuffer setup happens before a context is created. So
1271 * in intel_screen.c we always set up sRGB, and here, if you're a GLES2/3
1272 * context (without an sRGB visual, though we don't have sRGB visuals exposed
1273 * yet), we go turn that back off before anyone finds out.
1274 */
1275 static void
1276 intel_gles3_srgb_workaround(struct brw_context *brw,
1277 struct gl_framebuffer *fb)
1278 {
1279 struct gl_context *ctx = &brw->ctx;
1280
1281 if (_mesa_is_desktop_gl(ctx) || !fb->Visual.sRGBCapable)
1282 return;
1283
1284 /* Some day when we support the sRGB capable bit on visuals available for
1285 * GLES, we'll need to respect that and not disable things here.
1286 */
1287 fb->Visual.sRGBCapable = false;
1288 for (int i = 0; i < BUFFER_COUNT; i++) {
1289 struct gl_renderbuffer *rb = fb->Attachment[i].Renderbuffer;
1290 if (rb)
1291 rb->Format = _mesa_get_srgb_format_linear(rb->Format);
1292 }
1293 }
1294
1295 GLboolean
1296 intelMakeCurrent(__DRIcontext * driContextPriv,
1297 __DRIdrawable * driDrawPriv,
1298 __DRIdrawable * driReadPriv)
1299 {
1300 struct brw_context *brw;
1301 GET_CURRENT_CONTEXT(curCtx);
1302
1303 if (driContextPriv)
1304 brw = (struct brw_context *) driContextPriv->driverPrivate;
1305 else
1306 brw = NULL;
1307
1308 /* According to the glXMakeCurrent() man page: "Pending commands to
1309 * the previous context, if any, are flushed before it is released."
1310 * But only flush if we're actually changing contexts.
1311 */
1312 if (brw_context(curCtx) && brw_context(curCtx) != brw) {
1313 _mesa_flush(curCtx);
1314 }
1315
1316 if (driContextPriv) {
1317 struct gl_context *ctx = &brw->ctx;
1318 struct gl_framebuffer *fb, *readFb;
1319
1320 if (driDrawPriv == NULL) {
1321 fb = _mesa_get_incomplete_framebuffer();
1322 } else {
1323 fb = driDrawPriv->driverPrivate;
1324 driContextPriv->dri2.draw_stamp = driDrawPriv->dri2.stamp - 1;
1325 }
1326
1327 if (driReadPriv == NULL) {
1328 readFb = _mesa_get_incomplete_framebuffer();
1329 } else {
1330 readFb = driReadPriv->driverPrivate;
1331 driContextPriv->dri2.read_stamp = driReadPriv->dri2.stamp - 1;
1332 }
1333
1334 /* The sRGB workaround changes the renderbuffer's format. We must change
1335        * the format before the renderbuffer's miptree gets allocated, otherwise
1336 * the formats of the renderbuffer and its miptree will differ.
1337 */
1338 intel_gles3_srgb_workaround(brw, fb);
1339 intel_gles3_srgb_workaround(brw, readFb);
1340
1341 /* If the context viewport hasn't been initialized, force a call out to
1342 * the loader to get buffers so we have a drawable size for the initial
1343 * viewport. */
1344 if (!brw->ctx.ViewportInitialized)
1345 intel_prepare_render(brw);
1346
1347 _mesa_make_current(ctx, fb, readFb);
1348 } else {
1349 _mesa_make_current(NULL, NULL, NULL);
1350 }
1351
1352 return true;
1353 }
1354
1355 void
1356 intel_resolve_for_dri2_flush(struct brw_context *brw,
1357 __DRIdrawable *drawable)
1358 {
1359 if (brw->gen < 6) {
1360 /* MSAA and fast color clear are not supported, so don't waste time
1361 * checking whether a resolve is needed.
1362 */
1363 return;
1364 }
1365
1366 struct gl_framebuffer *fb = drawable->driverPrivate;
1367 struct intel_renderbuffer *rb;
1368
1369 /* Usually, only the back buffer will need to be downsampled. However,
1370 * the front buffer will also need it if the user has rendered into it.
1371 */
1372 static const gl_buffer_index buffers[2] = {
1373 BUFFER_BACK_LEFT,
1374 BUFFER_FRONT_LEFT,
1375 };
1376
1377 for (int i = 0; i < 2; ++i) {
1378 rb = intel_get_renderbuffer(fb, buffers[i]);
1379 if (rb == NULL || rb->mt == NULL)
1380 continue;
1381 if (rb->mt->num_samples <= 1) {
1382 assert(rb->mt_layer == 0 && rb->mt_level == 0 &&
1383 rb->layer_count == 1);
1384 intel_miptree_resolve_color(brw, rb->mt, 0, 1, 0, 1, 0);
1385 } else {
1386 intel_renderbuffer_downsample(brw, rb);
1387 }
1388 }
1389 }
1390
1391 static unsigned
1392 intel_bits_per_pixel(const struct intel_renderbuffer *rb)
1393 {
1394 return _mesa_get_format_bytes(intel_rb_format(rb)) * 8;
1395 }
1396
1397 static void
1398 intel_query_dri2_buffers(struct brw_context *brw,
1399 __DRIdrawable *drawable,
1400 __DRIbuffer **buffers,
1401 int *count);
1402
1403 static void
1404 intel_process_dri2_buffer(struct brw_context *brw,
1405 __DRIdrawable *drawable,
1406 __DRIbuffer *buffer,
1407 struct intel_renderbuffer *rb,
1408 const char *buffer_name);
1409
1410 static void
1411 intel_update_image_buffers(struct brw_context *brw, __DRIdrawable *drawable);
1412
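/* Classic DRI2 path of intel_update_renderbuffers(): ask the loader which
 * buffers currently back the drawable and attach each one to the matching
 * renderbuffer.
 */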
1413 static void
1414 intel_update_dri2_buffers(struct brw_context *brw, __DRIdrawable *drawable)
1415 {
1416 struct gl_framebuffer *fb = drawable->driverPrivate;
1417 struct intel_renderbuffer *rb;
1418 __DRIbuffer *buffers = NULL;
1419 int count;
1420 const char *region_name;
1421
1422 /* Set this up front, so that in case our buffers get invalidated
1423 * while we're getting new buffers, we don't clobber the stamp and
1424 * thus ignore the invalidate. */
1425 drawable->lastStamp = drawable->dri2.stamp;
1426
1427 if (unlikely(INTEL_DEBUG & DEBUG_DRI))
1428 fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);
1429
1430 intel_query_dri2_buffers(brw, drawable, &buffers, &count);
1431
1432 if (buffers == NULL)
1433 return;
1434
1435 for (int i = 0; i < count; i++) {
1436 switch (buffers[i].attachment) {
1437 case __DRI_BUFFER_FRONT_LEFT:
1438 rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
1439 region_name = "dri2 front buffer";
1440 break;
1441
1442 case __DRI_BUFFER_FAKE_FRONT_LEFT:
1443 rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
1444 region_name = "dri2 fake front buffer";
1445 break;
1446
1447 case __DRI_BUFFER_BACK_LEFT:
1448 rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
1449 region_name = "dri2 back buffer";
1450 break;
1451
1452 case __DRI_BUFFER_DEPTH:
1453 case __DRI_BUFFER_HIZ:
1454 case __DRI_BUFFER_DEPTH_STENCIL:
1455 case __DRI_BUFFER_STENCIL:
1456 case __DRI_BUFFER_ACCUM:
1457 default:
1458 fprintf(stderr,
1459 "unhandled buffer attach event, attachment type %d\n",
1460 buffers[i].attachment);
1461 return;
1462 }
1463
1464 intel_process_dri2_buffer(brw, drawable, &buffers[i], rb, region_name);
1465 }
1466
1467 }
1468
1469 void
1470 intel_update_renderbuffers(__DRIcontext *context, __DRIdrawable *drawable)
1471 {
1472 struct brw_context *brw = context->driverPrivate;
1473 __DRIscreen *dri_screen = brw->screen->driScrnPriv;
1474
1475 /* Set this up front, so that in case our buffers get invalidated
1476 * while we're getting new buffers, we don't clobber the stamp and
1477 * thus ignore the invalidate. */
1478 drawable->lastStamp = drawable->dri2.stamp;
1479
1480 if (unlikely(INTEL_DEBUG & DEBUG_DRI))
1481 fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);
1482
1483 if (dri_screen->image.loader)
1484 intel_update_image_buffers(brw, drawable);
1485 else
1486 intel_update_dri2_buffers(brw, drawable);
1487
1488 driUpdateFramebufferSize(&brw->ctx, drawable);
1489 }
1490
1491 /**
1492  * intel_prepare_render should be called anywhere that current read/drawbuffer
1493 * state is required.
1494 */
1495 void
1496 intel_prepare_render(struct brw_context *brw)
1497 {
1498 struct gl_context *ctx = &brw->ctx;
1499 __DRIcontext *driContext = brw->driContext;
1500 __DRIdrawable *drawable;
1501
1502 drawable = driContext->driDrawablePriv;
1503 if (drawable && drawable->dri2.stamp != driContext->dri2.draw_stamp) {
1504 if (drawable->lastStamp != drawable->dri2.stamp)
1505 intel_update_renderbuffers(driContext, drawable);
1506 driContext->dri2.draw_stamp = drawable->dri2.stamp;
1507 }
1508
1509 drawable = driContext->driReadablePriv;
1510 if (drawable && drawable->dri2.stamp != driContext->dri2.read_stamp) {
1511 if (drawable->lastStamp != drawable->dri2.stamp)
1512 intel_update_renderbuffers(driContext, drawable);
1513 driContext->dri2.read_stamp = drawable->dri2.stamp;
1514 }
1515
1516 /* If we're currently rendering to the front buffer, the rendering
1517 * that will happen next will probably dirty the front buffer. So
1518 * mark it as dirty here.
1519 */
1520 if (_mesa_is_front_buffer_drawing(ctx->DrawBuffer))
1521 brw->front_buffer_dirty = true;
1522 }
1523
1524 /**
1525 * \brief Query DRI2 to obtain a DRIdrawable's buffers.
1526 *
1527 * To determine which DRI buffers to request, examine the renderbuffers
1528 * attached to the drawable's framebuffer. Then request the buffers with
1529 * DRI2GetBuffers() or DRI2GetBuffersWithFormat().
1530 *
1531 * This is called from intel_update_renderbuffers().
1532 *
1533 * \param drawable Drawable whose buffers are queried.
1534 * \param buffers [out] List of buffers returned by DRI2 query.
1535 * \param buffer_count [out] Number of buffers returned.
1536 *
1537 * \see intel_update_renderbuffers()
1538 * \see DRI2GetBuffers()
1539 * \see DRI2GetBuffersWithFormat()
1540 */
1541 static void
1542 intel_query_dri2_buffers(struct brw_context *brw,
1543 __DRIdrawable *drawable,
1544 __DRIbuffer **buffers,
1545 int *buffer_count)
1546 {
1547 __DRIscreen *dri_screen = brw->screen->driScrnPriv;
1548 struct gl_framebuffer *fb = drawable->driverPrivate;
1549 int i = 0;
1550 unsigned attachments[8];
1551
1552 struct intel_renderbuffer *front_rb;
1553 struct intel_renderbuffer *back_rb;
1554
1555 front_rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
1556 back_rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
1557
1558 memset(attachments, 0, sizeof(attachments));
1559 if ((_mesa_is_front_buffer_drawing(fb) ||
1560 _mesa_is_front_buffer_reading(fb) ||
1561 !back_rb) && front_rb) {
1562 /* If a fake front buffer is in use, then querying for
1563 * __DRI_BUFFER_FRONT_LEFT will cause the server to copy the image from
1564 * the real front buffer to the fake front buffer. So before doing the
1565 * query, we need to make sure all the pending drawing has landed in the
1566 * real front buffer.
1567 */
1568 intel_batchbuffer_flush(brw);
1569 intel_flush_front(&brw->ctx);
1570
1571 attachments[i++] = __DRI_BUFFER_FRONT_LEFT;
1572 attachments[i++] = intel_bits_per_pixel(front_rb);
1573 } else if (front_rb && brw->front_buffer_dirty) {
1574 /* We have pending front buffer rendering, but we aren't querying for a
1575 * front buffer. If the front buffer we have is a fake front buffer,
1576 * the X server is going to throw it away when it processes the query.
1577 * So before doing the query, make sure all the pending drawing has
1578 * landed in the real front buffer.
1579 */
1580 intel_batchbuffer_flush(brw);
1581 intel_flush_front(&brw->ctx);
1582 }
1583
1584 if (back_rb) {
1585 attachments[i++] = __DRI_BUFFER_BACK_LEFT;
1586 attachments[i++] = intel_bits_per_pixel(back_rb);
1587 }
1588
1589 assert(i <= ARRAY_SIZE(attachments));
1590
1591 *buffers =
1592 dri_screen->dri2.loader->getBuffersWithFormat(drawable,
1593 &drawable->w,
1594 &drawable->h,
1595 attachments, i / 2,
1596 buffer_count,
1597 drawable->loaderPrivate);
1598 }
1599
1600 /**
1601 * \brief Assign a DRI buffer's DRM region to a renderbuffer.
1602 *
1603 * This is called from intel_update_renderbuffers().
1604 *
1605 * \par Note:
1606 * DRI buffers whose attachment point is DRI2BufferStencil or
1607 * DRI2BufferDepthStencil are handled as special cases.
1608 *
1609 * \param buffer_name is a human readable name, such as "dri2 front buffer",
1610 * that is passed to brw_bo_gem_create_from_name().
1611 *
1612 * \see intel_update_renderbuffers()
1613 */
1614 static void
1615 intel_process_dri2_buffer(struct brw_context *brw,
1616 __DRIdrawable *drawable,
1617 __DRIbuffer *buffer,
1618 struct intel_renderbuffer *rb,
1619 const char *buffer_name)
1620 {
1621 struct gl_framebuffer *fb = drawable->driverPrivate;
1622 struct brw_bo *bo;
1623
1624 if (!rb)
1625 return;
1626
1627 unsigned num_samples = rb->Base.Base.NumSamples;
1628
1629 /* We try to avoid closing and reopening the same BO name, because the first
1630 * use of a mapping of the buffer involves a bunch of page faulting which is
1631 * moderately expensive.
1632 */
1633 struct intel_mipmap_tree *last_mt;
1634 if (num_samples == 0)
1635 last_mt = rb->mt;
1636 else
1637 last_mt = rb->singlesample_mt;
1638
1639 uint32_t old_name = 0;
1640 if (last_mt) {
1641 /* The bo already has a name because the miptree was created by a
1642 * previous call to intel_process_dri2_buffer(). If a bo already has a
1643 * name, then brw_bo_flink() is a low-cost getter. It does not
1644 * create a new name.
1645 */
1646 brw_bo_flink(last_mt->bo, &old_name);
1647 }
1648
1649 if (old_name == buffer->name)
1650 return;
1651
1652 if (unlikely(INTEL_DEBUG & DEBUG_DRI)) {
1653 fprintf(stderr,
1654 "attaching buffer %d, at %d, cpp %d, pitch %d\n",
1655 buffer->name, buffer->attachment,
1656 buffer->cpp, buffer->pitch);
1657 }
1658
1659 bo = brw_bo_gem_create_from_name(brw->bufmgr, buffer_name,
1660 buffer->name);
1661 if (!bo) {
1662 fprintf(stderr,
1663 "Failed to open BO for returned DRI2 buffer "
1664 "(%dx%d, %s, named %d).\n"
1665 "This is likely a bug in the X Server that will lead to a "
1666 "crash soon.\n",
1667 drawable->w, drawable->h, buffer_name, buffer->name);
1668 return;
1669 }
1670
1671 intel_update_winsys_renderbuffer_miptree(brw, rb, bo,
1672 drawable->w, drawable->h,
1673 buffer->pitch);
1674
1675 if (_mesa_is_front_buffer_drawing(fb) &&
1676 (buffer->attachment == __DRI_BUFFER_FRONT_LEFT ||
1677 buffer->attachment == __DRI_BUFFER_FAKE_FRONT_LEFT) &&
1678 rb->Base.Base.NumSamples > 1) {
1679 intel_renderbuffer_upsample(brw, rb);
1680 }
1681
1682 assert(rb->mt);
1683
1684 brw_bo_unreference(bo);
1685 }
1686
1687 /**
1688 * \brief Query DRI image loader to obtain a DRIdrawable's buffers.
1689 *
1690 * To determine which DRI buffers to request, examine the renderbuffers
1691 * attached to the drawable's framebuffer. Then request the buffers from
1692  * the image loader.
1693 *
1694 * This is called from intel_update_renderbuffers().
1695 *
1696 * \param drawable Drawable whose buffers are queried.
1697 * \param buffers [out] List of buffers returned by DRI2 query.
1698 * \param buffer_count [out] Number of buffers returned.
1699 *
1700 * \see intel_update_renderbuffers()
1701 */
1702
1703 static void
1704 intel_update_image_buffer(struct brw_context *intel,
1705 __DRIdrawable *drawable,
1706 struct intel_renderbuffer *rb,
1707 __DRIimage *buffer,
1708 enum __DRIimageBufferMask buffer_type)
1709 {
1710 struct gl_framebuffer *fb = drawable->driverPrivate;
1711
1712 if (!rb || !buffer->bo)
1713 return;
1714
1715 unsigned num_samples = rb->Base.Base.NumSamples;
1716
1717 /* Check and see if we're already bound to the right
1718 * buffer object
1719 */
1720 struct intel_mipmap_tree *last_mt;
1721 if (num_samples == 0)
1722 last_mt = rb->mt;
1723 else
1724 last_mt = rb->singlesample_mt;
1725
1726 if (last_mt && last_mt->bo == buffer->bo)
1727 return;
1728
1729 intel_update_winsys_renderbuffer_miptree(intel, rb, buffer->bo,
1730 buffer->width, buffer->height,
1731 buffer->pitch);
1732
1733 if (_mesa_is_front_buffer_drawing(fb) &&
1734 buffer_type == __DRI_IMAGE_BUFFER_FRONT &&
1735 rb->Base.Base.NumSamples > 1) {
1736 intel_renderbuffer_upsample(intel, rb);
1737 }
1738 }
1739
1740 static void
1741 intel_update_image_buffers(struct brw_context *brw, __DRIdrawable *drawable)
1742 {
1743 struct gl_framebuffer *fb = drawable->driverPrivate;
1744 __DRIscreen *dri_screen = brw->screen->driScrnPriv;
1745 struct intel_renderbuffer *front_rb;
1746 struct intel_renderbuffer *back_rb;
1747 struct __DRIimageList images;
1748 mesa_format format;
1749 uint32_t buffer_mask = 0;
1750 int ret;
1751
1752 front_rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
1753 back_rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
1754
1755 if (back_rb)
1756 format = intel_rb_format(back_rb);
1757 else if (front_rb)
1758 format = intel_rb_format(front_rb);
1759 else
1760 return;
1761
1762 if (front_rb && (_mesa_is_front_buffer_drawing(fb) ||
1763 _mesa_is_front_buffer_reading(fb) || !back_rb)) {
1764 buffer_mask |= __DRI_IMAGE_BUFFER_FRONT;
1765 }
1766
1767 if (back_rb)
1768 buffer_mask |= __DRI_IMAGE_BUFFER_BACK;
1769
1770 ret = dri_screen->image.loader->getBuffers(drawable,
1771 driGLFormatToImageFormat(format),
1772 &drawable->dri2.stamp,
1773 drawable->loaderPrivate,
1774 buffer_mask,
1775 &images);
1776 if (!ret)
1777 return;
1778
1779 if (images.image_mask & __DRI_IMAGE_BUFFER_FRONT) {
1780 drawable->w = images.front->width;
1781 drawable->h = images.front->height;
1782 intel_update_image_buffer(brw,
1783 drawable,
1784 front_rb,
1785 images.front,
1786 __DRI_IMAGE_BUFFER_FRONT);
1787 }
1788
1789 if (images.image_mask & __DRI_IMAGE_BUFFER_BACK) {
1790 drawable->w = images.back->width;
1791 drawable->h = images.back->height;
1792 intel_update_image_buffer(brw,
1793 drawable,
1794 back_rb,
1795 images.back,
1796 __DRI_IMAGE_BUFFER_BACK);
1797 }
1798 }