i965: Use the new resolve function for several simple cases
[mesa.git] src/mesa/drivers/dri/i965/brw_context.c
1 /*
2 Copyright 2003 VMware, Inc.
3 Copyright (C) Intel Corp. 2006. All Rights Reserved.
4 Intel funded Tungsten Graphics to
5 develop this 3D driver.
6
7 Permission is hereby granted, free of charge, to any person obtaining
8 a copy of this software and associated documentation files (the
9 "Software"), to deal in the Software without restriction, including
10 without limitation the rights to use, copy, modify, merge, publish,
11 distribute, sublicense, and/or sell copies of the Software, and to
12 permit persons to whom the Software is furnished to do so, subject to
13 the following conditions:
14
15 The above copyright notice and this permission notice (including the
16 next paragraph) shall be included in all copies or substantial
17 portions of the Software.
18
19 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
20 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
22 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
23 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
24 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
25 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26
27 **********************************************************************/
28 /*
29 * Authors:
30 * Keith Whitwell <keithw@vmware.com>
31 */
32
33
34 #include "compiler/nir/nir.h"
35 #include "main/api_exec.h"
36 #include "main/context.h"
37 #include "main/fbobject.h"
38 #include "main/extensions.h"
39 #include "main/imports.h"
40 #include "main/macros.h"
41 #include "main/points.h"
42 #include "main/version.h"
43 #include "main/vtxfmt.h"
44 #include "main/texobj.h"
45 #include "main/framebuffer.h"
46
47 #include "vbo/vbo_context.h"
48
49 #include "drivers/common/driverfuncs.h"
50 #include "drivers/common/meta.h"
51 #include "utils.h"
52
53 #include "brw_context.h"
54 #include "brw_defines.h"
55 #include "brw_blorp.h"
56 #include "brw_draw.h"
57 #include "brw_state.h"
58
59 #include "intel_batchbuffer.h"
60 #include "intel_buffer_objects.h"
61 #include "intel_buffers.h"
62 #include "intel_fbo.h"
63 #include "intel_mipmap_tree.h"
64 #include "intel_pixel.h"
65 #include "intel_image.h"
66 #include "intel_tex.h"
67 #include "intel_tex_obj.h"
68
69 #include "swrast_setup/swrast_setup.h"
70 #include "tnl/tnl.h"
71 #include "tnl/t_pipeline.h"
72 #include "util/ralloc.h"
73 #include "util/debug.h"
74 #include "isl/isl.h"
75
76 /***************************************
77 * Mesa's Driver Functions
78 ***************************************/
79
80 const char *const brw_vendor_string = "Intel Open Source Technology Center";
81
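/* Braswell branding depends on the fused EU count rather than the PCI ID
 * alone; map the EU total to the marketing model digits (unknown
 * configurations fall back to a blank model string).
 */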
82 static const char *
83 get_bsw_model(const struct intel_screen *screen)
84 {
85 switch (screen->eu_total) {
86 case 16:
87 return "405";
88 case 12:
89 return "400";
90 default:
91 return " ";
92 }
93 }
94
95 const char *
96 brw_get_renderer_string(const struct intel_screen *screen)
97 {
98 const char *chipset;
99 static char buffer[128];
100 char *bsw = NULL;
101
102 switch (screen->deviceID) {
103 #undef CHIPSET
104 #define CHIPSET(id, symbol, str) case id: chipset = str; break;
105 #include "pci_ids/i965_pci_ids.h"
106 default:
107 chipset = "Unknown Intel Chipset";
108 break;
109 }
110
111 /* Braswell branding is funny, so we have to fix it up here */
112 if (screen->deviceID == 0x22B1) {
113 bsw = strdup(chipset);
114 char *needle = strstr(bsw, "XXX");
115 if (needle) {
116 memcpy(needle, get_bsw_model(screen), 3);
117 chipset = bsw;
118 }
119 }
120
121 (void) driGetRendererString(buffer, chipset, 0);
122 free(bsw);
123 return buffer;
124 }
125
126 static const GLubyte *
127 intel_get_string(struct gl_context * ctx, GLenum name)
128 {
129 const struct brw_context *const brw = brw_context(ctx);
130
131 switch (name) {
132 case GL_VENDOR:
133 return (GLubyte *) brw_vendor_string;
134
135 case GL_RENDERER:
136 return
137 (GLubyte *) brw_get_renderer_string(brw->screen);
138
139 default:
140 return NULL;
141 }
142 }
143
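/* Viewport hook. For window-system framebuffers this doubles as a resize
 * notification, so invalidate the drawables to fetch new buffers (see the
 * EGL resize note in brw_init_driver_functions()).
 */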
144 static void
145 intel_viewport(struct gl_context *ctx)
146 {
147 struct brw_context *brw = brw_context(ctx);
148 __DRIcontext *driContext = brw->driContext;
149
150 if (_mesa_is_winsys_fbo(ctx->DrawBuffer)) {
151 if (driContext->driDrawablePriv)
152 dri2InvalidateDrawable(driContext->driDrawablePriv);
153 if (driContext->driReadablePriv)
154 dri2InvalidateDrawable(driContext->driReadablePriv);
155 }
156 }
157
158 static void
159 intel_update_framebuffer(struct gl_context *ctx,
160 struct gl_framebuffer *fb)
161 {
162 struct brw_context *brw = brw_context(ctx);
163
164 /* Quantize the derived default number of samples
165 */
166 fb->DefaultGeometry._NumSamples =
167 intel_quantize_num_samples(brw->screen,
168 fb->DefaultGeometry.NumSamples);
169 }
170
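/* If the given BO is bound as one of the color draw buffers, flag its
 * auxiliary (compression) buffer as disabled for this draw and report
 * whether any draw buffer matched.
 */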
171 static bool
172 intel_disable_rb_aux_buffer(struct brw_context *brw, const struct brw_bo *bo)
173 {
174 const struct gl_framebuffer *fb = brw->ctx.DrawBuffer;
175 bool found = false;
176
177 for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
178 const struct intel_renderbuffer *irb =
179 intel_renderbuffer(fb->_ColorDrawBuffers[i]);
180
181 if (irb && irb->mt->bo == bo) {
182 found = brw->draw_aux_buffer_disabled[i] = true;
183 }
184 }
185
186 return found;
187 }
188
189 /* On Gen9 color buffers may be compressed by the hardware (lossless
190 * compression). There are, however, format restrictions and care needs to be
191 * taken that the sampler engine is capable of re-interpreting a buffer with
192 * a format different from the one it was originally written with.
193 *
194 * For example, SRGB formats are not compressible and the sampler engine isn't
195 * capable of treating RGBA_UNORM as SRGB_ALPHA. In such a case the underlying
196 * color buffer needs to be resolved so that the sampling surface can be
197 * sampled as non-compressed (i.e., without the auxiliary MCS buffer being
198 * set).
199 */
200 static bool
201 intel_texture_view_requires_resolve(struct brw_context *brw,
202 struct intel_texture_object *intel_tex)
203 {
204 if (brw->gen < 9 ||
205 !intel_miptree_is_lossless_compressed(brw, intel_tex->mt))
206 return false;
207
208 const enum isl_format isl_format =
209 brw_isl_format_for_mesa_format(intel_tex->_Format);
210
211 if (isl_format_supports_ccs_e(&brw->screen->devinfo, isl_format))
212 return false;
213
214 perf_debug("Incompatible sampling format (%s) for rbc (%s)\n",
215 _mesa_get_format_name(intel_tex->_Format),
216 _mesa_get_format_name(intel_tex->mt->format));
217
218 if (intel_disable_rb_aux_buffer(brw, intel_tex->mt->bo))
219 perf_debug("Sampling renderbuffer with non-compressible format - "
220 "turning off compression");
221
222 return true;
223 }
224
225 static void
226 intel_update_state(struct gl_context * ctx, GLuint new_state)
227 {
228 struct brw_context *brw = brw_context(ctx);
229 struct intel_texture_object *tex_obj;
230 struct intel_renderbuffer *depth_irb;
231
232 if (ctx->swrast_context)
233 _swrast_InvalidateState(ctx, new_state);
234 _vbo_InvalidateState(ctx, new_state);
235
236 brw->NewGLState |= new_state;
237
238 _mesa_unlock_context_textures(ctx);
239
240 /* Resolve the depth buffer's HiZ buffer. */
241 depth_irb = intel_get_renderbuffer(ctx->DrawBuffer, BUFFER_DEPTH);
242 if (depth_irb && depth_irb->mt) {
243 intel_miptree_slice_resolve_hiz(brw, depth_irb->mt,
244 depth_irb->mt_level,
245 depth_irb->mt_layer);
246 }
247
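/* Reset the per-drawbuffer aux-disable flags; the texture and shader
 * image walks below recompute them for this draw.
 */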
248 memset(brw->draw_aux_buffer_disabled, 0,
249 sizeof(brw->draw_aux_buffer_disabled));
250
251 /* Resolve depth buffer and render cache of each enabled texture. */
252 int maxEnabledUnit = ctx->Texture._MaxEnabledTexImageUnit;
253 for (int i = 0; i <= maxEnabledUnit; i++) {
254 if (!ctx->Texture.Unit[i]._Current)
255 continue;
256 tex_obj = intel_texture_object(ctx->Texture.Unit[i]._Current);
257 if (!tex_obj || !tex_obj->mt)
258 continue;
259
260 /* We need intel_texture_object::_Format to be valid */
261 intel_finalize_mipmap_tree(brw, i);
262
263 if (intel_miptree_sample_with_hiz(brw, tex_obj->mt))
264 intel_miptree_all_slices_resolve_hiz(brw, tex_obj->mt);
265 else
266 intel_miptree_all_slices_resolve_depth(brw, tex_obj->mt);
267 /* The sampling engine understands lossless compression, so resolving
268 * those surfaces should be skipped for performance reasons.
269 */
270 const int flags = intel_texture_view_requires_resolve(brw, tex_obj) ?
271 0 : INTEL_MIPTREE_IGNORE_CCS_E;
272 intel_miptree_all_slices_resolve_color(brw, tex_obj->mt, flags);
273 brw_render_cache_set_check_flush(brw, tex_obj->mt->bo);
274
275 if (tex_obj->base.StencilSampling ||
276 tex_obj->mt->format == MESA_FORMAT_S_UINT8) {
277 intel_update_r8stencil(brw, tex_obj->mt);
278 }
279 }
280
281 /* Resolve color for each active shader image. */
282 for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
283 const struct gl_program *prog = ctx->_Shader->CurrentProgram[i];
284
285 if (unlikely(prog && prog->info.num_images)) {
286 for (unsigned j = 0; j < prog->info.num_images; j++) {
287 struct gl_image_unit *u =
288 &ctx->ImageUnits[prog->sh.ImageUnits[j]];
289 tex_obj = intel_texture_object(u->TexObj);
290
291 if (tex_obj && tex_obj->mt) {
292 /* Access to images is implemented using indirect messages
293 * against the data port. Normal render target writes understand
294 * lossless compression, but unfortunately the typed/untyped
295 * read/write interface doesn't. Therefore even losslessly
296 * compressed surfaces need to be resolved prior to accessing
297 * them. Hence skip setting INTEL_MIPTREE_IGNORE_CCS_E.
298 */
299 intel_miptree_all_slices_resolve_color(brw, tex_obj->mt, 0);
300
301 if (intel_miptree_is_lossless_compressed(brw, tex_obj->mt) &&
302 intel_disable_rb_aux_buffer(brw, tex_obj->mt->bo)) {
303 perf_debug("Using renderbuffer as shader image - turning "
304 "off lossless compression");
305 }
306
307 brw_render_cache_set_check_flush(brw, tex_obj->mt->bo);
308 }
309 }
310 }
311 }
312
313 /* Resolve color buffers for non-coherent framebuffer fetch. */
314 if (!ctx->Extensions.MESA_shader_framebuffer_fetch &&
315 ctx->FragmentProgram._Current &&
316 ctx->FragmentProgram._Current->info.outputs_read) {
317 const struct gl_framebuffer *fb = ctx->DrawBuffer;
318
319 for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
320 const struct intel_renderbuffer *irb =
321 intel_renderbuffer(fb->_ColorDrawBuffers[i]);
322
323 if (irb &&
324 intel_miptree_resolve_color(brw, irb->mt,
325 irb->mt_level, 1,
326 irb->mt_layer, irb->layer_count,
327 INTEL_MIPTREE_IGNORE_CCS_E))
328 brw_render_cache_set_check_flush(brw, irb->mt->bo);
329 }
330 }
331
332 struct gl_framebuffer *fb = ctx->DrawBuffer;
333 for (int i = 0; i < fb->_NumColorDrawBuffers; i++) {
334 struct intel_renderbuffer *irb =
335 intel_renderbuffer(fb->_ColorDrawBuffers[i]);
336
337 if (irb == NULL || irb->mt == NULL)
338 continue;
339
340 struct intel_mipmap_tree *mt = irb->mt;
341
342 /* If FRAMEBUFFER_SRGB is used on Gen9+ then we need to resolve any of
343 * the single-sampled color renderbuffers because the CCS buffer isn't
344 * supported for SRGB formats. This only matters if FRAMEBUFFER_SRGB is
345 * enabled because otherwise the surface state will be programmed with
346 * the linear equivalent format anyway.
347 */
348 if (brw->gen >= 9 && ctx->Color.sRGBEnabled && mt->num_samples <= 1 &&
349 _mesa_get_srgb_format_linear(mt->format) != mt->format) {
350
351 /* Lossless compression is not supported for SRGB formats, so it
352 * should be impossible to get here with such surfaces.
353 */
354 assert(!intel_miptree_is_lossless_compressed(brw, mt));
355 intel_miptree_all_slices_resolve_color(brw, mt, 0);
356 brw_render_cache_set_check_flush(brw, mt->bo);
357 }
358
359 /* For layered rendering, non-compressed fast-cleared buffers need to be
360 * resolved. Surface state can carry only one fast clear color value,
361 * while each layer may have its own. For compressed buffers the clear
362 * color is available in the color buffer itself.
363 */
364 if (irb->layer_count > 1 &&
365 !(irb->mt->aux_disable & INTEL_AUX_DISABLE_CCS) &&
366 !intel_miptree_is_lossless_compressed(brw, mt)) {
367 assert(brw->gen >= 8);
368
369 intel_miptree_resolve_color(brw, mt, irb->mt_level, 1,
370 irb->mt_layer, irb->layer_count, 0);
371 }
372 }
373
374 _mesa_lock_context_textures(ctx);
375
376 if (new_state & _NEW_BUFFERS) {
377 intel_update_framebuffer(ctx, ctx->DrawBuffer);
378 if (ctx->DrawBuffer != ctx->ReadBuffer)
379 intel_update_framebuffer(ctx, ctx->ReadBuffer);
380 }
381 }
382
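/* Pick whichever loader interface (image loader or DRI2) provides the
 * flushFrontBuffer hook for this screen.
 */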
383 #define flushFront(screen) ((screen)->image.loader ? (screen)->image.loader->flushFrontBuffer : (screen)->dri2.loader->flushFrontBuffer)
384
385 static void
386 intel_flush_front(struct gl_context *ctx)
387 {
388 struct brw_context *brw = brw_context(ctx);
389 __DRIcontext *driContext = brw->driContext;
390 __DRIdrawable *driDrawable = driContext->driDrawablePriv;
391 __DRIscreen *const dri_screen = brw->screen->driScrnPriv;
392
393 if (brw->front_buffer_dirty && _mesa_is_winsys_fbo(ctx->DrawBuffer)) {
394 if (flushFront(dri_screen) && driDrawable &&
395 driDrawable->loaderPrivate) {
396
397 /* Resolve before flushing FAKE_FRONT_LEFT to FRONT_LEFT.
398 *
399 * This potentially resolves both the front and back buffers. It
400 * is unnecessary to resolve the back buffer, but doing so harms
401 * nothing except performance. And no one cares about front-buffer
402 * rendering performance.
403 */
404 intel_resolve_for_dri2_flush(brw, driDrawable);
405 intel_batchbuffer_flush(brw);
406
407 flushFront(dri_screen)(driDrawable, driDrawable->loaderPrivate);
408
409 /* We set the dirty bit in intel_prepare_render() if we're
410 * front buffer rendering once we get there.
411 */
412 brw->front_buffer_dirty = false;
413 }
414 }
415 }
416
417 static void
418 intel_glFlush(struct gl_context *ctx)
419 {
420 struct brw_context *brw = brw_context(ctx);
421
422 intel_batchbuffer_flush(brw);
423 intel_flush_front(ctx);
424
425 brw->need_flush_throttle = true;
426 }
427
428 static void
429 intel_finish(struct gl_context * ctx)
430 {
431 struct brw_context *brw = brw_context(ctx);
432
433 intel_glFlush(ctx);
434
435 if (brw->batch.last_bo)
436 brw_bo_wait_rendering(brw, brw->batch.last_bo);
437 }
438
439 static void
440 brw_init_driver_functions(struct brw_context *brw,
441 struct dd_function_table *functions)
442 {
443 _mesa_init_driver_functions(functions);
444
445 /* GLX uses DRI2 invalidate events to handle window resizing.
446 * Unfortunately, EGL does not - libEGL is written in XCB (not Xlib),
447 * which doesn't provide a mechanism for snooping the event queues.
448 *
449 * So EGL still relies on viewport hacks to handle window resizing.
450 * This should go away with DRI3000.
451 */
452 if (!brw->driContext->driScreenPriv->dri2.useInvalidate)
453 functions->Viewport = intel_viewport;
454
455 functions->Flush = intel_glFlush;
456 functions->Finish = intel_finish;
457 functions->GetString = intel_get_string;
458 functions->UpdateState = intel_update_state;
459
460 intelInitTextureFuncs(functions);
461 intelInitTextureImageFuncs(functions);
462 intelInitTextureSubImageFuncs(functions);
463 intelInitTextureCopyImageFuncs(functions);
464 intelInitCopyImageFuncs(functions);
465 intelInitClearFuncs(functions);
466 intelInitBufferFuncs(functions);
467 intelInitPixelFuncs(functions);
468 intelInitBufferObjectFuncs(functions);
469 brw_init_syncobj_functions(functions);
470 brw_init_object_purgeable_functions(functions);
471
472 brwInitFragProgFuncs( functions );
473 brw_init_common_queryobj_functions(functions);
474 if (brw->gen >= 8 || brw->is_haswell)
475 hsw_init_queryobj_functions(functions);
476 else if (brw->gen >= 6)
477 gen6_init_queryobj_functions(functions);
478 else
479 gen4_init_queryobj_functions(functions);
480 brw_init_compute_functions(functions);
481 if (brw->gen >= 7)
482 brw_init_conditional_render_functions(functions);
483
484 functions->QueryInternalFormat = brw_query_internal_format;
485
486 functions->NewTransformFeedback = brw_new_transform_feedback;
487 functions->DeleteTransformFeedback = brw_delete_transform_feedback;
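/* Choose transform feedback hooks by capability: when the kernel allows
 * MI_MATH and MI_LOAD_REGISTER_REG we can use the hsw_* variants
 * (mirroring the AlwaysUseGetTransformFeedbackVertexCount choice in
 * brw_initialize_context_constants()); otherwise fall back to the gen7
 * or software paths below.
 */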
488 if (can_do_mi_math_and_lrr(brw->screen)) {
489 functions->BeginTransformFeedback = hsw_begin_transform_feedback;
490 functions->EndTransformFeedback = hsw_end_transform_feedback;
491 functions->PauseTransformFeedback = hsw_pause_transform_feedback;
492 functions->ResumeTransformFeedback = hsw_resume_transform_feedback;
493 } else if (brw->gen >= 7) {
494 functions->BeginTransformFeedback = gen7_begin_transform_feedback;
495 functions->EndTransformFeedback = gen7_end_transform_feedback;
496 functions->PauseTransformFeedback = gen7_pause_transform_feedback;
497 functions->ResumeTransformFeedback = gen7_resume_transform_feedback;
498 functions->GetTransformFeedbackVertexCount =
499 brw_get_transform_feedback_vertex_count;
500 } else {
501 functions->BeginTransformFeedback = brw_begin_transform_feedback;
502 functions->EndTransformFeedback = brw_end_transform_feedback;
503 functions->PauseTransformFeedback = brw_pause_transform_feedback;
504 functions->ResumeTransformFeedback = brw_resume_transform_feedback;
505 functions->GetTransformFeedbackVertexCount =
506 brw_get_transform_feedback_vertex_count;
507 }
508
509 if (brw->gen >= 6)
510 functions->GetSamplePosition = gen6_get_sample_position;
511 }
512
513 static void
514 brw_initialize_context_constants(struct brw_context *brw)
515 {
516 struct gl_context *ctx = &brw->ctx;
517 const struct brw_compiler *compiler = brw->screen->compiler;
518
519 const bool stage_exists[MESA_SHADER_STAGES] = {
520 [MESA_SHADER_VERTEX] = true,
521 [MESA_SHADER_TESS_CTRL] = brw->gen >= 7,
522 [MESA_SHADER_TESS_EVAL] = brw->gen >= 7,
523 [MESA_SHADER_GEOMETRY] = brw->gen >= 6,
524 [MESA_SHADER_FRAGMENT] = true,
525 [MESA_SHADER_COMPUTE] =
526 ((ctx->API == API_OPENGL_COMPAT || ctx->API == API_OPENGL_CORE) &&
527 ctx->Const.MaxComputeWorkGroupSize[0] >= 1024) ||
528 (ctx->API == API_OPENGLES2 &&
529 ctx->Const.MaxComputeWorkGroupSize[0] >= 128) ||
530 _mesa_extension_override_enables.ARB_compute_shader,
531 };
532
533 unsigned num_stages = 0;
534 for (int i = 0; i < MESA_SHADER_STAGES; i++) {
535 if (stage_exists[i])
536 num_stages++;
537 }
538
539 unsigned max_samplers =
540 brw->gen >= 8 || brw->is_haswell ? BRW_MAX_TEX_UNIT : 16;
541
542 ctx->Const.MaxDualSourceDrawBuffers = 1;
543 ctx->Const.MaxDrawBuffers = BRW_MAX_DRAW_BUFFERS;
544 ctx->Const.MaxCombinedShaderOutputResources =
545 MAX_IMAGE_UNITS + BRW_MAX_DRAW_BUFFERS;
546
547 /* The timestamp register we can read for glGetTimestamp() is
548 * sometimes only 32 bits, before scaling to nanoseconds (depending
549 * on kernel).
550 *
551 * Once scaled to nanoseconds the timestamp would roll over at a
552 * non-power-of-two, so an application couldn't use
553 * GL_QUERY_COUNTER_BITS to handle rollover correctly. Instead, we
554 * report 36 bits and truncate at that (rolling over 5 times as
555 * often as the HW counter), and when the 32-bit counter rolls
556 * over, it happens to also be at a rollover in the reported value
557 * from near (1<<36) to 0.
558 *
559 * The low 32 bits roll over in ~343 seconds. Our 36-bit result
560 * rolls over every ~69 seconds.
561 */
562 ctx->Const.QueryCounterBits.Timestamp = 36;
563
564 ctx->Const.MaxTextureCoordUnits = 8; /* Mesa limit */
565 ctx->Const.MaxImageUnits = MAX_IMAGE_UNITS;
566 if (brw->gen >= 7) {
567 ctx->Const.MaxRenderbufferSize = 16384;
568 ctx->Const.MaxTextureLevels = MIN2(15 /* 16384 */, MAX_TEXTURE_LEVELS);
569 ctx->Const.MaxCubeTextureLevels = 15; /* 16384 */
570 } else {
571 ctx->Const.MaxRenderbufferSize = 8192;
572 ctx->Const.MaxTextureLevels = MIN2(14 /* 8192 */, MAX_TEXTURE_LEVELS);
573 ctx->Const.MaxCubeTextureLevels = 14; /* 8192 */
574 }
575 ctx->Const.Max3DTextureLevels = 12; /* 2048 */
576 ctx->Const.MaxArrayTextureLayers = brw->gen >= 7 ? 2048 : 512;
577 ctx->Const.MaxTextureMbytes = 1536;
578 ctx->Const.MaxTextureRectSize = 1 << 12;
579 ctx->Const.MaxTextureMaxAnisotropy = 16.0;
580 ctx->Const.MaxTextureLodBias = 15.0;
581 ctx->Const.StripTextureBorder = true;
582 if (brw->gen >= 7) {
583 ctx->Const.MaxProgramTextureGatherComponents = 4;
584 ctx->Const.MinProgramTextureGatherOffset = -32;
585 ctx->Const.MaxProgramTextureGatherOffset = 31;
586 } else if (brw->gen == 6) {
587 ctx->Const.MaxProgramTextureGatherComponents = 1;
588 ctx->Const.MinProgramTextureGatherOffset = -8;
589 ctx->Const.MaxProgramTextureGatherOffset = 7;
590 }
591
592 ctx->Const.MaxUniformBlockSize = 65536;
593
594 for (int i = 0; i < MESA_SHADER_STAGES; i++) {
595 struct gl_program_constants *prog = &ctx->Const.Program[i];
596
597 if (!stage_exists[i])
598 continue;
599
600 prog->MaxTextureImageUnits = max_samplers;
601
602 prog->MaxUniformBlocks = BRW_MAX_UBO;
603 prog->MaxCombinedUniformComponents =
604 prog->MaxUniformComponents +
605 ctx->Const.MaxUniformBlockSize / 4 * prog->MaxUniformBlocks;
606
607 prog->MaxAtomicCounters = MAX_ATOMIC_COUNTERS;
608 prog->MaxAtomicBuffers = BRW_MAX_ABO;
609 prog->MaxImageUniforms = compiler->scalar_stage[i] ? BRW_MAX_IMAGES : 0;
610 prog->MaxShaderStorageBlocks = BRW_MAX_SSBO;
611 }
612
613 ctx->Const.MaxTextureUnits =
614 MIN2(ctx->Const.MaxTextureCoordUnits,
615 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits);
616
617 ctx->Const.MaxUniformBufferBindings = num_stages * BRW_MAX_UBO;
618 ctx->Const.MaxCombinedUniformBlocks = num_stages * BRW_MAX_UBO;
619 ctx->Const.MaxCombinedAtomicBuffers = num_stages * BRW_MAX_ABO;
620 ctx->Const.MaxCombinedShaderStorageBlocks = num_stages * BRW_MAX_SSBO;
621 ctx->Const.MaxShaderStorageBufferBindings = num_stages * BRW_MAX_SSBO;
622 ctx->Const.MaxCombinedTextureImageUnits = num_stages * max_samplers;
623 ctx->Const.MaxCombinedImageUniforms = num_stages * BRW_MAX_IMAGES;
624
625
626 /* Hardware only supports a limited number of transform feedback buffers.
627 * So we need to override the Mesa default (which is based only on software
628 * limits).
629 */
630 ctx->Const.MaxTransformFeedbackBuffers = BRW_MAX_SOL_BUFFERS;
631
632 /* On Gen6, in the worst case, we use up one binding table entry per
633 * transform feedback component (see comments above the definition of
634 * BRW_MAX_SOL_BINDINGS, in brw_context.h), so we need to advertise a value
635 * for MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS equal to
636 * BRW_MAX_SOL_BINDINGS.
637 *
638 * In "separate components" mode, we need to divide this value by
639 * BRW_MAX_SOL_BUFFERS, so that the total number of binding table entries
640 * used up by all buffers will not exceed BRW_MAX_SOL_BINDINGS.
641 */
642 ctx->Const.MaxTransformFeedbackInterleavedComponents = BRW_MAX_SOL_BINDINGS;
643 ctx->Const.MaxTransformFeedbackSeparateComponents =
644 BRW_MAX_SOL_BINDINGS / BRW_MAX_SOL_BUFFERS;
645
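/* Without MI_MATH/MI_LOAD_REGISTER_REG the GPU cannot compute transform
 * feedback vertex counts itself, so always take the query-based fallback.
 */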
646 ctx->Const.AlwaysUseGetTransformFeedbackVertexCount =
647 !can_do_mi_math_and_lrr(brw->screen);
648
649 int max_samples;
650 const int *msaa_modes = intel_supported_msaa_modes(brw->screen);
651 const int clamp_max_samples =
652 driQueryOptioni(&brw->optionCache, "clamp_max_samples");
653
654 if (clamp_max_samples < 0) {
655 max_samples = msaa_modes[0];
656 } else {
657 /* Select the largest supported MSAA mode that does not exceed
658 * clamp_max_samples.
659 */
660 max_samples = 0;
661 for (int i = 0; msaa_modes[i] != 0; ++i) {
662 if (msaa_modes[i] <= clamp_max_samples) {
663 max_samples = msaa_modes[i];
664 break;
665 }
666 }
667 }
668
669 ctx->Const.MaxSamples = max_samples;
670 ctx->Const.MaxColorTextureSamples = max_samples;
671 ctx->Const.MaxDepthTextureSamples = max_samples;
672 ctx->Const.MaxIntegerSamples = max_samples;
673 ctx->Const.MaxImageSamples = 0;
674
675 /* gen6_set_sample_maps() sets SampleMap{2,4,8}x variables which are used
676 * to map indices of a rectangular grid to sample numbers within a pixel.
677 * These variables are used by GL_EXT_framebuffer_multisample_blit_scaled
678 * extension implementation. For more details see the comment above
679 * gen6_set_sample_maps() definition.
680 */
681 gen6_set_sample_maps(ctx);
682
683 ctx->Const.MinLineWidth = 1.0;
684 ctx->Const.MinLineWidthAA = 1.0;
685 if (brw->gen >= 6) {
686 ctx->Const.MaxLineWidth = 7.375;
687 ctx->Const.MaxLineWidthAA = 7.375;
688 ctx->Const.LineWidthGranularity = 0.125;
689 } else {
690 ctx->Const.MaxLineWidth = 7.0;
691 ctx->Const.MaxLineWidthAA = 7.0;
692 ctx->Const.LineWidthGranularity = 0.5;
693 }
694
695 /* For non-antialiased lines, we have to round the line width to the
696 * nearest whole number. Make sure that we don't advertise a line
697 * width that, when rounded, will be beyond the actual hardware
698 * maximum.
699 */
700 assert(roundf(ctx->Const.MaxLineWidth) <= ctx->Const.MaxLineWidth);
701
702 ctx->Const.MinPointSize = 1.0;
703 ctx->Const.MinPointSizeAA = 1.0;
704 ctx->Const.MaxPointSize = 255.0;
705 ctx->Const.MaxPointSizeAA = 255.0;
706 ctx->Const.PointSizeGranularity = 1.0;
707
708 if (brw->gen >= 5 || brw->is_g4x)
709 ctx->Const.MaxClipPlanes = 8;
710
711 ctx->Const.GLSLTessLevelsAsInputs = true;
712 ctx->Const.LowerTCSPatchVerticesIn = brw->gen >= 8;
713 ctx->Const.LowerTESPatchVerticesIn = true;
714 ctx->Const.PrimitiveRestartForPatches = true;
715
716 ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeInstructions = 16 * 1024;
717 ctx->Const.Program[MESA_SHADER_VERTEX].MaxAluInstructions = 0;
718 ctx->Const.Program[MESA_SHADER_VERTEX].MaxTexInstructions = 0;
719 ctx->Const.Program[MESA_SHADER_VERTEX].MaxTexIndirections = 0;
720 ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAluInstructions = 0;
721 ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTexInstructions = 0;
722 ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTexIndirections = 0;
723 ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAttribs = 16;
724 ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTemps = 256;
725 ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAddressRegs = 1;
726 ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeParameters = 1024;
727 ctx->Const.Program[MESA_SHADER_VERTEX].MaxEnvParams =
728 MIN2(ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeParameters,
729 ctx->Const.Program[MESA_SHADER_VERTEX].MaxEnvParams);
730
731 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeInstructions = 1024;
732 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAluInstructions = 1024;
733 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTexInstructions = 1024;
734 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTexIndirections = 1024;
735 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAttribs = 12;
736 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTemps = 256;
737 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAddressRegs = 0;
738 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeParameters = 1024;
739 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxEnvParams =
740 MIN2(ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeParameters,
741 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxEnvParams);
742
743 /* Fragment shaders use real, 32-bit twos-complement integers for all
744 * integer types.
745 */
746 ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.RangeMin = 31;
747 ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.RangeMax = 30;
748 ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.Precision = 0;
749 ctx->Const.Program[MESA_SHADER_FRAGMENT].HighInt = ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt;
750 ctx->Const.Program[MESA_SHADER_FRAGMENT].MediumInt = ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt;
751
752 ctx->Const.Program[MESA_SHADER_VERTEX].LowInt.RangeMin = 31;
753 ctx->Const.Program[MESA_SHADER_VERTEX].LowInt.RangeMax = 30;
754 ctx->Const.Program[MESA_SHADER_VERTEX].LowInt.Precision = 0;
755 ctx->Const.Program[MESA_SHADER_VERTEX].HighInt = ctx->Const.Program[MESA_SHADER_VERTEX].LowInt;
756 ctx->Const.Program[MESA_SHADER_VERTEX].MediumInt = ctx->Const.Program[MESA_SHADER_VERTEX].LowInt;
757
758 /* Gen6 converts quads to polygons at the beginning of the 3D pipeline,
759 * but we're not sure how that is done with respect to vertex order,
760 * which affects the provoking vertex decision. Always use the last-vertex
761 * convention for quad primitives, which works as expected for now.
762 */
763 if (brw->gen >= 6)
764 ctx->Const.QuadsFollowProvokingVertexConvention = false;
765
766 ctx->Const.NativeIntegers = true;
767 ctx->Const.VertexID_is_zero_based = true;
768
769 /* Regarding the CMP instruction, the Ivybridge PRM says:
770 *
771 * "For each enabled channel 0b or 1b is assigned to the appropriate flag
772 * bit and 0/all zeros or all ones (e.g, byte 0xFF, word 0xFFFF, DWord
773 * 0xFFFFFFFF) is assigned to dst."
774 *
775 * but PRMs for earlier generations say
776 *
777 * "In dword format, one GRF may store up to 8 results. When the register
778 * is used later as a vector of Booleans, as only LSB at each channel
779 * contains meaning [sic] data, software should make sure all higher bits
780 * are masked out (e.g. by 'and-ing' an [sic] 0x01 constant)."
781 *
782 * We select the representation of a true boolean uniform to be ~0, and fix
783 * the results of Gen <= 5 CMP instructions with -(result & 1).
784 */
785 ctx->Const.UniformBooleanTrue = ~0;
786
787 /* From the gen4 PRM, volume 4 page 127:
788 *
789 * "For SURFTYPE_BUFFER non-rendertarget surfaces, this field specifies
790 * the base address of the first element of the surface, computed in
791 * software by adding the surface base address to the byte offset of
792 * the element in the buffer."
793 *
794 * However, unaligned accesses are slower, so enforce buffer alignment.
795 */
796 ctx->Const.UniformBufferOffsetAlignment = 16;
797
798 /* ShaderStorageBufferOffsetAlignment should be a cacheline (64 bytes) so
799 * that we can safely have the CPU and GPU writing the same SSBO on
800 * non-cache-coherent systems (our Atom CPUs). With UBOs, the GPU never
801 * writes, so there's no problem. For an SSBO, the GPU and the CPU can
802 * be updating disjoint regions of the buffer simultaneously and that will
803 * break if the regions overlap the same cacheline.
804 */
805 ctx->Const.ShaderStorageBufferOffsetAlignment = 64;
806 ctx->Const.TextureBufferOffsetAlignment = 16;
807 ctx->Const.MaxTextureBufferSize = 128 * 1024 * 1024;
808
809 if (brw->gen >= 6) {
810 ctx->Const.MaxVarying = 32;
811 ctx->Const.Program[MESA_SHADER_VERTEX].MaxOutputComponents = 128;
812 ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxInputComponents = 64;
813 ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxOutputComponents = 128;
814 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxInputComponents = 128;
815 ctx->Const.Program[MESA_SHADER_TESS_CTRL].MaxInputComponents = 128;
816 ctx->Const.Program[MESA_SHADER_TESS_CTRL].MaxOutputComponents = 128;
817 ctx->Const.Program[MESA_SHADER_TESS_EVAL].MaxInputComponents = 128;
818 ctx->Const.Program[MESA_SHADER_TESS_EVAL].MaxOutputComponents = 128;
819 }
820
821 /* We want the GLSL compiler to emit code that uses condition codes */
822 for (int i = 0; i < MESA_SHADER_STAGES; i++) {
823 ctx->Const.ShaderCompilerOptions[i] =
824 brw->screen->compiler->glsl_compiler_options[i];
825 }
826
827 if (brw->gen >= 7) {
828 ctx->Const.MaxViewportWidth = 32768;
829 ctx->Const.MaxViewportHeight = 32768;
830 }
831
832 /* ARB_viewport_array, OES_viewport_array */
833 if (brw->gen >= 6) {
834 ctx->Const.MaxViewports = GEN6_NUM_VIEWPORTS;
835 ctx->Const.ViewportSubpixelBits = 0;
836
837 /* Cast to float before negating because MaxViewportWidth is unsigned.
838 */
839 ctx->Const.ViewportBounds.Min = -(float)ctx->Const.MaxViewportWidth;
840 ctx->Const.ViewportBounds.Max = ctx->Const.MaxViewportWidth;
841 }
842
843 /* ARB_gpu_shader5 */
844 if (brw->gen >= 7)
845 ctx->Const.MaxVertexStreams = MIN2(4, MAX_VERTEX_STREAMS);
846
847 /* ARB_framebuffer_no_attachments */
848 ctx->Const.MaxFramebufferWidth = 16384;
849 ctx->Const.MaxFramebufferHeight = 16384;
850 ctx->Const.MaxFramebufferLayers = ctx->Const.MaxArrayTextureLayers;
851 ctx->Const.MaxFramebufferSamples = max_samples;
852
853 /* OES_primitive_bounding_box */
854 ctx->Const.NoPrimitiveBoundingBoxOutput = true;
855 }
856
857 static void
858 brw_initialize_cs_context_constants(struct brw_context *brw)
859 {
860 struct gl_context *ctx = &brw->ctx;
861 const struct intel_screen *screen = brw->screen;
862 struct gen_device_info *devinfo = &brw->screen->devinfo;
863
864 /* FINISHME: Do this for all platforms that the kernel supports */
865 if (brw->is_cherryview &&
866 screen->subslice_total > 0 && screen->eu_total > 0) {
867 /* Logical CS threads = EUs per subslice * 7 threads per EU */
868 uint32_t max_cs_threads = screen->eu_total / screen->subslice_total * 7;
869
870 /* Fuse configurations may give more threads than expected, never less. */
871 if (max_cs_threads > devinfo->max_cs_threads)
872 devinfo->max_cs_threads = max_cs_threads;
873 }
874
875 /* Maximum number of scalar compute shader invocations that can be run in
876 * parallel in the same subslice assuming SIMD32 dispatch.
877 *
878 * We don't advertise more than 64 threads, because we are limited to 64 by
879 * our usage of thread_width_max in the gpgpu walker command. This only
880 * currently impacts Haswell, which otherwise might be able to advertise 70
881 * threads. With SIMD32 and 64 threads, Haswell still provides twice
882 * the number of invocations required for ARB_compute_shader.
883 */
884 const unsigned max_threads = MIN2(64, devinfo->max_cs_threads);
885 const uint32_t max_invocations = 32 * max_threads;
886 ctx->Const.MaxComputeWorkGroupSize[0] = max_invocations;
887 ctx->Const.MaxComputeWorkGroupSize[1] = max_invocations;
888 ctx->Const.MaxComputeWorkGroupSize[2] = max_invocations;
889 ctx->Const.MaxComputeWorkGroupInvocations = max_invocations;
890 ctx->Const.MaxComputeSharedMemorySize = 64 * 1024;
891 }
892
893 /**
894 * Process driconf (drirc) options, setting appropriate context flags.
895 *
896 * intelInitExtensions still pokes at optionCache directly, in order to
897 * avoid advertising various extensions. No flags are set, so it makes
898 * sense to continue doing that there.
899 */
900 static void
901 brw_process_driconf_options(struct brw_context *brw)
902 {
903 struct gl_context *ctx = &brw->ctx;
904
905 driOptionCache *options = &brw->optionCache;
906 driParseConfigFiles(options, &brw->screen->optionCache,
907 brw->driContext->driScreenPriv->myNum, "i965");
908
909 int bo_reuse_mode = driQueryOptioni(options, "bo_reuse");
910 switch (bo_reuse_mode) {
911 case DRI_CONF_BO_REUSE_DISABLED:
912 break;
913 case DRI_CONF_BO_REUSE_ALL:
914 brw_bufmgr_enable_reuse(brw->bufmgr);
915 break;
916 }
917
918 if (INTEL_DEBUG & DEBUG_NO_HIZ) {
919 brw->has_hiz = false;
920 /* On gen6, you can only do separate stencil with HIZ. */
921 if (brw->gen == 6)
922 brw->has_separate_stencil = false;
923 }
924
925 if (driQueryOptionb(options, "always_flush_batch")) {
926 fprintf(stderr, "flushing batchbuffer before/after each draw call\n");
927 brw->always_flush_batch = true;
928 }
929
930 if (driQueryOptionb(options, "always_flush_cache")) {
931 fprintf(stderr, "flushing GPU caches before/after each draw call\n");
932 brw->always_flush_cache = true;
933 }
934
935 if (driQueryOptionb(options, "disable_throttling")) {
936 fprintf(stderr, "disabling flush throttling\n");
937 brw->disable_throttling = true;
938 }
939
940 brw->precompile = driQueryOptionb(&brw->optionCache, "shader_precompile");
941
942 if (driQueryOptionb(&brw->optionCache, "precise_trig"))
943 brw->screen->compiler->precise_trig = true;
944
945 ctx->Const.ForceGLSLExtensionsWarn =
946 driQueryOptionb(options, "force_glsl_extensions_warn");
947
948 ctx->Const.ForceGLSLVersion =
949 driQueryOptioni(options, "force_glsl_version");
950
951 ctx->Const.DisableGLSLLineContinuations =
952 driQueryOptionb(options, "disable_glsl_line_continuations");
953
954 ctx->Const.AllowGLSLExtensionDirectiveMidShader =
955 driQueryOptionb(options, "allow_glsl_extension_directive_midshader");
956
957 ctx->Const.AllowGLSLBuiltinVariableRedeclaration =
958 driQueryOptionb(options, "allow_glsl_builtin_variable_redeclaration");
959
960 ctx->Const.AllowHigherCompatVersion =
961 driQueryOptionb(options, "allow_higher_compat_version");
962
963 ctx->Const.ForceGLSLAbsSqrt =
964 driQueryOptionb(options, "force_glsl_abs_sqrt");
965
966 ctx->Const.GLSLZeroInit = driQueryOptionb(options, "glsl_zero_init");
967
968 brw->dual_color_blend_by_location =
969 driQueryOptionb(options, "dual_color_blend_by_location");
970 }
971
972 GLboolean
973 brwCreateContext(gl_api api,
974 const struct gl_config *mesaVis,
975 __DRIcontext *driContextPriv,
976 unsigned major_version,
977 unsigned minor_version,
978 uint32_t flags,
979 bool notify_reset,
980 unsigned *dri_ctx_error,
981 void *sharedContextPrivate)
982 {
983 struct gl_context *shareCtx = (struct gl_context *) sharedContextPrivate;
984 struct intel_screen *screen = driContextPriv->driScreenPriv->driverPrivate;
985 const struct gen_device_info *devinfo = &screen->devinfo;
986 struct dd_function_table functions;
987
988 /* Only allow the __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS flag if the kernel
989 * provides us with context reset notifications.
990 */
991 uint32_t allowed_flags = __DRI_CTX_FLAG_DEBUG
992 | __DRI_CTX_FLAG_FORWARD_COMPATIBLE;
993
994 if (screen->has_context_reset_notification)
995 allowed_flags |= __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS;
996
997 if (flags & ~allowed_flags) {
998 *dri_ctx_error = __DRI_CTX_ERROR_UNKNOWN_FLAG;
999 return false;
1000 }
1001
1002 struct brw_context *brw = rzalloc(NULL, struct brw_context);
1003 if (!brw) {
1004 fprintf(stderr, "%s: failed to alloc context\n", __func__);
1005 *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
1006 return false;
1007 }
1008
1009 driContextPriv->driverPrivate = brw;
1010 brw->driContext = driContextPriv;
1011 brw->screen = screen;
1012 brw->bufmgr = screen->bufmgr;
1013
1014 brw->gen = devinfo->gen;
1015 brw->gt = devinfo->gt;
1016 brw->is_g4x = devinfo->is_g4x;
1017 brw->is_baytrail = devinfo->is_baytrail;
1018 brw->is_haswell = devinfo->is_haswell;
1019 brw->is_cherryview = devinfo->is_cherryview;
1020 brw->is_broxton = devinfo->is_broxton;
1021 brw->has_llc = devinfo->has_llc;
1022 brw->has_hiz = devinfo->has_hiz_and_separate_stencil;
1023 brw->has_separate_stencil = devinfo->has_hiz_and_separate_stencil;
1024 brw->has_pln = devinfo->has_pln;
1025 brw->has_compr4 = devinfo->has_compr4;
1026 brw->has_surface_tile_offset = devinfo->has_surface_tile_offset;
1027 brw->has_negative_rhw_bug = devinfo->has_negative_rhw_bug;
1028 brw->needs_unlit_centroid_workaround =
1029 devinfo->needs_unlit_centroid_workaround;
1030
1031 brw->must_use_separate_stencil = devinfo->must_use_separate_stencil;
1032 brw->has_swizzling = screen->hw_has_swizzling;
1033
1034 isl_device_init(&brw->isl_dev, devinfo, screen->hw_has_swizzling);
1035
1036 brw->vs.base.stage = MESA_SHADER_VERTEX;
1037 brw->tcs.base.stage = MESA_SHADER_TESS_CTRL;
1038 brw->tes.base.stage = MESA_SHADER_TESS_EVAL;
1039 brw->gs.base.stage = MESA_SHADER_GEOMETRY;
1040 brw->wm.base.stage = MESA_SHADER_FRAGMENT;
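/* Hook up the generation-specific surface state and depth/stencil/HiZ
 * emission vtables.
 */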
1041 if (brw->gen >= 8) {
1042 gen8_init_vtable_surface_functions(brw);
1043 brw->vtbl.emit_depth_stencil_hiz = gen8_emit_depth_stencil_hiz;
1044 } else if (brw->gen >= 7) {
1045 gen7_init_vtable_surface_functions(brw);
1046 brw->vtbl.emit_depth_stencil_hiz = gen7_emit_depth_stencil_hiz;
1047 } else if (brw->gen >= 6) {
1048 gen6_init_vtable_surface_functions(brw);
1049 brw->vtbl.emit_depth_stencil_hiz = gen6_emit_depth_stencil_hiz;
1050 } else {
1051 gen4_init_vtable_surface_functions(brw);
1052 brw->vtbl.emit_depth_stencil_hiz = brw_emit_depth_stencil_hiz;
1053 }
1054
1055 brw_init_driver_functions(brw, &functions);
1056
1057 if (notify_reset)
1058 functions.GetGraphicsResetStatus = brw_get_graphics_reset_status;
1059
1060 struct gl_context *ctx = &brw->ctx;
1061
1062 if (!_mesa_initialize_context(ctx, api, mesaVis, shareCtx, &functions)) {
1063 *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
1064 fprintf(stderr, "%s: failed to init mesa context\n", __func__);
1065 intelDestroyContext(driContextPriv);
1066 return false;
1067 }
1068
1069 driContextSetFlags(ctx, flags);
1070
1071 /* Initialize the software rasterizer and helper modules.
1072 *
1073 * As of GL 3.1 core, the gen4+ driver doesn't need the swrast context for
1074 * software fallbacks (which we have to support on legacy GL to do weird
1075 * glDrawPixels(), glBitmap(), and other functions).
1076 */
1077 if (api != API_OPENGL_CORE && api != API_OPENGLES2) {
1078 _swrast_CreateContext(ctx);
1079 }
1080
1081 _vbo_CreateContext(ctx);
1082 if (ctx->swrast_context) {
1083 _tnl_CreateContext(ctx);
1084 TNL_CONTEXT(ctx)->Driver.RunPipeline = _tnl_run_pipeline;
1085 _swsetup_CreateContext(ctx);
1086
1087 /* Configure swrast to match hardware characteristics: */
1088 _swrast_allow_pixel_fog(ctx, false);
1089 _swrast_allow_vertex_fog(ctx, true);
1090 }
1091
1092 _mesa_meta_init(ctx);
1093
1094 brw_process_driconf_options(brw);
1095
1096 if (INTEL_DEBUG & DEBUG_PERF)
1097 brw->perf_debug = true;
1098
1099 brw_initialize_cs_context_constants(brw);
1100 brw_initialize_context_constants(brw);
1101
1102 ctx->Const.ResetStrategy = notify_reset
1103 ? GL_LOSE_CONTEXT_ON_RESET_ARB : GL_NO_RESET_NOTIFICATION_ARB;
1104
1105 /* Reinitialize the context point state. It depends on ctx->Const values. */
1106 _mesa_init_point(ctx);
1107
1108 intel_fbo_init(brw);
1109
1110 intel_batchbuffer_init(&brw->batch, brw->bufmgr, brw->has_llc);
1111
1112 if (brw->gen >= 6) {
1113 /* Create a new hardware context. Using a hardware context means that
1114 * our GPU state will be saved/restored on context switch, allowing us
1115 * to assume that the GPU is in the same state we left it in.
1116 *
1117 * This is required for transform feedback buffer offsets, query objects,
1118 * and also allows us to reduce how much state we have to emit.
1119 */
1120 brw->hw_ctx = brw_create_hw_context(brw->bufmgr);
1121
1122 if (!brw->hw_ctx) {
1123 fprintf(stderr, "Failed to create hardware context.\n");
1124 intelDestroyContext(driContextPriv);
1125 return false;
1126 }
1127 }
1128
1129 if (brw_init_pipe_control(brw, devinfo)) {
1130 *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
1131 intelDestroyContext(driContextPriv);
1132 return false;
1133 }
1134
1135 brw_init_state(brw);
1136
1137 intelInitExtensions(ctx);
1138
1139 brw_init_surface_formats(brw);
1140
1141 brw_blorp_init(brw);
1142
1143 brw->urb.size = devinfo->urb.size;
1144
1145 if (brw->gen == 6)
1146 brw->urb.gs_present = false;
1147
1148 brw->prim_restart.in_progress = false;
1149 brw->prim_restart.enable_cut_index = false;
1150 brw->gs.enabled = false;
1151 brw->clip.viewport_count = 1;
1152
1153 brw->predicate.state = BRW_PREDICATE_STATE_RENDER;
1154
1155 brw->max_gtt_map_object_size = screen->max_gtt_map_object_size;
1156
1157 ctx->VertexProgram._MaintainTnlProgram = true;
1158 ctx->FragmentProgram._MaintainTexEnvProgram = true;
1159
1160 brw_draw_init( brw );
1161
1162 if ((flags & __DRI_CTX_FLAG_DEBUG) != 0) {
1163 /* Turn on some extra GL_ARB_debug_output generation. */
1164 brw->perf_debug = true;
1165 }
1166
1167 if ((flags & __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS) != 0) {
1168 ctx->Const.ContextFlags |= GL_CONTEXT_FLAG_ROBUST_ACCESS_BIT_ARB;
1169 ctx->Const.RobustAccess = GL_TRUE;
1170 }
1171
1172 if (INTEL_DEBUG & DEBUG_SHADER_TIME)
1173 brw_init_shader_time(brw);
1174
1175 _mesa_compute_version(ctx);
1176
1177 _mesa_initialize_dispatch_tables(ctx);
1178 _mesa_initialize_vbo_vtxfmt(ctx);
1179
1180 if (ctx->Extensions.INTEL_performance_query)
1181 brw_init_performance_queries(brw);
1182
1183 vbo_use_buffer_objects(ctx);
1184 vbo_always_unmap_buffers(ctx);
1185
1186 return true;
1187 }
1188
1189 void
1190 intelDestroyContext(__DRIcontext * driContextPriv)
1191 {
1192 struct brw_context *brw =
1193 (struct brw_context *) driContextPriv->driverPrivate;
1194 struct gl_context *ctx = &brw->ctx;
1195
1196 _mesa_meta_free(&brw->ctx);
1197
1198 if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
1199 /* Force a report. */
1200 brw->shader_time.report_time = 0;
1201
1202 brw_collect_and_report_shader_time(brw);
1203 brw_destroy_shader_time(brw);
1204 }
1205
1206 if (brw->gen >= 6)
1207 blorp_finish(&brw->blorp);
1208
1209 brw_destroy_state(brw);
1210 brw_draw_destroy(brw);
1211
1212 brw_bo_unreference(brw->curbe.curbe_bo);
1213 if (brw->vs.base.scratch_bo)
1214 brw_bo_unreference(brw->vs.base.scratch_bo);
1215 if (brw->tcs.base.scratch_bo)
1216 brw_bo_unreference(brw->tcs.base.scratch_bo);
1217 if (brw->tes.base.scratch_bo)
1218 brw_bo_unreference(brw->tes.base.scratch_bo);
1219 if (brw->gs.base.scratch_bo)
1220 brw_bo_unreference(brw->gs.base.scratch_bo);
1221 if (brw->wm.base.scratch_bo)
1222 brw_bo_unreference(brw->wm.base.scratch_bo);
1223
1224 brw_destroy_hw_context(brw->bufmgr, brw->hw_ctx);
1225
1226 if (ctx->swrast_context) {
1227 _swsetup_DestroyContext(&brw->ctx);
1228 _tnl_DestroyContext(&brw->ctx);
1229 }
1230 _vbo_DestroyContext(&brw->ctx);
1231
1232 if (ctx->swrast_context)
1233 _swrast_DestroyContext(&brw->ctx);
1234
1235 brw_fini_pipe_control(brw);
1236 intel_batchbuffer_free(&brw->batch);
1237
1238 brw_bo_unreference(brw->throttle_batch[1]);
1239 brw_bo_unreference(brw->throttle_batch[0]);
1240 brw->throttle_batch[1] = NULL;
1241 brw->throttle_batch[0] = NULL;
1242
1243 driDestroyOptionCache(&brw->optionCache);
1244
1245 /* free the Mesa context */
1246 _mesa_free_context_data(&brw->ctx);
1247
1248 ralloc_free(brw);
1249 driContextPriv->driverPrivate = NULL;
1250 }
1251
1252 GLboolean
1253 intelUnbindContext(__DRIcontext * driContextPriv)
1254 {
1255 /* Unset current context and dispatch table */
1256 _mesa_make_current(NULL, NULL, NULL);
1257
1258 return true;
1259 }
1260
1261 /**
1262 * Fixes up the context for GLES2/3 with our default-to-sRGB-capable behavior
1263 * on window system framebuffers.
1264 *
1265 * Desktop GL is fairly reasonable in its handling of sRGB: You can ask if
1266 * your renderbuffer can do sRGB encode, and you can flip a switch that does
1267 * sRGB encode if the renderbuffer can handle it. You can ask specifically
1268 * for a visual where you're guaranteed to be capable, but it turns out that
1269 * everyone just makes all their ARGB8888 visuals capable and doesn't offer
1270 * incapable ones, because there's no difference between the two in resources
1271 * used. Applications thus get built that accidentally rely on the default
1272 * visual choice being sRGB, so we make ours sRGB capable. Everything sounds
1273 * great...
1274 *
1275 * But for GLES2/3, they decided that it was silly to not turn on sRGB encode
1276 * for sRGB renderbuffers you made with the GL_EXT_texture_sRGB equivalent.
1277 * So they removed the enable knob and made it "if the renderbuffer is sRGB
1278 * capable, do sRGB encode". Then, for your window system renderbuffers, you
1279 * can ask for sRGB visuals and get sRGB encode, or not ask for sRGB visuals
1280 * and get no sRGB encode (assuming that both kinds of visual are available).
1281 * Thus our choice to support sRGB by default on our visuals for desktop would
1282 * result in broken rendering of GLES apps that aren't expecting sRGB encode.
1283 *
1284 * Unfortunately, renderbuffer setup happens before a context is created. So
1285 * in intel_screen.c we always set up sRGB, and here, if you're a GLES2/3
1286 * context (without an sRGB visual, though we don't have sRGB visuals exposed
1287 * yet), we go turn that back off before anyone finds out.
1288 */
1289 static void
1290 intel_gles3_srgb_workaround(struct brw_context *brw,
1291 struct gl_framebuffer *fb)
1292 {
1293 struct gl_context *ctx = &brw->ctx;
1294
1295 if (_mesa_is_desktop_gl(ctx) || !fb->Visual.sRGBCapable)
1296 return;
1297
1298 /* Some day when we support the sRGB capable bit on visuals available for
1299 * GLES, we'll need to respect that and not disable things here.
1300 */
1301 fb->Visual.sRGBCapable = false;
1302 for (int i = 0; i < BUFFER_COUNT; i++) {
1303 struct gl_renderbuffer *rb = fb->Attachment[i].Renderbuffer;
1304 if (rb)
1305 rb->Format = _mesa_get_srgb_format_linear(rb->Format);
1306 }
1307 }
1308
1309 GLboolean
1310 intelMakeCurrent(__DRIcontext * driContextPriv,
1311 __DRIdrawable * driDrawPriv,
1312 __DRIdrawable * driReadPriv)
1313 {
1314 struct brw_context *brw;
1315 GET_CURRENT_CONTEXT(curCtx);
1316
1317 if (driContextPriv)
1318 brw = (struct brw_context *) driContextPriv->driverPrivate;
1319 else
1320 brw = NULL;
1321
1322 /* According to the glXMakeCurrent() man page: "Pending commands to
1323 * the previous context, if any, are flushed before it is released."
1324 * But only flush if we're actually changing contexts.
1325 */
1326 if (brw_context(curCtx) && brw_context(curCtx) != brw) {
1327 _mesa_flush(curCtx);
1328 }
1329
1330 if (driContextPriv) {
1331 struct gl_context *ctx = &brw->ctx;
1332 struct gl_framebuffer *fb, *readFb;
1333
1334 if (driDrawPriv == NULL) {
1335 fb = _mesa_get_incomplete_framebuffer();
1336 } else {
1337 fb = driDrawPriv->driverPrivate;
1338 driContextPriv->dri2.draw_stamp = driDrawPriv->dri2.stamp - 1;
1339 }
1340
1341 if (driReadPriv == NULL) {
1342 readFb = _mesa_get_incomplete_framebuffer();
1343 } else {
1344 readFb = driReadPriv->driverPrivate;
1345 driContextPriv->dri2.read_stamp = driReadPriv->dri2.stamp - 1;
1346 }
1347
1348 /* The sRGB workaround changes the renderbuffer's format. We must change
1349 * the format before the renderbuffer's miptree gets allocated, otherwise
1350 * the formats of the renderbuffer and its miptree will differ.
1351 */
1352 intel_gles3_srgb_workaround(brw, fb);
1353 intel_gles3_srgb_workaround(brw, readFb);
1354
1355 /* If the context viewport hasn't been initialized, force a call out to
1356 * the loader to get buffers so we have a drawable size for the initial
1357 * viewport. */
1358 if (!brw->ctx.ViewportInitialized)
1359 intel_prepare_render(brw);
1360
1361 _mesa_make_current(ctx, fb, readFb);
1362 } else {
1363 _mesa_make_current(NULL, NULL, NULL);
1364 }
1365
1366 return true;
1367 }
1368
1369 void
1370 intel_resolve_for_dri2_flush(struct brw_context *brw,
1371 __DRIdrawable *drawable)
1372 {
1373 if (brw->gen < 6) {
1374 /* MSAA and fast color clear are not supported, so don't waste time
1375 * checking whether a resolve is needed.
1376 */
1377 return;
1378 }
1379
1380 struct gl_framebuffer *fb = drawable->driverPrivate;
1381 struct intel_renderbuffer *rb;
1382
1383 /* Usually, only the back buffer will need to be downsampled. However,
1384 * the front buffer will also need it if the user has rendered into it.
1385 */
1386 static const gl_buffer_index buffers[2] = {
1387 BUFFER_BACK_LEFT,
1388 BUFFER_FRONT_LEFT,
1389 };
1390
1391 for (int i = 0; i < 2; ++i) {
1392 rb = intel_get_renderbuffer(fb, buffers[i]);
1393 if (rb == NULL || rb->mt == NULL)
1394 continue;
1395 if (rb->mt->num_samples <= 1) {
1396 assert(rb->mt_layer == 0 && rb->mt_level == 0 &&
1397 rb->layer_count == 1);
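/* Single-sampled: use the new resolve entry point. Preparing the
 * miptree for non-aux access performs any pending color resolve.
 */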
1398 intel_miptree_prepare_access(brw, rb->mt, 0, 1, 0, 1, false, false);
1399 } else {
1400 intel_renderbuffer_downsample(brw, rb);
1401 }
1402 }
1403 }
1404
1405 static unsigned
1406 intel_bits_per_pixel(const struct intel_renderbuffer *rb)
1407 {
1408 return _mesa_get_format_bytes(intel_rb_format(rb)) * 8;
1409 }
1410
1411 static void
1412 intel_query_dri2_buffers(struct brw_context *brw,
1413 __DRIdrawable *drawable,
1414 __DRIbuffer **buffers,
1415 int *count);
1416
1417 static void
1418 intel_process_dri2_buffer(struct brw_context *brw,
1419 __DRIdrawable *drawable,
1420 __DRIbuffer *buffer,
1421 struct intel_renderbuffer *rb,
1422 const char *buffer_name);
1423
1424 static void
1425 intel_update_image_buffers(struct brw_context *brw, __DRIdrawable *drawable);
1426
1427 static void
1428 intel_update_dri2_buffers(struct brw_context *brw, __DRIdrawable *drawable)
1429 {
1430 struct gl_framebuffer *fb = drawable->driverPrivate;
1431 struct intel_renderbuffer *rb;
1432 __DRIbuffer *buffers = NULL;
1433 int count;
1434 const char *region_name;
1435
1436 /* Set this up front, so that in case our buffers get invalidated
1437 * while we're getting new buffers, we don't clobber the stamp and
1438 * thus ignore the invalidate. */
1439 drawable->lastStamp = drawable->dri2.stamp;
1440
1441 if (unlikely(INTEL_DEBUG & DEBUG_DRI))
1442 fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);
1443
1444 intel_query_dri2_buffers(brw, drawable, &buffers, &count);
1445
1446 if (buffers == NULL)
1447 return;
1448
1449 for (int i = 0; i < count; i++) {
1450 switch (buffers[i].attachment) {
1451 case __DRI_BUFFER_FRONT_LEFT:
1452 rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
1453 region_name = "dri2 front buffer";
1454 break;
1455
1456 case __DRI_BUFFER_FAKE_FRONT_LEFT:
1457 rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
1458 region_name = "dri2 fake front buffer";
1459 break;
1460
1461 case __DRI_BUFFER_BACK_LEFT:
1462 rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
1463 region_name = "dri2 back buffer";
1464 break;
1465
1466 case __DRI_BUFFER_DEPTH:
1467 case __DRI_BUFFER_HIZ:
1468 case __DRI_BUFFER_DEPTH_STENCIL:
1469 case __DRI_BUFFER_STENCIL:
1470 case __DRI_BUFFER_ACCUM:
1471 default:
1472 fprintf(stderr,
1473 "unhandled buffer attach event, attachment type %d\n",
1474 buffers[i].attachment);
1475 return;
1476 }
1477
1478 intel_process_dri2_buffer(brw, drawable, &buffers[i], rb, region_name);
1479 }
1480
1481 }
1482
1483 void
1484 intel_update_renderbuffers(__DRIcontext *context, __DRIdrawable *drawable)
1485 {
1486 struct brw_context *brw = context->driverPrivate;
1487 __DRIscreen *dri_screen = brw->screen->driScrnPriv;
1488
1489 /* Set this up front, so that in case our buffers get invalidated
1490 * while we're getting new buffers, we don't clobber the stamp and
1491 * thus ignore the invalidate. */
1492 drawable->lastStamp = drawable->dri2.stamp;
1493
1494 if (unlikely(INTEL_DEBUG & DEBUG_DRI))
1495 fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);
1496
1497 if (dri_screen->image.loader)
1498 intel_update_image_buffers(brw, drawable);
1499 else
1500 intel_update_dri2_buffers(brw, drawable);
1501
1502 driUpdateFramebufferSize(&brw->ctx, drawable);
1503 }
1504
1505 /**
1506 * intel_prepare_render should be called anywhere that current read/drawbuffer
1507 * state is required.
1508 */
1509 void
1510 intel_prepare_render(struct brw_context *brw)
1511 {
1512 struct gl_context *ctx = &brw->ctx;
1513 __DRIcontext *driContext = brw->driContext;
1514 __DRIdrawable *drawable;
1515
1516 drawable = driContext->driDrawablePriv;
1517 if (drawable && drawable->dri2.stamp != driContext->dri2.draw_stamp) {
1518 if (drawable->lastStamp != drawable->dri2.stamp)
1519 intel_update_renderbuffers(driContext, drawable);
1520 driContext->dri2.draw_stamp = drawable->dri2.stamp;
1521 }
1522
1523 drawable = driContext->driReadablePriv;
1524 if (drawable && drawable->dri2.stamp != driContext->dri2.read_stamp) {
1525 if (drawable->lastStamp != drawable->dri2.stamp)
1526 intel_update_renderbuffers(driContext, drawable);
1527 driContext->dri2.read_stamp = drawable->dri2.stamp;
1528 }
1529
1530 /* If we're currently rendering to the front buffer, the rendering
1531 * that will happen next will probably dirty the front buffer. So
1532 * mark it as dirty here.
1533 */
1534 if (_mesa_is_front_buffer_drawing(ctx->DrawBuffer))
1535 brw->front_buffer_dirty = true;
1536 }
1537
1538 /**
1539 * \brief Query DRI2 to obtain a DRIdrawable's buffers.
1540 *
1541 * To determine which DRI buffers to request, examine the renderbuffers
1542 * attached to the drawable's framebuffer. Then request the buffers with
1543 * DRI2GetBuffers() or DRI2GetBuffersWithFormat().
1544 *
1545 * This is called from intel_update_renderbuffers().
1546 *
1547 * \param drawable Drawable whose buffers are queried.
1548 * \param buffers [out] List of buffers returned by DRI2 query.
1549 * \param buffer_count [out] Number of buffers returned.
1550 *
1551 * \see intel_update_renderbuffers()
1552 * \see DRI2GetBuffers()
1553 * \see DRI2GetBuffersWithFormat()
1554 */
1555 static void
1556 intel_query_dri2_buffers(struct brw_context *brw,
1557 __DRIdrawable *drawable,
1558 __DRIbuffer **buffers,
1559 int *buffer_count)
1560 {
1561 __DRIscreen *dri_screen = brw->screen->driScrnPriv;
1562 struct gl_framebuffer *fb = drawable->driverPrivate;
1563 int i = 0;
1564 unsigned attachments[8];
1565
1566 struct intel_renderbuffer *front_rb;
1567 struct intel_renderbuffer *back_rb;
1568
1569 front_rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
1570 back_rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
1571
1572 memset(attachments, 0, sizeof(attachments));
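/* attachments[] is filled with (attachment token, bits-per-pixel) pairs,
 * so i / 2 below is the number of attachments actually requested.
 */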
1573 if ((_mesa_is_front_buffer_drawing(fb) ||
1574 _mesa_is_front_buffer_reading(fb) ||
1575 !back_rb) && front_rb) {
1576 /* If a fake front buffer is in use, then querying for
1577 * __DRI_BUFFER_FRONT_LEFT will cause the server to copy the image from
1578 * the real front buffer to the fake front buffer. So before doing the
1579 * query, we need to make sure all the pending drawing has landed in the
1580 * real front buffer.
1581 */
1582 intel_batchbuffer_flush(brw);
1583 intel_flush_front(&brw->ctx);
1584
1585 attachments[i++] = __DRI_BUFFER_FRONT_LEFT;
1586 attachments[i++] = intel_bits_per_pixel(front_rb);
1587 } else if (front_rb && brw->front_buffer_dirty) {
1588 /* We have pending front buffer rendering, but we aren't querying for a
1589 * front buffer. If the front buffer we have is a fake front buffer,
1590 * the X server is going to throw it away when it processes the query.
1591 * So before doing the query, make sure all the pending drawing has
1592 * landed in the real front buffer.
1593 */
1594 intel_batchbuffer_flush(brw);
1595 intel_flush_front(&brw->ctx);
1596 }
1597
1598 if (back_rb) {
1599 attachments[i++] = __DRI_BUFFER_BACK_LEFT;
1600 attachments[i++] = intel_bits_per_pixel(back_rb);
1601 }
1602
1603 assert(i <= ARRAY_SIZE(attachments));
1604
1605 *buffers =
1606 dri_screen->dri2.loader->getBuffersWithFormat(drawable,
1607 &drawable->w,
1608 &drawable->h,
1609 attachments, i / 2,
1610 buffer_count,
1611 drawable->loaderPrivate);
1612 }
1613
1614 /**
1615 * \brief Assign a DRI buffer's DRM region to a renderbuffer.
1616 *
1617 * This is called from intel_update_renderbuffers().
1618 *
1619 * \par Note:
1620 * DRI buffers whose attachment point is DRI2BufferStencil or
1621 * DRI2BufferDepthStencil are handled as special cases.
1622 *
1623 * \param buffer_name is a human readable name, such as "dri2 front buffer",
1624 * that is passed to brw_bo_gem_create_from_name().
1625 *
1626 * \see intel_update_renderbuffers()
1627 */
1628 static void
1629 intel_process_dri2_buffer(struct brw_context *brw,
1630 __DRIdrawable *drawable,
1631 __DRIbuffer *buffer,
1632 struct intel_renderbuffer *rb,
1633 const char *buffer_name)
1634 {
1635 struct gl_framebuffer *fb = drawable->driverPrivate;
1636 struct brw_bo *bo;
1637
1638 if (!rb)
1639 return;
1640
1641 unsigned num_samples = rb->Base.Base.NumSamples;
1642
1643 /* We try to avoid closing and reopening the same BO name, because the first
1644 * use of a mapping of the buffer involves a bunch of page faulting which is
1645 * moderately expensive.
1646 */
1647 struct intel_mipmap_tree *last_mt;
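   /* With MSAA, the window-system BO backs rb->singlesample_mt, while rb->mt
    * holds the driver-private multisample surface.
    */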
   if (num_samples == 0)
      last_mt = rb->mt;
   else
      last_mt = rb->singlesample_mt;

   uint32_t old_name = 0;
   if (last_mt) {
      /* The bo already has a name because the miptree was created by a
       * previous call to intel_process_dri2_buffer(). If a bo already has a
       * name, then brw_bo_flink() is a low-cost getter. It does not
       * create a new name.
       */
      brw_bo_flink(last_mt->bo, &old_name);
   }

   if (old_name == buffer->name)
      return;

   if (unlikely(INTEL_DEBUG & DEBUG_DRI)) {
      fprintf(stderr,
              "attaching buffer %d, at %d, cpp %d, pitch %d\n",
              buffer->name, buffer->attachment,
              buffer->cpp, buffer->pitch);
   }

   bo = brw_bo_gem_create_from_name(brw->bufmgr, buffer_name,
                                    buffer->name);
   if (!bo) {
      fprintf(stderr,
              "Failed to open BO for returned DRI2 buffer "
              "(%dx%d, %s, named %d).\n"
              "This is likely a bug in the X Server that will lead to a "
              "crash soon.\n",
              drawable->w, drawable->h, buffer_name, buffer->name);
      return;
   }

   intel_update_winsys_renderbuffer_miptree(brw, rb, bo,
                                            drawable->w, drawable->h,
                                            buffer->pitch);

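   /* When front-buffer drawing with MSAA, the window system hands us a
    * single-sample buffer; populate the private multisample miptree from it
    * so subsequent rendering sees the current window contents.
    */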
   if (_mesa_is_front_buffer_drawing(fb) &&
       (buffer->attachment == __DRI_BUFFER_FRONT_LEFT ||
        buffer->attachment == __DRI_BUFFER_FAKE_FRONT_LEFT) &&
       rb->Base.Base.NumSamples > 1) {
      intel_renderbuffer_upsample(brw, rb);
   }

   assert(rb->mt);

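   /* The miptree now holds its own reference to the BO, so drop the
    * reference taken in brw_bo_gem_create_from_name().
    */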
   brw_bo_unreference(bo);
}

/**
 * \brief Assign a DRI image's BO to a renderbuffer.
 *
 * This is the image-loader counterpart of intel_process_dri2_buffer(). If
 * the renderbuffer is already backed by the image's BO, nothing is done.
 *
 * This is called from intel_update_image_buffers().
 *
 * \see intel_update_image_buffers()
 */

static void
intel_update_image_buffer(struct brw_context *intel,
                          __DRIdrawable *drawable,
                          struct intel_renderbuffer *rb,
                          __DRIimage *buffer,
                          enum __DRIimageBufferMask buffer_type)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;

   if (!rb || !buffer->bo)
      return;

   unsigned num_samples = rb->Base.Base.NumSamples;

   /* Check and see if we're already bound to the right
    * buffer object
    */
   struct intel_mipmap_tree *last_mt;
   if (num_samples == 0)
      last_mt = rb->mt;
   else
      last_mt = rb->singlesample_mt;

   if (last_mt && last_mt->bo == buffer->bo)
      return;

   intel_update_winsys_renderbuffer_miptree(intel, rb, buffer->bo,
                                            buffer->width, buffer->height,
                                            buffer->pitch);

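   /* Same front-buffer upsample as in intel_process_dri2_buffer() above. */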
   if (_mesa_is_front_buffer_drawing(fb) &&
       buffer_type == __DRI_IMAGE_BUFFER_FRONT &&
       rb->Base.Base.NumSamples > 1) {
      intel_renderbuffer_upsample(intel, rb);
   }
}

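/**
 * \brief Query the DRI image loader to obtain a DRIdrawable's buffers.
 *
 * To determine which buffers to request, examine the renderbuffers attached
 * to the drawable's framebuffer, then request the buffers from the image
 * loader.
 *
 * This is called from intel_update_renderbuffers().
 *
 * \see intel_update_renderbuffers()
 */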
static void
intel_update_image_buffers(struct brw_context *brw, __DRIdrawable *drawable)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;
   __DRIscreen *dri_screen = brw->screen->driScrnPriv;
   struct intel_renderbuffer *front_rb;
   struct intel_renderbuffer *back_rb;
   struct __DRIimageList images;
   mesa_format format;
   uint32_t buffer_mask = 0;
   int ret;

   front_rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
   back_rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);

   if (back_rb)
      format = intel_rb_format(back_rb);
   else if (front_rb)
      format = intel_rb_format(front_rb);
   else
      return;

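   /* Request a front buffer whenever we might draw to or read from the
    * front, or when there is no back buffer to render to.
    */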
   if (front_rb && (_mesa_is_front_buffer_drawing(fb) ||
                    _mesa_is_front_buffer_reading(fb) || !back_rb)) {
      buffer_mask |= __DRI_IMAGE_BUFFER_FRONT;
   }

   if (back_rb)
      buffer_mask |= __DRI_IMAGE_BUFFER_BACK;

   ret = dri_screen->image.loader->getBuffers(drawable,
                                              driGLFormatToImageFormat(format),
                                              &drawable->dri2.stamp,
                                              drawable->loaderPrivate,
                                              buffer_mask,
                                              &images);
   if (!ret)
      return;

   if (images.image_mask & __DRI_IMAGE_BUFFER_FRONT) {
      drawable->w = images.front->width;
      drawable->h = images.front->height;
      intel_update_image_buffer(brw,
                                drawable,
                                front_rb,
                                images.front,
                                __DRI_IMAGE_BUFFER_FRONT);
   }

   if (images.image_mask & __DRI_IMAGE_BUFFER_BACK) {
      drawable->w = images.back->width;
      drawable->h = images.back->height;
      intel_update_image_buffer(brw,
                                drawable,
                                back_rb,
                                images.back,
                                __DRI_IMAGE_BUFFER_BACK);
   }
}