i965: Update 3DSTATE_{DEPTH,STENCIL,...}_BUFFER and such for Broadwell.
[mesa.git] / src / mesa / drivers / dri / i965 / brw_context.c
/*
 Copyright 2003 VMware, Inc.
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */


#include "main/api_exec.h"
#include "main/context.h"
#include "main/fbobject.h"
#include "main/imports.h"
#include "main/macros.h"
#include "main/points.h"
#include "main/version.h"
#include "main/vtxfmt.h"

#include "vbo/vbo_context.h"

#include "drivers/common/driverfuncs.h"
#include "drivers/common/meta.h"
#include "utils.h"

#include "brw_context.h"
#include "brw_defines.h"
#include "brw_draw.h"
#include "brw_state.h"

#include "intel_batchbuffer.h"
#include "intel_buffer_objects.h"
#include "intel_buffers.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
#include "intel_pixel.h"
#include "intel_regions.h"
#include "intel_tex.h"
#include "intel_tex_obj.h"

#include "swrast_setup/swrast_setup.h"
#include "tnl/tnl.h"
#include "tnl/t_pipeline.h"
#include "glsl/ralloc.h"

/***************************************
 * Mesa's Driver Functions
 ***************************************/

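/* Driver hook behind glGetInternalformativ(GL_SAMPLES / GL_NUM_SAMPLE_COUNTS):
 * fills samples[] with the sample counts supported on this generation, in
 * descending order, and returns the number of entries written.
 */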
static size_t
brw_query_samples_for_format(struct gl_context *ctx, GLenum target,
                             GLenum internalFormat, int samples[16])
{
   struct brw_context *brw = brw_context(ctx);

   (void) target;

   switch (brw->gen) {
   case 7:
      samples[0] = 8;
      samples[1] = 4;
      return 2;

   case 6:
      samples[0] = 4;
      return 1;

   default:
      samples[0] = 1;
      return 1;
   }
}

const char *const brw_vendor_string = "Intel Open Source Technology Center";

const char *
brw_get_renderer_string(unsigned deviceID)
{
   const char *chipset;
   static char buffer[128];

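   /* pci_ids/i965_pci_ids.h is an X-macro table; redefining CHIPSET() and
    * including it below expands into one case label per PCI ID, roughly
    *
    *    case 0x0152: chipset = "Intel(R) Ivybridge Desktop"; break;
    *
    * (an illustrative entry -- the real list comes from the generated header).
    */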
   switch (deviceID) {
#undef CHIPSET
#define CHIPSET(id, symbol, str) case id: chipset = str; break;
#include "pci_ids/i965_pci_ids.h"
   default:
      chipset = "Unknown Intel Chipset";
      break;
   }

   (void) driGetRendererString(buffer, chipset, 0);
   return buffer;
}

static const GLubyte *
intelGetString(struct gl_context * ctx, GLenum name)
{
   const struct brw_context *const brw = brw_context(ctx);

   switch (name) {
   case GL_VENDOR:
      return (GLubyte *) brw_vendor_string;

   case GL_RENDERER:
      return
         (GLubyte *) brw_get_renderer_string(brw->intelScreen->deviceID);

   default:
      return NULL;
   }
}

static void
intel_viewport(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);
   __DRIcontext *driContext = brw->driContext;

   if (_mesa_is_winsys_fbo(ctx->DrawBuffer)) {
      dri2InvalidateDrawable(driContext->driDrawablePriv);
      dri2InvalidateDrawable(driContext->driReadablePriv);
   }
}

static void
intelInvalidateState(struct gl_context * ctx, GLuint new_state)
{
   struct brw_context *brw = brw_context(ctx);

   if (ctx->swrast_context)
      _swrast_InvalidateState(ctx, new_state);
   _vbo_InvalidateState(ctx, new_state);

   brw->NewGLState |= new_state;
}

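/* Use whichever loader interface the screen was initialized with: prefer the
 * image loader's flushFrontBuffer hook, falling back to the DRI2 loader's.
 */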
#define flushFront(screen)      ((screen)->image.loader ? (screen)->image.loader->flushFrontBuffer : (screen)->dri2.loader->flushFrontBuffer)

static void
intel_flush_front(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);
   __DRIcontext *driContext = brw->driContext;
   __DRIdrawable *driDrawable = driContext->driDrawablePriv;
   __DRIscreen *const screen = brw->intelScreen->driScrnPriv;

   if (brw->front_buffer_dirty && _mesa_is_winsys_fbo(ctx->DrawBuffer)) {
      if (flushFront(screen) && driDrawable &&
          driDrawable->loaderPrivate) {

         /* Resolve before flushing FAKE_FRONT_LEFT to FRONT_LEFT.
          *
          * This potentially resolves both the front and back buffers.  It
          * is unnecessary to resolve the back, but that harms nothing except
          * performance.  And no one cares about front-buffer render
          * performance.
          */
         intel_resolve_for_dri2_flush(brw, driDrawable);
         intel_batchbuffer_flush(brw);

         flushFront(screen)(driDrawable, driDrawable->loaderPrivate);

         /* We set the dirty bit in intel_prepare_render() if we're
          * front buffer rendering once we get there.
          */
         brw->front_buffer_dirty = false;
      }
   }
}

static void
intel_glFlush(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);

   intel_batchbuffer_flush(brw);
   intel_flush_front(ctx);
   if (brw->is_front_buffer_rendering)
      brw->need_throttle = true;
}

void
intelFinish(struct gl_context * ctx)
{
   struct brw_context *brw = brw_context(ctx);

   intel_glFlush(ctx);

   if (brw->batch.last_bo)
      drm_intel_bo_wait_rendering(brw->batch.last_bo);
}

static void
brw_init_driver_functions(struct brw_context *brw,
                          struct dd_function_table *functions)
{
   _mesa_init_driver_functions(functions);

   /* GLX uses DRI2 invalidate events to handle window resizing.
    * Unfortunately, EGL does not - libEGL is written in XCB (not Xlib),
    * which doesn't provide a mechanism for snooping the event queues.
    *
    * So EGL still relies on viewport hacks to handle window resizing.
    * This should go away with DRI3000.
    */
   if (!brw->driContext->driScreenPriv->dri2.useInvalidate)
      functions->Viewport = intel_viewport;

   functions->Flush = intel_glFlush;
   functions->Finish = intelFinish;
   functions->GetString = intelGetString;
   functions->UpdateState = intelInvalidateState;

   intelInitTextureFuncs(functions);
   intelInitTextureImageFuncs(functions);
   intelInitTextureSubImageFuncs(functions);
   intelInitTextureCopyImageFuncs(functions);
   intelInitClearFuncs(functions);
   intelInitBufferFuncs(functions);
   intelInitPixelFuncs(functions);
   intelInitBufferObjectFuncs(functions);
   intel_init_syncobj_functions(functions);
   brw_init_object_purgeable_functions(functions);

   brwInitFragProgFuncs(functions);
   brw_init_common_queryobj_functions(functions);
   if (brw->gen >= 6)
      gen6_init_queryobj_functions(functions);
   else
      gen4_init_queryobj_functions(functions);

   functions->QuerySamplesForFormat = brw_query_samples_for_format;

   functions->NewTransformFeedback = brw_new_transform_feedback;
   functions->DeleteTransformFeedback = brw_delete_transform_feedback;
   functions->GetTransformFeedbackVertexCount =
      brw_get_transform_feedback_vertex_count;
   if (brw->gen >= 7) {
      functions->BeginTransformFeedback = gen7_begin_transform_feedback;
      functions->EndTransformFeedback = gen7_end_transform_feedback;
      functions->PauseTransformFeedback = gen7_pause_transform_feedback;
      functions->ResumeTransformFeedback = gen7_resume_transform_feedback;
   } else {
      functions->BeginTransformFeedback = brw_begin_transform_feedback;
      functions->EndTransformFeedback = brw_end_transform_feedback;
   }

   if (brw->gen >= 6)
      functions->GetSamplePosition = gen6_get_sample_position;
}

static void
brw_initialize_context_constants(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

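   /* Haswell and Broadwell can address more sampler state entries per stage
    * than the 16 that older generations are limited to (hence BRW_MAX_TEX_UNIT
    * rather than a hardcoded 16 below).
    */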
   unsigned max_samplers =
      brw->gen >= 8 || brw->is_haswell ? BRW_MAX_TEX_UNIT : 16;

   ctx->Const.QueryCounterBits.Timestamp = 36;

   ctx->Const.StripTextureBorder = true;

   ctx->Const.MaxDualSourceDrawBuffers = 1;
   ctx->Const.MaxDrawBuffers = BRW_MAX_DRAW_BUFFERS;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits = max_samplers;
   ctx->Const.MaxTextureCoordUnits = 8; /* Mesa limit */
   ctx->Const.MaxTextureUnits =
      MIN2(ctx->Const.MaxTextureCoordUnits,
           ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits);
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxTextureImageUnits = max_samplers;
   if (brw->gen >= 7)
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxTextureImageUnits = max_samplers;
   else
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxTextureImageUnits = 0;
   ctx->Const.MaxCombinedTextureImageUnits =
      ctx->Const.Program[MESA_SHADER_VERTEX].MaxTextureImageUnits +
      ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits +
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxTextureImageUnits;

   ctx->Const.MaxTextureLevels = 14; /* 8192 */
   if (ctx->Const.MaxTextureLevels > MAX_TEXTURE_LEVELS)
      ctx->Const.MaxTextureLevels = MAX_TEXTURE_LEVELS;
   ctx->Const.Max3DTextureLevels = 9;
   ctx->Const.MaxCubeTextureLevels = 12;

   if (brw->gen >= 7)
      ctx->Const.MaxArrayTextureLayers = 2048;
   else
      ctx->Const.MaxArrayTextureLayers = 512;

   ctx->Const.MaxTextureRectSize = 1 << 12;

   ctx->Const.MaxTextureMaxAnisotropy = 16.0;

   ctx->Const.MaxRenderbufferSize = 8192;

   /* Hardware only supports a limited number of transform feedback buffers.
    * So we need to override the Mesa default (which is based only on software
    * limits).
    */
   ctx->Const.MaxTransformFeedbackBuffers = BRW_MAX_SOL_BUFFERS;

   /* On Gen6, in the worst case, we use up one binding table entry per
    * transform feedback component (see comments above the definition of
    * BRW_MAX_SOL_BINDINGS, in brw_context.h), so we need to advertise a value
    * for MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS equal to
    * BRW_MAX_SOL_BINDINGS.
    *
    * In "separate components" mode, we need to divide this value by
    * BRW_MAX_SOL_BUFFERS, so that the total number of binding table entries
    * used up by all buffers will not exceed BRW_MAX_SOL_BINDINGS.
    */
   ctx->Const.MaxTransformFeedbackInterleavedComponents = BRW_MAX_SOL_BINDINGS;
   ctx->Const.MaxTransformFeedbackSeparateComponents =
      BRW_MAX_SOL_BINDINGS / BRW_MAX_SOL_BUFFERS;
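   /* With the definitions in brw_context.h at the time of writing
    * (BRW_MAX_SOL_BINDINGS = 64, BRW_MAX_SOL_BUFFERS = 4), this works out to
    * 64 interleaved components, or 16 components per buffer in separate mode.
    */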

   ctx->Const.AlwaysUseGetTransformFeedbackVertexCount = true;

   int max_samples;
   const int *msaa_modes = intel_supported_msaa_modes(brw->intelScreen);
   const int clamp_max_samples =
      driQueryOptioni(&brw->optionCache, "clamp_max_samples");

   if (clamp_max_samples < 0) {
      max_samples = msaa_modes[0];
   } else {
      /* Select the largest supported MSAA mode that does not exceed
       * clamp_max_samples.
       */
      max_samples = 0;
      for (int i = 0; msaa_modes[i] != 0; ++i) {
         if (msaa_modes[i] <= clamp_max_samples) {
            max_samples = msaa_modes[i];
            break;
         }
      }
   }
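   /* For example, with Gen7's modes {8, 4}: clamp_max_samples=6 selects 4x,
    * while clamp_max_samples=2 leaves max_samples at 0 (MSAA disabled).
    */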

   ctx->Const.MaxSamples = max_samples;
   ctx->Const.MaxColorTextureSamples = max_samples;
   ctx->Const.MaxDepthTextureSamples = max_samples;
   ctx->Const.MaxIntegerSamples = max_samples;

   if (brw->gen >= 7)
      ctx->Const.MaxProgramTextureGatherComponents = 4;

   ctx->Const.MinLineWidth = 1.0;
   ctx->Const.MinLineWidthAA = 1.0;
   ctx->Const.MaxLineWidth = 5.0;
   ctx->Const.MaxLineWidthAA = 5.0;
   ctx->Const.LineWidthGranularity = 0.5;

   ctx->Const.MinPointSize = 1.0;
   ctx->Const.MinPointSizeAA = 1.0;
   ctx->Const.MaxPointSize = 255.0;
   ctx->Const.MaxPointSizeAA = 255.0;
   ctx->Const.PointSizeGranularity = 1.0;

   if (brw->gen >= 5 || brw->is_g4x)
      ctx->Const.MaxClipPlanes = 8;

   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeInstructions = 16 * 1024;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxAluInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxTexInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxTexIndirections = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAluInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTexInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTexIndirections = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAttribs = 16;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTemps = 256;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAddressRegs = 1;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeParameters = 1024;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxEnvParams =
      MIN2(ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeParameters,
           ctx->Const.Program[MESA_SHADER_VERTEX].MaxEnvParams);

   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeInstructions = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAluInstructions = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTexInstructions = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTexIndirections = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAttribs = 12;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTemps = 256;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAddressRegs = 0;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeParameters = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxEnvParams =
      MIN2(ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeParameters,
           ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxEnvParams);

   /* Fragment shaders use real, 32-bit twos-complement integers for all
    * integer types.
    */
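   /* The 31/30/0 triple below follows glGetShaderPrecisionFormat()
    * conventions: ranges are reported as floor(log2(magnitude)), so
    * [-2^31, 2^31 - 1] becomes (31, 30), with 0 fractional bits of precision.
    */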
   ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.RangeMin = 31;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.RangeMax = 30;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.Precision = 0;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].HighInt = ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MediumInt = ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt;

   if (brw->gen >= 7) {
      ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxAtomicCounters = MAX_ATOMIC_COUNTERS;
      ctx->Const.Program[MESA_SHADER_VERTEX].MaxAtomicCounters = MAX_ATOMIC_COUNTERS;
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxAtomicCounters = MAX_ATOMIC_COUNTERS;
      ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxAtomicBuffers = BRW_MAX_ABO;
      ctx->Const.Program[MESA_SHADER_VERTEX].MaxAtomicBuffers = BRW_MAX_ABO;
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxAtomicBuffers = BRW_MAX_ABO;
      ctx->Const.MaxCombinedAtomicBuffers = 3 * BRW_MAX_ABO;
   }

   /* Gen6 converts quads to polygons at the beginning of the 3D pipeline,
    * but we're not sure how the conversion orders the vertices, which
    * affects the provoking vertex decision.  Always use the last-vertex
    * convention for quad primitives; it works as expected for now.
    */
   if (brw->gen >= 6)
      ctx->Const.QuadsFollowProvokingVertexConvention = false;

   ctx->Const.NativeIntegers = true;
   ctx->Const.UniformBooleanTrue = 1;

   /* From the gen4 PRM, volume 4 page 127:
    *
    *    "For SURFTYPE_BUFFER non-rendertarget surfaces, this field specifies
    *     the base address of the first element of the surface, computed in
    *     software by adding the surface base address to the byte offset of
    *     the element in the buffer."
    *
    * However, unaligned accesses are slower, so enforce buffer alignment.
    */
   ctx->Const.UniformBufferOffsetAlignment = 16;
   ctx->Const.TextureBufferOffsetAlignment = 16;

   if (brw->gen >= 6) {
      ctx->Const.MaxVarying = 32;
      ctx->Const.Program[MESA_SHADER_VERTEX].MaxOutputComponents = 128;
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxInputComponents = 64;
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxOutputComponents = 128;
      ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxInputComponents = 128;
   }

   /* We want the GLSL compiler to emit code that uses condition codes */
   for (int i = 0; i < MESA_SHADER_STAGES; i++) {
      ctx->ShaderCompilerOptions[i].MaxIfDepth = brw->gen < 6 ? 16 : UINT_MAX;
      ctx->ShaderCompilerOptions[i].EmitCondCodes = true;
      ctx->ShaderCompilerOptions[i].EmitNoNoise = true;
      ctx->ShaderCompilerOptions[i].EmitNoMainReturn = true;
      ctx->ShaderCompilerOptions[i].EmitNoIndirectInput = true;
      ctx->ShaderCompilerOptions[i].EmitNoIndirectOutput = true;

      ctx->ShaderCompilerOptions[i].EmitNoIndirectUniform =
         (i == MESA_SHADER_FRAGMENT);
      ctx->ShaderCompilerOptions[i].EmitNoIndirectTemp =
         (i == MESA_SHADER_FRAGMENT);
      ctx->ShaderCompilerOptions[i].LowerClipDistance = true;
   }

   ctx->ShaderCompilerOptions[MESA_SHADER_VERTEX].OptimizeForAOS = true;
   ctx->ShaderCompilerOptions[MESA_SHADER_GEOMETRY].OptimizeForAOS = true;

   /* ARB_viewport_array */
   if (brw->gen >= 7 && ctx->API == API_OPENGL_CORE) {
      ctx->Const.MaxViewports = GEN7_NUM_VIEWPORTS;
      ctx->Const.ViewportSubpixelBits = 0;

      /* Cast to float before negating because MaxViewportWidth is unsigned.
       */
      ctx->Const.ViewportBounds.Min = -(float)ctx->Const.MaxViewportWidth;
      ctx->Const.ViewportBounds.Max = ctx->Const.MaxViewportWidth;
   }
}

/**
 * Process driconf (drirc) options, setting appropriate context flags.
 *
 * intelInitExtensions still pokes at optionCache directly, in order to
 * avoid advertising various extensions.  No flags are set, so it makes
 * sense to continue doing that there.
 */
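/* As a sketch of the kind of input this parses (the exact drirc schema may
 * differ), a user could force batch flushing with a ~/.drirc along the
 * lines of:
 *
 *    <driconf>
 *       <device driver="i965">
 *          <application name="Default">
 *             <option name="always_flush_batch" value="true" />
 *          </application>
 *       </device>
 *    </driconf>
 */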
static void
brw_process_driconf_options(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   driOptionCache *options = &brw->optionCache;
   driParseConfigFiles(options, &brw->intelScreen->optionCache,
                       brw->driContext->driScreenPriv->myNum, "i965");

   int bo_reuse_mode = driQueryOptioni(options, "bo_reuse");
   switch (bo_reuse_mode) {
   case DRI_CONF_BO_REUSE_DISABLED:
      break;
   case DRI_CONF_BO_REUSE_ALL:
      intel_bufmgr_gem_enable_reuse(brw->bufmgr);
      break;
   }

   if (!driQueryOptionb(options, "hiz")) {
      brw->has_hiz = false;
      /* On gen6, you can only do separate stencil with HiZ. */
      if (brw->gen == 6)
         brw->has_separate_stencil = false;
   }

   if (driQueryOptionb(options, "always_flush_batch")) {
      fprintf(stderr, "flushing batchbuffer before/after each draw call\n");
      brw->always_flush_batch = true;
   }

   if (driQueryOptionb(options, "always_flush_cache")) {
      fprintf(stderr, "flushing GPU caches before/after each draw call\n");
      brw->always_flush_cache = true;
   }

   if (driQueryOptionb(options, "disable_throttling")) {
      fprintf(stderr, "disabling flush throttling\n");
      brw->disable_throttling = true;
   }

   brw->disable_derivative_optimization =
      driQueryOptionb(&brw->optionCache, "disable_derivative_optimization");

   brw->precompile = driQueryOptionb(&brw->optionCache, "shader_precompile");

   ctx->Const.ForceGLSLExtensionsWarn =
      driQueryOptionb(options, "force_glsl_extensions_warn");

   ctx->Const.DisableGLSLLineContinuations =
      driQueryOptionb(options, "disable_glsl_line_continuations");
}

GLboolean
brwCreateContext(gl_api api,
                 const struct gl_config *mesaVis,
                 __DRIcontext *driContextPriv,
                 unsigned major_version,
                 unsigned minor_version,
                 uint32_t flags,
                 bool notify_reset,
                 unsigned *dri_ctx_error,
                 void *sharedContextPrivate)
{
   __DRIscreen *sPriv = driContextPriv->driScreenPriv;
   struct gl_context *shareCtx = (struct gl_context *) sharedContextPrivate;
   struct intel_screen *screen = sPriv->driverPrivate;
   const struct brw_device_info *devinfo = screen->devinfo;
   struct dd_function_table functions;
   struct gl_config visual;

   /* Only allow the __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS flag if the kernel
    * provides us with context reset notifications.
    */
   uint32_t allowed_flags = __DRI_CTX_FLAG_DEBUG
                          | __DRI_CTX_FLAG_FORWARD_COMPATIBLE;

   if (screen->has_context_reset_notification)
      allowed_flags |= __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS;

   if (flags & ~allowed_flags) {
      *dri_ctx_error = __DRI_CTX_ERROR_UNKNOWN_FLAG;
      return false;
   }

   struct brw_context *brw = rzalloc(NULL, struct brw_context);
   if (!brw) {
      printf("%s: failed to alloc context\n", __FUNCTION__);
      *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
      return false;
   }

   driContextPriv->driverPrivate = brw;
   brw->driContext = driContextPriv;
   brw->intelScreen = screen;
   brw->bufmgr = screen->bufmgr;

   brw->gen = devinfo->gen;
   brw->gt = devinfo->gt;
   brw->is_g4x = devinfo->is_g4x;
   brw->is_baytrail = devinfo->is_baytrail;
   brw->is_haswell = devinfo->is_haswell;
   brw->has_llc = devinfo->has_llc;
   brw->has_hiz = devinfo->has_hiz_and_separate_stencil && brw->gen < 8;
   brw->has_separate_stencil = devinfo->has_hiz_and_separate_stencil;
   brw->has_pln = devinfo->has_pln;
   brw->has_compr4 = devinfo->has_compr4;
   brw->has_surface_tile_offset = devinfo->has_surface_tile_offset;
   brw->has_negative_rhw_bug = devinfo->has_negative_rhw_bug;
   brw->needs_unlit_centroid_workaround =
      devinfo->needs_unlit_centroid_workaround;

   brw->must_use_separate_stencil = screen->hw_must_use_separate_stencil;
   brw->has_swizzling = screen->hw_has_swizzling;

   if (brw->gen >= 8) {
      gen8_init_vtable_surface_functions(brw);
      gen7_init_vtable_sampler_functions(brw);
      brw->vtbl.emit_depth_stencil_hiz = gen8_emit_depth_stencil_hiz;
   } else if (brw->gen >= 7) {
      gen7_init_vtable_surface_functions(brw);
      gen7_init_vtable_sampler_functions(brw);
      brw->vtbl.emit_depth_stencil_hiz = gen7_emit_depth_stencil_hiz;
   } else {
      gen4_init_vtable_surface_functions(brw);
      gen4_init_vtable_sampler_functions(brw);
      brw->vtbl.emit_depth_stencil_hiz = brw_emit_depth_stencil_hiz;
   }
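   /* Note that Broadwell (gen8) reuses the Gen7 sampler state setup; so far
    * only surface state and depth/stencil/HiZ emission have gen8-specific
    * paths.
    */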

   brw_init_driver_functions(brw, &functions);

   if (notify_reset)
      functions.GetGraphicsResetStatus = brw_get_graphics_reset_status;

   struct gl_context *ctx = &brw->ctx;

   if (mesaVis == NULL) {
      memset(&visual, 0, sizeof visual);
      mesaVis = &visual;
   }

   if (!_mesa_initialize_context(ctx, api, mesaVis, shareCtx, &functions)) {
      *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
      printf("%s: failed to init mesa context\n", __FUNCTION__);
      intelDestroyContext(driContextPriv);
      return false;
   }

   driContextSetFlags(ctx, flags);

   /* Initialize the software rasterizer and helper modules.
    *
    * As of GL 3.1 core, the gen4+ driver doesn't need the swrast context for
    * software fallbacks (which we have to support on legacy GL to do weird
    * glDrawPixels(), glBitmap(), and other functions).
    */
   if (api != API_OPENGL_CORE && api != API_OPENGLES2) {
      _swrast_CreateContext(ctx);
   }

   _vbo_CreateContext(ctx);
   if (ctx->swrast_context) {
      _tnl_CreateContext(ctx);
      TNL_CONTEXT(ctx)->Driver.RunPipeline = _tnl_run_pipeline;
      _swsetup_CreateContext(ctx);

      /* Configure swrast to match hardware characteristics: */
      _swrast_allow_pixel_fog(ctx, false);
      _swrast_allow_vertex_fog(ctx, true);
   }

   _mesa_meta_init(ctx);

   brw_process_driconf_options(brw);
   brw_process_intel_debug_variable(brw);
   brw_initialize_context_constants(brw);

   ctx->Const.ResetStrategy = notify_reset
      ? GL_LOSE_CONTEXT_ON_RESET_ARB : GL_NO_RESET_NOTIFICATION_ARB;

   /* Reinitialize the context point state.  It depends on ctx->Const values. */
   _mesa_init_point(ctx);

   intel_batchbuffer_init(brw);

   brw_init_state(brw);

   intelInitExtensions(ctx);

   intel_fbo_init(brw);

   if (brw->gen >= 6) {
      /* Create a new hardware context.  Using a hardware context means that
       * our GPU state will be saved/restored on context switch, allowing us
       * to assume that the GPU is in the same state we left it in.
       *
       * This is required for transform feedback buffer offsets, query objects,
       * and also allows us to reduce how much state we have to emit.
       */
      brw->hw_ctx = drm_intel_gem_context_create(brw->bufmgr);

      if (!brw->hw_ctx) {
         fprintf(stderr, "Gen6+ requires Kernel 3.6 or later.\n");
         intelDestroyContext(driContextPriv);
         return false;
      }
   }

   brw_init_surface_formats(brw);

   if (brw->is_g4x || brw->gen >= 5) {
      brw->CMD_VF_STATISTICS = GM45_3DSTATE_VF_STATISTICS;
      brw->CMD_PIPELINE_SELECT = CMD_PIPELINE_SELECT_GM45;
   } else {
      brw->CMD_VF_STATISTICS = GEN4_3DSTATE_VF_STATISTICS;
      brw->CMD_PIPELINE_SELECT = CMD_PIPELINE_SELECT_965;
   }

   brw->max_vs_threads = devinfo->max_vs_threads;
   brw->max_gs_threads = devinfo->max_gs_threads;
   brw->max_wm_threads = devinfo->max_wm_threads;
   brw->urb.size = devinfo->urb.size;
   brw->urb.min_vs_entries = devinfo->urb.min_vs_entries;
   brw->urb.max_vs_entries = devinfo->urb.max_vs_entries;
   brw->urb.max_gs_entries = devinfo->urb.max_gs_entries;

   /* Estimate the size of the mappable aperture into the GTT.  There's an
    * ioctl to get the whole GTT size, but not one to get the mappable subset.
    * It turns out it's basically always 256MB, though some ancient hardware
    * was smaller.
    */
   uint32_t gtt_size = 256 * 1024 * 1024;

   /* We don't want to map two objects such that a memcpy between them would
    * just fault one mapping in and then the other over and over forever.  So
    * we would need to divide the GTT size by 2.  Additionally, some GTT is
    * taken up by things like the framebuffer and the ringbuffer and such, so
    * be more conservative.
    */
   brw->max_gtt_map_object_size = gtt_size / 4;

   if (brw->gen == 6)
      brw->urb.gen6_gs_previously_active = false;

   brw->prim_restart.in_progress = false;
   brw->prim_restart.enable_cut_index = false;
   brw->gs.enabled = false;

   if (brw->gen < 6) {
      brw->curbe.last_buf = calloc(1, 4096);
      brw->curbe.next_buf = calloc(1, 4096);
   }

   ctx->VertexProgram._MaintainTnlProgram = true;
   ctx->FragmentProgram._MaintainTexEnvProgram = true;

   brw_draw_init(brw);

   if ((flags & __DRI_CTX_FLAG_DEBUG) != 0) {
      /* Turn on some extra GL_ARB_debug_output generation. */
      brw->perf_debug = true;
   }

   if ((flags & __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS) != 0)
      ctx->Const.ContextFlags |= GL_CONTEXT_FLAG_ROBUST_ACCESS_BIT_ARB;

   brw_fs_alloc_reg_sets(brw);
   brw_vec4_alloc_reg_set(brw);

   if (INTEL_DEBUG & DEBUG_SHADER_TIME)
      brw_init_shader_time(brw);

   _mesa_compute_version(ctx);

   _mesa_initialize_dispatch_tables(ctx);
   _mesa_initialize_vbo_vtxfmt(ctx);

   if (ctx->Extensions.AMD_performance_monitor) {
      brw_init_performance_monitors(brw);
   }

   return true;
}

void
intelDestroyContext(__DRIcontext * driContextPriv)
{
   struct brw_context *brw =
      (struct brw_context *) driContextPriv->driverPrivate;
   struct gl_context *ctx = &brw->ctx;

   assert(brw); /* should never be null */
   if (!brw)
      return;

   /* Dump a final BMP in case the application doesn't call SwapBuffers */
   if (INTEL_DEBUG & DEBUG_AUB) {
      intel_batchbuffer_flush(brw);
      aub_dump_bmp(&brw->ctx);
   }

   _mesa_meta_free(&brw->ctx);

   if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
      /* Force a report. */
      brw->shader_time.report_time = 0;

      brw_collect_and_report_shader_time(brw);
      brw_destroy_shader_time(brw);
   }

   brw_destroy_state(brw);
   brw_draw_destroy(brw);

   drm_intel_bo_unreference(brw->curbe.curbe_bo);
   drm_intel_bo_unreference(brw->vs.base.const_bo);
   drm_intel_bo_unreference(brw->wm.base.const_bo);

   free(brw->curbe.last_buf);
   free(brw->curbe.next_buf);

   drm_intel_gem_context_destroy(brw->hw_ctx);

   if (ctx->swrast_context) {
      _swsetup_DestroyContext(&brw->ctx);
      _tnl_DestroyContext(&brw->ctx);
   }
   _vbo_DestroyContext(&brw->ctx);

   if (ctx->swrast_context)
      _swrast_DestroyContext(&brw->ctx);

   intel_batchbuffer_free(brw);

   drm_intel_bo_unreference(brw->first_post_swapbuffers_batch);
   brw->first_post_swapbuffers_batch = NULL;

   driDestroyOptionCache(&brw->optionCache);

   /* free the Mesa context */
   _mesa_free_context_data(&brw->ctx);

   ralloc_free(brw);
   driContextPriv->driverPrivate = NULL;
}

GLboolean
intelUnbindContext(__DRIcontext * driContextPriv)
{
   /* Unset current context and dispatch table */
   _mesa_make_current(NULL, NULL, NULL);

   return true;
}

/**
 * Fixes up the context for GLES2/3 with our default-to-sRGB-capable behavior
 * on window system framebuffers.
 *
 * Desktop GL is fairly reasonable in its handling of sRGB: You can ask if
 * your renderbuffer can do sRGB encode, and you can flip a switch that does
 * sRGB encode if the renderbuffer can handle it.  You can ask specifically
 * for a visual where you're guaranteed to be capable, but it turns out that
 * everyone just makes all their ARGB8888 visuals capable and doesn't offer
 * incapable ones, because there's no difference between the two in resources
 * used.  Applications thus get built that accidentally rely on the default
 * visual choice being sRGB, so we make ours sRGB capable.  Everything sounds
 * great...
 *
 * But for GLES2/3, they decided that it was silly to not turn on sRGB encode
 * for sRGB renderbuffers you made with the GL_EXT_texture_sRGB equivalent.
 * So they removed the enable knob and made it "if the renderbuffer is sRGB
 * capable, do sRGB encode".  Then, for your window system renderbuffers, you
 * can ask for sRGB visuals and get sRGB encode, or not ask for sRGB visuals
 * and get no sRGB encode (assuming that both kinds of visual are available).
 * Thus our choice to support sRGB by default on our visuals for desktop would
 * result in broken rendering of GLES apps that aren't expecting sRGB encode.
 *
 * Unfortunately, renderbuffer setup happens before a context is created.  So
 * in intel_screen.c we always set up sRGB, and here, if you're a GLES2/3
 * context (without an sRGB visual, though we don't have sRGB visuals exposed
 * yet), we go turn that back off before anyone finds out.
 */
static void
intel_gles3_srgb_workaround(struct brw_context *brw,
                            struct gl_framebuffer *fb)
{
   struct gl_context *ctx = &brw->ctx;

   if (_mesa_is_desktop_gl(ctx) || !fb->Visual.sRGBCapable)
      return;

   /* Some day when we support the sRGB capable bit on visuals available for
    * GLES, we'll need to respect that and not disable things here.
    */
   fb->Visual.sRGBCapable = false;
   for (int i = 0; i < BUFFER_COUNT; i++) {
      if (fb->Attachment[i].Renderbuffer &&
          fb->Attachment[i].Renderbuffer->Format == MESA_FORMAT_B8G8R8A8_SRGB) {
         fb->Attachment[i].Renderbuffer->Format = MESA_FORMAT_B8G8R8A8_UNORM;
      }
   }
}

GLboolean
intelMakeCurrent(__DRIcontext * driContextPriv,
                 __DRIdrawable * driDrawPriv,
                 __DRIdrawable * driReadPriv)
{
   struct brw_context *brw;
   GET_CURRENT_CONTEXT(curCtx);

   if (driContextPriv)
      brw = (struct brw_context *) driContextPriv->driverPrivate;
   else
      brw = NULL;

   /* According to the glXMakeCurrent() man page: "Pending commands to
    * the previous context, if any, are flushed before it is released."
    * But only flush if we're actually changing contexts.
    */
   if (brw_context(curCtx) && brw_context(curCtx) != brw) {
      _mesa_flush(curCtx);
   }

   if (driContextPriv) {
      struct gl_context *ctx = &brw->ctx;
      struct gl_framebuffer *fb, *readFb;

      if (driDrawPriv == NULL && driReadPriv == NULL) {
         fb = _mesa_get_incomplete_framebuffer();
         readFb = _mesa_get_incomplete_framebuffer();
      } else {
         fb = driDrawPriv->driverPrivate;
         readFb = driReadPriv->driverPrivate;
         driContextPriv->dri2.draw_stamp = driDrawPriv->dri2.stamp - 1;
         driContextPriv->dri2.read_stamp = driReadPriv->dri2.stamp - 1;
      }

      /* The sRGB workaround changes the renderbuffer's format.  We must change
       * the format before the renderbuffer's miptree gets allocated, otherwise
       * the formats of the renderbuffer and its miptree will differ.
       */
      intel_gles3_srgb_workaround(brw, fb);
      intel_gles3_srgb_workaround(brw, readFb);

      /* If the context viewport hasn't been initialized, force a call out to
       * the loader to get buffers so we have a drawable size for the initial
       * viewport.
       */
      if (!brw->ctx.ViewportInitialized)
         intel_prepare_render(brw);

      _mesa_make_current(ctx, fb, readFb);
   } else {
      _mesa_make_current(NULL, NULL, NULL);
   }

   return true;
}

void
intel_resolve_for_dri2_flush(struct brw_context *brw,
                             __DRIdrawable *drawable)
{
   if (brw->gen < 6) {
      /* MSAA and fast color clear are not supported, so don't waste time
       * checking whether a resolve is needed.
       */
      return;
   }

   struct gl_framebuffer *fb = drawable->driverPrivate;
   struct intel_renderbuffer *rb;

   /* Usually, only the back buffer will need to be downsampled.  However,
    * the front buffer will also need it if the user has rendered into it.
    */
   static const gl_buffer_index buffers[2] = {
      BUFFER_BACK_LEFT,
      BUFFER_FRONT_LEFT,
   };

   for (int i = 0; i < 2; ++i) {
      rb = intel_get_renderbuffer(fb, buffers[i]);
      if (rb == NULL || rb->mt == NULL)
         continue;
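      /* A single-sampled miptree may still carry unresolved fast-color-clear
       * state, which must be resolved before the server reads the buffer;
       * a multisampled miptree must instead be downsampled into its
       * single-sampled counterpart.
       */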
      if (rb->mt->num_samples <= 1)
         intel_miptree_resolve_color(brw, rb->mt);
      else
         intel_miptree_downsample(brw, rb->mt);
   }
}

static unsigned
intel_bits_per_pixel(const struct intel_renderbuffer *rb)
{
   return _mesa_get_format_bytes(intel_rb_format(rb)) * 8;
}

static void
intel_query_dri2_buffers(struct brw_context *brw,
                         __DRIdrawable *drawable,
                         __DRIbuffer **buffers,
                         int *count);

static void
intel_process_dri2_buffer(struct brw_context *brw,
                          __DRIdrawable *drawable,
                          __DRIbuffer *buffer,
                          struct intel_renderbuffer *rb,
                          const char *buffer_name);

static void
intel_update_image_buffers(struct brw_context *brw, __DRIdrawable *drawable);

static void
intel_update_dri2_buffers(struct brw_context *brw, __DRIdrawable *drawable)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;
   struct intel_renderbuffer *rb;
   __DRIbuffer *buffers = NULL;
   int i, count;
   const char *region_name;

   /* Set this up front, so that in case our buffers get invalidated
    * while we're getting new buffers, we don't clobber the stamp and
    * thus ignore the invalidate.
    */
   drawable->lastStamp = drawable->dri2.stamp;

   if (unlikely(INTEL_DEBUG & DEBUG_DRI))
      fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);

   intel_query_dri2_buffers(brw, drawable, &buffers, &count);

   if (buffers == NULL)
      return;

   for (i = 0; i < count; i++) {
      switch (buffers[i].attachment) {
      case __DRI_BUFFER_FRONT_LEFT:
         rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
         region_name = "dri2 front buffer";
         break;

      case __DRI_BUFFER_FAKE_FRONT_LEFT:
         rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
         region_name = "dri2 fake front buffer";
         break;

      case __DRI_BUFFER_BACK_LEFT:
         rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
         region_name = "dri2 back buffer";
         break;

      case __DRI_BUFFER_DEPTH:
      case __DRI_BUFFER_HIZ:
      case __DRI_BUFFER_DEPTH_STENCIL:
      case __DRI_BUFFER_STENCIL:
      case __DRI_BUFFER_ACCUM:
      default:
         fprintf(stderr,
                 "unhandled buffer attach event, attachment type %d\n",
                 buffers[i].attachment);
         return;
      }

      intel_process_dri2_buffer(brw, drawable, &buffers[i], rb, region_name);
   }
}

void
intel_update_renderbuffers(__DRIcontext *context, __DRIdrawable *drawable)
{
   struct brw_context *brw = context->driverPrivate;
   __DRIscreen *screen = brw->intelScreen->driScrnPriv;

   /* Set this up front, so that in case our buffers get invalidated
    * while we're getting new buffers, we don't clobber the stamp and
    * thus ignore the invalidate.
    */
   drawable->lastStamp = drawable->dri2.stamp;

   if (unlikely(INTEL_DEBUG & DEBUG_DRI))
      fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);

   if (screen->image.loader)
      intel_update_image_buffers(brw, drawable);
   else
      intel_update_dri2_buffers(brw, drawable);

   driUpdateFramebufferSize(&brw->ctx, drawable);
}

/**
 * intel_prepare_render should be called anywhere that current read/drawbuffer
 * state is required.
 */
void
intel_prepare_render(struct brw_context *brw)
{
   __DRIcontext *driContext = brw->driContext;
   __DRIdrawable *drawable;

   drawable = driContext->driDrawablePriv;
   if (drawable && drawable->dri2.stamp != driContext->dri2.draw_stamp) {
      if (drawable->lastStamp != drawable->dri2.stamp)
         intel_update_renderbuffers(driContext, drawable);
      driContext->dri2.draw_stamp = drawable->dri2.stamp;
   }

   drawable = driContext->driReadablePriv;
   if (drawable && drawable->dri2.stamp != driContext->dri2.read_stamp) {
      if (drawable->lastStamp != drawable->dri2.stamp)
         intel_update_renderbuffers(driContext, drawable);
      driContext->dri2.read_stamp = drawable->dri2.stamp;
   }

   /* If we're currently rendering to the front buffer, the rendering
    * that will happen next will probably dirty the front buffer.  So
    * mark it as dirty here.
    */
   if (brw->is_front_buffer_rendering)
      brw->front_buffer_dirty = true;

   /* Wait for the swapbuffers before the one we just emitted, so we
    * don't get too many swaps outstanding for apps that are GPU-heavy
    * but not CPU-heavy.
    *
    * We're using intelDRI2Flush (called from the loader before
    * swapbuffer) and glFlush (for front buffer rendering) as the
    * indicator that a frame is done, and then throttle when we get
    * here as we prepare to render the next frame.  At that point, the
    * round trips for the swap/copy and for getting new buffers are
    * done, so we'll spend less time waiting on the GPU.
    *
    * Unfortunately, we don't have a handle to the batch containing
    * the swap, and getting our hands on that doesn't seem worth it,
    * so we just use the first batch we emitted after the last swap.
    */
   if (brw->need_throttle && brw->first_post_swapbuffers_batch) {
      if (!brw->disable_throttling)
         drm_intel_bo_wait_rendering(brw->first_post_swapbuffers_batch);
      drm_intel_bo_unreference(brw->first_post_swapbuffers_batch);
      brw->first_post_swapbuffers_batch = NULL;
      brw->need_throttle = false;
   }
}

/**
 * \brief Query DRI2 to obtain a DRIdrawable's buffers.
 *
 * To determine which DRI buffers to request, examine the renderbuffers
 * attached to the drawable's framebuffer.  Then request the buffers with
 * DRI2GetBuffers() or DRI2GetBuffersWithFormat().
 *
 * This is called from intel_update_renderbuffers().
 *
 * \param drawable      Drawable whose buffers are queried.
 * \param buffers       [out] List of buffers returned by DRI2 query.
 * \param buffer_count  [out] Number of buffers returned.
 *
 * \see intel_update_renderbuffers()
 * \see DRI2GetBuffers()
 * \see DRI2GetBuffersWithFormat()
 */
static void
intel_query_dri2_buffers(struct brw_context *brw,
                         __DRIdrawable *drawable,
                         __DRIbuffer **buffers,
                         int *buffer_count)
{
   __DRIscreen *screen = brw->intelScreen->driScrnPriv;
   struct gl_framebuffer *fb = drawable->driverPrivate;
   int i = 0;
   unsigned attachments[8];

   struct intel_renderbuffer *front_rb;
   struct intel_renderbuffer *back_rb;

   front_rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
   back_rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);

   memset(attachments, 0, sizeof(attachments));
   if ((brw->is_front_buffer_rendering ||
        brw->is_front_buffer_reading ||
        !back_rb) && front_rb) {
      /* If a fake front buffer is in use, then querying for
       * __DRI_BUFFER_FRONT_LEFT will cause the server to copy the image from
       * the real front buffer to the fake front buffer.  So before doing the
       * query, we need to make sure all the pending drawing has landed in the
       * real front buffer.
       */
      intel_batchbuffer_flush(brw);
      intel_flush_front(&brw->ctx);

      attachments[i++] = __DRI_BUFFER_FRONT_LEFT;
      attachments[i++] = intel_bits_per_pixel(front_rb);
   } else if (front_rb && brw->front_buffer_dirty) {
      /* We have pending front buffer rendering, but we aren't querying for a
       * front buffer.  If the front buffer we have is a fake front buffer,
       * the X server is going to throw it away when it processes the query.
       * So before doing the query, make sure all the pending drawing has
       * landed in the real front buffer.
       */
      intel_batchbuffer_flush(brw);
      intel_flush_front(&brw->ctx);
   }

   if (back_rb) {
      attachments[i++] = __DRI_BUFFER_BACK_LEFT;
      attachments[i++] = intel_bits_per_pixel(back_rb);
   }

   assert(i <= ARRAY_SIZE(attachments));

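   /* attachments[] holds (attachment, bits-per-pixel) pairs, which is why
    * i / 2 is passed to the loader as the attachment count below.
    */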
   *buffers = screen->dri2.loader->getBuffersWithFormat(drawable,
                                                        &drawable->w,
                                                        &drawable->h,
                                                        attachments, i / 2,
                                                        buffer_count,
                                                        drawable->loaderPrivate);
}

/**
 * \brief Assign a DRI buffer's DRM region to a renderbuffer.
 *
 * This is called from intel_update_renderbuffers().
 *
 * \par Note:
 *    DRI buffers whose attachment point is DRI2BufferStencil or
 *    DRI2BufferDepthStencil are handled as special cases.
 *
 * \param buffer_name is a human readable name, such as "dri2 front buffer",
 *        that is passed to intel_region_alloc_for_handle().
 *
 * \see intel_update_renderbuffers()
 * \see intel_region_alloc_for_handle()
 */
static void
intel_process_dri2_buffer(struct brw_context *brw,
                          __DRIdrawable *drawable,
                          __DRIbuffer *buffer,
                          struct intel_renderbuffer *rb,
                          const char *buffer_name)
{
   struct intel_region *region = NULL;

   if (!rb)
      return;

   unsigned num_samples = rb->Base.Base.NumSamples;

   /* We try to avoid closing and reopening the same BO name, because the first
    * use of a mapping of the buffer involves a bunch of page faulting which is
    * moderately expensive.
    */
   if (num_samples == 0) {
      if (rb->mt &&
          rb->mt->region &&
          rb->mt->region->name == buffer->name)
         return;
   } else {
      if (rb->mt &&
          rb->mt->singlesample_mt &&
          rb->mt->singlesample_mt->region &&
          rb->mt->singlesample_mt->region->name == buffer->name)
         return;
   }

   if (unlikely(INTEL_DEBUG & DEBUG_DRI)) {
      fprintf(stderr,
              "attaching buffer %d, at %d, cpp %d, pitch %d\n",
              buffer->name, buffer->attachment,
              buffer->cpp, buffer->pitch);
   }

   intel_miptree_release(&rb->mt);
   region = intel_region_alloc_for_handle(brw->intelScreen,
                                          buffer->cpp,
                                          drawable->w,
                                          drawable->h,
                                          buffer->pitch,
                                          buffer->name,
                                          buffer_name);
   if (!region)
      return;

   rb->mt = intel_miptree_create_for_dri2_buffer(brw,
                                                 buffer->attachment,
                                                 intel_rb_format(rb),
                                                 num_samples,
                                                 region);
   intel_region_release(&region);
}

static void
intel_update_image_buffer(struct brw_context *intel,
                          __DRIdrawable *drawable,
                          struct intel_renderbuffer *rb,
                          __DRIimage *buffer,
                          enum __DRIimageBufferMask buffer_type)
{
   struct intel_region *region = buffer->region;

   if (!rb || !region)
      return;

   unsigned num_samples = rb->Base.Base.NumSamples;

   /* Check and see if we're already bound to the right
    * buffer object.
    */
   if (num_samples == 0) {
      if (rb->mt &&
          rb->mt->region &&
          rb->mt->region->bo == region->bo)
         return;
   } else {
      if (rb->mt &&
          rb->mt->singlesample_mt &&
          rb->mt->singlesample_mt->region &&
          rb->mt->singlesample_mt->region->bo == region->bo)
         return;
   }

   intel_miptree_release(&rb->mt);
   rb->mt = intel_miptree_create_for_image_buffer(intel,
                                                  buffer_type,
                                                  intel_rb_format(rb),
                                                  num_samples,
                                                  region);
}

/**
 * \brief Query the DRI image loader to obtain a DRIdrawable's buffers.
 *
 * To determine which DRI buffers to request, examine the renderbuffers
 * attached to the drawable's framebuffer.  Then request the buffers from
 * the image loader.
 *
 * This is called from intel_update_renderbuffers().
 *
 * \param drawable      Drawable whose buffers are queried.
 *
 * \see intel_update_renderbuffers()
 */
static void
intel_update_image_buffers(struct brw_context *brw, __DRIdrawable *drawable)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;
   __DRIscreen *screen = brw->intelScreen->driScrnPriv;
   struct intel_renderbuffer *front_rb;
   struct intel_renderbuffer *back_rb;
   struct __DRIimageList images;
   unsigned int format;
   uint32_t buffer_mask = 0;

   front_rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
   back_rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);

   if (back_rb)
      format = intel_rb_format(back_rb);
   else if (front_rb)
      format = intel_rb_format(front_rb);
   else
      return;

   if ((brw->is_front_buffer_rendering || brw->is_front_buffer_reading ||
        !back_rb) && front_rb)
      buffer_mask |= __DRI_IMAGE_BUFFER_FRONT;

   if (back_rb)
      buffer_mask |= __DRI_IMAGE_BUFFER_BACK;

   (*screen->image.loader->getBuffers) (drawable,
                                        driGLFormatToImageFormat(format),
                                        &drawable->dri2.stamp,
                                        drawable->loaderPrivate,
                                        buffer_mask,
                                        &images);

   if (images.image_mask & __DRI_IMAGE_BUFFER_FRONT) {
      drawable->w = images.front->width;
      drawable->h = images.front->height;
      intel_update_image_buffer(brw,
                                drawable,
                                front_rb,
                                images.front,
                                __DRI_IMAGE_BUFFER_FRONT);
   }
   if (images.image_mask & __DRI_IMAGE_BUFFER_BACK) {
      drawable->w = images.back->width;
      drawable->h = images.back->height;
      intel_update_image_buffer(brw,
                                drawable,
                                back_rb,
                                images.back,
                                __DRI_IMAGE_BUFFER_BACK);
   }
}