/*
 Copyright 2003 VMware, Inc.
 Copyright (C) Intel Corp. 2006. All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */


#include "main/api_exec.h"
#include "main/context.h"
#include "main/fbobject.h"
#include "main/imports.h"
#include "main/macros.h"
#include "main/points.h"
#include "main/version.h"
#include "main/vtxfmt.h"

#include "vbo/vbo_context.h"

#include "drivers/common/driverfuncs.h"
#include "drivers/common/meta.h"
#include "utils.h"

#include "brw_context.h"
#include "brw_defines.h"
#include "brw_draw.h"
#include "brw_state.h"

#include "intel_batchbuffer.h"
#include "intel_buffer_objects.h"
#include "intel_buffers.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
#include "intel_pixel.h"
#include "intel_regions.h"
#include "intel_tex.h"
#include "intel_tex_obj.h"

#include "swrast_setup/swrast_setup.h"
#include "tnl/tnl.h"
#include "tnl/t_pipeline.h"
#include "glsl/ralloc.h"

/***************************************
 * Mesa's Driver Functions
 ***************************************/

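/* Report the MSAA sample counts supported for a format, highest count
 * first: Gen7 supports 8x and 4x, Gen6 supports 4x, and earlier
 * generations have no MSAA, so only 1x is reported.
 */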
static size_t
brw_query_samples_for_format(struct gl_context *ctx, GLenum target,
                             GLenum internalFormat, int samples[16])
{
   struct brw_context *brw = brw_context(ctx);

   (void) target;

   switch (brw->gen) {
   case 7:
      samples[0] = 8;
      samples[1] = 4;
      return 2;

   case 6:
      samples[0] = 4;
      return 1;

   default:
      samples[0] = 1;
      return 1;
   }
}

const char *const brw_vendor_string = "Intel Open Source Technology Center";

const char *
brw_get_renderer_string(unsigned deviceID)
{
   const char *chipset;
   static char buffer[128];

   switch (deviceID) {
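   /* The case labels below are generated by re-defining the CHIPSET()
    * X-macro and textually including the PCI ID table.
    */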
#undef CHIPSET
#define CHIPSET(id, symbol, str) case id: chipset = str; break;
#include "pci_ids/i965_pci_ids.h"
   default:
      chipset = "Unknown Intel Chipset";
      break;
   }

   (void) driGetRendererString(buffer, chipset, 0);
   return buffer;
}

static const GLubyte *
intelGetString(struct gl_context * ctx, GLenum name)
{
   const struct brw_context *const brw = brw_context(ctx);

   switch (name) {
   case GL_VENDOR:
      return (GLubyte *) brw_vendor_string;

   case GL_RENDERER:
      return
         (GLubyte *) brw_get_renderer_string(brw->intelScreen->deviceID);

   default:
      return NULL;
   }
}

static void
intel_viewport(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);
   __DRIcontext *driContext = brw->driContext;

   if (_mesa_is_winsys_fbo(ctx->DrawBuffer)) {
      dri2InvalidateDrawable(driContext->driDrawablePriv);
      dri2InvalidateDrawable(driContext->driReadablePriv);
   }
}

static void
intelInvalidateState(struct gl_context * ctx, GLuint new_state)
{
   struct brw_context *brw = brw_context(ctx);

   if (ctx->swrast_context)
      _swrast_InvalidateState(ctx, new_state);
   _vbo_InvalidateState(ctx, new_state);

   brw->NewGLState |= new_state;
}

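/* Pick the flushFrontBuffer hook from whichever loader interface the
 * screen was bound with: the image loader or the classic DRI2 loader.
 */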
#define flushFront(screen)      ((screen)->image.loader ? (screen)->image.loader->flushFrontBuffer : (screen)->dri2.loader->flushFrontBuffer)

static void
intel_flush_front(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);
   __DRIcontext *driContext = brw->driContext;
   __DRIdrawable *driDrawable = driContext->driDrawablePriv;
   __DRIscreen *const screen = brw->intelScreen->driScrnPriv;

   if (brw->front_buffer_dirty && _mesa_is_winsys_fbo(ctx->DrawBuffer)) {
      if (flushFront(screen) && driDrawable &&
          driDrawable->loaderPrivate) {

         /* Resolve before flushing FAKE_FRONT_LEFT to FRONT_LEFT.
          *
          * This potentially resolves both the front and back buffers. It
          * is unnecessary to resolve the back, but harms nothing except
          * performance. And no one cares about front-buffer render
          * performance.
          */
         intel_resolve_for_dri2_flush(brw, driDrawable);
         intel_batchbuffer_flush(brw);

         flushFront(screen)(driDrawable, driDrawable->loaderPrivate);

         /* We set the dirty bit in intel_prepare_render() if we're
          * front buffer rendering once we get there.
          */
         brw->front_buffer_dirty = false;
      }
   }
}

static void
intel_glFlush(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);

   intel_batchbuffer_flush(brw);
   intel_flush_front(ctx);
   if (brw->is_front_buffer_rendering)
      brw->need_throttle = true;
}

void
intelFinish(struct gl_context * ctx)
{
   struct brw_context *brw = brw_context(ctx);

   intel_glFlush(ctx);

   if (brw->batch.last_bo)
      drm_intel_bo_wait_rendering(brw->batch.last_bo);
}

static void
brw_init_driver_functions(struct brw_context *brw,
                          struct dd_function_table *functions)
{
   _mesa_init_driver_functions(functions);

   /* GLX uses DRI2 invalidate events to handle window resizing.
    * Unfortunately, EGL does not - libEGL is written in XCB (not Xlib),
    * which doesn't provide a mechanism for snooping the event queues.
    *
    * So EGL still relies on viewport hacks to handle window resizing.
    * This should go away with DRI3000.
    */
   if (!brw->driContext->driScreenPriv->dri2.useInvalidate)
      functions->Viewport = intel_viewport;

   functions->Flush = intel_glFlush;
   functions->Finish = intelFinish;
   functions->GetString = intelGetString;
   functions->UpdateState = intelInvalidateState;

   intelInitTextureFuncs(functions);
   intelInitTextureImageFuncs(functions);
   intelInitTextureSubImageFuncs(functions);
   intelInitTextureCopyImageFuncs(functions);
   intelInitClearFuncs(functions);
   intelInitBufferFuncs(functions);
   intelInitPixelFuncs(functions);
   intelInitBufferObjectFuncs(functions);
   intel_init_syncobj_functions(functions);
   brw_init_object_purgeable_functions(functions);

   brwInitFragProgFuncs( functions );
   brw_init_common_queryobj_functions(functions);
   if (brw->gen >= 6)
      gen6_init_queryobj_functions(functions);
   else
      gen4_init_queryobj_functions(functions);

   functions->QuerySamplesForFormat = brw_query_samples_for_format;

   functions->NewTransformFeedback = brw_new_transform_feedback;
   functions->DeleteTransformFeedback = brw_delete_transform_feedback;
   functions->GetTransformFeedbackVertexCount =
      brw_get_transform_feedback_vertex_count;
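   /* Gen7+ has dedicated pause/resume hooks; earlier generations only
    * override begin/end.
    */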
   if (brw->gen >= 7) {
      functions->BeginTransformFeedback = gen7_begin_transform_feedback;
      functions->EndTransformFeedback = gen7_end_transform_feedback;
      functions->PauseTransformFeedback = gen7_pause_transform_feedback;
      functions->ResumeTransformFeedback = gen7_resume_transform_feedback;
   } else {
      functions->BeginTransformFeedback = brw_begin_transform_feedback;
      functions->EndTransformFeedback = brw_end_transform_feedback;
   }

   if (brw->gen >= 6)
      functions->GetSamplePosition = gen6_get_sample_position;
}

static void
brw_initialize_context_constants(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

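   /* The hardware TIMESTAMP register provides 36 meaningful bits on
    * these generations, hence the 36 advertised for timestamp queries.
    */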
   ctx->Const.QueryCounterBits.Timestamp = 36;

   ctx->Const.StripTextureBorder = true;

   ctx->Const.MaxDualSourceDrawBuffers = 1;
   ctx->Const.MaxDrawBuffers = BRW_MAX_DRAW_BUFFERS;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits = BRW_MAX_TEX_UNIT;
   ctx->Const.MaxTextureCoordUnits = 8; /* Mesa limit */
   ctx->Const.MaxTextureUnits =
      MIN2(ctx->Const.MaxTextureCoordUnits,
           ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits);
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxTextureImageUnits = BRW_MAX_TEX_UNIT;
   if (brw->gen >= 7)
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxTextureImageUnits = BRW_MAX_TEX_UNIT;
   else
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxTextureImageUnits = 0;
   ctx->Const.MaxCombinedTextureImageUnits =
      ctx->Const.Program[MESA_SHADER_VERTEX].MaxTextureImageUnits +
      ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits +
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxTextureImageUnits;

   ctx->Const.MaxTextureLevels = 14; /* 8192 */
   if (ctx->Const.MaxTextureLevels > MAX_TEXTURE_LEVELS)
      ctx->Const.MaxTextureLevels = MAX_TEXTURE_LEVELS;
   ctx->Const.Max3DTextureLevels = 9;
   ctx->Const.MaxCubeTextureLevels = 12;

   if (brw->gen >= 7)
      ctx->Const.MaxArrayTextureLayers = 2048;
   else
      ctx->Const.MaxArrayTextureLayers = 512;

   ctx->Const.MaxTextureRectSize = 1 << 12;

   ctx->Const.MaxTextureMaxAnisotropy = 16.0;

   ctx->Const.MaxRenderbufferSize = 8192;

   /* Hardware only supports a limited number of transform feedback buffers.
    * So we need to override the Mesa default (which is based only on software
    * limits).
    */
   ctx->Const.MaxTransformFeedbackBuffers = BRW_MAX_SOL_BUFFERS;

   /* On Gen6, in the worst case, we use up one binding table entry per
    * transform feedback component (see comments above the definition of
    * BRW_MAX_SOL_BINDINGS, in brw_context.h), so we need to advertise a value
    * for MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS equal to
    * BRW_MAX_SOL_BINDINGS.
    *
    * In "separate components" mode, we need to divide this value by
    * BRW_MAX_SOL_BUFFERS, so that the total number of binding table entries
    * used up by all buffers will not exceed BRW_MAX_SOL_BINDINGS.
    */
   ctx->Const.MaxTransformFeedbackInterleavedComponents = BRW_MAX_SOL_BINDINGS;
   ctx->Const.MaxTransformFeedbackSeparateComponents =
      BRW_MAX_SOL_BINDINGS / BRW_MAX_SOL_BUFFERS;
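   /* With the definitions in brw_context.h at the time of writing
    * (64 bindings, 4 buffers), this works out to 16 separate components
    * per buffer.
    */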

   ctx->Const.AlwaysUseGetTransformFeedbackVertexCount = true;

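   /* This relies on intel_supported_msaa_modes() listing its modes in
    * descending order, so that msaa_modes[0] is the maximum and the
    * first mode not exceeding the clamp is the largest usable one.
    */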
   int max_samples;
   const int *msaa_modes = intel_supported_msaa_modes(brw->intelScreen);
   const int clamp_max_samples =
      driQueryOptioni(&brw->optionCache, "clamp_max_samples");

   if (clamp_max_samples < 0) {
      max_samples = msaa_modes[0];
   } else {
      /* Select the largest supported MSAA mode that does not exceed
       * clamp_max_samples.
       */
      max_samples = 0;
      for (int i = 0; msaa_modes[i] != 0; ++i) {
         if (msaa_modes[i] <= clamp_max_samples) {
            max_samples = msaa_modes[i];
            break;
         }
      }
   }

   ctx->Const.MaxSamples = max_samples;
   ctx->Const.MaxColorTextureSamples = max_samples;
   ctx->Const.MaxDepthTextureSamples = max_samples;
   ctx->Const.MaxIntegerSamples = max_samples;

   if (brw->gen >= 7)
      ctx->Const.MaxProgramTextureGatherComponents = 4;

   ctx->Const.MinLineWidth = 1.0;
   ctx->Const.MinLineWidthAA = 1.0;
   ctx->Const.MaxLineWidth = 5.0;
   ctx->Const.MaxLineWidthAA = 5.0;
   ctx->Const.LineWidthGranularity = 0.5;

   ctx->Const.MinPointSize = 1.0;
   ctx->Const.MinPointSizeAA = 1.0;
   ctx->Const.MaxPointSize = 255.0;
   ctx->Const.MaxPointSizeAA = 255.0;
   ctx->Const.PointSizeGranularity = 1.0;

   if (brw->gen >= 5 || brw->is_g4x)
      ctx->Const.MaxClipPlanes = 8;

   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeInstructions = 16 * 1024;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxAluInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxTexInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxTexIndirections = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAluInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTexInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTexIndirections = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAttribs = 16;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTemps = 256;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAddressRegs = 1;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeParameters = 1024;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxEnvParams =
      MIN2(ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeParameters,
           ctx->Const.Program[MESA_SHADER_VERTEX].MaxEnvParams);

   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeInstructions = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAluInstructions = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTexInstructions = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTexIndirections = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAttribs = 12;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTemps = 256;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAddressRegs = 0;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeParameters = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxEnvParams =
      MIN2(ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeParameters,
           ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxEnvParams);

   /* Fragment shaders use real, 32-bit twos-complement integers for all
    * integer types.
    */
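   /* The range values are the log2 encodings that
    * glGetShaderPrecisionFormat() reports: |minimum| = 2^31 (encoded as
    * 31), maximum = 2^31 - 1 (encoded as 30), with 0 bits of fractional
    * precision.
    */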
   ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.RangeMin = 31;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.RangeMax = 30;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.Precision = 0;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].HighInt = ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MediumInt = ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt;

   if (brw->gen >= 7) {
      ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxAtomicCounters = MAX_ATOMIC_COUNTERS;
      ctx->Const.Program[MESA_SHADER_VERTEX].MaxAtomicCounters = MAX_ATOMIC_COUNTERS;
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxAtomicCounters = MAX_ATOMIC_COUNTERS;
      ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxAtomicBuffers = BRW_MAX_ABO;
      ctx->Const.Program[MESA_SHADER_VERTEX].MaxAtomicBuffers = BRW_MAX_ABO;
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxAtomicBuffers = BRW_MAX_ABO;
      ctx->Const.MaxCombinedAtomicBuffers = 3 * BRW_MAX_ABO;
   }

   /* Gen6 converts quads to polygons at the beginning of the 3D pipeline,
    * but we're not sure how the vertex order is actually handled, which
    * affects the provoking vertex decision. Always use the last vertex
    * convention for quad primitives; it works as expected for now.
    */
   if (brw->gen >= 6)
      ctx->Const.QuadsFollowProvokingVertexConvention = false;

   ctx->Const.NativeIntegers = true;
   ctx->Const.UniformBooleanTrue = 1;

   /* From the gen4 PRM, volume 4 page 127:
    *
    *     "For SURFTYPE_BUFFER non-rendertarget surfaces, this field specifies
    *      the base address of the first element of the surface, computed in
    *      software by adding the surface base address to the byte offset of
    *      the element in the buffer."
    *
    * However, unaligned accesses are slower, so enforce buffer alignment.
    */
   ctx->Const.UniformBufferOffsetAlignment = 16;
   ctx->Const.TextureBufferOffsetAlignment = 16;

   if (brw->gen >= 6) {
      ctx->Const.MaxVarying = 32;
      ctx->Const.Program[MESA_SHADER_VERTEX].MaxOutputComponents = 128;
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxInputComponents = 64;
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxOutputComponents = 128;
      ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxInputComponents = 128;
   }

   /* We want the GLSL compiler to emit code that uses condition codes */
   for (int i = 0; i < MESA_SHADER_STAGES; i++) {
      ctx->ShaderCompilerOptions[i].MaxIfDepth = brw->gen < 6 ? 16 : UINT_MAX;
      ctx->ShaderCompilerOptions[i].EmitCondCodes = true;
      ctx->ShaderCompilerOptions[i].EmitNoNoise = true;
      ctx->ShaderCompilerOptions[i].EmitNoMainReturn = true;
      ctx->ShaderCompilerOptions[i].EmitNoIndirectInput = true;
      ctx->ShaderCompilerOptions[i].EmitNoIndirectOutput = true;

      ctx->ShaderCompilerOptions[i].EmitNoIndirectUniform =
         (i == MESA_SHADER_FRAGMENT);
      ctx->ShaderCompilerOptions[i].EmitNoIndirectTemp =
         (i == MESA_SHADER_FRAGMENT);
      ctx->ShaderCompilerOptions[i].LowerClipDistance = true;
   }

   ctx->ShaderCompilerOptions[MESA_SHADER_VERTEX].PreferDP4 = true;
}

/**
 * Process driconf (drirc) options, setting appropriate context flags.
 *
 * intelInitExtensions still pokes at optionCache directly, in order to
 * avoid advertising various extensions. No flags are set, so it makes
 * sense to continue doing that there.
 */
static void
brw_process_driconf_options(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   driOptionCache *options = &brw->optionCache;
   driParseConfigFiles(options, &brw->intelScreen->optionCache,
                       brw->driContext->driScreenPriv->myNum, "i965");

   int bo_reuse_mode = driQueryOptioni(options, "bo_reuse");
   switch (bo_reuse_mode) {
   case DRI_CONF_BO_REUSE_DISABLED:
      break;
   case DRI_CONF_BO_REUSE_ALL:
      intel_bufmgr_gem_enable_reuse(brw->bufmgr);
      break;
   }

   if (!driQueryOptionb(options, "hiz")) {
      brw->has_hiz = false;
      /* On gen6, you can only do separate stencil with HIZ. */
      if (brw->gen == 6)
         brw->has_separate_stencil = false;
   }

   if (driQueryOptionb(options, "always_flush_batch")) {
      fprintf(stderr, "flushing batchbuffer before/after each draw call\n");
      brw->always_flush_batch = true;
   }

   if (driQueryOptionb(options, "always_flush_cache")) {
      fprintf(stderr, "flushing GPU caches before/after each draw call\n");
      brw->always_flush_cache = true;
   }

   if (driQueryOptionb(options, "disable_throttling")) {
      fprintf(stderr, "disabling flush throttling\n");
      brw->disable_throttling = true;
   }

   brw->disable_derivative_optimization =
      driQueryOptionb(&brw->optionCache, "disable_derivative_optimization");

   brw->precompile = driQueryOptionb(&brw->optionCache, "shader_precompile");

   ctx->Const.ForceGLSLExtensionsWarn =
      driQueryOptionb(options, "force_glsl_extensions_warn");

   ctx->Const.DisableGLSLLineContinuations =
      driQueryOptionb(options, "disable_glsl_line_continuations");
}

GLboolean
brwCreateContext(gl_api api,
                 const struct gl_config *mesaVis,
                 __DRIcontext *driContextPriv,
                 unsigned major_version,
                 unsigned minor_version,
                 uint32_t flags,
                 bool notify_reset,
                 unsigned *dri_ctx_error,
                 void *sharedContextPrivate)
{
   __DRIscreen *sPriv = driContextPriv->driScreenPriv;
   struct gl_context *shareCtx = (struct gl_context *) sharedContextPrivate;
   struct intel_screen *screen = sPriv->driverPrivate;
   const struct brw_device_info *devinfo = screen->devinfo;
   struct dd_function_table functions;
   struct gl_config visual;

   /* Only allow the __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS flag if the kernel
    * provides us with context reset notifications.
    */
   uint32_t allowed_flags = __DRI_CTX_FLAG_DEBUG
                          | __DRI_CTX_FLAG_FORWARD_COMPATIBLE;

   if (screen->has_context_reset_notification)
      allowed_flags |= __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS;

   if (flags & ~allowed_flags) {
      *dri_ctx_error = __DRI_CTX_ERROR_UNKNOWN_FLAG;
      return false;
   }

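   /* The context is a zeroed ralloc context; allocations parented to brw
    * are released in one go by the ralloc_free() in intelDestroyContext().
    */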
   struct brw_context *brw = rzalloc(NULL, struct brw_context);
   if (!brw) {
      printf("%s: failed to alloc context\n", __FUNCTION__);
      *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
      return false;
   }

   driContextPriv->driverPrivate = brw;
   brw->driContext = driContextPriv;
   brw->intelScreen = screen;
   brw->bufmgr = screen->bufmgr;

   brw->gen = devinfo->gen;
   brw->gt = devinfo->gt;
   brw->is_g4x = devinfo->is_g4x;
   brw->is_baytrail = devinfo->is_baytrail;
   brw->is_haswell = devinfo->is_haswell;
   brw->has_llc = devinfo->has_llc;
   brw->has_hiz = devinfo->has_hiz_and_separate_stencil && brw->gen < 8;
   brw->has_separate_stencil = devinfo->has_hiz_and_separate_stencil;
   brw->has_pln = devinfo->has_pln;
   brw->has_compr4 = devinfo->has_compr4;
   brw->has_surface_tile_offset = devinfo->has_surface_tile_offset;
   brw->has_negative_rhw_bug = devinfo->has_negative_rhw_bug;
   brw->needs_unlit_centroid_workaround =
      devinfo->needs_unlit_centroid_workaround;

   brw->must_use_separate_stencil = screen->hw_must_use_separate_stencil;
   brw->has_swizzling = screen->hw_has_swizzling;

   if (brw->gen >= 7) {
      gen7_init_vtable_surface_functions(brw);
      gen7_init_vtable_sampler_functions(brw);
      brw->vtbl.emit_depth_stencil_hiz = gen7_emit_depth_stencil_hiz;
   } else {
      gen4_init_vtable_surface_functions(brw);
      gen4_init_vtable_sampler_functions(brw);
      brw->vtbl.emit_depth_stencil_hiz = brw_emit_depth_stencil_hiz;
   }

   brw_init_driver_functions(brw, &functions);

   if (notify_reset)
      functions.GetGraphicsResetStatus = brw_get_graphics_reset_status;

   struct gl_context *ctx = &brw->ctx;

   if (mesaVis == NULL) {
      memset(&visual, 0, sizeof visual);
      mesaVis = &visual;
   }

   if (!_mesa_initialize_context(ctx, api, mesaVis, shareCtx, &functions)) {
      *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
      printf("%s: failed to init mesa context\n", __FUNCTION__);
      intelDestroyContext(driContextPriv);
      return false;
   }

   driContextSetFlags(ctx, flags);

   /* Initialize the software rasterizer and helper modules.
    *
    * As of GL 3.1 core, the gen4+ driver doesn't need the swrast context for
    * software fallbacks (which we have to support on legacy GL to do weird
    * glDrawPixels(), glBitmap(), and other functions).
    */
   if (api != API_OPENGL_CORE && api != API_OPENGLES2) {
      _swrast_CreateContext(ctx);
   }

   _vbo_CreateContext(ctx);
   if (ctx->swrast_context) {
      _tnl_CreateContext(ctx);
      TNL_CONTEXT(ctx)->Driver.RunPipeline = _tnl_run_pipeline;
      _swsetup_CreateContext(ctx);

      /* Configure swrast to match hardware characteristics: */
      _swrast_allow_pixel_fog(ctx, false);
      _swrast_allow_vertex_fog(ctx, true);
   }

   _mesa_meta_init(ctx);

   brw_process_driconf_options(brw);
   brw_process_intel_debug_variable(brw);
   brw_initialize_context_constants(brw);

   ctx->Const.ResetStrategy = notify_reset
      ? GL_LOSE_CONTEXT_ON_RESET_ARB : GL_NO_RESET_NOTIFICATION_ARB;

   /* Reinitialize the context point state. It depends on ctx->Const values. */
   _mesa_init_point(ctx);

   intel_batchbuffer_init(brw);

   brw_init_state(brw);

   intelInitExtensions(ctx);

   intel_fbo_init(brw);

   if (brw->gen >= 6) {
      /* Create a new hardware context. Using a hardware context means that
       * our GPU state will be saved/restored on context switch, allowing us
       * to assume that the GPU is in the same state we left it in.
       *
       * This is required for transform feedback buffer offsets, query objects,
       * and also allows us to reduce how much state we have to emit.
       */
      brw->hw_ctx = drm_intel_gem_context_create(brw->bufmgr);

      if (!brw->hw_ctx) {
         fprintf(stderr, "Gen6+ requires Kernel 3.6 or later.\n");
         intelDestroyContext(driContextPriv);
         return false;
      }
   }

   brw_init_surface_formats(brw);

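   /* G4x and newer encode these two commands differently from the
    * original 965, so stash the generation-appropriate opcodes on the
    * context rather than re-checking the generation at emit time.
    */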
   if (brw->is_g4x || brw->gen >= 5) {
      brw->CMD_VF_STATISTICS = GM45_3DSTATE_VF_STATISTICS;
      brw->CMD_PIPELINE_SELECT = CMD_PIPELINE_SELECT_GM45;
   } else {
      brw->CMD_VF_STATISTICS = GEN4_3DSTATE_VF_STATISTICS;
      brw->CMD_PIPELINE_SELECT = CMD_PIPELINE_SELECT_965;
   }

   brw->max_vs_threads = devinfo->max_vs_threads;
   brw->max_gs_threads = devinfo->max_gs_threads;
   brw->max_wm_threads = devinfo->max_wm_threads;
   brw->urb.size = devinfo->urb.size;
   brw->urb.min_vs_entries = devinfo->urb.min_vs_entries;
   brw->urb.max_vs_entries = devinfo->urb.max_vs_entries;
   brw->urb.max_gs_entries = devinfo->urb.max_gs_entries;

   /* Estimate the size of the mappable aperture into the GTT. There's an
    * ioctl to get the whole GTT size, but not one to get the mappable subset.
    * It turns out it's basically always 256MB, though some ancient hardware
    * was smaller.
    */
   uint32_t gtt_size = 256 * 1024 * 1024;

   /* We don't want to map two objects such that a memcpy between them would
    * just fault one mapping in and then the other over and over forever. So
    * we would need to divide the GTT size by 2. Additionally, some GTT is
    * taken up by things like the framebuffer and the ringbuffer and such, so
    * be more conservative.
    */
   brw->max_gtt_map_object_size = gtt_size / 4;
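   /* With the 256MB estimate above, this caps any single mappable
    * object at 64MB.
    */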

   if (brw->gen == 6)
      brw->urb.gen6_gs_previously_active = false;

   brw->prim_restart.in_progress = false;
   brw->prim_restart.enable_cut_index = false;
   brw->gs.enabled = false;

   if (brw->gen < 6) {
      brw->curbe.last_buf = calloc(1, 4096);
      brw->curbe.next_buf = calloc(1, 4096);
   }

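   /* Ask core Mesa to express fixed-function vertex TnL and texture
    * environment state as ARB programs, which the driver then compiles
    * like any user-supplied program.
    */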
   ctx->VertexProgram._MaintainTnlProgram = true;
   ctx->FragmentProgram._MaintainTexEnvProgram = true;

   brw_draw_init( brw );

   if ((flags & __DRI_CTX_FLAG_DEBUG) != 0) {
      /* Turn on some extra GL_ARB_debug_output generation. */
      brw->perf_debug = true;
   }

   if ((flags & __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS) != 0)
      ctx->Const.ContextFlags |= GL_CONTEXT_FLAG_ROBUST_ACCESS_BIT_ARB;

   brw_fs_alloc_reg_sets(brw);
   brw_vec4_alloc_reg_set(brw);

   if (INTEL_DEBUG & DEBUG_SHADER_TIME)
      brw_init_shader_time(brw);

   _mesa_compute_version(ctx);

   _mesa_initialize_dispatch_tables(ctx);
   _mesa_initialize_vbo_vtxfmt(ctx);

   if (ctx->Extensions.AMD_performance_monitor) {
      brw_init_performance_monitors(brw);
   }

   return true;
}

void
intelDestroyContext(__DRIcontext * driContextPriv)
{
   struct brw_context *brw =
      (struct brw_context *) driContextPriv->driverPrivate;
   struct gl_context *ctx = &brw->ctx;

   assert(brw); /* should never be null */
   if (!brw)
      return;

   /* Dump a final BMP in case the application doesn't call SwapBuffers */
   if (INTEL_DEBUG & DEBUG_AUB) {
      intel_batchbuffer_flush(brw);
      aub_dump_bmp(&brw->ctx);
   }

   _mesa_meta_free(&brw->ctx);

   if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
      /* Force a report. */
      brw->shader_time.report_time = 0;

      brw_collect_and_report_shader_time(brw);
      brw_destroy_shader_time(brw);
   }

   brw_destroy_state(brw);
   brw_draw_destroy(brw);

   drm_intel_bo_unreference(brw->curbe.curbe_bo);
   drm_intel_bo_unreference(brw->vs.base.const_bo);
   drm_intel_bo_unreference(brw->wm.base.const_bo);

   free(brw->curbe.last_buf);
   free(brw->curbe.next_buf);

   drm_intel_gem_context_destroy(brw->hw_ctx);

   if (ctx->swrast_context) {
      _swsetup_DestroyContext(&brw->ctx);
      _tnl_DestroyContext(&brw->ctx);
   }
   _vbo_DestroyContext(&brw->ctx);

   if (ctx->swrast_context)
      _swrast_DestroyContext(&brw->ctx);

   intel_batchbuffer_free(brw);

   drm_intel_bo_unreference(brw->first_post_swapbuffers_batch);
   brw->first_post_swapbuffers_batch = NULL;

   driDestroyOptionCache(&brw->optionCache);

   /* free the Mesa context */
   _mesa_free_context_data(&brw->ctx);

   ralloc_free(brw);
   driContextPriv->driverPrivate = NULL;
}

GLboolean
intelUnbindContext(__DRIcontext * driContextPriv)
{
   /* Unset current context and dispatch table */
   _mesa_make_current(NULL, NULL, NULL);

   return true;
}

/**
 * Fixes up the context for GLES2/3 with our default-to-sRGB-capable behavior
 * on window system framebuffers.
 *
 * Desktop GL is fairly reasonable in its handling of sRGB: You can ask if
 * your renderbuffer can do sRGB encode, and you can flip a switch that does
 * sRGB encode if the renderbuffer can handle it. You can ask specifically
 * for a visual where you're guaranteed to be capable, but it turns out that
 * everyone just makes all their ARGB8888 visuals capable and doesn't offer
 * incapable ones, because there's no difference between the two in resources
 * used. Applications thus get built that accidentally rely on the default
 * visual choice being sRGB, so we make ours sRGB capable. Everything sounds
 * great...
 *
 * But for GLES2/3, they decided that it was silly to not turn on sRGB encode
 * for sRGB renderbuffers you made with the GL_EXT_texture_sRGB equivalent.
 * So they removed the enable knob and made it "if the renderbuffer is sRGB
 * capable, do sRGB encode". Then, for your window system renderbuffers, you
 * can ask for sRGB visuals and get sRGB encode, or not ask for sRGB visuals
 * and get no sRGB encode (assuming that both kinds of visual are available).
 * Thus our choice to support sRGB by default on our visuals for desktop would
 * result in broken rendering of GLES apps that aren't expecting sRGB encode.
 *
 * Unfortunately, renderbuffer setup happens before a context is created. So
 * in intel_screen.c we always set up sRGB, and here, if you're a GLES2/3
 * context (without an sRGB visual, though we don't have sRGB visuals exposed
 * yet), we go turn that back off before anyone finds out.
 */
static void
intel_gles3_srgb_workaround(struct brw_context *brw,
                            struct gl_framebuffer *fb)
{
   struct gl_context *ctx = &brw->ctx;

   if (_mesa_is_desktop_gl(ctx) || !fb->Visual.sRGBCapable)
      return;

   /* Some day when we support the sRGB capable bit on visuals available for
    * GLES, we'll need to respect that and not disable things here.
    */
   fb->Visual.sRGBCapable = false;
   for (int i = 0; i < BUFFER_COUNT; i++) {
      if (fb->Attachment[i].Renderbuffer &&
          fb->Attachment[i].Renderbuffer->Format == MESA_FORMAT_SARGB8) {
         fb->Attachment[i].Renderbuffer->Format = MESA_FORMAT_ARGB8888;
      }
   }
}

GLboolean
intelMakeCurrent(__DRIcontext * driContextPriv,
                 __DRIdrawable * driDrawPriv,
                 __DRIdrawable * driReadPriv)
{
   struct brw_context *brw;
   GET_CURRENT_CONTEXT(curCtx);

   if (driContextPriv)
      brw = (struct brw_context *) driContextPriv->driverPrivate;
   else
      brw = NULL;

   /* According to the glXMakeCurrent() man page: "Pending commands to
    * the previous context, if any, are flushed before it is released."
    * But only flush if we're actually changing contexts.
    */
   if (brw_context(curCtx) && brw_context(curCtx) != brw) {
      _mesa_flush(curCtx);
   }

   if (driContextPriv) {
      struct gl_context *ctx = &brw->ctx;
      struct gl_framebuffer *fb, *readFb;

      if (driDrawPriv == NULL && driReadPriv == NULL) {
         fb = _mesa_get_incomplete_framebuffer();
         readFb = _mesa_get_incomplete_framebuffer();
      } else {
         fb = driDrawPriv->driverPrivate;
         readFb = driReadPriv->driverPrivate;
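         /* Prime the stamps one behind the drawables' current values so
          * that the next intel_prepare_render() treats the buffers as
          * stale and refreshes them.
          */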
         driContextPriv->dri2.draw_stamp = driDrawPriv->dri2.stamp - 1;
         driContextPriv->dri2.read_stamp = driReadPriv->dri2.stamp - 1;
      }

      /* The sRGB workaround changes the renderbuffer's format. We must
       * change the format before the renderbuffer's miptree gets allocated,
       * otherwise the formats of the renderbuffer and its miptree will
       * differ.
       */
      intel_gles3_srgb_workaround(brw, fb);
      intel_gles3_srgb_workaround(brw, readFb);

      intel_prepare_render(brw);
      _mesa_make_current(ctx, fb, readFb);
   } else {
      _mesa_make_current(NULL, NULL, NULL);
   }

   return true;
}

void
intel_resolve_for_dri2_flush(struct brw_context *brw,
                             __DRIdrawable *drawable)
{
   if (brw->gen < 6) {
      /* MSAA and fast color clear are not supported, so don't waste time
       * checking whether a resolve is needed.
       */
      return;
   }

   struct gl_framebuffer *fb = drawable->driverPrivate;
   struct intel_renderbuffer *rb;

   /* Usually, only the back buffer will need to be downsampled. However,
    * the front buffer will also need it if the user has rendered into it.
    */
   static const gl_buffer_index buffers[2] = {
      BUFFER_BACK_LEFT,
      BUFFER_FRONT_LEFT,
   };

   for (int i = 0; i < 2; ++i) {
      rb = intel_get_renderbuffer(fb, buffers[i]);
      if (rb == NULL || rb->mt == NULL)
         continue;
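      /* Single-sampled miptrees may have a pending fast-color-clear
       * resolve; multisampled ones instead need a downsample blit into
       * their single-sample copy.
       */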
      if (rb->mt->num_samples <= 1)
         intel_miptree_resolve_color(brw, rb->mt);
      else
         intel_miptree_downsample(brw, rb->mt);
   }
}

static unsigned
intel_bits_per_pixel(const struct intel_renderbuffer *rb)
{
   return _mesa_get_format_bytes(intel_rb_format(rb)) * 8;
}

static void
intel_query_dri2_buffers(struct brw_context *brw,
                         __DRIdrawable *drawable,
                         __DRIbuffer **buffers,
                         int *count);

static void
intel_process_dri2_buffer(struct brw_context *brw,
                          __DRIdrawable *drawable,
                          __DRIbuffer *buffer,
                          struct intel_renderbuffer *rb,
                          const char *buffer_name);

static void
intel_update_image_buffers(struct brw_context *brw, __DRIdrawable *drawable);

static void
intel_update_dri2_buffers(struct brw_context *brw, __DRIdrawable *drawable)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;
   struct intel_renderbuffer *rb;
   __DRIbuffer *buffers = NULL;
   int i, count;
   const char *region_name;

   /* Set this up front, so that in case our buffers get invalidated
    * while we're getting new buffers, we don't clobber the stamp and
    * thus ignore the invalidate. */
   drawable->lastStamp = drawable->dri2.stamp;

   if (unlikely(INTEL_DEBUG & DEBUG_DRI))
      fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);

   intel_query_dri2_buffers(brw, drawable, &buffers, &count);

   if (buffers == NULL)
      return;

   for (i = 0; i < count; i++) {
      switch (buffers[i].attachment) {
      case __DRI_BUFFER_FRONT_LEFT:
         rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
         region_name = "dri2 front buffer";
         break;

      case __DRI_BUFFER_FAKE_FRONT_LEFT:
         rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
         region_name = "dri2 fake front buffer";
         break;

      case __DRI_BUFFER_BACK_LEFT:
         rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
         region_name = "dri2 back buffer";
         break;

      case __DRI_BUFFER_DEPTH:
      case __DRI_BUFFER_HIZ:
      case __DRI_BUFFER_DEPTH_STENCIL:
      case __DRI_BUFFER_STENCIL:
      case __DRI_BUFFER_ACCUM:
      default:
         fprintf(stderr,
                 "unhandled buffer attach event, attachment type %d\n",
                 buffers[i].attachment);
         return;
      }

      intel_process_dri2_buffer(brw, drawable, &buffers[i], rb, region_name);
   }
}

void
intel_update_renderbuffers(__DRIcontext *context, __DRIdrawable *drawable)
{
   struct brw_context *brw = context->driverPrivate;
   __DRIscreen *screen = brw->intelScreen->driScrnPriv;

   /* Set this up front, so that in case our buffers get invalidated
    * while we're getting new buffers, we don't clobber the stamp and
    * thus ignore the invalidate. */
   drawable->lastStamp = drawable->dri2.stamp;

   if (unlikely(INTEL_DEBUG & DEBUG_DRI))
      fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);

   if (screen->image.loader)
      intel_update_image_buffers(brw, drawable);
   else
      intel_update_dri2_buffers(brw, drawable);

   driUpdateFramebufferSize(&brw->ctx, drawable);
}

/**
 * intel_prepare_render should be called anywhere that current read/draw
 * buffer state is required.
 */
void
intel_prepare_render(struct brw_context *brw)
{
   __DRIcontext *driContext = brw->driContext;
   __DRIdrawable *drawable;

   drawable = driContext->driDrawablePriv;
   if (drawable && drawable->dri2.stamp != driContext->dri2.draw_stamp) {
      if (drawable->lastStamp != drawable->dri2.stamp)
         intel_update_renderbuffers(driContext, drawable);
      driContext->dri2.draw_stamp = drawable->dri2.stamp;
   }

   drawable = driContext->driReadablePriv;
   if (drawable && drawable->dri2.stamp != driContext->dri2.read_stamp) {
      if (drawable->lastStamp != drawable->dri2.stamp)
         intel_update_renderbuffers(driContext, drawable);
      driContext->dri2.read_stamp = drawable->dri2.stamp;
   }

   /* If we're currently rendering to the front buffer, the rendering
    * that will happen next will probably dirty the front buffer. So
    * mark it as dirty here.
    */
   if (brw->is_front_buffer_rendering)
      brw->front_buffer_dirty = true;

   /* Wait for the swapbuffers before the one we just emitted, so we
    * don't get too many swaps outstanding for apps that are GPU-heavy
    * but not CPU-heavy.
    *
    * We're using intelDRI2Flush (called from the loader before
    * swapbuffer) and glFlush (for front buffer rendering) as the
    * indicator that a frame is done and then throttle when we get
    * here as we prepare to render the next frame. At this point the
    * round trips for swap/copy and getting new buffers are done and
    * we'll spend less time waiting on the GPU.
    *
    * Unfortunately, we don't have a handle to the batch containing
    * the swap, and getting our hands on that doesn't seem worth it,
    * so we just use the first batch we emitted after the last swap.
    */
   if (brw->need_throttle && brw->first_post_swapbuffers_batch) {
      if (!brw->disable_throttling)
         drm_intel_bo_wait_rendering(brw->first_post_swapbuffers_batch);
      drm_intel_bo_unreference(brw->first_post_swapbuffers_batch);
      brw->first_post_swapbuffers_batch = NULL;
      brw->need_throttle = false;
   }
}

/**
 * \brief Query DRI2 to obtain a DRIdrawable's buffers.
 *
 * To determine which DRI buffers to request, examine the renderbuffers
 * attached to the drawable's framebuffer. Then request the buffers with
 * DRI2GetBuffers() or DRI2GetBuffersWithFormat().
 *
 * This is called from intel_update_renderbuffers().
 *
 * \param drawable      Drawable whose buffers are queried.
 * \param buffers       [out] List of buffers returned by DRI2 query.
 * \param buffer_count  [out] Number of buffers returned.
 *
 * \see intel_update_renderbuffers()
 * \see DRI2GetBuffers()
 * \see DRI2GetBuffersWithFormat()
 */
static void
intel_query_dri2_buffers(struct brw_context *brw,
                         __DRIdrawable *drawable,
                         __DRIbuffer **buffers,
                         int *buffer_count)
{
   __DRIscreen *screen = brw->intelScreen->driScrnPriv;
   struct gl_framebuffer *fb = drawable->driverPrivate;
   int i = 0;
   unsigned attachments[8];

   struct intel_renderbuffer *front_rb;
   struct intel_renderbuffer *back_rb;

   front_rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
   back_rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);

   memset(attachments, 0, sizeof(attachments));
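   /* attachments[] is packed as (attachment, bits-per-pixel) pairs for
    * DRI2GetBuffersWithFormat(), which is why only i / 2 attachments
    * are requested below.
    */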
   if ((brw->is_front_buffer_rendering ||
        brw->is_front_buffer_reading ||
        !back_rb) && front_rb) {
      /* If a fake front buffer is in use, then querying for
       * __DRI_BUFFER_FRONT_LEFT will cause the server to copy the image from
       * the real front buffer to the fake front buffer. So before doing the
       * query, we need to make sure all the pending drawing has landed in the
       * real front buffer.
       */
      intel_batchbuffer_flush(brw);
      intel_flush_front(&brw->ctx);

      attachments[i++] = __DRI_BUFFER_FRONT_LEFT;
      attachments[i++] = intel_bits_per_pixel(front_rb);
   } else if (front_rb && brw->front_buffer_dirty) {
      /* We have pending front buffer rendering, but we aren't querying for a
       * front buffer. If the front buffer we have is a fake front buffer,
       * the X server is going to throw it away when it processes the query.
       * So before doing the query, make sure all the pending drawing has
       * landed in the real front buffer.
       */
      intel_batchbuffer_flush(brw);
      intel_flush_front(&brw->ctx);
   }

   if (back_rb) {
      attachments[i++] = __DRI_BUFFER_BACK_LEFT;
      attachments[i++] = intel_bits_per_pixel(back_rb);
   }

   assert(i <= ARRAY_SIZE(attachments));

   *buffers = screen->dri2.loader->getBuffersWithFormat(drawable,
                                                        &drawable->w,
                                                        &drawable->h,
                                                        attachments, i / 2,
                                                        buffer_count,
                                                        drawable->loaderPrivate);
}

/**
 * \brief Assign a DRI buffer's DRM region to a renderbuffer.
 *
 * This is called from intel_update_renderbuffers().
 *
 * \par Note:
 *    DRI buffers whose attachment point is DRI2BufferStencil or
 *    DRI2BufferDepthStencil are handled as special cases.
 *
 * \param buffer_name is a human readable name, such as "dri2 front buffer",
 *        that is passed to intel_region_alloc_for_handle().
 *
 * \see intel_update_renderbuffers()
 * \see intel_region_alloc_for_handle()
 */
static void
intel_process_dri2_buffer(struct brw_context *brw,
                          __DRIdrawable *drawable,
                          __DRIbuffer *buffer,
                          struct intel_renderbuffer *rb,
                          const char *buffer_name)
{
   struct intel_region *region = NULL;

   if (!rb)
      return;

   unsigned num_samples = rb->Base.Base.NumSamples;

   /* We try to avoid closing and reopening the same BO name, because the
    * first use of a mapping of the buffer involves a bunch of page faulting
    * which is moderately expensive.
    */
   if (num_samples == 0) {
      if (rb->mt &&
          rb->mt->region &&
          rb->mt->region->name == buffer->name)
         return;
   } else {
      if (rb->mt &&
          rb->mt->singlesample_mt &&
          rb->mt->singlesample_mt->region &&
          rb->mt->singlesample_mt->region->name == buffer->name)
         return;
   }

   if (unlikely(INTEL_DEBUG & DEBUG_DRI)) {
      fprintf(stderr,
              "attaching buffer %d, at %d, cpp %d, pitch %d\n",
              buffer->name, buffer->attachment,
              buffer->cpp, buffer->pitch);
   }

   intel_miptree_release(&rb->mt);
   region = intel_region_alloc_for_handle(brw->intelScreen,
                                          buffer->cpp,
                                          drawable->w,
                                          drawable->h,
                                          buffer->pitch,
                                          buffer->name,
                                          buffer_name);
   if (!region)
      return;

   rb->mt = intel_miptree_create_for_dri2_buffer(brw,
                                                 buffer->attachment,
                                                 intel_rb_format(rb),
                                                 num_samples,
                                                 region);
   intel_region_release(&region);
}

/**
 * \brief Query DRI image loader to obtain a DRIdrawable's buffers.
 *
 * To determine which DRI buffers to request, examine the renderbuffers
 * attached to the drawable's framebuffer. Then request the buffers from
 * the image loader.
 *
 * This is called from intel_update_renderbuffers().
 *
 * \param drawable      Drawable whose buffers are queried.
 * \param buffers       [out] List of buffers returned by DRI2 query.
 * \param buffer_count  [out] Number of buffers returned.
 *
 * \see intel_update_renderbuffers()
 */

static void
intel_update_image_buffer(struct brw_context *intel,
                          __DRIdrawable *drawable,
                          struct intel_renderbuffer *rb,
                          __DRIimage *buffer,
                          enum __DRIimageBufferMask buffer_type)
{
   struct intel_region *region = buffer->region;

   if (!rb || !region)
      return;

   unsigned num_samples = rb->Base.Base.NumSamples;

   /* Check and see if we're already bound to the right
    * buffer object
    */
   if (num_samples == 0) {
      if (rb->mt &&
          rb->mt->region &&
          rb->mt->region->bo == region->bo)
         return;
   } else {
      if (rb->mt &&
          rb->mt->singlesample_mt &&
          rb->mt->singlesample_mt->region &&
          rb->mt->singlesample_mt->region->bo == region->bo)
         return;
   }

   intel_miptree_release(&rb->mt);
   rb->mt = intel_miptree_create_for_image_buffer(intel,
                                                  buffer_type,
                                                  intel_rb_format(rb),
                                                  num_samples,
                                                  region);
}

static void
intel_update_image_buffers(struct brw_context *brw, __DRIdrawable *drawable)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;
   __DRIscreen *screen = brw->intelScreen->driScrnPriv;
   struct intel_renderbuffer *front_rb;
   struct intel_renderbuffer *back_rb;
   struct __DRIimageList images;
   unsigned int format;
   uint32_t buffer_mask = 0;

   front_rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
   back_rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);

   if (back_rb)
      format = intel_rb_format(back_rb);
   else if (front_rb)
      format = intel_rb_format(front_rb);
   else
      return;

   if ((brw->is_front_buffer_rendering || brw->is_front_buffer_reading ||
        !back_rb) && front_rb)
      buffer_mask |= __DRI_IMAGE_BUFFER_FRONT;

   if (back_rb)
      buffer_mask |= __DRI_IMAGE_BUFFER_BACK;

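   /* Fetch the requested buffers from the image loader;
    * images.image_mask reports which of them were actually provided.
    */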
   (*screen->image.loader->getBuffers) (drawable,
                                        driGLFormatToImageFormat(format),
                                        &drawable->dri2.stamp,
                                        drawable->loaderPrivate,
                                        buffer_mask,
                                        &images);

   if (images.image_mask & __DRI_IMAGE_BUFFER_FRONT) {
      drawable->w = images.front->width;
      drawable->h = images.front->height;
      intel_update_image_buffer(brw,
                                drawable,
                                front_rb,
                                images.front,
                                __DRI_IMAGE_BUFFER_FRONT);
   }
   if (images.image_mask & __DRI_IMAGE_BUFFER_BACK) {
      drawable->w = images.back->width;
      drawable->h = images.back->height;
      intel_update_image_buffer(brw,
                                drawable,
                                back_rb,
                                images.back,
                                __DRI_IMAGE_BUFFER_BACK);
   }
}