i965: Split gen6 depth hiz state out from brw
[mesa.git] src/mesa/drivers/dri/i965/brw_context.c
/*
 Copyright 2003 VMware, Inc.
 Copyright (C) Intel Corp. 2006. All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
 /*
  * Authors:
  *   Keith Whitwell <keithw@vmware.com>
  */


#include "main/api_exec.h"
#include "main/context.h"
#include "main/fbobject.h"
#include "main/extensions.h"
#include "main/imports.h"
#include "main/macros.h"
#include "main/points.h"
#include "main/version.h"
#include "main/vtxfmt.h"
#include "main/texobj.h"

#include "vbo/vbo_context.h"

#include "drivers/common/driverfuncs.h"
#include "drivers/common/meta.h"
#include "utils.h"

#include "brw_context.h"
#include "brw_defines.h"
#include "brw_draw.h"
#include "brw_state.h"

#include "intel_batchbuffer.h"
#include "intel_buffer_objects.h"
#include "intel_buffers.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
#include "intel_pixel.h"
#include "intel_image.h"
#include "intel_tex.h"
#include "intel_tex_obj.h"

#include "swrast_setup/swrast_setup.h"
#include "tnl/tnl.h"
#include "tnl/t_pipeline.h"
#include "util/ralloc.h"

/***************************************
 * Mesa's Driver Functions
 ***************************************/

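/**
 * QuerySamplesForFormat hook: report the supported MSAA sample counts, in
 * descending order, for the running hardware generation. All formats share
 * the same set of sample counts, so target and internalFormat are ignored.
 */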
static size_t
brw_query_samples_for_format(struct gl_context *ctx, GLenum target,
                             GLenum internalFormat, int samples[16])
{
   struct brw_context *brw = brw_context(ctx);

   (void) target;

   switch (brw->gen) {
   case 8:
      samples[0] = 8;
      samples[1] = 4;
      samples[2] = 2;
      return 3;

   case 7:
      samples[0] = 8;
      samples[1] = 4;
      return 2;

   case 6:
      samples[0] = 4;
      return 1;

   default:
      samples[0] = 1;
      return 1;
   }
}

const char *const brw_vendor_string = "Intel Open Source Technology Center";

const char *
brw_get_renderer_string(unsigned deviceID)
{
   const char *chipset;
   static char buffer[128];

   switch (deviceID) {
#undef CHIPSET
#define CHIPSET(id, symbol, str) case id: chipset = str; break;
#include "pci_ids/i965_pci_ids.h"
   default:
      chipset = "Unknown Intel Chipset";
      break;
   }

   (void) driGetRendererString(buffer, chipset, 0);
   return buffer;
}

static const GLubyte *
intel_get_string(struct gl_context * ctx, GLenum name)
{
   const struct brw_context *const brw = brw_context(ctx);

   switch (name) {
   case GL_VENDOR:
      return (GLubyte *) brw_vendor_string;

   case GL_RENDERER:
      return
         (GLubyte *) brw_get_renderer_string(brw->intelScreen->deviceID);

   default:
      return NULL;
   }
}

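/**
 * Viewport hook: invalidate the drawables of winsys framebuffers so that
 * EGL, which has no DRI2 invalidate events, still picks up window resizes.
 */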
static void
intel_viewport(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);
   __DRIcontext *driContext = brw->driContext;

   if (_mesa_is_winsys_fbo(ctx->DrawBuffer)) {
      dri2InvalidateDrawable(driContext->driDrawablePriv);
      dri2InvalidateDrawable(driContext->driReadablePriv);
   }
}

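/**
 * UpdateState hook: propagate new GL state to the swrast/vbo helper modules
 * and perform the HiZ and fast-clear color resolves that must happen before
 * texturing from a surface that was previously rendered to.
 */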
static void
intel_update_state(struct gl_context * ctx, GLuint new_state)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_texture_object *tex_obj;
   struct intel_renderbuffer *depth_irb;

   if (ctx->swrast_context)
      _swrast_InvalidateState(ctx, new_state);
   _vbo_InvalidateState(ctx, new_state);

   brw->NewGLState |= new_state;

   _mesa_unlock_context_textures(ctx);

   /* Resolve the depth buffer's HiZ buffer. */
   depth_irb = intel_get_renderbuffer(ctx->DrawBuffer, BUFFER_DEPTH);
   if (depth_irb)
      intel_renderbuffer_resolve_hiz(brw, depth_irb);

   /* Resolve depth buffer and render cache of each enabled texture. */
   int maxEnabledUnit = ctx->Texture._MaxEnabledTexImageUnit;
   for (int i = 0; i <= maxEnabledUnit; i++) {
      if (!ctx->Texture.Unit[i]._Current)
         continue;
      tex_obj = intel_texture_object(ctx->Texture.Unit[i]._Current);
      if (!tex_obj || !tex_obj->mt)
         continue;
      intel_miptree_all_slices_resolve_depth(brw, tex_obj->mt);
      intel_miptree_resolve_color(brw, tex_obj->mt);
      brw_render_cache_set_check_flush(brw, tex_obj->mt->bo);
   }

   _mesa_lock_context_textures(ctx);
}

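/* Pick whichever loader extension (image loader or DRI2 loader) provides
 * the flushFrontBuffer entry point for this screen.
 */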
#define flushFront(screen)      ((screen)->image.loader ? (screen)->image.loader->flushFrontBuffer : (screen)->dri2.loader->flushFrontBuffer)

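/**
 * Flush pending front-buffer rendering: resolve, submit the batch, then ask
 * the loader to copy the fake front buffer to the real front buffer.
 */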
static void
intel_flush_front(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);
   __DRIcontext *driContext = brw->driContext;
   __DRIdrawable *driDrawable = driContext->driDrawablePriv;
   __DRIscreen *const screen = brw->intelScreen->driScrnPriv;

   if (brw->front_buffer_dirty && _mesa_is_winsys_fbo(ctx->DrawBuffer)) {
      if (flushFront(screen) && driDrawable &&
          driDrawable->loaderPrivate) {

         /* Resolve before flushing FAKE_FRONT_LEFT to FRONT_LEFT.
          *
          * This potentially resolves both the front and back buffers. It
          * is unnecessary to resolve the back, but that harms nothing except
          * performance. And no one cares about front-buffer render
          * performance.
          */
         intel_resolve_for_dri2_flush(brw, driDrawable);
         intel_batchbuffer_flush(brw);

         flushFront(screen)(driDrawable, driDrawable->loaderPrivate);

         /* We set the dirty bit in intel_prepare_render() if we're
          * front buffer rendering once we get there.
          */
         brw->front_buffer_dirty = false;
      }
   }
}

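/**
 * Flush hook: submit the batchbuffer, push any pending front-buffer
 * rendering out to the winsys, and request throttling if we are drawing
 * to the front buffer.
 */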
static void
intel_glFlush(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);

   intel_batchbuffer_flush(brw);
   intel_flush_front(ctx);
   if (brw_is_front_buffer_drawing(ctx->DrawBuffer))
      brw->need_throttle = true;
}

static void
intel_finish(struct gl_context * ctx)
{
   struct brw_context *brw = brw_context(ctx);

   intel_glFlush(ctx);

   if (brw->batch.last_bo)
      drm_intel_bo_wait_rendering(brw->batch.last_bo);
}

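/**
 * Fill in Mesa's dd_function_table with the common defaults, then override
 * the entry points the i965 driver implements, choosing generation-specific
 * variants where the hardware requires them.
 */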
static void
brw_init_driver_functions(struct brw_context *brw,
                          struct dd_function_table *functions)
{
   _mesa_init_driver_functions(functions);

   /* GLX uses DRI2 invalidate events to handle window resizing.
    * Unfortunately, EGL does not - libEGL is written in XCB (not Xlib),
    * which doesn't provide a mechanism for snooping the event queues.
    *
    * So EGL still relies on viewport hacks to handle window resizing.
    * This should go away with DRI3000.
    */
   if (!brw->driContext->driScreenPriv->dri2.useInvalidate)
      functions->Viewport = intel_viewport;

   functions->Flush = intel_glFlush;
   functions->Finish = intel_finish;
   functions->GetString = intel_get_string;
   functions->UpdateState = intel_update_state;

   intelInitTextureFuncs(functions);
   intelInitTextureImageFuncs(functions);
   intelInitTextureSubImageFuncs(functions);
   intelInitTextureCopyImageFuncs(functions);
   intelInitCopyImageFuncs(functions);
   intelInitClearFuncs(functions);
   intelInitBufferFuncs(functions);
   intelInitPixelFuncs(functions);
   intelInitBufferObjectFuncs(functions);
   intel_init_syncobj_functions(functions);
   brw_init_object_purgeable_functions(functions);

   brwInitFragProgFuncs( functions );
   brw_init_common_queryobj_functions(functions);
   if (brw->gen >= 6)
      gen6_init_queryobj_functions(functions);
   else
      gen4_init_queryobj_functions(functions);

   functions->QuerySamplesForFormat = brw_query_samples_for_format;

   functions->NewTransformFeedback = brw_new_transform_feedback;
   functions->DeleteTransformFeedback = brw_delete_transform_feedback;
   functions->GetTransformFeedbackVertexCount =
      brw_get_transform_feedback_vertex_count;
   if (brw->gen >= 7) {
      functions->BeginTransformFeedback = gen7_begin_transform_feedback;
      functions->EndTransformFeedback = gen7_end_transform_feedback;
      functions->PauseTransformFeedback = gen7_pause_transform_feedback;
      functions->ResumeTransformFeedback = gen7_resume_transform_feedback;
   } else {
      functions->BeginTransformFeedback = brw_begin_transform_feedback;
      functions->EndTransformFeedback = brw_end_transform_feedback;
   }

   if (brw->gen >= 6)
      functions->GetSamplePosition = gen6_get_sample_position;
}

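/**
 * Set the ctx->Const limits (texture sizes, shader resource counts, MSAA
 * modes, compiler options, and so on) according to the hardware generation
 * and the driconf settings.
 */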
static void
brw_initialize_context_constants(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   unsigned max_samplers =
      brw->gen >= 8 || brw->is_haswell ? BRW_MAX_TEX_UNIT : 16;

   ctx->Const.QueryCounterBits.Timestamp = 36;

   ctx->Const.StripTextureBorder = true;

   ctx->Const.MaxDualSourceDrawBuffers = 1;
   ctx->Const.MaxDrawBuffers = BRW_MAX_DRAW_BUFFERS;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits = max_samplers;
   ctx->Const.MaxTextureCoordUnits = 8; /* Mesa limit */
   ctx->Const.MaxTextureUnits =
      MIN2(ctx->Const.MaxTextureCoordUnits,
           ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits);
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxTextureImageUnits = max_samplers;
   if (brw->gen >= 7)
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxTextureImageUnits = max_samplers;
   else
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxTextureImageUnits = 0;
   if (_mesa_extension_override_enables.ARB_compute_shader) {
      ctx->Const.Program[MESA_SHADER_COMPUTE].MaxTextureImageUnits = BRW_MAX_TEX_UNIT;
      ctx->Const.MaxUniformBufferBindings += 12;
   } else {
      ctx->Const.Program[MESA_SHADER_COMPUTE].MaxTextureImageUnits = 0;
   }
   ctx->Const.MaxCombinedTextureImageUnits =
      ctx->Const.Program[MESA_SHADER_VERTEX].MaxTextureImageUnits +
      ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits +
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxTextureImageUnits +
      ctx->Const.Program[MESA_SHADER_COMPUTE].MaxTextureImageUnits;

   ctx->Const.MaxTextureLevels = 14; /* 8192 */
   if (ctx->Const.MaxTextureLevels > MAX_TEXTURE_LEVELS)
      ctx->Const.MaxTextureLevels = MAX_TEXTURE_LEVELS;
   ctx->Const.Max3DTextureLevels = 12; /* 2048 */
   ctx->Const.MaxCubeTextureLevels = 14; /* 8192 */
   ctx->Const.MaxTextureMbytes = 1536;

   if (brw->gen >= 7)
      ctx->Const.MaxArrayTextureLayers = 2048;
   else
      ctx->Const.MaxArrayTextureLayers = 512;

   ctx->Const.MaxTextureRectSize = 1 << 12;

   ctx->Const.MaxTextureMaxAnisotropy = 16.0;

   ctx->Const.MaxRenderbufferSize = 8192;

   /* Hardware only supports a limited number of transform feedback buffers.
    * So we need to override the Mesa default (which is based only on software
    * limits).
    */
   ctx->Const.MaxTransformFeedbackBuffers = BRW_MAX_SOL_BUFFERS;

   /* On Gen6, in the worst case, we use up one binding table entry per
    * transform feedback component (see comments above the definition of
    * BRW_MAX_SOL_BINDINGS, in brw_context.h), so we need to advertise a value
    * for MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS equal to
    * BRW_MAX_SOL_BINDINGS.
    *
    * In "separate components" mode, we need to divide this value by
    * BRW_MAX_SOL_BUFFERS, so that the total number of binding table entries
    * used up by all buffers will not exceed BRW_MAX_SOL_BINDINGS.
    */
   ctx->Const.MaxTransformFeedbackInterleavedComponents = BRW_MAX_SOL_BINDINGS;
   ctx->Const.MaxTransformFeedbackSeparateComponents =
      BRW_MAX_SOL_BINDINGS / BRW_MAX_SOL_BUFFERS;

   ctx->Const.AlwaysUseGetTransformFeedbackVertexCount = true;

   int max_samples;
   const int *msaa_modes = intel_supported_msaa_modes(brw->intelScreen);
   const int clamp_max_samples =
      driQueryOptioni(&brw->optionCache, "clamp_max_samples");

   if (clamp_max_samples < 0) {
      max_samples = msaa_modes[0];
   } else {
      /* Select the largest supported MSAA mode that does not exceed
       * clamp_max_samples.
       */
      max_samples = 0;
      for (int i = 0; msaa_modes[i] != 0; ++i) {
         if (msaa_modes[i] <= clamp_max_samples) {
            max_samples = msaa_modes[i];
            break;
         }
      }
   }

   ctx->Const.MaxSamples = max_samples;
   ctx->Const.MaxColorTextureSamples = max_samples;
   ctx->Const.MaxDepthTextureSamples = max_samples;
   ctx->Const.MaxIntegerSamples = max_samples;

   if (brw->gen >= 7)
      ctx->Const.MaxProgramTextureGatherComponents = 4;
   else if (brw->gen == 6)
      ctx->Const.MaxProgramTextureGatherComponents = 1;

   ctx->Const.MinLineWidth = 1.0;
   ctx->Const.MinLineWidthAA = 1.0;
   ctx->Const.MaxLineWidth = 5.0;
   ctx->Const.MaxLineWidthAA = 5.0;
   ctx->Const.LineWidthGranularity = 0.5;

   ctx->Const.MinPointSize = 1.0;
   ctx->Const.MinPointSizeAA = 1.0;
   ctx->Const.MaxPointSize = 255.0;
   ctx->Const.MaxPointSizeAA = 255.0;
   ctx->Const.PointSizeGranularity = 1.0;

   if (brw->gen >= 5 || brw->is_g4x)
      ctx->Const.MaxClipPlanes = 8;

   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeInstructions = 16 * 1024;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxAluInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxTexInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxTexIndirections = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAluInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTexInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTexIndirections = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAttribs = 16;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTemps = 256;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAddressRegs = 1;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeParameters = 1024;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxEnvParams =
      MIN2(ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeParameters,
           ctx->Const.Program[MESA_SHADER_VERTEX].MaxEnvParams);

   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeInstructions = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAluInstructions = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTexInstructions = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTexIndirections = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAttribs = 12;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTemps = 256;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAddressRegs = 0;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeParameters = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxEnvParams =
      MIN2(ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeParameters,
           ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxEnvParams);

   /* Fragment shaders use real, 32-bit twos-complement integers for all
    * integer types.
    */
   ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.RangeMin = 31;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.RangeMax = 30;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.Precision = 0;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].HighInt = ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MediumInt = ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt;

   if (brw->gen >= 7) {
      ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxAtomicCounters = MAX_ATOMIC_COUNTERS;
      ctx->Const.Program[MESA_SHADER_VERTEX].MaxAtomicCounters = MAX_ATOMIC_COUNTERS;
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxAtomicCounters = MAX_ATOMIC_COUNTERS;
      ctx->Const.Program[MESA_SHADER_COMPUTE].MaxAtomicCounters = MAX_ATOMIC_COUNTERS;
      ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxAtomicBuffers = BRW_MAX_ABO;
      ctx->Const.Program[MESA_SHADER_VERTEX].MaxAtomicBuffers = BRW_MAX_ABO;
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxAtomicBuffers = BRW_MAX_ABO;
      ctx->Const.Program[MESA_SHADER_COMPUTE].MaxAtomicBuffers = BRW_MAX_ABO;
      ctx->Const.MaxCombinedAtomicBuffers = 3 * BRW_MAX_ABO;
   }

   /* Gen6 converts quads to polygons at the beginning of the 3D pipeline,
    * but we're not sure how it's actually done with respect to vertex
    * order, which affects the provoking vertex decision. Always use the
    * last-vertex convention for quad primitives, which works as expected
    * for now.
    */
   if (brw->gen >= 6)
      ctx->Const.QuadsFollowProvokingVertexConvention = false;

   ctx->Const.NativeIntegers = true;
   ctx->Const.UniformBooleanTrue = 1;

   /* From the gen4 PRM, volume 4 page 127:
    *
    *     "For SURFTYPE_BUFFER non-rendertarget surfaces, this field specifies
    *      the base address of the first element of the surface, computed in
    *      software by adding the surface base address to the byte offset of
    *      the element in the buffer."
    *
    * However, unaligned accesses are slower, so enforce buffer alignment.
    */
   ctx->Const.UniformBufferOffsetAlignment = 16;
   ctx->Const.TextureBufferOffsetAlignment = 16;

   if (brw->gen >= 6) {
      ctx->Const.MaxVarying = 32;
      ctx->Const.Program[MESA_SHADER_VERTEX].MaxOutputComponents = 128;
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxInputComponents = 64;
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxOutputComponents = 128;
      ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxInputComponents = 128;
   }

   /* We want the GLSL compiler to emit code that uses condition codes */
   for (int i = 0; i < MESA_SHADER_STAGES; i++) {
      ctx->Const.ShaderCompilerOptions[i].MaxIfDepth = brw->gen < 6 ? 16 : UINT_MAX;
      ctx->Const.ShaderCompilerOptions[i].EmitCondCodes = true;
      ctx->Const.ShaderCompilerOptions[i].EmitNoNoise = true;
      ctx->Const.ShaderCompilerOptions[i].EmitNoMainReturn = true;
      ctx->Const.ShaderCompilerOptions[i].EmitNoIndirectInput = true;
      ctx->Const.ShaderCompilerOptions[i].EmitNoIndirectOutput =
         (i == MESA_SHADER_FRAGMENT);
      ctx->Const.ShaderCompilerOptions[i].EmitNoIndirectTemp =
         (i == MESA_SHADER_FRAGMENT);
      ctx->Const.ShaderCompilerOptions[i].EmitNoIndirectUniform = false;
      ctx->Const.ShaderCompilerOptions[i].LowerClipDistance = true;
   }

   ctx->Const.ShaderCompilerOptions[MESA_SHADER_VERTEX].OptimizeForAOS = true;
   ctx->Const.ShaderCompilerOptions[MESA_SHADER_GEOMETRY].OptimizeForAOS = true;

   /* ARB_viewport_array */
   if (brw->gen >= 7 && ctx->API == API_OPENGL_CORE) {
      ctx->Const.MaxViewports = GEN7_NUM_VIEWPORTS;
      ctx->Const.ViewportSubpixelBits = 0;

      /* Cast to float before negating because MaxViewportWidth is unsigned.
       */
      ctx->Const.ViewportBounds.Min = -(float)ctx->Const.MaxViewportWidth;
      ctx->Const.ViewportBounds.Max = ctx->Const.MaxViewportWidth;
   }

   /* ARB_gpu_shader5 */
   if (brw->gen >= 7)
      ctx->Const.MaxVertexStreams = MIN2(4, MAX_VERTEX_STREAMS);
}

/**
 * Process driconf (drirc) options, setting appropriate context flags.
 *
 * intelInitExtensions still pokes at optionCache directly, in order to
 * avoid advertising various extensions. No flags are set, so it makes
 * sense to continue doing that there.
 */
static void
brw_process_driconf_options(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   driOptionCache *options = &brw->optionCache;
   driParseConfigFiles(options, &brw->intelScreen->optionCache,
                       brw->driContext->driScreenPriv->myNum, "i965");

   int bo_reuse_mode = driQueryOptioni(options, "bo_reuse");
   switch (bo_reuse_mode) {
   case DRI_CONF_BO_REUSE_DISABLED:
      break;
   case DRI_CONF_BO_REUSE_ALL:
      intel_bufmgr_gem_enable_reuse(brw->bufmgr);
      break;
   }

   if (!driQueryOptionb(options, "hiz")) {
      brw->has_hiz = false;
      /* On gen6, you can only do separate stencil with HIZ. */
      if (brw->gen == 6)
         brw->has_separate_stencil = false;
   }

   if (driQueryOptionb(options, "always_flush_batch")) {
      fprintf(stderr, "flushing batchbuffer before/after each draw call\n");
      brw->always_flush_batch = true;
   }

   if (driQueryOptionb(options, "always_flush_cache")) {
      fprintf(stderr, "flushing GPU caches before/after each draw call\n");
      brw->always_flush_cache = true;
   }

   if (driQueryOptionb(options, "disable_throttling")) {
      fprintf(stderr, "disabling flush throttling\n");
      brw->disable_throttling = true;
   }

   brw->disable_derivative_optimization =
      driQueryOptionb(&brw->optionCache, "disable_derivative_optimization");

   brw->precompile = driQueryOptionb(&brw->optionCache, "shader_precompile");

   ctx->Const.ForceGLSLExtensionsWarn =
      driQueryOptionb(options, "force_glsl_extensions_warn");

   ctx->Const.DisableGLSLLineContinuations =
      driQueryOptionb(options, "disable_glsl_line_continuations");

   ctx->Const.AllowGLSLExtensionDirectiveMidShader =
      driQueryOptionb(options, "allow_glsl_extension_directive_midshader");
}

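/**
 * Create and initialize a brw_context: validate the requested context
 * flags, wire up the per-generation vtable, create the Mesa context and
 * helper modules, and (on Gen6+) create a kernel hardware context.
 */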
GLboolean
brwCreateContext(gl_api api,
                 const struct gl_config *mesaVis,
                 __DRIcontext *driContextPriv,
                 unsigned major_version,
                 unsigned minor_version,
                 uint32_t flags,
                 bool notify_reset,
                 unsigned *dri_ctx_error,
                 void *sharedContextPrivate)
{
   __DRIscreen *sPriv = driContextPriv->driScreenPriv;
   struct gl_context *shareCtx = (struct gl_context *) sharedContextPrivate;
   struct intel_screen *screen = sPriv->driverPrivate;
   const struct brw_device_info *devinfo = screen->devinfo;
   struct dd_function_table functions;

   /* Only allow the __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS flag if the kernel
    * provides us with context reset notifications.
    */
   uint32_t allowed_flags = __DRI_CTX_FLAG_DEBUG
      | __DRI_CTX_FLAG_FORWARD_COMPATIBLE;

   if (screen->has_context_reset_notification)
      allowed_flags |= __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS;

   if (flags & ~allowed_flags) {
      *dri_ctx_error = __DRI_CTX_ERROR_UNKNOWN_FLAG;
      return false;
   }

   struct brw_context *brw = rzalloc(NULL, struct brw_context);
   if (!brw) {
      fprintf(stderr, "%s: failed to alloc context\n", __FUNCTION__);
      *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
      return false;
   }

   driContextPriv->driverPrivate = brw;
   brw->driContext = driContextPriv;
   brw->intelScreen = screen;
   brw->bufmgr = screen->bufmgr;

   brw->gen = devinfo->gen;
   brw->gt = devinfo->gt;
   brw->is_g4x = devinfo->is_g4x;
   brw->is_baytrail = devinfo->is_baytrail;
   brw->is_haswell = devinfo->is_haswell;
   brw->is_cherryview = devinfo->is_cherryview;
   brw->has_llc = devinfo->has_llc;
   brw->has_hiz = devinfo->has_hiz_and_separate_stencil;
   brw->has_separate_stencil = devinfo->has_hiz_and_separate_stencil;
   brw->has_pln = devinfo->has_pln;
   brw->has_compr4 = devinfo->has_compr4;
   brw->has_surface_tile_offset = devinfo->has_surface_tile_offset;
   brw->has_negative_rhw_bug = devinfo->has_negative_rhw_bug;
   brw->needs_unlit_centroid_workaround =
      devinfo->needs_unlit_centroid_workaround;

   brw->must_use_separate_stencil = screen->hw_must_use_separate_stencil;
   brw->has_swizzling = screen->hw_has_swizzling;

   brw->vs.base.stage = MESA_SHADER_VERTEX;
   brw->gs.base.stage = MESA_SHADER_GEOMETRY;
   brw->wm.base.stage = MESA_SHADER_FRAGMENT;
   if (brw->gen >= 8) {
      gen8_init_vtable_surface_functions(brw);
      brw->vtbl.emit_depth_stencil_hiz = gen8_emit_depth_stencil_hiz;
   } else if (brw->gen >= 7) {
      gen7_init_vtable_surface_functions(brw);
      brw->vtbl.emit_depth_stencil_hiz = gen7_emit_depth_stencil_hiz;
   } else if (brw->gen >= 6) {
      gen6_init_vtable_surface_functions(brw);
      brw->vtbl.emit_depth_stencil_hiz = gen6_emit_depth_stencil_hiz;
   } else {
      gen4_init_vtable_surface_functions(brw);
      brw->vtbl.emit_depth_stencil_hiz = brw_emit_depth_stencil_hiz;
   }

   brw_init_driver_functions(brw, &functions);

   if (notify_reset)
      functions.GetGraphicsResetStatus = brw_get_graphics_reset_status;

   struct gl_context *ctx = &brw->ctx;

   if (!_mesa_initialize_context(ctx, api, mesaVis, shareCtx, &functions)) {
      *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
      fprintf(stderr, "%s: failed to init mesa context\n", __FUNCTION__);
      intelDestroyContext(driContextPriv);
      return false;
   }

   driContextSetFlags(ctx, flags);

   /* Initialize the software rasterizer and helper modules.
    *
    * As of GL 3.1 core, the gen4+ driver doesn't need the swrast context for
    * software fallbacks (which we have to support on legacy GL to do weird
    * glDrawPixels(), glBitmap(), and other functions).
    */
   if (api != API_OPENGL_CORE && api != API_OPENGLES2) {
      _swrast_CreateContext(ctx);
   }

   _vbo_CreateContext(ctx);
   if (ctx->swrast_context) {
      _tnl_CreateContext(ctx);
      TNL_CONTEXT(ctx)->Driver.RunPipeline = _tnl_run_pipeline;
      _swsetup_CreateContext(ctx);

      /* Configure swrast to match hardware characteristics: */
      _swrast_allow_pixel_fog(ctx, false);
      _swrast_allow_vertex_fog(ctx, true);
   }

   _mesa_meta_init(ctx);

   brw_process_driconf_options(brw);
   brw_process_intel_debug_variable(brw);
   brw_initialize_context_constants(brw);

   ctx->Const.ResetStrategy = notify_reset
      ? GL_LOSE_CONTEXT_ON_RESET_ARB : GL_NO_RESET_NOTIFICATION_ARB;

   /* Reinitialize the context point state. It depends on ctx->Const values. */
   _mesa_init_point(ctx);

   intel_fbo_init(brw);

   intel_batchbuffer_init(brw);

   if (brw->gen >= 6) {
      /* Create a new hardware context. Using a hardware context means that
       * our GPU state will be saved/restored on context switch, allowing us
       * to assume that the GPU is in the same state we left it in.
       *
       * This is required for transform feedback buffer offsets, query objects,
       * and also allows us to reduce how much state we have to emit.
       */
      brw->hw_ctx = drm_intel_gem_context_create(brw->bufmgr);

      if (!brw->hw_ctx) {
         fprintf(stderr, "Gen6+ requires Kernel 3.6 or later.\n");
         intelDestroyContext(driContextPriv);
         return false;
      }
   }

   brw_init_state(brw);

   intelInitExtensions(ctx);

   brw_init_surface_formats(brw);

   brw->max_vs_threads = devinfo->max_vs_threads;
   brw->max_gs_threads = devinfo->max_gs_threads;
   brw->max_wm_threads = devinfo->max_wm_threads;
   brw->urb.size = devinfo->urb.size;
   brw->urb.min_vs_entries = devinfo->urb.min_vs_entries;
   brw->urb.max_vs_entries = devinfo->urb.max_vs_entries;
   brw->urb.max_gs_entries = devinfo->urb.max_gs_entries;

   /* Estimate the size of the mappable aperture into the GTT. There's an
    * ioctl to get the whole GTT size, but not one to get the mappable subset.
    * It turns out it's basically always 256MB, though some ancient hardware
    * was smaller.
    */
   uint32_t gtt_size = 256 * 1024 * 1024;

   /* We don't want to map two objects such that a memcpy between them would
    * just fault one mapping in and then the other over and over forever. So
    * we would need to divide the GTT size by 2. Additionally, some GTT is
    * taken up by things like the framebuffer and the ringbuffer and such, so
    * be more conservative.
    */
   brw->max_gtt_map_object_size = gtt_size / 4;

   if (brw->gen == 6)
      brw->urb.gen6_gs_previously_active = false;

   brw->prim_restart.in_progress = false;
   brw->prim_restart.enable_cut_index = false;
   brw->gs.enabled = false;
   brw->sf.viewport_transform_enable = true;

   ctx->VertexProgram._MaintainTnlProgram = true;
   ctx->FragmentProgram._MaintainTexEnvProgram = true;

   brw_draw_init( brw );

   if ((flags & __DRI_CTX_FLAG_DEBUG) != 0) {
      /* Turn on some extra GL_ARB_debug_output generation. */
      brw->perf_debug = true;
   }

   if ((flags & __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS) != 0)
      ctx->Const.ContextFlags |= GL_CONTEXT_FLAG_ROBUST_ACCESS_BIT_ARB;

   if (INTEL_DEBUG & DEBUG_SHADER_TIME)
      brw_init_shader_time(brw);

   _mesa_compute_version(ctx);

   _mesa_initialize_dispatch_tables(ctx);
   _mesa_initialize_vbo_vtxfmt(ctx);

   if (ctx->Extensions.AMD_performance_monitor) {
      brw_init_performance_monitors(brw);
   }

   vbo_use_buffer_objects(ctx);
   vbo_always_unmap_buffers(ctx);

   return true;
}

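/**
 * Tear down a brw_context: flush outstanding work, destroy the helper
 * modules and hardware context, and free the Mesa context data.
 */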
void
intelDestroyContext(__DRIcontext * driContextPriv)
{
   struct brw_context *brw =
      (struct brw_context *) driContextPriv->driverPrivate;
   struct gl_context *ctx = &brw->ctx;

   assert(brw); /* should never be null */
   if (!brw)
      return;

   /* Dump a final BMP in case the application doesn't call SwapBuffers */
   if (INTEL_DEBUG & DEBUG_AUB) {
      intel_batchbuffer_flush(brw);
      aub_dump_bmp(&brw->ctx);
   }

   _mesa_meta_free(&brw->ctx);
   brw_meta_fast_clear_free(brw);

   if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
      /* Force a report. */
      brw->shader_time.report_time = 0;

      brw_collect_and_report_shader_time(brw);
      brw_destroy_shader_time(brw);
   }

   brw_destroy_state(brw);
   brw_draw_destroy(brw);

   drm_intel_bo_unreference(brw->curbe.curbe_bo);

   drm_intel_gem_context_destroy(brw->hw_ctx);

   if (ctx->swrast_context) {
      _swsetup_DestroyContext(&brw->ctx);
      _tnl_DestroyContext(&brw->ctx);
   }
   _vbo_DestroyContext(&brw->ctx);

   if (ctx->swrast_context)
      _swrast_DestroyContext(&brw->ctx);

   intel_batchbuffer_free(brw);

   drm_intel_bo_unreference(brw->first_post_swapbuffers_batch);
   brw->first_post_swapbuffers_batch = NULL;

   driDestroyOptionCache(&brw->optionCache);

   /* free the Mesa context */
   _mesa_free_context_data(&brw->ctx);

   ralloc_free(brw);
   driContextPriv->driverPrivate = NULL;
}

GLboolean
intelUnbindContext(__DRIcontext * driContextPriv)
{
   /* Unset current context and dispatch table */
   _mesa_make_current(NULL, NULL, NULL);

   return true;
}

/**
 * Fixes up the context for GLES2/3 with our default-to-sRGB-capable behavior
 * on window system framebuffers.
 *
 * Desktop GL is fairly reasonable in its handling of sRGB: You can ask if
 * your renderbuffer can do sRGB encode, and you can flip a switch that does
 * sRGB encode if the renderbuffer can handle it. You can ask specifically
 * for a visual where you're guaranteed to be capable, but it turns out that
 * everyone just makes all their ARGB8888 visuals capable and doesn't offer
 * incapable ones, because there's no difference between the two in resources
 * used. Applications thus get built that accidentally rely on the default
 * visual choice being sRGB, so we make ours sRGB capable. Everything sounds
 * great...
 *
 * But for GLES2/3, they decided that it was silly to not turn on sRGB encode
 * for sRGB renderbuffers you made with the GL_EXT_texture_sRGB equivalent.
 * So they removed the enable knob and made it "if the renderbuffer is sRGB
 * capable, do sRGB encode". Then, for your window system renderbuffers, you
 * can ask for sRGB visuals and get sRGB encode, or not ask for sRGB visuals
 * and get no sRGB encode (assuming that both kinds of visual are available).
 * Thus our choice to support sRGB by default on our visuals for desktop would
 * result in broken rendering of GLES apps that aren't expecting sRGB encode.
 *
 * Unfortunately, renderbuffer setup happens before a context is created. So
 * in intel_screen.c we always set up sRGB, and here, if you're a GLES2/3
 * context (without an sRGB visual, though we don't have sRGB visuals exposed
 * yet), we go turn that back off before anyone finds out.
 */
static void
intel_gles3_srgb_workaround(struct brw_context *brw,
                            struct gl_framebuffer *fb)
{
   struct gl_context *ctx = &brw->ctx;

   if (_mesa_is_desktop_gl(ctx) || !fb->Visual.sRGBCapable)
      return;

   /* Some day when we support the sRGB capable bit on visuals available for
    * GLES, we'll need to respect that and not disable things here.
    */
   fb->Visual.sRGBCapable = false;
   for (int i = 0; i < BUFFER_COUNT; i++) {
      if (fb->Attachment[i].Renderbuffer &&
          fb->Attachment[i].Renderbuffer->Format == MESA_FORMAT_B8G8R8A8_SRGB) {
         fb->Attachment[i].Renderbuffer->Format = MESA_FORMAT_B8G8R8A8_UNORM;
      }
   }
}

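/**
 * Bind a context to a drawable/readable pair, flushing the outgoing context
 * and applying the GLES sRGB workaround before the framebuffers' miptrees
 * are allocated.
 */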
GLboolean
intelMakeCurrent(__DRIcontext * driContextPriv,
                 __DRIdrawable * driDrawPriv,
                 __DRIdrawable * driReadPriv)
{
   struct brw_context *brw;
   GET_CURRENT_CONTEXT(curCtx);

   if (driContextPriv)
      brw = (struct brw_context *) driContextPriv->driverPrivate;
   else
      brw = NULL;

   /* According to the glXMakeCurrent() man page: "Pending commands to
    * the previous context, if any, are flushed before it is released."
    * But only flush if we're actually changing contexts.
    */
   if (brw_context(curCtx) && brw_context(curCtx) != brw) {
      _mesa_flush(curCtx);
   }

   if (driContextPriv) {
      struct gl_context *ctx = &brw->ctx;
      struct gl_framebuffer *fb, *readFb;

      if (driDrawPriv == NULL && driReadPriv == NULL) {
         fb = _mesa_get_incomplete_framebuffer();
         readFb = _mesa_get_incomplete_framebuffer();
      } else {
         fb = driDrawPriv->driverPrivate;
         readFb = driReadPriv->driverPrivate;
         driContextPriv->dri2.draw_stamp = driDrawPriv->dri2.stamp - 1;
         driContextPriv->dri2.read_stamp = driReadPriv->dri2.stamp - 1;
      }

      /* The sRGB workaround changes the renderbuffer's format. We must change
       * the format before the renderbuffer's miptree gets allocated, otherwise
       * the formats of the renderbuffer and its miptree will differ.
       */
      intel_gles3_srgb_workaround(brw, fb);
      intel_gles3_srgb_workaround(brw, readFb);

      /* If the context viewport hasn't been initialized, force a call out to
       * the loader to get buffers so we have a drawable size for the initial
       * viewport. */
      if (!brw->ctx.ViewportInitialized)
         intel_prepare_render(brw);

      _mesa_make_current(ctx, fb, readFb);
   } else {
      _mesa_make_current(NULL, NULL, NULL);
   }

   return true;
}

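/**
 * Resolve or downsample the drawable's color buffers so that their contents
 * are valid before the loader copies them (e.g. at SwapBuffers time).
 */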
void
intel_resolve_for_dri2_flush(struct brw_context *brw,
                             __DRIdrawable *drawable)
{
   if (brw->gen < 6) {
      /* MSAA and fast color clear are not supported, so don't waste time
       * checking whether a resolve is needed.
       */
      return;
   }

   struct gl_framebuffer *fb = drawable->driverPrivate;
   struct intel_renderbuffer *rb;

   /* Usually, only the back buffer will need to be downsampled. However,
    * the front buffer will also need it if the user has rendered into it.
    */
   static const gl_buffer_index buffers[2] = {
      BUFFER_BACK_LEFT,
      BUFFER_FRONT_LEFT,
   };

   for (int i = 0; i < 2; ++i) {
      rb = intel_get_renderbuffer(fb, buffers[i]);
      if (rb == NULL || rb->mt == NULL)
         continue;
      if (rb->mt->num_samples <= 1)
         intel_miptree_resolve_color(brw, rb->mt);
      else
         intel_renderbuffer_downsample(brw, rb);
   }
}

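/** Return the bits-per-pixel of the renderbuffer's format. */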
static unsigned
intel_bits_per_pixel(const struct intel_renderbuffer *rb)
{
   return _mesa_get_format_bytes(intel_rb_format(rb)) * 8;
}

static void
intel_query_dri2_buffers(struct brw_context *brw,
                         __DRIdrawable *drawable,
                         __DRIbuffer **buffers,
                         int *count);

static void
intel_process_dri2_buffer(struct brw_context *brw,
                          __DRIdrawable *drawable,
                          __DRIbuffer *buffer,
                          struct intel_renderbuffer *rb,
                          const char *buffer_name);

static void
intel_update_image_buffers(struct brw_context *brw, __DRIdrawable *drawable);

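/**
 * Fetch the current set of DRI2 buffers for a drawable and attach each one
 * to the matching renderbuffer.
 */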
static void
intel_update_dri2_buffers(struct brw_context *brw, __DRIdrawable *drawable)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;
   struct intel_renderbuffer *rb;
   __DRIbuffer *buffers = NULL;
   int i, count;
   const char *region_name;

   /* Set this up front, so that in case our buffers get invalidated
    * while we're getting new buffers, we don't clobber the stamp and
    * thus ignore the invalidate. */
   drawable->lastStamp = drawable->dri2.stamp;

   if (unlikely(INTEL_DEBUG & DEBUG_DRI))
      fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);

   intel_query_dri2_buffers(brw, drawable, &buffers, &count);

   if (buffers == NULL)
      return;

   for (i = 0; i < count; i++) {
      switch (buffers[i].attachment) {
      case __DRI_BUFFER_FRONT_LEFT:
         rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
         region_name = "dri2 front buffer";
         break;

      case __DRI_BUFFER_FAKE_FRONT_LEFT:
         rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
         region_name = "dri2 fake front buffer";
         break;

      case __DRI_BUFFER_BACK_LEFT:
         rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
         region_name = "dri2 back buffer";
         break;

      case __DRI_BUFFER_DEPTH:
      case __DRI_BUFFER_HIZ:
      case __DRI_BUFFER_DEPTH_STENCIL:
      case __DRI_BUFFER_STENCIL:
      case __DRI_BUFFER_ACCUM:
      default:
         fprintf(stderr,
                 "unhandled buffer attach event, attachment type %d\n",
                 buffers[i].attachment);
         return;
      }

      intel_process_dri2_buffer(brw, drawable, &buffers[i], rb, region_name);
   }

}

void
intel_update_renderbuffers(__DRIcontext *context, __DRIdrawable *drawable)
{
   struct brw_context *brw = context->driverPrivate;
   __DRIscreen *screen = brw->intelScreen->driScrnPriv;

   /* Set this up front, so that in case our buffers get invalidated
    * while we're getting new buffers, we don't clobber the stamp and
    * thus ignore the invalidate. */
   drawable->lastStamp = drawable->dri2.stamp;

   if (unlikely(INTEL_DEBUG & DEBUG_DRI))
      fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);

   if (screen->image.loader)
      intel_update_image_buffers(brw, drawable);
   else
      intel_update_dri2_buffers(brw, drawable);

   driUpdateFramebufferSize(&brw->ctx, drawable);
}

/**
 * intel_prepare_render should be called anywhere that current read/drawbuffer
 * state is required.
 */
void
intel_prepare_render(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   __DRIcontext *driContext = brw->driContext;
   __DRIdrawable *drawable;

   drawable = driContext->driDrawablePriv;
   if (drawable && drawable->dri2.stamp != driContext->dri2.draw_stamp) {
      if (drawable->lastStamp != drawable->dri2.stamp)
         intel_update_renderbuffers(driContext, drawable);
      driContext->dri2.draw_stamp = drawable->dri2.stamp;
   }

   drawable = driContext->driReadablePriv;
   if (drawable && drawable->dri2.stamp != driContext->dri2.read_stamp) {
      if (drawable->lastStamp != drawable->dri2.stamp)
         intel_update_renderbuffers(driContext, drawable);
      driContext->dri2.read_stamp = drawable->dri2.stamp;
   }

   /* If we're currently rendering to the front buffer, the rendering
    * that will happen next will probably dirty the front buffer. So
    * mark it as dirty here.
    */
   if (brw_is_front_buffer_drawing(ctx->DrawBuffer))
      brw->front_buffer_dirty = true;

   /* Wait for the swapbuffers before the one we just emitted, so we
    * don't get too many swaps outstanding for apps that are GPU-heavy
    * but not CPU-heavy.
    *
    * We're using intelDRI2Flush (called from the loader before
    * swapbuffer) and glFlush (for front buffer rendering) as the
    * indicator that a frame is done and then throttle when we get
    * here as we prepare to render the next frame. At this point the
    * round trips for swap/copy and getting new buffers are done and
    * we'll spend less time waiting on the GPU.
    *
    * Unfortunately, we don't have a handle to the batch containing
    * the swap, and getting our hands on that doesn't seem worth it,
    * so we just use the first batch we emitted after the last swap.
    */
   if (brw->need_throttle && brw->first_post_swapbuffers_batch) {
      if (!brw->disable_throttling)
         drm_intel_bo_wait_rendering(brw->first_post_swapbuffers_batch);
      drm_intel_bo_unreference(brw->first_post_swapbuffers_batch);
      brw->first_post_swapbuffers_batch = NULL;
      brw->need_throttle = false;
   }
}

/**
 * \brief Query DRI2 to obtain a DRIdrawable's buffers.
 *
 * To determine which DRI buffers to request, examine the renderbuffers
 * attached to the drawable's framebuffer. Then request the buffers with
 * DRI2GetBuffers() or DRI2GetBuffersWithFormat().
 *
 * This is called from intel_update_renderbuffers().
 *
 * \param drawable      Drawable whose buffers are queried.
 * \param buffers       [out] List of buffers returned by DRI2 query.
 * \param buffer_count  [out] Number of buffers returned.
 *
 * \see intel_update_renderbuffers()
 * \see DRI2GetBuffers()
 * \see DRI2GetBuffersWithFormat()
 */
static void
intel_query_dri2_buffers(struct brw_context *brw,
                         __DRIdrawable *drawable,
                         __DRIbuffer **buffers,
                         int *buffer_count)
{
   __DRIscreen *screen = brw->intelScreen->driScrnPriv;
   struct gl_framebuffer *fb = drawable->driverPrivate;
   int i = 0;
   unsigned attachments[8];

   struct intel_renderbuffer *front_rb;
   struct intel_renderbuffer *back_rb;

   front_rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
   back_rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);

   memset(attachments, 0, sizeof(attachments));
   if ((brw_is_front_buffer_drawing(fb) ||
        brw_is_front_buffer_reading(fb) ||
        !back_rb) && front_rb) {
      /* If a fake front buffer is in use, then querying for
       * __DRI_BUFFER_FRONT_LEFT will cause the server to copy the image from
       * the real front buffer to the fake front buffer. So before doing the
       * query, we need to make sure all the pending drawing has landed in the
       * real front buffer.
       */
      intel_batchbuffer_flush(brw);
      intel_flush_front(&brw->ctx);

      attachments[i++] = __DRI_BUFFER_FRONT_LEFT;
      attachments[i++] = intel_bits_per_pixel(front_rb);
   } else if (front_rb && brw->front_buffer_dirty) {
      /* We have pending front buffer rendering, but we aren't querying for a
       * front buffer. If the front buffer we have is a fake front buffer,
       * the X server is going to throw it away when it processes the query.
       * So before doing the query, make sure all the pending drawing has
       * landed in the real front buffer.
       */
      intel_batchbuffer_flush(brw);
      intel_flush_front(&brw->ctx);
   }

   if (back_rb) {
      attachments[i++] = __DRI_BUFFER_BACK_LEFT;
      attachments[i++] = intel_bits_per_pixel(back_rb);
   }

   assert(i <= ARRAY_SIZE(attachments));

   *buffers = screen->dri2.loader->getBuffersWithFormat(drawable,
                                                        &drawable->w,
                                                        &drawable->h,
                                                        attachments, i / 2,
                                                        buffer_count,
                                                        drawable->loaderPrivate);
}

/**
 * \brief Assign a DRI buffer's DRM region to a renderbuffer.
 *
 * This is called from intel_update_renderbuffers().
 *
 * \par Note:
 *    DRI buffers whose attachment point is DRI2BufferStencil or
 *    DRI2BufferDepthStencil are handled as special cases.
 *
 * \param buffer_name is a human readable name, such as "dri2 front buffer",
 *        that is passed to drm_intel_bo_gem_create_from_name().
 *
 * \see intel_update_renderbuffers()
 */
static void
intel_process_dri2_buffer(struct brw_context *brw,
                          __DRIdrawable *drawable,
                          __DRIbuffer *buffer,
                          struct intel_renderbuffer *rb,
                          const char *buffer_name)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;
   drm_intel_bo *bo;

   if (!rb)
      return;

   unsigned num_samples = rb->Base.Base.NumSamples;

   /* We try to avoid closing and reopening the same BO name, because the first
    * use of a mapping of the buffer involves a bunch of page faulting which is
    * moderately expensive.
    */
   struct intel_mipmap_tree *last_mt;
   if (num_samples == 0)
      last_mt = rb->mt;
   else
      last_mt = rb->singlesample_mt;

   uint32_t old_name = 0;
   if (last_mt) {
      /* The bo already has a name because the miptree was created by a
       * previous call to intel_process_dri2_buffer(). If a bo already has a
       * name, then drm_intel_bo_flink() is a low-cost getter. It does not
       * create a new name.
       */
      drm_intel_bo_flink(last_mt->bo, &old_name);
   }

   if (old_name == buffer->name)
      return;

   if (unlikely(INTEL_DEBUG & DEBUG_DRI)) {
      fprintf(stderr,
              "attaching buffer %d, at %d, cpp %d, pitch %d\n",
              buffer->name, buffer->attachment,
              buffer->cpp, buffer->pitch);
   }

   intel_miptree_release(&rb->mt);
   bo = drm_intel_bo_gem_create_from_name(brw->bufmgr, buffer_name,
                                          buffer->name);
   if (!bo) {
      fprintf(stderr,
              "Failed to open BO for returned DRI2 buffer "
              "(%dx%d, %s, named %d).\n"
              "This is likely a bug in the X Server that will lead to a "
              "crash soon.\n",
              drawable->w, drawable->h, buffer_name, buffer->name);
      return;
   }

   intel_update_winsys_renderbuffer_miptree(brw, rb, bo,
                                            drawable->w, drawable->h,
                                            buffer->pitch);

   if (brw_is_front_buffer_drawing(fb) &&
       (buffer->attachment == __DRI_BUFFER_FRONT_LEFT ||
        buffer->attachment == __DRI_BUFFER_FAKE_FRONT_LEFT) &&
       rb->Base.Base.NumSamples > 1) {
      intel_renderbuffer_upsample(brw, rb);
   }

   assert(rb->mt);

   drm_intel_bo_unreference(bo);
}

/**
 * \brief Query the DRI image loader to obtain a DRIdrawable's buffers.
 *
 * To determine which DRI buffers to request, examine the renderbuffers
 * attached to the drawable's framebuffer. Then request the buffers from
 * the image loader.
 *
 * This is called from intel_update_renderbuffers().
 *
 * \param drawable      Drawable whose buffers are queried.
 * \param buffers       [out] List of buffers returned by DRI2 query.
 * \param buffer_count  [out] Number of buffers returned.
 *
 * \see intel_update_renderbuffers()
 */

static void
intel_update_image_buffer(struct brw_context *intel,
                          __DRIdrawable *drawable,
                          struct intel_renderbuffer *rb,
                          __DRIimage *buffer,
                          enum __DRIimageBufferMask buffer_type)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;

   if (!rb || !buffer->bo)
      return;

   unsigned num_samples = rb->Base.Base.NumSamples;

   /* Check and see if we're already bound to the right
    * buffer object
    */
   struct intel_mipmap_tree *last_mt;
   if (num_samples == 0)
      last_mt = rb->mt;
   else
      last_mt = rb->singlesample_mt;

   if (last_mt && last_mt->bo == buffer->bo)
      return;

   intel_update_winsys_renderbuffer_miptree(intel, rb, buffer->bo,
                                            buffer->width, buffer->height,
                                            buffer->pitch);

   if (brw_is_front_buffer_drawing(fb) &&
       buffer_type == __DRI_IMAGE_BUFFER_FRONT &&
       rb->Base.Base.NumSamples > 1) {
      intel_renderbuffer_upsample(intel, rb);
   }
}

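/**
 * Request front/back __DRIimages from the image loader and bind them to the
 * drawable's renderbuffers.
 */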
static void
intel_update_image_buffers(struct brw_context *brw, __DRIdrawable *drawable)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;
   __DRIscreen *screen = brw->intelScreen->driScrnPriv;
   struct intel_renderbuffer *front_rb;
   struct intel_renderbuffer *back_rb;
   struct __DRIimageList images;
   unsigned int format;
   uint32_t buffer_mask = 0;

   front_rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
   back_rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);

   if (back_rb)
      format = intel_rb_format(back_rb);
   else if (front_rb)
      format = intel_rb_format(front_rb);
   else
      return;

   if (front_rb && (brw_is_front_buffer_drawing(fb) ||
                    brw_is_front_buffer_reading(fb) || !back_rb)) {
      buffer_mask |= __DRI_IMAGE_BUFFER_FRONT;
   }

   if (back_rb)
      buffer_mask |= __DRI_IMAGE_BUFFER_BACK;

   (*screen->image.loader->getBuffers) (drawable,
                                        driGLFormatToImageFormat(format),
                                        &drawable->dri2.stamp,
                                        drawable->loaderPrivate,
                                        buffer_mask,
                                        &images);

   if (images.image_mask & __DRI_IMAGE_BUFFER_FRONT) {
      drawable->w = images.front->width;
      drawable->h = images.front->height;
      intel_update_image_buffer(brw,
                                drawable,
                                front_rb,
                                images.front,
                                __DRI_IMAGE_BUFFER_FRONT);
   }
   if (images.image_mask & __DRI_IMAGE_BUFFER_BACK) {
      drawable->w = images.back->width;
      drawable->h = images.back->height;
      intel_update_image_buffer(brw,
                                drawable,
                                back_rb,
                                images.back,
                                __DRI_IMAGE_BUFFER_BACK);
   }
}