i965/skl: Report more accurate number of samples for format
src/mesa/drivers/dri/i965/brw_context.c (from mesa.git)
/*
 Copyright 2003 VMware, Inc.
 Copyright (C) Intel Corp. 2006.  All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */


#include "main/api_exec.h"
#include "main/context.h"
#include "main/fbobject.h"
#include "main/extensions.h"
#include "main/imports.h"
#include "main/macros.h"
#include "main/points.h"
#include "main/version.h"
#include "main/vtxfmt.h"
#include "main/texobj.h"

#include "vbo/vbo_context.h"

#include "drivers/common/driverfuncs.h"
#include "drivers/common/meta.h"
#include "utils.h"

#include "brw_context.h"
#include "brw_defines.h"
#include "brw_draw.h"
#include "brw_state.h"

#include "intel_batchbuffer.h"
#include "intel_buffer_objects.h"
#include "intel_buffers.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
#include "intel_pixel.h"
#include "intel_image.h"
#include "intel_tex.h"
#include "intel_tex_obj.h"

#include "swrast_setup/swrast_setup.h"
#include "tnl/tnl.h"
#include "tnl/t_pipeline.h"
#include "util/ralloc.h"

/***************************************
 * Mesa's Driver Functions
 ***************************************/

static size_t
brw_query_samples_for_format(struct gl_context *ctx, GLenum target,
                             GLenum internalFormat, int samples[16])
{
   struct brw_context *brw = brw_context(ctx);

   (void) target;

   switch (brw->gen) {
   case 9:
   case 8:
      samples[0] = 8;
      samples[1] = 4;
      samples[2] = 2;
      return 3;

   case 7:
      samples[0] = 8;
      samples[1] = 4;
      return 2;

   case 6:
      samples[0] = 4;
      return 1;

   default:
      assert(brw->gen < 6);
      samples[0] = 1;
      return 1;
   }
}
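
/* For illustration only (not driver code): applications reach the hook above
 * through ARB_internalformat_query.  Assuming a Gen8/9 part, something like
 *
 *    GLint num = 0, samples[16];
 *    glGetInternalformativ(GL_RENDERBUFFER, GL_RGBA8,
 *                          GL_NUM_SAMPLE_COUNTS, 1, &num);
 *    glGetInternalformativ(GL_RENDERBUFFER, GL_RGBA8,
 *                          GL_SAMPLES, num, samples);
 *
 * would see num == 3 and samples == {8, 4, 2}, matching the switch above.
 */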

const char *const brw_vendor_string = "Intel Open Source Technology Center";

const char *
brw_get_renderer_string(unsigned deviceID)
{
   const char *chipset;
   static char buffer[128];

   switch (deviceID) {
#undef CHIPSET
#define CHIPSET(id, symbol, str) case id: chipset = str; break;
#include "pci_ids/i965_pci_ids.h"
   default:
      chipset = "Unknown Intel Chipset";
      break;
   }

   (void) driGetRendererString(buffer, chipset, 0);
   return buffer;
}
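
/* The switch above is an X-macro expansion: every CHIPSET(id, symbol, str)
 * entry in pci_ids/i965_pci_ids.h becomes "case id: chipset = str; break;".
 * A hypothetical entry, for illustration:
 *
 *    CHIPSET(0x0162, ivb_gt2, "Intel(R) Ivybridge Desktop")
 *
 * expands to: case 0x0162: chipset = "Intel(R) Ivybridge Desktop"; break;
 */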

static const GLubyte *
intel_get_string(struct gl_context * ctx, GLenum name)
{
   const struct brw_context *const brw = brw_context(ctx);

   switch (name) {
   case GL_VENDOR:
      return (GLubyte *) brw_vendor_string;

   case GL_RENDERER:
      return
         (GLubyte *) brw_get_renderer_string(brw->intelScreen->deviceID);

   default:
      return NULL;
   }
}

static void
intel_viewport(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);
   __DRIcontext *driContext = brw->driContext;

   if (_mesa_is_winsys_fbo(ctx->DrawBuffer)) {
      dri2InvalidateDrawable(driContext->driDrawablePriv);
      dri2InvalidateDrawable(driContext->driReadablePriv);
   }
}

static void
intel_update_state(struct gl_context * ctx, GLuint new_state)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_texture_object *tex_obj;
   struct intel_renderbuffer *depth_irb;

   if (ctx->swrast_context)
      _swrast_InvalidateState(ctx, new_state);
   _vbo_InvalidateState(ctx, new_state);

   brw->NewGLState |= new_state;

   _mesa_unlock_context_textures(ctx);

   /* Resolve the depth buffer's HiZ buffer. */
   depth_irb = intel_get_renderbuffer(ctx->DrawBuffer, BUFFER_DEPTH);
   if (depth_irb)
      intel_renderbuffer_resolve_hiz(brw, depth_irb);

   /* Resolve depth buffer and render cache of each enabled texture. */
   int maxEnabledUnit = ctx->Texture._MaxEnabledTexImageUnit;
   for (int i = 0; i <= maxEnabledUnit; i++) {
      if (!ctx->Texture.Unit[i]._Current)
         continue;
      tex_obj = intel_texture_object(ctx->Texture.Unit[i]._Current);
      if (!tex_obj || !tex_obj->mt)
         continue;
      intel_miptree_all_slices_resolve_depth(brw, tex_obj->mt);
      intel_miptree_resolve_color(brw, tex_obj->mt);
      brw_render_cache_set_check_flush(brw, tex_obj->mt->bo);
   }

   _mesa_lock_context_textures(ctx);
}

#define flushFront(screen)      ((screen)->image.loader ? (screen)->image.loader->flushFrontBuffer : (screen)->dri2.loader->flushFrontBuffer)
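
/* flushFront(screen) picks the front-buffer flush hook from whichever loader
 * interface the screen was bound with: the DRI image loader when present,
 * otherwise the classic DRI2 loader.  Both hooks take the same
 * (__DRIdrawable *, void *loaderPrivate) arguments, which is what lets
 * intel_flush_front() below call the result of either branch identically.
 */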

static void
intel_flush_front(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);
   __DRIcontext *driContext = brw->driContext;
   __DRIdrawable *driDrawable = driContext->driDrawablePriv;
   __DRIscreen *const screen = brw->intelScreen->driScrnPriv;

   if (brw->front_buffer_dirty && _mesa_is_winsys_fbo(ctx->DrawBuffer)) {
      if (flushFront(screen) && driDrawable &&
          driDrawable->loaderPrivate) {

         /* Resolve before flushing FAKE_FRONT_LEFT to FRONT_LEFT.
          *
          * This potentially resolves both front and back buffer. It
          * is unnecessary to resolve the back, but harms nothing except
          * performance. And no one cares about front-buffer render
          * performance.
          */
         intel_resolve_for_dri2_flush(brw, driDrawable);
         intel_batchbuffer_flush(brw);

         flushFront(screen)(driDrawable, driDrawable->loaderPrivate);

         /* We set the dirty bit in intel_prepare_render() if we're
          * front buffer rendering once we get there.
          */
         brw->front_buffer_dirty = false;
      }
   }
}

static void
intel_glFlush(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);

   intel_batchbuffer_flush(brw);
   intel_flush_front(ctx);
   if (brw_is_front_buffer_drawing(ctx->DrawBuffer))
      brw->need_throttle = true;
}

static void
intel_finish(struct gl_context * ctx)
{
   struct brw_context *brw = brw_context(ctx);

   intel_glFlush(ctx);

   if (brw->batch.last_bo)
      drm_intel_bo_wait_rendering(brw->batch.last_bo);
}

static void
brw_init_driver_functions(struct brw_context *brw,
                          struct dd_function_table *functions)
{
   _mesa_init_driver_functions(functions);

   /* GLX uses DRI2 invalidate events to handle window resizing.
    * Unfortunately, EGL does not - libEGL is written in XCB (not Xlib),
    * which doesn't provide a mechanism for snooping the event queues.
    *
    * So EGL still relies on viewport hacks to handle window resizing.
    * This should go away with DRI3000.
    */
   if (!brw->driContext->driScreenPriv->dri2.useInvalidate)
      functions->Viewport = intel_viewport;

   functions->Flush = intel_glFlush;
   functions->Finish = intel_finish;
   functions->GetString = intel_get_string;
   functions->UpdateState = intel_update_state;

   intelInitTextureFuncs(functions);
   intelInitTextureImageFuncs(functions);
   intelInitTextureSubImageFuncs(functions);
   intelInitTextureCopyImageFuncs(functions);
   intelInitCopyImageFuncs(functions);
   intelInitClearFuncs(functions);
   intelInitBufferFuncs(functions);
   intelInitPixelFuncs(functions);
   intelInitBufferObjectFuncs(functions);
   intel_init_syncobj_functions(functions);
   brw_init_object_purgeable_functions(functions);

   brwInitFragProgFuncs( functions );
   brw_init_common_queryobj_functions(functions);
   if (brw->gen >= 6)
      gen6_init_queryobj_functions(functions);
   else
      gen4_init_queryobj_functions(functions);

   functions->QuerySamplesForFormat = brw_query_samples_for_format;

   functions->NewTransformFeedback = brw_new_transform_feedback;
   functions->DeleteTransformFeedback = brw_delete_transform_feedback;
   functions->GetTransformFeedbackVertexCount =
      brw_get_transform_feedback_vertex_count;
   if (brw->gen >= 7) {
      functions->BeginTransformFeedback = gen7_begin_transform_feedback;
      functions->EndTransformFeedback = gen7_end_transform_feedback;
      functions->PauseTransformFeedback = gen7_pause_transform_feedback;
      functions->ResumeTransformFeedback = gen7_resume_transform_feedback;
   } else {
      functions->BeginTransformFeedback = brw_begin_transform_feedback;
      functions->EndTransformFeedback = brw_end_transform_feedback;
   }

   if (brw->gen >= 6)
      functions->GetSamplePosition = gen6_get_sample_position;
}

static void
brw_initialize_context_constants(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   unsigned max_samplers =
      brw->gen >= 8 || brw->is_haswell ? BRW_MAX_TEX_UNIT : 16;

   ctx->Const.QueryCounterBits.Timestamp = 36;

   ctx->Const.StripTextureBorder = true;

   ctx->Const.MaxDualSourceDrawBuffers = 1;
   ctx->Const.MaxDrawBuffers = BRW_MAX_DRAW_BUFFERS;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits = max_samplers;
   ctx->Const.MaxTextureCoordUnits = 8; /* Mesa limit */
   ctx->Const.MaxTextureUnits =
      MIN2(ctx->Const.MaxTextureCoordUnits,
           ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits);
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxTextureImageUnits = max_samplers;
   if (brw->gen >= 6)
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxTextureImageUnits = max_samplers;
   else
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxTextureImageUnits = 0;
   if (_mesa_extension_override_enables.ARB_compute_shader) {
      ctx->Const.Program[MESA_SHADER_COMPUTE].MaxTextureImageUnits = BRW_MAX_TEX_UNIT;
      ctx->Const.MaxUniformBufferBindings += 12;
   } else {
      ctx->Const.Program[MESA_SHADER_COMPUTE].MaxTextureImageUnits = 0;
   }
   ctx->Const.MaxCombinedTextureImageUnits =
      ctx->Const.Program[MESA_SHADER_VERTEX].MaxTextureImageUnits +
      ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits +
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxTextureImageUnits +
      ctx->Const.Program[MESA_SHADER_COMPUTE].MaxTextureImageUnits;

   ctx->Const.MaxTextureLevels = 14; /* 8192 */
   if (ctx->Const.MaxTextureLevels > MAX_TEXTURE_LEVELS)
      ctx->Const.MaxTextureLevels = MAX_TEXTURE_LEVELS;
   ctx->Const.Max3DTextureLevels = 12; /* 2048 */
   ctx->Const.MaxCubeTextureLevels = 14; /* 8192 */
   ctx->Const.MaxTextureMbytes = 1536;

   if (brw->gen >= 7)
      ctx->Const.MaxArrayTextureLayers = 2048;
   else
      ctx->Const.MaxArrayTextureLayers = 512;

   ctx->Const.MaxTextureRectSize = 1 << 12;

   ctx->Const.MaxTextureMaxAnisotropy = 16.0;

   ctx->Const.MaxRenderbufferSize = 8192;

   /* Hardware only supports a limited number of transform feedback buffers.
    * So we need to override the Mesa default (which is based only on software
    * limits).
    */
   ctx->Const.MaxTransformFeedbackBuffers = BRW_MAX_SOL_BUFFERS;

   /* On Gen6, in the worst case, we use up one binding table entry per
    * transform feedback component (see comments above the definition of
    * BRW_MAX_SOL_BINDINGS, in brw_context.h), so we need to advertise a value
    * for MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS equal to
    * BRW_MAX_SOL_BINDINGS.
    *
    * In "separate components" mode, we need to divide this value by
    * BRW_MAX_SOL_BUFFERS, so that the total number of binding table entries
    * used up by all buffers will not exceed BRW_MAX_SOL_BINDINGS.
    */
   ctx->Const.MaxTransformFeedbackInterleavedComponents = BRW_MAX_SOL_BINDINGS;
   ctx->Const.MaxTransformFeedbackSeparateComponents =
      BRW_MAX_SOL_BINDINGS / BRW_MAX_SOL_BUFFERS;
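
   /* Worked example (assuming the brw_context.h values BRW_MAX_SOL_BINDINGS
    * == 64 and BRW_MAX_SOL_BUFFERS == 4): interleaved mode advertises 64
    * components, and separate mode advertises 64 / 4 = 16 components per
    * buffer, so all four buffers together can never exceed the 64 binding
    * table entries budgeted for SOL.
    */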

   ctx->Const.AlwaysUseGetTransformFeedbackVertexCount = true;

   int max_samples;
   const int *msaa_modes = intel_supported_msaa_modes(brw->intelScreen);
   const int clamp_max_samples =
      driQueryOptioni(&brw->optionCache, "clamp_max_samples");

   if (clamp_max_samples < 0) {
      max_samples = msaa_modes[0];
   } else {
      /* Select the largest supported MSAA mode that does not exceed
       * clamp_max_samples.
       */
      max_samples = 0;
      for (int i = 0; msaa_modes[i] != 0; ++i) {
         if (msaa_modes[i] <= clamp_max_samples) {
            max_samples = msaa_modes[i];
            break;
         }
      }
   }
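
   /* Example of the clamping above: with msaa_modes = {8, 4, 0} (a Gen7
    * part) and clamp_max_samples=6 in drirc, the loop skips 8 and selects
    * 4.  If no supported mode fits under the clamp, max_samples stays 0 and
    * multisampling is effectively disabled.
    */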

   ctx->Const.MaxSamples = max_samples;
   ctx->Const.MaxColorTextureSamples = max_samples;
   ctx->Const.MaxDepthTextureSamples = max_samples;
   ctx->Const.MaxIntegerSamples = max_samples;

   /* gen6_set_sample_maps() sets SampleMap{2,4,8}x variables which are used
    * to map indices of rectangular grid to sample numbers within a pixel.
    * These variables are used by GL_EXT_framebuffer_multisample_blit_scaled
    * extension implementation. For more details see the comment above
    * gen6_set_sample_maps() definition.
    */
   gen6_set_sample_maps(ctx);

   if (brw->gen >= 7)
      ctx->Const.MaxProgramTextureGatherComponents = 4;
   else if (brw->gen == 6)
      ctx->Const.MaxProgramTextureGatherComponents = 1;

   ctx->Const.MinLineWidth = 1.0;
   ctx->Const.MinLineWidthAA = 1.0;
   if (brw->gen >= 9 || brw->is_cherryview) {
      ctx->Const.MaxLineWidth = 40.0;
      ctx->Const.MaxLineWidthAA = 40.0;
      ctx->Const.LineWidthGranularity = 0.125;
   } else if (brw->gen >= 6) {
      ctx->Const.MaxLineWidth = 7.875;
      ctx->Const.MaxLineWidthAA = 7.875;
      ctx->Const.LineWidthGranularity = 0.125;
   } else {
      ctx->Const.MaxLineWidth = 7.0;
      ctx->Const.MaxLineWidthAA = 7.0;
      ctx->Const.LineWidthGranularity = 0.5;
   }

   ctx->Const.MinPointSize = 1.0;
   ctx->Const.MinPointSizeAA = 1.0;
   ctx->Const.MaxPointSize = 255.0;
   ctx->Const.MaxPointSizeAA = 255.0;
   ctx->Const.PointSizeGranularity = 1.0;

   if (brw->gen >= 5 || brw->is_g4x)
      ctx->Const.MaxClipPlanes = 8;

   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeInstructions = 16 * 1024;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxAluInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxTexInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxTexIndirections = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAluInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTexInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTexIndirections = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAttribs = 16;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTemps = 256;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAddressRegs = 1;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeParameters = 1024;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxEnvParams =
      MIN2(ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeParameters,
           ctx->Const.Program[MESA_SHADER_VERTEX].MaxEnvParams);

   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeInstructions = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAluInstructions = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTexInstructions = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTexIndirections = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAttribs = 12;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTemps = 256;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAddressRegs = 0;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeParameters = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxEnvParams =
      MIN2(ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeParameters,
           ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxEnvParams);

   /* Fragment shaders use real, 32-bit twos-complement integers for all
    * integer types.
    */
   ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.RangeMin = 31;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.RangeMax = 30;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.Precision = 0;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].HighInt = ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MediumInt = ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt;

   if (brw->gen >= 7) {
      ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxAtomicCounters = MAX_ATOMIC_COUNTERS;
      ctx->Const.Program[MESA_SHADER_VERTEX].MaxAtomicCounters = MAX_ATOMIC_COUNTERS;
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxAtomicCounters = MAX_ATOMIC_COUNTERS;
      ctx->Const.Program[MESA_SHADER_COMPUTE].MaxAtomicCounters = MAX_ATOMIC_COUNTERS;
      ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxAtomicBuffers = BRW_MAX_ABO;
      ctx->Const.Program[MESA_SHADER_VERTEX].MaxAtomicBuffers = BRW_MAX_ABO;
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxAtomicBuffers = BRW_MAX_ABO;
      ctx->Const.Program[MESA_SHADER_COMPUTE].MaxAtomicBuffers = BRW_MAX_ABO;
      ctx->Const.MaxCombinedAtomicBuffers = 3 * BRW_MAX_ABO;
   }

   /* Gen6 converts quads to polygons at the beginning of the 3D pipeline,
    * but we're not sure how the conversion handles vertex order, which
    * affects the provoking vertex decision. Always use the last vertex
    * convention for quad primitives, which works as expected for now.
    */
   if (brw->gen >= 6)
      ctx->Const.QuadsFollowProvokingVertexConvention = false;

   ctx->Const.NativeIntegers = true;
   ctx->Const.VertexID_is_zero_based = true;

   /* Regarding the CMP instruction, the Ivybridge PRM says:
    *
    *   "For each enabled channel 0b or 1b is assigned to the appropriate flag
    *    bit and 0/all zeros or all ones (e.g, byte 0xFF, word 0xFFFF, DWord
    *    0xFFFFFFFF) is assigned to dst."
    *
    * but PRMs for earlier generations say
    *
    *   "In dword format, one GRF may store up to 8 results. When the register
    *    is used later as a vector of Booleans, as only LSB at each channel
    *    contains meaning [sic] data, software should make sure all higher bits
    *    are masked out (e.g. by 'and-ing' an [sic] 0x01 constant)."
    *
    * We select the representation of a true boolean uniform to be ~0, and fix
    * the results of Gen <= 5 CMP instructions with -(result & 1).
    */
   ctx->Const.UniformBooleanTrue = ~0;
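
   /* Worked example of the Gen <= 5 fix-up: a CMP result channel has 0 or 1
    * in its LSB and undefined upper bits, so masking and negating gives
    * -(0x???????1 & 1) = -1 = 0xFFFFFFFF (~0, true) and
    * -(0x???????0 & 1) =  0 = 0x00000000 (false), matching the ~0
    * representation chosen above.
    */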

   /* From the gen4 PRM, volume 4 page 127:
    *
    *    "For SURFTYPE_BUFFER non-rendertarget surfaces, this field specifies
    *     the base address of the first element of the surface, computed in
    *     software by adding the surface base address to the byte offset of
    *     the element in the buffer."
    *
    * However, unaligned accesses are slower, so enforce buffer alignment.
    */
   ctx->Const.UniformBufferOffsetAlignment = 16;
   ctx->Const.TextureBufferOffsetAlignment = 16;

   if (brw->gen >= 6) {
      ctx->Const.MaxVarying = 32;
      ctx->Const.Program[MESA_SHADER_VERTEX].MaxOutputComponents = 128;
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxInputComponents = 64;
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxOutputComponents = 128;
      ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxInputComponents = 128;
   }

   /* We want the GLSL compiler to emit code that uses condition codes */
   for (int i = 0; i < MESA_SHADER_STAGES; i++) {
      ctx->Const.ShaderCompilerOptions[i].MaxIfDepth = brw->gen < 6 ? 16 : UINT_MAX;
      ctx->Const.ShaderCompilerOptions[i].EmitCondCodes = true;
      ctx->Const.ShaderCompilerOptions[i].EmitNoNoise = true;
      ctx->Const.ShaderCompilerOptions[i].EmitNoMainReturn = true;
      ctx->Const.ShaderCompilerOptions[i].EmitNoIndirectInput = true;
      ctx->Const.ShaderCompilerOptions[i].EmitNoIndirectOutput =
         (i == MESA_SHADER_FRAGMENT);
      ctx->Const.ShaderCompilerOptions[i].EmitNoIndirectTemp =
         (i == MESA_SHADER_FRAGMENT);
      ctx->Const.ShaderCompilerOptions[i].EmitNoIndirectUniform = false;
      ctx->Const.ShaderCompilerOptions[i].LowerClipDistance = true;
   }

   ctx->Const.ShaderCompilerOptions[MESA_SHADER_VERTEX].OptimizeForAOS = true;
   ctx->Const.ShaderCompilerOptions[MESA_SHADER_GEOMETRY].OptimizeForAOS = true;

   if (brw->scalar_vs) {
      /* If we're using the scalar backend for vertex shaders, we need to
       * configure these accordingly.
       */
      ctx->Const.ShaderCompilerOptions[MESA_SHADER_VERTEX].EmitNoIndirectOutput = true;
      ctx->Const.ShaderCompilerOptions[MESA_SHADER_VERTEX].EmitNoIndirectTemp = true;
      ctx->Const.ShaderCompilerOptions[MESA_SHADER_VERTEX].OptimizeForAOS = false;
   }

   /* ARB_viewport_array */
   if (brw->gen >= 7 && ctx->API == API_OPENGL_CORE) {
      ctx->Const.MaxViewports = GEN7_NUM_VIEWPORTS;
      ctx->Const.ViewportSubpixelBits = 0;

      /* Cast to float before negating because MaxViewportWidth is unsigned.
       */
      ctx->Const.ViewportBounds.Min = -(float)ctx->Const.MaxViewportWidth;
      ctx->Const.ViewportBounds.Max = ctx->Const.MaxViewportWidth;
   }

   /* ARB_gpu_shader5 */
   if (brw->gen >= 7)
      ctx->Const.MaxVertexStreams = MIN2(4, MAX_VERTEX_STREAMS);
}

/**
 * Process driconf (drirc) options, setting appropriate context flags.
 *
 * intelInitExtensions still pokes at optionCache directly, in order to
 * avoid advertising various extensions.  No flags are set, so it makes
 * sense to continue doing that there.
 */
static void
brw_process_driconf_options(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   driOptionCache *options = &brw->optionCache;
   driParseConfigFiles(options, &brw->intelScreen->optionCache,
                       brw->driContext->driScreenPriv->myNum, "i965");

   int bo_reuse_mode = driQueryOptioni(options, "bo_reuse");
   switch (bo_reuse_mode) {
   case DRI_CONF_BO_REUSE_DISABLED:
      break;
   case DRI_CONF_BO_REUSE_ALL:
      intel_bufmgr_gem_enable_reuse(brw->bufmgr);
      break;
   }

   if (!driQueryOptionb(options, "hiz")) {
      brw->has_hiz = false;
      /* On gen6, you can only do separate stencil with HIZ. */
      if (brw->gen == 6)
         brw->has_separate_stencil = false;
   }

   if (driQueryOptionb(options, "always_flush_batch")) {
      fprintf(stderr, "flushing batchbuffer before/after each draw call\n");
      brw->always_flush_batch = true;
   }

   if (driQueryOptionb(options, "always_flush_cache")) {
      fprintf(stderr, "flushing GPU caches before/after each draw call\n");
      brw->always_flush_cache = true;
   }

   if (driQueryOptionb(options, "disable_throttling")) {
      fprintf(stderr, "disabling flush throttling\n");
      brw->disable_throttling = true;
   }

   brw->precompile = driQueryOptionb(&brw->optionCache, "shader_precompile");

   ctx->Const.ForceGLSLExtensionsWarn =
      driQueryOptionb(options, "force_glsl_extensions_warn");

   ctx->Const.DisableGLSLLineContinuations =
      driQueryOptionb(options, "disable_glsl_line_continuations");

   ctx->Const.AllowGLSLExtensionDirectiveMidShader =
      driQueryOptionb(options, "allow_glsl_extension_directive_midshader");
}
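
/* For illustration, a minimal drirc fragment (an assumed ~/.drirc; the
 * option names match the driQueryOption*() calls above) that this function
 * would pick up:
 *
 *    <driconf>
 *       <device screen="0" driver="i965">
 *          <application name="Default">
 *             <option name="bo_reuse" value="1"/>
 *             <option name="shader_precompile" value="true"/>
 *             <option name="always_flush_batch" value="false"/>
 *          </application>
 *       </device>
 *    </driconf>
 */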

GLboolean
brwCreateContext(gl_api api,
                 const struct gl_config *mesaVis,
                 __DRIcontext *driContextPriv,
                 unsigned major_version,
                 unsigned minor_version,
                 uint32_t flags,
                 bool notify_reset,
                 unsigned *dri_ctx_error,
                 void *sharedContextPrivate)
{
   __DRIscreen *sPriv = driContextPriv->driScreenPriv;
   struct gl_context *shareCtx = (struct gl_context *) sharedContextPrivate;
   struct intel_screen *screen = sPriv->driverPrivate;
   const struct brw_device_info *devinfo = screen->devinfo;
   struct dd_function_table functions;

   /* Only allow the __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS flag if the kernel
    * provides us with context reset notifications.
    */
   uint32_t allowed_flags = __DRI_CTX_FLAG_DEBUG
      | __DRI_CTX_FLAG_FORWARD_COMPATIBLE;

   if (screen->has_context_reset_notification)
      allowed_flags |= __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS;

   if (flags & ~allowed_flags) {
      *dri_ctx_error = __DRI_CTX_ERROR_UNKNOWN_FLAG;
      return false;
   }

   struct brw_context *brw = rzalloc(NULL, struct brw_context);
   if (!brw) {
      fprintf(stderr, "%s: failed to alloc context\n", __FUNCTION__);
      *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
      return false;
   }

   driContextPriv->driverPrivate = brw;
   brw->driContext = driContextPriv;
   brw->intelScreen = screen;
   brw->bufmgr = screen->bufmgr;

   brw->gen = devinfo->gen;
   brw->gt = devinfo->gt;
   brw->is_g4x = devinfo->is_g4x;
   brw->is_baytrail = devinfo->is_baytrail;
   brw->is_haswell = devinfo->is_haswell;
   brw->is_cherryview = devinfo->is_cherryview;
   brw->has_llc = devinfo->has_llc;
   brw->has_hiz = devinfo->has_hiz_and_separate_stencil;
   brw->has_separate_stencil = devinfo->has_hiz_and_separate_stencil;
   brw->has_pln = devinfo->has_pln;
   brw->has_compr4 = devinfo->has_compr4;
   brw->has_surface_tile_offset = devinfo->has_surface_tile_offset;
   brw->has_negative_rhw_bug = devinfo->has_negative_rhw_bug;
   brw->needs_unlit_centroid_workaround =
      devinfo->needs_unlit_centroid_workaround;

   brw->must_use_separate_stencil = screen->hw_must_use_separate_stencil;
   brw->has_swizzling = screen->hw_has_swizzling;

   brw->vs.base.stage = MESA_SHADER_VERTEX;
   brw->gs.base.stage = MESA_SHADER_GEOMETRY;
   brw->wm.base.stage = MESA_SHADER_FRAGMENT;
   if (brw->gen >= 8) {
      gen8_init_vtable_surface_functions(brw);
      brw->vtbl.emit_depth_stencil_hiz = gen8_emit_depth_stencil_hiz;
   } else if (brw->gen >= 7) {
      gen7_init_vtable_surface_functions(brw);
      brw->vtbl.emit_depth_stencil_hiz = gen7_emit_depth_stencil_hiz;
   } else if (brw->gen >= 6) {
      gen6_init_vtable_surface_functions(brw);
      brw->vtbl.emit_depth_stencil_hiz = gen6_emit_depth_stencil_hiz;
   } else {
      gen4_init_vtable_surface_functions(brw);
      brw->vtbl.emit_depth_stencil_hiz = brw_emit_depth_stencil_hiz;
   }

   brw_init_driver_functions(brw, &functions);

   if (notify_reset)
      functions.GetGraphicsResetStatus = brw_get_graphics_reset_status;

   struct gl_context *ctx = &brw->ctx;

   if (!_mesa_initialize_context(ctx, api, mesaVis, shareCtx, &functions)) {
      *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
      fprintf(stderr, "%s: failed to init mesa context\n", __FUNCTION__);
      intelDestroyContext(driContextPriv);
      return false;
   }

   driContextSetFlags(ctx, flags);

   /* Initialize the software rasterizer and helper modules.
    *
    * As of GL 3.1 core, the gen4+ driver doesn't need the swrast context for
    * software fallbacks (which we have to support on legacy GL to do weird
    * glDrawPixels(), glBitmap(), and other functions).
    */
   if (api != API_OPENGL_CORE && api != API_OPENGLES2) {
      _swrast_CreateContext(ctx);
   }

   _vbo_CreateContext(ctx);
   if (ctx->swrast_context) {
      _tnl_CreateContext(ctx);
      TNL_CONTEXT(ctx)->Driver.RunPipeline = _tnl_run_pipeline;
      _swsetup_CreateContext(ctx);

      /* Configure swrast to match hardware characteristics: */
      _swrast_allow_pixel_fog(ctx, false);
      _swrast_allow_vertex_fog(ctx, true);
   }

   _mesa_meta_init(ctx);

   brw_process_driconf_options(brw);
   brw_process_intel_debug_variable(brw);

   if (brw->gen >= 8 && !(INTEL_DEBUG & DEBUG_VEC4VS))
      brw->scalar_vs = true;
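
   /* DEBUG_VEC4VS comes from the INTEL_DEBUG environment variable (the
    * "vec4vs" token, assuming the current intel_debug.c flag table), so the
    * vec4 VS backend can still be forced on Gen8+ for comparison, e.g. by
    * setting INTEL_DEBUG=vec4vs before launching the application.
    */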

   brw_initialize_context_constants(brw);

   ctx->Const.ResetStrategy = notify_reset
      ? GL_LOSE_CONTEXT_ON_RESET_ARB : GL_NO_RESET_NOTIFICATION_ARB;

   /* Reinitialize the context point state.  It depends on ctx->Const values. */
   _mesa_init_point(ctx);

   intel_fbo_init(brw);

   intel_batchbuffer_init(brw);

   if (brw->gen >= 6) {
      /* Create a new hardware context.  Using a hardware context means that
       * our GPU state will be saved/restored on context switch, allowing us
       * to assume that the GPU is in the same state we left it in.
       *
       * This is required for transform feedback buffer offsets, query objects,
       * and also allows us to reduce how much state we have to emit.
       */
      brw->hw_ctx = drm_intel_gem_context_create(brw->bufmgr);

      if (!brw->hw_ctx) {
         fprintf(stderr, "Gen6+ requires Kernel 3.6 or later.\n");
         intelDestroyContext(driContextPriv);
         return false;
      }
   }

   brw_init_state(brw);

   intelInitExtensions(ctx);

   brw_init_surface_formats(brw);

   brw->max_vs_threads = devinfo->max_vs_threads;
   brw->max_gs_threads = devinfo->max_gs_threads;
   brw->max_wm_threads = devinfo->max_wm_threads;
   brw->urb.size = devinfo->urb.size;
   brw->urb.min_vs_entries = devinfo->urb.min_vs_entries;
   brw->urb.max_vs_entries = devinfo->urb.max_vs_entries;
   brw->urb.max_gs_entries = devinfo->urb.max_gs_entries;

   /* Estimate the size of the mappable aperture into the GTT.  There's an
    * ioctl to get the whole GTT size, but not one to get the mappable subset.
    * It turns out it's basically always 256MB, though some ancient hardware
    * was smaller.
    */
   uint32_t gtt_size = 256 * 1024 * 1024;

   /* We don't want to map two objects such that a memcpy between them would
    * just fault one mapping in and then the other over and over forever.  So
    * we would need to divide the GTT size by 2.  Additionally, some GTT is
    * taken up by things like the framebuffer and the ringbuffer and such, so
    * be more conservative.
    */
   brw->max_gtt_map_object_size = gtt_size / 4;
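
   /* With the 256MB estimate above, that caps individual mappable objects at
    * 64MB, leaving headroom for the second object of a memcpy plus the
    * framebuffer, ring buffers, and other residents mentioned above.
    */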

   if (brw->gen == 6)
      brw->urb.gs_present = false;

   brw->prim_restart.in_progress = false;
   brw->prim_restart.enable_cut_index = false;
   brw->gs.enabled = false;
   brw->sf.viewport_transform_enable = true;

   ctx->VertexProgram._MaintainTnlProgram = true;
   ctx->FragmentProgram._MaintainTexEnvProgram = true;

   brw_draw_init( brw );

   if ((flags & __DRI_CTX_FLAG_DEBUG) != 0) {
      /* Turn on some extra GL_ARB_debug_output generation. */
      brw->perf_debug = true;
   }

   if ((flags & __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS) != 0)
      ctx->Const.ContextFlags |= GL_CONTEXT_FLAG_ROBUST_ACCESS_BIT_ARB;

   if (INTEL_DEBUG & DEBUG_SHADER_TIME)
      brw_init_shader_time(brw);

   _mesa_compute_version(ctx);

   _mesa_initialize_dispatch_tables(ctx);
   _mesa_initialize_vbo_vtxfmt(ctx);

   if (ctx->Extensions.AMD_performance_monitor) {
      brw_init_performance_monitors(brw);
   }

   vbo_use_buffer_objects(ctx);
   vbo_always_unmap_buffers(ctx);

   return true;
}

void
intelDestroyContext(__DRIcontext * driContextPriv)
{
   struct brw_context *brw =
      (struct brw_context *) driContextPriv->driverPrivate;
   struct gl_context *ctx = &brw->ctx;

   assert(brw); /* should never be null */
   if (!brw)
      return;

   /* Dump a final BMP in case the application doesn't call SwapBuffers */
   if (INTEL_DEBUG & DEBUG_AUB) {
      intel_batchbuffer_flush(brw);
      aub_dump_bmp(&brw->ctx);
   }

   _mesa_meta_free(&brw->ctx);
   brw_meta_fast_clear_free(brw);

   if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
      /* Force a report. */
      brw->shader_time.report_time = 0;

      brw_collect_and_report_shader_time(brw);
      brw_destroy_shader_time(brw);
   }

   brw_destroy_state(brw);
   brw_draw_destroy(brw);

   drm_intel_bo_unreference(brw->curbe.curbe_bo);

   drm_intel_gem_context_destroy(brw->hw_ctx);

   if (ctx->swrast_context) {
      _swsetup_DestroyContext(&brw->ctx);
      _tnl_DestroyContext(&brw->ctx);
   }
   _vbo_DestroyContext(&brw->ctx);

   if (ctx->swrast_context)
      _swrast_DestroyContext(&brw->ctx);

   intel_batchbuffer_free(brw);

   drm_intel_bo_unreference(brw->first_post_swapbuffers_batch);
   brw->first_post_swapbuffers_batch = NULL;

   driDestroyOptionCache(&brw->optionCache);

   /* free the Mesa context */
   _mesa_free_context_data(&brw->ctx);

   ralloc_free(brw);
   driContextPriv->driverPrivate = NULL;
}

GLboolean
intelUnbindContext(__DRIcontext * driContextPriv)
{
   /* Unset current context and dispatch table */
   _mesa_make_current(NULL, NULL, NULL);

   return true;
}

/**
 * Fixes up the context for GLES2/3 with our default-to-sRGB-capable behavior
 * on window system framebuffers.
 *
 * Desktop GL is fairly reasonable in its handling of sRGB: You can ask if
 * your renderbuffer can do sRGB encode, and you can flip a switch that does
 * sRGB encode if the renderbuffer can handle it.  You can ask specifically
 * for a visual where you're guaranteed to be capable, but it turns out that
 * everyone just makes all their ARGB8888 visuals capable and doesn't offer
 * incapable ones, because there's no difference between the two in resources
 * used.  Applications thus get built that accidentally rely on the default
 * visual choice being sRGB, so we make ours sRGB capable.  Everything sounds
 * great...
 *
 * But for GLES2/3, they decided that it was silly to not turn on sRGB encode
 * for sRGB renderbuffers you made with the GL_EXT_texture_sRGB equivalent.
 * So they removed the enable knob and made it "if the renderbuffer is sRGB
 * capable, do sRGB encode".  Then, for your window system renderbuffers, you
 * can ask for sRGB visuals and get sRGB encode, or not ask for sRGB visuals
 * and get no sRGB encode (assuming that both kinds of visual are available).
 * Thus our choice to support sRGB by default on our visuals for desktop would
 * result in broken rendering of GLES apps that aren't expecting sRGB encode.
 *
 * Unfortunately, renderbuffer setup happens before a context is created.  So
 * in intel_screen.c we always set up sRGB, and here, if you're a GLES2/3
 * context (without an sRGB visual, though we don't have sRGB visuals exposed
 * yet), we go turn that back off before anyone finds out.
 */
static void
intel_gles3_srgb_workaround(struct brw_context *brw,
                            struct gl_framebuffer *fb)
{
   struct gl_context *ctx = &brw->ctx;

   if (_mesa_is_desktop_gl(ctx) || !fb->Visual.sRGBCapable)
      return;

   /* Some day when we support the sRGB capable bit on visuals available for
    * GLES, we'll need to respect that and not disable things here.
    */
   fb->Visual.sRGBCapable = false;
   for (int i = 0; i < BUFFER_COUNT; i++) {
      if (fb->Attachment[i].Renderbuffer &&
          fb->Attachment[i].Renderbuffer->Format == MESA_FORMAT_B8G8R8A8_SRGB) {
         fb->Attachment[i].Renderbuffer->Format = MESA_FORMAT_B8G8R8A8_UNORM;
      }
   }
}

GLboolean
intelMakeCurrent(__DRIcontext * driContextPriv,
                 __DRIdrawable * driDrawPriv,
                 __DRIdrawable * driReadPriv)
{
   struct brw_context *brw;
   GET_CURRENT_CONTEXT(curCtx);

   if (driContextPriv)
      brw = (struct brw_context *) driContextPriv->driverPrivate;
   else
      brw = NULL;

   /* According to the glXMakeCurrent() man page: "Pending commands to
    * the previous context, if any, are flushed before it is released."
    * But only flush if we're actually changing contexts.
    */
   if (brw_context(curCtx) && brw_context(curCtx) != brw) {
      _mesa_flush(curCtx);
   }

   if (driContextPriv) {
      struct gl_context *ctx = &brw->ctx;
      struct gl_framebuffer *fb, *readFb;

      if (driDrawPriv == NULL) {
         fb = _mesa_get_incomplete_framebuffer();
      } else {
         fb = driDrawPriv->driverPrivate;
         driContextPriv->dri2.draw_stamp = driDrawPriv->dri2.stamp - 1;
      }

      if (driReadPriv == NULL) {
         readFb = _mesa_get_incomplete_framebuffer();
      } else {
         readFb = driReadPriv->driverPrivate;
         driContextPriv->dri2.read_stamp = driReadPriv->dri2.stamp - 1;
      }

      /* The sRGB workaround changes the renderbuffer's format.  We must change
       * the format before the renderbuffer's miptree gets allocated, otherwise
       * the formats of the renderbuffer and its miptree will differ.
       */
      intel_gles3_srgb_workaround(brw, fb);
      intel_gles3_srgb_workaround(brw, readFb);

      /* If the context viewport hasn't been initialized, force a call out to
       * the loader to get buffers so we have a drawable size for the initial
       * viewport. */
      if (!brw->ctx.ViewportInitialized)
         intel_prepare_render(brw);

      _mesa_make_current(ctx, fb, readFb);
   } else {
      _mesa_make_current(NULL, NULL, NULL);
   }

   return true;
}

void
intel_resolve_for_dri2_flush(struct brw_context *brw,
                             __DRIdrawable *drawable)
{
   if (brw->gen < 6) {
      /* MSAA and fast color clear are not supported, so don't waste time
       * checking whether a resolve is needed.
       */
      return;
   }

   struct gl_framebuffer *fb = drawable->driverPrivate;
   struct intel_renderbuffer *rb;

   /* Usually, only the back buffer will need to be downsampled. However,
    * the front buffer will also need it if the user has rendered into it.
    */
   static const gl_buffer_index buffers[2] = {
      BUFFER_BACK_LEFT,
      BUFFER_FRONT_LEFT,
   };

   for (int i = 0; i < 2; ++i) {
      rb = intel_get_renderbuffer(fb, buffers[i]);
      if (rb == NULL || rb->mt == NULL)
         continue;
      if (rb->mt->num_samples <= 1)
         intel_miptree_resolve_color(brw, rb->mt);
      else
         intel_renderbuffer_downsample(brw, rb);
   }
}

static unsigned
intel_bits_per_pixel(const struct intel_renderbuffer *rb)
{
   return _mesa_get_format_bytes(intel_rb_format(rb)) * 8;
}

static void
intel_query_dri2_buffers(struct brw_context *brw,
                         __DRIdrawable *drawable,
                         __DRIbuffer **buffers,
                         int *count);

static void
intel_process_dri2_buffer(struct brw_context *brw,
                          __DRIdrawable *drawable,
                          __DRIbuffer *buffer,
                          struct intel_renderbuffer *rb,
                          const char *buffer_name);

static void
intel_update_image_buffers(struct brw_context *brw, __DRIdrawable *drawable);

static void
intel_update_dri2_buffers(struct brw_context *brw, __DRIdrawable *drawable)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;
   struct intel_renderbuffer *rb;
   __DRIbuffer *buffers = NULL;
   int i, count;
   const char *region_name;

   /* Set this up front, so that in case our buffers get invalidated
    * while we're getting new buffers, we don't clobber the stamp and
    * thus ignore the invalidate. */
   drawable->lastStamp = drawable->dri2.stamp;

   if (unlikely(INTEL_DEBUG & DEBUG_DRI))
      fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);

   intel_query_dri2_buffers(brw, drawable, &buffers, &count);

   if (buffers == NULL)
      return;

   for (i = 0; i < count; i++) {
      switch (buffers[i].attachment) {
      case __DRI_BUFFER_FRONT_LEFT:
         rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
         region_name = "dri2 front buffer";
         break;

      case __DRI_BUFFER_FAKE_FRONT_LEFT:
         rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
         region_name = "dri2 fake front buffer";
         break;

      case __DRI_BUFFER_BACK_LEFT:
         rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
         region_name = "dri2 back buffer";
         break;

      case __DRI_BUFFER_DEPTH:
      case __DRI_BUFFER_HIZ:
      case __DRI_BUFFER_DEPTH_STENCIL:
      case __DRI_BUFFER_STENCIL:
      case __DRI_BUFFER_ACCUM:
      default:
         fprintf(stderr,
                 "unhandled buffer attach event, attachment type %d\n",
                 buffers[i].attachment);
         return;
      }

      intel_process_dri2_buffer(brw, drawable, &buffers[i], rb, region_name);
   }

}

void
intel_update_renderbuffers(__DRIcontext *context, __DRIdrawable *drawable)
{
   struct brw_context *brw = context->driverPrivate;
   __DRIscreen *screen = brw->intelScreen->driScrnPriv;

   /* Set this up front, so that in case our buffers get invalidated
    * while we're getting new buffers, we don't clobber the stamp and
    * thus ignore the invalidate. */
   drawable->lastStamp = drawable->dri2.stamp;

   if (unlikely(INTEL_DEBUG & DEBUG_DRI))
      fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);

   if (screen->image.loader)
      intel_update_image_buffers(brw, drawable);
   else
      intel_update_dri2_buffers(brw, drawable);

   driUpdateFramebufferSize(&brw->ctx, drawable);
}

/**
 * intel_prepare_render should be called anywhere that current read/drawbuffer
 * state is required.
 */
void
intel_prepare_render(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   __DRIcontext *driContext = brw->driContext;
   __DRIdrawable *drawable;

   drawable = driContext->driDrawablePriv;
   if (drawable && drawable->dri2.stamp != driContext->dri2.draw_stamp) {
      if (drawable->lastStamp != drawable->dri2.stamp)
         intel_update_renderbuffers(driContext, drawable);
      driContext->dri2.draw_stamp = drawable->dri2.stamp;
   }

   drawable = driContext->driReadablePriv;
   if (drawable && drawable->dri2.stamp != driContext->dri2.read_stamp) {
      if (drawable->lastStamp != drawable->dri2.stamp)
         intel_update_renderbuffers(driContext, drawable);
      driContext->dri2.read_stamp = drawable->dri2.stamp;
   }

   /* If we're currently rendering to the front buffer, the rendering
    * that will happen next will probably dirty the front buffer. So
    * mark it as dirty here.
    */
   if (brw_is_front_buffer_drawing(ctx->DrawBuffer))
      brw->front_buffer_dirty = true;

   /* Wait for the swapbuffers before the one we just emitted, so we
    * don't get too many swaps outstanding for apps that are GPU-heavy
    * but not CPU-heavy.
    *
    * We're using intelDRI2Flush (called from the loader before
    * swapbuffer) and glFlush (for front buffer rendering) as the
    * indicator that a frame is done and then throttle when we get
    * here as we prepare to render the next frame.  At this point, the
    * round trips for swap/copy and getting new buffers are done, and
    * we'll spend less time waiting on the GPU.
    *
    * Unfortunately, we don't have a handle to the batch containing
    * the swap, and getting our hands on that doesn't seem worth it,
    * so we just use the first batch we emitted after the last swap.
    */
   if (brw->need_throttle && brw->first_post_swapbuffers_batch) {
      if (!brw->disable_throttling)
         drm_intel_bo_wait_rendering(brw->first_post_swapbuffers_batch);
      drm_intel_bo_unreference(brw->first_post_swapbuffers_batch);
      brw->first_post_swapbuffers_batch = NULL;
      brw->need_throttle = false;
   }
}

/**
 * \brief Query DRI2 to obtain a DRIdrawable's buffers.
 *
 * To determine which DRI buffers to request, examine the renderbuffers
 * attached to the drawable's framebuffer. Then request the buffers with
 * DRI2GetBuffers() or DRI2GetBuffersWithFormat().
 *
 * This is called from intel_update_renderbuffers().
 *
 * \param drawable      Drawable whose buffers are queried.
 * \param buffers       [out] List of buffers returned by DRI2 query.
 * \param buffer_count  [out] Number of buffers returned.
 *
 * \see intel_update_renderbuffers()
 * \see DRI2GetBuffers()
 * \see DRI2GetBuffersWithFormat()
 */
static void
intel_query_dri2_buffers(struct brw_context *brw,
                         __DRIdrawable *drawable,
                         __DRIbuffer **buffers,
                         int *buffer_count)
{
   __DRIscreen *screen = brw->intelScreen->driScrnPriv;
   struct gl_framebuffer *fb = drawable->driverPrivate;
   int i = 0;
   unsigned attachments[8];

   struct intel_renderbuffer *front_rb;
   struct intel_renderbuffer *back_rb;

   front_rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
   back_rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);

   memset(attachments, 0, sizeof(attachments));
   if ((brw_is_front_buffer_drawing(fb) ||
        brw_is_front_buffer_reading(fb) ||
        !back_rb) && front_rb) {
      /* If a fake front buffer is in use, then querying for
       * __DRI_BUFFER_FRONT_LEFT will cause the server to copy the image from
       * the real front buffer to the fake front buffer.  So before doing the
       * query, we need to make sure all the pending drawing has landed in the
       * real front buffer.
       */
      intel_batchbuffer_flush(brw);
      intel_flush_front(&brw->ctx);

      attachments[i++] = __DRI_BUFFER_FRONT_LEFT;
      attachments[i++] = intel_bits_per_pixel(front_rb);
   } else if (front_rb && brw->front_buffer_dirty) {
      /* We have pending front buffer rendering, but we aren't querying for a
       * front buffer.  If the front buffer we have is a fake front buffer,
       * the X server is going to throw it away when it processes the query.
       * So before doing the query, make sure all the pending drawing has
       * landed in the real front buffer.
       */
      intel_batchbuffer_flush(brw);
      intel_flush_front(&brw->ctx);
   }

   if (back_rb) {
      attachments[i++] = __DRI_BUFFER_BACK_LEFT;
      attachments[i++] = intel_bits_per_pixel(back_rb);
   }

   assert(i <= ARRAY_SIZE(attachments));
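
   /* The attachments array is built as (attachment, bits-per-pixel) pairs,
    * which is why the request below passes i / 2 as the attachment count.
    * E.g. a 32 bpp double-buffered visual that also needs the front buffer
    * yields { __DRI_BUFFER_FRONT_LEFT, 32, __DRI_BUFFER_BACK_LEFT, 32 } with
    * i == 4, i.e. two attachments.
    */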

   *buffers = screen->dri2.loader->getBuffersWithFormat(drawable,
                                                        &drawable->w,
                                                        &drawable->h,
                                                        attachments, i / 2,
                                                        buffer_count,
                                                        drawable->loaderPrivate);
}

/**
 * \brief Assign a DRI buffer's DRM region to a renderbuffer.
 *
 * This is called from intel_update_renderbuffers().
 *
 * \par Note:
 *    DRI buffers whose attachment point is DRI2BufferStencil or
 *    DRI2BufferDepthStencil are handled as special cases.
 *
 * \param buffer_name is a human readable name, such as "dri2 front buffer",
 *        that is passed to drm_intel_bo_gem_create_from_name().
 *
 * \see intel_update_renderbuffers()
 */
static void
intel_process_dri2_buffer(struct brw_context *brw,
                          __DRIdrawable *drawable,
                          __DRIbuffer *buffer,
                          struct intel_renderbuffer *rb,
                          const char *buffer_name)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;
   drm_intel_bo *bo;

   if (!rb)
      return;

   unsigned num_samples = rb->Base.Base.NumSamples;

   /* We try to avoid closing and reopening the same BO name, because the first
    * use of a mapping of the buffer involves a bunch of page faulting which is
    * moderately expensive.
    */
   struct intel_mipmap_tree *last_mt;
   if (num_samples == 0)
      last_mt = rb->mt;
   else
      last_mt = rb->singlesample_mt;

   uint32_t old_name = 0;
   if (last_mt) {
      /* The bo already has a name because the miptree was created by a
       * previous call to intel_process_dri2_buffer().  If a bo already has a
       * name, then drm_intel_bo_flink() is a low-cost getter.  It does not
       * create a new name.
       */
      drm_intel_bo_flink(last_mt->bo, &old_name);
   }

   if (old_name == buffer->name)
      return;

   if (unlikely(INTEL_DEBUG & DEBUG_DRI)) {
      fprintf(stderr,
              "attaching buffer %d, at %d, cpp %d, pitch %d\n",
              buffer->name, buffer->attachment,
              buffer->cpp, buffer->pitch);
   }

   intel_miptree_release(&rb->mt);
   bo = drm_intel_bo_gem_create_from_name(brw->bufmgr, buffer_name,
                                          buffer->name);
   if (!bo) {
      fprintf(stderr,
              "Failed to open BO for returned DRI2 buffer "
              "(%dx%d, %s, named %d).\n"
              "This is likely a bug in the X Server that will lead to a "
              "crash soon.\n",
              drawable->w, drawable->h, buffer_name, buffer->name);
      return;
   }

   intel_update_winsys_renderbuffer_miptree(brw, rb, bo,
                                            drawable->w, drawable->h,
                                            buffer->pitch);

   if (brw_is_front_buffer_drawing(fb) &&
       (buffer->attachment == __DRI_BUFFER_FRONT_LEFT ||
        buffer->attachment == __DRI_BUFFER_FAKE_FRONT_LEFT) &&
       rb->Base.Base.NumSamples > 1) {
      intel_renderbuffer_upsample(brw, rb);
   }

   assert(rb->mt);

   drm_intel_bo_unreference(bo);
}

/**
 * \brief Query DRI image loader to obtain a DRIdrawable's buffers.
 *
 * To determine which DRI buffers to request, examine the renderbuffers
 * attached to the drawable's framebuffer. Then request the buffers from
 * the image loader.
 *
 * This is called from intel_update_renderbuffers().
 *
 * \param drawable      Drawable whose buffers are queried.
 * \param buffers       [out] List of buffers returned by DRI2 query.
 * \param buffer_count  [out] Number of buffers returned.
 *
 * \see intel_update_renderbuffers()
 */

static void
intel_update_image_buffer(struct brw_context *intel,
                          __DRIdrawable *drawable,
                          struct intel_renderbuffer *rb,
                          __DRIimage *buffer,
                          enum __DRIimageBufferMask buffer_type)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;

   if (!rb || !buffer->bo)
      return;

   unsigned num_samples = rb->Base.Base.NumSamples;

   /* Check and see if we're already bound to the right
    * buffer object
    */
   struct intel_mipmap_tree *last_mt;
   if (num_samples == 0)
      last_mt = rb->mt;
   else
      last_mt = rb->singlesample_mt;

   if (last_mt && last_mt->bo == buffer->bo)
      return;

   intel_update_winsys_renderbuffer_miptree(intel, rb, buffer->bo,
                                            buffer->width, buffer->height,
                                            buffer->pitch);

   if (brw_is_front_buffer_drawing(fb) &&
       buffer_type == __DRI_IMAGE_BUFFER_FRONT &&
       rb->Base.Base.NumSamples > 1) {
      intel_renderbuffer_upsample(intel, rb);
   }
}

static void
intel_update_image_buffers(struct brw_context *brw, __DRIdrawable *drawable)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;
   __DRIscreen *screen = brw->intelScreen->driScrnPriv;
   struct intel_renderbuffer *front_rb;
   struct intel_renderbuffer *back_rb;
   struct __DRIimageList images;
   unsigned int format;
   uint32_t buffer_mask = 0;

   front_rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
   back_rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);

   if (back_rb)
      format = intel_rb_format(back_rb);
   else if (front_rb)
      format = intel_rb_format(front_rb);
   else
      return;

   if (front_rb && (brw_is_front_buffer_drawing(fb) ||
                    brw_is_front_buffer_reading(fb) || !back_rb)) {
      buffer_mask |= __DRI_IMAGE_BUFFER_FRONT;
   }

   if (back_rb)
      buffer_mask |= __DRI_IMAGE_BUFFER_BACK;

   (*screen->image.loader->getBuffers) (drawable,
                                        driGLFormatToImageFormat(format),
                                        &drawable->dri2.stamp,
                                        drawable->loaderPrivate,
                                        buffer_mask,
                                        &images);

   if (images.image_mask & __DRI_IMAGE_BUFFER_FRONT) {
      drawable->w = images.front->width;
      drawable->h = images.front->height;
      intel_update_image_buffer(brw,
                                drawable,
                                front_rb,
                                images.front,
                                __DRI_IMAGE_BUFFER_FRONT);
   }
   if (images.image_mask & __DRI_IMAGE_BUFFER_BACK) {
      drawable->w = images.back->width;
      drawable->h = images.back->height;
      intel_update_image_buffer(brw,
                                drawable,
                                back_rb,
                                images.back,
                                __DRI_IMAGE_BUFFER_BACK);
   }
}