i965: Generate vs code using scalar backend for BDW+
[mesa.git] src/mesa/drivers/dri/i965/brw_context.c
/*
 Copyright 2003 VMware, Inc.
 Copyright (C) Intel Corp. 2006. All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */


#include "main/api_exec.h"
#include "main/context.h"
#include "main/fbobject.h"
#include "main/extensions.h"
#include "main/imports.h"
#include "main/macros.h"
#include "main/points.h"
#include "main/version.h"
#include "main/vtxfmt.h"
#include "main/texobj.h"

#include "vbo/vbo_context.h"

#include "drivers/common/driverfuncs.h"
#include "drivers/common/meta.h"
#include "utils.h"

#include "brw_context.h"
#include "brw_defines.h"
#include "brw_draw.h"
#include "brw_state.h"

#include "intel_batchbuffer.h"
#include "intel_buffer_objects.h"
#include "intel_buffers.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
#include "intel_pixel.h"
#include "intel_image.h"
#include "intel_tex.h"
#include "intel_tex_obj.h"

#include "swrast_setup/swrast_setup.h"
#include "tnl/tnl.h"
#include "tnl/t_pipeline.h"
#include "util/ralloc.h"

/***************************************
 * Mesa's Driver Functions
 ***************************************/

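/**
 * Report the MSAA sample counts supported by this hardware generation, in
 * decreasing order, and return how many entries were written.  The target
 * and internal format are currently ignored: every format advertises the
 * same set of modes here.
 */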
static size_t
brw_query_samples_for_format(struct gl_context *ctx, GLenum target,
                             GLenum internalFormat, int samples[16])
{
   struct brw_context *brw = brw_context(ctx);

   (void) target;

   switch (brw->gen) {
   case 8:
      samples[0] = 8;
      samples[1] = 4;
      samples[2] = 2;
      return 3;

   case 7:
      samples[0] = 8;
      samples[1] = 4;
      return 2;

   case 6:
      samples[0] = 4;
      return 1;

   default:
      samples[0] = 1;
      return 1;
   }
}

const char *const brw_vendor_string = "Intel Open Source Technology Center";

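/**
 * Build the GL_RENDERER string from the PCI device ID table.  Note that the
 * result lives in a static buffer, so the returned pointer is only valid
 * until the next call.
 */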
const char *
brw_get_renderer_string(unsigned deviceID)
{
   const char *chipset;
   static char buffer[128];

   switch (deviceID) {
#undef CHIPSET
#define CHIPSET(id, symbol, str) case id: chipset = str; break;
#include "pci_ids/i965_pci_ids.h"
   default:
      chipset = "Unknown Intel Chipset";
      break;
   }

   (void) driGetRendererString(buffer, chipset, 0);
   return buffer;
}

static const GLubyte *
intel_get_string(struct gl_context * ctx, GLenum name)
{
   const struct brw_context *const brw = brw_context(ctx);

   switch (name) {
   case GL_VENDOR:
      return (GLubyte *) brw_vendor_string;

   case GL_RENDERER:
      return
         (GLubyte *) brw_get_renderer_string(brw->intelScreen->deviceID);

   default:
      return NULL;
   }
}

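/**
 * Viewport hook: for window-system framebuffers, invalidate both drawables
 * so the next draw picks up new buffers.  This is the resize path for
 * loaders without DRI2 invalidate events; brw_init_driver_functions() only
 * installs this hook in that case.
 */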
static void
intel_viewport(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);
   __DRIcontext *driContext = brw->driContext;

   if (_mesa_is_winsys_fbo(ctx->DrawBuffer)) {
      dri2InvalidateDrawable(driContext->driDrawablePriv);
      dri2InvalidateDrawable(driContext->driReadablePriv);
   }
}

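/**
 * The UpdateState hook: record the new GL state flags and perform any
 * HiZ/color resolves needed so that sampling sees up-to-date data.
 */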
static void
intel_update_state(struct gl_context * ctx, GLuint new_state)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_texture_object *tex_obj;
   struct intel_renderbuffer *depth_irb;

   if (ctx->swrast_context)
      _swrast_InvalidateState(ctx, new_state);
   _vbo_InvalidateState(ctx, new_state);

   brw->NewGLState |= new_state;

   _mesa_unlock_context_textures(ctx);

   /* Resolve the depth buffer's HiZ buffer. */
   depth_irb = intel_get_renderbuffer(ctx->DrawBuffer, BUFFER_DEPTH);
   if (depth_irb)
      intel_renderbuffer_resolve_hiz(brw, depth_irb);

   /* Resolve depth buffer and render cache of each enabled texture. */
   int maxEnabledUnit = ctx->Texture._MaxEnabledTexImageUnit;
   for (int i = 0; i <= maxEnabledUnit; i++) {
      if (!ctx->Texture.Unit[i]._Current)
         continue;
      tex_obj = intel_texture_object(ctx->Texture.Unit[i]._Current);
      if (!tex_obj || !tex_obj->mt)
         continue;
      intel_miptree_all_slices_resolve_depth(brw, tex_obj->mt);
      intel_miptree_resolve_color(brw, tex_obj->mt);
      brw_render_cache_set_check_flush(brw, tex_obj->mt->bo);
   }

   _mesa_lock_context_textures(ctx);
}

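/* Pick whichever loader interface is in use; both expose a flushFrontBuffer
 * hook with the same signature.
 */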
#define flushFront(screen)      ((screen)->image.loader ? (screen)->image.loader->flushFrontBuffer : (screen)->dri2.loader->flushFrontBuffer)

static void
intel_flush_front(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);
   __DRIcontext *driContext = brw->driContext;
   __DRIdrawable *driDrawable = driContext->driDrawablePriv;
   __DRIscreen *const screen = brw->intelScreen->driScrnPriv;

   if (brw->front_buffer_dirty && _mesa_is_winsys_fbo(ctx->DrawBuffer)) {
      if (flushFront(screen) && driDrawable &&
          driDrawable->loaderPrivate) {

         /* Resolve before flushing FAKE_FRONT_LEFT to FRONT_LEFT.
          *
          * This potentially resolves both the front and back buffers.  It
          * is unnecessary to resolve the back, but that harms nothing
          * except performance, and no one cares about front-buffer render
          * performance.
          */
         intel_resolve_for_dri2_flush(brw, driDrawable);
         intel_batchbuffer_flush(brw);

         flushFront(screen)(driDrawable, driDrawable->loaderPrivate);

         /* We set the dirty bit in intel_prepare_render() if we're
          * front buffer rendering once we get there.
          */
         brw->front_buffer_dirty = false;
      }
   }
}

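/**
 * The glFlush() hook: submit the batchbuffer, push any front-buffer
 * rendering out to the window system, and arm throttling if we are drawing
 * to the front buffer.
 */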
static void
intel_glFlush(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);

   intel_batchbuffer_flush(brw);
   intel_flush_front(ctx);
   if (brw_is_front_buffer_drawing(ctx->DrawBuffer))
      brw->need_throttle = true;
}

static void
intel_finish(struct gl_context * ctx)
{
   struct brw_context *brw = brw_context(ctx);

   intel_glFlush(ctx);

   if (brw->batch.last_bo)
      drm_intel_bo_wait_rendering(brw->batch.last_bo);
}

static void
brw_init_driver_functions(struct brw_context *brw,
                          struct dd_function_table *functions)
{
   _mesa_init_driver_functions(functions);

   /* GLX uses DRI2 invalidate events to handle window resizing.
    * Unfortunately, EGL does not - libEGL is written in XCB (not Xlib),
    * which doesn't provide a mechanism for snooping the event queues.
    *
    * So EGL still relies on viewport hacks to handle window resizing.
    * This should go away with DRI3000.
    */
   if (!brw->driContext->driScreenPriv->dri2.useInvalidate)
      functions->Viewport = intel_viewport;

   functions->Flush = intel_glFlush;
   functions->Finish = intel_finish;
   functions->GetString = intel_get_string;
   functions->UpdateState = intel_update_state;

   intelInitTextureFuncs(functions);
   intelInitTextureImageFuncs(functions);
   intelInitTextureSubImageFuncs(functions);
   intelInitTextureCopyImageFuncs(functions);
   intelInitCopyImageFuncs(functions);
   intelInitClearFuncs(functions);
   intelInitBufferFuncs(functions);
   intelInitPixelFuncs(functions);
   intelInitBufferObjectFuncs(functions);
   intel_init_syncobj_functions(functions);
   brw_init_object_purgeable_functions(functions);

   brwInitFragProgFuncs( functions );
   brw_init_common_queryobj_functions(functions);
   if (brw->gen >= 6)
      gen6_init_queryobj_functions(functions);
   else
      gen4_init_queryobj_functions(functions);

   functions->QuerySamplesForFormat = brw_query_samples_for_format;

   functions->NewTransformFeedback = brw_new_transform_feedback;
   functions->DeleteTransformFeedback = brw_delete_transform_feedback;
   functions->GetTransformFeedbackVertexCount =
      brw_get_transform_feedback_vertex_count;
   if (brw->gen >= 7) {
      functions->BeginTransformFeedback = gen7_begin_transform_feedback;
      functions->EndTransformFeedback = gen7_end_transform_feedback;
      functions->PauseTransformFeedback = gen7_pause_transform_feedback;
      functions->ResumeTransformFeedback = gen7_resume_transform_feedback;
   } else {
      functions->BeginTransformFeedback = brw_begin_transform_feedback;
      functions->EndTransformFeedback = brw_end_transform_feedback;
   }

   if (brw->gen >= 6)
      functions->GetSamplePosition = gen6_get_sample_position;
}

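/**
 * Fill in ctx->Const with the limits this generation of hardware (and this
 * driver) can actually support, overriding Mesa's software defaults.
 */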
static void
brw_initialize_context_constants(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   unsigned max_samplers =
      brw->gen >= 8 || brw->is_haswell ? BRW_MAX_TEX_UNIT : 16;

   ctx->Const.QueryCounterBits.Timestamp = 36;

   ctx->Const.StripTextureBorder = true;

   ctx->Const.MaxDualSourceDrawBuffers = 1;
   ctx->Const.MaxDrawBuffers = BRW_MAX_DRAW_BUFFERS;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits = max_samplers;
   ctx->Const.MaxTextureCoordUnits = 8; /* Mesa limit */
   ctx->Const.MaxTextureUnits =
      MIN2(ctx->Const.MaxTextureCoordUnits,
           ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits);
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxTextureImageUnits = max_samplers;
   if (brw->gen >= 6)
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxTextureImageUnits = max_samplers;
   else
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxTextureImageUnits = 0;
   if (_mesa_extension_override_enables.ARB_compute_shader) {
      ctx->Const.Program[MESA_SHADER_COMPUTE].MaxTextureImageUnits = BRW_MAX_TEX_UNIT;
      ctx->Const.MaxUniformBufferBindings += 12;
   } else {
      ctx->Const.Program[MESA_SHADER_COMPUTE].MaxTextureImageUnits = 0;
   }
   ctx->Const.MaxCombinedTextureImageUnits =
      ctx->Const.Program[MESA_SHADER_VERTEX].MaxTextureImageUnits +
      ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits +
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxTextureImageUnits +
      ctx->Const.Program[MESA_SHADER_COMPUTE].MaxTextureImageUnits;

   ctx->Const.MaxTextureLevels = 14; /* 8192 */
   if (ctx->Const.MaxTextureLevels > MAX_TEXTURE_LEVELS)
      ctx->Const.MaxTextureLevels = MAX_TEXTURE_LEVELS;
   ctx->Const.Max3DTextureLevels = 12; /* 2048 */
   ctx->Const.MaxCubeTextureLevels = 14; /* 8192 */
   ctx->Const.MaxTextureMbytes = 1536;

   if (brw->gen >= 7)
      ctx->Const.MaxArrayTextureLayers = 2048;
   else
      ctx->Const.MaxArrayTextureLayers = 512;

   ctx->Const.MaxTextureRectSize = 1 << 12;

   ctx->Const.MaxTextureMaxAnisotropy = 16.0;

   ctx->Const.MaxRenderbufferSize = 8192;

   /* Hardware only supports a limited number of transform feedback buffers.
    * So we need to override the Mesa default (which is based only on software
    * limits).
    */
   ctx->Const.MaxTransformFeedbackBuffers = BRW_MAX_SOL_BUFFERS;

   /* On Gen6, in the worst case, we use up one binding table entry per
    * transform feedback component (see comments above the definition of
    * BRW_MAX_SOL_BINDINGS, in brw_context.h), so we need to advertise a value
    * for MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS equal to
    * BRW_MAX_SOL_BINDINGS.
    *
    * In "separate components" mode, we need to divide this value by
    * BRW_MAX_SOL_BUFFERS, so that the total number of binding table entries
    * used up by all buffers will not exceed BRW_MAX_SOL_BINDINGS.
    */
   ctx->Const.MaxTransformFeedbackInterleavedComponents = BRW_MAX_SOL_BINDINGS;
   ctx->Const.MaxTransformFeedbackSeparateComponents =
      BRW_MAX_SOL_BINDINGS / BRW_MAX_SOL_BUFFERS;

   ctx->Const.AlwaysUseGetTransformFeedbackVertexCount = true;

   int max_samples;
   const int *msaa_modes = intel_supported_msaa_modes(brw->intelScreen);
   const int clamp_max_samples =
      driQueryOptioni(&brw->optionCache, "clamp_max_samples");

   if (clamp_max_samples < 0) {
      max_samples = msaa_modes[0];
   } else {
      /* Select the largest supported MSAA mode that does not exceed
       * clamp_max_samples.
       */
      max_samples = 0;
      for (int i = 0; msaa_modes[i] != 0; ++i) {
         if (msaa_modes[i] <= clamp_max_samples) {
            max_samples = msaa_modes[i];
            break;
         }
      }
   }

   ctx->Const.MaxSamples = max_samples;
   ctx->Const.MaxColorTextureSamples = max_samples;
   ctx->Const.MaxDepthTextureSamples = max_samples;
   ctx->Const.MaxIntegerSamples = max_samples;

   /* gen6_set_sample_maps() sets SampleMap{2,4,8}x variables which are used
    * to map indices of rectangular grid to sample numbers within a pixel.
    * These variables are used by GL_EXT_framebuffer_multisample_blit_scaled
    * extension implementation. For more details see the comment above
    * gen6_set_sample_maps() definition.
    */
   gen6_set_sample_maps(ctx);

   if (brw->gen >= 7)
      ctx->Const.MaxProgramTextureGatherComponents = 4;
   else if (brw->gen == 6)
      ctx->Const.MaxProgramTextureGatherComponents = 1;

   ctx->Const.MinLineWidth = 1.0;
   ctx->Const.MinLineWidthAA = 1.0;
   if (brw->gen >= 9 || brw->is_cherryview) {
      ctx->Const.MaxLineWidth = 40.0;
      ctx->Const.MaxLineWidthAA = 40.0;
      ctx->Const.LineWidthGranularity = 0.125;
   } else if (brw->gen >= 6) {
      ctx->Const.MaxLineWidth = 7.875;
      ctx->Const.MaxLineWidthAA = 7.875;
      ctx->Const.LineWidthGranularity = 0.125;
   } else {
      ctx->Const.MaxLineWidth = 7.0;
      ctx->Const.MaxLineWidthAA = 7.0;
      ctx->Const.LineWidthGranularity = 0.5;
   }

   ctx->Const.MinPointSize = 1.0;
   ctx->Const.MinPointSizeAA = 1.0;
   ctx->Const.MaxPointSize = 255.0;
   ctx->Const.MaxPointSizeAA = 255.0;
   ctx->Const.PointSizeGranularity = 1.0;

   if (brw->gen >= 5 || brw->is_g4x)
      ctx->Const.MaxClipPlanes = 8;

   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeInstructions = 16 * 1024;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxAluInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxTexInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxTexIndirections = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAluInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTexInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTexIndirections = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAttribs = 16;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTemps = 256;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAddressRegs = 1;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeParameters = 1024;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxEnvParams =
      MIN2(ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeParameters,
           ctx->Const.Program[MESA_SHADER_VERTEX].MaxEnvParams);

   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeInstructions = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAluInstructions = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTexInstructions = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTexIndirections = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAttribs = 12;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTemps = 256;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAddressRegs = 0;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeParameters = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxEnvParams =
      MIN2(ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeParameters,
           ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxEnvParams);

   /* Fragment shaders use real, 32-bit twos-complement integers for all
    * integer types.
    */
   ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.RangeMin = 31;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.RangeMax = 30;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.Precision = 0;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].HighInt = ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MediumInt = ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt;

   if (brw->gen >= 7) {
      ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxAtomicCounters = MAX_ATOMIC_COUNTERS;
      ctx->Const.Program[MESA_SHADER_VERTEX].MaxAtomicCounters = MAX_ATOMIC_COUNTERS;
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxAtomicCounters = MAX_ATOMIC_COUNTERS;
      ctx->Const.Program[MESA_SHADER_COMPUTE].MaxAtomicCounters = MAX_ATOMIC_COUNTERS;
      ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxAtomicBuffers = BRW_MAX_ABO;
      ctx->Const.Program[MESA_SHADER_VERTEX].MaxAtomicBuffers = BRW_MAX_ABO;
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxAtomicBuffers = BRW_MAX_ABO;
      ctx->Const.Program[MESA_SHADER_COMPUTE].MaxAtomicBuffers = BRW_MAX_ABO;
      ctx->Const.MaxCombinedAtomicBuffers = 3 * BRW_MAX_ABO;
   }

   /* Gen6 converts quads to polygons at the beginning of the 3D pipeline,
    * but we're not sure how the conversion orders the vertices, which
    * affects the provoking vertex decision.  Always use the last-vertex
    * convention for quad primitives, which works as expected for now.
    */
   if (brw->gen >= 6)
      ctx->Const.QuadsFollowProvokingVertexConvention = false;

   ctx->Const.NativeIntegers = true;
   ctx->Const.VertexID_is_zero_based = true;

   /* Regarding the CMP instruction, the Ivybridge PRM says:
    *
    *   "For each enabled channel 0b or 1b is assigned to the appropriate flag
    *    bit and 0/all zeros or all ones (e.g, byte 0xFF, word 0xFFFF, DWord
    *    0xFFFFFFFF) is assigned to dst."
    *
    * but PRMs for earlier generations say
    *
    *   "In dword format, one GRF may store up to 8 results. When the register
    *    is used later as a vector of Booleans, as only LSB at each channel
    *    contains meaning [sic] data, software should make sure all higher bits
    *    are masked out (e.g. by 'and-ing' an [sic] 0x01 constant)."
    *
    * We select the representation of a true boolean uniform to be ~0, and fix
    * the results of Gen <= 5 CMP instructions with -(result & 1).
    */
   ctx->Const.UniformBooleanTrue = ~0;

   /* From the gen4 PRM, volume 4 page 127:
    *
    *    "For SURFTYPE_BUFFER non-rendertarget surfaces, this field specifies
    *     the base address of the first element of the surface, computed in
    *     software by adding the surface base address to the byte offset of
    *     the element in the buffer."
    *
    * However, unaligned accesses are slower, so enforce buffer alignment.
    */
   ctx->Const.UniformBufferOffsetAlignment = 16;
   ctx->Const.TextureBufferOffsetAlignment = 16;

   if (brw->gen >= 6) {
      ctx->Const.MaxVarying = 32;
      ctx->Const.Program[MESA_SHADER_VERTEX].MaxOutputComponents = 128;
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxInputComponents = 64;
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxOutputComponents = 128;
      ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxInputComponents = 128;
   }

   /* We want the GLSL compiler to emit code that uses condition codes */
   for (int i = 0; i < MESA_SHADER_STAGES; i++) {
      ctx->Const.ShaderCompilerOptions[i].MaxIfDepth = brw->gen < 6 ? 16 : UINT_MAX;
      ctx->Const.ShaderCompilerOptions[i].EmitCondCodes = true;
      ctx->Const.ShaderCompilerOptions[i].EmitNoNoise = true;
      ctx->Const.ShaderCompilerOptions[i].EmitNoMainReturn = true;
      ctx->Const.ShaderCompilerOptions[i].EmitNoIndirectInput = true;
      ctx->Const.ShaderCompilerOptions[i].EmitNoIndirectOutput =
         (i == MESA_SHADER_FRAGMENT);
      ctx->Const.ShaderCompilerOptions[i].EmitNoIndirectTemp =
         (i == MESA_SHADER_FRAGMENT);
      ctx->Const.ShaderCompilerOptions[i].EmitNoIndirectUniform = false;
      ctx->Const.ShaderCompilerOptions[i].LowerClipDistance = true;
   }

   ctx->Const.ShaderCompilerOptions[MESA_SHADER_VERTEX].OptimizeForAOS = true;
   ctx->Const.ShaderCompilerOptions[MESA_SHADER_GEOMETRY].OptimizeForAOS = true;

   if (brw->scalar_vs) {
      /* If we're using the scalar backend for vertex shaders, we need to
       * configure these accordingly.
       */
      ctx->Const.ShaderCompilerOptions[MESA_SHADER_VERTEX].EmitNoIndirectOutput = true;
      ctx->Const.ShaderCompilerOptions[MESA_SHADER_VERTEX].EmitNoIndirectTemp = true;
      ctx->Const.ShaderCompilerOptions[MESA_SHADER_VERTEX].OptimizeForAOS = false;
   }

   /* ARB_viewport_array */
   if (brw->gen >= 7 && ctx->API == API_OPENGL_CORE) {
      ctx->Const.MaxViewports = GEN7_NUM_VIEWPORTS;
      ctx->Const.ViewportSubpixelBits = 0;

      /* Cast to float before negating because MaxViewportWidth is unsigned.
       */
      ctx->Const.ViewportBounds.Min = -(float)ctx->Const.MaxViewportWidth;
      ctx->Const.ViewportBounds.Max = ctx->Const.MaxViewportWidth;
   }

   /* ARB_gpu_shader5 */
   if (brw->gen >= 7)
      ctx->Const.MaxVertexStreams = MIN2(4, MAX_VERTEX_STREAMS);
}

/**
 * Process driconf (drirc) options, setting appropriate context flags.
 *
 * intelInitExtensions still pokes at optionCache directly, in order to
 * avoid advertising various extensions.  No flags are set, so it makes
 * sense to continue doing that there.
 */
static void
brw_process_driconf_options(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   driOptionCache *options = &brw->optionCache;
   driParseConfigFiles(options, &brw->intelScreen->optionCache,
                       brw->driContext->driScreenPriv->myNum, "i965");

   int bo_reuse_mode = driQueryOptioni(options, "bo_reuse");
   switch (bo_reuse_mode) {
   case DRI_CONF_BO_REUSE_DISABLED:
      break;
   case DRI_CONF_BO_REUSE_ALL:
      intel_bufmgr_gem_enable_reuse(brw->bufmgr);
      break;
   }

   if (!driQueryOptionb(options, "hiz")) {
      brw->has_hiz = false;
      /* On gen6, you can only do separate stencil with HiZ. */
      if (brw->gen == 6)
         brw->has_separate_stencil = false;
   }

   if (driQueryOptionb(options, "always_flush_batch")) {
      fprintf(stderr, "flushing batchbuffer before/after each draw call\n");
      brw->always_flush_batch = true;
   }

   if (driQueryOptionb(options, "always_flush_cache")) {
      fprintf(stderr, "flushing GPU caches before/after each draw call\n");
      brw->always_flush_cache = true;
   }

   if (driQueryOptionb(options, "disable_throttling")) {
      fprintf(stderr, "disabling flush throttling\n");
      brw->disable_throttling = true;
   }

   brw->precompile = driQueryOptionb(&brw->optionCache, "shader_precompile");

   ctx->Const.ForceGLSLExtensionsWarn =
      driQueryOptionb(options, "force_glsl_extensions_warn");

   ctx->Const.DisableGLSLLineContinuations =
      driQueryOptionb(options, "disable_glsl_line_continuations");

   ctx->Const.AllowGLSLExtensionDirectiveMidShader =
      driQueryOptionb(options, "allow_glsl_extension_directive_midshader");
}

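/**
 * Driver entry point for context creation: validates the requested flags,
 * allocates and wires up the brw_context, creates the core Mesa context,
 * and initializes hardware limits, state, and extensions.
 */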
GLboolean
brwCreateContext(gl_api api,
                 const struct gl_config *mesaVis,
                 __DRIcontext *driContextPriv,
                 unsigned major_version,
                 unsigned minor_version,
                 uint32_t flags,
                 bool notify_reset,
                 unsigned *dri_ctx_error,
                 void *sharedContextPrivate)
{
   __DRIscreen *sPriv = driContextPriv->driScreenPriv;
   struct gl_context *shareCtx = (struct gl_context *) sharedContextPrivate;
   struct intel_screen *screen = sPriv->driverPrivate;
   const struct brw_device_info *devinfo = screen->devinfo;
   struct dd_function_table functions;

   /* Only allow the __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS flag if the kernel
    * provides us with context reset notifications.
    */
   uint32_t allowed_flags = __DRI_CTX_FLAG_DEBUG
      | __DRI_CTX_FLAG_FORWARD_COMPATIBLE;

   if (screen->has_context_reset_notification)
      allowed_flags |= __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS;

   if (flags & ~allowed_flags) {
      *dri_ctx_error = __DRI_CTX_ERROR_UNKNOWN_FLAG;
      return false;
   }

   struct brw_context *brw = rzalloc(NULL, struct brw_context);
   if (!brw) {
      fprintf(stderr, "%s: failed to alloc context\n", __FUNCTION__);
      *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
      return false;
   }

   driContextPriv->driverPrivate = brw;
   brw->driContext = driContextPriv;
   brw->intelScreen = screen;
   brw->bufmgr = screen->bufmgr;

   brw->gen = devinfo->gen;
   brw->gt = devinfo->gt;
   brw->is_g4x = devinfo->is_g4x;
   brw->is_baytrail = devinfo->is_baytrail;
   brw->is_haswell = devinfo->is_haswell;
   brw->is_cherryview = devinfo->is_cherryview;
   brw->has_llc = devinfo->has_llc;
   brw->has_hiz = devinfo->has_hiz_and_separate_stencil;
   brw->has_separate_stencil = devinfo->has_hiz_and_separate_stencil;
   brw->has_pln = devinfo->has_pln;
   brw->has_compr4 = devinfo->has_compr4;
   brw->has_surface_tile_offset = devinfo->has_surface_tile_offset;
   brw->has_negative_rhw_bug = devinfo->has_negative_rhw_bug;
   brw->needs_unlit_centroid_workaround =
      devinfo->needs_unlit_centroid_workaround;

   brw->must_use_separate_stencil = screen->hw_must_use_separate_stencil;
   brw->has_swizzling = screen->hw_has_swizzling;

   brw->vs.base.stage = MESA_SHADER_VERTEX;
   brw->gs.base.stage = MESA_SHADER_GEOMETRY;
   brw->wm.base.stage = MESA_SHADER_FRAGMENT;
   if (brw->gen >= 8) {
      gen8_init_vtable_surface_functions(brw);
      brw->vtbl.emit_depth_stencil_hiz = gen8_emit_depth_stencil_hiz;
   } else if (brw->gen >= 7) {
      gen7_init_vtable_surface_functions(brw);
      brw->vtbl.emit_depth_stencil_hiz = gen7_emit_depth_stencil_hiz;
   } else if (brw->gen >= 6) {
      gen6_init_vtable_surface_functions(brw);
      brw->vtbl.emit_depth_stencil_hiz = gen6_emit_depth_stencil_hiz;
   } else {
      gen4_init_vtable_surface_functions(brw);
      brw->vtbl.emit_depth_stencil_hiz = brw_emit_depth_stencil_hiz;
   }

   brw_init_driver_functions(brw, &functions);

   if (notify_reset)
      functions.GetGraphicsResetStatus = brw_get_graphics_reset_status;

   struct gl_context *ctx = &brw->ctx;

   if (!_mesa_initialize_context(ctx, api, mesaVis, shareCtx, &functions)) {
      *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
      fprintf(stderr, "%s: failed to init mesa context\n", __FUNCTION__);
      intelDestroyContext(driContextPriv);
      return false;
   }

   driContextSetFlags(ctx, flags);

   /* Initialize the software rasterizer and helper modules.
    *
    * As of GL 3.1 core, the gen4+ driver doesn't need the swrast context for
    * software fallbacks (which we have to support on legacy GL to do weird
    * glDrawPixels(), glBitmap(), and other functions).
    */
   if (api != API_OPENGL_CORE && api != API_OPENGLES2) {
      _swrast_CreateContext(ctx);
   }

   _vbo_CreateContext(ctx);
   if (ctx->swrast_context) {
      _tnl_CreateContext(ctx);
      TNL_CONTEXT(ctx)->Driver.RunPipeline = _tnl_run_pipeline;
      _swsetup_CreateContext(ctx);

      /* Configure swrast to match hardware characteristics: */
      _swrast_allow_pixel_fog(ctx, false);
      _swrast_allow_vertex_fog(ctx, true);
   }

   _mesa_meta_init(ctx);

   brw_process_driconf_options(brw);
   brw_process_intel_debug_variable(brw);

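   /* Broadwell and later generate vertex shader code with the scalar
    * backend by default; INTEL_DEBUG=vec4vs forces the old vec4 backend.
    */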
   if (brw->gen >= 8 && !(INTEL_DEBUG & DEBUG_VEC4VS))
      brw->scalar_vs = true;

   brw_initialize_context_constants(brw);

   ctx->Const.ResetStrategy = notify_reset
      ? GL_LOSE_CONTEXT_ON_RESET_ARB : GL_NO_RESET_NOTIFICATION_ARB;

   /* Reinitialize the context point state.  It depends on ctx->Const values. */
   _mesa_init_point(ctx);

   intel_fbo_init(brw);

   intel_batchbuffer_init(brw);

   if (brw->gen >= 6) {
      /* Create a new hardware context.  Using a hardware context means that
       * our GPU state will be saved/restored on context switch, allowing us
       * to assume that the GPU is in the same state we left it in.
       *
       * This is required for transform feedback buffer offsets, query objects,
       * and also allows us to reduce how much state we have to emit.
       */
      brw->hw_ctx = drm_intel_gem_context_create(brw->bufmgr);

      if (!brw->hw_ctx) {
         fprintf(stderr, "Gen6+ requires kernel 3.6 or later.\n");
         intelDestroyContext(driContextPriv);
         return false;
      }
   }

   brw_init_state(brw);

   intelInitExtensions(ctx);

   brw_init_surface_formats(brw);

   brw->max_vs_threads = devinfo->max_vs_threads;
   brw->max_gs_threads = devinfo->max_gs_threads;
   brw->max_wm_threads = devinfo->max_wm_threads;
   brw->urb.size = devinfo->urb.size;
   brw->urb.min_vs_entries = devinfo->urb.min_vs_entries;
   brw->urb.max_vs_entries = devinfo->urb.max_vs_entries;
   brw->urb.max_gs_entries = devinfo->urb.max_gs_entries;

   /* Estimate the size of the mappable aperture into the GTT.  There's an
    * ioctl to get the whole GTT size, but not one to get the mappable subset.
    * It turns out it's basically always 256MB, though some ancient hardware
    * was smaller.
    */
   uint32_t gtt_size = 256 * 1024 * 1024;

   /* We don't want to map two objects such that a memcpy between them would
    * just fault one mapping in and then the other over and over forever.  So
    * we would need to divide the GTT size by 2.  Additionally, some GTT is
    * taken up by things like the framebuffer and the ringbuffer and such, so
    * be more conservative and use a quarter (64MB here).
    */
   brw->max_gtt_map_object_size = gtt_size / 4;

   if (brw->gen == 6)
      brw->urb.gs_present = false;

   brw->prim_restart.in_progress = false;
   brw->prim_restart.enable_cut_index = false;
   brw->gs.enabled = false;
   brw->sf.viewport_transform_enable = true;

   ctx->VertexProgram._MaintainTnlProgram = true;
   ctx->FragmentProgram._MaintainTexEnvProgram = true;

   brw_draw_init( brw );

   if ((flags & __DRI_CTX_FLAG_DEBUG) != 0) {
      /* Turn on some extra GL_ARB_debug_output generation. */
      brw->perf_debug = true;
   }

   if ((flags & __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS) != 0)
      ctx->Const.ContextFlags |= GL_CONTEXT_FLAG_ROBUST_ACCESS_BIT_ARB;

   if (INTEL_DEBUG & DEBUG_SHADER_TIME)
      brw_init_shader_time(brw);

   _mesa_compute_version(ctx);

   _mesa_initialize_dispatch_tables(ctx);
   _mesa_initialize_vbo_vtxfmt(ctx);

   if (ctx->Extensions.AMD_performance_monitor) {
      brw_init_performance_monitors(brw);
   }

   vbo_use_buffer_objects(ctx);
   vbo_always_unmap_buffers(ctx);

   return true;
}
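
/**
 * Tear down the context in roughly the reverse order of creation: helper
 * modules first, then driver state and buffers, and finally the Mesa
 * context and the brw_context allocation itself.
 */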
void
intelDestroyContext(__DRIcontext * driContextPriv)
{
   struct brw_context *brw =
      (struct brw_context *) driContextPriv->driverPrivate;
   struct gl_context *ctx = &brw->ctx;

   assert(brw); /* should never be null */
   if (!brw)
      return;

   /* Dump a final BMP in case the application doesn't call SwapBuffers */
   if (INTEL_DEBUG & DEBUG_AUB) {
      intel_batchbuffer_flush(brw);
      aub_dump_bmp(&brw->ctx);
   }

   _mesa_meta_free(&brw->ctx);
   brw_meta_fast_clear_free(brw);

   if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
      /* Force a report. */
      brw->shader_time.report_time = 0;

      brw_collect_and_report_shader_time(brw);
      brw_destroy_shader_time(brw);
   }

   brw_destroy_state(brw);
   brw_draw_destroy(brw);

   drm_intel_bo_unreference(brw->curbe.curbe_bo);

   drm_intel_gem_context_destroy(brw->hw_ctx);

   if (ctx->swrast_context) {
      _swsetup_DestroyContext(&brw->ctx);
      _tnl_DestroyContext(&brw->ctx);
   }
   _vbo_DestroyContext(&brw->ctx);

   if (ctx->swrast_context)
      _swrast_DestroyContext(&brw->ctx);

   intel_batchbuffer_free(brw);

   drm_intel_bo_unreference(brw->first_post_swapbuffers_batch);
   brw->first_post_swapbuffers_batch = NULL;

   driDestroyOptionCache(&brw->optionCache);

   /* free the Mesa context */
   _mesa_free_context_data(&brw->ctx);

   ralloc_free(brw);
   driContextPriv->driverPrivate = NULL;
}

GLboolean
intelUnbindContext(__DRIcontext * driContextPriv)
{
   /* Unset current context and dispatch table */
   _mesa_make_current(NULL, NULL, NULL);

   return true;
}

/**
 * Fixes up the context for GLES2/3 with our default-to-sRGB-capable behavior
 * on window system framebuffers.
 *
 * Desktop GL is fairly reasonable in its handling of sRGB: You can ask if
 * your renderbuffer can do sRGB encode, and you can flip a switch that does
 * sRGB encode if the renderbuffer can handle it.  You can ask specifically
 * for a visual where you're guaranteed to be capable, but it turns out that
 * everyone just makes all their ARGB8888 visuals capable and doesn't offer
 * incapable ones, because there's no difference between the two in resources
 * used.  Applications thus get built that accidentally rely on the default
 * visual choice being sRGB, so we make ours sRGB capable.  Everything sounds
 * great...
 *
 * But for GLES2/3, they decided that it was silly to not turn on sRGB encode
 * for sRGB renderbuffers you made with the GL_EXT_texture_sRGB equivalent.
 * So they removed the enable knob and made it "if the renderbuffer is sRGB
 * capable, do sRGB encode".  Then, for your window system renderbuffers, you
 * can ask for sRGB visuals and get sRGB encode, or not ask for sRGB visuals
 * and get no sRGB encode (assuming that both kinds of visual are available).
 * Thus our choice to support sRGB by default on our visuals for desktop would
 * result in broken rendering of GLES apps that aren't expecting sRGB encode.
 *
 * Unfortunately, renderbuffer setup happens before a context is created.  So
 * in intel_screen.c we always set up sRGB, and here, if you're a GLES2/3
 * context (without an sRGB visual, though we don't have sRGB visuals exposed
 * yet), we go turn that back off before anyone finds out.
 */
static void
intel_gles3_srgb_workaround(struct brw_context *brw,
                            struct gl_framebuffer *fb)
{
   struct gl_context *ctx = &brw->ctx;

   if (_mesa_is_desktop_gl(ctx) || !fb->Visual.sRGBCapable)
      return;

   /* Some day when we support the sRGB capable bit on visuals available for
    * GLES, we'll need to respect that and not disable things here.
    */
   fb->Visual.sRGBCapable = false;
   for (int i = 0; i < BUFFER_COUNT; i++) {
      if (fb->Attachment[i].Renderbuffer &&
          fb->Attachment[i].Renderbuffer->Format == MESA_FORMAT_B8G8R8A8_SRGB) {
         fb->Attachment[i].Renderbuffer->Format = MESA_FORMAT_B8G8R8A8_UNORM;
      }
   }
}

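/**
 * The MakeCurrent entry point: flush the outgoing context when actually
 * switching, apply the sRGB workaround before the framebuffers' miptrees
 * are allocated, and bind the draw/read framebuffers (or unbind everything
 * when the context is NULL).
 */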
GLboolean
intelMakeCurrent(__DRIcontext * driContextPriv,
                 __DRIdrawable * driDrawPriv,
                 __DRIdrawable * driReadPriv)
{
   struct brw_context *brw;
   GET_CURRENT_CONTEXT(curCtx);

   if (driContextPriv)
      brw = (struct brw_context *) driContextPriv->driverPrivate;
   else
      brw = NULL;

   /* According to the glXMakeCurrent() man page: "Pending commands to
    * the previous context, if any, are flushed before it is released."
    * But only flush if we're actually changing contexts.
    */
   if (brw_context(curCtx) && brw_context(curCtx) != brw) {
      _mesa_flush(curCtx);
   }

   if (driContextPriv) {
      struct gl_context *ctx = &brw->ctx;
      struct gl_framebuffer *fb, *readFb;

      if (driDrawPriv == NULL) {
         fb = _mesa_get_incomplete_framebuffer();
      } else {
         fb = driDrawPriv->driverPrivate;
         driContextPriv->dri2.draw_stamp = driDrawPriv->dri2.stamp - 1;
      }

      if (driReadPriv == NULL) {
         readFb = _mesa_get_incomplete_framebuffer();
      } else {
         readFb = driReadPriv->driverPrivate;
         driContextPriv->dri2.read_stamp = driReadPriv->dri2.stamp - 1;
      }

      /* The sRGB workaround changes the renderbuffer's format.  We must change
       * the format before the renderbuffer's miptree gets allocated, otherwise
       * the formats of the renderbuffer and its miptree will differ.
       */
      intel_gles3_srgb_workaround(brw, fb);
      intel_gles3_srgb_workaround(brw, readFb);

      /* If the context viewport hasn't been initialized, force a call out to
       * the loader to get buffers so we have a drawable size for the initial
       * viewport.
       */
      if (!brw->ctx.ViewportInitialized)
         intel_prepare_render(brw);

      _mesa_make_current(ctx, fb, readFb);
   } else {
      _mesa_make_current(NULL, NULL, NULL);
   }

   return true;
}

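/**
 * Downsample or color-resolve the window-system buffers so their contents
 * are valid before the loader flushes or copies them.
 */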
void
intel_resolve_for_dri2_flush(struct brw_context *brw,
                             __DRIdrawable *drawable)
{
   if (brw->gen < 6) {
      /* MSAA and fast color clear are not supported, so don't waste time
       * checking whether a resolve is needed.
       */
      return;
   }

   struct gl_framebuffer *fb = drawable->driverPrivate;
   struct intel_renderbuffer *rb;

   /* Usually, only the back buffer will need to be downsampled.  However,
    * the front buffer will also need it if the user has rendered into it.
    */
   static const gl_buffer_index buffers[2] = {
      BUFFER_BACK_LEFT,
      BUFFER_FRONT_LEFT,
   };

   for (int i = 0; i < 2; ++i) {
      rb = intel_get_renderbuffer(fb, buffers[i]);
      if (rb == NULL || rb->mt == NULL)
         continue;
      if (rb->mt->num_samples <= 1)
         intel_miptree_resolve_color(brw, rb->mt);
      else
         intel_renderbuffer_downsample(brw, rb);
   }
}

static unsigned
intel_bits_per_pixel(const struct intel_renderbuffer *rb)
{
   return _mesa_get_format_bytes(intel_rb_format(rb)) * 8;
}

static void
intel_query_dri2_buffers(struct brw_context *brw,
                         __DRIdrawable *drawable,
                         __DRIbuffer **buffers,
                         int *count);

static void
intel_process_dri2_buffer(struct brw_context *brw,
                          __DRIdrawable *drawable,
                          __DRIbuffer *buffer,
                          struct intel_renderbuffer *rb,
                          const char *buffer_name);

static void
intel_update_image_buffers(struct brw_context *brw, __DRIdrawable *drawable);

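/**
 * Refresh the renderbuffers of a drawable over the DRI2 protocol: query the
 * server for the current buffer set, then attach each returned buffer to
 * the matching renderbuffer.
 */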
static void
intel_update_dri2_buffers(struct brw_context *brw, __DRIdrawable *drawable)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;
   struct intel_renderbuffer *rb;
   __DRIbuffer *buffers = NULL;
   int i, count;
   const char *region_name;

   /* Set this up front, so that in case our buffers get invalidated
    * while we're getting new buffers, we don't clobber the stamp and
    * thus ignore the invalidate.
    */
   drawable->lastStamp = drawable->dri2.stamp;

   if (unlikely(INTEL_DEBUG & DEBUG_DRI))
      fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);

   intel_query_dri2_buffers(brw, drawable, &buffers, &count);

   if (buffers == NULL)
      return;

   for (i = 0; i < count; i++) {
      switch (buffers[i].attachment) {
      case __DRI_BUFFER_FRONT_LEFT:
         rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
         region_name = "dri2 front buffer";
         break;

      case __DRI_BUFFER_FAKE_FRONT_LEFT:
         rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
         region_name = "dri2 fake front buffer";
         break;

      case __DRI_BUFFER_BACK_LEFT:
         rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
         region_name = "dri2 back buffer";
         break;

      case __DRI_BUFFER_DEPTH:
      case __DRI_BUFFER_HIZ:
      case __DRI_BUFFER_DEPTH_STENCIL:
      case __DRI_BUFFER_STENCIL:
      case __DRI_BUFFER_ACCUM:
      default:
         fprintf(stderr,
                 "unhandled buffer attach event, attachment type %d\n",
                 buffers[i].attachment);
         return;
      }

      intel_process_dri2_buffer(brw, drawable, &buffers[i], rb, region_name);
   }
}

void
intel_update_renderbuffers(__DRIcontext *context, __DRIdrawable *drawable)
{
   struct brw_context *brw = context->driverPrivate;
   __DRIscreen *screen = brw->intelScreen->driScrnPriv;

   /* Set this up front, so that in case our buffers get invalidated
    * while we're getting new buffers, we don't clobber the stamp and
    * thus ignore the invalidate.
    */
   drawable->lastStamp = drawable->dri2.stamp;

   if (unlikely(INTEL_DEBUG & DEBUG_DRI))
      fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);

   if (screen->image.loader)
      intel_update_image_buffers(brw, drawable);
   else
      intel_update_dri2_buffers(brw, drawable);

   driUpdateFramebufferSize(&brw->ctx, drawable);
}

/**
 * intel_prepare_render should be called anywhere that current read/drawbuffer
 * state is required.
 */
void
intel_prepare_render(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   __DRIcontext *driContext = brw->driContext;
   __DRIdrawable *drawable;

   drawable = driContext->driDrawablePriv;
   if (drawable && drawable->dri2.stamp != driContext->dri2.draw_stamp) {
      if (drawable->lastStamp != drawable->dri2.stamp)
         intel_update_renderbuffers(driContext, drawable);
      driContext->dri2.draw_stamp = drawable->dri2.stamp;
   }

   drawable = driContext->driReadablePriv;
   if (drawable && drawable->dri2.stamp != driContext->dri2.read_stamp) {
      if (drawable->lastStamp != drawable->dri2.stamp)
         intel_update_renderbuffers(driContext, drawable);
      driContext->dri2.read_stamp = drawable->dri2.stamp;
   }

   /* If we're currently rendering to the front buffer, the rendering
    * that will happen next will probably dirty the front buffer.  So
    * mark it as dirty here.
    */
   if (brw_is_front_buffer_drawing(ctx->DrawBuffer))
      brw->front_buffer_dirty = true;

   /* Wait for the swapbuffers before the one we just emitted, so we
    * don't get too many swaps outstanding for apps that are GPU-heavy
    * but not CPU-heavy.
    *
    * We're using intelDRI2Flush (called from the loader before
    * swapbuffer) and glFlush (for front buffer rendering) as the
    * indicator that a frame is done and then throttle when we get
    * here as we prepare to render the next frame.  At this point the
    * round trips for swap/copy and getting new buffers are done and
    * we'll spend less time waiting on the GPU.
    *
    * Unfortunately, we don't have a handle to the batch containing
    * the swap, and getting our hands on that doesn't seem worth it,
    * so we just use the first batch we emitted after the last swap.
    */
   if (brw->need_throttle && brw->first_post_swapbuffers_batch) {
      if (!brw->disable_throttling)
         drm_intel_bo_wait_rendering(brw->first_post_swapbuffers_batch);
      drm_intel_bo_unreference(brw->first_post_swapbuffers_batch);
      brw->first_post_swapbuffers_batch = NULL;
      brw->need_throttle = false;
   }
}

/**
 * \brief Query DRI2 to obtain a DRIdrawable's buffers.
 *
 * To determine which DRI buffers to request, examine the renderbuffers
 * attached to the drawable's framebuffer.  Then request the buffers with
 * DRI2GetBuffers() or DRI2GetBuffersWithFormat().
 *
 * This is called from intel_update_renderbuffers().
 *
 * \param drawable      Drawable whose buffers are queried.
 * \param buffers       [out] List of buffers returned by DRI2 query.
 * \param buffer_count  [out] Number of buffers returned.
 *
 * \see intel_update_renderbuffers()
 * \see DRI2GetBuffers()
 * \see DRI2GetBuffersWithFormat()
 */
static void
intel_query_dri2_buffers(struct brw_context *brw,
                         __DRIdrawable *drawable,
                         __DRIbuffer **buffers,
                         int *buffer_count)
{
   __DRIscreen *screen = brw->intelScreen->driScrnPriv;
   struct gl_framebuffer *fb = drawable->driverPrivate;
   int i = 0;
   unsigned attachments[8];

   struct intel_renderbuffer *front_rb;
   struct intel_renderbuffer *back_rb;

   front_rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
   back_rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);

   memset(attachments, 0, sizeof(attachments));
   if ((brw_is_front_buffer_drawing(fb) ||
        brw_is_front_buffer_reading(fb) ||
        !back_rb) && front_rb) {
      /* If a fake front buffer is in use, then querying for
       * __DRI_BUFFER_FRONT_LEFT will cause the server to copy the image from
       * the real front buffer to the fake front buffer.  So before doing the
       * query, we need to make sure all the pending drawing has landed in the
       * real front buffer.
       */
      intel_batchbuffer_flush(brw);
      intel_flush_front(&brw->ctx);

      attachments[i++] = __DRI_BUFFER_FRONT_LEFT;
      attachments[i++] = intel_bits_per_pixel(front_rb);
   } else if (front_rb && brw->front_buffer_dirty) {
      /* We have pending front buffer rendering, but we aren't querying for a
       * front buffer.  If the front buffer we have is a fake front buffer,
       * the X server is going to throw it away when it processes the query.
       * So before doing the query, make sure all the pending drawing has
       * landed in the real front buffer.
       */
      intel_batchbuffer_flush(brw);
      intel_flush_front(&brw->ctx);
   }

   if (back_rb) {
      attachments[i++] = __DRI_BUFFER_BACK_LEFT;
      attachments[i++] = intel_bits_per_pixel(back_rb);
   }

   assert(i <= ARRAY_SIZE(attachments));

   *buffers = screen->dri2.loader->getBuffersWithFormat(drawable,
                                                        &drawable->w,
                                                        &drawable->h,
                                                        attachments, i / 2,
                                                        buffer_count,
                                                        drawable->loaderPrivate);
}

/**
 * \brief Assign a DRI buffer's DRM region to a renderbuffer.
 *
 * This is called from intel_update_renderbuffers().
 *
 * \par Note:
 *    DRI buffers whose attachment point is DRI2BufferStencil or
 *    DRI2BufferDepthStencil are handled as special cases.
 *
 * \param buffer_name is a human readable name, such as "dri2 front buffer",
 *        that is passed to drm_intel_bo_gem_create_from_name().
 *
 * \see intel_update_renderbuffers()
 */
static void
intel_process_dri2_buffer(struct brw_context *brw,
                          __DRIdrawable *drawable,
                          __DRIbuffer *buffer,
                          struct intel_renderbuffer *rb,
                          const char *buffer_name)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;
   drm_intel_bo *bo;

   if (!rb)
      return;

   unsigned num_samples = rb->Base.Base.NumSamples;

   /* We try to avoid closing and reopening the same BO name, because the first
    * use of a mapping of the buffer involves a bunch of page faulting which is
    * moderately expensive.
    */
   struct intel_mipmap_tree *last_mt;
   if (num_samples == 0)
      last_mt = rb->mt;
   else
      last_mt = rb->singlesample_mt;

   uint32_t old_name = 0;
   if (last_mt) {
      /* The bo already has a name because the miptree was created by a
       * previous call to intel_process_dri2_buffer().  If a bo already has a
       * name, then drm_intel_bo_flink() is a low-cost getter.  It does not
       * create a new name.
       */
      drm_intel_bo_flink(last_mt->bo, &old_name);
   }

   if (old_name == buffer->name)
      return;

   if (unlikely(INTEL_DEBUG & DEBUG_DRI)) {
      fprintf(stderr,
              "attaching buffer %d, at %d, cpp %d, pitch %d\n",
              buffer->name, buffer->attachment,
              buffer->cpp, buffer->pitch);
   }

   intel_miptree_release(&rb->mt);
   bo = drm_intel_bo_gem_create_from_name(brw->bufmgr, buffer_name,
                                          buffer->name);
   if (!bo) {
      fprintf(stderr,
              "Failed to open BO for returned DRI2 buffer "
              "(%dx%d, %s, named %d).\n"
              "This is likely a bug in the X Server that will lead to a "
              "crash soon.\n",
              drawable->w, drawable->h, buffer_name, buffer->name);
      return;
   }

   intel_update_winsys_renderbuffer_miptree(brw, rb, bo,
                                            drawable->w, drawable->h,
                                            buffer->pitch);

   if (brw_is_front_buffer_drawing(fb) &&
       (buffer->attachment == __DRI_BUFFER_FRONT_LEFT ||
        buffer->attachment == __DRI_BUFFER_FAKE_FRONT_LEFT) &&
       rb->Base.Base.NumSamples > 1) {
      intel_renderbuffer_upsample(brw, rb);
   }

   assert(rb->mt);

   drm_intel_bo_unreference(bo);
}

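/**
 * \brief Bind a renderbuffer to a __DRIimage's buffer object.
 *
 * If the renderbuffer's miptree already wraps the image's BO, nothing needs
 * to be done; otherwise rebuild the winsys miptree around the new BO and,
 * for front-buffer drawing into a multisampled buffer, upsample the
 * contents.
 *
 * This is called from intel_update_image_buffers().
 */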
static void
intel_update_image_buffer(struct brw_context *intel,
                          __DRIdrawable *drawable,
                          struct intel_renderbuffer *rb,
                          __DRIimage *buffer,
                          enum __DRIimageBufferMask buffer_type)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;

   if (!rb || !buffer->bo)
      return;

   unsigned num_samples = rb->Base.Base.NumSamples;

   /* Check and see if we're already bound to the right
    * buffer object
    */
   struct intel_mipmap_tree *last_mt;
   if (num_samples == 0)
      last_mt = rb->mt;
   else
      last_mt = rb->singlesample_mt;

   if (last_mt && last_mt->bo == buffer->bo)
      return;

   intel_update_winsys_renderbuffer_miptree(intel, rb, buffer->bo,
                                            buffer->width, buffer->height,
                                            buffer->pitch);

   if (brw_is_front_buffer_drawing(fb) &&
       buffer_type == __DRI_IMAGE_BUFFER_FRONT &&
       rb->Base.Base.NumSamples > 1) {
      intel_renderbuffer_upsample(intel, rb);
   }
}

/**
 * \brief Query the DRI image loader to obtain a DRIdrawable's buffers.
 *
 * To determine which DRI buffers to request, examine the renderbuffers
 * attached to the drawable's framebuffer.  Then request the buffers from
 * the image loader.
 *
 * This is called from intel_update_renderbuffers().
 *
 * \param drawable  Drawable whose buffers are queried.
 *
 * \see intel_update_renderbuffers()
 */
static void
intel_update_image_buffers(struct brw_context *brw, __DRIdrawable *drawable)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;
   __DRIscreen *screen = brw->intelScreen->driScrnPriv;
   struct intel_renderbuffer *front_rb;
   struct intel_renderbuffer *back_rb;
   struct __DRIimageList images;
   unsigned int format;
   uint32_t buffer_mask = 0;

   front_rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
   back_rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);

   if (back_rb)
      format = intel_rb_format(back_rb);
   else if (front_rb)
      format = intel_rb_format(front_rb);
   else
      return;

   if (front_rb && (brw_is_front_buffer_drawing(fb) ||
                    brw_is_front_buffer_reading(fb) || !back_rb)) {
      buffer_mask |= __DRI_IMAGE_BUFFER_FRONT;
   }

   if (back_rb)
      buffer_mask |= __DRI_IMAGE_BUFFER_BACK;

   (*screen->image.loader->getBuffers) (drawable,
                                        driGLFormatToImageFormat(format),
                                        &drawable->dri2.stamp,
                                        drawable->loaderPrivate,
                                        buffer_mask,
                                        &images);

   if (images.image_mask & __DRI_IMAGE_BUFFER_FRONT) {
      drawable->w = images.front->width;
      drawable->h = images.front->height;
      intel_update_image_buffer(brw,
                                drawable,
                                front_rb,
                                images.front,
                                __DRI_IMAGE_BUFFER_FRONT);
   }
   if (images.image_mask & __DRI_IMAGE_BUFFER_BACK) {
      drawable->w = images.back->width;
      drawable->h = images.back->height;
      intel_update_image_buffer(brw,
                                drawable,
                                back_rb,
                                images.back,
                                __DRI_IMAGE_BUFFER_BACK);
   }
}