i965: enable ARB_framebuffer_no_attachments for Gen7+
[mesa.git] src/mesa/drivers/dri/i965/brw_context.c
/*
 Copyright 2003 VMware, Inc.
 Copyright (C) Intel Corp. 2006.  All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */

#include "main/api_exec.h"
#include "main/context.h"
#include "main/fbobject.h"
#include "main/extensions.h"
#include "main/imports.h"
#include "main/macros.h"
#include "main/points.h"
#include "main/version.h"
#include "main/vtxfmt.h"
#include "main/texobj.h"

#include "vbo/vbo_context.h"

#include "drivers/common/driverfuncs.h"
#include "drivers/common/meta.h"
#include "utils.h"

#include "brw_context.h"
#include "brw_defines.h"
#include "brw_draw.h"
#include "brw_state.h"

#include "intel_batchbuffer.h"
#include "intel_buffer_objects.h"
#include "intel_buffers.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
#include "intel_pixel.h"
#include "intel_image.h"
#include "intel_tex.h"
#include "intel_tex_obj.h"

#include "swrast_setup/swrast_setup.h"
#include "tnl/tnl.h"
#include "tnl/t_pipeline.h"
#include "util/ralloc.h"

#include "glsl/nir/nir.h"

/***************************************
 * Mesa's Driver Functions
 ***************************************/

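/**
 * Driver hook for glGetInternalformativ(GL_SAMPLES): report the MSAA sample
 * counts supported by each hardware generation, in descending order.
 */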
static size_t
brw_query_samples_for_format(struct gl_context *ctx, GLenum target,
                             GLenum internalFormat, int samples[16])
{
   struct brw_context *brw = brw_context(ctx);

   (void) target;

   switch (brw->gen) {
   case 9:
   case 8:
      samples[0] = 8;
      samples[1] = 4;
      samples[2] = 2;
      return 3;

   case 7:
      samples[0] = 8;
      samples[1] = 4;
      return 2;

   case 6:
      samples[0] = 4;
      return 1;

   default:
      assert(brw->gen < 6);
      samples[0] = 1;
      return 1;
   }
}

const char *const brw_vendor_string = "Intel Open Source Technology Center";

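/**
 * Build the GL_RENDERER string by looking up the marketing name for this
 * PCI device ID in pci_ids/i965_pci_ids.h, falling back to "Unknown Intel
 * Chipset" for IDs missing from the table.
 */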
const char *
brw_get_renderer_string(unsigned deviceID)
{
   const char *chipset;
   static char buffer[128];

   switch (deviceID) {
#undef CHIPSET
#define CHIPSET(id, symbol, str) case id: chipset = str; break;
#include "pci_ids/i965_pci_ids.h"
   default:
      chipset = "Unknown Intel Chipset";
      break;
   }

   (void) driGetRendererString(buffer, chipset, 0);
   return buffer;
}

static const GLubyte *
intel_get_string(struct gl_context * ctx, GLenum name)
{
   const struct brw_context *const brw = brw_context(ctx);

   switch (name) {
   case GL_VENDOR:
      return (GLubyte *) brw_vendor_string;

   case GL_RENDERER:
      return
         (GLubyte *) brw_get_renderer_string(brw->intelScreen->deviceID);

   default:
      return NULL;
   }
}

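/* Viewport driver hook, installed only when the loader lacks DRI2
 * invalidate support (see brw_init_driver_functions): treats a viewport
 * change on a winsys framebuffer as a resize hint and invalidates the
 * drawables so fresh buffers are fetched.
 */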
static void
intel_viewport(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);
   __DRIcontext *driContext = brw->driContext;

   if (_mesa_is_winsys_fbo(ctx->DrawBuffer)) {
      dri2InvalidateDrawable(driContext->driDrawablePriv);
      dri2InvalidateDrawable(driContext->driReadablePriv);
   }
}

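/* UpdateState driver hook: propagate dirty-state notifications to the
 * swrast and vbo modules, then perform the HiZ and color resolves that
 * must happen before any currently bound textures are sampled.
 */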
static void
intel_update_state(struct gl_context * ctx, GLuint new_state)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_texture_object *tex_obj;
   struct intel_renderbuffer *depth_irb;

   if (ctx->swrast_context)
      _swrast_InvalidateState(ctx, new_state);
   _vbo_InvalidateState(ctx, new_state);

   brw->NewGLState |= new_state;

   _mesa_unlock_context_textures(ctx);

   /* Resolve the depth buffer's HiZ buffer. */
   depth_irb = intel_get_renderbuffer(ctx->DrawBuffer, BUFFER_DEPTH);
   if (depth_irb)
      intel_renderbuffer_resolve_hiz(brw, depth_irb);

   /* Resolve the depth and color buffers of each enabled texture, and flush
    * the render cache for any texture that was rendered to.
    */
   int maxEnabledUnit = ctx->Texture._MaxEnabledTexImageUnit;
   for (int i = 0; i <= maxEnabledUnit; i++) {
      if (!ctx->Texture.Unit[i]._Current)
         continue;
      tex_obj = intel_texture_object(ctx->Texture.Unit[i]._Current);
      if (!tex_obj || !tex_obj->mt)
         continue;
      intel_miptree_all_slices_resolve_depth(brw, tex_obj->mt);
      intel_miptree_resolve_color(brw, tex_obj->mt);
      brw_render_cache_set_check_flush(brw, tex_obj->mt->bo);
   }

   _mesa_lock_context_textures(ctx);
}

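/* Pick whichever loader interface (image or classic DRI2) provides the
 * flushFrontBuffer entry point for this screen.
 */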
#define flushFront(screen)      ((screen)->image.loader ? (screen)->image.loader->flushFrontBuffer : (screen)->dri2.loader->flushFrontBuffer)

static void
intel_flush_front(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);
   __DRIcontext *driContext = brw->driContext;
   __DRIdrawable *driDrawable = driContext->driDrawablePriv;
   __DRIscreen *const screen = brw->intelScreen->driScrnPriv;

   if (brw->front_buffer_dirty && _mesa_is_winsys_fbo(ctx->DrawBuffer)) {
      if (flushFront(screen) && driDrawable &&
          driDrawable->loaderPrivate) {

         /* Resolve before flushing FAKE_FRONT_LEFT to FRONT_LEFT.
          *
          * This potentially resolves both the front and back buffers.
          * Resolving the back buffer is unnecessary, but it harms nothing
          * except performance.  And no one cares about front-buffer render
          * performance.
          */
         intel_resolve_for_dri2_flush(brw, driDrawable);
         intel_batchbuffer_flush(brw);

         flushFront(screen)(driDrawable, driDrawable->loaderPrivate);

         /* We set the dirty bit in intel_prepare_render() if we're
          * front buffer rendering once we get there.
          */
         brw->front_buffer_dirty = false;
      }
   }
}

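/**
 * Flush driver hook: submit the current batchbuffer, copy any dirty
 * fake-front rendering to the real front buffer, and arm the flush-based
 * throttle.
 */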
static void
intel_glFlush(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);

   intel_batchbuffer_flush(brw);
   intel_flush_front(ctx);

   brw->need_flush_throttle = true;
}

static void
intel_finish(struct gl_context * ctx)
{
   struct brw_context *brw = brw_context(ctx);

   intel_glFlush(ctx);

   if (brw->batch.last_bo)
      drm_intel_bo_wait_rendering(brw->batch.last_bo);
}

static void
brw_init_driver_functions(struct brw_context *brw,
                          struct dd_function_table *functions)
{
   _mesa_init_driver_functions(functions);

   /* GLX uses DRI2 invalidate events to handle window resizing.
    * Unfortunately, EGL does not - libEGL is written in XCB (not Xlib),
    * which doesn't provide a mechanism for snooping the event queues.
    *
    * So EGL still relies on viewport hacks to handle window resizing.
    * This should go away with DRI3000.
    */
   if (!brw->driContext->driScreenPriv->dri2.useInvalidate)
      functions->Viewport = intel_viewport;

   functions->Flush = intel_glFlush;
   functions->Finish = intel_finish;
   functions->GetString = intel_get_string;
   functions->UpdateState = intel_update_state;

   intelInitTextureFuncs(functions);
   intelInitTextureImageFuncs(functions);
   intelInitTextureSubImageFuncs(functions);
   intelInitTextureCopyImageFuncs(functions);
   intelInitCopyImageFuncs(functions);
   intelInitClearFuncs(functions);
   intelInitBufferFuncs(functions);
   intelInitPixelFuncs(functions);
   intelInitBufferObjectFuncs(functions);
   intel_init_syncobj_functions(functions);
   brw_init_object_purgeable_functions(functions);

   brwInitFragProgFuncs( functions );
   brw_init_common_queryobj_functions(functions);
   if (brw->gen >= 6)
      gen6_init_queryobj_functions(functions);
   else
      gen4_init_queryobj_functions(functions);
   brw_init_compute_functions(functions);
   if (brw->gen >= 7)
      brw_init_conditional_render_functions(functions);

   functions->QuerySamplesForFormat = brw_query_samples_for_format;

   functions->NewTransformFeedback = brw_new_transform_feedback;
   functions->DeleteTransformFeedback = brw_delete_transform_feedback;
   functions->GetTransformFeedbackVertexCount =
      brw_get_transform_feedback_vertex_count;
   if (brw->gen >= 7) {
      functions->BeginTransformFeedback = gen7_begin_transform_feedback;
      functions->EndTransformFeedback = gen7_end_transform_feedback;
      functions->PauseTransformFeedback = gen7_pause_transform_feedback;
      functions->ResumeTransformFeedback = gen7_resume_transform_feedback;
   } else {
      functions->BeginTransformFeedback = brw_begin_transform_feedback;
      functions->EndTransformFeedback = brw_end_transform_feedback;
   }

   if (brw->gen >= 6)
      functions->GetSamplePosition = gen6_get_sample_position;
}

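/**
 * Fill in ctx->Const with the limits of the hardware generation driving
 * this context; Mesa derives the GL implementation-defined constants it
 * advertises from these values.
 */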
static void
brw_initialize_context_constants(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   unsigned max_samplers =
      brw->gen >= 8 || brw->is_haswell ? BRW_MAX_TEX_UNIT : 16;

   ctx->Const.QueryCounterBits.Timestamp = 36;

   ctx->Const.StripTextureBorder = true;

   ctx->Const.MaxDualSourceDrawBuffers = 1;
   ctx->Const.MaxDrawBuffers = BRW_MAX_DRAW_BUFFERS;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits = max_samplers;
   ctx->Const.MaxTextureCoordUnits = 8; /* Mesa limit */
   ctx->Const.MaxTextureUnits =
      MIN2(ctx->Const.MaxTextureCoordUnits,
           ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits);
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxTextureImageUnits = max_samplers;
   if (brw->gen >= 6)
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxTextureImageUnits = max_samplers;
   else
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxTextureImageUnits = 0;
   if (_mesa_extension_override_enables.ARB_compute_shader) {
      ctx->Const.Program[MESA_SHADER_COMPUTE].MaxTextureImageUnits = BRW_MAX_TEX_UNIT;
      ctx->Const.MaxUniformBufferBindings += 12;
   } else {
      ctx->Const.Program[MESA_SHADER_COMPUTE].MaxTextureImageUnits = 0;
   }
   ctx->Const.MaxCombinedTextureImageUnits =
      ctx->Const.Program[MESA_SHADER_VERTEX].MaxTextureImageUnits +
      ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits +
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxTextureImageUnits +
      ctx->Const.Program[MESA_SHADER_COMPUTE].MaxTextureImageUnits;

   ctx->Const.MaxTextureLevels = 14; /* 8192 */
   if (ctx->Const.MaxTextureLevels > MAX_TEXTURE_LEVELS)
      ctx->Const.MaxTextureLevels = MAX_TEXTURE_LEVELS;
   ctx->Const.Max3DTextureLevels = 12; /* 2048 */
   ctx->Const.MaxCubeTextureLevels = 14; /* 8192 */
   ctx->Const.MaxTextureMbytes = 1536;

   if (brw->gen >= 7)
      ctx->Const.MaxArrayTextureLayers = 2048;
   else
      ctx->Const.MaxArrayTextureLayers = 512;

   ctx->Const.MaxTextureRectSize = 1 << 12;

   ctx->Const.MaxTextureMaxAnisotropy = 16.0;

   ctx->Const.MaxRenderbufferSize = 8192;

   /* Hardware only supports a limited number of transform feedback buffers.
    * So we need to override the Mesa default (which is based only on software
    * limits).
    */
   ctx->Const.MaxTransformFeedbackBuffers = BRW_MAX_SOL_BUFFERS;

   /* On Gen6, in the worst case, we use up one binding table entry per
    * transform feedback component (see comments above the definition of
    * BRW_MAX_SOL_BINDINGS, in brw_context.h), so we need to advertise a value
    * for MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS equal to
    * BRW_MAX_SOL_BINDINGS.
    *
    * In "separate components" mode, we need to divide this value by
    * BRW_MAX_SOL_BUFFERS, so that the total number of binding table entries
    * used up by all buffers will not exceed BRW_MAX_SOL_BINDINGS.
    */
   ctx->Const.MaxTransformFeedbackInterleavedComponents = BRW_MAX_SOL_BINDINGS;
   ctx->Const.MaxTransformFeedbackSeparateComponents =
      BRW_MAX_SOL_BINDINGS / BRW_MAX_SOL_BUFFERS;

   ctx->Const.AlwaysUseGetTransformFeedbackVertexCount = true;

   int max_samples;
   const int *msaa_modes = intel_supported_msaa_modes(brw->intelScreen);
   const int clamp_max_samples =
      driQueryOptioni(&brw->optionCache, "clamp_max_samples");

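   /* A negative clamp_max_samples disables clamping: advertise the largest
    * supported mode.  Otherwise pick the largest mode that fits under the
    * clamp; e.g., assuming msaa_modes is {8, 4, 0} and clamp_max_samples
    * is 6, we would advertise 4.
    */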
   if (clamp_max_samples < 0) {
      max_samples = msaa_modes[0];
   } else {
      /* Select the largest supported MSAA mode that does not exceed
       * clamp_max_samples.
       */
      max_samples = 0;
      for (int i = 0; msaa_modes[i] != 0; ++i) {
         if (msaa_modes[i] <= clamp_max_samples) {
            max_samples = msaa_modes[i];
            break;
         }
      }
   }

   ctx->Const.MaxSamples = max_samples;
   ctx->Const.MaxColorTextureSamples = max_samples;
   ctx->Const.MaxDepthTextureSamples = max_samples;
   ctx->Const.MaxIntegerSamples = max_samples;

   /* gen6_set_sample_maps() sets the SampleMap{2,4,8}x variables, which are
    * used to map indices of a rectangular grid to sample numbers within a
    * pixel.  These variables are used by the implementation of the
    * GL_EXT_framebuffer_multisample_blit_scaled extension.  For more details
    * see the comment above the gen6_set_sample_maps() definition.
    */
   gen6_set_sample_maps(ctx);

   if (brw->gen >= 7)
      ctx->Const.MaxProgramTextureGatherComponents = 4;
   else if (brw->gen == 6)
      ctx->Const.MaxProgramTextureGatherComponents = 1;

   ctx->Const.MinLineWidth = 1.0;
   ctx->Const.MinLineWidthAA = 1.0;
   if (brw->gen >= 6) {
      ctx->Const.MaxLineWidth = 7.375;
      ctx->Const.MaxLineWidthAA = 7.375;
      ctx->Const.LineWidthGranularity = 0.125;
   } else {
      ctx->Const.MaxLineWidth = 7.0;
      ctx->Const.MaxLineWidthAA = 7.0;
      ctx->Const.LineWidthGranularity = 0.5;
   }

   /* For non-antialiased lines, we have to round the line width to the
    * nearest whole number.  Make sure that we don't advertise a line
    * width that, when rounded, will be beyond the actual hardware
    * maximum.
    */
   assert(roundf(ctx->Const.MaxLineWidth) <= ctx->Const.MaxLineWidth);

   ctx->Const.MinPointSize = 1.0;
   ctx->Const.MinPointSizeAA = 1.0;
   ctx->Const.MaxPointSize = 255.0;
   ctx->Const.MaxPointSizeAA = 255.0;
   ctx->Const.PointSizeGranularity = 1.0;

   if (brw->gen >= 5 || brw->is_g4x)
      ctx->Const.MaxClipPlanes = 8;

   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeInstructions = 16 * 1024;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxAluInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxTexInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxTexIndirections = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAluInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTexInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTexIndirections = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAttribs = 16;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTemps = 256;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAddressRegs = 1;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeParameters = 1024;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxEnvParams =
      MIN2(ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeParameters,
           ctx->Const.Program[MESA_SHADER_VERTEX].MaxEnvParams);

   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeInstructions = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAluInstructions = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTexInstructions = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTexIndirections = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAttribs = 12;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTemps = 256;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAddressRegs = 0;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeParameters = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxEnvParams =
      MIN2(ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeParameters,
           ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxEnvParams);

   /* Fragment shaders use real, 32-bit twos-complement integers for all
    * integer types.
    */
   ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.RangeMin = 31;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.RangeMax = 30;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.Precision = 0;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].HighInt = ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MediumInt = ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt;

   ctx->Const.Program[MESA_SHADER_VERTEX].LowInt.RangeMin = 31;
   ctx->Const.Program[MESA_SHADER_VERTEX].LowInt.RangeMax = 30;
   ctx->Const.Program[MESA_SHADER_VERTEX].LowInt.Precision = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].HighInt = ctx->Const.Program[MESA_SHADER_VERTEX].LowInt;
   ctx->Const.Program[MESA_SHADER_VERTEX].MediumInt = ctx->Const.Program[MESA_SHADER_VERTEX].LowInt;

   if (brw->gen >= 7) {
      ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxAtomicCounters = MAX_ATOMIC_COUNTERS;
      ctx->Const.Program[MESA_SHADER_VERTEX].MaxAtomicCounters = MAX_ATOMIC_COUNTERS;
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxAtomicCounters = MAX_ATOMIC_COUNTERS;
      ctx->Const.Program[MESA_SHADER_COMPUTE].MaxAtomicCounters = MAX_ATOMIC_COUNTERS;
      ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxAtomicBuffers = BRW_MAX_ABO;
      ctx->Const.Program[MESA_SHADER_VERTEX].MaxAtomicBuffers = BRW_MAX_ABO;
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxAtomicBuffers = BRW_MAX_ABO;
      ctx->Const.Program[MESA_SHADER_COMPUTE].MaxAtomicBuffers = BRW_MAX_ABO;
      ctx->Const.MaxCombinedAtomicBuffers = 3 * BRW_MAX_ABO;
   }

   /* Gen6 converts quads to polygons at the beginning of the 3D pipeline,
    * but we're not sure how it actually handles the vertex order, which
    * affects the provoking-vertex decision.  Always use the last-vertex
    * convention for quad primitives, which works as expected for now.
    */
   if (brw->gen >= 6)
      ctx->Const.QuadsFollowProvokingVertexConvention = false;

   ctx->Const.NativeIntegers = true;
   ctx->Const.VertexID_is_zero_based = true;

   /* Regarding the CMP instruction, the Ivybridge PRM says:
    *
    *   "For each enabled channel 0b or 1b is assigned to the appropriate flag
    *    bit and 0/all zeros or all ones (e.g, byte 0xFF, word 0xFFFF, DWord
    *    0xFFFFFFFF) is assigned to dst."
    *
    * but PRMs for earlier generations say
    *
    *   "In dword format, one GRF may store up to 8 results.  When the register
    *    is used later as a vector of Booleans, as only LSB at each channel
    *    contains meaning [sic] data, software should make sure all higher bits
    *    are masked out (e.g. by 'and-ing' an [sic] 0x01 constant)."
    *
    * We select the representation of a true boolean uniform to be ~0, and fix
    * the results of Gen <= 5 CMP instructions with -(result & 1).
    */
   ctx->Const.UniformBooleanTrue = ~0;

   /* From the gen4 PRM, volume 4 page 127:
    *
    *    "For SURFTYPE_BUFFER non-rendertarget surfaces, this field specifies
    *     the base address of the first element of the surface, computed in
    *     software by adding the surface base address to the byte offset of
    *     the element in the buffer."
    *
    * However, unaligned accesses are slower, so enforce buffer alignment.
    */
   ctx->Const.UniformBufferOffsetAlignment = 16;
   ctx->Const.TextureBufferOffsetAlignment = 16;
   ctx->Const.MaxTextureBufferSize = 128 * 1024 * 1024;

   if (brw->gen >= 6) {
      ctx->Const.MaxVarying = 32;
      ctx->Const.Program[MESA_SHADER_VERTEX].MaxOutputComponents = 128;
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxInputComponents = 64;
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxOutputComponents = 128;
      ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxInputComponents = 128;
   }

   static const nir_shader_compiler_options nir_options = {
      .native_integers = true,
      /* To allow better CSE at the NIR level, we tell NIR to split all ffma
       * instructions during opt_algebraic and then re-combine them in a
       * later step.
       */
      .lower_ffma = true,
      .lower_sub = true,
   };

   /* We want the GLSL compiler to emit code that uses condition codes */
   for (int i = 0; i < MESA_SHADER_STAGES; i++) {
      ctx->Const.ShaderCompilerOptions[i].MaxIfDepth = brw->gen < 6 ? 16 : UINT_MAX;
      ctx->Const.ShaderCompilerOptions[i].EmitCondCodes = true;
      ctx->Const.ShaderCompilerOptions[i].EmitNoNoise = true;
      ctx->Const.ShaderCompilerOptions[i].EmitNoMainReturn = true;
      ctx->Const.ShaderCompilerOptions[i].EmitNoIndirectInput = true;
      ctx->Const.ShaderCompilerOptions[i].EmitNoIndirectOutput =
         (i == MESA_SHADER_FRAGMENT);
      ctx->Const.ShaderCompilerOptions[i].EmitNoIndirectTemp =
         (i == MESA_SHADER_FRAGMENT);
      ctx->Const.ShaderCompilerOptions[i].EmitNoIndirectUniform = false;
      ctx->Const.ShaderCompilerOptions[i].LowerClipDistance = true;
   }

   ctx->Const.ShaderCompilerOptions[MESA_SHADER_VERTEX].OptimizeForAOS = true;
   ctx->Const.ShaderCompilerOptions[MESA_SHADER_GEOMETRY].OptimizeForAOS = true;

   if (brw->scalar_vs) {
      /* If we're using the scalar backend for vertex shaders, we need to
       * configure these accordingly.
       */
      ctx->Const.ShaderCompilerOptions[MESA_SHADER_VERTEX].EmitNoIndirectOutput = true;
      ctx->Const.ShaderCompilerOptions[MESA_SHADER_VERTEX].EmitNoIndirectTemp = true;
      ctx->Const.ShaderCompilerOptions[MESA_SHADER_VERTEX].OptimizeForAOS = false;

      ctx->Const.ShaderCompilerOptions[MESA_SHADER_VERTEX].NirOptions = &nir_options;
   }

   ctx->Const.ShaderCompilerOptions[MESA_SHADER_FRAGMENT].NirOptions = &nir_options;
   ctx->Const.ShaderCompilerOptions[MESA_SHADER_COMPUTE].NirOptions = &nir_options;

   /* ARB_viewport_array */
   if (brw->gen >= 6 && ctx->API == API_OPENGL_CORE) {
      ctx->Const.MaxViewports = GEN6_NUM_VIEWPORTS;
      ctx->Const.ViewportSubpixelBits = 0;

      /* Cast to float before negating because MaxViewportWidth is unsigned.
       */
      ctx->Const.ViewportBounds.Min = -(float)ctx->Const.MaxViewportWidth;
      ctx->Const.ViewportBounds.Max = ctx->Const.MaxViewportWidth;
   }

   /* ARB_gpu_shader5 */
   if (brw->gen >= 7)
      ctx->Const.MaxVertexStreams = MIN2(4, MAX_VERTEX_STREAMS);

   /* ARB_framebuffer_no_attachments */
   ctx->Const.MaxFramebufferWidth = ctx->Const.MaxViewportWidth;
   ctx->Const.MaxFramebufferHeight = ctx->Const.MaxViewportHeight;
   ctx->Const.MaxFramebufferLayers = ctx->Const.MaxArrayTextureLayers;
   ctx->Const.MaxFramebufferSamples = max_samples;
}

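/**
 * Derive the compute-shader workgroup limits from the SIMD width we expect
 * to compile for and this generation's thread count (brw->max_cs_threads).
 */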
static void
brw_adjust_cs_context_constants(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   /* For ES, we set these constants based on SIMD8.
    *
    * TODO: Once we can always generate SIMD16, we should update this.
    *
    * For GL, we assume we can generate a SIMD16 program, but this currently
    * is not always true.  This allows us to run more test cases, and will be
    * required based on desktop GL compute shader requirements.
    */
   const int simd_size = ctx->API == API_OPENGL_CORE ? 16 : 8;

   const uint32_t max_invocations = simd_size * brw->max_cs_threads;
   ctx->Const.MaxComputeWorkGroupSize[0] = max_invocations;
   ctx->Const.MaxComputeWorkGroupSize[1] = max_invocations;
   ctx->Const.MaxComputeWorkGroupSize[2] = max_invocations;
   ctx->Const.MaxComputeWorkGroupInvocations = max_invocations;
}

/**
 * Process driconf (drirc) options, setting appropriate context flags.
 *
 * intelInitExtensions still pokes at optionCache directly, in order to
 * avoid advertising various extensions.  No flags are set, so it makes
 * sense to continue doing that there.
 */
static void
brw_process_driconf_options(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   driOptionCache *options = &brw->optionCache;
   driParseConfigFiles(options, &brw->intelScreen->optionCache,
                       brw->driContext->driScreenPriv->myNum, "i965");

   int bo_reuse_mode = driQueryOptioni(options, "bo_reuse");
   switch (bo_reuse_mode) {
   case DRI_CONF_BO_REUSE_DISABLED:
      break;
   case DRI_CONF_BO_REUSE_ALL:
      intel_bufmgr_gem_enable_reuse(brw->bufmgr);
      break;
   }

   if (!driQueryOptionb(options, "hiz")) {
      brw->has_hiz = false;
      /* On gen6, you can only do separate stencil with HiZ. */
      if (brw->gen == 6)
         brw->has_separate_stencil = false;
   }

   if (driQueryOptionb(options, "always_flush_batch")) {
      fprintf(stderr, "flushing batchbuffer before/after each draw call\n");
      brw->always_flush_batch = true;
   }

   if (driQueryOptionb(options, "always_flush_cache")) {
      fprintf(stderr, "flushing GPU caches before/after each draw call\n");
      brw->always_flush_cache = true;
   }

   if (driQueryOptionb(options, "disable_throttling")) {
      fprintf(stderr, "disabling flush throttling\n");
      brw->disable_throttling = true;
   }

   brw->precompile = driQueryOptionb(&brw->optionCache, "shader_precompile");

   ctx->Const.ForceGLSLExtensionsWarn =
      driQueryOptionb(options, "force_glsl_extensions_warn");

   ctx->Const.DisableGLSLLineContinuations =
      driQueryOptionb(options, "disable_glsl_line_continuations");

   ctx->Const.AllowGLSLExtensionDirectiveMidShader =
      driQueryOptionb(options, "allow_glsl_extension_directive_midshader");
}

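/**
 * DRI createContext entry point: validates the requested context flags,
 * allocates and fills in the brw_context (device info, vtables, driver
 * functions, constants), and creates the batchbuffer and, on Gen6+, a
 * hardware context.
 */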
GLboolean
brwCreateContext(gl_api api,
                 const struct gl_config *mesaVis,
                 __DRIcontext *driContextPriv,
                 unsigned major_version,
                 unsigned minor_version,
                 uint32_t flags,
                 bool notify_reset,
                 unsigned *dri_ctx_error,
                 void *sharedContextPrivate)
{
   __DRIscreen *sPriv = driContextPriv->driScreenPriv;
   struct gl_context *shareCtx = (struct gl_context *) sharedContextPrivate;
   struct intel_screen *screen = sPriv->driverPrivate;
   const struct brw_device_info *devinfo = screen->devinfo;
   struct dd_function_table functions;

   /* Only allow the __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS flag if the kernel
    * provides us with context reset notifications.
    */
   uint32_t allowed_flags = __DRI_CTX_FLAG_DEBUG
                          | __DRI_CTX_FLAG_FORWARD_COMPATIBLE;

   if (screen->has_context_reset_notification)
      allowed_flags |= __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS;

   if (flags & ~allowed_flags) {
      *dri_ctx_error = __DRI_CTX_ERROR_UNKNOWN_FLAG;
      return false;
   }

   struct brw_context *brw = rzalloc(NULL, struct brw_context);
   if (!brw) {
      fprintf(stderr, "%s: failed to alloc context\n", __func__);
      *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
      return false;
   }

   driContextPriv->driverPrivate = brw;
   brw->driContext = driContextPriv;
   brw->intelScreen = screen;
   brw->bufmgr = screen->bufmgr;

   brw->gen = devinfo->gen;
   brw->gt = devinfo->gt;
   brw->is_g4x = devinfo->is_g4x;
   brw->is_baytrail = devinfo->is_baytrail;
   brw->is_haswell = devinfo->is_haswell;
   brw->is_cherryview = devinfo->is_cherryview;
   brw->has_llc = devinfo->has_llc;
   brw->has_hiz = devinfo->has_hiz_and_separate_stencil;
   brw->has_separate_stencil = devinfo->has_hiz_and_separate_stencil;
   brw->has_pln = devinfo->has_pln;
   brw->has_compr4 = devinfo->has_compr4;
   brw->has_surface_tile_offset = devinfo->has_surface_tile_offset;
   brw->has_negative_rhw_bug = devinfo->has_negative_rhw_bug;
   brw->needs_unlit_centroid_workaround =
      devinfo->needs_unlit_centroid_workaround;

   brw->must_use_separate_stencil = screen->hw_must_use_separate_stencil;
   brw->has_swizzling = screen->hw_has_swizzling;

   brw->vs.base.stage = MESA_SHADER_VERTEX;
   brw->gs.base.stage = MESA_SHADER_GEOMETRY;
   brw->wm.base.stage = MESA_SHADER_FRAGMENT;
   if (brw->gen >= 8) {
      gen8_init_vtable_surface_functions(brw);
      brw->vtbl.emit_depth_stencil_hiz = gen8_emit_depth_stencil_hiz;
   } else if (brw->gen >= 7) {
      gen7_init_vtable_surface_functions(brw);
      brw->vtbl.emit_depth_stencil_hiz = gen7_emit_depth_stencil_hiz;
   } else if (brw->gen >= 6) {
      gen6_init_vtable_surface_functions(brw);
      brw->vtbl.emit_depth_stencil_hiz = gen6_emit_depth_stencil_hiz;
   } else {
      gen4_init_vtable_surface_functions(brw);
      brw->vtbl.emit_depth_stencil_hiz = brw_emit_depth_stencil_hiz;
   }

   brw_init_driver_functions(brw, &functions);

   if (notify_reset)
      functions.GetGraphicsResetStatus = brw_get_graphics_reset_status;

   struct gl_context *ctx = &brw->ctx;

   if (!_mesa_initialize_context(ctx, api, mesaVis, shareCtx, &functions)) {
      *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
      fprintf(stderr, "%s: failed to init mesa context\n", __func__);
      intelDestroyContext(driContextPriv);
      return false;
   }

   driContextSetFlags(ctx, flags);

   /* Initialize the software rasterizer and helper modules.
    *
    * As of GL 3.1 core, the gen4+ driver doesn't need the swrast context for
    * software fallbacks (which we have to support on legacy GL to do weird
    * glDrawPixels(), glBitmap(), and other functions).
    */
   if (api != API_OPENGL_CORE && api != API_OPENGLES2) {
      _swrast_CreateContext(ctx);
   }

   _vbo_CreateContext(ctx);
   if (ctx->swrast_context) {
      _tnl_CreateContext(ctx);
      TNL_CONTEXT(ctx)->Driver.RunPipeline = _tnl_run_pipeline;
      _swsetup_CreateContext(ctx);

      /* Configure swrast to match hardware characteristics: */
      _swrast_allow_pixel_fog(ctx, false);
      _swrast_allow_vertex_fog(ctx, true);
   }

   _mesa_meta_init(ctx);

   brw_process_driconf_options(brw);
   brw_process_intel_debug_variable(brw);

   if (brw->gen >= 8 && !(INTEL_DEBUG & DEBUG_VEC4VS))
      brw->scalar_vs = true;

   brw_initialize_context_constants(brw);

   ctx->Const.ResetStrategy = notify_reset
      ? GL_LOSE_CONTEXT_ON_RESET_ARB : GL_NO_RESET_NOTIFICATION_ARB;

   /* Reinitialize the context point state.  It depends on ctx->Const values. */
   _mesa_init_point(ctx);

   intel_fbo_init(brw);

   intel_batchbuffer_init(brw);

   if (brw->gen >= 6) {
      /* Create a new hardware context.  Using a hardware context means that
       * our GPU state will be saved/restored on context switch, allowing us
       * to assume that the GPU is in the same state we left it in.
       *
       * This is required for transform feedback buffer offsets, query
       * objects, and also allows us to reduce how much state we have to
       * emit.
       */
      brw->hw_ctx = drm_intel_gem_context_create(brw->bufmgr);

      if (!brw->hw_ctx) {
         fprintf(stderr, "Gen6+ requires Kernel 3.6 or later.\n");
         intelDestroyContext(driContextPriv);
         return false;
      }
   }

   brw_init_state(brw);

   intelInitExtensions(ctx);

   brw_init_surface_formats(brw);

   brw->max_vs_threads = devinfo->max_vs_threads;
   brw->max_hs_threads = devinfo->max_hs_threads;
   brw->max_ds_threads = devinfo->max_ds_threads;
   brw->max_gs_threads = devinfo->max_gs_threads;
   brw->max_wm_threads = devinfo->max_wm_threads;
   brw->max_cs_threads = devinfo->max_cs_threads;
   brw->urb.size = devinfo->urb.size;
   brw->urb.min_vs_entries = devinfo->urb.min_vs_entries;
   brw->urb.max_vs_entries = devinfo->urb.max_vs_entries;
   brw->urb.max_hs_entries = devinfo->urb.max_hs_entries;
   brw->urb.max_ds_entries = devinfo->urb.max_ds_entries;
   brw->urb.max_gs_entries = devinfo->urb.max_gs_entries;

   brw_adjust_cs_context_constants(brw);

   /* Estimate the size of the mappable aperture into the GTT.  There's an
    * ioctl to get the whole GTT size, but not one to get the mappable subset.
    * It turns out it's basically always 256MB, though some ancient hardware
    * was smaller.
    */
   uint32_t gtt_size = 256 * 1024 * 1024;

   /* We don't want to map two objects such that a memcpy between them would
    * just fault one mapping in and then the other over and over forever.  So
    * we would need to divide the GTT size by 2.  Additionally, some GTT is
    * taken up by things like the framebuffer and the ringbuffer and such, so
    * be more conservative.
    */
   brw->max_gtt_map_object_size = gtt_size / 4;

   if (brw->gen == 6)
      brw->urb.gs_present = false;

   brw->prim_restart.in_progress = false;
   brw->prim_restart.enable_cut_index = false;
   brw->gs.enabled = false;
   brw->sf.viewport_transform_enable = true;

   brw->predicate.state = BRW_PREDICATE_STATE_RENDER;

   ctx->VertexProgram._MaintainTnlProgram = true;
   ctx->FragmentProgram._MaintainTexEnvProgram = true;

   brw_draw_init( brw );

   if ((flags & __DRI_CTX_FLAG_DEBUG) != 0) {
      /* Turn on some extra GL_ARB_debug_output generation. */
      brw->perf_debug = true;
   }

   if ((flags & __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS) != 0)
      ctx->Const.ContextFlags |= GL_CONTEXT_FLAG_ROBUST_ACCESS_BIT_ARB;

   if (INTEL_DEBUG & DEBUG_SHADER_TIME)
      brw_init_shader_time(brw);

   _mesa_compute_version(ctx);

   _mesa_initialize_dispatch_tables(ctx);
   _mesa_initialize_vbo_vtxfmt(ctx);

   if (ctx->Extensions.AMD_performance_monitor) {
      brw_init_performance_monitors(brw);
   }

   vbo_use_buffer_objects(ctx);
   vbo_always_unmap_buffers(ctx);

   return true;
}

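/**
 * DRI destroyContext entry point: tears down the state built up by
 * brwCreateContext, roughly in reverse order of creation.
 */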
void
intelDestroyContext(__DRIcontext * driContextPriv)
{
   struct brw_context *brw =
      (struct brw_context *) driContextPriv->driverPrivate;
   struct gl_context *ctx = &brw->ctx;

   /* Dump a final BMP in case the application doesn't call SwapBuffers */
   if (INTEL_DEBUG & DEBUG_AUB) {
      intel_batchbuffer_flush(brw);
      aub_dump_bmp(&brw->ctx);
   }

   _mesa_meta_free(&brw->ctx);
   brw_meta_fast_clear_free(brw);

   if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
      /* Force a report. */
      brw->shader_time.report_time = 0;

      brw_collect_and_report_shader_time(brw);
      brw_destroy_shader_time(brw);
   }

   brw_destroy_state(brw);
   brw_draw_destroy(brw);

   drm_intel_bo_unreference(brw->curbe.curbe_bo);
   if (brw->vs.base.scratch_bo)
      drm_intel_bo_unreference(brw->vs.base.scratch_bo);
   if (brw->gs.base.scratch_bo)
      drm_intel_bo_unreference(brw->gs.base.scratch_bo);
   if (brw->wm.base.scratch_bo)
      drm_intel_bo_unreference(brw->wm.base.scratch_bo);

   drm_intel_gem_context_destroy(brw->hw_ctx);

   if (ctx->swrast_context) {
      _swsetup_DestroyContext(&brw->ctx);
      _tnl_DestroyContext(&brw->ctx);
   }
   _vbo_DestroyContext(&brw->ctx);

   if (ctx->swrast_context)
      _swrast_DestroyContext(&brw->ctx);

   intel_batchbuffer_free(brw);

   drm_intel_bo_unreference(brw->throttle_batch[1]);
   drm_intel_bo_unreference(brw->throttle_batch[0]);
   brw->throttle_batch[1] = NULL;
   brw->throttle_batch[0] = NULL;

   driDestroyOptionCache(&brw->optionCache);

   /* free the Mesa context */
   _mesa_free_context_data(&brw->ctx);

   ralloc_free(brw);
   driContextPriv->driverPrivate = NULL;
}

GLboolean
intelUnbindContext(__DRIcontext * driContextPriv)
{
   /* Unset the current context and dispatch table */
   _mesa_make_current(NULL, NULL, NULL);

   return true;
}

/**
 * Fixes up the context for GLES2/3 with our default-to-sRGB-capable behavior
 * on window system framebuffers.
 *
 * Desktop GL is fairly reasonable in its handling of sRGB: You can ask if
 * your renderbuffer can do sRGB encode, and you can flip a switch that does
 * sRGB encode if the renderbuffer can handle it.  You can ask specifically
 * for a visual where you're guaranteed to be capable, but it turns out that
 * everyone just makes all their ARGB8888 visuals capable and doesn't offer
 * incapable ones, because there's no difference between the two in resources
 * used.  Applications thus get built that accidentally rely on the default
 * visual choice being sRGB, so we make ours sRGB capable.  Everything sounds
 * great...
 *
 * But for GLES2/3, they decided that it was silly to not turn on sRGB encode
 * for sRGB renderbuffers you made with the GL_EXT_texture_sRGB equivalent.
 * So they removed the enable knob and made it "if the renderbuffer is sRGB
 * capable, do sRGB encode".  Then, for your window system renderbuffers, you
 * can ask for sRGB visuals and get sRGB encode, or not ask for sRGB visuals
 * and get no sRGB encode (assuming that both kinds of visual are available).
 * Thus our choice to support sRGB by default on our visuals for desktop would
 * result in broken rendering of GLES apps that aren't expecting sRGB encode.
 *
 * Unfortunately, renderbuffer setup happens before a context is created.  So
 * in intel_screen.c we always set up sRGB, and here, if you're a GLES2/3
 * context (without an sRGB visual, though we don't have sRGB visuals exposed
 * yet), we go turn that back off before anyone finds out.
 */
static void
intel_gles3_srgb_workaround(struct brw_context *brw,
                            struct gl_framebuffer *fb)
{
   struct gl_context *ctx = &brw->ctx;

   if (_mesa_is_desktop_gl(ctx) || !fb->Visual.sRGBCapable)
      return;

   /* Some day when we support the sRGB capable bit on visuals available for
    * GLES, we'll need to respect that and not disable things here.
    */
   fb->Visual.sRGBCapable = false;
   for (int i = 0; i < BUFFER_COUNT; i++) {
      if (fb->Attachment[i].Renderbuffer &&
          fb->Attachment[i].Renderbuffer->Format == MESA_FORMAT_B8G8R8A8_SRGB) {
         fb->Attachment[i].Renderbuffer->Format = MESA_FORMAT_B8G8R8A8_UNORM;
      }
   }
}

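/**
 * DRI makeCurrent entry point: flushes the previous context if we are
 * switching away from it, applies the GLES sRGB workaround to both
 * framebuffers, and binds the draw/read framebuffers to the context.
 */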
GLboolean
intelMakeCurrent(__DRIcontext * driContextPriv,
                 __DRIdrawable * driDrawPriv,
                 __DRIdrawable * driReadPriv)
{
   struct brw_context *brw;
   GET_CURRENT_CONTEXT(curCtx);

   if (driContextPriv)
      brw = (struct brw_context *) driContextPriv->driverPrivate;
   else
      brw = NULL;

   /* According to the glXMakeCurrent() man page: "Pending commands to
    * the previous context, if any, are flushed before it is released."
    * But only flush if we're actually changing contexts.
    */
   if (brw_context(curCtx) && brw_context(curCtx) != brw) {
      _mesa_flush(curCtx);
   }

   if (driContextPriv) {
      struct gl_context *ctx = &brw->ctx;
      struct gl_framebuffer *fb, *readFb;

      if (driDrawPriv == NULL) {
         fb = _mesa_get_incomplete_framebuffer();
      } else {
         fb = driDrawPriv->driverPrivate;
         driContextPriv->dri2.draw_stamp = driDrawPriv->dri2.stamp - 1;
      }

      if (driReadPriv == NULL) {
         readFb = _mesa_get_incomplete_framebuffer();
      } else {
         readFb = driReadPriv->driverPrivate;
         driContextPriv->dri2.read_stamp = driReadPriv->dri2.stamp - 1;
      }

      /* The sRGB workaround changes the renderbuffer's format.  We must
       * change the format before the renderbuffer's miptree gets allocated,
       * otherwise the formats of the renderbuffer and its miptree will
       * differ.
       */
      intel_gles3_srgb_workaround(brw, fb);
      intel_gles3_srgb_workaround(brw, readFb);

      /* If the context viewport hasn't been initialized, force a call out to
       * the loader to get buffers so we have a drawable size for the initial
       * viewport.
       */
      if (!brw->ctx.ViewportInitialized)
         intel_prepare_render(brw);

      _mesa_make_current(ctx, fb, readFb);
   } else {
      _mesa_make_current(NULL, NULL, NULL);
   }

   return true;
}

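/**
 * Downsample any multisampled winsys buffers and resolve pending fast
 * color clears before a DRI2 flush makes the buffers visible to the
 * server.  Only needed on Gen6+, where MSAA and fast clears exist.
 */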
void
intel_resolve_for_dri2_flush(struct brw_context *brw,
                             __DRIdrawable *drawable)
{
   if (brw->gen < 6) {
      /* MSAA and fast color clear are not supported, so don't waste time
       * checking whether a resolve is needed.
       */
      return;
   }

   struct gl_framebuffer *fb = drawable->driverPrivate;
   struct intel_renderbuffer *rb;

   /* Usually, only the back buffer will need to be downsampled.  However,
    * the front buffer will also need it if the user has rendered into it.
    */
   static const gl_buffer_index buffers[2] = {
      BUFFER_BACK_LEFT,
      BUFFER_FRONT_LEFT,
   };

   for (int i = 0; i < 2; ++i) {
      rb = intel_get_renderbuffer(fb, buffers[i]);
      if (rb == NULL || rb->mt == NULL)
         continue;
      if (rb->mt->num_samples <= 1)
         intel_miptree_resolve_color(brw, rb->mt);
      else
         intel_renderbuffer_downsample(brw, rb);
   }
}

static unsigned
intel_bits_per_pixel(const struct intel_renderbuffer *rb)
{
   return _mesa_get_format_bytes(intel_rb_format(rb)) * 8;
}

static void
intel_query_dri2_buffers(struct brw_context *brw,
                         __DRIdrawable *drawable,
                         __DRIbuffer **buffers,
                         int *count);

static void
intel_process_dri2_buffer(struct brw_context *brw,
                          __DRIdrawable *drawable,
                          __DRIbuffer *buffer,
                          struct intel_renderbuffer *rb,
                          const char *buffer_name);

static void
intel_update_image_buffers(struct brw_context *brw, __DRIdrawable *drawable);

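/**
 * Classic DRI2 flavor of intel_update_renderbuffers(): ask the server for
 * the drawable's current buffers and attach each returned buffer to the
 * matching renderbuffer.
 */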
static void
intel_update_dri2_buffers(struct brw_context *brw, __DRIdrawable *drawable)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;
   struct intel_renderbuffer *rb;
   __DRIbuffer *buffers = NULL;
   int i, count;
   const char *region_name;

   /* Set this up front, so that in case our buffers get invalidated
    * while we're getting new buffers, we don't clobber the stamp and
    * thus ignore the invalidate. */
   drawable->lastStamp = drawable->dri2.stamp;

   if (unlikely(INTEL_DEBUG & DEBUG_DRI))
      fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);

   intel_query_dri2_buffers(brw, drawable, &buffers, &count);

   if (buffers == NULL)
      return;

   for (i = 0; i < count; i++) {
      switch (buffers[i].attachment) {
      case __DRI_BUFFER_FRONT_LEFT:
         rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
         region_name = "dri2 front buffer";
         break;

      case __DRI_BUFFER_FAKE_FRONT_LEFT:
         rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
         region_name = "dri2 fake front buffer";
         break;

      case __DRI_BUFFER_BACK_LEFT:
         rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
         region_name = "dri2 back buffer";
         break;

      case __DRI_BUFFER_DEPTH:
      case __DRI_BUFFER_HIZ:
      case __DRI_BUFFER_DEPTH_STENCIL:
      case __DRI_BUFFER_STENCIL:
      case __DRI_BUFFER_ACCUM:
      default:
         fprintf(stderr,
                 "unhandled buffer attach event, attachment type %d\n",
                 buffers[i].attachment);
         return;
      }

      intel_process_dri2_buffer(brw, drawable, &buffers[i], rb, region_name);
   }

}

void
intel_update_renderbuffers(__DRIcontext *context, __DRIdrawable *drawable)
{
   struct brw_context *brw = context->driverPrivate;
   __DRIscreen *screen = brw->intelScreen->driScrnPriv;

   /* Set this up front, so that in case our buffers get invalidated
    * while we're getting new buffers, we don't clobber the stamp and
    * thus ignore the invalidate. */
   drawable->lastStamp = drawable->dri2.stamp;

   if (unlikely(INTEL_DEBUG & DEBUG_DRI))
      fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);

   if (screen->image.loader)
      intel_update_image_buffers(brw, drawable);
   else
      intel_update_dri2_buffers(brw, drawable);

   driUpdateFramebufferSize(&brw->ctx, drawable);
}

/**
 * intel_prepare_render should be called anywhere that current read/drawbuffer
 * state is required.
 */
void
intel_prepare_render(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   __DRIcontext *driContext = brw->driContext;
   __DRIdrawable *drawable;

   drawable = driContext->driDrawablePriv;
   if (drawable && drawable->dri2.stamp != driContext->dri2.draw_stamp) {
      if (drawable->lastStamp != drawable->dri2.stamp)
         intel_update_renderbuffers(driContext, drawable);
      driContext->dri2.draw_stamp = drawable->dri2.stamp;
   }

   drawable = driContext->driReadablePriv;
   if (drawable && drawable->dri2.stamp != driContext->dri2.read_stamp) {
      if (drawable->lastStamp != drawable->dri2.stamp)
         intel_update_renderbuffers(driContext, drawable);
      driContext->dri2.read_stamp = drawable->dri2.stamp;
   }

   /* If we're currently rendering to the front buffer, the rendering
    * that will happen next will probably dirty the front buffer.  So
    * mark it as dirty here.
    */
   if (brw_is_front_buffer_drawing(ctx->DrawBuffer))
      brw->front_buffer_dirty = true;
}

/**
 * \brief Query DRI2 to obtain a DRIdrawable's buffers.
 *
 * To determine which DRI buffers to request, examine the renderbuffers
 * attached to the drawable's framebuffer.  Then request the buffers with
 * DRI2GetBuffers() or DRI2GetBuffersWithFormat().
 *
 * This is called from intel_update_renderbuffers().
 *
 * \param drawable      Drawable whose buffers are queried.
 * \param buffers       [out] List of buffers returned by DRI2 query.
 * \param buffer_count  [out] Number of buffers returned.
 *
 * \see intel_update_renderbuffers()
 * \see DRI2GetBuffers()
 * \see DRI2GetBuffersWithFormat()
 */
static void
intel_query_dri2_buffers(struct brw_context *brw,
                         __DRIdrawable *drawable,
                         __DRIbuffer **buffers,
                         int *buffer_count)
{
   __DRIscreen *screen = brw->intelScreen->driScrnPriv;
   struct gl_framebuffer *fb = drawable->driverPrivate;
   int i = 0;
   unsigned attachments[8];

   struct intel_renderbuffer *front_rb;
   struct intel_renderbuffer *back_rb;

   front_rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
   back_rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);

   memset(attachments, 0, sizeof(attachments));
   if ((brw_is_front_buffer_drawing(fb) ||
        brw_is_front_buffer_reading(fb) ||
        !back_rb) && front_rb) {
      /* If a fake front buffer is in use, then querying for
       * __DRI_BUFFER_FRONT_LEFT will cause the server to copy the image from
       * the real front buffer to the fake front buffer.  So before doing the
       * query, we need to make sure all the pending drawing has landed in the
       * real front buffer.
       */
      intel_batchbuffer_flush(brw);
      intel_flush_front(&brw->ctx);

      attachments[i++] = __DRI_BUFFER_FRONT_LEFT;
      attachments[i++] = intel_bits_per_pixel(front_rb);
   } else if (front_rb && brw->front_buffer_dirty) {
      /* We have pending front buffer rendering, but we aren't querying for a
       * front buffer.  If the front buffer we have is a fake front buffer,
       * the X server is going to throw it away when it processes the query.
       * So before doing the query, make sure all the pending drawing has
       * landed in the real front buffer.
       */
      intel_batchbuffer_flush(brw);
      intel_flush_front(&brw->ctx);
   }

   if (back_rb) {
      attachments[i++] = __DRI_BUFFER_BACK_LEFT;
      attachments[i++] = intel_bits_per_pixel(back_rb);
   }

   assert(i <= ARRAY_SIZE(attachments));

   *buffers = screen->dri2.loader->getBuffersWithFormat(drawable,
                                                        &drawable->w,
                                                        &drawable->h,
                                                        attachments, i / 2,
                                                        buffer_count,
                                                        drawable->loaderPrivate);
}

/**
 * \brief Assign a DRI buffer's DRM region to a renderbuffer.
 *
 * This is called from intel_update_renderbuffers().
 *
 * \par Note:
 *    DRI buffers whose attachment point is DRI2BufferStencil or
 *    DRI2BufferDepthStencil are handled as special cases.
 *
 * \param buffer_name is a human readable name, such as "dri2 front buffer",
 *        that is passed to drm_intel_bo_gem_create_from_name().
 *
 * \see intel_update_renderbuffers()
 */
static void
intel_process_dri2_buffer(struct brw_context *brw,
                          __DRIdrawable *drawable,
                          __DRIbuffer *buffer,
                          struct intel_renderbuffer *rb,
                          const char *buffer_name)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;
   drm_intel_bo *bo;

   if (!rb)
      return;

   unsigned num_samples = rb->Base.Base.NumSamples;

   /* We try to avoid closing and reopening the same BO name, because the first
    * use of a mapping of the buffer involves a bunch of page faulting which is
    * moderately expensive.
    */
   struct intel_mipmap_tree *last_mt;
   if (num_samples == 0)
      last_mt = rb->mt;
   else
      last_mt = rb->singlesample_mt;

   uint32_t old_name = 0;
   if (last_mt) {
      /* The bo already has a name because the miptree was created by a
       * previous call to intel_process_dri2_buffer().  If a bo already has a
       * name, then drm_intel_bo_flink() is a low-cost getter.  It does not
       * create a new name.
       */
      drm_intel_bo_flink(last_mt->bo, &old_name);
   }

   if (old_name == buffer->name)
      return;

   if (unlikely(INTEL_DEBUG & DEBUG_DRI)) {
      fprintf(stderr,
              "attaching buffer %d, at %d, cpp %d, pitch %d\n",
              buffer->name, buffer->attachment,
              buffer->cpp, buffer->pitch);
   }

   intel_miptree_release(&rb->mt);
   bo = drm_intel_bo_gem_create_from_name(brw->bufmgr, buffer_name,
                                          buffer->name);
   if (!bo) {
      fprintf(stderr,
              "Failed to open BO for returned DRI2 buffer "
              "(%dx%d, %s, named %d).\n"
              "This is likely a bug in the X Server that will lead to a "
              "crash soon.\n",
              drawable->w, drawable->h, buffer_name, buffer->name);
      return;
   }

   intel_update_winsys_renderbuffer_miptree(brw, rb, bo,
                                            drawable->w, drawable->h,
                                            buffer->pitch);

   if (brw_is_front_buffer_drawing(fb) &&
       (buffer->attachment == __DRI_BUFFER_FRONT_LEFT ||
        buffer->attachment == __DRI_BUFFER_FAKE_FRONT_LEFT) &&
       rb->Base.Base.NumSamples > 1) {
      intel_renderbuffer_upsample(brw, rb);
   }

   assert(rb->mt);

   drm_intel_bo_unreference(bo);
}

/**
 * \brief Query the DRI image loader to obtain a DRIdrawable's buffers.
 *
 * To determine which DRI buffers to request, examine the renderbuffers
 * attached to the drawable's framebuffer.  Then request the buffers from
 * the image loader.
 *
 * This is called from intel_update_renderbuffers().
 *
 * \param drawable      Drawable whose buffers are queried.
 * \param buffers       [out] List of buffers returned by DRI2 query.
 * \param buffer_count  [out] Number of buffers returned.
 *
 * \see intel_update_renderbuffers()
 */

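/* Helper for intel_update_image_buffers(): rebind a renderbuffer to the BO
 * backing the __DRIimage returned by the loader, unless it is already bound
 * to that BO.
 */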
static void
intel_update_image_buffer(struct brw_context *intel,
                          __DRIdrawable *drawable,
                          struct intel_renderbuffer *rb,
                          __DRIimage *buffer,
                          enum __DRIimageBufferMask buffer_type)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;

   if (!rb || !buffer->bo)
      return;

   unsigned num_samples = rb->Base.Base.NumSamples;

   /* Check and see if we're already bound to the right
    * buffer object
    */
   struct intel_mipmap_tree *last_mt;
   if (num_samples == 0)
      last_mt = rb->mt;
   else
      last_mt = rb->singlesample_mt;

   if (last_mt && last_mt->bo == buffer->bo)
      return;

   intel_update_winsys_renderbuffer_miptree(intel, rb, buffer->bo,
                                            buffer->width, buffer->height,
                                            buffer->pitch);

   if (brw_is_front_buffer_drawing(fb) &&
       buffer_type == __DRI_IMAGE_BUFFER_FRONT &&
       rb->Base.Base.NumSamples > 1) {
      intel_renderbuffer_upsample(intel, rb);
   }
}

static void
intel_update_image_buffers(struct brw_context *brw, __DRIdrawable *drawable)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;
   __DRIscreen *screen = brw->intelScreen->driScrnPriv;
   struct intel_renderbuffer *front_rb;
   struct intel_renderbuffer *back_rb;
   struct __DRIimageList images;
   unsigned int format;
   uint32_t buffer_mask = 0;

   front_rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
   back_rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);

   if (back_rb)
      format = intel_rb_format(back_rb);
   else if (front_rb)
      format = intel_rb_format(front_rb);
   else
      return;

   if (front_rb && (brw_is_front_buffer_drawing(fb) ||
                    brw_is_front_buffer_reading(fb) || !back_rb)) {
      buffer_mask |= __DRI_IMAGE_BUFFER_FRONT;
   }

   if (back_rb)
      buffer_mask |= __DRI_IMAGE_BUFFER_BACK;

   (*screen->image.loader->getBuffers) (drawable,
                                        driGLFormatToImageFormat(format),
                                        &drawable->dri2.stamp,
                                        drawable->loaderPrivate,
                                        buffer_mask,
                                        &images);

   if (images.image_mask & __DRI_IMAGE_BUFFER_FRONT) {
      drawable->w = images.front->width;
      drawable->h = images.front->height;
      intel_update_image_buffer(brw,
                                drawable,
                                front_rb,
                                images.front,
                                __DRI_IMAGE_BUFFER_FRONT);
   }
   if (images.image_mask & __DRI_IMAGE_BUFFER_BACK) {
      drawable->w = images.back->width;
      drawable->h = images.back->height;
      intel_update_image_buffer(brw,
                                drawable,
                                back_rb,
                                images.back,
                                __DRI_IMAGE_BUFFER_BACK);
   }
}