i965: Store the GPU revision number in brw_context
[mesa.git] src/mesa/drivers/dri/i965/brw_context.c
/*
 Copyright 2003 VMware, Inc.
 Copyright (C) Intel Corp. 2006. All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */


#include "main/api_exec.h"
#include "main/context.h"
#include "main/fbobject.h"
#include "main/extensions.h"
#include "main/imports.h"
#include "main/macros.h"
#include "main/points.h"
#include "main/version.h"
#include "main/vtxfmt.h"
#include "main/texobj.h"

#include "vbo/vbo_context.h"

#include "drivers/common/driverfuncs.h"
#include "drivers/common/meta.h"
#include "utils.h"

#include "brw_context.h"
#include "brw_defines.h"
#include "brw_draw.h"
#include "brw_state.h"

#include "intel_batchbuffer.h"
#include "intel_buffer_objects.h"
#include "intel_buffers.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
#include "intel_pixel.h"
#include "intel_image.h"
#include "intel_tex.h"
#include "intel_tex_obj.h"

#include "swrast_setup/swrast_setup.h"
#include "tnl/tnl.h"
#include "tnl/t_pipeline.h"
#include "util/ralloc.h"

#include "glsl/nir/nir.h"

/***************************************
 * Mesa's Driver Functions
 ***************************************/

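/* Report the MSAA sample counts supported for a format, filling samples[]
 * in decreasing order and returning the number of entries written.
 */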
static size_t
brw_query_samples_for_format(struct gl_context *ctx, GLenum target,
                             GLenum internalFormat, int samples[16])
{
   struct brw_context *brw = brw_context(ctx);

   (void) target;

   switch (brw->gen) {
   case 9:
   case 8:
      samples[0] = 8;
      samples[1] = 4;
      samples[2] = 2;
      return 3;

   case 7:
      samples[0] = 8;
      samples[1] = 4;
      return 2;

   case 6:
      samples[0] = 4;
      return 1;

   default:
      assert(brw->gen < 6);
      samples[0] = 1;
      return 1;
   }
}

const char *const brw_vendor_string = "Intel Open Source Technology Center";

const char *
brw_get_renderer_string(unsigned deviceID)
{
   const char *chipset;
   static char buffer[128];

   switch (deviceID) {
#undef CHIPSET
#define CHIPSET(id, symbol, str) case id: chipset = str; break;
#include "pci_ids/i965_pci_ids.h"
   default:
      chipset = "Unknown Intel Chipset";
      break;
   }

   (void) driGetRendererString(buffer, chipset, 0);
   return buffer;
}

static const GLubyte *
intel_get_string(struct gl_context * ctx, GLenum name)
{
   const struct brw_context *const brw = brw_context(ctx);

   switch (name) {
   case GL_VENDOR:
      return (GLubyte *) brw_vendor_string;

   case GL_RENDERER:
      return
         (GLubyte *) brw_get_renderer_string(brw->intelScreen->deviceID);

   default:
      return NULL;
   }
}

static void
intel_viewport(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);
   __DRIcontext *driContext = brw->driContext;

   if (_mesa_is_winsys_fbo(ctx->DrawBuffer)) {
      dri2InvalidateDrawable(driContext->driDrawablePriv);
      dri2InvalidateDrawable(driContext->driReadablePriv);
   }
}

static void
intel_update_state(struct gl_context * ctx, GLuint new_state)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_texture_object *tex_obj;
   struct intel_renderbuffer *depth_irb;

   if (ctx->swrast_context)
      _swrast_InvalidateState(ctx, new_state);
   _vbo_InvalidateState(ctx, new_state);

   brw->NewGLState |= new_state;

   _mesa_unlock_context_textures(ctx);

   /* Resolve the depth buffer's HiZ buffer. */
   depth_irb = intel_get_renderbuffer(ctx->DrawBuffer, BUFFER_DEPTH);
   if (depth_irb)
      intel_renderbuffer_resolve_hiz(brw, depth_irb);

   /* Resolve depth buffer and render cache of each enabled texture. */
   int maxEnabledUnit = ctx->Texture._MaxEnabledTexImageUnit;
   for (int i = 0; i <= maxEnabledUnit; i++) {
      if (!ctx->Texture.Unit[i]._Current)
         continue;
      tex_obj = intel_texture_object(ctx->Texture.Unit[i]._Current);
      if (!tex_obj || !tex_obj->mt)
         continue;
      intel_miptree_all_slices_resolve_depth(brw, tex_obj->mt);
      intel_miptree_resolve_color(brw, tex_obj->mt);
      brw_render_cache_set_check_flush(brw, tex_obj->mt->bo);
   }

   _mesa_lock_context_textures(ctx);
}

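/* Resolve to whichever loader interface the screen is using: the image
 * loader's flushFrontBuffer hook if present, otherwise the DRI2 loader's.
 */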
#define flushFront(screen)      ((screen)->image.loader ? (screen)->image.loader->flushFrontBuffer : (screen)->dri2.loader->flushFrontBuffer)

static void
intel_flush_front(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);
   __DRIcontext *driContext = brw->driContext;
   __DRIdrawable *driDrawable = driContext->driDrawablePriv;
   __DRIscreen *const screen = brw->intelScreen->driScrnPriv;

   if (brw->front_buffer_dirty && _mesa_is_winsys_fbo(ctx->DrawBuffer)) {
      if (flushFront(screen) && driDrawable &&
          driDrawable->loaderPrivate) {

         /* Resolve before flushing FAKE_FRONT_LEFT to FRONT_LEFT.
          *
          * This potentially resolves both front and back buffer. It
          * is unnecessary to resolve the back, but harms nothing except
          * performance. And no one cares about front-buffer render
          * performance.
          */
         intel_resolve_for_dri2_flush(brw, driDrawable);
         intel_batchbuffer_flush(brw);

         flushFront(screen)(driDrawable, driDrawable->loaderPrivate);

         /* We set the dirty bit in intel_prepare_render() if we're
          * front buffer rendering once we get there.
          */
         brw->front_buffer_dirty = false;
      }
   }
}

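/* Implements glFlush(): submit the pending batch and, if we have dirtied
 * the front buffer, push the results out to the window system.
 */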
static void
intel_glFlush(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);

   intel_batchbuffer_flush(brw);
   intel_flush_front(ctx);

   brw->need_flush_throttle = true;
}

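/* Implements glFinish(): flush everything, then block until the last
 * batchbuffer submitted has finished executing on the GPU.
 */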
static void
intel_finish(struct gl_context * ctx)
{
   struct brw_context *brw = brw_context(ctx);

   intel_glFlush(ctx);

   if (brw->batch.last_bo)
      drm_intel_bo_wait_rendering(brw->batch.last_bo);
}

static void
brw_init_driver_functions(struct brw_context *brw,
                          struct dd_function_table *functions)
{
   _mesa_init_driver_functions(functions);

   /* GLX uses DRI2 invalidate events to handle window resizing.
    * Unfortunately, EGL does not - libEGL is written in XCB (not Xlib),
    * which doesn't provide a mechanism for snooping the event queues.
    *
    * So EGL still relies on viewport hacks to handle window resizing.
    * This should go away with DRI3000.
    */
   if (!brw->driContext->driScreenPriv->dri2.useInvalidate)
      functions->Viewport = intel_viewport;

   functions->Flush = intel_glFlush;
   functions->Finish = intel_finish;
   functions->GetString = intel_get_string;
   functions->UpdateState = intel_update_state;

   intelInitTextureFuncs(functions);
   intelInitTextureImageFuncs(functions);
   intelInitTextureSubImageFuncs(functions);
   intelInitTextureCopyImageFuncs(functions);
   intelInitCopyImageFuncs(functions);
   intelInitClearFuncs(functions);
   intelInitBufferFuncs(functions);
   intelInitPixelFuncs(functions);
   intelInitBufferObjectFuncs(functions);
   intel_init_syncobj_functions(functions);
   brw_init_object_purgeable_functions(functions);

   brwInitFragProgFuncs( functions );
   brw_init_common_queryobj_functions(functions);
   if (brw->gen >= 6)
      gen6_init_queryobj_functions(functions);
   else
      gen4_init_queryobj_functions(functions);

   functions->QuerySamplesForFormat = brw_query_samples_for_format;

   functions->NewTransformFeedback = brw_new_transform_feedback;
   functions->DeleteTransformFeedback = brw_delete_transform_feedback;
   functions->GetTransformFeedbackVertexCount =
      brw_get_transform_feedback_vertex_count;
   if (brw->gen >= 7) {
      functions->BeginTransformFeedback = gen7_begin_transform_feedback;
      functions->EndTransformFeedback = gen7_end_transform_feedback;
      functions->PauseTransformFeedback = gen7_pause_transform_feedback;
      functions->ResumeTransformFeedback = gen7_resume_transform_feedback;
   } else {
      functions->BeginTransformFeedback = brw_begin_transform_feedback;
      functions->EndTransformFeedback = brw_end_transform_feedback;
   }

   if (brw->gen >= 6)
      functions->GetSamplePosition = gen6_get_sample_position;
}

static void
brw_initialize_context_constants(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   unsigned max_samplers =
      brw->gen >= 8 || brw->is_haswell ? BRW_MAX_TEX_UNIT : 16;

   ctx->Const.QueryCounterBits.Timestamp = 36;

   ctx->Const.StripTextureBorder = true;

   ctx->Const.MaxDualSourceDrawBuffers = 1;
   ctx->Const.MaxDrawBuffers = BRW_MAX_DRAW_BUFFERS;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits = max_samplers;
   ctx->Const.MaxTextureCoordUnits = 8; /* Mesa limit */
   ctx->Const.MaxTextureUnits =
      MIN2(ctx->Const.MaxTextureCoordUnits,
           ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits);
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxTextureImageUnits = max_samplers;
   if (brw->gen >= 6)
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxTextureImageUnits = max_samplers;
   else
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxTextureImageUnits = 0;
   if (_mesa_extension_override_enables.ARB_compute_shader) {
      ctx->Const.Program[MESA_SHADER_COMPUTE].MaxTextureImageUnits = BRW_MAX_TEX_UNIT;
      ctx->Const.MaxUniformBufferBindings += 12;
   } else {
      ctx->Const.Program[MESA_SHADER_COMPUTE].MaxTextureImageUnits = 0;
   }
   ctx->Const.MaxCombinedTextureImageUnits =
      ctx->Const.Program[MESA_SHADER_VERTEX].MaxTextureImageUnits +
      ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits +
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxTextureImageUnits +
      ctx->Const.Program[MESA_SHADER_COMPUTE].MaxTextureImageUnits;

   ctx->Const.MaxTextureLevels = 14; /* 8192 */
   if (ctx->Const.MaxTextureLevels > MAX_TEXTURE_LEVELS)
      ctx->Const.MaxTextureLevels = MAX_TEXTURE_LEVELS;
   ctx->Const.Max3DTextureLevels = 12; /* 2048 */
   ctx->Const.MaxCubeTextureLevels = 14; /* 8192 */
   ctx->Const.MaxTextureMbytes = 1536;

   if (brw->gen >= 7)
      ctx->Const.MaxArrayTextureLayers = 2048;
   else
      ctx->Const.MaxArrayTextureLayers = 512;

   ctx->Const.MaxTextureRectSize = 1 << 12;

   ctx->Const.MaxTextureMaxAnisotropy = 16.0;

   ctx->Const.MaxRenderbufferSize = 8192;

   /* Hardware only supports a limited number of transform feedback buffers.
    * So we need to override the Mesa default (which is based only on software
    * limits).
    */
   ctx->Const.MaxTransformFeedbackBuffers = BRW_MAX_SOL_BUFFERS;

   /* On Gen6, in the worst case, we use up one binding table entry per
    * transform feedback component (see comments above the definition of
    * BRW_MAX_SOL_BINDINGS, in brw_context.h), so we need to advertise a value
    * for MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS equal to
    * BRW_MAX_SOL_BINDINGS.
    *
    * In "separate components" mode, we need to divide this value by
    * BRW_MAX_SOL_BUFFERS, so that the total number of binding table entries
    * used up by all buffers will not exceed BRW_MAX_SOL_BINDINGS.
    */
   ctx->Const.MaxTransformFeedbackInterleavedComponents = BRW_MAX_SOL_BINDINGS;
   ctx->Const.MaxTransformFeedbackSeparateComponents =
      BRW_MAX_SOL_BINDINGS / BRW_MAX_SOL_BUFFERS;

   ctx->Const.AlwaysUseGetTransformFeedbackVertexCount = true;

   int max_samples;
   const int *msaa_modes = intel_supported_msaa_modes(brw->intelScreen);
   const int clamp_max_samples =
      driQueryOptioni(&brw->optionCache, "clamp_max_samples");

   if (clamp_max_samples < 0) {
      max_samples = msaa_modes[0];
   } else {
      /* Select the largest supported MSAA mode that does not exceed
       * clamp_max_samples.
       */
      max_samples = 0;
      for (int i = 0; msaa_modes[i] != 0; ++i) {
         if (msaa_modes[i] <= clamp_max_samples) {
            max_samples = msaa_modes[i];
            break;
         }
      }
   }

   ctx->Const.MaxSamples = max_samples;
   ctx->Const.MaxColorTextureSamples = max_samples;
   ctx->Const.MaxDepthTextureSamples = max_samples;
   ctx->Const.MaxIntegerSamples = max_samples;

   /* gen6_set_sample_maps() sets SampleMap{2,4,8}x variables which are used
    * to map indices of rectangular grid to sample numbers within a pixel.
    * These variables are used by GL_EXT_framebuffer_multisample_blit_scaled
    * extension implementation. For more details see the comment above
    * gen6_set_sample_maps() definition.
    */
   gen6_set_sample_maps(ctx);

   if (brw->gen >= 7)
      ctx->Const.MaxProgramTextureGatherComponents = 4;
   else if (brw->gen == 6)
      ctx->Const.MaxProgramTextureGatherComponents = 1;

   ctx->Const.MinLineWidth = 1.0;
   ctx->Const.MinLineWidthAA = 1.0;
   if (brw->gen >= 9 || brw->is_cherryview) {
      ctx->Const.MaxLineWidth = 40.0;
      ctx->Const.MaxLineWidthAA = 40.0;
      ctx->Const.LineWidthGranularity = 0.125;
   } else if (brw->gen >= 6) {
      ctx->Const.MaxLineWidth = 7.375;
      ctx->Const.MaxLineWidthAA = 7.375;
      ctx->Const.LineWidthGranularity = 0.125;
   } else {
      ctx->Const.MaxLineWidth = 7.0;
      ctx->Const.MaxLineWidthAA = 7.0;
      ctx->Const.LineWidthGranularity = 0.5;
   }

   ctx->Const.MinPointSize = 1.0;
   ctx->Const.MinPointSizeAA = 1.0;
   ctx->Const.MaxPointSize = 255.0;
   ctx->Const.MaxPointSizeAA = 255.0;
   ctx->Const.PointSizeGranularity = 1.0;

   if (brw->gen >= 5 || brw->is_g4x)
      ctx->Const.MaxClipPlanes = 8;

   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeInstructions = 16 * 1024;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxAluInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxTexInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxTexIndirections = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAluInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTexInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTexIndirections = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAttribs = 16;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTemps = 256;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAddressRegs = 1;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeParameters = 1024;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxEnvParams =
      MIN2(ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeParameters,
           ctx->Const.Program[MESA_SHADER_VERTEX].MaxEnvParams);

   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeInstructions = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAluInstructions = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTexInstructions = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTexIndirections = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAttribs = 12;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTemps = 256;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAddressRegs = 0;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeParameters = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxEnvParams =
      MIN2(ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeParameters,
           ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxEnvParams);

   /* Fragment shaders use real, 32-bit twos-complement integers for all
    * integer types.
    */
   ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.RangeMin = 31;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.RangeMax = 30;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.Precision = 0;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].HighInt = ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MediumInt = ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt;

   ctx->Const.Program[MESA_SHADER_VERTEX].LowInt.RangeMin = 31;
   ctx->Const.Program[MESA_SHADER_VERTEX].LowInt.RangeMax = 30;
   ctx->Const.Program[MESA_SHADER_VERTEX].LowInt.Precision = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].HighInt = ctx->Const.Program[MESA_SHADER_VERTEX].LowInt;
   ctx->Const.Program[MESA_SHADER_VERTEX].MediumInt = ctx->Const.Program[MESA_SHADER_VERTEX].LowInt;

   if (brw->gen >= 7) {
      ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxAtomicCounters = MAX_ATOMIC_COUNTERS;
      ctx->Const.Program[MESA_SHADER_VERTEX].MaxAtomicCounters = MAX_ATOMIC_COUNTERS;
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxAtomicCounters = MAX_ATOMIC_COUNTERS;
      ctx->Const.Program[MESA_SHADER_COMPUTE].MaxAtomicCounters = MAX_ATOMIC_COUNTERS;
      ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxAtomicBuffers = BRW_MAX_ABO;
      ctx->Const.Program[MESA_SHADER_VERTEX].MaxAtomicBuffers = BRW_MAX_ABO;
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxAtomicBuffers = BRW_MAX_ABO;
      ctx->Const.Program[MESA_SHADER_COMPUTE].MaxAtomicBuffers = BRW_MAX_ABO;
      ctx->Const.MaxCombinedAtomicBuffers = 3 * BRW_MAX_ABO;
   }

   /* Gen6 converts quads to polygons at the beginning of the 3D pipeline,
    * but we're not sure how the conversion orders the vertices, which
    * affects the provoking-vertex decision. Always use the last-vertex
    * convention for quad primitives; it works as expected for now.
    */
   if (brw->gen >= 6)
      ctx->Const.QuadsFollowProvokingVertexConvention = false;

   ctx->Const.NativeIntegers = true;
   ctx->Const.VertexID_is_zero_based = true;

   /* Regarding the CMP instruction, the Ivybridge PRM says:
    *
    *   "For each enabled channel 0b or 1b is assigned to the appropriate flag
    *    bit and 0/all zeros or all ones (e.g, byte 0xFF, word 0xFFFF, DWord
    *    0xFFFFFFFF) is assigned to dst."
    *
    * but PRMs for earlier generations say
    *
    *   "In dword format, one GRF may store up to 8 results. When the register
    *    is used later as a vector of Booleans, as only LSB at each channel
    *    contains meaning [sic] data, software should make sure all higher bits
    *    are masked out (e.g. by 'and-ing' an [sic] 0x01 constant)."
    *
    * We select the representation of a true boolean uniform to be ~0, and fix
    * the results of Gen <= 5 CMP instructions with -(result & 1).
    */
   ctx->Const.UniformBooleanTrue = ~0;

   /* From the gen4 PRM, volume 4 page 127:
    *
    *    "For SURFTYPE_BUFFER non-rendertarget surfaces, this field specifies
    *     the base address of the first element of the surface, computed in
    *     software by adding the surface base address to the byte offset of
    *     the element in the buffer."
    *
    * However, unaligned accesses are slower, so enforce buffer alignment.
    */
   ctx->Const.UniformBufferOffsetAlignment = 16;
   ctx->Const.TextureBufferOffsetAlignment = 16;

   if (brw->gen >= 6) {
      ctx->Const.MaxVarying = 32;
      ctx->Const.Program[MESA_SHADER_VERTEX].MaxOutputComponents = 128;
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxInputComponents = 64;
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxOutputComponents = 128;
      ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxInputComponents = 128;
   }

   static const nir_shader_compiler_options nir_options = {
      .native_integers = true,
   };

   /* We want the GLSL compiler to emit code that uses condition codes */
   for (int i = 0; i < MESA_SHADER_STAGES; i++) {
      ctx->Const.ShaderCompilerOptions[i].MaxIfDepth = brw->gen < 6 ? 16 : UINT_MAX;
      ctx->Const.ShaderCompilerOptions[i].EmitCondCodes = true;
      ctx->Const.ShaderCompilerOptions[i].EmitNoNoise = true;
      ctx->Const.ShaderCompilerOptions[i].EmitNoMainReturn = true;
      ctx->Const.ShaderCompilerOptions[i].EmitNoIndirectInput = true;
      ctx->Const.ShaderCompilerOptions[i].EmitNoIndirectOutput =
         (i == MESA_SHADER_FRAGMENT);
      ctx->Const.ShaderCompilerOptions[i].EmitNoIndirectTemp =
         (i == MESA_SHADER_FRAGMENT);
      ctx->Const.ShaderCompilerOptions[i].EmitNoIndirectUniform = false;
      ctx->Const.ShaderCompilerOptions[i].LowerClipDistance = true;
      ctx->Const.ShaderCompilerOptions[i].NirOptions = &nir_options;
   }

   ctx->Const.ShaderCompilerOptions[MESA_SHADER_VERTEX].OptimizeForAOS = true;
   ctx->Const.ShaderCompilerOptions[MESA_SHADER_GEOMETRY].OptimizeForAOS = true;

   if (brw->scalar_vs) {
      /* If we're using the scalar backend for vertex shaders, we need to
       * configure these accordingly.
       */
      ctx->Const.ShaderCompilerOptions[MESA_SHADER_VERTEX].EmitNoIndirectOutput = true;
      ctx->Const.ShaderCompilerOptions[MESA_SHADER_VERTEX].EmitNoIndirectTemp = true;
      ctx->Const.ShaderCompilerOptions[MESA_SHADER_VERTEX].OptimizeForAOS = false;
   }

   /* ARB_viewport_array */
   if (brw->gen >= 7 && ctx->API == API_OPENGL_CORE) {
      ctx->Const.MaxViewports = GEN7_NUM_VIEWPORTS;
      ctx->Const.ViewportSubpixelBits = 0;

      /* Cast to float before negating because MaxViewportWidth is unsigned.
       */
      ctx->Const.ViewportBounds.Min = -(float)ctx->Const.MaxViewportWidth;
      ctx->Const.ViewportBounds.Max = ctx->Const.MaxViewportWidth;
   }

   /* ARB_gpu_shader5 */
   if (brw->gen >= 7)
      ctx->Const.MaxVertexStreams = MIN2(4, MAX_VERTEX_STREAMS);
}

/**
 * Process driconf (drirc) options, setting appropriate context flags.
 *
 * intelInitExtensions still pokes at optionCache directly, in order to
 * avoid advertising various extensions. No flags are set, so it makes
 * sense to continue doing that there.
 */
static void
brw_process_driconf_options(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   driOptionCache *options = &brw->optionCache;
   driParseConfigFiles(options, &brw->intelScreen->optionCache,
                       brw->driContext->driScreenPriv->myNum, "i965");

   int bo_reuse_mode = driQueryOptioni(options, "bo_reuse");
   switch (bo_reuse_mode) {
   case DRI_CONF_BO_REUSE_DISABLED:
      break;
   case DRI_CONF_BO_REUSE_ALL:
      intel_bufmgr_gem_enable_reuse(brw->bufmgr);
      break;
   }

   if (!driQueryOptionb(options, "hiz")) {
      brw->has_hiz = false;
      /* On gen6, you can only do separate stencil with HIZ. */
      if (brw->gen == 6)
         brw->has_separate_stencil = false;
   }

   if (driQueryOptionb(options, "always_flush_batch")) {
      fprintf(stderr, "flushing batchbuffer before/after each draw call\n");
      brw->always_flush_batch = true;
   }

   if (driQueryOptionb(options, "always_flush_cache")) {
      fprintf(stderr, "flushing GPU caches before/after each draw call\n");
      brw->always_flush_cache = true;
   }

   if (driQueryOptionb(options, "disable_throttling")) {
      fprintf(stderr, "disabling flush throttling\n");
      brw->disable_throttling = true;
   }

   brw->precompile = driQueryOptionb(&brw->optionCache, "shader_precompile");

   ctx->Const.ForceGLSLExtensionsWarn =
      driQueryOptionb(options, "force_glsl_extensions_warn");

   ctx->Const.DisableGLSLLineContinuations =
      driQueryOptionb(options, "disable_glsl_line_continuations");

   ctx->Const.AllowGLSLExtensionDirectiveMidShader =
      driQueryOptionb(options, "allow_glsl_extension_directive_midshader");
}

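/* Query the kernel for the GPU revision ID (stepping) via the
 * I915_PARAM_REVISION getparam, returning -1 if the kernel doesn't
 * support the query. brwCreateContext() stores the result in
 * brw->revision, presumably so stepping-specific workarounds can be
 * keyed off of it.
 */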
static int
brw_get_revision(int fd)
{
   struct drm_i915_getparam gp;
   int revision;
   int ret;

   memset(&gp, 0, sizeof(gp));
   gp.param = I915_PARAM_REVISION;
   gp.value = &revision;

   ret = drmCommandWriteRead(fd, DRM_I915_GETPARAM, &gp, sizeof(gp));
   if (ret)
      revision = -1;

   return revision;
}

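/* Entry point for context creation: validate the requested flags, fill in
 * brw_context from the device info, initialize Mesa core, and set up the
 * driver's own state.
 */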
GLboolean
brwCreateContext(gl_api api,
                 const struct gl_config *mesaVis,
                 __DRIcontext *driContextPriv,
                 unsigned major_version,
                 unsigned minor_version,
                 uint32_t flags,
                 bool notify_reset,
                 unsigned *dri_ctx_error,
                 void *sharedContextPrivate)
{
   __DRIscreen *sPriv = driContextPriv->driScreenPriv;
   struct gl_context *shareCtx = (struct gl_context *) sharedContextPrivate;
   struct intel_screen *screen = sPriv->driverPrivate;
   const struct brw_device_info *devinfo = screen->devinfo;
   struct dd_function_table functions;

   /* Only allow the __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS flag if the kernel
    * provides us with context reset notifications.
    */
   uint32_t allowed_flags = __DRI_CTX_FLAG_DEBUG
      | __DRI_CTX_FLAG_FORWARD_COMPATIBLE;

   if (screen->has_context_reset_notification)
      allowed_flags |= __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS;

   if (flags & ~allowed_flags) {
      *dri_ctx_error = __DRI_CTX_ERROR_UNKNOWN_FLAG;
      return false;
   }

   struct brw_context *brw = rzalloc(NULL, struct brw_context);
   if (!brw) {
      fprintf(stderr, "%s: failed to alloc context\n", __FUNCTION__);
      *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
      return false;
   }

   driContextPriv->driverPrivate = brw;
   brw->driContext = driContextPriv;
   brw->intelScreen = screen;
   brw->bufmgr = screen->bufmgr;

   brw->gen = devinfo->gen;
   brw->gt = devinfo->gt;
   brw->is_g4x = devinfo->is_g4x;
   brw->is_baytrail = devinfo->is_baytrail;
   brw->is_haswell = devinfo->is_haswell;
   brw->is_cherryview = devinfo->is_cherryview;
   brw->has_llc = devinfo->has_llc;
   brw->has_hiz = devinfo->has_hiz_and_separate_stencil;
   brw->has_separate_stencil = devinfo->has_hiz_and_separate_stencil;
   brw->has_pln = devinfo->has_pln;
   brw->has_compr4 = devinfo->has_compr4;
   brw->has_surface_tile_offset = devinfo->has_surface_tile_offset;
   brw->has_negative_rhw_bug = devinfo->has_negative_rhw_bug;
   brw->needs_unlit_centroid_workaround =
      devinfo->needs_unlit_centroid_workaround;
   brw->revision = brw_get_revision(sPriv->fd);

   brw->must_use_separate_stencil = screen->hw_must_use_separate_stencil;
   brw->has_swizzling = screen->hw_has_swizzling;

   brw->vs.base.stage = MESA_SHADER_VERTEX;
   brw->gs.base.stage = MESA_SHADER_GEOMETRY;
   brw->wm.base.stage = MESA_SHADER_FRAGMENT;
   if (brw->gen >= 8) {
      gen8_init_vtable_surface_functions(brw);
      brw->vtbl.emit_depth_stencil_hiz = gen8_emit_depth_stencil_hiz;
   } else if (brw->gen >= 7) {
      gen7_init_vtable_surface_functions(brw);
      brw->vtbl.emit_depth_stencil_hiz = gen7_emit_depth_stencil_hiz;
   } else if (brw->gen >= 6) {
      gen6_init_vtable_surface_functions(brw);
      brw->vtbl.emit_depth_stencil_hiz = gen6_emit_depth_stencil_hiz;
   } else {
      gen4_init_vtable_surface_functions(brw);
      brw->vtbl.emit_depth_stencil_hiz = brw_emit_depth_stencil_hiz;
   }

   brw_init_driver_functions(brw, &functions);

   if (notify_reset)
      functions.GetGraphicsResetStatus = brw_get_graphics_reset_status;

   struct gl_context *ctx = &brw->ctx;

   if (!_mesa_initialize_context(ctx, api, mesaVis, shareCtx, &functions)) {
      *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
      fprintf(stderr, "%s: failed to init mesa context\n", __FUNCTION__);
      intelDestroyContext(driContextPriv);
      return false;
   }

   driContextSetFlags(ctx, flags);

   /* Initialize the software rasterizer and helper modules.
    *
    * As of GL 3.1 core, the gen4+ driver doesn't need the swrast context for
    * software fallbacks (which we have to support on legacy GL to do weird
    * glDrawPixels(), glBitmap(), and other functions).
    */
   if (api != API_OPENGL_CORE && api != API_OPENGLES2) {
      _swrast_CreateContext(ctx);
   }

   _vbo_CreateContext(ctx);
   if (ctx->swrast_context) {
      _tnl_CreateContext(ctx);
      TNL_CONTEXT(ctx)->Driver.RunPipeline = _tnl_run_pipeline;
      _swsetup_CreateContext(ctx);

      /* Configure swrast to match hardware characteristics: */
      _swrast_allow_pixel_fog(ctx, false);
      _swrast_allow_vertex_fog(ctx, true);
   }

   _mesa_meta_init(ctx);

   brw_process_driconf_options(brw);
   brw_process_intel_debug_variable(brw);

   if (brw->gen >= 8 && !(INTEL_DEBUG & DEBUG_VEC4VS))
      brw->scalar_vs = true;

   brw_initialize_context_constants(brw);

   ctx->Const.ResetStrategy = notify_reset
      ? GL_LOSE_CONTEXT_ON_RESET_ARB : GL_NO_RESET_NOTIFICATION_ARB;

   /* Reinitialize the context point state. It depends on ctx->Const values. */
   _mesa_init_point(ctx);

   intel_fbo_init(brw);

   intel_batchbuffer_init(brw);

   if (brw->gen >= 6) {
      /* Create a new hardware context. Using a hardware context means that
       * our GPU state will be saved/restored on context switch, allowing us
       * to assume that the GPU is in the same state we left it in.
       *
       * This is required for transform feedback buffer offsets, query objects,
       * and also allows us to reduce how much state we have to emit.
       */
      brw->hw_ctx = drm_intel_gem_context_create(brw->bufmgr);

      if (!brw->hw_ctx) {
         fprintf(stderr, "Gen6+ requires Kernel 3.6 or later.\n");
         intelDestroyContext(driContextPriv);
         return false;
      }
   }

   brw_init_state(brw);

   intelInitExtensions(ctx);

   brw_init_surface_formats(brw);

   brw->max_vs_threads = devinfo->max_vs_threads;
   brw->max_hs_threads = devinfo->max_hs_threads;
   brw->max_ds_threads = devinfo->max_ds_threads;
   brw->max_gs_threads = devinfo->max_gs_threads;
   brw->max_wm_threads = devinfo->max_wm_threads;
   brw->urb.size = devinfo->urb.size;
   brw->urb.min_vs_entries = devinfo->urb.min_vs_entries;
   brw->urb.max_vs_entries = devinfo->urb.max_vs_entries;
   brw->urb.max_hs_entries = devinfo->urb.max_hs_entries;
   brw->urb.max_ds_entries = devinfo->urb.max_ds_entries;
   brw->urb.max_gs_entries = devinfo->urb.max_gs_entries;

   /* Estimate the size of the mappable aperture into the GTT. There's an
    * ioctl to get the whole GTT size, but not one to get the mappable subset.
    * It turns out it's basically always 256MB, though some ancient hardware
    * was smaller.
    */
   uint32_t gtt_size = 256 * 1024 * 1024;

   /* We don't want to map two objects such that a memcpy between them would
    * just fault one mapping in and then the other over and over forever. So
    * we would need to divide the GTT size by 2. Additionally, some GTT is
    * taken up by things like the framebuffer and the ringbuffer and such, so
    * be more conservative.
    */
   brw->max_gtt_map_object_size = gtt_size / 4;

   if (brw->gen == 6)
      brw->urb.gs_present = false;

   brw->prim_restart.in_progress = false;
   brw->prim_restart.enable_cut_index = false;
   brw->gs.enabled = false;
   brw->sf.viewport_transform_enable = true;

   ctx->VertexProgram._MaintainTnlProgram = true;
   ctx->FragmentProgram._MaintainTexEnvProgram = true;

   brw_draw_init( brw );

   if ((flags & __DRI_CTX_FLAG_DEBUG) != 0) {
      /* Turn on some extra GL_ARB_debug_output generation. */
      brw->perf_debug = true;
   }

   if ((flags & __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS) != 0)
      ctx->Const.ContextFlags |= GL_CONTEXT_FLAG_ROBUST_ACCESS_BIT_ARB;

   if (INTEL_DEBUG & DEBUG_SHADER_TIME)
      brw_init_shader_time(brw);

   _mesa_compute_version(ctx);

   _mesa_initialize_dispatch_tables(ctx);
   _mesa_initialize_vbo_vtxfmt(ctx);

   if (ctx->Extensions.AMD_performance_monitor) {
      brw_init_performance_monitors(brw);
   }

   vbo_use_buffer_objects(ctx);
   vbo_always_unmap_buffers(ctx);

   return true;
}

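/* Tear down everything brwCreateContext() set up, in roughly reverse
 * order of creation.
 */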
void
intelDestroyContext(__DRIcontext * driContextPriv)
{
   struct brw_context *brw =
      (struct brw_context *) driContextPriv->driverPrivate;
   struct gl_context *ctx = &brw->ctx;

   assert(brw); /* should never be null */
   if (!brw)
      return;

   /* Dump a final BMP in case the application doesn't call SwapBuffers */
   if (INTEL_DEBUG & DEBUG_AUB) {
      intel_batchbuffer_flush(brw);
      aub_dump_bmp(&brw->ctx);
   }

   _mesa_meta_free(&brw->ctx);
   brw_meta_fast_clear_free(brw);

   if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
      /* Force a report. */
      brw->shader_time.report_time = 0;

      brw_collect_and_report_shader_time(brw);
      brw_destroy_shader_time(brw);
   }

   brw_destroy_state(brw);
   brw_draw_destroy(brw);

   drm_intel_bo_unreference(brw->curbe.curbe_bo);
   if (brw->vs.base.scratch_bo)
      drm_intel_bo_unreference(brw->vs.base.scratch_bo);
   if (brw->gs.base.scratch_bo)
      drm_intel_bo_unreference(brw->gs.base.scratch_bo);
   if (brw->wm.base.scratch_bo)
      drm_intel_bo_unreference(brw->wm.base.scratch_bo);

   drm_intel_gem_context_destroy(brw->hw_ctx);

   if (ctx->swrast_context) {
      _swsetup_DestroyContext(&brw->ctx);
      _tnl_DestroyContext(&brw->ctx);
   }
   _vbo_DestroyContext(&brw->ctx);

   if (ctx->swrast_context)
      _swrast_DestroyContext(&brw->ctx);

   intel_batchbuffer_free(brw);

   drm_intel_bo_unreference(brw->throttle_batch[1]);
   drm_intel_bo_unreference(brw->throttle_batch[0]);
   brw->throttle_batch[1] = NULL;
   brw->throttle_batch[0] = NULL;

   driDestroyOptionCache(&brw->optionCache);

   /* free the Mesa context */
   _mesa_free_context_data(&brw->ctx);

   ralloc_free(brw);
   driContextPriv->driverPrivate = NULL;
}

GLboolean
intelUnbindContext(__DRIcontext * driContextPriv)
{
   /* Unset current context and dispatch table */
   _mesa_make_current(NULL, NULL, NULL);

   return true;
}

/**
 * Fixes up the context for GLES2/3 with our default-to-sRGB-capable behavior
 * on window system framebuffers.
 *
 * Desktop GL is fairly reasonable in its handling of sRGB: You can ask if
 * your renderbuffer can do sRGB encode, and you can flip a switch that does
 * sRGB encode if the renderbuffer can handle it. You can ask specifically
 * for a visual where you're guaranteed to be capable, but it turns out that
 * everyone just makes all their ARGB8888 visuals capable and doesn't offer
 * incapable ones, because there's no difference between the two in resources
 * used. Applications thus get built that accidentally rely on the default
 * visual choice being sRGB, so we make ours sRGB capable. Everything sounds
 * great...
 *
 * But for GLES2/3, they decided that it was silly to not turn on sRGB encode
 * for sRGB renderbuffers you made with the GL_EXT_texture_sRGB equivalent.
 * So they removed the enable knob and made it "if the renderbuffer is sRGB
 * capable, do sRGB encode". Then, for your window system renderbuffers, you
 * can ask for sRGB visuals and get sRGB encode, or not ask for sRGB visuals
 * and get no sRGB encode (assuming that both kinds of visual are available).
 * Thus our choice to support sRGB by default on our visuals for desktop would
 * result in broken rendering of GLES apps that aren't expecting sRGB encode.
 *
 * Unfortunately, renderbuffer setup happens before a context is created. So
 * in intel_screen.c we always set up sRGB, and here, if you're a GLES2/3
 * context (without an sRGB visual, though we don't have sRGB visuals exposed
 * yet), we go turn that back off before anyone finds out.
 */
static void
intel_gles3_srgb_workaround(struct brw_context *brw,
                            struct gl_framebuffer *fb)
{
   struct gl_context *ctx = &brw->ctx;

   if (_mesa_is_desktop_gl(ctx) || !fb->Visual.sRGBCapable)
      return;

   /* Some day when we support the sRGB capable bit on visuals available for
    * GLES, we'll need to respect that and not disable things here.
    */
   fb->Visual.sRGBCapable = false;
   for (int i = 0; i < BUFFER_COUNT; i++) {
      if (fb->Attachment[i].Renderbuffer &&
          fb->Attachment[i].Renderbuffer->Format == MESA_FORMAT_B8G8R8A8_SRGB) {
         fb->Attachment[i].Renderbuffer->Format = MESA_FORMAT_B8G8R8A8_UNORM;
      }
   }
}

GLboolean
intelMakeCurrent(__DRIcontext * driContextPriv,
                 __DRIdrawable * driDrawPriv,
                 __DRIdrawable * driReadPriv)
{
   struct brw_context *brw;
   GET_CURRENT_CONTEXT(curCtx);

   if (driContextPriv)
      brw = (struct brw_context *) driContextPriv->driverPrivate;
   else
      brw = NULL;

   /* According to the glXMakeCurrent() man page: "Pending commands to
    * the previous context, if any, are flushed before it is released."
    * But only flush if we're actually changing contexts.
    */
   if (brw_context(curCtx) && brw_context(curCtx) != brw) {
      _mesa_flush(curCtx);
   }

   if (driContextPriv) {
      struct gl_context *ctx = &brw->ctx;
      struct gl_framebuffer *fb, *readFb;

      if (driDrawPriv == NULL) {
         fb = _mesa_get_incomplete_framebuffer();
      } else {
         fb = driDrawPriv->driverPrivate;
         driContextPriv->dri2.draw_stamp = driDrawPriv->dri2.stamp - 1;
      }

      if (driReadPriv == NULL) {
         readFb = _mesa_get_incomplete_framebuffer();
      } else {
         readFb = driReadPriv->driverPrivate;
         driContextPriv->dri2.read_stamp = driReadPriv->dri2.stamp - 1;
      }

      /* The sRGB workaround changes the renderbuffer's format. We must change
       * the format before the renderbuffer's miptree gets allocated, otherwise
       * the formats of the renderbuffer and its miptree will differ.
       */
      intel_gles3_srgb_workaround(brw, fb);
      intel_gles3_srgb_workaround(brw, readFb);

      /* If the context viewport hasn't been initialized, force a call out to
       * the loader to get buffers so we have a drawable size for the initial
       * viewport. */
      if (!brw->ctx.ViewportInitialized)
         intel_prepare_render(brw);

      _mesa_make_current(ctx, fb, readFb);
   } else {
      _mesa_make_current(NULL, NULL, NULL);
   }

   return true;
}

void
intel_resolve_for_dri2_flush(struct brw_context *brw,
                             __DRIdrawable *drawable)
{
   if (brw->gen < 6) {
      /* MSAA and fast color clear are not supported, so don't waste time
       * checking whether a resolve is needed.
       */
      return;
   }

   struct gl_framebuffer *fb = drawable->driverPrivate;
   struct intel_renderbuffer *rb;

   /* Usually, only the back buffer will need to be downsampled. However,
    * the front buffer will also need it if the user has rendered into it.
    */
   static const gl_buffer_index buffers[2] = {
      BUFFER_BACK_LEFT,
      BUFFER_FRONT_LEFT,
   };

   for (int i = 0; i < 2; ++i) {
      rb = intel_get_renderbuffer(fb, buffers[i]);
      if (rb == NULL || rb->mt == NULL)
         continue;
      if (rb->mt->num_samples <= 1)
         intel_miptree_resolve_color(brw, rb->mt);
      else
         intel_renderbuffer_downsample(brw, rb);
   }
}

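/* Bits per pixel of the renderbuffer's format, as the DRI2 buffer query
 * below expects it.
 */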
static unsigned
intel_bits_per_pixel(const struct intel_renderbuffer *rb)
{
   return _mesa_get_format_bytes(intel_rb_format(rb)) * 8;
}

static void
intel_query_dri2_buffers(struct brw_context *brw,
                         __DRIdrawable *drawable,
                         __DRIbuffer **buffers,
                         int *count);

static void
intel_process_dri2_buffer(struct brw_context *brw,
                          __DRIdrawable *drawable,
                          __DRIbuffer *buffer,
                          struct intel_renderbuffer *rb,
                          const char *buffer_name);

static void
intel_update_image_buffers(struct brw_context *brw, __DRIdrawable *drawable);

static void
intel_update_dri2_buffers(struct brw_context *brw, __DRIdrawable *drawable)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;
   struct intel_renderbuffer *rb;
   __DRIbuffer *buffers = NULL;
   int i, count;
   const char *region_name;

   /* Set this up front, so that in case our buffers get invalidated
    * while we're getting new buffers, we don't clobber the stamp and
    * thus ignore the invalidate. */
   drawable->lastStamp = drawable->dri2.stamp;

   if (unlikely(INTEL_DEBUG & DEBUG_DRI))
      fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);

   intel_query_dri2_buffers(brw, drawable, &buffers, &count);

   if (buffers == NULL)
      return;

   for (i = 0; i < count; i++) {
      switch (buffers[i].attachment) {
      case __DRI_BUFFER_FRONT_LEFT:
         rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
         region_name = "dri2 front buffer";
         break;

      case __DRI_BUFFER_FAKE_FRONT_LEFT:
         rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
         region_name = "dri2 fake front buffer";
         break;

      case __DRI_BUFFER_BACK_LEFT:
         rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
         region_name = "dri2 back buffer";
         break;

      case __DRI_BUFFER_DEPTH:
      case __DRI_BUFFER_HIZ:
      case __DRI_BUFFER_DEPTH_STENCIL:
      case __DRI_BUFFER_STENCIL:
      case __DRI_BUFFER_ACCUM:
      default:
         fprintf(stderr,
                 "unhandled buffer attach event, attachment type %d\n",
                 buffers[i].attachment);
         return;
      }

      intel_process_dri2_buffer(brw, drawable, &buffers[i], rb, region_name);
   }
}

void
intel_update_renderbuffers(__DRIcontext *context, __DRIdrawable *drawable)
{
   struct brw_context *brw = context->driverPrivate;
   __DRIscreen *screen = brw->intelScreen->driScrnPriv;

   /* Set this up front, so that in case our buffers get invalidated
    * while we're getting new buffers, we don't clobber the stamp and
    * thus ignore the invalidate. */
   drawable->lastStamp = drawable->dri2.stamp;

   if (unlikely(INTEL_DEBUG & DEBUG_DRI))
      fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);

   if (screen->image.loader)
      intel_update_image_buffers(brw, drawable);
   else
      intel_update_dri2_buffers(brw, drawable);

   driUpdateFramebufferSize(&brw->ctx, drawable);
}

/**
 * intel_prepare_render should be called anywhere that current read/draw
 * buffer state is required.
 */
void
intel_prepare_render(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   __DRIcontext *driContext = brw->driContext;
   __DRIdrawable *drawable;

   drawable = driContext->driDrawablePriv;
   if (drawable && drawable->dri2.stamp != driContext->dri2.draw_stamp) {
      if (drawable->lastStamp != drawable->dri2.stamp)
         intel_update_renderbuffers(driContext, drawable);
      driContext->dri2.draw_stamp = drawable->dri2.stamp;
   }

   drawable = driContext->driReadablePriv;
   if (drawable && drawable->dri2.stamp != driContext->dri2.read_stamp) {
      if (drawable->lastStamp != drawable->dri2.stamp)
         intel_update_renderbuffers(driContext, drawable);
      driContext->dri2.read_stamp = drawable->dri2.stamp;
   }

   /* If we're currently rendering to the front buffer, the rendering
    * that will happen next will probably dirty the front buffer. So
    * mark it as dirty here.
    */
   if (brw_is_front_buffer_drawing(ctx->DrawBuffer))
      brw->front_buffer_dirty = true;
}

/**
 * \brief Query DRI2 to obtain a DRIdrawable's buffers.
 *
 * To determine which DRI buffers to request, examine the renderbuffers
 * attached to the drawable's framebuffer. Then request the buffers with
 * DRI2GetBuffers() or DRI2GetBuffersWithFormat().
 *
 * This is called from intel_update_renderbuffers().
 *
 * \param drawable      Drawable whose buffers are queried.
 * \param buffers       [out] List of buffers returned by DRI2 query.
 * \param buffer_count  [out] Number of buffers returned.
 *
 * \see intel_update_renderbuffers()
 * \see DRI2GetBuffers()
 * \see DRI2GetBuffersWithFormat()
 */
static void
intel_query_dri2_buffers(struct brw_context *brw,
                         __DRIdrawable *drawable,
                         __DRIbuffer **buffers,
                         int *buffer_count)
{
   __DRIscreen *screen = brw->intelScreen->driScrnPriv;
   struct gl_framebuffer *fb = drawable->driverPrivate;
   int i = 0;
   unsigned attachments[8];

   struct intel_renderbuffer *front_rb;
   struct intel_renderbuffer *back_rb;

   front_rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
   back_rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);

   memset(attachments, 0, sizeof(attachments));
   if ((brw_is_front_buffer_drawing(fb) ||
        brw_is_front_buffer_reading(fb) ||
        !back_rb) && front_rb) {
      /* If a fake front buffer is in use, then querying for
       * __DRI_BUFFER_FRONT_LEFT will cause the server to copy the image from
       * the real front buffer to the fake front buffer. So before doing the
       * query, we need to make sure all the pending drawing has landed in the
       * real front buffer.
       */
      intel_batchbuffer_flush(brw);
      intel_flush_front(&brw->ctx);

      attachments[i++] = __DRI_BUFFER_FRONT_LEFT;
      attachments[i++] = intel_bits_per_pixel(front_rb);
   } else if (front_rb && brw->front_buffer_dirty) {
      /* We have pending front buffer rendering, but we aren't querying for a
       * front buffer. If the front buffer we have is a fake front buffer,
       * the X server is going to throw it away when it processes the query.
       * So before doing the query, make sure all the pending drawing has
       * landed in the real front buffer.
       */
      intel_batchbuffer_flush(brw);
      intel_flush_front(&brw->ctx);
   }

   if (back_rb) {
      attachments[i++] = __DRI_BUFFER_BACK_LEFT;
      attachments[i++] = intel_bits_per_pixel(back_rb);
   }

   assert(i <= ARRAY_SIZE(attachments));

   *buffers = screen->dri2.loader->getBuffersWithFormat(drawable,
                                                        &drawable->w,
                                                        &drawable->h,
                                                        attachments, i / 2,
                                                        buffer_count,
                                                        drawable->loaderPrivate);
}

/**
 * \brief Assign a DRI buffer's DRM region to a renderbuffer.
 *
 * This is called from intel_update_renderbuffers().
 *
 * \par Note:
 *    DRI buffers whose attachment point is DRI2BufferStencil or
 *    DRI2BufferDepthStencil are handled as special cases.
 *
 * \param buffer_name is a human readable name, such as "dri2 front buffer",
 *        that is passed to drm_intel_bo_gem_create_from_name().
 *
 * \see intel_update_renderbuffers()
 */
static void
intel_process_dri2_buffer(struct brw_context *brw,
                          __DRIdrawable *drawable,
                          __DRIbuffer *buffer,
                          struct intel_renderbuffer *rb,
                          const char *buffer_name)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;
   drm_intel_bo *bo;

   if (!rb)
      return;

   unsigned num_samples = rb->Base.Base.NumSamples;

   /* We try to avoid closing and reopening the same BO name, because the first
    * use of a mapping of the buffer involves a bunch of page faulting which is
    * moderately expensive.
    */
   struct intel_mipmap_tree *last_mt;
   if (num_samples == 0)
      last_mt = rb->mt;
   else
      last_mt = rb->singlesample_mt;

   uint32_t old_name = 0;
   if (last_mt) {
      /* The bo already has a name because the miptree was created by a
       * previous call to intel_process_dri2_buffer(). If a bo already has a
       * name, then drm_intel_bo_flink() is a low-cost getter. It does not
       * create a new name.
       */
      drm_intel_bo_flink(last_mt->bo, &old_name);
   }

   if (old_name == buffer->name)
      return;

   if (unlikely(INTEL_DEBUG & DEBUG_DRI)) {
      fprintf(stderr,
              "attaching buffer %d, at %d, cpp %d, pitch %d\n",
              buffer->name, buffer->attachment,
              buffer->cpp, buffer->pitch);
   }

   intel_miptree_release(&rb->mt);
   bo = drm_intel_bo_gem_create_from_name(brw->bufmgr, buffer_name,
                                          buffer->name);
   if (!bo) {
      fprintf(stderr,
              "Failed to open BO for returned DRI2 buffer "
              "(%dx%d, %s, named %d).\n"
              "This is likely a bug in the X Server that will lead to a "
              "crash soon.\n",
              drawable->w, drawable->h, buffer_name, buffer->name);
      return;
   }

   intel_update_winsys_renderbuffer_miptree(brw, rb, bo,
                                            drawable->w, drawable->h,
                                            buffer->pitch);

   if (brw_is_front_buffer_drawing(fb) &&
       (buffer->attachment == __DRI_BUFFER_FRONT_LEFT ||
        buffer->attachment == __DRI_BUFFER_FAKE_FRONT_LEFT) &&
       rb->Base.Base.NumSamples > 1) {
      intel_renderbuffer_upsample(brw, rb);
   }

   assert(rb->mt);

   drm_intel_bo_unreference(bo);
}

/**
 * \brief Query the DRI image loader to obtain a DRIdrawable's buffers.
 *
 * To determine which DRI buffers to request, examine the renderbuffers
 * attached to the drawable's framebuffer. Then request the buffers from
 * the image loader.
 *
 * This is called from intel_update_renderbuffers(), via
 * intel_update_image_buffers() below.
 *
 * \param drawable  Drawable whose buffers are queried.
 *
 * \see intel_update_renderbuffers()
 */
static void
intel_update_image_buffer(struct brw_context *intel,
                          __DRIdrawable *drawable,
                          struct intel_renderbuffer *rb,
                          __DRIimage *buffer,
                          enum __DRIimageBufferMask buffer_type)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;

   if (!rb || !buffer->bo)
      return;

   unsigned num_samples = rb->Base.Base.NumSamples;

   /* Check and see if we're already bound to the right
    * buffer object
    */
   struct intel_mipmap_tree *last_mt;
   if (num_samples == 0)
      last_mt = rb->mt;
   else
      last_mt = rb->singlesample_mt;

   if (last_mt && last_mt->bo == buffer->bo)
      return;

   intel_update_winsys_renderbuffer_miptree(intel, rb, buffer->bo,
                                            buffer->width, buffer->height,
                                            buffer->pitch);

   if (brw_is_front_buffer_drawing(fb) &&
       buffer_type == __DRI_IMAGE_BUFFER_FRONT &&
       rb->Base.Base.NumSamples > 1) {
      intel_renderbuffer_upsample(intel, rb);
   }
}

static void
intel_update_image_buffers(struct brw_context *brw, __DRIdrawable *drawable)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;
   __DRIscreen *screen = brw->intelScreen->driScrnPriv;
   struct intel_renderbuffer *front_rb;
   struct intel_renderbuffer *back_rb;
   struct __DRIimageList images;
   unsigned int format;
   uint32_t buffer_mask = 0;

   front_rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
   back_rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);

   if (back_rb)
      format = intel_rb_format(back_rb);
   else if (front_rb)
      format = intel_rb_format(front_rb);
   else
      return;

   if (front_rb && (brw_is_front_buffer_drawing(fb) ||
                    brw_is_front_buffer_reading(fb) || !back_rb)) {
      buffer_mask |= __DRI_IMAGE_BUFFER_FRONT;
   }

   if (back_rb)
      buffer_mask |= __DRI_IMAGE_BUFFER_BACK;

   (*screen->image.loader->getBuffers) (drawable,
                                        driGLFormatToImageFormat(format),
                                        &drawable->dri2.stamp,
                                        drawable->loaderPrivate,
                                        buffer_mask,
                                        &images);

   if (images.image_mask & __DRI_IMAGE_BUFFER_FRONT) {
      drawable->w = images.front->width;
      drawable->h = images.front->height;
      intel_update_image_buffer(brw,
                                drawable,
                                front_rb,
                                images.front,
                                __DRI_IMAGE_BUFFER_FRONT);
   }
   if (images.image_mask & __DRI_IMAGE_BUFFER_BACK) {
      drawable->w = images.back->width;
      drawable->h = images.back->height;
      intel_update_image_buffer(brw,
                                drawable,
                                back_rb,
                                images.back,
                                __DRI_IMAGE_BUFFER_BACK);
   }
}