i965/miptree: Set refcount before failing via _release()
mesa.git: src/mesa/drivers/dri/i965/brw_context.c
/*
 Copyright 2003 VMware, Inc.
 Copyright (C) Intel Corp. 2006. All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */


#include "compiler/nir/nir.h"
#include "main/api_exec.h"
#include "main/context.h"
#include "main/fbobject.h"
#include "main/extensions.h"
#include "main/imports.h"
#include "main/macros.h"
#include "main/points.h"
#include "main/version.h"
#include "main/vtxfmt.h"
#include "main/texobj.h"
#include "main/framebuffer.h"
#include "main/stencil.h"
#include "main/state.h"

#include "vbo/vbo_context.h"

#include "drivers/common/driverfuncs.h"
#include "drivers/common/meta.h"
#include "utils.h"

#include "brw_context.h"
#include "brw_defines.h"
#include "brw_blorp.h"
#include "brw_draw.h"
#include "brw_state.h"

#include "intel_batchbuffer.h"
#include "intel_buffer_objects.h"
#include "intel_buffers.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
#include "intel_pixel.h"
#include "intel_image.h"
#include "intel_tex.h"
#include "intel_tex_obj.h"

#include "swrast_setup/swrast_setup.h"
#include "tnl/tnl.h"
#include "tnl/t_pipeline.h"
#include "util/ralloc.h"
#include "util/debug.h"
#include "isl/isl.h"
/***************************************
 * Mesa's Driver Functions
 ***************************************/

const char *const brw_vendor_string = "Intel Open Source Technology Center";

static const char *
get_bsw_model(const struct intel_screen *screen)
{
   switch (screen->eu_total) {
   case 16:
      return "405";
   case 12:
      return "400";
   default:
      return "   ";
   }
}
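
/* Each model string above (including the all-spaces default) is exactly
 * three characters long on purpose: brw_get_renderer_string() memcpy()s
 * the result over the three-character "XXX" placeholder in the Braswell
 * renderer string.
 */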

const char *
brw_get_renderer_string(const struct intel_screen *screen)
{
   const char *chipset;
   static char buffer[128];
   char *bsw = NULL;

   switch (screen->deviceID) {
#undef CHIPSET
#define CHIPSET(id, symbol, str) case id: chipset = str; break;
#include "pci_ids/i965_pci_ids.h"
   default:
      chipset = "Unknown Intel Chipset";
      break;
   }

   /* Braswell branding is funny, so we have to fix it up here */
   if (screen->deviceID == 0x22B1) {
      bsw = strdup(chipset);
      char *needle = strstr(bsw, "XXX");
      if (needle) {
         memcpy(needle, get_bsw_model(screen), 3);
         chipset = bsw;
      }
   }

   (void) driGetRendererString(buffer, chipset, 0);
   free(bsw);
   return buffer;
}

static const GLubyte *
intel_get_string(struct gl_context * ctx, GLenum name)
{
   const struct brw_context *const brw = brw_context(ctx);

   switch (name) {
   case GL_VENDOR:
      return (GLubyte *) brw_vendor_string;

   case GL_RENDERER:
      return
         (GLubyte *) brw_get_renderer_string(brw->screen);

   default:
      return NULL;
   }
}

static void
intel_viewport(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);
   __DRIcontext *driContext = brw->driContext;

   if (_mesa_is_winsys_fbo(ctx->DrawBuffer)) {
      if (driContext->driDrawablePriv)
         dri2InvalidateDrawable(driContext->driDrawablePriv);
      if (driContext->driReadablePriv)
         dri2InvalidateDrawable(driContext->driReadablePriv);
   }
}

static void
intel_update_framebuffer(struct gl_context *ctx,
                         struct gl_framebuffer *fb)
{
   struct brw_context *brw = brw_context(ctx);

   /* Quantize the derived default number of samples
    */
   fb->DefaultGeometry._NumSamples =
      intel_quantize_num_samples(brw->screen,
                                 fb->DefaultGeometry.NumSamples);
}

static void
intel_update_state(struct gl_context * ctx)
{
   GLuint new_state = ctx->NewState;
   struct brw_context *brw = brw_context(ctx);

   if (ctx->swrast_context)
      _swrast_InvalidateState(ctx, new_state);

   brw->NewGLState |= new_state;

   if (new_state & (_NEW_SCISSOR | _NEW_BUFFERS | _NEW_VIEWPORT))
      _mesa_update_draw_buffer_bounds(ctx, ctx->DrawBuffer);

   if (new_state & (_NEW_STENCIL | _NEW_BUFFERS)) {
      brw->stencil_enabled = _mesa_stencil_is_enabled(ctx);
      brw->stencil_two_sided = _mesa_stencil_is_two_sided(ctx);
      brw->stencil_write_enabled =
         _mesa_stencil_is_write_enabled(ctx, brw->stencil_two_sided);
   }

   if (new_state & _NEW_POLYGON)
      brw->polygon_front_bit = _mesa_polygon_get_front_bit(ctx);

   intel_prepare_render(brw);

   if (new_state & _NEW_BUFFERS) {
      intel_update_framebuffer(ctx, ctx->DrawBuffer);
      if (ctx->DrawBuffer != ctx->ReadBuffer)
         intel_update_framebuffer(ctx, ctx->ReadBuffer);
   }
}

#define flushFront(screen) ((screen)->image.loader ? (screen)->image.loader->flushFrontBuffer : (screen)->dri2.loader->flushFrontBuffer)
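
/* Both loader interfaces expose an equivalent flushFrontBuffer hook; the
 * macro above simply selects whichever loader this screen was created
 * with.
 */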

static void
intel_flush_front(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);
   __DRIcontext *driContext = brw->driContext;
   __DRIdrawable *driDrawable = driContext->driDrawablePriv;
   __DRIscreen *const dri_screen = brw->screen->driScrnPriv;

   if (brw->front_buffer_dirty && _mesa_is_winsys_fbo(ctx->DrawBuffer)) {
      if (flushFront(dri_screen) && driDrawable &&
          driDrawable->loaderPrivate) {

         /* Resolve before flushing FAKE_FRONT_LEFT to FRONT_LEFT.
          *
          * This potentially resolves both front and back buffer. It
          * is unnecessary to resolve the back, but harms nothing except
          * performance. And no one cares about front-buffer render
          * performance.
          */
         intel_resolve_for_dri2_flush(brw, driDrawable);
         intel_batchbuffer_flush(brw);

         flushFront(dri_screen)(driDrawable, driDrawable->loaderPrivate);

         /* We set the dirty bit in intel_prepare_render() if we're
          * front buffer rendering once we get there.
          */
         brw->front_buffer_dirty = false;
      }
   }
}

static void
intel_glFlush(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);

   intel_batchbuffer_flush(brw);
   intel_flush_front(ctx);

   brw->need_flush_throttle = true;
}

static void
intel_finish(struct gl_context * ctx)
{
   struct brw_context *brw = brw_context(ctx);

   intel_glFlush(ctx);

   if (brw->batch.last_bo)
      brw_bo_wait_rendering(brw->batch.last_bo);
}

static void
brw_init_driver_functions(struct brw_context *brw,
                          struct dd_function_table *functions)
{
   _mesa_init_driver_functions(functions);

   /* GLX uses DRI2 invalidate events to handle window resizing.
    * Unfortunately, EGL does not - libEGL is written in XCB (not Xlib),
    * which doesn't provide a mechanism for snooping the event queues.
    *
    * So EGL still relies on viewport hacks to handle window resizing.
    * This should go away with DRI3000.
    */
   if (!brw->driContext->driScreenPriv->dri2.useInvalidate)
      functions->Viewport = intel_viewport;

   functions->Flush = intel_glFlush;
   functions->Finish = intel_finish;
   functions->GetString = intel_get_string;
   functions->UpdateState = intel_update_state;

   intelInitTextureFuncs(functions);
   intelInitTextureImageFuncs(functions);
   intelInitTextureSubImageFuncs(functions);
   intelInitTextureCopyImageFuncs(functions);
   intelInitCopyImageFuncs(functions);
   intelInitClearFuncs(functions);
   intelInitBufferFuncs(functions);
   intelInitPixelFuncs(functions);
   intelInitBufferObjectFuncs(functions);
   brw_init_syncobj_functions(functions);
   brw_init_object_purgeable_functions(functions);

   brwInitFragProgFuncs( functions );
   brw_init_common_queryobj_functions(functions);
   if (brw->gen >= 8 || brw->is_haswell)
      hsw_init_queryobj_functions(functions);
   else if (brw->gen >= 6)
      gen6_init_queryobj_functions(functions);
   else
      gen4_init_queryobj_functions(functions);
   brw_init_compute_functions(functions);
   brw_init_conditional_render_functions(functions);

   functions->QueryInternalFormat = brw_query_internal_format;

   functions->NewTransformFeedback = brw_new_transform_feedback;
   functions->DeleteTransformFeedback = brw_delete_transform_feedback;
   if (can_do_mi_math_and_lrr(brw->screen)) {
      functions->BeginTransformFeedback = hsw_begin_transform_feedback;
      functions->EndTransformFeedback = hsw_end_transform_feedback;
      functions->PauseTransformFeedback = hsw_pause_transform_feedback;
      functions->ResumeTransformFeedback = hsw_resume_transform_feedback;
   } else if (brw->gen >= 7) {
      functions->BeginTransformFeedback = gen7_begin_transform_feedback;
      functions->EndTransformFeedback = gen7_end_transform_feedback;
      functions->PauseTransformFeedback = gen7_pause_transform_feedback;
      functions->ResumeTransformFeedback = gen7_resume_transform_feedback;
      functions->GetTransformFeedbackVertexCount =
         brw_get_transform_feedback_vertex_count;
   } else {
      functions->BeginTransformFeedback = brw_begin_transform_feedback;
      functions->EndTransformFeedback = brw_end_transform_feedback;
      functions->PauseTransformFeedback = brw_pause_transform_feedback;
      functions->ResumeTransformFeedback = brw_resume_transform_feedback;
      functions->GetTransformFeedbackVertexCount =
         brw_get_transform_feedback_vertex_count;
   }

   if (brw->gen >= 6)
      functions->GetSamplePosition = gen6_get_sample_position;
}

static void
brw_initialize_context_constants(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   const struct brw_compiler *compiler = brw->screen->compiler;

   const bool stage_exists[MESA_SHADER_STAGES] = {
      [MESA_SHADER_VERTEX] = true,
      [MESA_SHADER_TESS_CTRL] = brw->gen >= 7,
      [MESA_SHADER_TESS_EVAL] = brw->gen >= 7,
      [MESA_SHADER_GEOMETRY] = brw->gen >= 6,
      [MESA_SHADER_FRAGMENT] = true,
      [MESA_SHADER_COMPUTE] =
         ((ctx->API == API_OPENGL_COMPAT || ctx->API == API_OPENGL_CORE) &&
          ctx->Const.MaxComputeWorkGroupSize[0] >= 1024) ||
         (ctx->API == API_OPENGLES2 &&
          ctx->Const.MaxComputeWorkGroupSize[0] >= 128) ||
         _mesa_extension_override_enables.ARB_compute_shader,
   };

   unsigned num_stages = 0;
   for (int i = 0; i < MESA_SHADER_STAGES; i++) {
      if (stage_exists[i])
         num_stages++;
   }

   unsigned max_samplers =
      brw->gen >= 8 || brw->is_haswell ? BRW_MAX_TEX_UNIT : 16;

   ctx->Const.MaxDualSourceDrawBuffers = 1;
   ctx->Const.MaxDrawBuffers = BRW_MAX_DRAW_BUFFERS;
   ctx->Const.MaxCombinedShaderOutputResources =
      MAX_IMAGE_UNITS + BRW_MAX_DRAW_BUFFERS;

   /* The timestamp register we can read for glGetTimestamp() is
    * sometimes only 32 bits, before scaling to nanoseconds (depending
    * on kernel).
    *
    * Once scaled to nanoseconds the timestamp would roll over at a
    * non-power-of-two, so an application couldn't use
    * GL_QUERY_COUNTER_BITS to handle rollover correctly. Instead, we
    * report 36 bits and truncate at that (rolling over 5 times as
    * often as the HW counter), and when the 32-bit counter rolls
    * over, it happens to also be at a rollover in the reported value
    * from near (1<<36) to 0.
    *
    * The low 32 bits roll over in ~343 seconds. Our 36-bit result
    * rolls over every ~69 seconds.
    */
   ctx->Const.QueryCounterBits.Timestamp = 36;
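
   /* Worked example of the numbers above, assuming (for illustration) the
    * 80 ns timestamp tick these parts commonly use:
    *    2^32 ticks * 80 ns/tick ~= 343 s  (raw 32-bit counter rollover)
    *    2^36 ns                 ~= 68.7 s (our truncated, scaled result)
    * 343 / 68.7 ~= 5, hence "rolling over 5 times as often" above.
    */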

   ctx->Const.MaxTextureCoordUnits = 8; /* Mesa limit */
   ctx->Const.MaxImageUnits = MAX_IMAGE_UNITS;
   if (brw->gen >= 7) {
      ctx->Const.MaxRenderbufferSize = 16384;
      ctx->Const.MaxTextureLevels = MIN2(15 /* 16384 */, MAX_TEXTURE_LEVELS);
      ctx->Const.MaxCubeTextureLevels = 15; /* 16384 */
   } else {
      ctx->Const.MaxRenderbufferSize = 8192;
      ctx->Const.MaxTextureLevels = MIN2(14 /* 8192 */, MAX_TEXTURE_LEVELS);
      ctx->Const.MaxCubeTextureLevels = 14; /* 8192 */
   }
   ctx->Const.Max3DTextureLevels = 12; /* 2048 */
   ctx->Const.MaxArrayTextureLayers = brw->gen >= 7 ? 2048 : 512;
   ctx->Const.MaxTextureMbytes = 1536;
   ctx->Const.MaxTextureRectSize = brw->gen >= 7 ? 16384 : 8192;
   ctx->Const.MaxTextureMaxAnisotropy = 16.0;
   ctx->Const.MaxTextureLodBias = 15.0;
   ctx->Const.StripTextureBorder = true;
   if (brw->gen >= 7) {
      ctx->Const.MaxProgramTextureGatherComponents = 4;
      ctx->Const.MinProgramTextureGatherOffset = -32;
      ctx->Const.MaxProgramTextureGatherOffset = 31;
   } else if (brw->gen == 6) {
      ctx->Const.MaxProgramTextureGatherComponents = 1;
      ctx->Const.MinProgramTextureGatherOffset = -8;
      ctx->Const.MaxProgramTextureGatherOffset = 7;
   }

   ctx->Const.MaxUniformBlockSize = 65536;

   for (int i = 0; i < MESA_SHADER_STAGES; i++) {
      struct gl_program_constants *prog = &ctx->Const.Program[i];

      if (!stage_exists[i])
         continue;

      prog->MaxTextureImageUnits = max_samplers;

      prog->MaxUniformBlocks = BRW_MAX_UBO;
      prog->MaxCombinedUniformComponents =
         prog->MaxUniformComponents +
         ctx->Const.MaxUniformBlockSize / 4 * prog->MaxUniformBlocks;
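      /* With the 65536-byte block size above, that is 65536 / 4 = 16384
       * components per UBO, on top of the stage's default uniform storage.
       */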

      prog->MaxAtomicCounters = MAX_ATOMIC_COUNTERS;
      prog->MaxAtomicBuffers = BRW_MAX_ABO;
      prog->MaxImageUniforms = compiler->scalar_stage[i] ? BRW_MAX_IMAGES : 0;
      prog->MaxShaderStorageBlocks = BRW_MAX_SSBO;
   }

   ctx->Const.MaxTextureUnits =
      MIN2(ctx->Const.MaxTextureCoordUnits,
           ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits);

   ctx->Const.MaxUniformBufferBindings = num_stages * BRW_MAX_UBO;
   ctx->Const.MaxCombinedUniformBlocks = num_stages * BRW_MAX_UBO;
   ctx->Const.MaxCombinedAtomicBuffers = num_stages * BRW_MAX_ABO;
   ctx->Const.MaxCombinedShaderStorageBlocks = num_stages * BRW_MAX_SSBO;
   ctx->Const.MaxShaderStorageBufferBindings = num_stages * BRW_MAX_SSBO;
   ctx->Const.MaxCombinedTextureImageUnits = num_stages * max_samplers;
   ctx->Const.MaxCombinedImageUniforms = num_stages * BRW_MAX_IMAGES;


   /* Hardware only supports a limited number of transform feedback buffers.
    * So we need to override the Mesa default (which is based only on software
    * limits).
    */
   ctx->Const.MaxTransformFeedbackBuffers = BRW_MAX_SOL_BUFFERS;

   /* On Gen6, in the worst case, we use up one binding table entry per
    * transform feedback component (see comments above the definition of
    * BRW_MAX_SOL_BINDINGS, in brw_context.h), so we need to advertise a value
    * for MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS equal to
    * BRW_MAX_SOL_BINDINGS.
    *
    * In "separate components" mode, we need to divide this value by
    * BRW_MAX_SOL_BUFFERS, so that the total number of binding table entries
    * used up by all buffers will not exceed BRW_MAX_SOL_BINDINGS.
    */
   ctx->Const.MaxTransformFeedbackInterleavedComponents = BRW_MAX_SOL_BINDINGS;
   ctx->Const.MaxTransformFeedbackSeparateComponents =
      BRW_MAX_SOL_BINDINGS / BRW_MAX_SOL_BUFFERS;
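
   /* With the values in brw_context.h (64 bindings and 4 buffers at the
    * time of writing) that is 64 / 4 = 16 components per separate buffer,
    * versus 64 when everything is interleaved into a single buffer.
    */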

   ctx->Const.AlwaysUseGetTransformFeedbackVertexCount =
      !can_do_mi_math_and_lrr(brw->screen);

   int max_samples;
   const int *msaa_modes = intel_supported_msaa_modes(brw->screen);
   const int clamp_max_samples =
      driQueryOptioni(&brw->optionCache, "clamp_max_samples");

   if (clamp_max_samples < 0) {
      max_samples = msaa_modes[0];
   } else {
      /* Select the largest supported MSAA mode that does not exceed
       * clamp_max_samples.
       */
      max_samples = 0;
      for (int i = 0; msaa_modes[i] != 0; ++i) {
         if (msaa_modes[i] <= clamp_max_samples) {
            max_samples = msaa_modes[i];
            break;
         }
      }
   }
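
   /* For example, if intel_supported_msaa_modes() reported {8, 4, 2, 0}
    * (a hypothetical list; it is sorted in decreasing order and
    * zero-terminated) and the user set clamp_max_samples=6 in drirc, the
    * loop above would pick 4.  A clamp below every supported mode leaves
    * max_samples at 0.
    */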

   ctx->Const.MaxSamples = max_samples;
   ctx->Const.MaxColorTextureSamples = max_samples;
   ctx->Const.MaxDepthTextureSamples = max_samples;
   ctx->Const.MaxIntegerSamples = max_samples;
   ctx->Const.MaxImageSamples = 0;

   /* gen6_set_sample_maps() sets SampleMap{2,4,8}x variables which are used
    * to map indices of rectangular grid to sample numbers within a pixel.
    * These variables are used by GL_EXT_framebuffer_multisample_blit_scaled
    * extension implementation. For more details see the comment above
    * gen6_set_sample_maps() definition.
    */
   gen6_set_sample_maps(ctx);

   ctx->Const.MinLineWidth = 1.0;
   ctx->Const.MinLineWidthAA = 1.0;
   if (brw->gen >= 6) {
      ctx->Const.MaxLineWidth = 7.375;
      ctx->Const.MaxLineWidthAA = 7.375;
      ctx->Const.LineWidthGranularity = 0.125;
   } else {
      ctx->Const.MaxLineWidth = 7.0;
      ctx->Const.MaxLineWidthAA = 7.0;
      ctx->Const.LineWidthGranularity = 0.5;
   }

   /* For non-antialiased lines, we have to round the line width to the
    * nearest whole number. Make sure that we don't advertise a line
    * width that, when rounded, will be beyond the actual hardware
    * maximum.
    */
   assert(roundf(ctx->Const.MaxLineWidth) <= ctx->Const.MaxLineWidth);

   ctx->Const.MinPointSize = 1.0;
   ctx->Const.MinPointSizeAA = 1.0;
   ctx->Const.MaxPointSize = 255.0;
   ctx->Const.MaxPointSizeAA = 255.0;
   ctx->Const.PointSizeGranularity = 1.0;

   if (brw->gen >= 5 || brw->is_g4x)
      ctx->Const.MaxClipPlanes = 8;

   ctx->Const.GLSLTessLevelsAsInputs = true;
   ctx->Const.LowerTCSPatchVerticesIn = brw->gen >= 8;
   ctx->Const.LowerTESPatchVerticesIn = true;
   ctx->Const.PrimitiveRestartForPatches = true;

   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeInstructions = 16 * 1024;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxAluInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxTexInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxTexIndirections = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAluInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTexInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTexIndirections = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAttribs = 16;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTemps = 256;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAddressRegs = 1;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeParameters = 1024;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxEnvParams =
      MIN2(ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeParameters,
           ctx->Const.Program[MESA_SHADER_VERTEX].MaxEnvParams);

   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeInstructions = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAluInstructions = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTexInstructions = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTexIndirections = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAttribs = 12;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTemps = 256;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAddressRegs = 0;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeParameters = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxEnvParams =
      MIN2(ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeParameters,
           ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxEnvParams);

   /* Fragment shaders use real, 32-bit twos-complement integers for all
    * integer types.
    */
   ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.RangeMin = 31;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.RangeMax = 30;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.Precision = 0;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].HighInt = ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MediumInt = ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt;

   ctx->Const.Program[MESA_SHADER_VERTEX].LowInt.RangeMin = 31;
   ctx->Const.Program[MESA_SHADER_VERTEX].LowInt.RangeMax = 30;
   ctx->Const.Program[MESA_SHADER_VERTEX].LowInt.Precision = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].HighInt = ctx->Const.Program[MESA_SHADER_VERTEX].LowInt;
   ctx->Const.Program[MESA_SHADER_VERTEX].MediumInt = ctx->Const.Program[MESA_SHADER_VERTEX].LowInt;

   /* Gen6 converts quads to polygons at the beginning of the 3D pipeline,
    * but we're not sure how the vertex ordering is handled there, and it
    * affects the provoking-vertex decision.  Always use the last-vertex
    * convention for quad primitives; it works as expected for now.
    */
   if (brw->gen >= 6)
      ctx->Const.QuadsFollowProvokingVertexConvention = false;

   ctx->Const.NativeIntegers = true;
   ctx->Const.VertexID_is_zero_based = true;

   /* Regarding the CMP instruction, the Ivybridge PRM says:
    *
    *   "For each enabled channel 0b or 1b is assigned to the appropriate flag
    *    bit and 0/all zeros or all ones (e.g, byte 0xFF, word 0xFFFF, DWord
    *    0xFFFFFFFF) is assigned to dst."
    *
    * but PRMs for earlier generations say
    *
    *   "In dword format, one GRF may store up to 8 results. When the register
    *    is used later as a vector of Booleans, as only LSB at each channel
    *    contains meaning [sic] data, software should make sure all higher bits
    *    are masked out (e.g. by 'and-ing' an [sic] 0x01 constant)."
    *
    * We select the representation of a true boolean uniform to be ~0, and fix
    * the results of Gen <= 5 CMP instruction's with -(result & 1).
    */
   ctx->Const.UniformBooleanTrue = ~0;
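
   /* Worked example of that Gen <= 5 fixup: CMP leaves e.g. 0x00000001 in
    * a channel; -(0x00000001 & 1) = -1 = 0xFFFFFFFF, matching the ~0
    * convention above, while a false result stays 0.
    */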

   /* From the gen4 PRM, volume 4 page 127:
    *
    *    "For SURFTYPE_BUFFER non-rendertarget surfaces, this field specifies
    *     the base address of the first element of the surface, computed in
    *     software by adding the surface base address to the byte offset of
    *     the element in the buffer."
    *
    * However, unaligned accesses are slower, so enforce buffer alignment.
    *
    * In order to push UBO data, 3DSTATE_CONSTANT_XS imposes an additional
    * restriction: the start of the buffer needs to be 32B aligned.
    */
   ctx->Const.UniformBufferOffsetAlignment = 32;

   /* ShaderStorageBufferOffsetAlignment should be a cacheline (64 bytes) so
    * that we can safely have the CPU and GPU writing the same SSBO on
    * non-cachecoherent systems (our Atom CPUs). With UBOs, the GPU never
    * writes, so there's no problem. For an SSBO, the GPU and the CPU can
    * be updating disjoint regions of the buffer simultaneously and that will
    * break if the regions overlap the same cacheline.
    */
   ctx->Const.ShaderStorageBufferOffsetAlignment = 64;
   ctx->Const.TextureBufferOffsetAlignment = 16;
   ctx->Const.MaxTextureBufferSize = 128 * 1024 * 1024;

   if (brw->gen >= 6) {
      ctx->Const.MaxVarying = 32;
      ctx->Const.Program[MESA_SHADER_VERTEX].MaxOutputComponents = 128;
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxInputComponents = 64;
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxOutputComponents = 128;
      ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxInputComponents = 128;
      ctx->Const.Program[MESA_SHADER_TESS_CTRL].MaxInputComponents = 128;
      ctx->Const.Program[MESA_SHADER_TESS_CTRL].MaxOutputComponents = 128;
      ctx->Const.Program[MESA_SHADER_TESS_EVAL].MaxInputComponents = 128;
      ctx->Const.Program[MESA_SHADER_TESS_EVAL].MaxOutputComponents = 128;
   }

   /* We want the GLSL compiler to emit code that uses condition codes */
   for (int i = 0; i < MESA_SHADER_STAGES; i++) {
      ctx->Const.ShaderCompilerOptions[i] =
         brw->screen->compiler->glsl_compiler_options[i];
   }

   if (brw->gen >= 7) {
      ctx->Const.MaxViewportWidth = 32768;
      ctx->Const.MaxViewportHeight = 32768;
   }

   /* ARB_viewport_array, OES_viewport_array */
   if (brw->gen >= 6) {
      ctx->Const.MaxViewports = GEN6_NUM_VIEWPORTS;
      ctx->Const.ViewportSubpixelBits = 0;

      /* Cast to float before negating because MaxViewportWidth is unsigned.
       */
      ctx->Const.ViewportBounds.Min = -(float)ctx->Const.MaxViewportWidth;
      ctx->Const.ViewportBounds.Max = ctx->Const.MaxViewportWidth;
   }

   /* ARB_gpu_shader5 */
   if (brw->gen >= 7)
      ctx->Const.MaxVertexStreams = MIN2(4, MAX_VERTEX_STREAMS);

   /* ARB_framebuffer_no_attachments */
   ctx->Const.MaxFramebufferWidth = 16384;
   ctx->Const.MaxFramebufferHeight = 16384;
   ctx->Const.MaxFramebufferLayers = ctx->Const.MaxArrayTextureLayers;
   ctx->Const.MaxFramebufferSamples = max_samples;

   /* OES_primitive_bounding_box */
   ctx->Const.NoPrimitiveBoundingBoxOutput = true;
}

static void
brw_initialize_cs_context_constants(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   const struct intel_screen *screen = brw->screen;
   struct gen_device_info *devinfo = &brw->screen->devinfo;

   /* FINISHME: Do this for all platforms that the kernel supports */
   if (brw->is_cherryview &&
       screen->subslice_total > 0 && screen->eu_total > 0) {
      /* Logical CS threads = EUs per subslice * 7 threads per EU */
      uint32_t max_cs_threads = screen->eu_total / screen->subslice_total * 7;

      /* Fuse configurations may give more threads than expected, never less. */
      if (max_cs_threads > devinfo->max_cs_threads)
         devinfo->max_cs_threads = max_cs_threads;
   }

   /* Maximum number of scalar compute shader invocations that can be run in
    * parallel in the same subslice assuming SIMD32 dispatch.
    *
    * We don't advertise more than 64 threads, because we are limited to 64 by
    * our usage of thread_width_max in the gpgpu walker command. This only
    * currently impacts Haswell, which otherwise might be able to advertise 70
    * threads. With SIMD32 and 64 threads, Haswell still provides twice the
    * number of invocations that ARB_compute_shader requires.
    */
   const unsigned max_threads = MIN2(64, devinfo->max_cs_threads);
   const uint32_t max_invocations = 32 * max_threads;
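   /* e.g. 32 SIMD32 lanes * 64 threads = 2048 invocations, double the 1024
    * that ARB_compute_shader requires as a minimum.
    */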
   ctx->Const.MaxComputeWorkGroupSize[0] = max_invocations;
   ctx->Const.MaxComputeWorkGroupSize[1] = max_invocations;
   ctx->Const.MaxComputeWorkGroupSize[2] = max_invocations;
   ctx->Const.MaxComputeWorkGroupInvocations = max_invocations;
   ctx->Const.MaxComputeSharedMemorySize = 64 * 1024;
}

/**
 * Process driconf (drirc) options, setting appropriate context flags.
 *
 * intelInitExtensions still pokes at optionCache directly, in order to
 * avoid advertising various extensions.  No flags are set, so it makes
 * sense to continue doing that there.
 */
static void
brw_process_driconf_options(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   driOptionCache *options = &brw->optionCache;
   driParseConfigFiles(options, &brw->screen->optionCache,
                       brw->driContext->driScreenPriv->myNum, "i965");

   int bo_reuse_mode = driQueryOptioni(options, "bo_reuse");
   switch (bo_reuse_mode) {
   case DRI_CONF_BO_REUSE_DISABLED:
      break;
   case DRI_CONF_BO_REUSE_ALL:
      brw_bufmgr_enable_reuse(brw->bufmgr);
      break;
   }

   if (INTEL_DEBUG & DEBUG_NO_HIZ) {
      brw->has_hiz = false;
      /* On gen6, you can only do separate stencil with HIZ. */
      if (brw->gen == 6)
         brw->has_separate_stencil = false;
   }

   if (driQueryOptionb(options, "always_flush_batch")) {
      fprintf(stderr, "flushing batchbuffer before/after each draw call\n");
      brw->always_flush_batch = true;
   }

   if (driQueryOptionb(options, "always_flush_cache")) {
      fprintf(stderr, "flushing GPU caches before/after each draw call\n");
      brw->always_flush_cache = true;
   }

   if (driQueryOptionb(options, "disable_throttling")) {
      fprintf(stderr, "disabling flush throttling\n");
      brw->disable_throttling = true;
   }

   brw->precompile = driQueryOptionb(&brw->optionCache, "shader_precompile");

   if (driQueryOptionb(&brw->optionCache, "precise_trig"))
      brw->screen->compiler->precise_trig = true;

   ctx->Const.ForceGLSLExtensionsWarn =
      driQueryOptionb(options, "force_glsl_extensions_warn");

   ctx->Const.ForceGLSLVersion =
      driQueryOptioni(options, "force_glsl_version");

   ctx->Const.DisableGLSLLineContinuations =
      driQueryOptionb(options, "disable_glsl_line_continuations");

   ctx->Const.AllowGLSLExtensionDirectiveMidShader =
      driQueryOptionb(options, "allow_glsl_extension_directive_midshader");

   ctx->Const.AllowGLSLBuiltinVariableRedeclaration =
      driQueryOptionb(options, "allow_glsl_builtin_variable_redeclaration");

   ctx->Const.AllowHigherCompatVersion =
      driQueryOptionb(options, "allow_higher_compat_version");

   ctx->Const.ForceGLSLAbsSqrt =
      driQueryOptionb(options, "force_glsl_abs_sqrt");

   ctx->Const.GLSLZeroInit = driQueryOptionb(options, "glsl_zero_init");

   brw->dual_color_blend_by_location =
      driQueryOptionb(options, "dual_color_blend_by_location");
}

GLboolean
brwCreateContext(gl_api api,
                 const struct gl_config *mesaVis,
                 __DRIcontext *driContextPriv,
                 unsigned major_version,
                 unsigned minor_version,
                 uint32_t flags,
                 bool notify_reset,
                 unsigned *dri_ctx_error,
                 void *sharedContextPrivate)
{
   struct gl_context *shareCtx = (struct gl_context *) sharedContextPrivate;
   struct intel_screen *screen = driContextPriv->driScreenPriv->driverPrivate;
   const struct gen_device_info *devinfo = &screen->devinfo;
   struct dd_function_table functions;

   /* Only allow the __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS flag if the kernel
    * provides us with context reset notifications.
    */
   uint32_t allowed_flags = __DRI_CTX_FLAG_DEBUG |
                            __DRI_CTX_FLAG_FORWARD_COMPATIBLE |
                            __DRI_CTX_FLAG_NO_ERROR;

   if (screen->has_context_reset_notification)
      allowed_flags |= __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS;

   if (flags & ~allowed_flags) {
      *dri_ctx_error = __DRI_CTX_ERROR_UNKNOWN_FLAG;
      return false;
   }

   struct brw_context *brw = rzalloc(NULL, struct brw_context);
   if (!brw) {
      fprintf(stderr, "%s: failed to alloc context\n", __func__);
      *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
      return false;
   }

   driContextPriv->driverPrivate = brw;
   brw->driContext = driContextPriv;
   brw->screen = screen;
   brw->bufmgr = screen->bufmgr;

   brw->gen = devinfo->gen;
   brw->gt = devinfo->gt;
   brw->is_g4x = devinfo->is_g4x;
   brw->is_baytrail = devinfo->is_baytrail;
   brw->is_haswell = devinfo->is_haswell;
   brw->is_cherryview = devinfo->is_cherryview;
   brw->is_broxton = devinfo->is_broxton || devinfo->is_geminilake;
   brw->has_llc = devinfo->has_llc;
   brw->has_hiz = devinfo->has_hiz_and_separate_stencil;
   brw->has_separate_stencil = devinfo->has_hiz_and_separate_stencil;
   brw->has_pln = devinfo->has_pln;
   brw->has_compr4 = devinfo->has_compr4;
   brw->has_surface_tile_offset = devinfo->has_surface_tile_offset;
   brw->has_negative_rhw_bug = devinfo->has_negative_rhw_bug;
   brw->needs_unlit_centroid_workaround =
      devinfo->needs_unlit_centroid_workaround;

   brw->must_use_separate_stencil = devinfo->must_use_separate_stencil;
   brw->has_swizzling = screen->hw_has_swizzling;

   brw->isl_dev = screen->isl_dev;

   brw->vs.base.stage = MESA_SHADER_VERTEX;
   brw->tcs.base.stage = MESA_SHADER_TESS_CTRL;
   brw->tes.base.stage = MESA_SHADER_TESS_EVAL;
   brw->gs.base.stage = MESA_SHADER_GEOMETRY;
   brw->wm.base.stage = MESA_SHADER_FRAGMENT;
   if (brw->gen >= 8) {
      gen8_init_vtable_surface_functions(brw);
      brw->vtbl.emit_depth_stencil_hiz = gen8_emit_depth_stencil_hiz;
   } else if (brw->gen >= 7) {
      gen7_init_vtable_surface_functions(brw);
      brw->vtbl.emit_depth_stencil_hiz = gen7_emit_depth_stencil_hiz;
   } else if (brw->gen >= 6) {
      gen6_init_vtable_surface_functions(brw);
      brw->vtbl.emit_depth_stencil_hiz = gen6_emit_depth_stencil_hiz;
   } else {
      gen4_init_vtable_surface_functions(brw);
      brw->vtbl.emit_depth_stencil_hiz = brw_emit_depth_stencil_hiz;
   }

   brw_init_driver_functions(brw, &functions);

   if (notify_reset)
      functions.GetGraphicsResetStatus = brw_get_graphics_reset_status;

   struct gl_context *ctx = &brw->ctx;

   if (!_mesa_initialize_context(ctx, api, mesaVis, shareCtx, &functions)) {
      *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
      fprintf(stderr, "%s: failed to init mesa context\n", __func__);
      intelDestroyContext(driContextPriv);
      return false;
   }

   driContextSetFlags(ctx, flags);

   /* Initialize the software rasterizer and helper modules.
    *
    * As of GL 3.1 core, the gen4+ driver doesn't need the swrast context for
    * software fallbacks (which we have to support on legacy GL to do weird
    * glDrawPixels(), glBitmap(), and other functions).
    */
   if (api != API_OPENGL_CORE && api != API_OPENGLES2) {
      _swrast_CreateContext(ctx);
   }

   _vbo_CreateContext(ctx);
   if (ctx->swrast_context) {
      _tnl_CreateContext(ctx);
      TNL_CONTEXT(ctx)->Driver.RunPipeline = _tnl_run_pipeline;
      _swsetup_CreateContext(ctx);

      /* Configure swrast to match hardware characteristics: */
      _swrast_allow_pixel_fog(ctx, false);
      _swrast_allow_vertex_fog(ctx, true);
   }

   _mesa_meta_init(ctx);

   brw_process_driconf_options(brw);

   if (INTEL_DEBUG & DEBUG_PERF)
      brw->perf_debug = true;

   brw_initialize_cs_context_constants(brw);
   brw_initialize_context_constants(brw);

   ctx->Const.ResetStrategy = notify_reset
      ? GL_LOSE_CONTEXT_ON_RESET_ARB : GL_NO_RESET_NOTIFICATION_ARB;

   /* Reinitialize the context point state.  It depends on ctx->Const values. */
   _mesa_init_point(ctx);

   intel_fbo_init(brw);

   intel_batchbuffer_init(&brw->batch, brw->bufmgr, brw->has_llc);

   if (brw->gen >= 6) {
      /* Create a new hardware context.  Using a hardware context means that
       * our GPU state will be saved/restored on context switch, allowing us
       * to assume that the GPU is in the same state we left it in.
       *
       * This is required for transform feedback buffer offsets, query objects,
       * and also allows us to reduce how much state we have to emit.
       */
      brw->hw_ctx = brw_create_hw_context(brw->bufmgr);

      if (!brw->hw_ctx) {
         fprintf(stderr, "Failed to create hardware context.\n");
         intelDestroyContext(driContextPriv);
         return false;
      }
   }

   if (brw_init_pipe_control(brw, devinfo)) {
      *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
      intelDestroyContext(driContextPriv);
      return false;
   }

   brw_init_state(brw);

   intelInitExtensions(ctx);

   brw_init_surface_formats(brw);

   brw_blorp_init(brw);

   brw->urb.size = devinfo->urb.size;

   if (brw->gen == 6)
      brw->urb.gs_present = false;

   brw->prim_restart.in_progress = false;
   brw->prim_restart.enable_cut_index = false;
   brw->gs.enabled = false;
   brw->clip.viewport_count = 1;

   brw->predicate.state = BRW_PREDICATE_STATE_RENDER;

   brw->max_gtt_map_object_size = screen->max_gtt_map_object_size;

   ctx->VertexProgram._MaintainTnlProgram = true;
   ctx->FragmentProgram._MaintainTexEnvProgram = true;

   brw_draw_init( brw );

   if ((flags & __DRI_CTX_FLAG_DEBUG) != 0) {
      /* Turn on some extra GL_ARB_debug_output generation. */
      brw->perf_debug = true;
   }

   if ((flags & __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS) != 0) {
      ctx->Const.ContextFlags |= GL_CONTEXT_FLAG_ROBUST_ACCESS_BIT_ARB;
      ctx->Const.RobustAccess = GL_TRUE;
   }

   if (INTEL_DEBUG & DEBUG_SHADER_TIME)
      brw_init_shader_time(brw);

   _mesa_compute_version(ctx);

   _mesa_initialize_dispatch_tables(ctx);
   _mesa_initialize_vbo_vtxfmt(ctx);

   if (ctx->Extensions.INTEL_performance_query)
      brw_init_performance_queries(brw);

   vbo_use_buffer_objects(ctx);
   vbo_always_unmap_buffers(ctx);

   return true;
}

void
intelDestroyContext(__DRIcontext * driContextPriv)
{
   struct brw_context *brw =
      (struct brw_context *) driContextPriv->driverPrivate;
   struct gl_context *ctx = &brw->ctx;

   _mesa_meta_free(&brw->ctx);

   if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
      /* Force a report. */
      brw->shader_time.report_time = 0;

      brw_collect_and_report_shader_time(brw);
      brw_destroy_shader_time(brw);
   }

   if (brw->gen >= 6)
      blorp_finish(&brw->blorp);

   brw_destroy_state(brw);
   brw_draw_destroy(brw);

   brw_bo_unreference(brw->curbe.curbe_bo);
   if (brw->vs.base.scratch_bo)
      brw_bo_unreference(brw->vs.base.scratch_bo);
   if (brw->tcs.base.scratch_bo)
      brw_bo_unreference(brw->tcs.base.scratch_bo);
   if (brw->tes.base.scratch_bo)
      brw_bo_unreference(brw->tes.base.scratch_bo);
   if (brw->gs.base.scratch_bo)
      brw_bo_unreference(brw->gs.base.scratch_bo);
   if (brw->wm.base.scratch_bo)
      brw_bo_unreference(brw->wm.base.scratch_bo);

   brw_destroy_hw_context(brw->bufmgr, brw->hw_ctx);

   if (ctx->swrast_context) {
      _swsetup_DestroyContext(&brw->ctx);
      _tnl_DestroyContext(&brw->ctx);
   }
   _vbo_DestroyContext(&brw->ctx);

   if (ctx->swrast_context)
      _swrast_DestroyContext(&brw->ctx);

   brw_fini_pipe_control(brw);
   intel_batchbuffer_free(&brw->batch);

   brw_bo_unreference(brw->throttle_batch[1]);
   brw_bo_unreference(brw->throttle_batch[0]);
   brw->throttle_batch[1] = NULL;
   brw->throttle_batch[0] = NULL;

   driDestroyOptionCache(&brw->optionCache);

   /* free the Mesa context */
   _mesa_free_context_data(&brw->ctx);

   ralloc_free(brw);
   driContextPriv->driverPrivate = NULL;
}

GLboolean
intelUnbindContext(__DRIcontext * driContextPriv)
{
   /* Unset current context and dispatch table */
   _mesa_make_current(NULL, NULL, NULL);

   return true;
}

/**
 * Fixes up the context for GLES2/3 with our default-to-sRGB-capable behavior
 * on window system framebuffers.
 *
 * Desktop GL is fairly reasonable in its handling of sRGB: You can ask if
 * your renderbuffer can do sRGB encode, and you can flip a switch that does
 * sRGB encode if the renderbuffer can handle it.  You can ask specifically
 * for a visual where you're guaranteed to be capable, but it turns out that
 * everyone just makes all their ARGB8888 visuals capable and doesn't offer
 * incapable ones, because there's no difference between the two in resources
 * used.  Applications thus get built that accidentally rely on the default
 * visual choice being sRGB, so we make ours sRGB capable.  Everything sounds
 * great...
 *
 * But for GLES2/3, they decided that it was silly to not turn on sRGB encode
 * for sRGB renderbuffers you made with the GL_EXT_texture_sRGB equivalent.
 * So they removed the enable knob and made it "if the renderbuffer is sRGB
 * capable, do sRGB encode".  Then, for your window system renderbuffers, you
 * can ask for sRGB visuals and get sRGB encode, or not ask for sRGB visuals
 * and get no sRGB encode (assuming that both kinds of visual are available).
 * Thus our choice to support sRGB by default on our visuals for desktop would
 * result in broken rendering of GLES apps that aren't expecting sRGB encode.
 *
 * Unfortunately, renderbuffer setup happens before a context is created.  So
 * in intel_screen.c we always set up sRGB, and here, if you're a GLES2/3
 * context (without an sRGB visual, though we don't have sRGB visuals exposed
 * yet), we go turn that back off before anyone finds out.
 */
static void
intel_gles3_srgb_workaround(struct brw_context *brw,
                            struct gl_framebuffer *fb)
{
   struct gl_context *ctx = &brw->ctx;

   if (_mesa_is_desktop_gl(ctx) || !fb->Visual.sRGBCapable)
      return;

   /* Some day when we support the sRGB capable bit on visuals available for
    * GLES, we'll need to respect that and not disable things here.
    */
   fb->Visual.sRGBCapable = false;
   for (int i = 0; i < BUFFER_COUNT; i++) {
      struct gl_renderbuffer *rb = fb->Attachment[i].Renderbuffer;
      if (rb)
         rb->Format = _mesa_get_srgb_format_linear(rb->Format);
   }
}

GLboolean
intelMakeCurrent(__DRIcontext * driContextPriv,
                 __DRIdrawable * driDrawPriv,
                 __DRIdrawable * driReadPriv)
{
   struct brw_context *brw;
   GET_CURRENT_CONTEXT(curCtx);

   if (driContextPriv)
      brw = (struct brw_context *) driContextPriv->driverPrivate;
   else
      brw = NULL;

   /* According to the glXMakeCurrent() man page: "Pending commands to
    * the previous context, if any, are flushed before it is released."
    * But only flush if we're actually changing contexts.
    */
   if (brw_context(curCtx) && brw_context(curCtx) != brw) {
      _mesa_flush(curCtx);
   }

   if (driContextPriv) {
      struct gl_context *ctx = &brw->ctx;
      struct gl_framebuffer *fb, *readFb;

      if (driDrawPriv == NULL) {
         fb = _mesa_get_incomplete_framebuffer();
      } else {
         fb = driDrawPriv->driverPrivate;
         driContextPriv->dri2.draw_stamp = driDrawPriv->dri2.stamp - 1;
      }

      if (driReadPriv == NULL) {
         readFb = _mesa_get_incomplete_framebuffer();
      } else {
         readFb = driReadPriv->driverPrivate;
         driContextPriv->dri2.read_stamp = driReadPriv->dri2.stamp - 1;
      }

      /* The sRGB workaround changes the renderbuffer's format. We must change
       * the format before the renderbuffer's miptree gets allocated, otherwise
       * the formats of the renderbuffer and its miptree will differ.
       */
      intel_gles3_srgb_workaround(brw, fb);
      intel_gles3_srgb_workaround(brw, readFb);

      /* If the context viewport hasn't been initialized, force a call out to
       * the loader to get buffers so we have a drawable size for the initial
       * viewport. */
      if (!brw->ctx.ViewportInitialized)
         intel_prepare_render(brw);

      _mesa_make_current(ctx, fb, readFb);
   } else {
      _mesa_make_current(NULL, NULL, NULL);
   }

   return true;
}

void
intel_resolve_for_dri2_flush(struct brw_context *brw,
                             __DRIdrawable *drawable)
{
   if (brw->gen < 6) {
      /* MSAA and fast color clear are not supported, so don't waste time
       * checking whether a resolve is needed.
       */
      return;
   }

   struct gl_framebuffer *fb = drawable->driverPrivate;
   struct intel_renderbuffer *rb;

   /* Usually, only the back buffer will need to be downsampled. However,
    * the front buffer will also need it if the user has rendered into it.
    */
   static const gl_buffer_index buffers[2] = {
      BUFFER_BACK_LEFT,
      BUFFER_FRONT_LEFT,
   };

   for (int i = 0; i < 2; ++i) {
      rb = intel_get_renderbuffer(fb, buffers[i]);
      if (rb == NULL || rb->mt == NULL)
         continue;
      if (rb->mt->num_samples <= 1) {
         assert(rb->mt_layer == 0 && rb->mt_level == 0 &&
                rb->layer_count == 1);
         intel_miptree_prepare_access(brw, rb->mt, 0, 1, 0, 1, false, false);
      } else {
         intel_renderbuffer_downsample(brw, rb);
      }
   }
}

static unsigned
intel_bits_per_pixel(const struct intel_renderbuffer *rb)
{
   return _mesa_get_format_bytes(intel_rb_format(rb)) * 8;
}

static void
intel_query_dri2_buffers(struct brw_context *brw,
                         __DRIdrawable *drawable,
                         __DRIbuffer **buffers,
                         int *count);

static void
intel_process_dri2_buffer(struct brw_context *brw,
                          __DRIdrawable *drawable,
                          __DRIbuffer *buffer,
                          struct intel_renderbuffer *rb,
                          const char *buffer_name);

static void
intel_update_image_buffers(struct brw_context *brw, __DRIdrawable *drawable);

static void
intel_update_dri2_buffers(struct brw_context *brw, __DRIdrawable *drawable)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;
   struct intel_renderbuffer *rb;
   __DRIbuffer *buffers = NULL;
   int count;
   const char *region_name;

   /* Set this up front, so that in case our buffers get invalidated
    * while we're getting new buffers, we don't clobber the stamp and
    * thus ignore the invalidate. */
   drawable->lastStamp = drawable->dri2.stamp;

   if (unlikely(INTEL_DEBUG & DEBUG_DRI))
      fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);

   intel_query_dri2_buffers(brw, drawable, &buffers, &count);

   if (buffers == NULL)
      return;

   for (int i = 0; i < count; i++) {
      switch (buffers[i].attachment) {
      case __DRI_BUFFER_FRONT_LEFT:
         rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
         region_name = "dri2 front buffer";
         break;

      case __DRI_BUFFER_FAKE_FRONT_LEFT:
         rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
         region_name = "dri2 fake front buffer";
         break;

      case __DRI_BUFFER_BACK_LEFT:
         rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
         region_name = "dri2 back buffer";
         break;

      case __DRI_BUFFER_DEPTH:
      case __DRI_BUFFER_HIZ:
      case __DRI_BUFFER_DEPTH_STENCIL:
      case __DRI_BUFFER_STENCIL:
      case __DRI_BUFFER_ACCUM:
      default:
         fprintf(stderr,
                 "unhandled buffer attach event, attachment type %d\n",
                 buffers[i].attachment);
         return;
      }

      intel_process_dri2_buffer(brw, drawable, &buffers[i], rb, region_name);
   }

}

void
intel_update_renderbuffers(__DRIcontext *context, __DRIdrawable *drawable)
{
   struct brw_context *brw = context->driverPrivate;
   __DRIscreen *dri_screen = brw->screen->driScrnPriv;

   /* Set this up front, so that in case our buffers get invalidated
    * while we're getting new buffers, we don't clobber the stamp and
    * thus ignore the invalidate. */
   drawable->lastStamp = drawable->dri2.stamp;

   if (unlikely(INTEL_DEBUG & DEBUG_DRI))
      fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);

   if (dri_screen->image.loader)
      intel_update_image_buffers(brw, drawable);
   else
      intel_update_dri2_buffers(brw, drawable);

   driUpdateFramebufferSize(&brw->ctx, drawable);
}

/**
 * intel_prepare_render should be called anywhere that current read/drawbuffer
 * state is required.
 */
void
intel_prepare_render(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   __DRIcontext *driContext = brw->driContext;
   __DRIdrawable *drawable;

   drawable = driContext->driDrawablePriv;
   if (drawable && drawable->dri2.stamp != driContext->dri2.draw_stamp) {
      if (drawable->lastStamp != drawable->dri2.stamp)
         intel_update_renderbuffers(driContext, drawable);
      driContext->dri2.draw_stamp = drawable->dri2.stamp;
   }

   drawable = driContext->driReadablePriv;
   if (drawable && drawable->dri2.stamp != driContext->dri2.read_stamp) {
      if (drawable->lastStamp != drawable->dri2.stamp)
         intel_update_renderbuffers(driContext, drawable);
      driContext->dri2.read_stamp = drawable->dri2.stamp;
   }

   /* If we're currently rendering to the front buffer, the rendering
    * that will happen next will probably dirty the front buffer.  So
    * mark it as dirty here.
    */
   if (_mesa_is_front_buffer_drawing(ctx->DrawBuffer))
      brw->front_buffer_dirty = true;
}

/**
 * \brief Query DRI2 to obtain a DRIdrawable's buffers.
 *
 * To determine which DRI buffers to request, examine the renderbuffers
 * attached to the drawable's framebuffer. Then request the buffers with
 * DRI2GetBuffers() or DRI2GetBuffersWithFormat().
 *
 * This is called from intel_update_renderbuffers().
 *
 * \param drawable      Drawable whose buffers are queried.
 * \param buffers       [out] List of buffers returned by DRI2 query.
 * \param buffer_count  [out] Number of buffers returned.
 *
 * \see intel_update_renderbuffers()
 * \see DRI2GetBuffers()
 * \see DRI2GetBuffersWithFormat()
 */
static void
intel_query_dri2_buffers(struct brw_context *brw,
                         __DRIdrawable *drawable,
                         __DRIbuffer **buffers,
                         int *buffer_count)
{
   __DRIscreen *dri_screen = brw->screen->driScrnPriv;
   struct gl_framebuffer *fb = drawable->driverPrivate;
   int i = 0;
   unsigned attachments[8];

   struct intel_renderbuffer *front_rb;
   struct intel_renderbuffer *back_rb;

   front_rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
   back_rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);

   memset(attachments, 0, sizeof(attachments));
   if ((_mesa_is_front_buffer_drawing(fb) ||
        _mesa_is_front_buffer_reading(fb) ||
        !back_rb) && front_rb) {
      /* If a fake front buffer is in use, then querying for
       * __DRI_BUFFER_FRONT_LEFT will cause the server to copy the image from
       * the real front buffer to the fake front buffer.  So before doing the
       * query, we need to make sure all the pending drawing has landed in the
       * real front buffer.
       */
      intel_batchbuffer_flush(brw);
      intel_flush_front(&brw->ctx);

      attachments[i++] = __DRI_BUFFER_FRONT_LEFT;
      attachments[i++] = intel_bits_per_pixel(front_rb);
   } else if (front_rb && brw->front_buffer_dirty) {
      /* We have pending front buffer rendering, but we aren't querying for a
       * front buffer.  If the front buffer we have is a fake front buffer,
       * the X server is going to throw it away when it processes the query.
       * So before doing the query, make sure all the pending drawing has
       * landed in the real front buffer.
       */
      intel_batchbuffer_flush(brw);
      intel_flush_front(&brw->ctx);
   }

   if (back_rb) {
      attachments[i++] = __DRI_BUFFER_BACK_LEFT;
      attachments[i++] = intel_bits_per_pixel(back_rb);
   }

   assert(i <= ARRAY_SIZE(attachments));
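
   /* attachments[] holds (attachment token, bits-per-pixel) pairs, so the
    * 8 slots cover at most 4 attachments and the request below passes i / 2
    * as the attachment count.
    */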

   *buffers =
      dri_screen->dri2.loader->getBuffersWithFormat(drawable,
                                                    &drawable->w,
                                                    &drawable->h,
                                                    attachments, i / 2,
                                                    buffer_count,
                                                    drawable->loaderPrivate);
}

/**
 * \brief Assign a DRI buffer's DRM region to a renderbuffer.
 *
 * This is called from intel_update_renderbuffers().
 *
 * \par Note:
 *    DRI buffers whose attachment point is DRI2BufferStencil or
 *    DRI2BufferDepthStencil are handled as special cases.
 *
 * \param buffer_name is a human readable name, such as "dri2 front buffer",
 *        that is passed to brw_bo_gem_create_from_name().
 *
 * \see intel_update_renderbuffers()
 */
static void
intel_process_dri2_buffer(struct brw_context *brw,
                          __DRIdrawable *drawable,
                          __DRIbuffer *buffer,
                          struct intel_renderbuffer *rb,
                          const char *buffer_name)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;
   struct brw_bo *bo;

   if (!rb)
      return;

   unsigned num_samples = rb->Base.Base.NumSamples;

   /* We try to avoid closing and reopening the same BO name, because the first
    * use of a mapping of the buffer involves a bunch of page faulting which is
    * moderately expensive.
    */
   struct intel_mipmap_tree *last_mt;
   if (num_samples == 0)
      last_mt = rb->mt;
   else
      last_mt = rb->singlesample_mt;

   uint32_t old_name = 0;
   if (last_mt) {
      /* The bo already has a name because the miptree was created by a
       * previous call to intel_process_dri2_buffer(). If a bo already has a
       * name, then brw_bo_flink() is a low-cost getter.  It does not
       * create a new name.
       */
      brw_bo_flink(last_mt->bo, &old_name);
   }

   if (old_name == buffer->name)
      return;

   if (unlikely(INTEL_DEBUG & DEBUG_DRI)) {
      fprintf(stderr,
              "attaching buffer %d, at %d, cpp %d, pitch %d\n",
              buffer->name, buffer->attachment,
              buffer->cpp, buffer->pitch);
   }

   bo = brw_bo_gem_create_from_name(brw->bufmgr, buffer_name,
                                    buffer->name);
   if (!bo) {
      fprintf(stderr,
              "Failed to open BO for returned DRI2 buffer "
              "(%dx%d, %s, named %d).\n"
              "This is likely a bug in the X Server that will lead to a "
              "crash soon.\n",
              drawable->w, drawable->h, buffer_name, buffer->name);
      return;
   }

   struct intel_mipmap_tree *mt =
      intel_miptree_create_for_bo(brw,
                                  bo,
                                  intel_rb_format(rb),
                                  0,
                                  drawable->w,
                                  drawable->h,
                                  1,
                                  buffer->pitch,
                                  MIPTREE_LAYOUT_FOR_SCANOUT);
   if (!mt) {
      brw_bo_unreference(bo);
      return;
   }

   if (!intel_update_winsys_renderbuffer_miptree(brw, rb, mt,
                                                 drawable->w, drawable->h,
                                                 buffer->pitch)) {
      brw_bo_unreference(bo);
      intel_miptree_release(&mt);
      return;
   }

   if (_mesa_is_front_buffer_drawing(fb) &&
       (buffer->attachment == __DRI_BUFFER_FRONT_LEFT ||
        buffer->attachment == __DRI_BUFFER_FAKE_FRONT_LEFT) &&
       rb->Base.Base.NumSamples > 1) {
      intel_renderbuffer_upsample(brw, rb);
   }

   assert(rb->mt);

   brw_bo_unreference(bo);
}

/**
 * \brief Query DRI image loader to obtain a DRIdrawable's buffers.
 *
 * To determine which DRI buffers to request, examine the renderbuffers
 * attached to the drawable's framebuffer. Then request the buffers from
 * the image loader
 *
 * This is called from intel_update_renderbuffers().
 *
 * \param drawable      Drawable whose buffers are queried.
 * \param buffers       [out] List of buffers returned by DRI2 query.
 * \param buffer_count  [out] Number of buffers returned.
 *
 * \see intel_update_renderbuffers()
 */

static void
intel_update_image_buffer(struct brw_context *intel,
                          __DRIdrawable *drawable,
                          struct intel_renderbuffer *rb,
                          __DRIimage *buffer,
                          enum __DRIimageBufferMask buffer_type)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;

   if (!rb || !buffer->bo)
      return;

   unsigned num_samples = rb->Base.Base.NumSamples;

   /* Check and see if we're already bound to the right
    * buffer object
    */
   struct intel_mipmap_tree *last_mt;
   if (num_samples == 0)
      last_mt = rb->mt;
   else
      last_mt = rb->singlesample_mt;

   if (last_mt && last_mt->bo == buffer->bo)
      return;

   enum isl_colorspace colorspace;
   switch (_mesa_get_format_color_encoding(intel_rb_format(rb))) {
   case GL_SRGB:
      colorspace = ISL_COLORSPACE_SRGB;
      break;
   case GL_LINEAR:
      colorspace = ISL_COLORSPACE_LINEAR;
      break;
   default:
      unreachable("Invalid color encoding");
   }

   struct intel_mipmap_tree *mt =
      intel_miptree_create_for_dri_image(intel, buffer, GL_TEXTURE_2D,
                                         colorspace, true);
   if (!mt)
      return;

   if (!intel_update_winsys_renderbuffer_miptree(intel, rb, mt,
                                                 buffer->width, buffer->height,
                                                 buffer->pitch)) {
      intel_miptree_release(&mt);
      return;
   }

   if (_mesa_is_front_buffer_drawing(fb) &&
       buffer_type == __DRI_IMAGE_BUFFER_FRONT &&
       rb->Base.Base.NumSamples > 1) {
      intel_renderbuffer_upsample(intel, rb);
   }
}

static void
intel_update_image_buffers(struct brw_context *brw, __DRIdrawable *drawable)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;
   __DRIscreen *dri_screen = brw->screen->driScrnPriv;
   struct intel_renderbuffer *front_rb;
   struct intel_renderbuffer *back_rb;
   struct __DRIimageList images;
   mesa_format format;
   uint32_t buffer_mask = 0;
   int ret;

   front_rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
   back_rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);

   if (back_rb)
      format = intel_rb_format(back_rb);
   else if (front_rb)
      format = intel_rb_format(front_rb);
   else
      return;

   if (front_rb && (_mesa_is_front_buffer_drawing(fb) ||
                    _mesa_is_front_buffer_reading(fb) || !back_rb)) {
      buffer_mask |= __DRI_IMAGE_BUFFER_FRONT;
   }

   if (back_rb)
      buffer_mask |= __DRI_IMAGE_BUFFER_BACK;

   ret = dri_screen->image.loader->getBuffers(drawable,
                                              driGLFormatToImageFormat(format),
                                              &drawable->dri2.stamp,
                                              drawable->loaderPrivate,
                                              buffer_mask,
                                              &images);
   if (!ret)
      return;

   if (images.image_mask & __DRI_IMAGE_BUFFER_FRONT) {
      drawable->w = images.front->width;
      drawable->h = images.front->height;
      intel_update_image_buffer(brw,
                                drawable,
                                front_rb,
                                images.front,
                                __DRI_IMAGE_BUFFER_FRONT);
   }

   if (images.image_mask & __DRI_IMAGE_BUFFER_BACK) {
      drawable->w = images.back->width;
      drawable->h = images.back->height;
      intel_update_image_buffer(brw,
                                drawable,
                                back_rb,
                                images.back,
                                __DRI_IMAGE_BUFFER_BACK);
   }
}