Revert "i965: Call intel_prepare_render() from intel_update_state()"
[mesa.git] / src / mesa / drivers / dri / i965 / brw_context.c
/*
 Copyright 2003 VMware, Inc.
 Copyright (C) Intel Corp. 2006. All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */


#include "compiler/nir/nir.h"
#include "main/api_exec.h"
#include "main/context.h"
#include "main/fbobject.h"
#include "main/extensions.h"
#include "main/imports.h"
#include "main/macros.h"
#include "main/points.h"
#include "main/version.h"
#include "main/vtxfmt.h"
#include "main/texobj.h"
#include "main/framebuffer.h"
#include "main/stencil.h"
#include "main/state.h"

#include "vbo/vbo_context.h"

#include "drivers/common/driverfuncs.h"
#include "drivers/common/meta.h"
#include "utils.h"

#include "brw_context.h"
#include "brw_defines.h"
#include "brw_blorp.h"
#include "brw_draw.h"
#include "brw_state.h"

#include "intel_batchbuffer.h"
#include "intel_buffer_objects.h"
#include "intel_buffers.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
#include "intel_pixel.h"
#include "intel_image.h"
#include "intel_tex.h"
#include "intel_tex_obj.h"

#include "swrast_setup/swrast_setup.h"
#include "tnl/tnl.h"
#include "tnl/t_pipeline.h"
#include "util/ralloc.h"
#include "util/debug.h"
#include "isl/isl.h"

/***************************************
 * Mesa's Driver Functions
 ***************************************/

const char *const brw_vendor_string = "Intel Open Source Technology Center";

static const char *
get_bsw_model(const struct intel_screen *screen)
{
   switch (screen->eu_total) {
   case 16:
      return "405";
   case 12:
      return "400";
   default:
      return "   ";
   }
}

const char *
brw_get_renderer_string(const struct intel_screen *screen)
{
   const char *chipset;
   static char buffer[128];
   char *bsw = NULL;

   switch (screen->deviceID) {
#undef CHIPSET
#define CHIPSET(id, symbol, str) case id: chipset = str; break;
#include "pci_ids/i965_pci_ids.h"
   default:
      chipset = "Unknown Intel Chipset";
      break;
   }

   /* Braswell branding is funny, so we have to fix it up here */
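   /* Illustrative note (assumption: the PCI-ID table entry for device
    * 0x22B1 contains the literal placeholder "XXX", e.g. something like
    * "Intel(R) HD Graphics XXX (Braswell)"). The memcpy() below patches
    * exactly three bytes in place, which is why get_bsw_model() must
    * always return a three-character string -- hence the "   " fallback
    * above.
    */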
   if (screen->deviceID == 0x22B1) {
      bsw = strdup(chipset);
      char *needle = strstr(bsw, "XXX");
      if (needle) {
         memcpy(needle, get_bsw_model(screen), 3);
         chipset = bsw;
      }
   }

   (void) driGetRendererString(buffer, chipset, 0);
   free(bsw);
   return buffer;
}

static const GLubyte *
intel_get_string(struct gl_context * ctx, GLenum name)
{
   const struct brw_context *const brw = brw_context(ctx);

   switch (name) {
   case GL_VENDOR:
      return (GLubyte *) brw_vendor_string;

   case GL_RENDERER:
      return
         (GLubyte *) brw_get_renderer_string(brw->screen);

   default:
      return NULL;
   }
}

static void
intel_viewport(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);
   __DRIcontext *driContext = brw->driContext;

   if (_mesa_is_winsys_fbo(ctx->DrawBuffer)) {
      if (driContext->driDrawablePriv)
         dri2InvalidateDrawable(driContext->driDrawablePriv);
      if (driContext->driReadablePriv)
         dri2InvalidateDrawable(driContext->driReadablePriv);
   }
}

static void
intel_update_framebuffer(struct gl_context *ctx,
                         struct gl_framebuffer *fb)
{
   struct brw_context *brw = brw_context(ctx);

   /* Quantize the derived default number of samples
    */
   fb->DefaultGeometry._NumSamples =
      intel_quantize_num_samples(brw->screen,
                                 fb->DefaultGeometry.NumSamples);
}

static void
intel_update_state(struct gl_context * ctx)
{
   GLuint new_state = ctx->NewState;
   struct brw_context *brw = brw_context(ctx);

   if (ctx->swrast_context)
      _swrast_InvalidateState(ctx, new_state);

   brw->NewGLState |= new_state;

   if (new_state & (_NEW_SCISSOR | _NEW_BUFFERS | _NEW_VIEWPORT))
      _mesa_update_draw_buffer_bounds(ctx, ctx->DrawBuffer);

   if (new_state & (_NEW_STENCIL | _NEW_BUFFERS)) {
      brw->stencil_enabled = _mesa_stencil_is_enabled(ctx);
      brw->stencil_two_sided = _mesa_stencil_is_two_sided(ctx);
      brw->stencil_write_enabled =
         _mesa_stencil_is_write_enabled(ctx, brw->stencil_two_sided);
   }

   if (new_state & _NEW_POLYGON)
      brw->polygon_front_bit = _mesa_polygon_get_front_bit(ctx);

   if (new_state & _NEW_BUFFERS) {
      intel_update_framebuffer(ctx, ctx->DrawBuffer);
      if (ctx->DrawBuffer != ctx->ReadBuffer)
         intel_update_framebuffer(ctx, ctx->ReadBuffer);
   }
}

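/* Descriptive note: this macro yields the flushFrontBuffer callback of
 * whichever loader interface the screen was created with -- the image
 * loader when present, otherwise the classic DRI2 loader.
 */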
#define flushFront(screen)      ((screen)->image.loader ? (screen)->image.loader->flushFrontBuffer : (screen)->dri2.loader->flushFrontBuffer)

static void
intel_flush_front(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);
   __DRIcontext *driContext = brw->driContext;
   __DRIdrawable *driDrawable = driContext->driDrawablePriv;
   __DRIscreen *const dri_screen = brw->screen->driScrnPriv;

   if (brw->front_buffer_dirty && _mesa_is_winsys_fbo(ctx->DrawBuffer)) {
      if (flushFront(dri_screen) && driDrawable &&
          driDrawable->loaderPrivate) {

         /* Resolve before flushing FAKE_FRONT_LEFT to FRONT_LEFT.
          *
          * This potentially resolves both front and back buffer. It
          * is unnecessary to resolve the back, but harms nothing except
          * performance. And no one cares about front-buffer render
          * performance.
          */
         intel_resolve_for_dri2_flush(brw, driDrawable);
         intel_batchbuffer_flush(brw);

         flushFront(dri_screen)(driDrawable, driDrawable->loaderPrivate);

         /* We set the dirty bit in intel_prepare_render() if we're
          * front buffer rendering once we get there.
          */
         brw->front_buffer_dirty = false;
      }
   }
}

static void
intel_glFlush(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);

   intel_batchbuffer_flush(brw);
   intel_flush_front(ctx);

   brw->need_flush_throttle = true;
}

static void
intel_finish(struct gl_context * ctx)
{
   struct brw_context *brw = brw_context(ctx);

   intel_glFlush(ctx);

   if (brw->batch.last_bo)
      brw_bo_wait_rendering(brw->batch.last_bo);
}

static void
brw_init_driver_functions(struct brw_context *brw,
                          struct dd_function_table *functions)
{
   _mesa_init_driver_functions(functions);

   /* GLX uses DRI2 invalidate events to handle window resizing.
    * Unfortunately, EGL does not - libEGL is written in XCB (not Xlib),
    * which doesn't provide a mechanism for snooping the event queues.
    *
    * So EGL still relies on viewport hacks to handle window resizing.
    * This should go away with DRI3000.
    */
   if (!brw->driContext->driScreenPriv->dri2.useInvalidate)
      functions->Viewport = intel_viewport;

   functions->Flush = intel_glFlush;
   functions->Finish = intel_finish;
   functions->GetString = intel_get_string;
   functions->UpdateState = intel_update_state;

   intelInitTextureFuncs(functions);
   intelInitTextureImageFuncs(functions);
   intelInitTextureSubImageFuncs(functions);
   intelInitTextureCopyImageFuncs(functions);
   intelInitCopyImageFuncs(functions);
   intelInitClearFuncs(functions);
   intelInitBufferFuncs(functions);
   intelInitPixelFuncs(functions);
   intelInitBufferObjectFuncs(functions);
   brw_init_syncobj_functions(functions);
   brw_init_object_purgeable_functions(functions);

   brwInitFragProgFuncs( functions );
   brw_init_common_queryobj_functions(functions);
   if (brw->gen >= 8 || brw->is_haswell)
      hsw_init_queryobj_functions(functions);
   else if (brw->gen >= 6)
      gen6_init_queryobj_functions(functions);
   else
      gen4_init_queryobj_functions(functions);
   brw_init_compute_functions(functions);
   brw_init_conditional_render_functions(functions);

   functions->QueryInternalFormat = brw_query_internal_format;

   functions->NewTransformFeedback = brw_new_transform_feedback;
   functions->DeleteTransformFeedback = brw_delete_transform_feedback;
   if (can_do_mi_math_and_lrr(brw->screen)) {
      functions->BeginTransformFeedback = hsw_begin_transform_feedback;
      functions->EndTransformFeedback = hsw_end_transform_feedback;
      functions->PauseTransformFeedback = hsw_pause_transform_feedback;
      functions->ResumeTransformFeedback = hsw_resume_transform_feedback;
   } else if (brw->gen >= 7) {
      functions->BeginTransformFeedback = gen7_begin_transform_feedback;
      functions->EndTransformFeedback = gen7_end_transform_feedback;
      functions->PauseTransformFeedback = gen7_pause_transform_feedback;
      functions->ResumeTransformFeedback = gen7_resume_transform_feedback;
      functions->GetTransformFeedbackVertexCount =
         brw_get_transform_feedback_vertex_count;
   } else {
      functions->BeginTransformFeedback = brw_begin_transform_feedback;
      functions->EndTransformFeedback = brw_end_transform_feedback;
      functions->PauseTransformFeedback = brw_pause_transform_feedback;
      functions->ResumeTransformFeedback = brw_resume_transform_feedback;
      functions->GetTransformFeedbackVertexCount =
         brw_get_transform_feedback_vertex_count;
   }

   if (brw->gen >= 6)
      functions->GetSamplePosition = gen6_get_sample_position;
}

static void
brw_initialize_context_constants(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   const struct brw_compiler *compiler = brw->screen->compiler;

   const bool stage_exists[MESA_SHADER_STAGES] = {
      [MESA_SHADER_VERTEX] = true,
      [MESA_SHADER_TESS_CTRL] = brw->gen >= 7,
      [MESA_SHADER_TESS_EVAL] = brw->gen >= 7,
      [MESA_SHADER_GEOMETRY] = brw->gen >= 6,
      [MESA_SHADER_FRAGMENT] = true,
      [MESA_SHADER_COMPUTE] =
         ((ctx->API == API_OPENGL_COMPAT || ctx->API == API_OPENGL_CORE) &&
          ctx->Const.MaxComputeWorkGroupSize[0] >= 1024) ||
         (ctx->API == API_OPENGLES2 &&
          ctx->Const.MaxComputeWorkGroupSize[0] >= 128) ||
         _mesa_extension_override_enables.ARB_compute_shader,
   };

   unsigned num_stages = 0;
   for (int i = 0; i < MESA_SHADER_STAGES; i++) {
      if (stage_exists[i])
         num_stages++;
   }

   unsigned max_samplers =
      brw->gen >= 8 || brw->is_haswell ? BRW_MAX_TEX_UNIT : 16;

   ctx->Const.MaxDualSourceDrawBuffers = 1;
   ctx->Const.MaxDrawBuffers = BRW_MAX_DRAW_BUFFERS;
   ctx->Const.MaxCombinedShaderOutputResources =
      MAX_IMAGE_UNITS + BRW_MAX_DRAW_BUFFERS;

   /* The timestamp register we can read for glGetTimestamp() is
    * sometimes only 32 bits, before scaling to nanoseconds (depending
    * on kernel).
    *
    * Once scaled to nanoseconds the timestamp would roll over at a
    * non-power-of-two, so an application couldn't use
    * GL_QUERY_COUNTER_BITS to handle rollover correctly. Instead, we
    * report 36 bits and truncate at that (rolling over 5 times as
    * often as the HW counter), and when the 32-bit counter rolls
    * over, it happens to also be at a rollover in the reported value
    * from near (1<<36) to 0.
    *
    * The low 32 bits roll over in ~343 seconds. Our 36-bit result
    * rolls over every ~69 seconds.
    */
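   /* Worked numbers behind the comment above (a sketch, assuming the
    * common 80 ns timestamp tick, i.e. a 12.5 MHz counter):
    *
    *    2^32 ticks * 80 ns/tick ~= 343.6 s   (raw 32-bit rollover)
    *    2^36 ns                 ~=  68.7 s   (reported 36-bit rollover)
    *
    * 343.6 / 68.7 ~= 5, which is the "rolling over 5 times as often"
    * mentioned above.
    */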
   ctx->Const.QueryCounterBits.Timestamp = 36;

   ctx->Const.MaxTextureCoordUnits = 8; /* Mesa limit */
   ctx->Const.MaxImageUnits = MAX_IMAGE_UNITS;
   if (brw->gen >= 7) {
      ctx->Const.MaxRenderbufferSize = 16384;
      ctx->Const.MaxTextureLevels = MIN2(15 /* 16384 */, MAX_TEXTURE_LEVELS);
      ctx->Const.MaxCubeTextureLevels = 15; /* 16384 */
   } else {
      ctx->Const.MaxRenderbufferSize = 8192;
      ctx->Const.MaxTextureLevels = MIN2(14 /* 8192 */, MAX_TEXTURE_LEVELS);
      ctx->Const.MaxCubeTextureLevels = 14; /* 8192 */
   }
   ctx->Const.Max3DTextureLevels = 12; /* 2048 */
   ctx->Const.MaxArrayTextureLayers = brw->gen >= 7 ? 2048 : 512;
   ctx->Const.MaxTextureMbytes = 1536;
   ctx->Const.MaxTextureRectSize = brw->gen >= 7 ? 16384 : 8192;
   ctx->Const.MaxTextureMaxAnisotropy = 16.0;
   ctx->Const.MaxTextureLodBias = 15.0;
   ctx->Const.StripTextureBorder = true;
   if (brw->gen >= 7) {
      ctx->Const.MaxProgramTextureGatherComponents = 4;
      ctx->Const.MinProgramTextureGatherOffset = -32;
      ctx->Const.MaxProgramTextureGatherOffset = 31;
   } else if (brw->gen == 6) {
      ctx->Const.MaxProgramTextureGatherComponents = 1;
      ctx->Const.MinProgramTextureGatherOffset = -8;
      ctx->Const.MaxProgramTextureGatherOffset = 7;
   }

   ctx->Const.MaxUniformBlockSize = 65536;

   for (int i = 0; i < MESA_SHADER_STAGES; i++) {
      struct gl_program_constants *prog = &ctx->Const.Program[i];

      if (!stage_exists[i])
         continue;

      prog->MaxTextureImageUnits = max_samplers;

      prog->MaxUniformBlocks = BRW_MAX_UBO;
      prog->MaxCombinedUniformComponents =
         prog->MaxUniformComponents +
         ctx->Const.MaxUniformBlockSize / 4 * prog->MaxUniformBlocks;
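      /* Worked example (pure arithmetic from the lines above): with a
       * 64 kB max block size, each UBO contributes 65536 / 4 = 16384
       * float-sized components, so the combined limit is the default
       * uniform count plus 16384 * BRW_MAX_UBO.
       */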

      prog->MaxAtomicCounters = MAX_ATOMIC_COUNTERS;
      prog->MaxAtomicBuffers = BRW_MAX_ABO;
      prog->MaxImageUniforms = compiler->scalar_stage[i] ? BRW_MAX_IMAGES : 0;
      prog->MaxShaderStorageBlocks = BRW_MAX_SSBO;
   }

   ctx->Const.MaxTextureUnits =
      MIN2(ctx->Const.MaxTextureCoordUnits,
           ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits);

   ctx->Const.MaxUniformBufferBindings = num_stages * BRW_MAX_UBO;
   ctx->Const.MaxCombinedUniformBlocks = num_stages * BRW_MAX_UBO;
   ctx->Const.MaxCombinedAtomicBuffers = num_stages * BRW_MAX_ABO;
   ctx->Const.MaxCombinedShaderStorageBlocks = num_stages * BRW_MAX_SSBO;
   ctx->Const.MaxShaderStorageBufferBindings = num_stages * BRW_MAX_SSBO;
   ctx->Const.MaxCombinedTextureImageUnits = num_stages * max_samplers;
   ctx->Const.MaxCombinedImageUniforms = num_stages * BRW_MAX_IMAGES;

   /* Hardware only supports a limited number of transform feedback buffers.
    * So we need to override the Mesa default (which is based only on software
    * limits).
    */
   ctx->Const.MaxTransformFeedbackBuffers = BRW_MAX_SOL_BUFFERS;

   /* On Gen6, in the worst case, we use up one binding table entry per
    * transform feedback component (see comments above the definition of
    * BRW_MAX_SOL_BINDINGS, in brw_context.h), so we need to advertise a value
    * for MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS equal to
    * BRW_MAX_SOL_BINDINGS.
    *
    * In "separate components" mode, we need to divide this value by
    * BRW_MAX_SOL_BUFFERS, so that the total number of binding table entries
    * used up by all buffers will not exceed BRW_MAX_SOL_BINDINGS.
    */
   ctx->Const.MaxTransformFeedbackInterleavedComponents = BRW_MAX_SOL_BINDINGS;
   ctx->Const.MaxTransformFeedbackSeparateComponents =
      BRW_MAX_SOL_BINDINGS / BRW_MAX_SOL_BUFFERS;
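   /* Worked example (assumption: BRW_MAX_SOL_BINDINGS is 64 and
    * BRW_MAX_SOL_BUFFERS is 4, per their definitions in brw_context.h):
    * interleaved mode advertises 64 components, while separate mode
    * advertises 64 / 4 = 16 components per buffer, so four fully loaded
    * buffers still fit in 64 binding table entries.
    */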

   ctx->Const.AlwaysUseGetTransformFeedbackVertexCount =
      !can_do_mi_math_and_lrr(brw->screen);

   int max_samples;
   const int *msaa_modes = intel_supported_msaa_modes(brw->screen);
   const int clamp_max_samples =
      driQueryOptioni(&brw->optionCache, "clamp_max_samples");

   if (clamp_max_samples < 0) {
      max_samples = msaa_modes[0];
   } else {
      /* Select the largest supported MSAA mode that does not exceed
       * clamp_max_samples.
       */
      max_samples = 0;
      for (int i = 0; msaa_modes[i] != 0; ++i) {
         if (msaa_modes[i] <= clamp_max_samples) {
            max_samples = msaa_modes[i];
            break;
         }
      }
   }
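   /* Worked example (assumption: intel_supported_msaa_modes() returns a
    * zero-terminated list sorted in descending order, e.g. {8, 4, 2, 0}
    * on a Gen7 part): with clamp_max_samples = 6 the loop skips 8 and
    * selects 4; with a negative clamp we simply take msaa_modes[0] = 8.
    */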

   ctx->Const.MaxSamples = max_samples;
   ctx->Const.MaxColorTextureSamples = max_samples;
   ctx->Const.MaxDepthTextureSamples = max_samples;
   ctx->Const.MaxIntegerSamples = max_samples;
   ctx->Const.MaxImageSamples = 0;

   /* gen6_set_sample_maps() sets SampleMap{2,4,8}x variables which are used
    * to map indices of rectangular grid to sample numbers within a pixel.
    * These variables are used by GL_EXT_framebuffer_multisample_blit_scaled
    * extension implementation. For more details see the comment above
    * gen6_set_sample_maps() definition.
    */
   gen6_set_sample_maps(ctx);

   ctx->Const.MinLineWidth = 1.0;
   ctx->Const.MinLineWidthAA = 1.0;
   if (brw->gen >= 6) {
      ctx->Const.MaxLineWidth = 7.375;
      ctx->Const.MaxLineWidthAA = 7.375;
      ctx->Const.LineWidthGranularity = 0.125;
   } else {
      ctx->Const.MaxLineWidth = 7.0;
      ctx->Const.MaxLineWidthAA = 7.0;
      ctx->Const.LineWidthGranularity = 0.5;
   }

   /* For non-antialiased lines, we have to round the line width to the
    * nearest whole number. Make sure that we don't advertise a line
    * width that, when rounded, will be beyond the actual hardware
    * maximum.
    */
   assert(roundf(ctx->Const.MaxLineWidth) <= ctx->Const.MaxLineWidth);

   ctx->Const.MinPointSize = 1.0;
   ctx->Const.MinPointSizeAA = 1.0;
   ctx->Const.MaxPointSize = 255.0;
   ctx->Const.MaxPointSizeAA = 255.0;
   ctx->Const.PointSizeGranularity = 1.0;

   if (brw->gen >= 5 || brw->is_g4x)
      ctx->Const.MaxClipPlanes = 8;

   ctx->Const.GLSLTessLevelsAsInputs = true;
   ctx->Const.LowerTCSPatchVerticesIn = brw->gen >= 8;
   ctx->Const.LowerTESPatchVerticesIn = true;
   ctx->Const.PrimitiveRestartForPatches = true;

   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeInstructions = 16 * 1024;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxAluInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxTexInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxTexIndirections = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAluInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTexInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTexIndirections = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAttribs = 16;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTemps = 256;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAddressRegs = 1;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeParameters = 1024;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxEnvParams =
      MIN2(ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeParameters,
           ctx->Const.Program[MESA_SHADER_VERTEX].MaxEnvParams);

   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeInstructions = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAluInstructions = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTexInstructions = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTexIndirections = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAttribs = 12;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTemps = 256;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAddressRegs = 0;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeParameters = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxEnvParams =
      MIN2(ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeParameters,
           ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxEnvParams);

   /* Fragment shaders use real, 32-bit twos-complement integers for all
    * integer types.
    */
   ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.RangeMin = 31;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.RangeMax = 30;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.Precision = 0;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].HighInt = ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MediumInt = ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt;

   ctx->Const.Program[MESA_SHADER_VERTEX].LowInt.RangeMin = 31;
   ctx->Const.Program[MESA_SHADER_VERTEX].LowInt.RangeMax = 30;
   ctx->Const.Program[MESA_SHADER_VERTEX].LowInt.Precision = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].HighInt = ctx->Const.Program[MESA_SHADER_VERTEX].LowInt;
   ctx->Const.Program[MESA_SHADER_VERTEX].MediumInt = ctx->Const.Program[MESA_SHADER_VERTEX].LowInt;

   /* Gen6 converts quads to polygons at the beginning of the 3D pipeline,
    * but we're not sure how it's actually done with respect to vertex
    * order, which affects the provoking vertex decision. Always use the
    * last-vertex convention for quad primitives, which works as expected
    * for now.
    */
   if (brw->gen >= 6)
      ctx->Const.QuadsFollowProvokingVertexConvention = false;

   ctx->Const.NativeIntegers = true;
   ctx->Const.VertexID_is_zero_based = true;

   /* Regarding the CMP instruction, the Ivybridge PRM says:
    *
    *   "For each enabled channel 0b or 1b is assigned to the appropriate flag
    *    bit and 0/all zeros or all ones (e.g, byte 0xFF, word 0xFFFF, DWord
    *    0xFFFFFFFF) is assigned to dst."
    *
    * but PRMs for earlier generations say
    *
    *   "In dword format, one GRF may store up to 8 results. When the register
    *    is used later as a vector of Booleans, as only LSB at each channel
    *    contains meaning [sic] data, software should make sure all higher bits
    *    are masked out (e.g. by 'and-ing' an [sic] 0x01 constant)."
    *
    * We select the representation of a true boolean uniform to be ~0, and fix
    * the results of Gen <= 5 CMP instructions with -(result & 1).
    */
   ctx->Const.UniformBooleanTrue = ~0;

   /* From the gen4 PRM, volume 4 page 127:
    *
    *   "For SURFTYPE_BUFFER non-rendertarget surfaces, this field specifies
    *    the base address of the first element of the surface, computed in
    *    software by adding the surface base address to the byte offset of
    *    the element in the buffer."
    *
    * However, unaligned accesses are slower, so enforce buffer alignment.
    *
    * In order to push UBO data, 3DSTATE_CONSTANT_XS imposes an additional
    * restriction: the start of the buffer needs to be 32B aligned.
    */
   ctx->Const.UniformBufferOffsetAlignment = 32;

   /* ShaderStorageBufferOffsetAlignment should be a cacheline (64 bytes) so
    * that we can safely have the CPU and GPU writing the same SSBO on
    * non-cache-coherent systems (our Atom CPUs). With UBOs, the GPU never
    * writes, so there's no problem. For an SSBO, the GPU and the CPU can
    * be updating disjoint regions of the buffer simultaneously and that will
    * break if the regions overlap the same cacheline.
    */
   ctx->Const.ShaderStorageBufferOffsetAlignment = 64;
   ctx->Const.TextureBufferOffsetAlignment = 16;
   ctx->Const.MaxTextureBufferSize = 128 * 1024 * 1024;

   if (brw->gen >= 6) {
      ctx->Const.MaxVarying = 32;
      ctx->Const.Program[MESA_SHADER_VERTEX].MaxOutputComponents = 128;
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxInputComponents = 64;
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxOutputComponents = 128;
      ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxInputComponents = 128;
      ctx->Const.Program[MESA_SHADER_TESS_CTRL].MaxInputComponents = 128;
      ctx->Const.Program[MESA_SHADER_TESS_CTRL].MaxOutputComponents = 128;
      ctx->Const.Program[MESA_SHADER_TESS_EVAL].MaxInputComponents = 128;
      ctx->Const.Program[MESA_SHADER_TESS_EVAL].MaxOutputComponents = 128;
   }

   /* We want the GLSL compiler to emit code that uses condition codes */
   for (int i = 0; i < MESA_SHADER_STAGES; i++) {
      ctx->Const.ShaderCompilerOptions[i] =
         brw->screen->compiler->glsl_compiler_options[i];
   }

   if (brw->gen >= 7) {
      ctx->Const.MaxViewportWidth = 32768;
      ctx->Const.MaxViewportHeight = 32768;
   }

   /* ARB_viewport_array, OES_viewport_array */
   if (brw->gen >= 6) {
      ctx->Const.MaxViewports = GEN6_NUM_VIEWPORTS;
      ctx->Const.ViewportSubpixelBits = 0;

      /* Cast to float before negating because MaxViewportWidth is unsigned.
       */
      ctx->Const.ViewportBounds.Min = -(float)ctx->Const.MaxViewportWidth;
      ctx->Const.ViewportBounds.Max = ctx->Const.MaxViewportWidth;
   }

   /* ARB_gpu_shader5 */
   if (brw->gen >= 7)
      ctx->Const.MaxVertexStreams = MIN2(4, MAX_VERTEX_STREAMS);

   /* ARB_framebuffer_no_attachments */
   ctx->Const.MaxFramebufferWidth = 16384;
   ctx->Const.MaxFramebufferHeight = 16384;
   ctx->Const.MaxFramebufferLayers = ctx->Const.MaxArrayTextureLayers;
   ctx->Const.MaxFramebufferSamples = max_samples;

   /* OES_primitive_bounding_box */
   ctx->Const.NoPrimitiveBoundingBoxOutput = true;
}

static void
brw_initialize_cs_context_constants(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   const struct intel_screen *screen = brw->screen;
   struct gen_device_info *devinfo = &brw->screen->devinfo;

   /* FINISHME: Do this for all platforms that the kernel supports */
   if (brw->is_cherryview &&
       screen->subslice_total > 0 && screen->eu_total > 0) {
      /* Logical CS threads = EUs per subslice * 7 threads per EU */
      uint32_t max_cs_threads = screen->eu_total / screen->subslice_total * 7;

      /* Fuse configurations may give more threads than expected, never less. */
      if (max_cs_threads > devinfo->max_cs_threads)
         devinfo->max_cs_threads = max_cs_threads;
   }
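   /* Worked example (a hypothetical Cherryview fuse configuration with
    * 16 EUs across 2 subslices): 16 / 2 * 7 = 56 logical CS threads,
    * which replaces devinfo->max_cs_threads only if it is larger.
    */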

   /* Maximum number of scalar compute shader invocations that can be run in
    * parallel in the same subslice assuming SIMD32 dispatch.
    *
    * We don't advertise more than 64 threads, because we are limited to 64 by
    * our usage of thread_width_max in the gpgpu walker command. This only
    * currently impacts Haswell, which otherwise might be able to advertise 70
    * threads. With SIMD32 and 64 threads, Haswell still provides twice the
    * number of invocations required by ARB_compute_shader.
    */
   const unsigned max_threads = MIN2(64, devinfo->max_cs_threads);
   const uint32_t max_invocations = 32 * max_threads;
   ctx->Const.MaxComputeWorkGroupSize[0] = max_invocations;
   ctx->Const.MaxComputeWorkGroupSize[1] = max_invocations;
   ctx->Const.MaxComputeWorkGroupSize[2] = max_invocations;
   ctx->Const.MaxComputeWorkGroupInvocations = max_invocations;
   ctx->Const.MaxComputeSharedMemorySize = 64 * 1024;
}

/**
 * Process driconf (drirc) options, setting appropriate context flags.
 *
 * intelInitExtensions still pokes at optionCache directly, in order to
 * avoid advertising various extensions. No flags are set, so it makes
 * sense to continue doing that there.
 */
static void
brw_process_driconf_options(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   driOptionCache *options = &brw->optionCache;
   driParseConfigFiles(options, &brw->screen->optionCache,
                       brw->driContext->driScreenPriv->myNum, "i965");

   int bo_reuse_mode = driQueryOptioni(options, "bo_reuse");
   switch (bo_reuse_mode) {
   case DRI_CONF_BO_REUSE_DISABLED:
      break;
   case DRI_CONF_BO_REUSE_ALL:
      brw_bufmgr_enable_reuse(brw->bufmgr);
      break;
   }

   if (INTEL_DEBUG & DEBUG_NO_HIZ) {
      brw->has_hiz = false;
      /* On gen6, you can only do separate stencil with HIZ. */
      if (brw->gen == 6)
         brw->has_separate_stencil = false;
   }

   if (driQueryOptionb(options, "always_flush_batch")) {
      fprintf(stderr, "flushing batchbuffer before/after each draw call\n");
      brw->always_flush_batch = true;
   }

   if (driQueryOptionb(options, "always_flush_cache")) {
      fprintf(stderr, "flushing GPU caches before/after each draw call\n");
      brw->always_flush_cache = true;
   }

   if (driQueryOptionb(options, "disable_throttling")) {
      fprintf(stderr, "disabling flush throttling\n");
      brw->disable_throttling = true;
   }

   brw->precompile = driQueryOptionb(&brw->optionCache, "shader_precompile");

   if (driQueryOptionb(&brw->optionCache, "precise_trig"))
      brw->screen->compiler->precise_trig = true;

   ctx->Const.ForceGLSLExtensionsWarn =
      driQueryOptionb(options, "force_glsl_extensions_warn");

   ctx->Const.ForceGLSLVersion =
      driQueryOptioni(options, "force_glsl_version");

   ctx->Const.DisableGLSLLineContinuations =
      driQueryOptionb(options, "disable_glsl_line_continuations");

   ctx->Const.AllowGLSLExtensionDirectiveMidShader =
      driQueryOptionb(options, "allow_glsl_extension_directive_midshader");

   ctx->Const.AllowGLSLBuiltinVariableRedeclaration =
      driQueryOptionb(options, "allow_glsl_builtin_variable_redeclaration");

   ctx->Const.AllowHigherCompatVersion =
      driQueryOptionb(options, "allow_higher_compat_version");

   ctx->Const.ForceGLSLAbsSqrt =
      driQueryOptionb(options, "force_glsl_abs_sqrt");

   ctx->Const.GLSLZeroInit = driQueryOptionb(options, "glsl_zero_init");

   brw->dual_color_blend_by_location =
      driQueryOptionb(options, "dual_color_blend_by_location");
}

GLboolean
brwCreateContext(gl_api api,
                 const struct gl_config *mesaVis,
                 __DRIcontext *driContextPriv,
                 unsigned major_version,
                 unsigned minor_version,
                 uint32_t flags,
                 bool notify_reset,
                 unsigned *dri_ctx_error,
                 void *sharedContextPrivate)
{
   struct gl_context *shareCtx = (struct gl_context *) sharedContextPrivate;
   struct intel_screen *screen = driContextPriv->driScreenPriv->driverPrivate;
   const struct gen_device_info *devinfo = &screen->devinfo;
   struct dd_function_table functions;

   /* Only allow the __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS flag if the kernel
    * provides us with context reset notifications.
    */
   uint32_t allowed_flags = __DRI_CTX_FLAG_DEBUG |
                            __DRI_CTX_FLAG_FORWARD_COMPATIBLE |
                            __DRI_CTX_FLAG_NO_ERROR;

   if (screen->has_context_reset_notification)
      allowed_flags |= __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS;

   if (flags & ~allowed_flags) {
      *dri_ctx_error = __DRI_CTX_ERROR_UNKNOWN_FLAG;
      return false;
   }

   struct brw_context *brw = rzalloc(NULL, struct brw_context);
   if (!brw) {
      fprintf(stderr, "%s: failed to alloc context\n", __func__);
      *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
      return false;
   }

   driContextPriv->driverPrivate = brw;
   brw->driContext = driContextPriv;
   brw->screen = screen;
   brw->bufmgr = screen->bufmgr;

   brw->gen = devinfo->gen;
   brw->gt = devinfo->gt;
   brw->is_g4x = devinfo->is_g4x;
   brw->is_baytrail = devinfo->is_baytrail;
   brw->is_haswell = devinfo->is_haswell;
   brw->is_cherryview = devinfo->is_cherryview;
   brw->is_broxton = devinfo->is_broxton || devinfo->is_geminilake;
   brw->has_llc = devinfo->has_llc;
   brw->has_hiz = devinfo->has_hiz_and_separate_stencil;
   brw->has_separate_stencil = devinfo->has_hiz_and_separate_stencil;
   brw->has_pln = devinfo->has_pln;
   brw->has_compr4 = devinfo->has_compr4;
   brw->has_surface_tile_offset = devinfo->has_surface_tile_offset;
   brw->has_negative_rhw_bug = devinfo->has_negative_rhw_bug;
   brw->needs_unlit_centroid_workaround =
      devinfo->needs_unlit_centroid_workaround;

   brw->must_use_separate_stencil = devinfo->must_use_separate_stencil;
   brw->has_swizzling = screen->hw_has_swizzling;

   brw->isl_dev = screen->isl_dev;

   brw->vs.base.stage = MESA_SHADER_VERTEX;
   brw->tcs.base.stage = MESA_SHADER_TESS_CTRL;
   brw->tes.base.stage = MESA_SHADER_TESS_EVAL;
   brw->gs.base.stage = MESA_SHADER_GEOMETRY;
   brw->wm.base.stage = MESA_SHADER_FRAGMENT;
   if (brw->gen >= 8) {
      gen8_init_vtable_surface_functions(brw);
      brw->vtbl.emit_depth_stencil_hiz = gen8_emit_depth_stencil_hiz;
   } else if (brw->gen >= 7) {
      gen7_init_vtable_surface_functions(brw);
      brw->vtbl.emit_depth_stencil_hiz = gen7_emit_depth_stencil_hiz;
   } else if (brw->gen >= 6) {
      gen6_init_vtable_surface_functions(brw);
      brw->vtbl.emit_depth_stencil_hiz = gen6_emit_depth_stencil_hiz;
   } else {
      gen4_init_vtable_surface_functions(brw);
      brw->vtbl.emit_depth_stencil_hiz = brw_emit_depth_stencil_hiz;
   }

   brw_init_driver_functions(brw, &functions);

   if (notify_reset)
      functions.GetGraphicsResetStatus = brw_get_graphics_reset_status;

   struct gl_context *ctx = &brw->ctx;

   if (!_mesa_initialize_context(ctx, api, mesaVis, shareCtx, &functions)) {
      *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
      fprintf(stderr, "%s: failed to init mesa context\n", __func__);
      intelDestroyContext(driContextPriv);
      return false;
   }

   driContextSetFlags(ctx, flags);

   /* Initialize the software rasterizer and helper modules.
    *
    * As of GL 3.1 core, the gen4+ driver doesn't need the swrast context for
    * software fallbacks (which we have to support on legacy GL to do weird
    * glDrawPixels(), glBitmap(), and other functions).
    */
   if (api != API_OPENGL_CORE && api != API_OPENGLES2) {
      _swrast_CreateContext(ctx);
   }

   _vbo_CreateContext(ctx);
   if (ctx->swrast_context) {
      _tnl_CreateContext(ctx);
      TNL_CONTEXT(ctx)->Driver.RunPipeline = _tnl_run_pipeline;
      _swsetup_CreateContext(ctx);

      /* Configure swrast to match hardware characteristics: */
      _swrast_allow_pixel_fog(ctx, false);
      _swrast_allow_vertex_fog(ctx, true);
   }

   _mesa_meta_init(ctx);

   brw_process_driconf_options(brw);

   if (INTEL_DEBUG & DEBUG_PERF)
      brw->perf_debug = true;

   brw_initialize_cs_context_constants(brw);
   brw_initialize_context_constants(brw);

   ctx->Const.ResetStrategy = notify_reset
      ? GL_LOSE_CONTEXT_ON_RESET_ARB : GL_NO_RESET_NOTIFICATION_ARB;

   /* Reinitialize the context point state. It depends on ctx->Const values. */
   _mesa_init_point(ctx);

   intel_fbo_init(brw);

   intel_batchbuffer_init(&brw->batch, brw->bufmgr, brw->has_llc);

   if (brw->gen >= 6) {
      /* Create a new hardware context. Using a hardware context means that
       * our GPU state will be saved/restored on context switch, allowing us
       * to assume that the GPU is in the same state we left it in.
       *
       * This is required for transform feedback buffer offsets, query objects,
       * and also allows us to reduce how much state we have to emit.
       */
      brw->hw_ctx = brw_create_hw_context(brw->bufmgr);

      if (!brw->hw_ctx) {
         fprintf(stderr, "Failed to create hardware context.\n");
         intelDestroyContext(driContextPriv);
         return false;
      }
   }

   if (brw_init_pipe_control(brw, devinfo)) {
      *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
      intelDestroyContext(driContextPriv);
      return false;
   }

   brw_init_state(brw);

   intelInitExtensions(ctx);

   brw_init_surface_formats(brw);

   brw_blorp_init(brw);

   brw->urb.size = devinfo->urb.size;

   if (brw->gen == 6)
      brw->urb.gs_present = false;

   brw->prim_restart.in_progress = false;
   brw->prim_restart.enable_cut_index = false;
   brw->gs.enabled = false;
   brw->clip.viewport_count = 1;

   brw->predicate.state = BRW_PREDICATE_STATE_RENDER;

   brw->max_gtt_map_object_size = screen->max_gtt_map_object_size;

   ctx->VertexProgram._MaintainTnlProgram = true;
   ctx->FragmentProgram._MaintainTexEnvProgram = true;

   brw_draw_init( brw );

   if ((flags & __DRI_CTX_FLAG_DEBUG) != 0) {
      /* Turn on some extra GL_ARB_debug_output generation. */
      brw->perf_debug = true;
   }

   if ((flags & __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS) != 0) {
      ctx->Const.ContextFlags |= GL_CONTEXT_FLAG_ROBUST_ACCESS_BIT_ARB;
      ctx->Const.RobustAccess = GL_TRUE;
   }

   if (INTEL_DEBUG & DEBUG_SHADER_TIME)
      brw_init_shader_time(brw);

   _mesa_compute_version(ctx);

   _mesa_initialize_dispatch_tables(ctx);
   _mesa_initialize_vbo_vtxfmt(ctx);

   if (ctx->Extensions.INTEL_performance_query)
      brw_init_performance_queries(brw);

   vbo_use_buffer_objects(ctx);
   vbo_always_unmap_buffers(ctx);

   return true;
}

void
intelDestroyContext(__DRIcontext * driContextPriv)
{
   struct brw_context *brw =
      (struct brw_context *) driContextPriv->driverPrivate;
   struct gl_context *ctx = &brw->ctx;

   _mesa_meta_free(&brw->ctx);

   if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
      /* Force a report. */
      brw->shader_time.report_time = 0;

      brw_collect_and_report_shader_time(brw);
      brw_destroy_shader_time(brw);
   }

   if (brw->gen >= 6)
      blorp_finish(&brw->blorp);

   brw_destroy_state(brw);
   brw_draw_destroy(brw);

   brw_bo_unreference(brw->curbe.curbe_bo);
   if (brw->vs.base.scratch_bo)
      brw_bo_unreference(brw->vs.base.scratch_bo);
   if (brw->tcs.base.scratch_bo)
      brw_bo_unreference(brw->tcs.base.scratch_bo);
   if (brw->tes.base.scratch_bo)
      brw_bo_unreference(brw->tes.base.scratch_bo);
   if (brw->gs.base.scratch_bo)
      brw_bo_unreference(brw->gs.base.scratch_bo);
   if (brw->wm.base.scratch_bo)
      brw_bo_unreference(brw->wm.base.scratch_bo);

   brw_destroy_hw_context(brw->bufmgr, brw->hw_ctx);

   if (ctx->swrast_context) {
      _swsetup_DestroyContext(&brw->ctx);
      _tnl_DestroyContext(&brw->ctx);
   }
   _vbo_DestroyContext(&brw->ctx);

   if (ctx->swrast_context)
      _swrast_DestroyContext(&brw->ctx);

   brw_fini_pipe_control(brw);
   intel_batchbuffer_free(&brw->batch);

   brw_bo_unreference(brw->throttle_batch[1]);
   brw_bo_unreference(brw->throttle_batch[0]);
   brw->throttle_batch[1] = NULL;
   brw->throttle_batch[0] = NULL;

   driDestroyOptionCache(&brw->optionCache);

   /* free the Mesa context */
   _mesa_free_context_data(&brw->ctx);

   ralloc_free(brw);
   driContextPriv->driverPrivate = NULL;
}

GLboolean
intelUnbindContext(__DRIcontext * driContextPriv)
{
   /* Unset current context and dispatch table */
   _mesa_make_current(NULL, NULL, NULL);

   return true;
}

/**
 * Fixes up the context for GLES2/3 with our default-to-sRGB-capable behavior
 * on window system framebuffers.
 *
 * Desktop GL is fairly reasonable in its handling of sRGB: You can ask if
 * your renderbuffer can do sRGB encode, and you can flip a switch that does
 * sRGB encode if the renderbuffer can handle it. You can ask specifically
 * for a visual where you're guaranteed to be capable, but it turns out that
 * everyone just makes all their ARGB8888 visuals capable and doesn't offer
 * incapable ones, because there's no difference between the two in resources
 * used. Applications thus get built that accidentally rely on the default
 * visual choice being sRGB, so we make ours sRGB capable. Everything sounds
 * great...
 *
 * But for GLES2/3, they decided that it was silly to not turn on sRGB encode
 * for sRGB renderbuffers you made with the GL_EXT_texture_sRGB equivalent.
 * So they removed the enable knob and made it "if the renderbuffer is sRGB
 * capable, do sRGB encode". Then, for your window system renderbuffers, you
 * can ask for sRGB visuals and get sRGB encode, or not ask for sRGB visuals
 * and get no sRGB encode (assuming that both kinds of visual are available).
 * Thus our choice to support sRGB by default on our visuals for desktop would
 * result in broken rendering of GLES apps that aren't expecting sRGB encode.
 *
 * Unfortunately, renderbuffer setup happens before a context is created. So
 * in intel_screen.c we always set up sRGB, and here, if you're a GLES2/3
 * context (without an sRGB visual, though we don't have sRGB visuals exposed
 * yet), we go turn that back off before anyone finds out.
 */
static void
intel_gles3_srgb_workaround(struct brw_context *brw,
                            struct gl_framebuffer *fb)
{
   struct gl_context *ctx = &brw->ctx;

   if (_mesa_is_desktop_gl(ctx) || !fb->Visual.sRGBCapable)
      return;

   /* Some day when we support the sRGB capable bit on visuals available for
    * GLES, we'll need to respect that and not disable things here.
    */
   fb->Visual.sRGBCapable = false;
   for (int i = 0; i < BUFFER_COUNT; i++) {
      struct gl_renderbuffer *rb = fb->Attachment[i].Renderbuffer;
      if (rb)
         rb->Format = _mesa_get_srgb_format_linear(rb->Format);
   }
}

GLboolean
intelMakeCurrent(__DRIcontext * driContextPriv,
                 __DRIdrawable * driDrawPriv,
                 __DRIdrawable * driReadPriv)
{
   struct brw_context *brw;
   GET_CURRENT_CONTEXT(curCtx);

   if (driContextPriv)
      brw = (struct brw_context *) driContextPriv->driverPrivate;
   else
      brw = NULL;

   /* According to the glXMakeCurrent() man page: "Pending commands to
    * the previous context, if any, are flushed before it is released."
    * But only flush if we're actually changing contexts.
    */
   if (brw_context(curCtx) && brw_context(curCtx) != brw) {
      _mesa_flush(curCtx);
   }

   if (driContextPriv) {
      struct gl_context *ctx = &brw->ctx;
      struct gl_framebuffer *fb, *readFb;

      if (driDrawPriv == NULL) {
         fb = _mesa_get_incomplete_framebuffer();
      } else {
         fb = driDrawPriv->driverPrivate;
         driContextPriv->dri2.draw_stamp = driDrawPriv->dri2.stamp - 1;
      }

      if (driReadPriv == NULL) {
         readFb = _mesa_get_incomplete_framebuffer();
      } else {
         readFb = driReadPriv->driverPrivate;
         driContextPriv->dri2.read_stamp = driReadPriv->dri2.stamp - 1;
      }

      /* The sRGB workaround changes the renderbuffer's format. We must change
       * the format before the renderbuffer's miptree gets allocated, otherwise
       * the formats of the renderbuffer and its miptree will differ.
       */
      intel_gles3_srgb_workaround(brw, fb);
      intel_gles3_srgb_workaround(brw, readFb);

      /* If the context viewport hasn't been initialized, force a call out to
       * the loader to get buffers so we have a drawable size for the initial
       * viewport. */
      if (!brw->ctx.ViewportInitialized)
         intel_prepare_render(brw);

      _mesa_make_current(ctx, fb, readFb);
   } else {
      _mesa_make_current(NULL, NULL, NULL);
   }

   return true;
}

void
intel_resolve_for_dri2_flush(struct brw_context *brw,
                             __DRIdrawable *drawable)
{
   if (brw->gen < 6) {
      /* MSAA and fast color clear are not supported, so don't waste time
       * checking whether a resolve is needed.
       */
      return;
   }

   struct gl_framebuffer *fb = drawable->driverPrivate;
   struct intel_renderbuffer *rb;

   /* Usually, only the back buffer will need to be downsampled. However,
    * the front buffer will also need it if the user has rendered into it.
    */
   static const gl_buffer_index buffers[2] = {
      BUFFER_BACK_LEFT,
      BUFFER_FRONT_LEFT,
   };

   for (int i = 0; i < 2; ++i) {
      rb = intel_get_renderbuffer(fb, buffers[i]);
      if (rb == NULL || rb->mt == NULL)
         continue;
      if (rb->mt->surf.samples == 1) {
         assert(rb->mt_layer == 0 && rb->mt_level == 0 &&
                rb->layer_count == 1);
         intel_miptree_prepare_access(brw, rb->mt, 0, 1, 0, 1, false, false);
      } else {
         intel_renderbuffer_downsample(brw, rb);
      }
   }
}

static unsigned
intel_bits_per_pixel(const struct intel_renderbuffer *rb)
{
   return _mesa_get_format_bytes(intel_rb_format(rb)) * 8;
}

static void
intel_query_dri2_buffers(struct brw_context *brw,
                         __DRIdrawable *drawable,
                         __DRIbuffer **buffers,
                         int *count);

static void
intel_process_dri2_buffer(struct brw_context *brw,
                          __DRIdrawable *drawable,
                          __DRIbuffer *buffer,
                          struct intel_renderbuffer *rb,
                          const char *buffer_name);

static void
intel_update_image_buffers(struct brw_context *brw, __DRIdrawable *drawable);

static void
intel_update_dri2_buffers(struct brw_context *brw, __DRIdrawable *drawable)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;
   struct intel_renderbuffer *rb;
   __DRIbuffer *buffers = NULL;
   int count;
   const char *region_name;

   /* Set this up front, so that in case our buffers get invalidated
    * while we're getting new buffers, we don't clobber the stamp and
    * thus ignore the invalidate. */
   drawable->lastStamp = drawable->dri2.stamp;

   if (unlikely(INTEL_DEBUG & DEBUG_DRI))
      fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);

   intel_query_dri2_buffers(brw, drawable, &buffers, &count);

   if (buffers == NULL)
      return;

   for (int i = 0; i < count; i++) {
      switch (buffers[i].attachment) {
      case __DRI_BUFFER_FRONT_LEFT:
         rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
         region_name = "dri2 front buffer";
         break;

      case __DRI_BUFFER_FAKE_FRONT_LEFT:
         rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
         region_name = "dri2 fake front buffer";
         break;

      case __DRI_BUFFER_BACK_LEFT:
         rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
         region_name = "dri2 back buffer";
         break;

      case __DRI_BUFFER_DEPTH:
      case __DRI_BUFFER_HIZ:
      case __DRI_BUFFER_DEPTH_STENCIL:
      case __DRI_BUFFER_STENCIL:
      case __DRI_BUFFER_ACCUM:
      default:
         fprintf(stderr,
                 "unhandled buffer attach event, attachment type %d\n",
                 buffers[i].attachment);
         return;
      }

      intel_process_dri2_buffer(brw, drawable, &buffers[i], rb, region_name);
   }
}

void
intel_update_renderbuffers(__DRIcontext *context, __DRIdrawable *drawable)
{
   struct brw_context *brw = context->driverPrivate;
   __DRIscreen *dri_screen = brw->screen->driScrnPriv;

   /* Set this up front, so that in case our buffers get invalidated
    * while we're getting new buffers, we don't clobber the stamp and
    * thus ignore the invalidate. */
   drawable->lastStamp = drawable->dri2.stamp;

   if (unlikely(INTEL_DEBUG & DEBUG_DRI))
      fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);

   if (dri_screen->image.loader)
      intel_update_image_buffers(brw, drawable);
   else
      intel_update_dri2_buffers(brw, drawable);

   driUpdateFramebufferSize(&brw->ctx, drawable);
}

/**
 * intel_prepare_render should be called anywhere that current read/drawbuffer
 * state is required.
 */
void
intel_prepare_render(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   __DRIcontext *driContext = brw->driContext;
   __DRIdrawable *drawable;

   drawable = driContext->driDrawablePriv;
   if (drawable && drawable->dri2.stamp != driContext->dri2.draw_stamp) {
      if (drawable->lastStamp != drawable->dri2.stamp)
         intel_update_renderbuffers(driContext, drawable);
      driContext->dri2.draw_stamp = drawable->dri2.stamp;
   }

   drawable = driContext->driReadablePriv;
   if (drawable && drawable->dri2.stamp != driContext->dri2.read_stamp) {
      if (drawable->lastStamp != drawable->dri2.stamp)
         intel_update_renderbuffers(driContext, drawable);
      driContext->dri2.read_stamp = drawable->dri2.stamp;
   }

   /* If we're currently rendering to the front buffer, the rendering
    * that will happen next will probably dirty the front buffer. So
    * mark it as dirty here.
    */
   if (_mesa_is_front_buffer_drawing(ctx->DrawBuffer))
      brw->front_buffer_dirty = true;
}

/**
 * \brief Query DRI2 to obtain a DRIdrawable's buffers.
 *
 * To determine which DRI buffers to request, examine the renderbuffers
 * attached to the drawable's framebuffer. Then request the buffers with
 * DRI2GetBuffers() or DRI2GetBuffersWithFormat().
 *
 * This is called from intel_update_renderbuffers().
 *
 * \param drawable      Drawable whose buffers are queried.
 * \param buffers       [out] List of buffers returned by DRI2 query.
 * \param buffer_count  [out] Number of buffers returned.
 *
 * \see intel_update_renderbuffers()
 * \see DRI2GetBuffers()
 * \see DRI2GetBuffersWithFormat()
 */
static void
intel_query_dri2_buffers(struct brw_context *brw,
                         __DRIdrawable *drawable,
                         __DRIbuffer **buffers,
                         int *buffer_count)
{
   __DRIscreen *dri_screen = brw->screen->driScrnPriv;
   struct gl_framebuffer *fb = drawable->driverPrivate;
   int i = 0;
   unsigned attachments[8];

   struct intel_renderbuffer *front_rb;
   struct intel_renderbuffer *back_rb;

   front_rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
   back_rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);

   memset(attachments, 0, sizeof(attachments));
   if ((_mesa_is_front_buffer_drawing(fb) ||
        _mesa_is_front_buffer_reading(fb) ||
        !back_rb) && front_rb) {
      /* If a fake front buffer is in use, then querying for
       * __DRI_BUFFER_FRONT_LEFT will cause the server to copy the image from
       * the real front buffer to the fake front buffer. So before doing the
       * query, we need to make sure all the pending drawing has landed in the
       * real front buffer.
       */
      intel_batchbuffer_flush(brw);
      intel_flush_front(&brw->ctx);

      attachments[i++] = __DRI_BUFFER_FRONT_LEFT;
      attachments[i++] = intel_bits_per_pixel(front_rb);
   } else if (front_rb && brw->front_buffer_dirty) {
      /* We have pending front buffer rendering, but we aren't querying for a
       * front buffer. If the front buffer we have is a fake front buffer,
       * the X server is going to throw it away when it processes the query.
       * So before doing the query, make sure all the pending drawing has
       * landed in the real front buffer.
       */
      intel_batchbuffer_flush(brw);
      intel_flush_front(&brw->ctx);
   }

   if (back_rb) {
      attachments[i++] = __DRI_BUFFER_BACK_LEFT;
      attachments[i++] = intel_bits_per_pixel(back_rb);
   }

   assert(i <= ARRAY_SIZE(attachments));

   *buffers =
      dri_screen->dri2.loader->getBuffersWithFormat(drawable,
                                                    &drawable->w,
                                                    &drawable->h,
                                                    attachments, i / 2,
                                                    buffer_count,
                                                    drawable->loaderPrivate);
}

/**
 * \brief Assign a DRI buffer's DRM region to a renderbuffer.
 *
 * This is called from intel_update_renderbuffers().
 *
 * \par Note:
 *    DRI buffers whose attachment point is DRI2BufferStencil or
 *    DRI2BufferDepthStencil are handled as special cases.
 *
 * \param buffer_name is a human readable name, such as "dri2 front buffer",
 *        that is passed to brw_bo_gem_create_from_name().
 *
 * \see intel_update_renderbuffers()
 */
static void
intel_process_dri2_buffer(struct brw_context *brw,
                          __DRIdrawable *drawable,
                          __DRIbuffer *buffer,
                          struct intel_renderbuffer *rb,
                          const char *buffer_name)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;
   struct brw_bo *bo;

   if (!rb)
      return;

   unsigned num_samples = rb->Base.Base.NumSamples;

   /* We try to avoid closing and reopening the same BO name, because the first
    * use of a mapping of the buffer involves a bunch of page faulting which is
    * moderately expensive.
    */
   struct intel_mipmap_tree *last_mt;
   if (num_samples == 0)
      last_mt = rb->mt;
   else
      last_mt = rb->singlesample_mt;

   uint32_t old_name = 0;
   if (last_mt) {
      /* The bo already has a name because the miptree was created by a
       * previous call to intel_process_dri2_buffer(). If a bo already has a
       * name, then brw_bo_flink() is a low-cost getter. It does not
       * create a new name.
       */
      brw_bo_flink(last_mt->bo, &old_name);
   }

   if (old_name == buffer->name)
      return;

   if (unlikely(INTEL_DEBUG & DEBUG_DRI)) {
      fprintf(stderr,
              "attaching buffer %d, at %d, cpp %d, pitch %d\n",
              buffer->name, buffer->attachment,
              buffer->cpp, buffer->pitch);
   }

   bo = brw_bo_gem_create_from_name(brw->bufmgr, buffer_name,
                                    buffer->name);
   if (!bo) {
      fprintf(stderr,
              "Failed to open BO for returned DRI2 buffer "
              "(%dx%d, %s, named %d).\n"
              "This is likely a bug in the X Server that will lead to a "
              "crash soon.\n",
              drawable->w, drawable->h, buffer_name, buffer->name);
      return;
   }

   struct intel_mipmap_tree *mt =
      intel_miptree_create_for_bo(brw,
                                  bo,
                                  intel_rb_format(rb),
                                  0,
                                  drawable->w,
                                  drawable->h,
                                  1,
                                  buffer->pitch,
                                  MIPTREE_LAYOUT_FOR_SCANOUT);
   if (!mt) {
      brw_bo_unreference(bo);
      return;
   }

   if (!intel_update_winsys_renderbuffer_miptree(brw, rb, mt,
                                                 drawable->w, drawable->h,
                                                 buffer->pitch)) {
      brw_bo_unreference(bo);
      intel_miptree_release(&mt);
      return;
   }

   if (_mesa_is_front_buffer_drawing(fb) &&
       (buffer->attachment == __DRI_BUFFER_FRONT_LEFT ||
        buffer->attachment == __DRI_BUFFER_FAKE_FRONT_LEFT) &&
       rb->Base.Base.NumSamples > 1) {
      intel_renderbuffer_upsample(brw, rb);
   }

   assert(rb->mt);

   brw_bo_unreference(bo);
}

/**
 * \brief Query DRI image loader to obtain a DRIdrawable's buffers.
 *
 * To determine which DRI buffers to request, examine the renderbuffers
 * attached to the drawable's framebuffer. Then request the buffers from
 * the image loader.
 *
 * This is called from intel_update_renderbuffers().
 *
 * \param drawable      Drawable whose buffers are queried.
 * \param buffers       [out] List of buffers returned by DRI2 query.
 * \param buffer_count  [out] Number of buffers returned.
 *
 * \see intel_update_renderbuffers()
 */

static void
intel_update_image_buffer(struct brw_context *intel,
                          __DRIdrawable *drawable,
                          struct intel_renderbuffer *rb,
                          __DRIimage *buffer,
                          enum __DRIimageBufferMask buffer_type)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;

   if (!rb || !buffer->bo)
      return;

   unsigned num_samples = rb->Base.Base.NumSamples;

   /* Check and see if we're already bound to the right
    * buffer object
    */
   struct intel_mipmap_tree *last_mt;
   if (num_samples == 0)
      last_mt = rb->mt;
   else
      last_mt = rb->singlesample_mt;

   if (last_mt && last_mt->bo == buffer->bo)
      return;

   enum isl_colorspace colorspace;
   switch (_mesa_get_format_color_encoding(intel_rb_format(rb))) {
   case GL_SRGB:
      colorspace = ISL_COLORSPACE_SRGB;
      break;
   case GL_LINEAR:
      colorspace = ISL_COLORSPACE_LINEAR;
      break;
   default:
      unreachable("Invalid color encoding");
   }

   struct intel_mipmap_tree *mt =
      intel_miptree_create_for_dri_image(intel, buffer, GL_TEXTURE_2D,
                                         colorspace, true);
   if (!mt)
      return;

   if (!intel_update_winsys_renderbuffer_miptree(intel, rb, mt,
                                                 buffer->width, buffer->height,
                                                 buffer->pitch)) {
      intel_miptree_release(&mt);
      return;
   }

   if (_mesa_is_front_buffer_drawing(fb) &&
       buffer_type == __DRI_IMAGE_BUFFER_FRONT &&
       rb->Base.Base.NumSamples > 1) {
      intel_renderbuffer_upsample(intel, rb);
   }
}

static void
intel_update_image_buffers(struct brw_context *brw, __DRIdrawable *drawable)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;
   __DRIscreen *dri_screen = brw->screen->driScrnPriv;
   struct intel_renderbuffer *front_rb;
   struct intel_renderbuffer *back_rb;
   struct __DRIimageList images;
   mesa_format format;
   uint32_t buffer_mask = 0;
   int ret;

   front_rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
   back_rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);

   if (back_rb)
      format = intel_rb_format(back_rb);
   else if (front_rb)
      format = intel_rb_format(front_rb);
   else
      return;

   if (front_rb && (_mesa_is_front_buffer_drawing(fb) ||
                    _mesa_is_front_buffer_reading(fb) || !back_rb)) {
      buffer_mask |= __DRI_IMAGE_BUFFER_FRONT;
   }

   if (back_rb)
      buffer_mask |= __DRI_IMAGE_BUFFER_BACK;

   ret = dri_screen->image.loader->getBuffers(drawable,
                                              driGLFormatToImageFormat(format),
                                              &drawable->dri2.stamp,
                                              drawable->loaderPrivate,
                                              buffer_mask,
                                              &images);
   if (!ret)
      return;

   if (images.image_mask & __DRI_IMAGE_BUFFER_FRONT) {
      drawable->w = images.front->width;
      drawable->h = images.front->height;
      intel_update_image_buffer(brw,
                                drawable,
                                front_rb,
                                images.front,
                                __DRI_IMAGE_BUFFER_FRONT);
   }

   if (images.image_mask & __DRI_IMAGE_BUFFER_BACK) {
      drawable->w = images.back->width;
      drawable->h = images.back->height;
      intel_update_image_buffer(brw,
                                drawable,
                                back_rb,
                                images.back,
                                __DRI_IMAGE_BUFFER_BACK);
   }
}