i965: don't include compute resources in "Combined" limits
[mesa.git] / src / mesa / drivers / dri / i965 / brw_context.c
1 /*
2 Copyright 2003 VMware, Inc.
3 Copyright (C) Intel Corp. 2006. All Rights Reserved.
4 Intel funded Tungsten Graphics to
5 develop this 3D driver.
6
7 Permission is hereby granted, free of charge, to any person obtaining
8 a copy of this software and associated documentation files (the
9 "Software"), to deal in the Software without restriction, including
10 without limitation the rights to use, copy, modify, merge, publish,
11 distribute, sublicense, and/or sell copies of the Software, and to
12 permit persons to whom the Software is furnished to do so, subject to
13 the following conditions:
14
15 The above copyright notice and this permission notice (including the
16 next paragraph) shall be included in all copies or substantial
17 portions of the Software.
18
19 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
20 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
22 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
23 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
24 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
25 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26
27 **********************************************************************/
28 /*
29 * Authors:
30 * Keith Whitwell <keithw@vmware.com>
31 */
32
33
34 #include "compiler/nir/nir.h"
35 #include "main/api_exec.h"
36 #include "main/context.h"
37 #include "main/fbobject.h"
38 #include "main/extensions.h"
39 #include "main/imports.h"
40 #include "main/macros.h"
41 #include "main/points.h"
42 #include "main/version.h"
43 #include "main/vtxfmt.h"
44 #include "main/texobj.h"
45 #include "main/framebuffer.h"
46 #include "main/stencil.h"
47 #include "main/state.h"
48
49 #include "vbo/vbo.h"
50
51 #include "drivers/common/driverfuncs.h"
52 #include "drivers/common/meta.h"
53 #include "utils.h"
54
55 #include "brw_context.h"
56 #include "brw_defines.h"
57 #include "brw_blorp.h"
58 #include "brw_draw.h"
59 #include "brw_state.h"
60
61 #include "intel_batchbuffer.h"
62 #include "intel_buffer_objects.h"
63 #include "intel_buffers.h"
64 #include "intel_fbo.h"
65 #include "intel_mipmap_tree.h"
66 #include "intel_pixel.h"
67 #include "intel_image.h"
68 #include "intel_tex.h"
69 #include "intel_tex_obj.h"
70
71 #include "swrast_setup/swrast_setup.h"
72 #include "tnl/tnl.h"
73 #include "tnl/t_pipeline.h"
74 #include "util/ralloc.h"
75 #include "util/debug.h"
76 #include "util/disk_cache.h"
77 #include "isl/isl.h"
78
79 #include "common/gen_defines.h"
80
81 #include "compiler/spirv/nir_spirv.h"
82 /***************************************
83 * Mesa's Driver Functions
84 ***************************************/
85
86 const char *const brw_vendor_string = "Intel Open Source Technology Center";
87
88 static const char *
89 get_bsw_model(const struct intel_screen *screen)
90 {
91 switch (screen->eu_total) {
92 case 16:
93 return "405";
94 case 12:
95 return "400";
96 default:
97 return "   ";
98 }
99 }
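/* Note: these model strings are copied verbatim (three bytes, no NUL) over a
 * literal "XXX" placeholder in the renderer string by brw_get_renderer_string()
 * below, so even the unknown-EU-count fallback must be at least three
 * characters long.
 */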
100
101 const char *
102 brw_get_renderer_string(const struct intel_screen *screen)
103 {
104 const char *chipset;
105 static char buffer[128];
106 char *bsw = NULL;
107
108 switch (screen->deviceID) {
109 #undef CHIPSET
110 #define CHIPSET(id, symbol, str) case id: chipset = str; break;
111 #include "pci_ids/i965_pci_ids.h"
112 default:
113 chipset = "Unknown Intel Chipset";
114 break;
115 }
116
117 /* Braswell branding is funny, so we have to fix it up here */
118 if (screen->deviceID == 0x22B1) {
119 bsw = strdup(chipset);
120 char *needle = strstr(bsw, "XXX");
121 if (needle) {
122 memcpy(needle, get_bsw_model(screen), 3);
123 chipset = bsw;
124 }
125 }
126
127 (void) driGetRendererString(buffer, chipset, 0);
128 free(bsw);
129 return buffer;
130 }
131
132 static const GLubyte *
133 intel_get_string(struct gl_context * ctx, GLenum name)
134 {
135 const struct brw_context *const brw = brw_context(ctx);
136
137 switch (name) {
138 case GL_VENDOR:
139 return (GLubyte *) brw_vendor_string;
140
141 case GL_RENDERER:
142 return
143 (GLubyte *) brw_get_renderer_string(brw->screen);
144
145 default:
146 return NULL;
147 }
148 }
149
150 static void
151 intel_viewport(struct gl_context *ctx)
152 {
153 struct brw_context *brw = brw_context(ctx);
154 __DRIcontext *driContext = brw->driContext;
155
156 if (_mesa_is_winsys_fbo(ctx->DrawBuffer)) {
157 if (driContext->driDrawablePriv)
158 dri2InvalidateDrawable(driContext->driDrawablePriv);
159 if (driContext->driReadablePriv)
160 dri2InvalidateDrawable(driContext->driReadablePriv);
161 }
162 }
163
164 static void
165 intel_update_framebuffer(struct gl_context *ctx,
166 struct gl_framebuffer *fb)
167 {
168 struct brw_context *brw = brw_context(ctx);
169
170 /* Quantize the derived default number of samples
171 */
172 fb->DefaultGeometry._NumSamples =
173 intel_quantize_num_samples(brw->screen,
174 fb->DefaultGeometry.NumSamples);
175 }
176
177 static void
178 intel_update_state(struct gl_context * ctx)
179 {
180 GLuint new_state = ctx->NewState;
181 struct brw_context *brw = brw_context(ctx);
182
183 if (ctx->swrast_context)
184 _swrast_InvalidateState(ctx, new_state);
185
186 brw->NewGLState |= new_state;
187
188 if (new_state & (_NEW_SCISSOR | _NEW_BUFFERS | _NEW_VIEWPORT))
189 _mesa_update_draw_buffer_bounds(ctx, ctx->DrawBuffer);
190
191 if (new_state & (_NEW_STENCIL | _NEW_BUFFERS)) {
192 brw->stencil_enabled = _mesa_stencil_is_enabled(ctx);
193 brw->stencil_two_sided = _mesa_stencil_is_two_sided(ctx);
194 brw->stencil_write_enabled =
195 _mesa_stencil_is_write_enabled(ctx, brw->stencil_two_sided);
196 }
197
198 if (new_state & _NEW_POLYGON)
199 brw->polygon_front_bit = _mesa_polygon_get_front_bit(ctx);
200
201 if (new_state & _NEW_BUFFERS) {
202 intel_update_framebuffer(ctx, ctx->DrawBuffer);
203 if (ctx->DrawBuffer != ctx->ReadBuffer)
204 intel_update_framebuffer(ctx, ctx->ReadBuffer);
205 }
206 }
207
208 #define flushFront(screen) ((screen)->image.loader ? (screen)->image.loader->flushFrontBuffer : (screen)->dri2.loader->flushFrontBuffer)
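/* This selects whichever flushFrontBuffer hook the loader provides: the image
 * loader's if one is bound, otherwise the DRI2 loader's. Both entry points are
 * invoked with (drawable, loaderPrivate), as in intel_flush_front() below.
 */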
209
210 static void
211 intel_flush_front(struct gl_context *ctx)
212 {
213 struct brw_context *brw = brw_context(ctx);
214 __DRIcontext *driContext = brw->driContext;
215 __DRIdrawable *driDrawable = driContext->driDrawablePriv;
216 __DRIscreen *const dri_screen = brw->screen->driScrnPriv;
217
218 if (brw->front_buffer_dirty && _mesa_is_winsys_fbo(ctx->DrawBuffer)) {
219 if (flushFront(dri_screen) && driDrawable &&
220 driDrawable->loaderPrivate) {
221
222 /* Resolve before flushing FAKE_FRONT_LEFT to FRONT_LEFT.
223 *
224 * This potentially resolves both front and back buffer. It
225 * is unnecessary to resolve the back, but harms nothing except
226 * performance. And no one cares about front-buffer render
227 * performance.
228 */
229 intel_resolve_for_dri2_flush(brw, driDrawable);
230 intel_batchbuffer_flush(brw);
231
232 flushFront(dri_screen)(driDrawable, driDrawable->loaderPrivate);
233
234 /* We set the dirty bit in intel_prepare_render() if we're
235 * front buffer rendering once we get there.
236 */
237 brw->front_buffer_dirty = false;
238 }
239 }
240 }
241
242 static void
243 brw_display_shared_buffer(struct brw_context *brw)
244 {
245 __DRIcontext *dri_context = brw->driContext;
246 __DRIdrawable *dri_drawable = dri_context->driDrawablePriv;
247 __DRIscreen *dri_screen = brw->screen->driScrnPriv;
248 int fence_fd = -1;
249
250 if (!brw->is_shared_buffer_bound)
251 return;
252
253 if (!brw->is_shared_buffer_dirty)
254 return;
255
256 if (brw->screen->has_exec_fence) {
257 /* This function is always called during a flush operation, so there is
258 * no need to flush again here. But we want to provide a fence_fd to the
259 * loader, and a redundant flush is the easiest way to acquire one.
260 */
261 if (intel_batchbuffer_flush_fence(brw, -1, &fence_fd))
262 return;
263 }
264
265 dri_screen->mutableRenderBuffer.loader
266 ->displaySharedBuffer(dri_drawable, fence_fd,
267 dri_drawable->loaderPrivate);
268 brw->is_shared_buffer_dirty = false;
269 }
270
271 static void
272 intel_glFlush(struct gl_context *ctx)
273 {
274 struct brw_context *brw = brw_context(ctx);
275
276 intel_batchbuffer_flush(brw);
277 intel_flush_front(ctx);
278 brw_display_shared_buffer(brw);
279 brw->need_flush_throttle = true;
280 }
281
282 static void
283 intel_finish(struct gl_context * ctx)
284 {
285 struct brw_context *brw = brw_context(ctx);
286
287 intel_glFlush(ctx);
288
289 if (brw->batch.last_bo)
290 brw_bo_wait_rendering(brw->batch.last_bo);
291 }
292
293 static void
294 brw_init_driver_functions(struct brw_context *brw,
295 struct dd_function_table *functions)
296 {
297 const struct gen_device_info *devinfo = &brw->screen->devinfo;
298
299 _mesa_init_driver_functions(functions);
300
301 /* GLX uses DRI2 invalidate events to handle window resizing.
302 * Unfortunately, EGL does not - libEGL is written in XCB (not Xlib),
303 * which doesn't provide a mechanism for snooping the event queues.
304 *
305 * So EGL still relies on viewport hacks to handle window resizing.
306 * This should go away with DRI3000.
307 */
308 if (!brw->driContext->driScreenPriv->dri2.useInvalidate)
309 functions->Viewport = intel_viewport;
310
311 functions->Flush = intel_glFlush;
312 functions->Finish = intel_finish;
313 functions->GetString = intel_get_string;
314 functions->UpdateState = intel_update_state;
315
316 brw_init_draw_functions(functions);
317 intelInitTextureFuncs(functions);
318 intelInitTextureImageFuncs(functions);
319 intelInitTextureCopyImageFuncs(functions);
320 intelInitCopyImageFuncs(functions);
321 intelInitClearFuncs(functions);
322 intelInitBufferFuncs(functions);
323 intelInitPixelFuncs(functions);
324 intelInitBufferObjectFuncs(functions);
325 brw_init_syncobj_functions(functions);
326 brw_init_object_purgeable_functions(functions);
327
328 brwInitFragProgFuncs( functions );
329 brw_init_common_queryobj_functions(functions);
330 if (devinfo->gen >= 8 || devinfo->is_haswell)
331 hsw_init_queryobj_functions(functions);
332 else if (devinfo->gen >= 6)
333 gen6_init_queryobj_functions(functions);
334 else
335 gen4_init_queryobj_functions(functions);
336 brw_init_compute_functions(functions);
337 brw_init_conditional_render_functions(functions);
338
339 functions->GenerateMipmap = brw_generate_mipmap;
340
341 functions->QueryInternalFormat = brw_query_internal_format;
342
343 functions->NewTransformFeedback = brw_new_transform_feedback;
344 functions->DeleteTransformFeedback = brw_delete_transform_feedback;
345 if (can_do_mi_math_and_lrr(brw->screen)) {
346 functions->BeginTransformFeedback = hsw_begin_transform_feedback;
347 functions->EndTransformFeedback = hsw_end_transform_feedback;
348 functions->PauseTransformFeedback = hsw_pause_transform_feedback;
349 functions->ResumeTransformFeedback = hsw_resume_transform_feedback;
350 } else if (devinfo->gen >= 7) {
351 functions->BeginTransformFeedback = gen7_begin_transform_feedback;
352 functions->EndTransformFeedback = gen7_end_transform_feedback;
353 functions->PauseTransformFeedback = gen7_pause_transform_feedback;
354 functions->ResumeTransformFeedback = gen7_resume_transform_feedback;
355 functions->GetTransformFeedbackVertexCount =
356 brw_get_transform_feedback_vertex_count;
357 } else {
358 functions->BeginTransformFeedback = brw_begin_transform_feedback;
359 functions->EndTransformFeedback = brw_end_transform_feedback;
360 functions->PauseTransformFeedback = brw_pause_transform_feedback;
361 functions->ResumeTransformFeedback = brw_resume_transform_feedback;
362 functions->GetTransformFeedbackVertexCount =
363 brw_get_transform_feedback_vertex_count;
364 }
365
366 if (devinfo->gen >= 6)
367 functions->GetSamplePosition = gen6_get_sample_position;
368
369 /* GL_ARB_get_program_binary */
370 brw_program_binary_init(brw->screen->deviceID);
371 functions->GetProgramBinaryDriverSHA1 = brw_get_program_binary_driver_sha1;
372 functions->ProgramBinarySerializeDriverBlob = brw_serialize_program_binary;
373 functions->ProgramBinaryDeserializeDriverBlob =
374 brw_deserialize_program_binary;
375
376 if (brw->screen->disk_cache) {
377 functions->ShaderCacheSerializeDriverBlob = brw_program_serialize_nir;
378 }
379 }
380
381 static void
382 brw_initialize_spirv_supported_capabilities(struct brw_context *brw)
383 {
384 const struct gen_device_info *devinfo = &brw->screen->devinfo;
385 struct gl_context *ctx = &brw->ctx;
386
387 /* The following SPIR-V capabilities are only supported on gen7+. In theory
388 * you should enable the extension only on gen7+, but just in case let's
389 * assert it.
390 */
391 assert(devinfo->gen >= 7);
392
393 ctx->Const.SpirVCapabilities.float64 = devinfo->gen >= 8;
394 ctx->Const.SpirVCapabilities.int64 = devinfo->gen >= 8;
395 ctx->Const.SpirVCapabilities.tessellation = true;
396 ctx->Const.SpirVCapabilities.draw_parameters = true;
397 ctx->Const.SpirVCapabilities.image_write_without_format = true;
398 ctx->Const.SpirVCapabilities.variable_pointers = true;
399 ctx->Const.SpirVCapabilities.atomic_storage = devinfo->gen >= 7;
400 ctx->Const.SpirVCapabilities.transform_feedback = devinfo->gen >= 7;
401 ctx->Const.SpirVCapabilities.geometry_streams = devinfo->gen >= 7;
402 }
403
404 static void
405 brw_initialize_context_constants(struct brw_context *brw)
406 {
407 const struct gen_device_info *devinfo = &brw->screen->devinfo;
408 struct gl_context *ctx = &brw->ctx;
409 const struct brw_compiler *compiler = brw->screen->compiler;
410
411 const bool stage_exists[MESA_SHADER_STAGES] = {
412 [MESA_SHADER_VERTEX] = true,
413 [MESA_SHADER_TESS_CTRL] = devinfo->gen >= 7,
414 [MESA_SHADER_TESS_EVAL] = devinfo->gen >= 7,
415 [MESA_SHADER_GEOMETRY] = devinfo->gen >= 6,
416 [MESA_SHADER_FRAGMENT] = true,
417 [MESA_SHADER_COMPUTE] =
418 (_mesa_is_desktop_gl(ctx) &&
419 ctx->Const.MaxComputeWorkGroupSize[0] >= 1024) ||
420 (ctx->API == API_OPENGLES2 &&
421 ctx->Const.MaxComputeWorkGroupSize[0] >= 128),
422 };
423
424 unsigned num_gfx_stages = 0;
425 for (int i = 0; i < MESA_SHADER_COMPUTE; i++) {
426 if (stage_exists[i])
427 num_gfx_stages++;
428 }
429
430 unsigned max_samplers =
431 devinfo->gen >= 8 || devinfo->is_haswell ? BRW_MAX_TEX_UNIT : 16;
432
433 ctx->Const.MaxDualSourceDrawBuffers = 1;
434 ctx->Const.MaxDrawBuffers = BRW_MAX_DRAW_BUFFERS;
435 ctx->Const.MaxCombinedShaderOutputResources =
436 MAX_IMAGE_UNITS + BRW_MAX_DRAW_BUFFERS;
437
438 /* The timestamp register we can read for glGetTimestamp() is
439 * sometimes only 32 bits, before scaling to nanoseconds (depending
440 * on kernel).
441 *
442 * Once scaled to nanoseconds the timestamp would roll over at a
443 * non-power-of-two, so an application couldn't use
444 * GL_QUERY_COUNTER_BITS to handle rollover correctly. Instead, we
445 * report 36 bits and truncate at that (rolling over 5 times as
446 * often as the HW counter), and when the 32-bit counter rolls
447 * over, it happens to also be at a rollover in the reported value
448 * from near (1<<36) to 0.
449 *
450 * The low 32 bits roll over in ~343 seconds. Our 36-bit result
451 * rolls over every ~69 seconds.
452 */
453 ctx->Const.QueryCounterBits.Timestamp = 36;
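/* Illustrative arithmetic, assuming the ~80ns hardware tick implied by the
 * figures above: 2^32 ticks * 80ns is ~343 seconds for the raw counter, while
 * 2^36 nanoseconds is ~68.7 seconds for the reported value -- the roughly
 * 5x-more-frequent rollover described in the comment.
 */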
454
455 ctx->Const.MaxTextureCoordUnits = 8; /* Mesa limit */
456 ctx->Const.MaxImageUnits = MAX_IMAGE_UNITS;
457 if (devinfo->gen >= 7) {
458 ctx->Const.MaxRenderbufferSize = 16384;
459 ctx->Const.MaxTextureLevels = MIN2(15 /* 16384 */, MAX_TEXTURE_LEVELS);
460 ctx->Const.MaxCubeTextureLevels = 15; /* 16384 */
461 } else {
462 ctx->Const.MaxRenderbufferSize = 8192;
463 ctx->Const.MaxTextureLevels = MIN2(14 /* 8192 */, MAX_TEXTURE_LEVELS);
464 ctx->Const.MaxCubeTextureLevels = 14; /* 8192 */
465 }
466 ctx->Const.Max3DTextureLevels = 12; /* 2048 */
467 ctx->Const.MaxArrayTextureLayers = devinfo->gen >= 7 ? 2048 : 512;
468 ctx->Const.MaxTextureMbytes = 1536;
469 ctx->Const.MaxTextureRectSize = devinfo->gen >= 7 ? 16384 : 8192;
470 ctx->Const.MaxTextureMaxAnisotropy = 16.0;
471 ctx->Const.MaxTextureLodBias = 15.0;
472 ctx->Const.StripTextureBorder = true;
473 if (devinfo->gen >= 7) {
474 ctx->Const.MaxProgramTextureGatherComponents = 4;
475 ctx->Const.MinProgramTextureGatherOffset = -32;
476 ctx->Const.MaxProgramTextureGatherOffset = 31;
477 } else if (devinfo->gen == 6) {
478 ctx->Const.MaxProgramTextureGatherComponents = 1;
479 ctx->Const.MinProgramTextureGatherOffset = -8;
480 ctx->Const.MaxProgramTextureGatherOffset = 7;
481 }
482
483 ctx->Const.MaxUniformBlockSize = 65536;
484
485 for (int i = 0; i < MESA_SHADER_STAGES; i++) {
486 struct gl_program_constants *prog = &ctx->Const.Program[i];
487
488 if (!stage_exists[i])
489 continue;
490
491 prog->MaxTextureImageUnits = max_samplers;
492
493 prog->MaxUniformBlocks = BRW_MAX_UBO;
494 prog->MaxCombinedUniformComponents =
495 prog->MaxUniformComponents +
496 ctx->Const.MaxUniformBlockSize / 4 * prog->MaxUniformBlocks;
497
498 prog->MaxAtomicCounters = MAX_ATOMIC_COUNTERS;
499 prog->MaxAtomicBuffers = BRW_MAX_ABO;
500 prog->MaxImageUniforms = compiler->scalar_stage[i] ? BRW_MAX_IMAGES : 0;
501 prog->MaxShaderStorageBlocks = BRW_MAX_SSBO;
502 }
503
504 ctx->Const.MaxTextureUnits =
505 MIN2(ctx->Const.MaxTextureCoordUnits,
506 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits);
507
508 ctx->Const.MaxUniformBufferBindings = num_gfx_stages * BRW_MAX_UBO;
509 ctx->Const.MaxCombinedUniformBlocks = num_gfx_stages * BRW_MAX_UBO;
510 ctx->Const.MaxCombinedAtomicBuffers = num_gfx_stages * BRW_MAX_ABO;
511 ctx->Const.MaxCombinedShaderStorageBlocks = num_gfx_stages * BRW_MAX_SSBO;
512 ctx->Const.MaxCombinedTextureImageUnits = num_gfx_stages * max_samplers;
513 ctx->Const.MaxCombinedImageUniforms = num_gfx_stages * BRW_MAX_IMAGES;
514 ctx->Const.MaxShaderStorageBufferBindings =
515 (num_gfx_stages + stage_exists[MESA_SHADER_COMPUTE]) * BRW_MAX_SSBO;
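/* For example, on gen7+ all five graphics stages exist, so each "Combined"
 * limit above is five times the corresponding per-stage limit (BRW_MAX_UBO,
 * BRW_MAX_ABO, BRW_MAX_SSBO, BRW_MAX_IMAGES, max_samplers), while
 * MaxShaderStorageBufferBindings adds a sixth BRW_MAX_SSBO-sized set whenever
 * the compute stage exists.
 */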
516
517 /* Hardware only supports a limited number of transform feedback buffers.
518 * So we need to override the Mesa default (which is based only on software
519 * limits).
520 */
521 ctx->Const.MaxTransformFeedbackBuffers = BRW_MAX_SOL_BUFFERS;
522
523 /* On Gen6, in the worst case, we use up one binding table entry per
524 * transform feedback component (see comments above the definition of
525 * BRW_MAX_SOL_BINDINGS, in brw_context.h), so we need to advertise a value
526 * for MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS equal to
527 * BRW_MAX_SOL_BINDINGS.
528 *
529 * In "separate components" mode, we need to divide this value by
530 * BRW_MAX_SOL_BUFFERS, so that the total number of binding table entries
531 * used up by all buffers will not exceed BRW_MAX_SOL_BINDINGS.
532 */
533 ctx->Const.MaxTransformFeedbackInterleavedComponents = BRW_MAX_SOL_BINDINGS;
534 ctx->Const.MaxTransformFeedbackSeparateComponents =
535 BRW_MAX_SOL_BINDINGS / BRW_MAX_SOL_BUFFERS;
536
537 ctx->Const.AlwaysUseGetTransformFeedbackVertexCount =
538 !can_do_mi_math_and_lrr(brw->screen);
539
540 int max_samples;
541 const int *msaa_modes = intel_supported_msaa_modes(brw->screen);
542 const int clamp_max_samples =
543 driQueryOptioni(&brw->optionCache, "clamp_max_samples");
544
545 if (clamp_max_samples < 0) {
546 max_samples = msaa_modes[0];
547 } else {
548 /* Select the largest supported MSAA mode that does not exceed
549 * clamp_max_samples.
550 */
551 max_samples = 0;
552 for (int i = 0; msaa_modes[i] != 0; ++i) {
553 if (msaa_modes[i] <= clamp_max_samples) {
554 max_samples = msaa_modes[i];
555 break;
556 }
557 }
558 }
559
560 ctx->Const.MaxSamples = max_samples;
561 ctx->Const.MaxColorTextureSamples = max_samples;
562 ctx->Const.MaxDepthTextureSamples = max_samples;
563 ctx->Const.MaxIntegerSamples = max_samples;
564 ctx->Const.MaxImageSamples = 0;
565
566 /* gen6_set_sample_maps() sets SampleMap{2,4,8}x variables which are used
567 * to map indices of rectangular grid to sample numbers within a pixel.
568 * These variables are used by the GL_EXT_framebuffer_multisample_blit_scaled
569 * extension implementation. For more details see the comment above
570 * gen6_set_sample_maps() definition.
571 */
572 gen6_set_sample_maps(ctx);
573
574 ctx->Const.MinLineWidth = 1.0;
575 ctx->Const.MinLineWidthAA = 1.0;
576 if (devinfo->gen >= 6) {
577 ctx->Const.MaxLineWidth = 7.375;
578 ctx->Const.MaxLineWidthAA = 7.375;
579 ctx->Const.LineWidthGranularity = 0.125;
580 } else {
581 ctx->Const.MaxLineWidth = 7.0;
582 ctx->Const.MaxLineWidthAA = 7.0;
583 ctx->Const.LineWidthGranularity = 0.5;
584 }
585
586 /* For non-antialiased lines, we have to round the line width to the
587 * nearest whole number. Make sure that we don't advertise a line
588 * width that, when rounded, will be beyond the actual hardware
589 * maximum.
590 */
591 assert(roundf(ctx->Const.MaxLineWidth) <= ctx->Const.MaxLineWidth);
592
593 ctx->Const.MinPointSize = 1.0;
594 ctx->Const.MinPointSizeAA = 1.0;
595 ctx->Const.MaxPointSize = 255.0;
596 ctx->Const.MaxPointSizeAA = 255.0;
597 ctx->Const.PointSizeGranularity = 1.0;
598
599 if (devinfo->gen >= 5 || devinfo->is_g4x)
600 ctx->Const.MaxClipPlanes = 8;
601
602 ctx->Const.GLSLTessLevelsAsInputs = true;
603 ctx->Const.PrimitiveRestartForPatches = true;
604
605 ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeInstructions = 16 * 1024;
606 ctx->Const.Program[MESA_SHADER_VERTEX].MaxAluInstructions = 0;
607 ctx->Const.Program[MESA_SHADER_VERTEX].MaxTexInstructions = 0;
608 ctx->Const.Program[MESA_SHADER_VERTEX].MaxTexIndirections = 0;
609 ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAluInstructions = 0;
610 ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTexInstructions = 0;
611 ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTexIndirections = 0;
612 ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAttribs = 16;
613 ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTemps = 256;
614 ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAddressRegs = 1;
615 ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeParameters = 1024;
616 ctx->Const.Program[MESA_SHADER_VERTEX].MaxEnvParams =
617 MIN2(ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeParameters,
618 ctx->Const.Program[MESA_SHADER_VERTEX].MaxEnvParams);
619
620 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeInstructions = 1024;
621 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAluInstructions = 1024;
622 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTexInstructions = 1024;
623 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTexIndirections = 1024;
624 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAttribs = 12;
625 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTemps = 256;
626 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAddressRegs = 0;
627 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeParameters = 1024;
628 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxEnvParams =
629 MIN2(ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeParameters,
630 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxEnvParams);
631
632 /* Fragment shaders use real, 32-bit twos-complement integers for all
633 * integer types.
634 */
635 ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.RangeMin = 31;
636 ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.RangeMax = 30;
637 ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.Precision = 0;
638 ctx->Const.Program[MESA_SHADER_FRAGMENT].HighInt = ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt;
639 ctx->Const.Program[MESA_SHADER_FRAGMENT].MediumInt = ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt;
640
641 ctx->Const.Program[MESA_SHADER_VERTEX].LowInt.RangeMin = 31;
642 ctx->Const.Program[MESA_SHADER_VERTEX].LowInt.RangeMax = 30;
643 ctx->Const.Program[MESA_SHADER_VERTEX].LowInt.Precision = 0;
644 ctx->Const.Program[MESA_SHADER_VERTEX].HighInt = ctx->Const.Program[MESA_SHADER_VERTEX].LowInt;
645 ctx->Const.Program[MESA_SHADER_VERTEX].MediumInt = ctx->Const.Program[MESA_SHADER_VERTEX].LowInt;
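/* These are the glGetShaderPrecisionFormat log2 encodings for a true 32-bit
 * two's-complement integer: |minimum| = 2^31 (RangeMin 31), maximum = 2^31 - 1
 * (RangeMax 30), with exact integer precision (Precision 0).
 */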
646
647 /* Gen6 converts quads to polygons at the beginning of the 3D pipeline,
648 * but we're not sure how it's actually done with respect to vertex order,
649 * which affects the provoking vertex decision. Always use the last vertex
650 * convention for quad primitives, which works as expected for now.
651 */
652 if (devinfo->gen >= 6)
653 ctx->Const.QuadsFollowProvokingVertexConvention = false;
654
655 ctx->Const.NativeIntegers = true;
656
657 /* Regarding the CMP instruction, the Ivybridge PRM says:
658 *
659 * "For each enabled channel 0b or 1b is assigned to the appropriate flag
660 * bit and 0/all zeros or all ones (e.g, byte 0xFF, word 0xFFFF, DWord
661 * 0xFFFFFFFF) is assigned to dst."
662 *
663 * but PRMs for earlier generations say
664 *
665 * "In dword format, one GRF may store up to 8 results. When the register
666 * is used later as a vector of Booleans, as only LSB at each channel
667 * contains meaning [sic] data, software should make sure all higher bits
668 * are masked out (e.g. by 'and-ing' an [sic] 0x01 constant)."
669 *
670 * We select the representation of a true boolean uniform to be ~0, and fix
671 * the results of Gen <= 5 CMP instruction's with -(result & 1).
672 */
673 ctx->Const.UniformBooleanTrue = ~0;
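/* i.e. on Gen <= 5, a CMP that produces 0x00000001 in a channel becomes
 * -(0x00000001 & 1) = 0xFFFFFFFF, matching the ~0 "true" representation chosen
 * here; newer generations already write all-ones directly, per the Ivybridge
 * PRM quote above.
 */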
674
675 /* From the gen4 PRM, volume 4 page 127:
676 *
677 * "For SURFTYPE_BUFFER non-rendertarget surfaces, this field specifies
678 * the base address of the first element of the surface, computed in
679 * software by adding the surface base address to the byte offset of
680 * the element in the buffer."
681 *
682 * However, unaligned accesses are slower, so enforce buffer alignment.
683 *
684 * In order to push UBO data, 3DSTATE_CONSTANT_XS imposes an additional
685 * restriction: the start of the buffer needs to be 32B aligned.
686 */
687 ctx->Const.UniformBufferOffsetAlignment = 32;
688
689 /* ShaderStorageBufferOffsetAlignment should be a cacheline (64 bytes) so
690 * that we can safely have the CPU and GPU writing the same SSBO on
691 * non-cachecoherent systems (our Atom CPUs). With UBOs, the GPU never
692 * writes, so there's no problem. For an SSBO, the GPU and the CPU can
693 * be updating disjoint regions of the buffer simultaneously and that will
694 * break if the regions overlap the same cacheline.
695 */
696 ctx->Const.ShaderStorageBufferOffsetAlignment = 64;
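/* Concretely: with offsets aligned to 64 bytes, the boundary between two
 * adjacent, separately bound SSBO ranges always falls on a cacheline boundary,
 * so a CPU write to one range and a GPU write to the other cannot touch the
 * same cacheline on the non-coherent parts described above.
 */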
697 ctx->Const.TextureBufferOffsetAlignment = 16;
698 ctx->Const.MaxTextureBufferSize = 128 * 1024 * 1024;
699
700 if (devinfo->gen >= 6) {
701 ctx->Const.MaxVarying = 32;
702 ctx->Const.Program[MESA_SHADER_VERTEX].MaxOutputComponents = 128;
703 ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxInputComponents =
704 compiler->scalar_stage[MESA_SHADER_GEOMETRY] ? 128 : 64;
705 ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxOutputComponents = 128;
706 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxInputComponents = 128;
707 ctx->Const.Program[MESA_SHADER_TESS_CTRL].MaxInputComponents = 128;
708 ctx->Const.Program[MESA_SHADER_TESS_CTRL].MaxOutputComponents = 128;
709 ctx->Const.Program[MESA_SHADER_TESS_EVAL].MaxInputComponents = 128;
710 ctx->Const.Program[MESA_SHADER_TESS_EVAL].MaxOutputComponents = 128;
711 }
712
713 /* We want the GLSL compiler to emit code that uses condition codes */
714 for (int i = 0; i < MESA_SHADER_STAGES; i++) {
715 ctx->Const.ShaderCompilerOptions[i] =
716 brw->screen->compiler->glsl_compiler_options[i];
717 }
718
719 if (devinfo->gen >= 7) {
720 ctx->Const.MaxViewportWidth = 32768;
721 ctx->Const.MaxViewportHeight = 32768;
722 }
723
724 /* ARB_viewport_array, OES_viewport_array */
725 if (devinfo->gen >= 6) {
726 ctx->Const.MaxViewports = GEN6_NUM_VIEWPORTS;
727 ctx->Const.ViewportSubpixelBits = 8;
728
729 /* Cast to float before negating because MaxViewportWidth is unsigned.
730 */
731 ctx->Const.ViewportBounds.Min = -(float)ctx->Const.MaxViewportWidth;
732 ctx->Const.ViewportBounds.Max = ctx->Const.MaxViewportWidth;
733 }
734
735 /* ARB_gpu_shader5 */
736 if (devinfo->gen >= 7)
737 ctx->Const.MaxVertexStreams = MIN2(4, MAX_VERTEX_STREAMS);
738
739 /* ARB_framebuffer_no_attachments */
740 ctx->Const.MaxFramebufferWidth = 16384;
741 ctx->Const.MaxFramebufferHeight = 16384;
742 ctx->Const.MaxFramebufferLayers = ctx->Const.MaxArrayTextureLayers;
743 ctx->Const.MaxFramebufferSamples = max_samples;
744
745 /* OES_primitive_bounding_box */
746 ctx->Const.NoPrimitiveBoundingBoxOutput = true;
747
748 /* TODO: We should be able to use STD430 packing by default on all hardware
749 * but some piglit tests [1] currently fail on SNB when this is enabled.
750 * The problem is that the message we use for uniform pulls in the
751 * vec4 back-end on SNB is the OWORD block load instruction, which
752 * takes its offset in units of OWORDs (16 bytes). On IVB+, we use the
753 * sampler which doesn't have these restrictions.
754 *
755 * In the scalar back-end, we use the sampler for dynamic uniform loads and
756 * pull an entire cache line at a time for constant offset loads both of
757 * which support almost any alignment.
758 *
759 * [1] glsl-1.40/uniform_buffer/vs-float-array-variable-index.shader_test
760 */
761 if (devinfo->gen >= 7)
762 ctx->Const.UseSTD430AsDefaultPacking = true;
763
764 if (!(ctx->Const.ContextFlags & GL_CONTEXT_FLAG_DEBUG_BIT))
765 ctx->Const.AllowMappedBuffersDuringExecution = true;
766
767 /* GL_ARB_get_program_binary */
768 ctx->Const.NumProgramBinaryFormats = 1;
769 }
770
771 static void
772 brw_initialize_cs_context_constants(struct brw_context *brw)
773 {
774 struct gl_context *ctx = &brw->ctx;
775 const struct intel_screen *screen = brw->screen;
776 struct gen_device_info *devinfo = &brw->screen->devinfo;
777
778 /* FINISHME: Do this for all platforms that the kernel supports */
779 if (devinfo->is_cherryview &&
780 screen->subslice_total > 0 && screen->eu_total > 0) {
781 /* Logical CS threads = EUs per subslice * 7 threads per EU */
782 uint32_t max_cs_threads = screen->eu_total / screen->subslice_total * 7;
783
784 /* Fuse configurations may give more threads than expected, never less. */
785 if (max_cs_threads > devinfo->max_cs_threads)
786 devinfo->max_cs_threads = max_cs_threads;
787 }
788
789 /* Maximum number of scalar compute shader invocations that can be run in
790 * parallel in the same subslice assuming SIMD32 dispatch.
791 *
792 * We don't advertise more than 64 threads, because we are limited to 64 by
793 * our usage of thread_width_max in the gpgpu walker command. This only
794 * currently impacts Haswell, which otherwise might be able to advertise 70
795 * threads. With SIMD32 and 64 threads, Haswell still provides twice the
796 * number of invocations required by ARB_compute_shader.
797 */
798 const unsigned max_threads = MIN2(64, devinfo->max_cs_threads);
799 const uint32_t max_invocations = 32 * max_threads;
800 ctx->Const.MaxComputeWorkGroupSize[0] = max_invocations;
801 ctx->Const.MaxComputeWorkGroupSize[1] = max_invocations;
802 ctx->Const.MaxComputeWorkGroupSize[2] = max_invocations;
803 ctx->Const.MaxComputeWorkGroupInvocations = max_invocations;
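/* Worked example: on Haswell the 70 hardware threads mentioned above are
 * clamped to 64, giving 32 * 64 = 2048 invocations -- double the 1024 that
 * the desktop-GL compute check in brw_initialize_context_constants() requires,
 * matching the "twice the required" note above.
 */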
804 ctx->Const.MaxComputeSharedMemorySize = 64 * 1024;
805 }
806
807 /**
808 * Process driconf (drirc) options, setting appropriate context flags.
809 *
810 * intelInitExtensions still pokes at optionCache directly, in order to
811 * avoid advertising various extensions. No flags are set, so it makes
812 * sense to continue doing that there.
813 */
814 static void
815 brw_process_driconf_options(struct brw_context *brw)
816 {
817 const struct gen_device_info *devinfo = &brw->screen->devinfo;
818 struct gl_context *ctx = &brw->ctx;
819
820 driOptionCache *options = &brw->optionCache;
821 driParseConfigFiles(options, &brw->screen->optionCache,
822 brw->driContext->driScreenPriv->myNum,
823 "i965", NULL);
824
825 int bo_reuse_mode = driQueryOptioni(options, "bo_reuse");
826 switch (bo_reuse_mode) {
827 case DRI_CONF_BO_REUSE_DISABLED:
828 break;
829 case DRI_CONF_BO_REUSE_ALL:
830 brw_bufmgr_enable_reuse(brw->bufmgr);
831 break;
832 }
833
834 if (INTEL_DEBUG & DEBUG_NO_HIZ) {
835 brw->has_hiz = false;
836 /* On gen6, you can only do separate stencil with HIZ. */
837 if (devinfo->gen == 6)
838 brw->has_separate_stencil = false;
839 }
840
841 if (driQueryOptionb(options, "mesa_no_error"))
842 ctx->Const.ContextFlags |= GL_CONTEXT_FLAG_NO_ERROR_BIT_KHR;
843
844 if (driQueryOptionb(options, "always_flush_batch")) {
845 fprintf(stderr, "flushing batchbuffer before/after each draw call\n");
846 brw->always_flush_batch = true;
847 }
848
849 if (driQueryOptionb(options, "always_flush_cache")) {
850 fprintf(stderr, "flushing GPU caches before/after each draw call\n");
851 brw->always_flush_cache = true;
852 }
853
854 if (driQueryOptionb(options, "disable_throttling")) {
855 fprintf(stderr, "disabling flush throttling\n");
856 brw->disable_throttling = true;
857 }
858
859 brw->precompile = driQueryOptionb(&brw->optionCache, "shader_precompile");
860
861 if (driQueryOptionb(&brw->optionCache, "precise_trig"))
862 brw->screen->compiler->precise_trig = true;
863
864 ctx->Const.ForceGLSLExtensionsWarn =
865 driQueryOptionb(options, "force_glsl_extensions_warn");
866
867 ctx->Const.ForceGLSLVersion =
868 driQueryOptioni(options, "force_glsl_version");
869
870 ctx->Const.DisableGLSLLineContinuations =
871 driQueryOptionb(options, "disable_glsl_line_continuations");
872
873 ctx->Const.AllowGLSLExtensionDirectiveMidShader =
874 driQueryOptionb(options, "allow_glsl_extension_directive_midshader");
875
876 ctx->Const.AllowGLSLBuiltinVariableRedeclaration =
877 driQueryOptionb(options, "allow_glsl_builtin_variable_redeclaration");
878
879 ctx->Const.AllowHigherCompatVersion =
880 driQueryOptionb(options, "allow_higher_compat_version");
881
882 ctx->Const.ForceGLSLAbsSqrt =
883 driQueryOptionb(options, "force_glsl_abs_sqrt");
884
885 ctx->Const.GLSLZeroInit = driQueryOptionb(options, "glsl_zero_init");
886
887 brw->dual_color_blend_by_location =
888 driQueryOptionb(options, "dual_color_blend_by_location");
889
890 ctx->Const.AllowGLSLCrossStageInterpolationMismatch =
891 driQueryOptionb(options, "allow_glsl_cross_stage_interpolation_mismatch");
892
893 ctx->Const.dri_config_options_sha1 = ralloc_array(brw, unsigned char, 20);
894 driComputeOptionsSha1(&brw->screen->optionCache,
895 ctx->Const.dri_config_options_sha1);
896 }
897
898 GLboolean
899 brwCreateContext(gl_api api,
900 const struct gl_config *mesaVis,
901 __DRIcontext *driContextPriv,
902 const struct __DriverContextConfig *ctx_config,
903 unsigned *dri_ctx_error,
904 void *sharedContextPrivate)
905 {
906 struct gl_context *shareCtx = (struct gl_context *) sharedContextPrivate;
907 struct intel_screen *screen = driContextPriv->driScreenPriv->driverPrivate;
908 const struct gen_device_info *devinfo = &screen->devinfo;
909 struct dd_function_table functions;
910
911 /* Only allow the __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS flag if the kernel
912 * provides us with context reset notifications.
913 */
914 uint32_t allowed_flags = __DRI_CTX_FLAG_DEBUG |
915 __DRI_CTX_FLAG_FORWARD_COMPATIBLE |
916 __DRI_CTX_FLAG_NO_ERROR;
917
918 if (screen->has_context_reset_notification)
919 allowed_flags |= __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS;
920
921 if (ctx_config->flags & ~allowed_flags) {
922 *dri_ctx_error = __DRI_CTX_ERROR_UNKNOWN_FLAG;
923 return false;
924 }
925
926 if (ctx_config->attribute_mask &
927 ~(__DRIVER_CONTEXT_ATTRIB_RESET_STRATEGY |
928 __DRIVER_CONTEXT_ATTRIB_PRIORITY)) {
929 *dri_ctx_error = __DRI_CTX_ERROR_UNKNOWN_ATTRIBUTE;
930 return false;
931 }
932
933 bool notify_reset =
934 ((ctx_config->attribute_mask & __DRIVER_CONTEXT_ATTRIB_RESET_STRATEGY) &&
935 ctx_config->reset_strategy != __DRI_CTX_RESET_NO_NOTIFICATION);
936
937 struct brw_context *brw = rzalloc(NULL, struct brw_context);
938 if (!brw) {
939 fprintf(stderr, "%s: failed to alloc context\n", __func__);
940 *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
941 return false;
942 }
943
944 driContextPriv->driverPrivate = brw;
945 brw->driContext = driContextPriv;
946 brw->screen = screen;
947 brw->bufmgr = screen->bufmgr;
948
949 brw->has_hiz = devinfo->has_hiz_and_separate_stencil;
950 brw->has_separate_stencil = devinfo->has_hiz_and_separate_stencil;
951
952 brw->has_swizzling = screen->hw_has_swizzling;
953
954 brw->isl_dev = screen->isl_dev;
955
956 brw->vs.base.stage = MESA_SHADER_VERTEX;
957 brw->tcs.base.stage = MESA_SHADER_TESS_CTRL;
958 brw->tes.base.stage = MESA_SHADER_TESS_EVAL;
959 brw->gs.base.stage = MESA_SHADER_GEOMETRY;
960 brw->wm.base.stage = MESA_SHADER_FRAGMENT;
961 brw->cs.base.stage = MESA_SHADER_COMPUTE;
962
963 brw_init_driver_functions(brw, &functions);
964
965 if (notify_reset)
966 functions.GetGraphicsResetStatus = brw_get_graphics_reset_status;
967
968 struct gl_context *ctx = &brw->ctx;
969
970 if (!_mesa_initialize_context(ctx, api, mesaVis, shareCtx, &functions)) {
971 *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
972 fprintf(stderr, "%s: failed to init mesa context\n", __func__);
973 intelDestroyContext(driContextPriv);
974 return false;
975 }
976
977 driContextSetFlags(ctx, ctx_config->flags);
978
979 /* Initialize the software rasterizer and helper modules.
980 *
981 * As of GL 3.1 core, the gen4+ driver doesn't need the swrast context for
982 * software fallbacks (which we have to support on legacy GL to do weird
983 * glDrawPixels(), glBitmap(), and other functions).
984 */
985 if (api != API_OPENGL_CORE && api != API_OPENGLES2) {
986 _swrast_CreateContext(ctx);
987 }
988
989 _vbo_CreateContext(ctx);
990 if (ctx->swrast_context) {
991 _tnl_CreateContext(ctx);
992 TNL_CONTEXT(ctx)->Driver.RunPipeline = _tnl_run_pipeline;
993 _swsetup_CreateContext(ctx);
994
995 /* Configure swrast to match hardware characteristics: */
996 _swrast_allow_pixel_fog(ctx, false);
997 _swrast_allow_vertex_fog(ctx, true);
998 }
999
1000 _mesa_meta_init(ctx);
1001
1002 brw_process_driconf_options(brw);
1003
1004 if (INTEL_DEBUG & DEBUG_PERF)
1005 brw->perf_debug = true;
1006
1007 brw_initialize_cs_context_constants(brw);
1008 brw_initialize_context_constants(brw);
1009
1010 ctx->Const.ResetStrategy = notify_reset
1011 ? GL_LOSE_CONTEXT_ON_RESET_ARB : GL_NO_RESET_NOTIFICATION_ARB;
1012
1013 /* Reinitialize the context point state. It depends on ctx->Const values. */
1014 _mesa_init_point(ctx);
1015
1016 intel_fbo_init(brw);
1017
1018 intel_batchbuffer_init(brw);
1019
1020 /* Create a new hardware context. Using a hardware context means that
1021 * our GPU state will be saved/restored on context switch, allowing us
1022 * to assume that the GPU is in the same state we left it in.
1023 *
1024 * This is required for transform feedback buffer offsets, query objects,
1025 * and also allows us to reduce how much state we have to emit.
1026 */
1027 brw->hw_ctx = brw_create_hw_context(brw->bufmgr);
1028 if (!brw->hw_ctx && devinfo->gen >= 6) {
1029 fprintf(stderr, "Failed to create hardware context.\n");
1030 intelDestroyContext(driContextPriv);
1031 return false;
1032 }
1033
1034 if (brw->hw_ctx) {
1035 int hw_priority = GEN_CONTEXT_MEDIUM_PRIORITY;
1036 if (ctx_config->attribute_mask & __DRIVER_CONTEXT_ATTRIB_PRIORITY) {
1037 switch (ctx_config->priority) {
1038 case __DRI_CTX_PRIORITY_LOW:
1039 hw_priority = GEN_CONTEXT_LOW_PRIORITY;
1040 break;
1041 case __DRI_CTX_PRIORITY_HIGH:
1042 hw_priority = GEN_CONTEXT_HIGH_PRIORITY;
1043 break;
1044 }
1045 }
1046 if (hw_priority != I915_CONTEXT_DEFAULT_PRIORITY &&
1047 brw_hw_context_set_priority(brw->bufmgr, brw->hw_ctx, hw_priority)) {
1048 fprintf(stderr,
1049 "Failed to set priority [%d:%d] for hardware context.\n",
1050 ctx_config->priority, hw_priority);
1051 intelDestroyContext(driContextPriv);
1052 return false;
1053 }
1054 }
1055
1056 if (brw_init_pipe_control(brw, devinfo)) {
1057 *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
1058 intelDestroyContext(driContextPriv);
1059 return false;
1060 }
1061
1062 if (devinfo->gen == 11) {
1063 fprintf(stderr,
1064 "WARNING: i965 does not fully support Gen11 yet.\n"
1065 "Instability or lower performance might occur.\n");
1066
1067 }
1068
1069 brw_upload_init(&brw->upload, brw->bufmgr, 65536);
1070
1071 brw_init_state(brw);
1072
1073 intelInitExtensions(ctx);
1074
1075 brw_init_surface_formats(brw);
1076
1077 brw_blorp_init(brw);
1078
1079 brw->urb.size = devinfo->urb.size;
1080
1081 if (devinfo->gen == 6)
1082 brw->urb.gs_present = false;
1083
1084 brw->prim_restart.in_progress = false;
1085 brw->prim_restart.enable_cut_index = false;
1086 brw->gs.enabled = false;
1087 brw->clip.viewport_count = 1;
1088
1089 brw->predicate.state = BRW_PREDICATE_STATE_RENDER;
1090
1091 brw->max_gtt_map_object_size = screen->max_gtt_map_object_size;
1092
1093 ctx->VertexProgram._MaintainTnlProgram = true;
1094 ctx->FragmentProgram._MaintainTexEnvProgram = true;
1095
1096 brw_draw_init( brw );
1097
1098 if ((ctx_config->flags & __DRI_CTX_FLAG_DEBUG) != 0) {
1099 /* Turn on some extra GL_ARB_debug_output generation. */
1100 brw->perf_debug = true;
1101 }
1102
1103 if ((ctx_config->flags & __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS) != 0) {
1104 ctx->Const.ContextFlags |= GL_CONTEXT_FLAG_ROBUST_ACCESS_BIT_ARB;
1105 ctx->Const.RobustAccess = GL_TRUE;
1106 }
1107
1108 if (INTEL_DEBUG & DEBUG_SHADER_TIME)
1109 brw_init_shader_time(brw);
1110
1111 _mesa_override_extensions(ctx);
1112 _mesa_compute_version(ctx);
1113
1114 /* GL_ARB_gl_spirv */
1115 if (ctx->Extensions.ARB_gl_spirv)
1116 brw_initialize_spirv_supported_capabilities(brw);
1117
1118 _mesa_initialize_dispatch_tables(ctx);
1119 _mesa_initialize_vbo_vtxfmt(ctx);
1120
1121 if (ctx->Extensions.INTEL_performance_query)
1122 brw_init_performance_queries(brw);
1123
1124 vbo_use_buffer_objects(ctx);
1125 vbo_always_unmap_buffers(ctx);
1126
1127 brw->ctx.Cache = brw->screen->disk_cache;
1128
1129 return true;
1130 }
1131
1132 void
1133 intelDestroyContext(__DRIcontext * driContextPriv)
1134 {
1135 struct brw_context *brw =
1136 (struct brw_context *) driContextPriv->driverPrivate;
1137 struct gl_context *ctx = &brw->ctx;
1138
1139 _mesa_meta_free(&brw->ctx);
1140
1141 if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
1142 /* Force a report. */
1143 brw->shader_time.report_time = 0;
1144
1145 brw_collect_and_report_shader_time(brw);
1146 brw_destroy_shader_time(brw);
1147 }
1148
1149 blorp_finish(&brw->blorp);
1150
1151 brw_destroy_state(brw);
1152 brw_draw_destroy(brw);
1153
1154 brw_bo_unreference(brw->curbe.curbe_bo);
1155
1156 brw_bo_unreference(brw->vs.base.scratch_bo);
1157 brw_bo_unreference(brw->tcs.base.scratch_bo);
1158 brw_bo_unreference(brw->tes.base.scratch_bo);
1159 brw_bo_unreference(brw->gs.base.scratch_bo);
1160 brw_bo_unreference(brw->wm.base.scratch_bo);
1161
1162 brw_bo_unreference(brw->vs.base.push_const_bo);
1163 brw_bo_unreference(brw->tcs.base.push_const_bo);
1164 brw_bo_unreference(brw->tes.base.push_const_bo);
1165 brw_bo_unreference(brw->gs.base.push_const_bo);
1166 brw_bo_unreference(brw->wm.base.push_const_bo);
1167
1168 brw_destroy_hw_context(brw->bufmgr, brw->hw_ctx);
1169
1170 if (ctx->swrast_context) {
1171 _swsetup_DestroyContext(&brw->ctx);
1172 _tnl_DestroyContext(&brw->ctx);
1173 }
1174 _vbo_DestroyContext(&brw->ctx);
1175
1176 if (ctx->swrast_context)
1177 _swrast_DestroyContext(&brw->ctx);
1178
1179 brw_fini_pipe_control(brw);
1180 intel_batchbuffer_free(&brw->batch);
1181
1182 brw_bo_unreference(brw->throttle_batch[1]);
1183 brw_bo_unreference(brw->throttle_batch[0]);
1184 brw->throttle_batch[1] = NULL;
1185 brw->throttle_batch[0] = NULL;
1186
1187 driDestroyOptionCache(&brw->optionCache);
1188
1189 /* free the Mesa context */
1190 _mesa_free_context_data(&brw->ctx);
1191
1192 ralloc_free(brw);
1193 driContextPriv->driverPrivate = NULL;
1194 }
1195
1196 GLboolean
1197 intelUnbindContext(__DRIcontext * driContextPriv)
1198 {
1199 /* Unset current context and dispatch table */
1200 _mesa_make_current(NULL, NULL, NULL);
1201
1202 return true;
1203 }
1204
1205 /**
1206 * Fixes up the context for GLES2/3 with our default-to-sRGB-capable behavior
1207 * on window system framebuffers.
1208 *
1209 * Desktop GL is fairly reasonable in its handling of sRGB: You can ask if
1210 * your renderbuffer can do sRGB encode, and you can flip a switch that does
1211 * sRGB encode if the renderbuffer can handle it. You can ask specifically
1212 * for a visual where you're guaranteed to be capable, but it turns out that
1213 * everyone just makes all their ARGB8888 visuals capable and doesn't offer
1214 * incapable ones, because there's no difference between the two in resources
1215 * used. Applications thus get built that accidentally rely on the default
1216 * visual choice being sRGB, so we make ours sRGB capable. Everything sounds
1217 * great...
1218 *
1219 * But for GLES2/3, they decided that it was silly to not turn on sRGB encode
1220 * for sRGB renderbuffers you made with the GL_EXT_texture_sRGB equivalent.
1221 * So they removed the enable knob and made it "if the renderbuffer is sRGB
1222 * capable, do sRGB encode". Then, for your window system renderbuffers, you
1223 * can ask for sRGB visuals and get sRGB encode, or not ask for sRGB visuals
1224 * and get no sRGB encode (assuming that both kinds of visual are available).
1225 * Thus our choice to support sRGB by default on our visuals for desktop would
1226 * result in broken rendering of GLES apps that aren't expecting sRGB encode.
1227 *
1228 * Unfortunately, renderbuffer setup happens before a context is created. So
1229 * in intel_screen.c we always set up sRGB, and here, if you're a GLES2/3
1230 * context (without an sRGB visual), we go turn that back off before anyone
1231 * finds out.
1232 */
1233 static void
1234 intel_gles3_srgb_workaround(struct brw_context *brw,
1235 struct gl_framebuffer *fb)
1236 {
1237 struct gl_context *ctx = &brw->ctx;
1238
1239 if (_mesa_is_desktop_gl(ctx) || !fb->Visual.sRGBCapable)
1240 return;
1241
1242 for (int i = 0; i < BUFFER_COUNT; i++) {
1243 struct gl_renderbuffer *rb = fb->Attachment[i].Renderbuffer;
1244
1245 /* Check if sRGB was specifically asked for. */
1246 struct intel_renderbuffer *irb = intel_get_renderbuffer(fb, i);
1247 if (irb && irb->need_srgb)
1248 return;
1249
1250 if (rb)
1251 rb->Format = _mesa_get_srgb_format_linear(rb->Format);
1252 }
1253 /* Disable sRGB from framebuffers that are not compatible. */
1254 fb->Visual.sRGBCapable = false;
1255 }
1256
1257 GLboolean
1258 intelMakeCurrent(__DRIcontext * driContextPriv,
1259 __DRIdrawable * driDrawPriv,
1260 __DRIdrawable * driReadPriv)
1261 {
1262 struct brw_context *brw;
1263
1264 if (driContextPriv)
1265 brw = (struct brw_context *) driContextPriv->driverPrivate;
1266 else
1267 brw = NULL;
1268
1269 if (driContextPriv) {
1270 struct gl_context *ctx = &brw->ctx;
1271 struct gl_framebuffer *fb, *readFb;
1272
1273 if (driDrawPriv == NULL) {
1274 fb = _mesa_get_incomplete_framebuffer();
1275 } else {
1276 fb = driDrawPriv->driverPrivate;
1277 driContextPriv->dri2.draw_stamp = driDrawPriv->dri2.stamp - 1;
1278 }
1279
1280 if (driReadPriv == NULL) {
1281 readFb = _mesa_get_incomplete_framebuffer();
1282 } else {
1283 readFb = driReadPriv->driverPrivate;
1284 driContextPriv->dri2.read_stamp = driReadPriv->dri2.stamp - 1;
1285 }
1286
1287 /* The sRGB workaround changes the renderbuffer's format. We must change
1288 * the format before the renderbuffer's miptree gets allocated, otherwise
1289 * the formats of the renderbuffer and its miptree will differ.
1290 */
1291 intel_gles3_srgb_workaround(brw, fb);
1292 intel_gles3_srgb_workaround(brw, readFb);
1293
1294 /* If the context viewport hasn't been initialized, force a call out to
1295 * the loader to get buffers so we have a drawable size for the initial
1296 * viewport. */
1297 if (!brw->ctx.ViewportInitialized)
1298 intel_prepare_render(brw);
1299
1300 _mesa_make_current(ctx, fb, readFb);
1301 } else {
1302 _mesa_make_current(NULL, NULL, NULL);
1303 }
1304
1305 return true;
1306 }
1307
1308 void
1309 intel_resolve_for_dri2_flush(struct brw_context *brw,
1310 __DRIdrawable *drawable)
1311 {
1312 const struct gen_device_info *devinfo = &brw->screen->devinfo;
1313
1314 if (devinfo->gen < 6) {
1315 /* MSAA and fast color clear are not supported, so don't waste time
1316 * checking whether a resolve is needed.
1317 */
1318 return;
1319 }
1320
1321 struct gl_framebuffer *fb = drawable->driverPrivate;
1322 struct intel_renderbuffer *rb;
1323
1324 /* Usually, only the back buffer will need to be downsampled. However,
1325 * the front buffer will also need it if the user has rendered into it.
1326 */
1327 static const gl_buffer_index buffers[2] = {
1328 BUFFER_BACK_LEFT,
1329 BUFFER_FRONT_LEFT,
1330 };
1331
1332 for (int i = 0; i < 2; ++i) {
1333 rb = intel_get_renderbuffer(fb, buffers[i]);
1334 if (rb == NULL || rb->mt == NULL)
1335 continue;
1336 if (rb->mt->surf.samples == 1) {
1337 assert(rb->mt_layer == 0 && rb->mt_level == 0 &&
1338 rb->layer_count == 1);
1339 intel_miptree_prepare_external(brw, rb->mt);
1340 } else {
1341 intel_renderbuffer_downsample(brw, rb);
1342
1343 /* Call prepare_external on the single-sample miptree to do any
1344 * needed resolves prior to handing it off to the window system.
1345 * This is needed in the case that rb->singlesample_mt is Y-tiled
1346 * with CCS_E enabled but without I915_FORMAT_MOD_Y_TILED_CCS_E. In
1347 * this case, the MSAA resolve above will write compressed data into
1348 * rb->singlesample_mt.
1349 *
1350 * TODO: Some day, if we decide to care about the tiny performance
1351 * hit we're taking by doing the MSAA resolve and then a CCS resolve,
1352 * we could detect this case and just allocate the single-sampled
1353 * miptree without aux. However, that would be a lot of plumbing and
1354 * this is a rather exotic case so it's not really worth it.
1355 */
1356 intel_miptree_prepare_external(brw, rb->singlesample_mt);
1357 }
1358 }
1359 }
1360
1361 static unsigned
1362 intel_bits_per_pixel(const struct intel_renderbuffer *rb)
1363 {
1364 return _mesa_get_format_bytes(intel_rb_format(rb)) * 8;
1365 }
1366
1367 static void
1368 intel_query_dri2_buffers(struct brw_context *brw,
1369 __DRIdrawable *drawable,
1370 __DRIbuffer **buffers,
1371 int *count);
1372
1373 static void
1374 intel_process_dri2_buffer(struct brw_context *brw,
1375 __DRIdrawable *drawable,
1376 __DRIbuffer *buffer,
1377 struct intel_renderbuffer *rb,
1378 const char *buffer_name);
1379
1380 static void
1381 intel_update_image_buffers(struct brw_context *brw, __DRIdrawable *drawable);
1382
1383 static void
1384 intel_update_dri2_buffers(struct brw_context *brw, __DRIdrawable *drawable)
1385 {
1386 struct gl_framebuffer *fb = drawable->driverPrivate;
1387 struct intel_renderbuffer *rb;
1388 __DRIbuffer *buffers = NULL;
1389 int count;
1390 const char *region_name;
1391
1392 /* Set this up front, so that in case our buffers get invalidated
1393 * while we're getting new buffers, we don't clobber the stamp and
1394 * thus ignore the invalidate. */
1395 drawable->lastStamp = drawable->dri2.stamp;
1396
1397 if (unlikely(INTEL_DEBUG & DEBUG_DRI))
1398 fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);
1399
1400 intel_query_dri2_buffers(brw, drawable, &buffers, &count);
1401
1402 if (buffers == NULL)
1403 return;
1404
1405 for (int i = 0; i < count; i++) {
1406 switch (buffers[i].attachment) {
1407 case __DRI_BUFFER_FRONT_LEFT:
1408 rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
1409 region_name = "dri2 front buffer";
1410 break;
1411
1412 case __DRI_BUFFER_FAKE_FRONT_LEFT:
1413 rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
1414 region_name = "dri2 fake front buffer";
1415 break;
1416
1417 case __DRI_BUFFER_BACK_LEFT:
1418 rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
1419 region_name = "dri2 back buffer";
1420 break;
1421
1422 case __DRI_BUFFER_DEPTH:
1423 case __DRI_BUFFER_HIZ:
1424 case __DRI_BUFFER_DEPTH_STENCIL:
1425 case __DRI_BUFFER_STENCIL:
1426 case __DRI_BUFFER_ACCUM:
1427 default:
1428 fprintf(stderr,
1429 "unhandled buffer attach event, attachment type %d\n",
1430 buffers[i].attachment);
1431 return;
1432 }
1433
1434 intel_process_dri2_buffer(brw, drawable, &buffers[i], rb, region_name);
1435 }
1436
1437 }
1438
1439 void
1440 intel_update_renderbuffers(__DRIcontext *context, __DRIdrawable *drawable)
1441 {
1442 struct brw_context *brw = context->driverPrivate;
1443 __DRIscreen *dri_screen = brw->screen->driScrnPriv;
1444
1445 /* Set this up front, so that in case our buffers get invalidated
1446 * while we're getting new buffers, we don't clobber the stamp and
1447 * thus ignore the invalidate. */
1448 drawable->lastStamp = drawable->dri2.stamp;
1449
1450 if (unlikely(INTEL_DEBUG & DEBUG_DRI))
1451 fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);
1452
1453 if (dri_screen->image.loader)
1454 intel_update_image_buffers(brw, drawable);
1455 else
1456 intel_update_dri2_buffers(brw, drawable);
1457
1458 driUpdateFramebufferSize(&brw->ctx, drawable);
1459 }
1460
1461 /**
1462 * intel_prepare_render should be called anywhere that current read/drawbuffer
1463 * state is required.
1464 */
1465 void
1466 intel_prepare_render(struct brw_context *brw)
1467 {
1468 struct gl_context *ctx = &brw->ctx;
1469 __DRIcontext *driContext = brw->driContext;
1470 __DRIdrawable *drawable;
1471
1472 drawable = driContext->driDrawablePriv;
1473 if (drawable && drawable->dri2.stamp != driContext->dri2.draw_stamp) {
1474 if (drawable->lastStamp != drawable->dri2.stamp)
1475 intel_update_renderbuffers(driContext, drawable);
1476 driContext->dri2.draw_stamp = drawable->dri2.stamp;
1477 }
1478
1479 drawable = driContext->driReadablePriv;
1480 if (drawable && drawable->dri2.stamp != driContext->dri2.read_stamp) {
1481 if (drawable->lastStamp != drawable->dri2.stamp)
1482 intel_update_renderbuffers(driContext, drawable);
1483 driContext->dri2.read_stamp = drawable->dri2.stamp;
1484 }
1485
1486 /* If we're currently rendering to the front buffer, the rendering
1487 * that will happen next will probably dirty the front buffer. So
1488 * mark it as dirty here.
1489 */
1490 if (_mesa_is_front_buffer_drawing(ctx->DrawBuffer))
1491 brw->front_buffer_dirty = true;
1492
1493 if (brw->is_shared_buffer_bound) {
1494 /* Subsequent rendering will probably dirty the shared buffer. */
1495 brw->is_shared_buffer_dirty = true;
1496 }
1497 }
1498
1499 /**
1500 * \brief Query DRI2 to obtain a DRIdrawable's buffers.
1501 *
1502 * To determine which DRI buffers to request, examine the renderbuffers
1503 * attached to the drawable's framebuffer. Then request the buffers with
1504 * DRI2GetBuffers() or DRI2GetBuffersWithFormat().
1505 *
1506 * This is called from intel_update_renderbuffers().
1507 *
1508 * \param drawable Drawable whose buffers are queried.
1509 * \param buffers [out] List of buffers returned by DRI2 query.
1510 * \param buffer_count [out] Number of buffers returned.
1511 *
1512 * \see intel_update_renderbuffers()
1513 * \see DRI2GetBuffers()
1514 * \see DRI2GetBuffersWithFormat()
1515 */
1516 static void
1517 intel_query_dri2_buffers(struct brw_context *brw,
1518 __DRIdrawable *drawable,
1519 __DRIbuffer **buffers,
1520 int *buffer_count)
1521 {
1522 __DRIscreen *dri_screen = brw->screen->driScrnPriv;
1523 struct gl_framebuffer *fb = drawable->driverPrivate;
1524 int i = 0;
1525 unsigned attachments[8];
1526
1527 struct intel_renderbuffer *front_rb;
1528 struct intel_renderbuffer *back_rb;
1529
1530 front_rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
1531 back_rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
1532
1533 memset(attachments, 0, sizeof(attachments));
1534 if ((_mesa_is_front_buffer_drawing(fb) ||
1535 _mesa_is_front_buffer_reading(fb) ||
1536 !back_rb) && front_rb) {
1537 /* If a fake front buffer is in use, then querying for
1538 * __DRI_BUFFER_FRONT_LEFT will cause the server to copy the image from
1539 * the real front buffer to the fake front buffer. So before doing the
1540 * query, we need to make sure all the pending drawing has landed in the
1541 * real front buffer.
1542 */
1543 intel_batchbuffer_flush(brw);
1544 intel_flush_front(&brw->ctx);
1545
1546 attachments[i++] = __DRI_BUFFER_FRONT_LEFT;
1547 attachments[i++] = intel_bits_per_pixel(front_rb);
1548 } else if (front_rb && brw->front_buffer_dirty) {
1549 /* We have pending front buffer rendering, but we aren't querying for a
1550 * front buffer. If the front buffer we have is a fake front buffer,
1551 * the X server is going to throw it away when it processes the query.
1552 * So before doing the query, make sure all the pending drawing has
1553 * landed in the real front buffer.
1554 */
1555 intel_batchbuffer_flush(brw);
1556 intel_flush_front(&brw->ctx);
1557 }
1558
1559 if (back_rb) {
1560 attachments[i++] = __DRI_BUFFER_BACK_LEFT;
1561 attachments[i++] = intel_bits_per_pixel(back_rb);
1562 }
1563
1564 assert(i <= ARRAY_SIZE(attachments));
1565
1566 *buffers =
1567 dri_screen->dri2.loader->getBuffersWithFormat(drawable,
1568 &drawable->w,
1569 &drawable->h,
1570 attachments, i / 2,
1571 buffer_count,
1572 drawable->loaderPrivate);
1573 }
1574
1575 /**
1576 * \brief Assign a DRI buffer's DRM region to a renderbuffer.
1577 *
1578 * This is called from intel_update_renderbuffers().
1579 *
1580 * \par Note:
1581 * DRI buffers whose attachment point is DRI2BufferStencil or
1582 * DRI2BufferDepthStencil are handled as special cases.
1583 *
1584  * \param buffer_name is a human-readable name, such as "dri2 front buffer",
1585 * that is passed to brw_bo_gem_create_from_name().
1586 *
1587 * \see intel_update_renderbuffers()
1588 */
1589 static void
1590 intel_process_dri2_buffer(struct brw_context *brw,
1591 __DRIdrawable *drawable,
1592 __DRIbuffer *buffer,
1593 struct intel_renderbuffer *rb,
1594 const char *buffer_name)
1595 {
1596 struct gl_framebuffer *fb = drawable->driverPrivate;
1597 struct brw_bo *bo;
1598
1599 if (!rb)
1600 return;
1601
1602 unsigned num_samples = rb->Base.Base.NumSamples;
1603
1604 /* We try to avoid closing and reopening the same BO name, because the first
1605 * use of a mapping of the buffer involves a bunch of page faulting which is
1606 * moderately expensive.
1607 */
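   /* For a multisampled window-system buffer, the loader-provided BO backs
    * rb->singlesample_mt and rb->mt is the multisample render target; for a
    * single-sampled buffer the BO lives in rb->mt directly.
    */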
1608 struct intel_mipmap_tree *last_mt;
1609 if (num_samples == 0)
1610 last_mt = rb->mt;
1611 else
1612 last_mt = rb->singlesample_mt;
1613
1614 uint32_t old_name = 0;
1615 if (last_mt) {
1616 /* The bo already has a name because the miptree was created by a
1617 * previous call to intel_process_dri2_buffer(). If a bo already has a
1618 * name, then brw_bo_flink() is a low-cost getter. It does not
1619 * create a new name.
1620 */
1621 brw_bo_flink(last_mt->bo, &old_name);
1622 }
1623
1624 if (old_name == buffer->name)
1625 return;
1626
1627 if (unlikely(INTEL_DEBUG & DEBUG_DRI)) {
1628 fprintf(stderr,
1629 "attaching buffer %d, at %d, cpp %d, pitch %d\n",
1630 buffer->name, buffer->attachment,
1631 buffer->cpp, buffer->pitch);
1632 }
1633
1634 bo = brw_bo_gem_create_from_name(brw->bufmgr, buffer_name,
1635 buffer->name);
1636 if (!bo) {
1637 fprintf(stderr,
1638 "Failed to open BO for returned DRI2 buffer "
1639 "(%dx%d, %s, named %d).\n"
1640 "This is likely a bug in the X Server that will lead to a "
1641 "crash soon.\n",
1642 drawable->w, drawable->h, buffer_name, buffer->name);
1643 return;
1644 }
1645
1646 uint32_t tiling, swizzle;
1647 brw_bo_get_tiling(bo, &tiling, &swizzle);
1648
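   /* Wrap the freshly opened BO in a single-level miptree, using the pitch
    * reported by the loader and the tiling the kernel reports for the BO.
    */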
1649 struct intel_mipmap_tree *mt =
1650 intel_miptree_create_for_bo(brw,
1651 bo,
1652 intel_rb_format(rb),
1653 0,
1654 drawable->w,
1655 drawable->h,
1656 1,
1657 buffer->pitch,
1658 isl_tiling_from_i915_tiling(tiling),
1659 MIPTREE_CREATE_DEFAULT);
1660 if (!mt) {
1661 brw_bo_unreference(bo);
1662 return;
1663 }
1664
1665    /* We got this BO from X11. We can't assume that we have coherent texture
1666     * access because X may suddenly decide to use it for scan-out, which would
1667 * destroy coherency.
1668 */
1669 bo->cache_coherent = false;
1670
1671 if (!intel_update_winsys_renderbuffer_miptree(brw, rb, mt,
1672 drawable->w, drawable->h,
1673 buffer->pitch)) {
1674 brw_bo_unreference(bo);
1675 intel_miptree_release(&mt);
1676 return;
1677 }
1678
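   /* For a multisampled window-system buffer, rendering targets the
    * multisample miptree, so pull the newly attached single-sampled front
    * buffer contents back up into it before any front-buffer drawing.
    */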
1679 if (_mesa_is_front_buffer_drawing(fb) &&
1680 (buffer->attachment == __DRI_BUFFER_FRONT_LEFT ||
1681 buffer->attachment == __DRI_BUFFER_FAKE_FRONT_LEFT) &&
1682 rb->Base.Base.NumSamples > 1) {
1683 intel_renderbuffer_upsample(brw, rb);
1684 }
1685
1686 assert(rb->mt);
1687
1688 brw_bo_unreference(bo);
1689 }
1690
1691 /**
1692 * \brief Query DRI image loader to obtain a DRIdrawable's buffers.
1693 *
1694 * To determine which DRI buffers to request, examine the renderbuffers
1695 * attached to the drawable's framebuffer. Then request the buffers from
1696  * the image loader.
1697 *
1698 * This is called from intel_update_renderbuffers().
1699 *
1700 * \param drawable Drawable whose buffers are queried.
1701  * \param buffers        [out] List of buffers returned by the image loader.
1702 * \param buffer_count [out] Number of buffers returned.
1703 *
1704 * \see intel_update_renderbuffers()
1705 */
1707 static void
1708 intel_update_image_buffer(struct brw_context *intel,
1709 __DRIdrawable *drawable,
1710 struct intel_renderbuffer *rb,
1711 __DRIimage *buffer,
1712 enum __DRIimageBufferMask buffer_type)
1713 {
1714 struct gl_framebuffer *fb = drawable->driverPrivate;
1715
1716 if (!rb || !buffer->bo)
1717 return;
1718
1719 unsigned num_samples = rb->Base.Base.NumSamples;
1720
1721 /* Check and see if we're already bound to the right
1722     * buffer object.
1723 */
1724 struct intel_mipmap_tree *last_mt;
1725 if (num_samples == 0)
1726 last_mt = rb->mt;
1727 else
1728 last_mt = rb->singlesample_mt;
1729
1730 if (last_mt && last_mt->bo == buffer->bo) {
1731 if (buffer_type == __DRI_IMAGE_BUFFER_SHARED) {
1732 intel_miptree_make_shareable(intel, last_mt);
1733 }
1734 return;
1735 }
1736
1737 /* Only allow internal compression if samples == 0. For multisampled
1738 * window system buffers, the only thing the single-sampled buffer is used
1739 * for is as a resolve target. If we do any compression beyond what is
1740     * supported by the window system, we will just have to resolve anyway,
1741     * so it's probably better not to bother.
1742 */
1743 const bool allow_internal_aux = (num_samples == 0);
1744
1745 struct intel_mipmap_tree *mt =
1746 intel_miptree_create_for_dri_image(intel, buffer, GL_TEXTURE_2D,
1747 intel_rb_format(rb),
1748 allow_internal_aux);
1749 if (!mt)
1750 return;
1751
1752 if (!intel_update_winsys_renderbuffer_miptree(intel, rb, mt,
1753 buffer->width, buffer->height,
1754 buffer->pitch)) {
1755 intel_miptree_release(&mt);
1756 return;
1757 }
1758
1759 if (_mesa_is_front_buffer_drawing(fb) &&
1760 buffer_type == __DRI_IMAGE_BUFFER_FRONT &&
1761 rb->Base.Base.NumSamples > 1) {
1762 intel_renderbuffer_upsample(intel, rb);
1763 }
1764
1765 if (buffer_type == __DRI_IMAGE_BUFFER_SHARED) {
1766 /* The compositor and the application may access this image
1767 * concurrently. The display hardware may even scanout the image while
1768 * the GPU is rendering to it. Aux surfaces cause difficulty with
1769 * concurrent access, so permanently disable aux for this miptree.
1770 *
1771 * Perhaps we could improve overall application performance by
1772 * re-enabling the aux surface when EGL_RENDER_BUFFER transitions to
1773 * EGL_BACK_BUFFER, then disabling it again when EGL_RENDER_BUFFER
1774 * returns to EGL_SINGLE_BUFFER. I expect the wins and losses with this
1775 * approach to be highly dependent on the application's GL usage.
1776 *
1777 * I [chadv] expect clever disabling/reenabling to be counterproductive
1778 * in the use cases I care about: applications that render nearly
1779        * realtime handwriting to the surface while possibly undergoing
1780        * simultaneous scanout as a display plane. The app requires low
1781 * render latency. Even though the app spends most of its time in
1782 * shared-buffer mode, it also frequently transitions between
1783 * shared-buffer (EGL_SINGLE_BUFFER) and double-buffer (EGL_BACK_BUFFER)
1784        * mode. Visual stutter during the transitions should be avoided.
1785 *
1786 * In this case, I [chadv] believe reducing the GPU workload at
1787 * shared-buffer/double-buffer transitions would offer a smoother app
1788 * experience than any savings due to aux compression. But I've
1789 * collected no data to prove my theory.
1790 */
1791 intel_miptree_make_shareable(intel, mt);
1792 }
1793 }
1794
1795 static void
1796 intel_update_image_buffers(struct brw_context *brw, __DRIdrawable *drawable)
1797 {
1798 struct gl_framebuffer *fb = drawable->driverPrivate;
1799 __DRIscreen *dri_screen = brw->screen->driScrnPriv;
1800 struct intel_renderbuffer *front_rb;
1801 struct intel_renderbuffer *back_rb;
1802 struct __DRIimageList images;
1803 mesa_format format;
1804 uint32_t buffer_mask = 0;
1805 int ret;
1806
1807 front_rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
1808 back_rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
1809
1810 if (back_rb)
1811 format = intel_rb_format(back_rb);
1812 else if (front_rb)
1813 format = intel_rb_format(front_rb);
1814 else
1815 return;
1816
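   /* Ask the image loader only for the buffers we can actually use: the
    * front image when front-buffer drawing/reading is active (or no back
    * buffer exists), and the back image whenever a back renderbuffer exists.
    */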
1817 if (front_rb && (_mesa_is_front_buffer_drawing(fb) ||
1818 _mesa_is_front_buffer_reading(fb) || !back_rb)) {
1819 buffer_mask |= __DRI_IMAGE_BUFFER_FRONT;
1820 }
1821
1822 if (back_rb)
1823 buffer_mask |= __DRI_IMAGE_BUFFER_BACK;
1824
1825 ret = dri_screen->image.loader->getBuffers(drawable,
1826 driGLFormatToImageFormat(format),
1827 &drawable->dri2.stamp,
1828 drawable->loaderPrivate,
1829 buffer_mask,
1830 &images);
1831 if (!ret)
1832 return;
1833
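   /* The loader reports which of the requested buffers it actually supplied
    * in images.image_mask; only update the renderbuffers it returned.
    */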
1834 if (images.image_mask & __DRI_IMAGE_BUFFER_FRONT) {
1835 drawable->w = images.front->width;
1836 drawable->h = images.front->height;
1837 intel_update_image_buffer(brw,
1838 drawable,
1839 front_rb,
1840 images.front,
1841 __DRI_IMAGE_BUFFER_FRONT);
1842 }
1843
1844 if (images.image_mask & __DRI_IMAGE_BUFFER_BACK) {
1845 drawable->w = images.back->width;
1846 drawable->h = images.back->height;
1847 intel_update_image_buffer(brw,
1848 drawable,
1849 back_rb,
1850 images.back,
1851 __DRI_IMAGE_BUFFER_BACK);
1852 }
1853
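   /* A shared buffer is exclusive: when the loader returns one it is the
    * only image in the list, and it is delivered through images.back.
    */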
1854 if (images.image_mask & __DRI_IMAGE_BUFFER_SHARED) {
1855 assert(images.image_mask == __DRI_IMAGE_BUFFER_SHARED);
1856 drawable->w = images.back->width;
1857 drawable->h = images.back->height;
1858 intel_update_image_buffer(brw,
1859 drawable,
1860 back_rb,
1861 images.back,
1862 __DRI_IMAGE_BUFFER_SHARED);
1863 brw->is_shared_buffer_bound = true;
1864 } else {
1865 brw->is_shared_buffer_bound = false;
1866 brw->is_shared_buffer_dirty = false;
1867 }
1868 }