/*
 Copyright 2003 VMware, Inc.
 Copyright (C) Intel Corp. 2006. All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */


#include "compiler/nir/nir.h"
#include "main/api_exec.h"
#include "main/context.h"
#include "main/fbobject.h"
#include "main/extensions.h"
#include "main/imports.h"
#include "main/macros.h"
#include "main/points.h"
#include "main/version.h"
#include "main/vtxfmt.h"
#include "main/texobj.h"
#include "main/framebuffer.h"
#include "main/stencil.h"
#include "main/state.h"

#include "vbo/vbo.h"

#include "drivers/common/driverfuncs.h"
#include "drivers/common/meta.h"
#include "utils.h"

#include "brw_context.h"
#include "brw_defines.h"
#include "brw_blorp.h"
#include "brw_draw.h"
#include "brw_state.h"

#include "intel_batchbuffer.h"
#include "intel_buffer_objects.h"
#include "intel_buffers.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
#include "intel_pixel.h"
#include "intel_image.h"
#include "intel_tex.h"
#include "intel_tex_obj.h"

#include "swrast_setup/swrast_setup.h"
#include "tnl/tnl.h"
#include "tnl/t_pipeline.h"
#include "util/ralloc.h"
#include "util/debug.h"
#include "util/disk_cache.h"
#include "isl/isl.h"

#include "common/gen_defines.h"

#include "compiler/spirv/nir_spirv.h"

/***************************************
 * Mesa's Driver Functions
 ***************************************/

const char *const brw_vendor_string = "Intel Open Source Technology Center";

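/* Braswell SKUs share a PCI ID, so use the EU count to pick the model
 * number ("405" or "400") for the renderer string.
 */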
static const char *
get_bsw_model(const struct intel_screen *screen)
{
   switch (screen->eu_total) {
   case 16:
      return "405";
   case 12:
      return "400";
   default:
      return "   ";
   }
}

const char *
brw_get_renderer_string(const struct intel_screen *screen)
{
   const char *chipset;
   static char buffer[128];
   char *bsw = NULL;

   switch (screen->deviceID) {
#undef CHIPSET
#define CHIPSET(id, symbol, str) case id: chipset = str; break;
#include "pci_ids/i965_pci_ids.h"
   default:
      chipset = "Unknown Intel Chipset";
      break;
   }

   /* Braswell branding is funny, so we have to fix it up here */
   if (screen->deviceID == 0x22B1) {
      bsw = strdup(chipset);
      char *needle = strstr(bsw, "XXX");
      if (needle) {
         memcpy(needle, get_bsw_model(screen), 3);
         chipset = bsw;
      }
   }

   (void) driGetRendererString(buffer, chipset, 0);
   free(bsw);
   return buffer;
}

static const GLubyte *
intel_get_string(struct gl_context * ctx, GLenum name)
{
   const struct brw_context *const brw = brw_context(ctx);

   switch (name) {
   case GL_VENDOR:
      return (GLubyte *) brw_vendor_string;

   case GL_RENDERER:
      return
         (GLubyte *) brw_get_renderer_string(brw->screen);

   default:
      return NULL;
   }
}

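/* Viewport driver hook, used when the loader can't send DRI2 invalidate
 * events: a viewport change on a window-system framebuffer is our hint
 * that the window may have been resized, so invalidate the drawables.
 */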
static void
intel_viewport(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);
   __DRIcontext *driContext = brw->driContext;

   if (_mesa_is_winsys_fbo(ctx->DrawBuffer)) {
      if (driContext->driDrawablePriv)
         dri2InvalidateDrawable(driContext->driDrawablePriv);
      if (driContext->driReadablePriv)
         dri2InvalidateDrawable(driContext->driReadablePriv);
   }
}

static void
intel_update_framebuffer(struct gl_context *ctx,
                         struct gl_framebuffer *fb)
{
   struct brw_context *brw = brw_context(ctx);

   /* Quantize the derived default number of samples
    */
   fb->DefaultGeometry._NumSamples =
      intel_quantize_num_samples(brw->screen,
                                 fb->DefaultGeometry.NumSamples);
}

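/* UpdateState driver hook: accumulate Mesa's dirty-state flags and derive
 * the stencil, polygon-orientation, and framebuffer state the rest of the
 * driver consumes.
 */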
static void
intel_update_state(struct gl_context * ctx)
{
   GLuint new_state = ctx->NewState;
   struct brw_context *brw = brw_context(ctx);

   if (ctx->swrast_context)
      _swrast_InvalidateState(ctx, new_state);

   brw->NewGLState |= new_state;

   if (new_state & (_NEW_SCISSOR | _NEW_BUFFERS | _NEW_VIEWPORT))
      _mesa_update_draw_buffer_bounds(ctx, ctx->DrawBuffer);

   if (new_state & (_NEW_STENCIL | _NEW_BUFFERS)) {
      brw->stencil_enabled = _mesa_stencil_is_enabled(ctx);
      brw->stencil_two_sided = _mesa_stencil_is_two_sided(ctx);
      brw->stencil_write_enabled =
         _mesa_stencil_is_write_enabled(ctx, brw->stencil_two_sided);
   }

   if (new_state & _NEW_POLYGON)
      brw->polygon_front_bit = _mesa_polygon_get_front_bit(ctx);

   if (new_state & _NEW_BUFFERS) {
      intel_update_framebuffer(ctx, ctx->DrawBuffer);
      if (ctx->DrawBuffer != ctx->ReadBuffer)
         intel_update_framebuffer(ctx, ctx->ReadBuffer);
   }
}

#define flushFront(screen) ((screen)->image.loader ? (screen)->image.loader->flushFrontBuffer : (screen)->dri2.loader->flushFrontBuffer)

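/* If the fake front buffer is dirty, resolve it and ask the loader to copy
 * it to the real front buffer.
 */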
static void
intel_flush_front(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);
   __DRIcontext *driContext = brw->driContext;
   __DRIdrawable *driDrawable = driContext->driDrawablePriv;
   __DRIscreen *const dri_screen = brw->screen->driScrnPriv;

   if (brw->front_buffer_dirty && _mesa_is_winsys_fbo(ctx->DrawBuffer)) {
      if (flushFront(dri_screen) && driDrawable &&
          driDrawable->loaderPrivate) {

         /* Resolve before flushing FAKE_FRONT_LEFT to FRONT_LEFT.
          *
          * This potentially resolves both front and back buffer. It
          * is unnecessary to resolve the back, but harms nothing except
          * performance. And no one cares about front-buffer render
          * performance.
          */
         intel_resolve_for_dri2_flush(brw, driDrawable);
         intel_batchbuffer_flush(brw);

         flushFront(dri_screen)(driDrawable, driDrawable->loaderPrivate);

         /* We set the dirty bit in intel_prepare_render() if we're
          * front buffer rendering once we get there.
          */
         brw->front_buffer_dirty = false;
      }
   }
}

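/* glFlush driver hook: submit the current batch and push any pending
 * front-buffer rendering out to the window system.
 */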
static void
intel_glFlush(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);

   intel_batchbuffer_flush(brw);
   intel_flush_front(ctx);

   brw->need_flush_throttle = true;
}

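/* glFinish driver hook: flush everything, then block until the last
 * submitted batch has finished executing on the GPU.
 */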
static void
intel_finish(struct gl_context * ctx)
{
   struct brw_context *brw = brw_context(ctx);

   intel_glFlush(ctx);

   if (brw->batch.last_bo)
      brw_bo_wait_rendering(brw->batch.last_bo);
}

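/* Fill in Mesa's dd_function_table with the hooks appropriate for this
 * hardware generation.
 */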
static void
brw_init_driver_functions(struct brw_context *brw,
                          struct dd_function_table *functions)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   _mesa_init_driver_functions(functions);

   /* GLX uses DRI2 invalidate events to handle window resizing.
    * Unfortunately, EGL does not - libEGL is written in XCB (not Xlib),
    * which doesn't provide a mechanism for snooping the event queues.
    *
    * So EGL still relies on viewport hacks to handle window resizing.
    * This should go away with DRI3000.
    */
   if (!brw->driContext->driScreenPriv->dri2.useInvalidate)
      functions->Viewport = intel_viewport;

   functions->Flush = intel_glFlush;
   functions->Finish = intel_finish;
   functions->GetString = intel_get_string;
   functions->UpdateState = intel_update_state;

   brw_init_draw_functions(functions);
   intelInitTextureFuncs(functions);
   intelInitTextureImageFuncs(functions);
   intelInitTextureCopyImageFuncs(functions);
   intelInitCopyImageFuncs(functions);
   intelInitClearFuncs(functions);
   intelInitBufferFuncs(functions);
   intelInitPixelFuncs(functions);
   intelInitBufferObjectFuncs(functions);
   brw_init_syncobj_functions(functions);
   brw_init_object_purgeable_functions(functions);

   brwInitFragProgFuncs( functions );
   brw_init_common_queryobj_functions(functions);
   if (devinfo->gen >= 8 || devinfo->is_haswell)
      hsw_init_queryobj_functions(functions);
   else if (devinfo->gen >= 6)
      gen6_init_queryobj_functions(functions);
   else
      gen4_init_queryobj_functions(functions);
   brw_init_compute_functions(functions);
   brw_init_conditional_render_functions(functions);

   functions->GenerateMipmap = brw_generate_mipmap;

   functions->QueryInternalFormat = brw_query_internal_format;

   functions->NewTransformFeedback = brw_new_transform_feedback;
   functions->DeleteTransformFeedback = brw_delete_transform_feedback;
   if (can_do_mi_math_and_lrr(brw->screen)) {
      functions->BeginTransformFeedback = hsw_begin_transform_feedback;
      functions->EndTransformFeedback = hsw_end_transform_feedback;
      functions->PauseTransformFeedback = hsw_pause_transform_feedback;
      functions->ResumeTransformFeedback = hsw_resume_transform_feedback;
   } else if (devinfo->gen >= 7) {
      functions->BeginTransformFeedback = gen7_begin_transform_feedback;
      functions->EndTransformFeedback = gen7_end_transform_feedback;
      functions->PauseTransformFeedback = gen7_pause_transform_feedback;
      functions->ResumeTransformFeedback = gen7_resume_transform_feedback;
      functions->GetTransformFeedbackVertexCount =
         brw_get_transform_feedback_vertex_count;
   } else {
      functions->BeginTransformFeedback = brw_begin_transform_feedback;
      functions->EndTransformFeedback = brw_end_transform_feedback;
      functions->PauseTransformFeedback = brw_pause_transform_feedback;
      functions->ResumeTransformFeedback = brw_resume_transform_feedback;
      functions->GetTransformFeedbackVertexCount =
         brw_get_transform_feedback_vertex_count;
   }

   if (devinfo->gen >= 6)
      functions->GetSamplePosition = gen6_get_sample_position;

   /* GL_ARB_get_program_binary */
   brw_program_binary_init(brw->screen->deviceID);
   functions->GetProgramBinaryDriverSHA1 = brw_get_program_binary_driver_sha1;
   functions->ProgramBinarySerializeDriverBlob = brw_serialize_program_binary;
   functions->ProgramBinaryDeserializeDriverBlob =
      brw_deserialize_program_binary;

   if (brw->screen->disk_cache) {
      functions->ShaderCacheSerializeDriverBlob = brw_program_serialize_nir;
   }
}

static void
brw_initialize_spirv_supported_capabilities(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct gl_context *ctx = &brw->ctx;

   /* The following SPIR-V capabilities are only supported on gen7+. In theory
    * you should enable the extension only on gen7+, but just in case let's
    * assert it.
    */
   assert(devinfo->gen >= 7);

   ctx->Const.SpirVCapabilities.float64 = devinfo->gen >= 8;
   ctx->Const.SpirVCapabilities.int64 = devinfo->gen >= 8;
   ctx->Const.SpirVCapabilities.tessellation = true;
   ctx->Const.SpirVCapabilities.draw_parameters = true;
   ctx->Const.SpirVCapabilities.image_write_without_format = true;
   ctx->Const.SpirVCapabilities.variable_pointers = true;
   ctx->Const.SpirVCapabilities.atomic_storage = devinfo->gen >= 7;
   ctx->Const.SpirVCapabilities.transform_feedback = devinfo->gen >= 7;
   ctx->Const.SpirVCapabilities.geometry_streams = devinfo->gen >= 7;
}

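/* Establish the GL implementation limits (ctx->Const) from the device info
 * and compiler capabilities.
 */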
static void
brw_initialize_context_constants(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct gl_context *ctx = &brw->ctx;
   const struct brw_compiler *compiler = brw->screen->compiler;

   const bool stage_exists[MESA_SHADER_STAGES] = {
      [MESA_SHADER_VERTEX] = true,
      [MESA_SHADER_TESS_CTRL] = devinfo->gen >= 7,
      [MESA_SHADER_TESS_EVAL] = devinfo->gen >= 7,
      [MESA_SHADER_GEOMETRY] = devinfo->gen >= 6,
      [MESA_SHADER_FRAGMENT] = true,
      [MESA_SHADER_COMPUTE] =
         (_mesa_is_desktop_gl(ctx) &&
          ctx->Const.MaxComputeWorkGroupSize[0] >= 1024) ||
         (ctx->API == API_OPENGLES2 &&
          ctx->Const.MaxComputeWorkGroupSize[0] >= 128),
   };

   unsigned num_stages = 0;
   for (int i = 0; i < MESA_SHADER_STAGES; i++) {
      if (stage_exists[i])
         num_stages++;
   }

   unsigned max_samplers =
      devinfo->gen >= 8 || devinfo->is_haswell ? BRW_MAX_TEX_UNIT : 16;

   ctx->Const.MaxDualSourceDrawBuffers = 1;
   ctx->Const.MaxDrawBuffers = BRW_MAX_DRAW_BUFFERS;
   ctx->Const.MaxCombinedShaderOutputResources =
      MAX_IMAGE_UNITS + BRW_MAX_DRAW_BUFFERS;

   /* The timestamp register we can read for glGetTimestamp() is
    * sometimes only 32 bits, before scaling to nanoseconds (depending
    * on kernel).
    *
    * Once scaled to nanoseconds the timestamp would roll over at a
    * non-power-of-two, so an application couldn't use
    * GL_QUERY_COUNTER_BITS to handle rollover correctly. Instead, we
    * report 36 bits and truncate at that (rolling over 5 times as
    * often as the HW counter), and when the 32-bit counter rolls
    * over, it happens to also be at a rollover in the reported value
    * from near (1<<36) to 0.
    *
    * The low 32 bits rolls over in ~343 seconds. Our 36-bit result
    * rolls over every ~69 seconds.
    */
   ctx->Const.QueryCounterBits.Timestamp = 36;

   ctx->Const.MaxTextureCoordUnits = 8; /* Mesa limit */
   ctx->Const.MaxImageUnits = MAX_IMAGE_UNITS;
   if (devinfo->gen >= 7) {
      ctx->Const.MaxRenderbufferSize = 16384;
      ctx->Const.MaxTextureLevels = MIN2(15 /* 16384 */, MAX_TEXTURE_LEVELS);
      ctx->Const.MaxCubeTextureLevels = 15; /* 16384 */
   } else {
      ctx->Const.MaxRenderbufferSize = 8192;
      ctx->Const.MaxTextureLevels = MIN2(14 /* 8192 */, MAX_TEXTURE_LEVELS);
      ctx->Const.MaxCubeTextureLevels = 14; /* 8192 */
   }
   ctx->Const.Max3DTextureLevels = 12; /* 2048 */
   ctx->Const.MaxArrayTextureLayers = devinfo->gen >= 7 ? 2048 : 512;
   ctx->Const.MaxTextureMbytes = 1536;
   ctx->Const.MaxTextureRectSize = devinfo->gen >= 7 ? 16384 : 8192;
   ctx->Const.MaxTextureMaxAnisotropy = 16.0;
   ctx->Const.MaxTextureLodBias = 15.0;
   ctx->Const.StripTextureBorder = true;
   if (devinfo->gen >= 7) {
      ctx->Const.MaxProgramTextureGatherComponents = 4;
      ctx->Const.MinProgramTextureGatherOffset = -32;
      ctx->Const.MaxProgramTextureGatherOffset = 31;
   } else if (devinfo->gen == 6) {
      ctx->Const.MaxProgramTextureGatherComponents = 1;
      ctx->Const.MinProgramTextureGatherOffset = -8;
      ctx->Const.MaxProgramTextureGatherOffset = 7;
   }

   ctx->Const.MaxUniformBlockSize = 65536;

   for (int i = 0; i < MESA_SHADER_STAGES; i++) {
      struct gl_program_constants *prog = &ctx->Const.Program[i];

      if (!stage_exists[i])
         continue;

      prog->MaxTextureImageUnits = max_samplers;

      prog->MaxUniformBlocks = BRW_MAX_UBO;
      prog->MaxCombinedUniformComponents =
         prog->MaxUniformComponents +
         ctx->Const.MaxUniformBlockSize / 4 * prog->MaxUniformBlocks;

      prog->MaxAtomicCounters = MAX_ATOMIC_COUNTERS;
      prog->MaxAtomicBuffers = BRW_MAX_ABO;
      prog->MaxImageUniforms = compiler->scalar_stage[i] ? BRW_MAX_IMAGES : 0;
      prog->MaxShaderStorageBlocks = BRW_MAX_SSBO;
   }

   ctx->Const.MaxTextureUnits =
      MIN2(ctx->Const.MaxTextureCoordUnits,
           ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits);

   ctx->Const.MaxUniformBufferBindings = num_stages * BRW_MAX_UBO;
   ctx->Const.MaxCombinedUniformBlocks = num_stages * BRW_MAX_UBO;
   ctx->Const.MaxCombinedAtomicBuffers = num_stages * BRW_MAX_ABO;
   ctx->Const.MaxCombinedShaderStorageBlocks = num_stages * BRW_MAX_SSBO;
   ctx->Const.MaxShaderStorageBufferBindings = num_stages * BRW_MAX_SSBO;
   ctx->Const.MaxCombinedTextureImageUnits = num_stages * max_samplers;
   ctx->Const.MaxCombinedImageUniforms = num_stages * BRW_MAX_IMAGES;


   /* Hardware only supports a limited number of transform feedback buffers.
    * So we need to override the Mesa default (which is based only on software
    * limits).
    */
   ctx->Const.MaxTransformFeedbackBuffers = BRW_MAX_SOL_BUFFERS;

   /* On Gen6, in the worst case, we use up one binding table entry per
    * transform feedback component (see comments above the definition of
    * BRW_MAX_SOL_BINDINGS, in brw_context.h), so we need to advertise a value
    * for MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS equal to
    * BRW_MAX_SOL_BINDINGS.
    *
    * In "separate components" mode, we need to divide this value by
    * BRW_MAX_SOL_BUFFERS, so that the total number of binding table entries
    * used up by all buffers will not exceed BRW_MAX_SOL_BINDINGS.
    */
   ctx->Const.MaxTransformFeedbackInterleavedComponents = BRW_MAX_SOL_BINDINGS;
   ctx->Const.MaxTransformFeedbackSeparateComponents =
      BRW_MAX_SOL_BINDINGS / BRW_MAX_SOL_BUFFERS;

   ctx->Const.AlwaysUseGetTransformFeedbackVertexCount =
      !can_do_mi_math_and_lrr(brw->screen);

   int max_samples;
   const int *msaa_modes = intel_supported_msaa_modes(brw->screen);
   const int clamp_max_samples =
      driQueryOptioni(&brw->optionCache, "clamp_max_samples");

   if (clamp_max_samples < 0) {
      max_samples = msaa_modes[0];
   } else {
      /* Select the largest supported MSAA mode that does not exceed
       * clamp_max_samples.
       */
      max_samples = 0;
      for (int i = 0; msaa_modes[i] != 0; ++i) {
         if (msaa_modes[i] <= clamp_max_samples) {
            max_samples = msaa_modes[i];
            break;
         }
      }
   }

   ctx->Const.MaxSamples = max_samples;
   ctx->Const.MaxColorTextureSamples = max_samples;
   ctx->Const.MaxDepthTextureSamples = max_samples;
   ctx->Const.MaxIntegerSamples = max_samples;
   ctx->Const.MaxImageSamples = 0;

   /* gen6_set_sample_maps() sets SampleMap{2,4,8}x variables which are used
    * to map indices of rectangular grid to sample numbers within a pixel.
    * These variables are used by GL_EXT_framebuffer_multisample_blit_scaled
    * extension implementation. For more details see the comment above
    * gen6_set_sample_maps() definition.
    */
   gen6_set_sample_maps(ctx);

   ctx->Const.MinLineWidth = 1.0;
   ctx->Const.MinLineWidthAA = 1.0;
   if (devinfo->gen >= 6) {
      ctx->Const.MaxLineWidth = 7.375;
      ctx->Const.MaxLineWidthAA = 7.375;
      ctx->Const.LineWidthGranularity = 0.125;
   } else {
      ctx->Const.MaxLineWidth = 7.0;
      ctx->Const.MaxLineWidthAA = 7.0;
      ctx->Const.LineWidthGranularity = 0.5;
   }

   /* For non-antialiased lines, we have to round the line width to the
    * nearest whole number. Make sure that we don't advertise a line
    * width that, when rounded, will be beyond the actual hardware
    * maximum.
    */
   assert(roundf(ctx->Const.MaxLineWidth) <= ctx->Const.MaxLineWidth);

   ctx->Const.MinPointSize = 1.0;
   ctx->Const.MinPointSizeAA = 1.0;
   ctx->Const.MaxPointSize = 255.0;
   ctx->Const.MaxPointSizeAA = 255.0;
   ctx->Const.PointSizeGranularity = 1.0;

   if (devinfo->gen >= 5 || devinfo->is_g4x)
      ctx->Const.MaxClipPlanes = 8;

   ctx->Const.GLSLTessLevelsAsInputs = true;
   ctx->Const.PrimitiveRestartForPatches = true;

   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeInstructions = 16 * 1024;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxAluInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxTexInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxTexIndirections = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAluInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTexInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTexIndirections = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAttribs = 16;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTemps = 256;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAddressRegs = 1;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeParameters = 1024;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxEnvParams =
      MIN2(ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeParameters,
           ctx->Const.Program[MESA_SHADER_VERTEX].MaxEnvParams);

   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeInstructions = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAluInstructions = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTexInstructions = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTexIndirections = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAttribs = 12;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTemps = 256;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAddressRegs = 0;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeParameters = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxEnvParams =
      MIN2(ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeParameters,
           ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxEnvParams);

   /* Fragment shaders use real, 32-bit twos-complement integers for all
    * integer types.
    */
   ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.RangeMin = 31;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.RangeMax = 30;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.Precision = 0;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].HighInt = ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MediumInt = ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt;

   ctx->Const.Program[MESA_SHADER_VERTEX].LowInt.RangeMin = 31;
   ctx->Const.Program[MESA_SHADER_VERTEX].LowInt.RangeMax = 30;
   ctx->Const.Program[MESA_SHADER_VERTEX].LowInt.Precision = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].HighInt = ctx->Const.Program[MESA_SHADER_VERTEX].LowInt;
   ctx->Const.Program[MESA_SHADER_VERTEX].MediumInt = ctx->Const.Program[MESA_SHADER_VERTEX].LowInt;

   /* Gen6 converts quads to polygons at the beginning of the 3D pipeline,
    * but we're not sure how vertex ordering is handled there, which
    * affects the provoking vertex decision. Always use the last-vertex
    * convention for quad primitives, which works as expected for now.
    */
   if (devinfo->gen >= 6)
      ctx->Const.QuadsFollowProvokingVertexConvention = false;

   ctx->Const.NativeIntegers = true;

   /* Regarding the CMP instruction, the Ivybridge PRM says:
    *
    *   "For each enabled channel 0b or 1b is assigned to the appropriate flag
    *    bit and 0/all zeros or all ones (e.g, byte 0xFF, word 0xFFFF, DWord
    *    0xFFFFFFFF) is assigned to dst."
    *
    * but PRMs for earlier generations say
    *
    *   "In dword format, one GRF may store up to 8 results. When the register
    *    is used later as a vector of Booleans, as only LSB at each channel
    *    contains meaning [sic] data, software should make sure all higher bits
    *    are masked out (e.g. by 'and-ing' an [sic] 0x01 constant)."
    *
    * We select the representation of a true boolean uniform to be ~0, and fix
    * the results of Gen <= 5 CMP instructions with -(result & 1).
    */
   ctx->Const.UniformBooleanTrue = ~0;

   /* From the gen4 PRM, volume 4 page 127:
    *
    *    "For SURFTYPE_BUFFER non-rendertarget surfaces, this field specifies
    *     the base address of the first element of the surface, computed in
    *     software by adding the surface base address to the byte offset of
    *     the element in the buffer."
    *
    * However, unaligned accesses are slower, so enforce buffer alignment.
    *
    * In order to push UBO data, 3DSTATE_CONSTANT_XS imposes an additional
    * restriction: the start of the buffer needs to be 32B aligned.
    */
   ctx->Const.UniformBufferOffsetAlignment = 32;

   /* ShaderStorageBufferOffsetAlignment should be a cacheline (64 bytes) so
    * that we can safely have the CPU and GPU writing the same SSBO on
    * non-cachecoherent systems (our Atom CPUs). With UBOs, the GPU never
    * writes, so there's no problem. For an SSBO, the GPU and the CPU can
    * be updating disjoint regions of the buffer simultaneously and that will
    * break if the regions overlap the same cacheline.
    */
   ctx->Const.ShaderStorageBufferOffsetAlignment = 64;
   ctx->Const.TextureBufferOffsetAlignment = 16;
   ctx->Const.MaxTextureBufferSize = 128 * 1024 * 1024;

   if (devinfo->gen >= 6) {
      ctx->Const.MaxVarying = 32;
      ctx->Const.Program[MESA_SHADER_VERTEX].MaxOutputComponents = 128;
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxInputComponents =
         compiler->scalar_stage[MESA_SHADER_GEOMETRY] ? 128 : 64;
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxOutputComponents = 128;
      ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxInputComponents = 128;
      ctx->Const.Program[MESA_SHADER_TESS_CTRL].MaxInputComponents = 128;
      ctx->Const.Program[MESA_SHADER_TESS_CTRL].MaxOutputComponents = 128;
      ctx->Const.Program[MESA_SHADER_TESS_EVAL].MaxInputComponents = 128;
      ctx->Const.Program[MESA_SHADER_TESS_EVAL].MaxOutputComponents = 128;
   }

   /* We want the GLSL compiler to emit code that uses condition codes */
   for (int i = 0; i < MESA_SHADER_STAGES; i++) {
      ctx->Const.ShaderCompilerOptions[i] =
         brw->screen->compiler->glsl_compiler_options[i];
   }

   if (devinfo->gen >= 7) {
      ctx->Const.MaxViewportWidth = 32768;
      ctx->Const.MaxViewportHeight = 32768;
   }

   /* ARB_viewport_array, OES_viewport_array */
   if (devinfo->gen >= 6) {
      ctx->Const.MaxViewports = GEN6_NUM_VIEWPORTS;
      ctx->Const.ViewportSubpixelBits = 0;

      /* Cast to float before negating because MaxViewportWidth is unsigned.
       */
      ctx->Const.ViewportBounds.Min = -(float)ctx->Const.MaxViewportWidth;
      ctx->Const.ViewportBounds.Max = ctx->Const.MaxViewportWidth;
   }

   /* ARB_gpu_shader5 */
   if (devinfo->gen >= 7)
      ctx->Const.MaxVertexStreams = MIN2(4, MAX_VERTEX_STREAMS);

   /* ARB_framebuffer_no_attachments */
   ctx->Const.MaxFramebufferWidth = 16384;
   ctx->Const.MaxFramebufferHeight = 16384;
   ctx->Const.MaxFramebufferLayers = ctx->Const.MaxArrayTextureLayers;
   ctx->Const.MaxFramebufferSamples = max_samples;

   /* OES_primitive_bounding_box */
   ctx->Const.NoPrimitiveBoundingBoxOutput = true;

   /* TODO: We should be able to use STD430 packing by default on all hardware
    * but some piglit tests [1] currently fail on SNB when this is enabled.
    * The problem is the messages we're using for doing uniform pulls
    * in the vec4 back-end on SNB is the OWORD block load instruction, which
    * takes its offset in units of OWORDS (16 bytes). On IVB+, we use the
    * sampler which doesn't have these restrictions.
    *
    * In the scalar back-end, we use the sampler for dynamic uniform loads and
    * pull an entire cache line at a time for constant offset loads both of
    * which support almost any alignment.
    *
    * [1] glsl-1.40/uniform_buffer/vs-float-array-variable-index.shader_test
    */
   if (devinfo->gen >= 7)
      ctx->Const.UseSTD430AsDefaultPacking = true;

   if (!(ctx->Const.ContextFlags & GL_CONTEXT_FLAG_DEBUG_BIT))
      ctx->Const.AllowMappedBuffersDuringExecution = true;

   /* GL_ARB_get_program_binary */
   ctx->Const.NumProgramBinaryFormats = 1;
}

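/* Set the compute-shader context constants from the device's CS thread
 * limits.
 */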
static void
brw_initialize_cs_context_constants(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   const struct intel_screen *screen = brw->screen;
   struct gen_device_info *devinfo = &brw->screen->devinfo;

   /* FINISHME: Do this for all platforms that the kernel supports */
   if (devinfo->is_cherryview &&
       screen->subslice_total > 0 && screen->eu_total > 0) {
      /* Logical CS threads = EUs per subslice * 7 threads per EU */
      uint32_t max_cs_threads = screen->eu_total / screen->subslice_total * 7;

      /* Fuse configurations may give more threads than expected, never less. */
      if (max_cs_threads > devinfo->max_cs_threads)
         devinfo->max_cs_threads = max_cs_threads;
   }

   /* Maximum number of scalar compute shader invocations that can be run in
    * parallel in the same subslice assuming SIMD32 dispatch.
    *
    * We don't advertise more than 64 threads, because we are limited to 64 by
    * our usage of thread_width_max in the gpgpu walker command. This only
    * currently impacts Haswell, which otherwise might be able to advertise 70
    * threads. With SIMD32 and 64 threads, Haswell still provides twice the
    * number of invocations required by ARB_compute_shader.
    */
   const unsigned max_threads = MIN2(64, devinfo->max_cs_threads);
   const uint32_t max_invocations = 32 * max_threads;
   ctx->Const.MaxComputeWorkGroupSize[0] = max_invocations;
   ctx->Const.MaxComputeWorkGroupSize[1] = max_invocations;
   ctx->Const.MaxComputeWorkGroupSize[2] = max_invocations;
   ctx->Const.MaxComputeWorkGroupInvocations = max_invocations;
   ctx->Const.MaxComputeSharedMemorySize = 64 * 1024;
}

/**
 * Process driconf (drirc) options, setting appropriate context flags.
 *
 * intelInitExtensions still pokes at optionCache directly, in order to
 * avoid advertising various extensions. No flags are set, so it makes
 * sense to continue doing that there.
 */
static void
brw_process_driconf_options(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct gl_context *ctx = &brw->ctx;

   driOptionCache *options = &brw->optionCache;
   driParseConfigFiles(options, &brw->screen->optionCache,
                       brw->driContext->driScreenPriv->myNum, "i965");

   int bo_reuse_mode = driQueryOptioni(options, "bo_reuse");
   switch (bo_reuse_mode) {
   case DRI_CONF_BO_REUSE_DISABLED:
      break;
   case DRI_CONF_BO_REUSE_ALL:
      brw_bufmgr_enable_reuse(brw->bufmgr);
      break;
   }

   if (INTEL_DEBUG & DEBUG_NO_HIZ) {
      brw->has_hiz = false;
      /* On gen6, you can only do separate stencil with HIZ. */
      if (devinfo->gen == 6)
         brw->has_separate_stencil = false;
   }

   if (driQueryOptionb(options, "mesa_no_error"))
      ctx->Const.ContextFlags |= GL_CONTEXT_FLAG_NO_ERROR_BIT_KHR;

   if (driQueryOptionb(options, "always_flush_batch")) {
      fprintf(stderr, "flushing batchbuffer before/after each draw call\n");
      brw->always_flush_batch = true;
   }

   if (driQueryOptionb(options, "always_flush_cache")) {
      fprintf(stderr, "flushing GPU caches before/after each draw call\n");
      brw->always_flush_cache = true;
   }

   if (driQueryOptionb(options, "disable_throttling")) {
      fprintf(stderr, "disabling flush throttling\n");
      brw->disable_throttling = true;
   }

   brw->precompile = driQueryOptionb(&brw->optionCache, "shader_precompile");

   if (driQueryOptionb(&brw->optionCache, "precise_trig"))
      brw->screen->compiler->precise_trig = true;

   ctx->Const.ForceGLSLExtensionsWarn =
      driQueryOptionb(options, "force_glsl_extensions_warn");

   ctx->Const.ForceGLSLVersion =
      driQueryOptioni(options, "force_glsl_version");

   ctx->Const.DisableGLSLLineContinuations =
      driQueryOptionb(options, "disable_glsl_line_continuations");

   ctx->Const.AllowGLSLExtensionDirectiveMidShader =
      driQueryOptionb(options, "allow_glsl_extension_directive_midshader");

   ctx->Const.AllowGLSLBuiltinVariableRedeclaration =
      driQueryOptionb(options, "allow_glsl_builtin_variable_redeclaration");

   ctx->Const.AllowHigherCompatVersion =
      driQueryOptionb(options, "allow_higher_compat_version");

   ctx->Const.ForceGLSLAbsSqrt =
      driQueryOptionb(options, "force_glsl_abs_sqrt");

   ctx->Const.GLSLZeroInit = driQueryOptionb(options, "glsl_zero_init");

   brw->dual_color_blend_by_location =
      driQueryOptionb(options, "dual_color_blend_by_location");

   ctx->Const.AllowGLSLCrossStageInterpolationMismatch =
      driQueryOptionb(options, "allow_glsl_cross_stage_interpolation_mismatch");

   ctx->Const.dri_config_options_sha1 = ralloc_array(brw, unsigned char, 20);
   driComputeOptionsSha1(&brw->screen->optionCache,
                         ctx->Const.dri_config_options_sha1);
}

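/* Main context-creation entry point: allocates the brw_context, initializes
 * the Mesa context, and sets up the kernel hardware context.
 */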
GLboolean
brwCreateContext(gl_api api,
                 const struct gl_config *mesaVis,
                 __DRIcontext *driContextPriv,
                 const struct __DriverContextConfig *ctx_config,
                 unsigned *dri_ctx_error,
                 void *sharedContextPrivate)
{
   struct gl_context *shareCtx = (struct gl_context *) sharedContextPrivate;
   struct intel_screen *screen = driContextPriv->driScreenPriv->driverPrivate;
   const struct gen_device_info *devinfo = &screen->devinfo;
   struct dd_function_table functions;

   /* Only allow the __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS flag if the kernel
    * provides us with context reset notifications.
    */
   uint32_t allowed_flags = __DRI_CTX_FLAG_DEBUG |
                            __DRI_CTX_FLAG_FORWARD_COMPATIBLE |
                            __DRI_CTX_FLAG_NO_ERROR;

   if (screen->has_context_reset_notification)
      allowed_flags |= __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS;

   if (ctx_config->flags & ~allowed_flags) {
      *dri_ctx_error = __DRI_CTX_ERROR_UNKNOWN_FLAG;
      return false;
   }

   if (ctx_config->attribute_mask &
       ~(__DRIVER_CONTEXT_ATTRIB_RESET_STRATEGY |
         __DRIVER_CONTEXT_ATTRIB_PRIORITY)) {
      *dri_ctx_error = __DRI_CTX_ERROR_UNKNOWN_ATTRIBUTE;
      return false;
   }

   bool notify_reset =
      ((ctx_config->attribute_mask & __DRIVER_CONTEXT_ATTRIB_RESET_STRATEGY) &&
       ctx_config->reset_strategy != __DRI_CTX_RESET_NO_NOTIFICATION);

   struct brw_context *brw = rzalloc(NULL, struct brw_context);
   if (!brw) {
      fprintf(stderr, "%s: failed to alloc context\n", __func__);
      *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
      return false;
   }

   driContextPriv->driverPrivate = brw;
   brw->driContext = driContextPriv;
   brw->screen = screen;
   brw->bufmgr = screen->bufmgr;

   brw->has_hiz = devinfo->has_hiz_and_separate_stencil;
   brw->has_separate_stencil = devinfo->has_hiz_and_separate_stencil;

   brw->has_swizzling = screen->hw_has_swizzling;

   brw->isl_dev = screen->isl_dev;

   brw->vs.base.stage = MESA_SHADER_VERTEX;
   brw->tcs.base.stage = MESA_SHADER_TESS_CTRL;
   brw->tes.base.stage = MESA_SHADER_TESS_EVAL;
   brw->gs.base.stage = MESA_SHADER_GEOMETRY;
   brw->wm.base.stage = MESA_SHADER_FRAGMENT;
   brw->cs.base.stage = MESA_SHADER_COMPUTE;

   brw_init_driver_functions(brw, &functions);

   if (notify_reset)
      functions.GetGraphicsResetStatus = brw_get_graphics_reset_status;

   struct gl_context *ctx = &brw->ctx;

   if (!_mesa_initialize_context(ctx, api, mesaVis, shareCtx, &functions)) {
      *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
      fprintf(stderr, "%s: failed to init mesa context\n", __func__);
      intelDestroyContext(driContextPriv);
      return false;
   }

   driContextSetFlags(ctx, ctx_config->flags);

   /* Initialize the software rasterizer and helper modules.
    *
    * As of GL 3.1 core, the gen4+ driver doesn't need the swrast context for
    * software fallbacks (which we have to support on legacy GL to do weird
    * glDrawPixels(), glBitmap(), and other functions).
    */
   if (api != API_OPENGL_CORE && api != API_OPENGLES2) {
      _swrast_CreateContext(ctx);
   }

   _vbo_CreateContext(ctx);
   if (ctx->swrast_context) {
      _tnl_CreateContext(ctx);
      TNL_CONTEXT(ctx)->Driver.RunPipeline = _tnl_run_pipeline;
      _swsetup_CreateContext(ctx);

      /* Configure swrast to match hardware characteristics: */
      _swrast_allow_pixel_fog(ctx, false);
      _swrast_allow_vertex_fog(ctx, true);
   }

   _mesa_meta_init(ctx);

   brw_process_driconf_options(brw);

   if (INTEL_DEBUG & DEBUG_PERF)
      brw->perf_debug = true;

   brw_initialize_cs_context_constants(brw);
   brw_initialize_context_constants(brw);

   ctx->Const.ResetStrategy = notify_reset
      ? GL_LOSE_CONTEXT_ON_RESET_ARB : GL_NO_RESET_NOTIFICATION_ARB;

   /* Reinitialize the context point state. It depends on ctx->Const values. */
   _mesa_init_point(ctx);

   intel_fbo_init(brw);

   intel_batchbuffer_init(brw);

   /* Create a new hardware context. Using a hardware context means that
    * our GPU state will be saved/restored on context switch, allowing us
    * to assume that the GPU is in the same state we left it in.
    *
    * This is required for transform feedback buffer offsets, query objects,
    * and also allows us to reduce how much state we have to emit.
    */
   brw->hw_ctx = brw_create_hw_context(brw->bufmgr);
   if (!brw->hw_ctx && devinfo->gen >= 6) {
      fprintf(stderr, "Failed to create hardware context.\n");
      intelDestroyContext(driContextPriv);
      return false;
   }

   if (brw->hw_ctx) {
      int hw_priority = GEN_CONTEXT_MEDIUM_PRIORITY;
      if (ctx_config->attribute_mask & __DRIVER_CONTEXT_ATTRIB_PRIORITY) {
         switch (ctx_config->priority) {
         case __DRI_CTX_PRIORITY_LOW:
            hw_priority = GEN_CONTEXT_LOW_PRIORITY;
            break;
         case __DRI_CTX_PRIORITY_HIGH:
            hw_priority = GEN_CONTEXT_HIGH_PRIORITY;
            break;
         }
      }
      if (hw_priority != I915_CONTEXT_DEFAULT_PRIORITY &&
          brw_hw_context_set_priority(brw->bufmgr, brw->hw_ctx, hw_priority)) {
         fprintf(stderr,
                 "Failed to set priority [%d:%d] for hardware context.\n",
                 ctx_config->priority, hw_priority);
         intelDestroyContext(driContextPriv);
         return false;
      }
   }

   if (brw_init_pipe_control(brw, devinfo)) {
      *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
      intelDestroyContext(driContextPriv);
      return false;
   }

   if (devinfo->gen == 11) {
      fprintf(stderr,
              "WARNING: i965 does not fully support Gen11 yet.\n"
              "Instability or lower performance might occur.\n");
   }

   brw_upload_init(&brw->upload, brw->bufmgr, 65536);

   brw_init_state(brw);

   intelInitExtensions(ctx);

   brw_init_surface_formats(brw);

   brw_blorp_init(brw);

   brw->urb.size = devinfo->urb.size;

   if (devinfo->gen == 6)
      brw->urb.gs_present = false;

   brw->prim_restart.in_progress = false;
   brw->prim_restart.enable_cut_index = false;
   brw->gs.enabled = false;
   brw->clip.viewport_count = 1;

   brw->predicate.state = BRW_PREDICATE_STATE_RENDER;

   brw->max_gtt_map_object_size = screen->max_gtt_map_object_size;

   ctx->VertexProgram._MaintainTnlProgram = true;
   ctx->FragmentProgram._MaintainTexEnvProgram = true;

   brw_draw_init( brw );

   if ((ctx_config->flags & __DRI_CTX_FLAG_DEBUG) != 0) {
      /* Turn on some extra GL_ARB_debug_output generation. */
      brw->perf_debug = true;
   }

   if ((ctx_config->flags & __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS) != 0) {
      ctx->Const.ContextFlags |= GL_CONTEXT_FLAG_ROBUST_ACCESS_BIT_ARB;
      ctx->Const.RobustAccess = GL_TRUE;
   }

   if (INTEL_DEBUG & DEBUG_SHADER_TIME)
      brw_init_shader_time(brw);

   _mesa_override_extensions(ctx);
   _mesa_compute_version(ctx);

   /* GL_ARB_gl_spirv */
   if (ctx->Extensions.ARB_gl_spirv)
      brw_initialize_spirv_supported_capabilities(brw);

   _mesa_initialize_dispatch_tables(ctx);
   _mesa_initialize_vbo_vtxfmt(ctx);

   if (ctx->Extensions.INTEL_performance_query)
      brw_init_performance_queries(brw);

   vbo_use_buffer_objects(ctx);
   vbo_always_unmap_buffers(ctx);

   brw->ctx.Cache = brw->screen->disk_cache;

   return true;
}

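/* Tear down everything brwCreateContext() created: per-stage BOs, the
 * hardware context, the batchbuffer, and the Mesa context itself.
 */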
void
intelDestroyContext(__DRIcontext * driContextPriv)
{
   struct brw_context *brw =
      (struct brw_context *) driContextPriv->driverPrivate;
   struct gl_context *ctx = &brw->ctx;

   _mesa_meta_free(&brw->ctx);

   if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
      /* Force a report. */
      brw->shader_time.report_time = 0;

      brw_collect_and_report_shader_time(brw);
      brw_destroy_shader_time(brw);
   }

   blorp_finish(&brw->blorp);

   brw_destroy_state(brw);
   brw_draw_destroy(brw);

   brw_bo_unreference(brw->curbe.curbe_bo);

   brw_bo_unreference(brw->vs.base.scratch_bo);
   brw_bo_unreference(brw->tcs.base.scratch_bo);
   brw_bo_unreference(brw->tes.base.scratch_bo);
   brw_bo_unreference(brw->gs.base.scratch_bo);
   brw_bo_unreference(brw->wm.base.scratch_bo);

   brw_bo_unreference(brw->vs.base.push_const_bo);
   brw_bo_unreference(brw->tcs.base.push_const_bo);
   brw_bo_unreference(brw->tes.base.push_const_bo);
   brw_bo_unreference(brw->gs.base.push_const_bo);
   brw_bo_unreference(brw->wm.base.push_const_bo);

   brw_destroy_hw_context(brw->bufmgr, brw->hw_ctx);

   if (ctx->swrast_context) {
      _swsetup_DestroyContext(&brw->ctx);
      _tnl_DestroyContext(&brw->ctx);
   }
   _vbo_DestroyContext(&brw->ctx);

   if (ctx->swrast_context)
      _swrast_DestroyContext(&brw->ctx);

   brw_fini_pipe_control(brw);
   intel_batchbuffer_free(&brw->batch);

   brw_bo_unreference(brw->throttle_batch[1]);
   brw_bo_unreference(brw->throttle_batch[0]);
   brw->throttle_batch[1] = NULL;
   brw->throttle_batch[0] = NULL;

   driDestroyOptionCache(&brw->optionCache);

   /* free the Mesa context */
   _mesa_free_context_data(&brw->ctx);

   ralloc_free(brw);
   driContextPriv->driverPrivate = NULL;
}

GLboolean
intelUnbindContext(__DRIcontext * driContextPriv)
{
   /* Unset current context and dispatch table */
   _mesa_make_current(NULL, NULL, NULL);

   return true;
}

/**
 * Fixes up the context for GLES2/3 with our default-to-sRGB-capable behavior
 * on window system framebuffers.
 *
 * Desktop GL is fairly reasonable in its handling of sRGB: You can ask if
 * your renderbuffer can do sRGB encode, and you can flip a switch that does
 * sRGB encode if the renderbuffer can handle it. You can ask specifically
 * for a visual where you're guaranteed to be capable, but it turns out that
 * everyone just makes all their ARGB8888 visuals capable and doesn't offer
 * incapable ones, because there's no difference between the two in resources
 * used. Applications thus get built that accidentally rely on the default
 * visual choice being sRGB, so we make ours sRGB capable. Everything sounds
 * great...
 *
 * But for GLES2/3, they decided that it was silly to not turn on sRGB encode
 * for sRGB renderbuffers you made with the GL_EXT_texture_sRGB equivalent.
 * So they removed the enable knob and made it "if the renderbuffer is sRGB
 * capable, do sRGB encode". Then, for your window system renderbuffers, you
 * can ask for sRGB visuals and get sRGB encode, or not ask for sRGB visuals
 * and get no sRGB encode (assuming that both kinds of visual are available).
 * Thus our choice to support sRGB by default on our visuals for desktop would
 * result in broken rendering of GLES apps that aren't expecting sRGB encode.
 *
 * Unfortunately, renderbuffer setup happens before a context is created. So
 * in intel_screen.c we always set up sRGB, and here, if you're a GLES2/3
 * context (without an sRGB visual), we go turn that back off before anyone
 * finds out.
 */
static void
intel_gles3_srgb_workaround(struct brw_context *brw,
                            struct gl_framebuffer *fb)
{
   struct gl_context *ctx = &brw->ctx;

   if (_mesa_is_desktop_gl(ctx) || !fb->Visual.sRGBCapable)
      return;

   for (int i = 0; i < BUFFER_COUNT; i++) {
      struct gl_renderbuffer *rb = fb->Attachment[i].Renderbuffer;

      /* Check if sRGB was specifically asked for. */
      struct intel_renderbuffer *irb = intel_get_renderbuffer(fb, i);
      if (irb && irb->need_srgb)
         return;

      if (rb)
         rb->Format = _mesa_get_srgb_format_linear(rb->Format);
   }
   /* Disable sRGB from framebuffers that are not compatible. */
   fb->Visual.sRGBCapable = false;
}

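/* MakeCurrent entry point: bind the context to the given draw/read
 * drawables (or unbind everything if no context is given), applying the
 * GLES sRGB workaround to the framebuffers first.
 */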
GLboolean
intelMakeCurrent(__DRIcontext * driContextPriv,
                 __DRIdrawable * driDrawPriv,
                 __DRIdrawable * driReadPriv)
{
   struct brw_context *brw;

   if (driContextPriv)
      brw = (struct brw_context *) driContextPriv->driverPrivate;
   else
      brw = NULL;

   if (driContextPriv) {
      struct gl_context *ctx = &brw->ctx;
      struct gl_framebuffer *fb, *readFb;

      if (driDrawPriv == NULL) {
         fb = _mesa_get_incomplete_framebuffer();
      } else {
         fb = driDrawPriv->driverPrivate;
         driContextPriv->dri2.draw_stamp = driDrawPriv->dri2.stamp - 1;
      }

      if (driReadPriv == NULL) {
         readFb = _mesa_get_incomplete_framebuffer();
      } else {
         readFb = driReadPriv->driverPrivate;
         driContextPriv->dri2.read_stamp = driReadPriv->dri2.stamp - 1;
      }

      /* The sRGB workaround changes the renderbuffer's format. We must change
       * the format before the renderbuffer's miptree gets allocated, otherwise
       * the formats of the renderbuffer and its miptree will differ.
       */
      intel_gles3_srgb_workaround(brw, fb);
      intel_gles3_srgb_workaround(brw, readFb);

      /* If the context viewport hasn't been initialized, force a call out to
       * the loader to get buffers so we have a drawable size for the initial
       * viewport. */
      if (!brw->ctx.ViewportInitialized)
         intel_prepare_render(brw);

      _mesa_make_current(ctx, fb, readFb);
   } else {
      _mesa_make_current(NULL, NULL, NULL);
   }

   return true;
}

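/* Downsample and resolve the window-system buffers as needed so their
 * contents are safe to hand to the window system.
 */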
void
intel_resolve_for_dri2_flush(struct brw_context *brw,
                             __DRIdrawable *drawable)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   if (devinfo->gen < 6) {
      /* MSAA and fast color clear are not supported, so don't waste time
       * checking whether a resolve is needed.
       */
      return;
   }

   struct gl_framebuffer *fb = drawable->driverPrivate;
   struct intel_renderbuffer *rb;

   /* Usually, only the back buffer will need to be downsampled. However,
    * the front buffer will also need it if the user has rendered into it.
    */
   static const gl_buffer_index buffers[2] = {
      BUFFER_BACK_LEFT,
      BUFFER_FRONT_LEFT,
   };

   for (int i = 0; i < 2; ++i) {
      rb = intel_get_renderbuffer(fb, buffers[i]);
      if (rb == NULL || rb->mt == NULL)
         continue;
      if (rb->mt->surf.samples == 1) {
         assert(rb->mt_layer == 0 && rb->mt_level == 0 &&
                rb->layer_count == 1);
         intel_miptree_prepare_external(brw, rb->mt);
      } else {
         intel_renderbuffer_downsample(brw, rb);

         /* Call prepare_external on the single-sample miptree to do any
          * needed resolves prior to handing it off to the window system.
          * This is needed in the case that rb->singlesample_mt is Y-tiled
          * with CCS_E enabled but without I915_FORMAT_MOD_Y_TILED_CCS_E. In
          * this case, the MSAA resolve above will write compressed data into
          * rb->singlesample_mt.
          *
          * TODO: Some day, if we decide to care about the tiny performance
          * hit we're taking by doing the MSAA resolve and then a CCS resolve,
          * we could detect this case and just allocate the single-sampled
          * miptree without aux. However, that would be a lot of plumbing and
          * this is a rather exotic case so it's not really worth it.
          */
         intel_miptree_prepare_external(brw, rb->singlesample_mt);
      }
   }
}

static unsigned
intel_bits_per_pixel(const struct intel_renderbuffer *rb)
{
   return _mesa_get_format_bytes(intel_rb_format(rb)) * 8;
}

static void
intel_query_dri2_buffers(struct brw_context *brw,
                         __DRIdrawable *drawable,
                         __DRIbuffer **buffers,
                         int *count);

static void
intel_process_dri2_buffer(struct brw_context *brw,
                          __DRIdrawable *drawable,
                          __DRIbuffer *buffer,
                          struct intel_renderbuffer *rb,
                          const char *buffer_name);

static void
intel_update_image_buffers(struct brw_context *brw, __DRIdrawable *drawable);

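/* DRI2 path of intel_update_renderbuffers(): query the loader for the
 * current buffers and attach each one to the matching renderbuffer.
 */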
static void
intel_update_dri2_buffers(struct brw_context *brw, __DRIdrawable *drawable)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;
   struct intel_renderbuffer *rb;
   __DRIbuffer *buffers = NULL;
   int count;
   const char *region_name;

   /* Set this up front, so that in case our buffers get invalidated
    * while we're getting new buffers, we don't clobber the stamp and
    * thus ignore the invalidate. */
   drawable->lastStamp = drawable->dri2.stamp;

   if (unlikely(INTEL_DEBUG & DEBUG_DRI))
      fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);

   intel_query_dri2_buffers(brw, drawable, &buffers, &count);

   if (buffers == NULL)
      return;

   for (int i = 0; i < count; i++) {
      switch (buffers[i].attachment) {
      case __DRI_BUFFER_FRONT_LEFT:
         rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
         region_name = "dri2 front buffer";
         break;

      case __DRI_BUFFER_FAKE_FRONT_LEFT:
         rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
         region_name = "dri2 fake front buffer";
         break;

      case __DRI_BUFFER_BACK_LEFT:
         rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
         region_name = "dri2 back buffer";
         break;

      case __DRI_BUFFER_DEPTH:
      case __DRI_BUFFER_HIZ:
      case __DRI_BUFFER_DEPTH_STENCIL:
      case __DRI_BUFFER_STENCIL:
      case __DRI_BUFFER_ACCUM:
      default:
         fprintf(stderr,
                 "unhandled buffer attach event, attachment type %d\n",
                 buffers[i].attachment);
         return;
      }

      intel_process_dri2_buffer(brw, drawable, &buffers[i], rb, region_name);
   }
}

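/* Update the drawable's renderbuffers to match the current window-system
 * buffers, using whichever loader interface (image or DRI2) is available.
 */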
void
intel_update_renderbuffers(__DRIcontext *context, __DRIdrawable *drawable)
{
   struct brw_context *brw = context->driverPrivate;
   __DRIscreen *dri_screen = brw->screen->driScrnPriv;

   /* Set this up front, so that in case our buffers get invalidated
    * while we're getting new buffers, we don't clobber the stamp and
    * thus ignore the invalidate. */
   drawable->lastStamp = drawable->dri2.stamp;

   if (unlikely(INTEL_DEBUG & DEBUG_DRI))
      fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);

   if (dri_screen->image.loader)
      intel_update_image_buffers(brw, drawable);
   else
      intel_update_dri2_buffers(brw, drawable);

   driUpdateFramebufferSize(&brw->ctx, drawable);
}

/**
 * intel_prepare_render should be called anywhere that current read/drawbuffer
 * state is required.
 */
void
intel_prepare_render(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   __DRIcontext *driContext = brw->driContext;
   __DRIdrawable *drawable;

   drawable = driContext->driDrawablePriv;
   if (drawable && drawable->dri2.stamp != driContext->dri2.draw_stamp) {
      if (drawable->lastStamp != drawable->dri2.stamp)
         intel_update_renderbuffers(driContext, drawable);
      driContext->dri2.draw_stamp = drawable->dri2.stamp;
   }

   drawable = driContext->driReadablePriv;
   if (drawable && drawable->dri2.stamp != driContext->dri2.read_stamp) {
      if (drawable->lastStamp != drawable->dri2.stamp)
         intel_update_renderbuffers(driContext, drawable);
      driContext->dri2.read_stamp = drawable->dri2.stamp;
   }

   /* If we're currently rendering to the front buffer, the rendering
    * that will happen next will probably dirty the front buffer. So
    * mark it as dirty here.
    */
   if (_mesa_is_front_buffer_drawing(ctx->DrawBuffer))
      brw->front_buffer_dirty = true;
}

/**
 * \brief Query DRI2 to obtain a DRIdrawable's buffers.
 *
 * To determine which DRI buffers to request, examine the renderbuffers
 * attached to the drawable's framebuffer. Then request the buffers with
 * DRI2GetBuffers() or DRI2GetBuffersWithFormat().
 *
 * This is called from intel_update_renderbuffers().
 *
 * \param drawable      Drawable whose buffers are queried.
 * \param buffers       [out] List of buffers returned by DRI2 query.
 * \param buffer_count  [out] Number of buffers returned.
 *
 * \see intel_update_renderbuffers()
 * \see DRI2GetBuffers()
 * \see DRI2GetBuffersWithFormat()
 */
static void
intel_query_dri2_buffers(struct brw_context *brw,
                         __DRIdrawable *drawable,
                         __DRIbuffer **buffers,
                         int *buffer_count)
{
   __DRIscreen *dri_screen = brw->screen->driScrnPriv;
   struct gl_framebuffer *fb = drawable->driverPrivate;
   int i = 0;
   unsigned attachments[8];

   struct intel_renderbuffer *front_rb;
   struct intel_renderbuffer *back_rb;

   front_rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
   back_rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);

   memset(attachments, 0, sizeof(attachments));
   if ((_mesa_is_front_buffer_drawing(fb) ||
        _mesa_is_front_buffer_reading(fb) ||
        !back_rb) && front_rb) {
      /* If a fake front buffer is in use, then querying for
       * __DRI_BUFFER_FRONT_LEFT will cause the server to copy the image from
       * the real front buffer to the fake front buffer. So before doing the
       * query, we need to make sure all the pending drawing has landed in the
       * real front buffer.
       */
      intel_batchbuffer_flush(brw);
      intel_flush_front(&brw->ctx);

      attachments[i++] = __DRI_BUFFER_FRONT_LEFT;
      attachments[i++] = intel_bits_per_pixel(front_rb);
   } else if (front_rb && brw->front_buffer_dirty) {
      /* We have pending front buffer rendering, but we aren't querying for a
       * front buffer. If the front buffer we have is a fake front buffer,
       * the X server is going to throw it away when it processes the query.
       * So before doing the query, make sure all the pending drawing has
       * landed in the real front buffer.
       */
      intel_batchbuffer_flush(brw);
      intel_flush_front(&brw->ctx);
   }

   if (back_rb) {
      attachments[i++] = __DRI_BUFFER_BACK_LEFT;
      attachments[i++] = intel_bits_per_pixel(back_rb);
   }

   assert(i <= ARRAY_SIZE(attachments));

   *buffers =
      dri_screen->dri2.loader->getBuffersWithFormat(drawable,
                                                    &drawable->w,
                                                    &drawable->h,
                                                    attachments, i / 2,
                                                    buffer_count,
                                                    drawable->loaderPrivate);
}

/**
 * \brief Assign a DRI buffer's DRM region to a renderbuffer.
 *
 * This is called from intel_update_renderbuffers().
 *
 * \par Note:
 *    DRI buffers whose attachment point is DRI2BufferStencil or
 *    DRI2BufferDepthStencil are handled as special cases.
 *
 * \param buffer_name is a human readable name, such as "dri2 front buffer",
 *        that is passed to brw_bo_gem_create_from_name().
 *
 * \see intel_update_renderbuffers()
 */
static void
intel_process_dri2_buffer(struct brw_context *brw,
                          __DRIdrawable *drawable,
                          __DRIbuffer *buffer,
                          struct intel_renderbuffer *rb,
                          const char *buffer_name)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;
   struct brw_bo *bo;

   if (!rb)
      return;

   unsigned num_samples = rb->Base.Base.NumSamples;

   /* We try to avoid closing and reopening the same BO name, because the first
    * use of a mapping of the buffer involves a bunch of page faulting which is
    * moderately expensive.
    */
   struct intel_mipmap_tree *last_mt;
   if (num_samples == 0)
      last_mt = rb->mt;
   else
      last_mt = rb->singlesample_mt;

   uint32_t old_name = 0;
   if (last_mt) {
      /* The bo already has a name because the miptree was created by a
       * previous call to intel_process_dri2_buffer(). If a bo already has a
       * name, then brw_bo_flink() is a low-cost getter. It does not
       * create a new name.
       */
      brw_bo_flink(last_mt->bo, &old_name);
   }

   if (old_name == buffer->name)
      return;

   if (unlikely(INTEL_DEBUG & DEBUG_DRI)) {
      fprintf(stderr,
              "attaching buffer %d, at %d, cpp %d, pitch %d\n",
              buffer->name, buffer->attachment,
              buffer->cpp, buffer->pitch);
   }

   bo = brw_bo_gem_create_from_name(brw->bufmgr, buffer_name,
                                    buffer->name);
   if (!bo) {
      fprintf(stderr,
              "Failed to open BO for returned DRI2 buffer "
              "(%dx%d, %s, named %d).\n"
              "This is likely a bug in the X Server that will lead to a "
              "crash soon.\n",
              drawable->w, drawable->h, buffer_name, buffer->name);
      return;
   }

   uint32_t tiling, swizzle;
   brw_bo_get_tiling(bo, &tiling, &swizzle);

   struct intel_mipmap_tree *mt =
      intel_miptree_create_for_bo(brw,
                                  bo,
                                  intel_rb_format(rb),
                                  0,
                                  drawable->w,
                                  drawable->h,
                                  1,
                                  buffer->pitch,
                                  isl_tiling_from_i915_tiling(tiling),
                                  MIPTREE_CREATE_DEFAULT);
   if (!mt) {
      brw_bo_unreference(bo);
      return;
   }

   /* We got this BO from X11. We can't assume that we have coherent texture
    * access because X may suddenly decide to use it for scan-out which would
    * destroy coherency.
    */
   bo->cache_coherent = false;

   if (!intel_update_winsys_renderbuffer_miptree(brw, rb, mt,
                                                 drawable->w, drawable->h,
                                                 buffer->pitch)) {
      brw_bo_unreference(bo);
      intel_miptree_release(&mt);
      return;
   }

   if (_mesa_is_front_buffer_drawing(fb) &&
       (buffer->attachment == __DRI_BUFFER_FRONT_LEFT ||
        buffer->attachment == __DRI_BUFFER_FAKE_FRONT_LEFT) &&
       rb->Base.Base.NumSamples > 1) {
      intel_renderbuffer_upsample(brw, rb);
   }

   assert(rb->mt);

   brw_bo_unreference(bo);
}

/**
 * \brief Query DRI image loader to obtain a DRIdrawable's buffers.
 *
 * To determine which DRI buffers to request, examine the renderbuffers
 * attached to the drawable's framebuffer. Then request the buffers from
 * the image loader.
 *
 * This is called from intel_update_renderbuffers().
 *
 * \param drawable      Drawable whose buffers are queried.
 * \param buffers       [out] List of buffers returned by DRI2 query.
 * \param buffer_count  [out] Number of buffers returned.
 *
 * \see intel_update_renderbuffers()
 */

static void
intel_update_image_buffer(struct brw_context *intel,
                          __DRIdrawable *drawable,
                          struct intel_renderbuffer *rb,
                          __DRIimage *buffer,
                          enum __DRIimageBufferMask buffer_type)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;

   if (!rb || !buffer->bo)
      return;

   unsigned num_samples = rb->Base.Base.NumSamples;

   /* Check and see if we're already bound to the right
    * buffer object
    */
   struct intel_mipmap_tree *last_mt;
   if (num_samples == 0)
      last_mt = rb->mt;
   else
      last_mt = rb->singlesample_mt;

   if (last_mt && last_mt->bo == buffer->bo)
      return;

   /* Only allow internal compression if samples == 0. For multisampled
    * window system buffers, the only thing the single-sampled buffer is used
    * for is as a resolve target. If we do any compression beyond what is
    * supported by the window system, we will just have to resolve so it's
    * probably better to just not bother.
    */
   const bool allow_internal_aux = (num_samples == 0);

   struct intel_mipmap_tree *mt =
      intel_miptree_create_for_dri_image(intel, buffer, GL_TEXTURE_2D,
                                         intel_rb_format(rb),
                                         allow_internal_aux);
   if (!mt)
      return;

   if (!intel_update_winsys_renderbuffer_miptree(intel, rb, mt,
                                                 buffer->width, buffer->height,
                                                 buffer->pitch)) {
      intel_miptree_release(&mt);
      return;
   }

   if (_mesa_is_front_buffer_drawing(fb) &&
       buffer_type == __DRI_IMAGE_BUFFER_FRONT &&
       rb->Base.Base.NumSamples > 1) {
      intel_renderbuffer_upsample(intel, rb);
   }
}

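/* Image-loader path of intel_update_renderbuffers(): ask the loader for
 * the front/back __DRIimages and bind them to the renderbuffers.
 */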
static void
intel_update_image_buffers(struct brw_context *brw, __DRIdrawable *drawable)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;
   __DRIscreen *dri_screen = brw->screen->driScrnPriv;
   struct intel_renderbuffer *front_rb;
   struct intel_renderbuffer *back_rb;
   struct __DRIimageList images;
   mesa_format format;
   uint32_t buffer_mask = 0;
   int ret;

   front_rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
   back_rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);

   if (back_rb)
      format = intel_rb_format(back_rb);
   else if (front_rb)
      format = intel_rb_format(front_rb);
   else
      return;

   if (front_rb && (_mesa_is_front_buffer_drawing(fb) ||
                    _mesa_is_front_buffer_reading(fb) || !back_rb)) {
      buffer_mask |= __DRI_IMAGE_BUFFER_FRONT;
   }

   if (back_rb)
      buffer_mask |= __DRI_IMAGE_BUFFER_BACK;

   ret = dri_screen->image.loader->getBuffers(drawable,
                                              driGLFormatToImageFormat(format),
                                              &drawable->dri2.stamp,
                                              drawable->loaderPrivate,
                                              buffer_mask,
                                              &images);
   if (!ret)
      return;

   if (images.image_mask & __DRI_IMAGE_BUFFER_FRONT) {
      drawable->w = images.front->width;
      drawable->h = images.front->height;
      intel_update_image_buffer(brw,
                                drawable,
                                front_rb,
                                images.front,
                                __DRI_IMAGE_BUFFER_FRONT);
   }

   if (images.image_mask & __DRI_IMAGE_BUFFER_BACK) {
      drawable->w = images.back->width;
      drawable->h = images.back->height;
      intel_update_image_buffer(brw,
                                drawable,
                                back_rb,
                                images.back,
                                __DRI_IMAGE_BUFFER_BACK);
   }
}