i965: Reimplement ARB_transform_feedback2 on Haswell and later.
[mesa.git] / src / mesa / drivers / dri / i965 / brw_context.c
1 /*
2 Copyright 2003 VMware, Inc.
3 Copyright (C) Intel Corp. 2006. All Rights Reserved.
4 Intel funded Tungsten Graphics to
5 develop this 3D driver.
6
7 Permission is hereby granted, free of charge, to any person obtaining
8 a copy of this software and associated documentation files (the
9 "Software"), to deal in the Software without restriction, including
10 without limitation the rights to use, copy, modify, merge, publish,
11 distribute, sublicense, and/or sell copies of the Software, and to
12 permit persons to whom the Software is furnished to do so, subject to
13 the following conditions:
14
15 The above copyright notice and this permission notice (including the
16 next paragraph) shall be included in all copies or substantial
17 portions of the Software.
18
19 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
20 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
22 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
23 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
24 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
25 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26
27 **********************************************************************/
28 /*
29 * Authors:
30 * Keith Whitwell <keithw@vmware.com>
31 */
32
33
34 #include "main/api_exec.h"
35 #include "main/context.h"
36 #include "main/fbobject.h"
37 #include "main/extensions.h"
38 #include "main/imports.h"
39 #include "main/macros.h"
40 #include "main/points.h"
41 #include "main/version.h"
42 #include "main/vtxfmt.h"
43 #include "main/texobj.h"
44 #include "main/framebuffer.h"
45
46 #include "vbo/vbo_context.h"
47
48 #include "drivers/common/driverfuncs.h"
49 #include "drivers/common/meta.h"
50 #include "utils.h"
51
52 #include "brw_context.h"
53 #include "brw_defines.h"
54 #include "brw_compiler.h"
55 #include "brw_draw.h"
56 #include "brw_state.h"
57
58 #include "intel_batchbuffer.h"
59 #include "intel_buffer_objects.h"
60 #include "intel_buffers.h"
61 #include "intel_fbo.h"
62 #include "intel_mipmap_tree.h"
63 #include "intel_pixel.h"
64 #include "intel_image.h"
65 #include "intel_tex.h"
66 #include "intel_tex_obj.h"
67
68 #include "swrast_setup/swrast_setup.h"
69 #include "tnl/tnl.h"
70 #include "tnl/t_pipeline.h"
71 #include "util/ralloc.h"
72 #include "util/debug.h"
73
74 /***************************************
75 * Mesa's Driver Functions
76 ***************************************/
77
78 const char *const brw_vendor_string = "Intel Open Source Technology Center";
79
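/* Braswell parts are branded by fused EU count; the "XXX" placeholder in the
 * renderer string from i965_pci_ids.h is patched with this model number in
 * brw_get_renderer_string() below.
 */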
80 static const char *
81 get_bsw_model(const struct intel_screen *intelScreen)
82 {
83 switch (intelScreen->eu_total) {
84 case 16:
85 return "405";
86 case 12:
87 return "400";
88 default:
89       return "   ";
90 }
91 }
92
93 const char *
94 brw_get_renderer_string(const struct intel_screen *intelScreen)
95 {
96 const char *chipset;
97 static char buffer[128];
98 char *bsw = NULL;
99
100 switch (intelScreen->deviceID) {
101 #undef CHIPSET
102 #define CHIPSET(id, symbol, str) case id: chipset = str; break;
103 #include "pci_ids/i965_pci_ids.h"
104 default:
105 chipset = "Unknown Intel Chipset";
106 break;
107 }
108
109 /* Braswell branding is funny, so we have to fix it up here */
110 if (intelScreen->deviceID == 0x22B1) {
111 bsw = strdup(chipset);
112 char *needle = strstr(bsw, "XXX");
113 if (needle) {
114 memcpy(needle, get_bsw_model(intelScreen), 3);
115 chipset = bsw;
116 }
117 }
118
119 (void) driGetRendererString(buffer, chipset, 0);
120 free(bsw);
121 return buffer;
122 }
123
124 static const GLubyte *
125 intel_get_string(struct gl_context * ctx, GLenum name)
126 {
127 const struct brw_context *const brw = brw_context(ctx);
128
129 switch (name) {
130 case GL_VENDOR:
131 return (GLubyte *) brw_vendor_string;
132
133 case GL_RENDERER:
134 return
135 (GLubyte *) brw_get_renderer_string(brw->intelScreen);
136
137 default:
138 return NULL;
139 }
140 }
141
142 static void
143 intel_viewport(struct gl_context *ctx)
144 {
145 struct brw_context *brw = brw_context(ctx);
146 __DRIcontext *driContext = brw->driContext;
147
148 if (_mesa_is_winsys_fbo(ctx->DrawBuffer)) {
149 if (driContext->driDrawablePriv)
150 dri2InvalidateDrawable(driContext->driDrawablePriv);
151 if (driContext->driReadablePriv)
152 dri2InvalidateDrawable(driContext->driReadablePriv);
153 }
154 }
155
156 static void
157 intel_update_framebuffer(struct gl_context *ctx,
158 struct gl_framebuffer *fb)
159 {
160 struct brw_context *brw = brw_context(ctx);
161
162 /* Quantize the derived default number of samples
163 */
164 fb->DefaultGeometry._NumSamples =
165 intel_quantize_num_samples(brw->intelScreen,
166 fb->DefaultGeometry.NumSamples);
167 }
168
169 static void
170 intel_update_state(struct gl_context * ctx, GLuint new_state)
171 {
172 struct brw_context *brw = brw_context(ctx);
173 struct intel_texture_object *tex_obj;
174 struct intel_renderbuffer *depth_irb;
175
176 if (ctx->swrast_context)
177 _swrast_InvalidateState(ctx, new_state);
178 _vbo_InvalidateState(ctx, new_state);
179
180 brw->NewGLState |= new_state;
181
182 _mesa_unlock_context_textures(ctx);
183
184 /* Resolve the depth buffer's HiZ buffer. */
185 depth_irb = intel_get_renderbuffer(ctx->DrawBuffer, BUFFER_DEPTH);
186 if (depth_irb)
187 intel_renderbuffer_resolve_hiz(brw, depth_irb);
188
189 /* Resolve depth buffer and render cache of each enabled texture. */
190 int maxEnabledUnit = ctx->Texture._MaxEnabledTexImageUnit;
191 for (int i = 0; i <= maxEnabledUnit; i++) {
192 if (!ctx->Texture.Unit[i]._Current)
193 continue;
194 tex_obj = intel_texture_object(ctx->Texture.Unit[i]._Current);
195 if (!tex_obj || !tex_obj->mt)
196 continue;
197 intel_miptree_all_slices_resolve_depth(brw, tex_obj->mt);
198         /* The sampling engine understands lossless compression, so resolving
199 * those surfaces should be skipped for performance reasons.
200 */
201 intel_miptree_resolve_color(brw, tex_obj->mt,
202 INTEL_MIPTREE_IGNORE_CCS_E);
203 brw_render_cache_set_check_flush(brw, tex_obj->mt->bo);
204 }
205
206 /* Resolve color for each active shader image. */
207 for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
208 const struct gl_shader *shader = ctx->_Shader->CurrentProgram[i] ?
209 ctx->_Shader->CurrentProgram[i]->_LinkedShaders[i] : NULL;
210
211 if (unlikely(shader && shader->NumImages)) {
212 for (unsigned j = 0; j < shader->NumImages; j++) {
213 struct gl_image_unit *u = &ctx->ImageUnits[shader->ImageUnits[j]];
214 tex_obj = intel_texture_object(u->TexObj);
215
216 if (tex_obj && tex_obj->mt) {
217 /* Access to images is implemented using indirect messages
218              * against the data port. Normal render target writes understand
219              * lossless compression, but unfortunately the typed/untyped
220 * read/write interface doesn't. Therefore the compressed
221 * surfaces need to be resolved prior to accessing them.
222 */
223 intel_miptree_resolve_color(brw, tex_obj->mt, 0);
224 brw_render_cache_set_check_flush(brw, tex_obj->mt->bo);
225 }
226 }
227 }
228 }
229
230 /* If FRAMEBUFFER_SRGB is used on Gen9+ then we need to resolve any of the
231 * single-sampled color renderbuffers because the CCS buffer isn't
232 * supported for SRGB formats. This only matters if FRAMEBUFFER_SRGB is
233 * enabled because otherwise the surface state will be programmed with the
234 * linear equivalent format anyway.
235 */
236 if (brw->gen >= 9 && ctx->Color.sRGBEnabled) {
237 struct gl_framebuffer *fb = ctx->DrawBuffer;
238 for (int i = 0; i < fb->_NumColorDrawBuffers; i++) {
239 struct gl_renderbuffer *rb = fb->_ColorDrawBuffers[i];
240
241 if (rb == NULL)
242 continue;
243
244 struct intel_renderbuffer *irb = intel_renderbuffer(rb);
245 struct intel_mipmap_tree *mt = irb->mt;
246
247 if (mt == NULL ||
248 mt->num_samples > 1 ||
249 _mesa_get_srgb_format_linear(mt->format) == mt->format)
250 continue;
251
252          /* Lossless compression is not supported for SRGB formats, so it
253 * should be impossible to get here with such surfaces.
254 */
255 assert(!intel_miptree_is_lossless_compressed(brw, mt));
256 intel_miptree_resolve_color(brw, mt, 0);
257 brw_render_cache_set_check_flush(brw, mt->bo);
258 }
259 }
260
261 _mesa_lock_context_textures(ctx);
262
263 if (new_state & _NEW_BUFFERS) {
264 intel_update_framebuffer(ctx, ctx->DrawBuffer);
265 if (ctx->DrawBuffer != ctx->ReadBuffer)
266 intel_update_framebuffer(ctx, ctx->ReadBuffer);
267 }
268 }
269
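/* Pick the loader's flushFrontBuffer hook, whether the screen uses the image
 * loader or the classic DRI2 loader.
 */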
270 #define flushFront(screen) ((screen)->image.loader ? (screen)->image.loader->flushFrontBuffer : (screen)->dri2.loader->flushFrontBuffer)
271
272 static void
273 intel_flush_front(struct gl_context *ctx)
274 {
275 struct brw_context *brw = brw_context(ctx);
276 __DRIcontext *driContext = brw->driContext;
277 __DRIdrawable *driDrawable = driContext->driDrawablePriv;
278 __DRIscreen *const screen = brw->intelScreen->driScrnPriv;
279
280 if (brw->front_buffer_dirty && _mesa_is_winsys_fbo(ctx->DrawBuffer)) {
281 if (flushFront(screen) && driDrawable &&
282 driDrawable->loaderPrivate) {
283
284 /* Resolve before flushing FAKE_FRONT_LEFT to FRONT_LEFT.
285 *
286           * This potentially resolves both the front and back buffers. It
287           * is unnecessary to resolve the back, but doing so harms nothing
288           * except performance. And no one cares about front-buffer rendering
289           * performance.
290 */
291 intel_resolve_for_dri2_flush(brw, driDrawable);
292 intel_batchbuffer_flush(brw);
293
294 flushFront(screen)(driDrawable, driDrawable->loaderPrivate);
295
296          /* The dirty bit will be set again in intel_prepare_render() if we
297           * end up doing front-buffer rendering once we get there.
298 */
299 brw->front_buffer_dirty = false;
300 }
301 }
302 }
303
304 static void
305 intel_glFlush(struct gl_context *ctx)
306 {
307 struct brw_context *brw = brw_context(ctx);
308
309 intel_batchbuffer_flush(brw);
310 intel_flush_front(ctx);
311
312 brw->need_flush_throttle = true;
313 }
314
315 static void
316 intel_finish(struct gl_context * ctx)
317 {
318 struct brw_context *brw = brw_context(ctx);
319
320 intel_glFlush(ctx);
321
322 if (brw->batch.last_bo)
323 drm_intel_bo_wait_rendering(brw->batch.last_bo);
324 }
325
326 static void
327 brw_init_driver_functions(struct brw_context *brw,
328 struct dd_function_table *functions)
329 {
330 _mesa_init_driver_functions(functions);
331
332 /* GLX uses DRI2 invalidate events to handle window resizing.
333 * Unfortunately, EGL does not - libEGL is written in XCB (not Xlib),
334 * which doesn't provide a mechanism for snooping the event queues.
335 *
336 * So EGL still relies on viewport hacks to handle window resizing.
337 * This should go away with DRI3000.
338 */
339 if (!brw->driContext->driScreenPriv->dri2.useInvalidate)
340 functions->Viewport = intel_viewport;
341
342 functions->Flush = intel_glFlush;
343 functions->Finish = intel_finish;
344 functions->GetString = intel_get_string;
345 functions->UpdateState = intel_update_state;
346
347 intelInitTextureFuncs(functions);
348 intelInitTextureImageFuncs(functions);
349 intelInitTextureSubImageFuncs(functions);
350 intelInitTextureCopyImageFuncs(functions);
351 intelInitCopyImageFuncs(functions);
352 intelInitClearFuncs(functions);
353 intelInitBufferFuncs(functions);
354 intelInitPixelFuncs(functions);
355 intelInitBufferObjectFuncs(functions);
356 intel_init_syncobj_functions(functions);
357 brw_init_object_purgeable_functions(functions);
358
359 brwInitFragProgFuncs( functions );
360 brw_init_common_queryobj_functions(functions);
361 if (brw->gen >= 8 || brw->is_haswell)
362 hsw_init_queryobj_functions(functions);
363 else if (brw->gen >= 6)
364 gen6_init_queryobj_functions(functions);
365 else
366 gen4_init_queryobj_functions(functions);
367 brw_init_compute_functions(functions);
368 if (brw->gen >= 7)
369 brw_init_conditional_render_functions(functions);
370
371 functions->QueryInternalFormat = brw_query_internal_format;
372
373 functions->NewTransformFeedback = brw_new_transform_feedback;
374 functions->DeleteTransformFeedback = brw_delete_transform_feedback;
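   /* Screens with MI_MATH and MI_LOAD_REGISTER_REG (Haswell and later) use the
    * hsw_* hooks, which keep transform feedback offsets and vertex counts in
    * GPU registers; other Gen7 parts use the gen7_* hooks plus an explicit
    * vertex count callback, and older parts fall back to the brw_* hooks.
    */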
375 if (brw->intelScreen->has_mi_math_and_lrr) {
376 functions->BeginTransformFeedback = hsw_begin_transform_feedback;
377 functions->EndTransformFeedback = hsw_end_transform_feedback;
378 functions->PauseTransformFeedback = hsw_pause_transform_feedback;
379 functions->ResumeTransformFeedback = hsw_resume_transform_feedback;
380 } else if (brw->gen >= 7) {
381 functions->BeginTransformFeedback = gen7_begin_transform_feedback;
382 functions->EndTransformFeedback = gen7_end_transform_feedback;
383 functions->PauseTransformFeedback = gen7_pause_transform_feedback;
384 functions->ResumeTransformFeedback = gen7_resume_transform_feedback;
385 functions->GetTransformFeedbackVertexCount =
386 brw_get_transform_feedback_vertex_count;
387 } else {
388 functions->BeginTransformFeedback = brw_begin_transform_feedback;
389 functions->EndTransformFeedback = brw_end_transform_feedback;
390 }
391
392 if (brw->gen >= 6)
393 functions->GetSamplePosition = gen6_get_sample_position;
394 }
395
396 static void
397 brw_initialize_context_constants(struct brw_context *brw)
398 {
399 struct gl_context *ctx = &brw->ctx;
400 const struct brw_compiler *compiler = brw->intelScreen->compiler;
401
402 const bool stage_exists[MESA_SHADER_STAGES] = {
403 [MESA_SHADER_VERTEX] = true,
404 [MESA_SHADER_TESS_CTRL] = brw->gen >= 7,
405 [MESA_SHADER_TESS_EVAL] = brw->gen >= 7,
406 [MESA_SHADER_GEOMETRY] = brw->gen >= 6,
407 [MESA_SHADER_FRAGMENT] = true,
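      /* Expose compute only when the work group size limit meets the API
       * minimums (1024 for desktop GL, 128 for OpenGL ES 3.1) or when the
       * extension is explicitly force-enabled.
       */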
408 [MESA_SHADER_COMPUTE] =
409 (ctx->API == API_OPENGL_CORE &&
410 ctx->Const.MaxComputeWorkGroupSize[0] >= 1024) ||
411 (ctx->API == API_OPENGLES2 &&
412 ctx->Const.MaxComputeWorkGroupSize[0] >= 128) ||
413 _mesa_extension_override_enables.ARB_compute_shader,
414 };
415
416 unsigned num_stages = 0;
417 for (int i = 0; i < MESA_SHADER_STAGES; i++) {
418 if (stage_exists[i])
419 num_stages++;
420 }
421
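   /* Haswell and Gen8+ expose the full BRW_MAX_TEX_UNIT samplers per stage;
    * earlier hardware is limited to 16.
    */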
422 unsigned max_samplers =
423 brw->gen >= 8 || brw->is_haswell ? BRW_MAX_TEX_UNIT : 16;
424
425 ctx->Const.MaxDualSourceDrawBuffers = 1;
426 ctx->Const.MaxDrawBuffers = BRW_MAX_DRAW_BUFFERS;
427 ctx->Const.MaxCombinedShaderOutputResources =
428 MAX_IMAGE_UNITS + BRW_MAX_DRAW_BUFFERS;
429
430 ctx->Const.QueryCounterBits.Timestamp = 36;
431
432 ctx->Const.MaxTextureCoordUnits = 8; /* Mesa limit */
433 ctx->Const.MaxImageUnits = MAX_IMAGE_UNITS;
434 ctx->Const.MaxRenderbufferSize = 8192;
435 ctx->Const.MaxTextureLevels = MIN2(14 /* 8192 */, MAX_TEXTURE_LEVELS);
436 ctx->Const.Max3DTextureLevels = 12; /* 2048 */
437 ctx->Const.MaxCubeTextureLevels = 14; /* 8192 */
438 ctx->Const.MaxArrayTextureLayers = brw->gen >= 7 ? 2048 : 512;
439 ctx->Const.MaxTextureMbytes = 1536;
440 ctx->Const.MaxTextureRectSize = 1 << 12;
441 ctx->Const.MaxTextureMaxAnisotropy = 16.0;
442 ctx->Const.StripTextureBorder = true;
443 if (brw->gen >= 7)
444 ctx->Const.MaxProgramTextureGatherComponents = 4;
445 else if (brw->gen == 6)
446 ctx->Const.MaxProgramTextureGatherComponents = 1;
447
448 ctx->Const.MaxUniformBlockSize = 65536;
449
450 for (int i = 0; i < MESA_SHADER_STAGES; i++) {
451 struct gl_program_constants *prog = &ctx->Const.Program[i];
452
453 if (!stage_exists[i])
454 continue;
455
456 prog->MaxTextureImageUnits = max_samplers;
457
458 prog->MaxUniformBlocks = BRW_MAX_UBO;
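      /* Each UBO binding can contribute MaxUniformBlockSize / 4 components
       * (4 bytes per component) on top of the default uniform block.
       */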
459 prog->MaxCombinedUniformComponents =
460 prog->MaxUniformComponents +
461 ctx->Const.MaxUniformBlockSize / 4 * prog->MaxUniformBlocks;
462
463 prog->MaxAtomicCounters = MAX_ATOMIC_COUNTERS;
464 prog->MaxAtomicBuffers = BRW_MAX_ABO;
465 prog->MaxImageUniforms = compiler->scalar_stage[i] ? BRW_MAX_IMAGES : 0;
466 prog->MaxShaderStorageBlocks = BRW_MAX_SSBO;
467 }
468
469 ctx->Const.MaxTextureUnits =
470 MIN2(ctx->Const.MaxTextureCoordUnits,
471 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits);
472
473 ctx->Const.MaxUniformBufferBindings = num_stages * BRW_MAX_UBO;
474 ctx->Const.MaxCombinedUniformBlocks = num_stages * BRW_MAX_UBO;
475 ctx->Const.MaxCombinedAtomicBuffers = num_stages * BRW_MAX_ABO;
476 ctx->Const.MaxCombinedShaderStorageBlocks = num_stages * BRW_MAX_SSBO;
477 ctx->Const.MaxShaderStorageBufferBindings = num_stages * BRW_MAX_SSBO;
478 ctx->Const.MaxCombinedTextureImageUnits = num_stages * max_samplers;
479 ctx->Const.MaxCombinedImageUniforms = num_stages * BRW_MAX_IMAGES;
480
481
482 /* Hardware only supports a limited number of transform feedback buffers.
483 * So we need to override the Mesa default (which is based only on software
484 * limits).
485 */
486 ctx->Const.MaxTransformFeedbackBuffers = BRW_MAX_SOL_BUFFERS;
487
488 /* On Gen6, in the worst case, we use up one binding table entry per
489 * transform feedback component (see comments above the definition of
490 * BRW_MAX_SOL_BINDINGS, in brw_context.h), so we need to advertise a value
491 * for MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS equal to
492 * BRW_MAX_SOL_BINDINGS.
493 *
494 * In "separate components" mode, we need to divide this value by
495 * BRW_MAX_SOL_BUFFERS, so that the total number of binding table entries
496 * used up by all buffers will not exceed BRW_MAX_SOL_BINDINGS.
497 */
498 ctx->Const.MaxTransformFeedbackInterleavedComponents = BRW_MAX_SOL_BINDINGS;
499 ctx->Const.MaxTransformFeedbackSeparateComponents =
500 BRW_MAX_SOL_BINDINGS / BRW_MAX_SOL_BUFFERS;
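   /* For example, if BRW_MAX_SOL_BINDINGS is 64 and BRW_MAX_SOL_BUFFERS is 4,
    * this advertises 16 separate components per buffer.
    */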
501
502 ctx->Const.AlwaysUseGetTransformFeedbackVertexCount =
503 !brw->intelScreen->has_mi_math_and_lrr;
504
505 int max_samples;
506 const int *msaa_modes = intel_supported_msaa_modes(brw->intelScreen);
507 const int clamp_max_samples =
508 driQueryOptioni(&brw->optionCache, "clamp_max_samples");
509
510 if (clamp_max_samples < 0) {
511 max_samples = msaa_modes[0];
512 } else {
513 /* Select the largest supported MSAA mode that does not exceed
514 * clamp_max_samples.
515 */
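      /* intel_supported_msaa_modes() returns the modes in decreasing order, so
       * the first mode that satisfies the clamp is the largest one.
       */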
516 max_samples = 0;
517 for (int i = 0; msaa_modes[i] != 0; ++i) {
518 if (msaa_modes[i] <= clamp_max_samples) {
519 max_samples = msaa_modes[i];
520 break;
521 }
522 }
523 }
524
525 ctx->Const.MaxSamples = max_samples;
526 ctx->Const.MaxColorTextureSamples = max_samples;
527 ctx->Const.MaxDepthTextureSamples = max_samples;
528 ctx->Const.MaxIntegerSamples = max_samples;
529 ctx->Const.MaxImageSamples = 0;
530
531    /* gen6_set_sample_maps() sets the SampleMap{2,4,8}x variables, which are used
532     * to map indices of a rectangular grid to sample numbers within a pixel.
533     * These variables are used by the GL_EXT_framebuffer_multisample_blit_scaled
534 * extension implementation. For more details see the comment above
535 * gen6_set_sample_maps() definition.
536 */
537 gen6_set_sample_maps(ctx);
538
539 ctx->Const.MinLineWidth = 1.0;
540 ctx->Const.MinLineWidthAA = 1.0;
541 if (brw->gen >= 6) {
542 ctx->Const.MaxLineWidth = 7.375;
543 ctx->Const.MaxLineWidthAA = 7.375;
544 ctx->Const.LineWidthGranularity = 0.125;
545 } else {
546 ctx->Const.MaxLineWidth = 7.0;
547 ctx->Const.MaxLineWidthAA = 7.0;
548 ctx->Const.LineWidthGranularity = 0.5;
549 }
550
551 /* For non-antialiased lines, we have to round the line width to the
552 * nearest whole number. Make sure that we don't advertise a line
553 * width that, when rounded, will be beyond the actual hardware
554 * maximum.
555 */
556 assert(roundf(ctx->Const.MaxLineWidth) <= ctx->Const.MaxLineWidth);
557
558 ctx->Const.MinPointSize = 1.0;
559 ctx->Const.MinPointSizeAA = 1.0;
560 ctx->Const.MaxPointSize = 255.0;
561 ctx->Const.MaxPointSizeAA = 255.0;
562 ctx->Const.PointSizeGranularity = 1.0;
563
564 if (brw->gen >= 5 || brw->is_g4x)
565 ctx->Const.MaxClipPlanes = 8;
566
567 ctx->Const.LowerTessLevel = true;
568
569 ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeInstructions = 16 * 1024;
570 ctx->Const.Program[MESA_SHADER_VERTEX].MaxAluInstructions = 0;
571 ctx->Const.Program[MESA_SHADER_VERTEX].MaxTexInstructions = 0;
572 ctx->Const.Program[MESA_SHADER_VERTEX].MaxTexIndirections = 0;
573 ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAluInstructions = 0;
574 ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTexInstructions = 0;
575 ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTexIndirections = 0;
576 ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAttribs = 16;
577 ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTemps = 256;
578 ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAddressRegs = 1;
579 ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeParameters = 1024;
580 ctx->Const.Program[MESA_SHADER_VERTEX].MaxEnvParams =
581 MIN2(ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeParameters,
582 ctx->Const.Program[MESA_SHADER_VERTEX].MaxEnvParams);
583
584 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeInstructions = 1024;
585 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAluInstructions = 1024;
586 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTexInstructions = 1024;
587 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTexIndirections = 1024;
588 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAttribs = 12;
589 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTemps = 256;
590 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAddressRegs = 0;
591 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeParameters = 1024;
592 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxEnvParams =
593 MIN2(ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeParameters,
594 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxEnvParams);
595
596 /* Fragment shaders use real, 32-bit twos-complement integers for all
597 * integer types.
598 */
599 ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.RangeMin = 31;
600 ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.RangeMax = 30;
601 ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.Precision = 0;
602 ctx->Const.Program[MESA_SHADER_FRAGMENT].HighInt = ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt;
603 ctx->Const.Program[MESA_SHADER_FRAGMENT].MediumInt = ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt;
604
605 ctx->Const.Program[MESA_SHADER_VERTEX].LowInt.RangeMin = 31;
606 ctx->Const.Program[MESA_SHADER_VERTEX].LowInt.RangeMax = 30;
607 ctx->Const.Program[MESA_SHADER_VERTEX].LowInt.Precision = 0;
608 ctx->Const.Program[MESA_SHADER_VERTEX].HighInt = ctx->Const.Program[MESA_SHADER_VERTEX].LowInt;
609 ctx->Const.Program[MESA_SHADER_VERTEX].MediumInt = ctx->Const.Program[MESA_SHADER_VERTEX].LowInt;
610
611    /* Gen6 converts quads to polygons at the beginning of the 3D pipeline,
612     * but we're not sure how it's actually done with respect to vertex order,
613     * which affects the provoking vertex decision. Always use the last-vertex
614     * convention for quad primitives, which works as expected for now.
615 */
616 if (brw->gen >= 6)
617 ctx->Const.QuadsFollowProvokingVertexConvention = false;
618
619 ctx->Const.NativeIntegers = true;
620 ctx->Const.VertexID_is_zero_based = true;
621
622 /* Regarding the CMP instruction, the Ivybridge PRM says:
623 *
624 * "For each enabled channel 0b or 1b is assigned to the appropriate flag
625 * bit and 0/all zeros or all ones (e.g, byte 0xFF, word 0xFFFF, DWord
626 * 0xFFFFFFFF) is assigned to dst."
627 *
628 * but PRMs for earlier generations say
629 *
630 * "In dword format, one GRF may store up to 8 results. When the register
631 * is used later as a vector of Booleans, as only LSB at each channel
632 * contains meaning [sic] data, software should make sure all higher bits
633 * are masked out (e.g. by 'and-ing' an [sic] 0x01 constant)."
634 *
635 * We select the representation of a true boolean uniform to be ~0, and fix
636    * the results of Gen <= 5 CMP instructions with -(result & 1).
637 */
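   /* e.g. -(result & 1) turns an LSB of 1 into 0xffffffff and an LSB of 0 into 0. */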
638 ctx->Const.UniformBooleanTrue = ~0;
639
640 /* From the gen4 PRM, volume 4 page 127:
641 *
642 * "For SURFTYPE_BUFFER non-rendertarget surfaces, this field specifies
643 * the base address of the first element of the surface, computed in
644 * software by adding the surface base address to the byte offset of
645 * the element in the buffer."
646 *
647 * However, unaligned accesses are slower, so enforce buffer alignment.
648 */
649 ctx->Const.UniformBufferOffsetAlignment = 16;
650
651 /* ShaderStorageBufferOffsetAlignment should be a cacheline (64 bytes) so
652 * that we can safely have the CPU and GPU writing the same SSBO on
653     * non-cache-coherent systems (our Atom CPUs). With UBOs, the GPU never
654 * writes, so there's no problem. For an SSBO, the GPU and the CPU can
655 * be updating disjoint regions of the buffer simultaneously and that will
656 * break if the regions overlap the same cacheline.
657 */
658 ctx->Const.ShaderStorageBufferOffsetAlignment = 64;
659 ctx->Const.TextureBufferOffsetAlignment = 16;
660 ctx->Const.MaxTextureBufferSize = 128 * 1024 * 1024;
661
662 if (brw->gen >= 6) {
663 ctx->Const.MaxVarying = 32;
664 ctx->Const.Program[MESA_SHADER_VERTEX].MaxOutputComponents = 128;
665 ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxInputComponents = 64;
666 ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxOutputComponents = 128;
667 ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxInputComponents = 128;
668 ctx->Const.Program[MESA_SHADER_TESS_CTRL].MaxInputComponents = 128;
669 ctx->Const.Program[MESA_SHADER_TESS_CTRL].MaxOutputComponents = 128;
670 ctx->Const.Program[MESA_SHADER_TESS_EVAL].MaxInputComponents = 128;
671 ctx->Const.Program[MESA_SHADER_TESS_EVAL].MaxOutputComponents = 128;
672 }
673
674 /* We want the GLSL compiler to emit code that uses condition codes */
675 for (int i = 0; i < MESA_SHADER_STAGES; i++) {
676 ctx->Const.ShaderCompilerOptions[i] =
677 brw->intelScreen->compiler->glsl_compiler_options[i];
678 }
679
680 if (brw->gen >= 7) {
681 ctx->Const.MaxViewportWidth = 32768;
682 ctx->Const.MaxViewportHeight = 32768;
683 }
684
685 /* ARB_viewport_array */
686 if (brw->gen >= 6 && ctx->API == API_OPENGL_CORE) {
687 ctx->Const.MaxViewports = GEN6_NUM_VIEWPORTS;
688 ctx->Const.ViewportSubpixelBits = 0;
689
690 /* Cast to float before negating because MaxViewportWidth is unsigned.
691 */
692 ctx->Const.ViewportBounds.Min = -(float)ctx->Const.MaxViewportWidth;
693 ctx->Const.ViewportBounds.Max = ctx->Const.MaxViewportWidth;
694 }
695
696 /* ARB_gpu_shader5 */
697 if (brw->gen >= 7)
698 ctx->Const.MaxVertexStreams = MIN2(4, MAX_VERTEX_STREAMS);
699
700 /* ARB_framebuffer_no_attachments */
701 ctx->Const.MaxFramebufferWidth = 16384;
702 ctx->Const.MaxFramebufferHeight = 16384;
703 ctx->Const.MaxFramebufferLayers = ctx->Const.MaxArrayTextureLayers;
704 ctx->Const.MaxFramebufferSamples = max_samples;
705 }
706
707 static void
708 brw_initialize_cs_context_constants(struct brw_context *brw, unsigned max_threads)
709 {
710 struct gl_context *ctx = &brw->ctx;
711
712 /* For ES, we set these constants based on SIMD8.
713 *
714 * TODO: Once we can always generate SIMD16, we should update this.
715 *
716 * For GL, we assume we can generate a SIMD16 program, but this currently
717 * is not always true. This allows us to run more test cases, and will be
718 * required based on desktop GL compute shader requirements.
719 */
720 const int simd_size = ctx->API == API_OPENGL_CORE ? 16 : 8;
721
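   /* For example, with SIMD16 programs and 64 threads this advertises
    * 16 * 64 = 1024 invocations per work group.
    */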
722 const uint32_t max_invocations = simd_size * max_threads;
723 ctx->Const.MaxComputeWorkGroupSize[0] = max_invocations;
724 ctx->Const.MaxComputeWorkGroupSize[1] = max_invocations;
725 ctx->Const.MaxComputeWorkGroupSize[2] = max_invocations;
726 ctx->Const.MaxComputeWorkGroupInvocations = max_invocations;
727 ctx->Const.MaxComputeSharedMemorySize = 64 * 1024;
728 }
729
730 /**
731 * Process driconf (drirc) options, setting appropriate context flags.
732 *
733 * intelInitExtensions still pokes at optionCache directly, in order to
734 * avoid advertising various extensions. No flags are set, so it makes
735 * sense to continue doing that there.
736 */
737 static void
738 brw_process_driconf_options(struct brw_context *brw)
739 {
740 struct gl_context *ctx = &brw->ctx;
741
742 driOptionCache *options = &brw->optionCache;
743 driParseConfigFiles(options, &brw->intelScreen->optionCache,
744 brw->driContext->driScreenPriv->myNum, "i965");
745
746 int bo_reuse_mode = driQueryOptioni(options, "bo_reuse");
747 switch (bo_reuse_mode) {
748 case DRI_CONF_BO_REUSE_DISABLED:
749 break;
750 case DRI_CONF_BO_REUSE_ALL:
751 intel_bufmgr_gem_enable_reuse(brw->bufmgr);
752 break;
753 }
754
755 if (!driQueryOptionb(options, "hiz")) {
756 brw->has_hiz = false;
757 /* On gen6, you can only do separate stencil with HIZ. */
758 if (brw->gen == 6)
759 brw->has_separate_stencil = false;
760 }
761
762 if (driQueryOptionb(options, "always_flush_batch")) {
763 fprintf(stderr, "flushing batchbuffer before/after each draw call\n");
764 brw->always_flush_batch = true;
765 }
766
767 if (driQueryOptionb(options, "always_flush_cache")) {
768 fprintf(stderr, "flushing GPU caches before/after each draw call\n");
769 brw->always_flush_cache = true;
770 }
771
772 if (driQueryOptionb(options, "disable_throttling")) {
773 fprintf(stderr, "disabling flush throttling\n");
774 brw->disable_throttling = true;
775 }
776
777 brw->precompile = driQueryOptionb(&brw->optionCache, "shader_precompile");
778
779 ctx->Const.ForceGLSLExtensionsWarn =
780 driQueryOptionb(options, "force_glsl_extensions_warn");
781
782 ctx->Const.DisableGLSLLineContinuations =
783 driQueryOptionb(options, "disable_glsl_line_continuations");
784
785 ctx->Const.AllowGLSLExtensionDirectiveMidShader =
786 driQueryOptionb(options, "allow_glsl_extension_directive_midshader");
787
788 brw->dual_color_blend_by_location =
789 driQueryOptionb(options, "dual_color_blend_by_location");
790 }
791
792 GLboolean
793 brwCreateContext(gl_api api,
794 const struct gl_config *mesaVis,
795 __DRIcontext *driContextPriv,
796 unsigned major_version,
797 unsigned minor_version,
798 uint32_t flags,
799 bool notify_reset,
800 unsigned *dri_ctx_error,
801 void *sharedContextPrivate)
802 {
803 __DRIscreen *sPriv = driContextPriv->driScreenPriv;
804 struct gl_context *shareCtx = (struct gl_context *) sharedContextPrivate;
805 struct intel_screen *screen = sPriv->driverPrivate;
806 const struct brw_device_info *devinfo = screen->devinfo;
807 struct dd_function_table functions;
808
809 /* Only allow the __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS flag if the kernel
810 * provides us with context reset notifications.
811 */
812 uint32_t allowed_flags = __DRI_CTX_FLAG_DEBUG
813 | __DRI_CTX_FLAG_FORWARD_COMPATIBLE;
814
815 if (screen->has_context_reset_notification)
816 allowed_flags |= __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS;
817
818 if (flags & ~allowed_flags) {
819 *dri_ctx_error = __DRI_CTX_ERROR_UNKNOWN_FLAG;
820 return false;
821 }
822
823 struct brw_context *brw = rzalloc(NULL, struct brw_context);
824 if (!brw) {
825 fprintf(stderr, "%s: failed to alloc context\n", __func__);
826 *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
827 return false;
828 }
829
830 driContextPriv->driverPrivate = brw;
831 brw->driContext = driContextPriv;
832 brw->intelScreen = screen;
833 brw->bufmgr = screen->bufmgr;
834
835 brw->gen = devinfo->gen;
836 brw->gt = devinfo->gt;
837 brw->is_g4x = devinfo->is_g4x;
838 brw->is_baytrail = devinfo->is_baytrail;
839 brw->is_haswell = devinfo->is_haswell;
840 brw->is_cherryview = devinfo->is_cherryview;
841 brw->is_broxton = devinfo->is_broxton;
842 brw->has_llc = devinfo->has_llc;
843 brw->has_hiz = devinfo->has_hiz_and_separate_stencil;
844 brw->has_separate_stencil = devinfo->has_hiz_and_separate_stencil;
845 brw->has_pln = devinfo->has_pln;
846 brw->has_compr4 = devinfo->has_compr4;
847 brw->has_surface_tile_offset = devinfo->has_surface_tile_offset;
848 brw->has_negative_rhw_bug = devinfo->has_negative_rhw_bug;
849 brw->needs_unlit_centroid_workaround =
850 devinfo->needs_unlit_centroid_workaround;
851
852 brw->must_use_separate_stencil = devinfo->must_use_separate_stencil;
853 brw->has_swizzling = screen->hw_has_swizzling;
854
855 brw->vs.base.stage = MESA_SHADER_VERTEX;
856 brw->tcs.base.stage = MESA_SHADER_TESS_CTRL;
857 brw->tes.base.stage = MESA_SHADER_TESS_EVAL;
858 brw->gs.base.stage = MESA_SHADER_GEOMETRY;
859 brw->wm.base.stage = MESA_SHADER_FRAGMENT;
860 if (brw->gen >= 8) {
861 gen8_init_vtable_surface_functions(brw);
862 brw->vtbl.emit_depth_stencil_hiz = gen8_emit_depth_stencil_hiz;
863 } else if (brw->gen >= 7) {
864 gen7_init_vtable_surface_functions(brw);
865 brw->vtbl.emit_depth_stencil_hiz = gen7_emit_depth_stencil_hiz;
866 } else if (brw->gen >= 6) {
867 gen6_init_vtable_surface_functions(brw);
868 brw->vtbl.emit_depth_stencil_hiz = gen6_emit_depth_stencil_hiz;
869 } else {
870 gen4_init_vtable_surface_functions(brw);
871 brw->vtbl.emit_depth_stencil_hiz = brw_emit_depth_stencil_hiz;
872 }
873
874 brw_init_driver_functions(brw, &functions);
875
876 if (notify_reset)
877 functions.GetGraphicsResetStatus = brw_get_graphics_reset_status;
878
879 struct gl_context *ctx = &brw->ctx;
880
881 if (!_mesa_initialize_context(ctx, api, mesaVis, shareCtx, &functions)) {
882 *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
883 fprintf(stderr, "%s: failed to init mesa context\n", __func__);
884 intelDestroyContext(driContextPriv);
885 return false;
886 }
887
888 driContextSetFlags(ctx, flags);
889
890 /* Initialize the software rasterizer and helper modules.
891 *
892 * As of GL 3.1 core, the gen4+ driver doesn't need the swrast context for
893 * software fallbacks (which we have to support on legacy GL to do weird
894 * glDrawPixels(), glBitmap(), and other functions).
895 */
896 if (api != API_OPENGL_CORE && api != API_OPENGLES2) {
897 _swrast_CreateContext(ctx);
898 }
899
900 _vbo_CreateContext(ctx);
901 if (ctx->swrast_context) {
902 _tnl_CreateContext(ctx);
903 TNL_CONTEXT(ctx)->Driver.RunPipeline = _tnl_run_pipeline;
904 _swsetup_CreateContext(ctx);
905
906 /* Configure swrast to match hardware characteristics: */
907 _swrast_allow_pixel_fog(ctx, false);
908 _swrast_allow_vertex_fog(ctx, true);
909 }
910
911 _mesa_meta_init(ctx);
912
913 brw_process_driconf_options(brw);
914
915 if (INTEL_DEBUG & DEBUG_PERF)
916 brw->perf_debug = true;
917
918 brw_initialize_cs_context_constants(brw, devinfo->max_cs_threads);
919 brw_initialize_context_constants(brw);
920
921 ctx->Const.ResetStrategy = notify_reset
922 ? GL_LOSE_CONTEXT_ON_RESET_ARB : GL_NO_RESET_NOTIFICATION_ARB;
923
924 /* Reinitialize the context point state. It depends on ctx->Const values. */
925 _mesa_init_point(ctx);
926
927 intel_fbo_init(brw);
928
929 intel_batchbuffer_init(brw);
930
931 if (brw->gen >= 6) {
932 /* Create a new hardware context. Using a hardware context means that
933 * our GPU state will be saved/restored on context switch, allowing us
934 * to assume that the GPU is in the same state we left it in.
935 *
936 * This is required for transform feedback buffer offsets, query objects,
937 * and also allows us to reduce how much state we have to emit.
938 */
939 brw->hw_ctx = drm_intel_gem_context_create(brw->bufmgr);
940
941 if (!brw->hw_ctx) {
942 fprintf(stderr, "Gen6+ requires Kernel 3.6 or later.\n");
943 intelDestroyContext(driContextPriv);
944 return false;
945 }
946 }
947
948 if (brw_init_pipe_control(brw, devinfo)) {
949 *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
950 intelDestroyContext(driContextPriv);
951 return false;
952 }
953
954 brw_init_state(brw);
955
956 intelInitExtensions(ctx);
957
958 brw_init_surface_formats(brw);
959
960 brw->max_vs_threads = devinfo->max_vs_threads;
961 brw->max_hs_threads = devinfo->max_hs_threads;
962 brw->max_ds_threads = devinfo->max_ds_threads;
963 brw->max_gs_threads = devinfo->max_gs_threads;
964 brw->max_wm_threads = devinfo->max_wm_threads;
965 /* FINISHME: Do this for all platforms that the kernel supports */
966 if (brw->is_cherryview &&
967 screen->subslice_total > 0 && screen->eu_total > 0) {
968 /* Logical CS threads = EUs per subslice * 7 threads per EU */
969 brw->max_cs_threads = screen->eu_total / screen->subslice_total * 7;
970
971 /* Fuse configurations may give more threads than expected, never less. */
972 if (brw->max_cs_threads < devinfo->max_cs_threads)
973 brw->max_cs_threads = devinfo->max_cs_threads;
974 } else {
975 brw->max_cs_threads = devinfo->max_cs_threads;
976 }
977 brw->urb.size = devinfo->urb.size;
978 brw->urb.min_vs_entries = devinfo->urb.min_vs_entries;
979 brw->urb.max_vs_entries = devinfo->urb.max_vs_entries;
980 brw->urb.max_hs_entries = devinfo->urb.max_hs_entries;
981 brw->urb.max_ds_entries = devinfo->urb.max_ds_entries;
982 brw->urb.max_gs_entries = devinfo->urb.max_gs_entries;
983
984 /* Estimate the size of the mappable aperture into the GTT. There's an
985 * ioctl to get the whole GTT size, but not one to get the mappable subset.
986 * It turns out it's basically always 256MB, though some ancient hardware
987 * was smaller.
988 */
989 uint32_t gtt_size = 256 * 1024 * 1024;
990
991 /* We don't want to map two objects such that a memcpy between them would
992 * just fault one mapping in and then the other over and over forever. So
993 * we would need to divide the GTT size by 2. Additionally, some GTT is
994 * taken up by things like the framebuffer and the ringbuffer and such, so
995 * be more conservative.
996 */
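   /* With the 256MB estimate above, this caps mappable objects at 64MB. */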
997 brw->max_gtt_map_object_size = gtt_size / 4;
998
999 if (brw->gen == 6)
1000 brw->urb.gs_present = false;
1001
1002 brw->prim_restart.in_progress = false;
1003 brw->prim_restart.enable_cut_index = false;
1004 brw->gs.enabled = false;
1005 brw->sf.viewport_transform_enable = true;
1006
1007 brw->predicate.state = BRW_PREDICATE_STATE_RENDER;
1008
1009 brw->use_resource_streamer = screen->has_resource_streamer &&
1010 (env_var_as_boolean("INTEL_USE_HW_BT", false) ||
1011 env_var_as_boolean("INTEL_USE_GATHER", false));
1012
1013 ctx->VertexProgram._MaintainTnlProgram = true;
1014 ctx->FragmentProgram._MaintainTexEnvProgram = true;
1015
1016 brw_draw_init( brw );
1017
1018 if ((flags & __DRI_CTX_FLAG_DEBUG) != 0) {
1019 /* Turn on some extra GL_ARB_debug_output generation. */
1020 brw->perf_debug = true;
1021 }
1022
1023 if ((flags & __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS) != 0)
1024 ctx->Const.ContextFlags |= GL_CONTEXT_FLAG_ROBUST_ACCESS_BIT_ARB;
1025
1026 if (INTEL_DEBUG & DEBUG_SHADER_TIME)
1027 brw_init_shader_time(brw);
1028
1029 _mesa_compute_version(ctx);
1030
1031 _mesa_initialize_dispatch_tables(ctx);
1032 _mesa_initialize_vbo_vtxfmt(ctx);
1033
1034 if (ctx->Extensions.AMD_performance_monitor) {
1035 brw_init_performance_monitors(brw);
1036 }
1037
1038 vbo_use_buffer_objects(ctx);
1039 vbo_always_unmap_buffers(ctx);
1040
1041 return true;
1042 }
1043
1044 void
1045 intelDestroyContext(__DRIcontext * driContextPriv)
1046 {
1047 struct brw_context *brw =
1048 (struct brw_context *) driContextPriv->driverPrivate;
1049 struct gl_context *ctx = &brw->ctx;
1050
1051 /* Dump a final BMP in case the application doesn't call SwapBuffers */
1052 if (INTEL_DEBUG & DEBUG_AUB) {
1053 intel_batchbuffer_flush(brw);
1054 aub_dump_bmp(&brw->ctx);
1055 }
1056
1057 _mesa_meta_free(&brw->ctx);
1058 brw_meta_fast_clear_free(brw);
1059
1060 if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
1061 /* Force a report. */
1062 brw->shader_time.report_time = 0;
1063
1064 brw_collect_and_report_shader_time(brw);
1065 brw_destroy_shader_time(brw);
1066 }
1067
1068 brw_destroy_state(brw);
1069 brw_draw_destroy(brw);
1070
1071 drm_intel_bo_unreference(brw->curbe.curbe_bo);
1072 if (brw->vs.base.scratch_bo)
1073 drm_intel_bo_unreference(brw->vs.base.scratch_bo);
1074 if (brw->gs.base.scratch_bo)
1075 drm_intel_bo_unreference(brw->gs.base.scratch_bo);
1076 if (brw->wm.base.scratch_bo)
1077 drm_intel_bo_unreference(brw->wm.base.scratch_bo);
1078
1079 gen7_reset_hw_bt_pool_offsets(brw);
1080 drm_intel_bo_unreference(brw->hw_bt_pool.bo);
1081 brw->hw_bt_pool.bo = NULL;
1082
1083 drm_intel_gem_context_destroy(brw->hw_ctx);
1084
1085 if (ctx->swrast_context) {
1086 _swsetup_DestroyContext(&brw->ctx);
1087 _tnl_DestroyContext(&brw->ctx);
1088 }
1089 _vbo_DestroyContext(&brw->ctx);
1090
1091 if (ctx->swrast_context)
1092 _swrast_DestroyContext(&brw->ctx);
1093
1094 brw_fini_pipe_control(brw);
1095 intel_batchbuffer_free(brw);
1096
1097 drm_intel_bo_unreference(brw->throttle_batch[1]);
1098 drm_intel_bo_unreference(brw->throttle_batch[0]);
1099 brw->throttle_batch[1] = NULL;
1100 brw->throttle_batch[0] = NULL;
1101
1102 driDestroyOptionCache(&brw->optionCache);
1103
1104 /* free the Mesa context */
1105 _mesa_free_context_data(&brw->ctx);
1106
1107 ralloc_free(brw);
1108 driContextPriv->driverPrivate = NULL;
1109 }
1110
1111 GLboolean
1112 intelUnbindContext(__DRIcontext * driContextPriv)
1113 {
1114    /* Unset the current context and dispatch table */
1115 _mesa_make_current(NULL, NULL, NULL);
1116
1117 return true;
1118 }
1119
1120 /**
1121  * Fixes up the context for GLES2/3 with our default-to-sRGB-capable behavior
1122 * on window system framebuffers.
1123 *
1124 * Desktop GL is fairly reasonable in its handling of sRGB: You can ask if
1125 * your renderbuffer can do sRGB encode, and you can flip a switch that does
1126 * sRGB encode if the renderbuffer can handle it. You can ask specifically
1127 * for a visual where you're guaranteed to be capable, but it turns out that
1128 * everyone just makes all their ARGB8888 visuals capable and doesn't offer
1129 * incapable ones, because there's no difference between the two in resources
1130 * used. Applications thus get built that accidentally rely on the default
1131 * visual choice being sRGB, so we make ours sRGB capable. Everything sounds
1132 * great...
1133 *
1134 * But for GLES2/3, they decided that it was silly to not turn on sRGB encode
1135 * for sRGB renderbuffers you made with the GL_EXT_texture_sRGB equivalent.
1136 * So they removed the enable knob and made it "if the renderbuffer is sRGB
1137 * capable, do sRGB encode". Then, for your window system renderbuffers, you
1138 * can ask for sRGB visuals and get sRGB encode, or not ask for sRGB visuals
1139 * and get no sRGB encode (assuming that both kinds of visual are available).
1140 * Thus our choice to support sRGB by default on our visuals for desktop would
1141 * result in broken rendering of GLES apps that aren't expecting sRGB encode.
1142 *
1143 * Unfortunately, renderbuffer setup happens before a context is created. So
1144 * in intel_screen.c we always set up sRGB, and here, if you're a GLES2/3
1145 * context (without an sRGB visual, though we don't have sRGB visuals exposed
1146 * yet), we go turn that back off before anyone finds out.
1147 */
1148 static void
1149 intel_gles3_srgb_workaround(struct brw_context *brw,
1150 struct gl_framebuffer *fb)
1151 {
1152 struct gl_context *ctx = &brw->ctx;
1153
1154 if (_mesa_is_desktop_gl(ctx) || !fb->Visual.sRGBCapable)
1155 return;
1156
1157 /* Some day when we support the sRGB capable bit on visuals available for
1158 * GLES, we'll need to respect that and not disable things here.
1159 */
1160 fb->Visual.sRGBCapable = false;
1161 for (int i = 0; i < BUFFER_COUNT; i++) {
1162 struct gl_renderbuffer *rb = fb->Attachment[i].Renderbuffer;
1163 if (rb)
1164 rb->Format = _mesa_get_srgb_format_linear(rb->Format);
1165 }
1166 }
1167
1168 GLboolean
1169 intelMakeCurrent(__DRIcontext * driContextPriv,
1170 __DRIdrawable * driDrawPriv,
1171 __DRIdrawable * driReadPriv)
1172 {
1173 struct brw_context *brw;
1174 GET_CURRENT_CONTEXT(curCtx);
1175
1176 if (driContextPriv)
1177 brw = (struct brw_context *) driContextPriv->driverPrivate;
1178 else
1179 brw = NULL;
1180
1181 /* According to the glXMakeCurrent() man page: "Pending commands to
1182 * the previous context, if any, are flushed before it is released."
1183 * But only flush if we're actually changing contexts.
1184 */
1185 if (brw_context(curCtx) && brw_context(curCtx) != brw) {
1186 _mesa_flush(curCtx);
1187 }
1188
1189 if (driContextPriv) {
1190 struct gl_context *ctx = &brw->ctx;
1191 struct gl_framebuffer *fb, *readFb;
1192
1193 if (driDrawPriv == NULL) {
1194 fb = _mesa_get_incomplete_framebuffer();
1195 } else {
1196 fb = driDrawPriv->driverPrivate;
1197 driContextPriv->dri2.draw_stamp = driDrawPriv->dri2.stamp - 1;
1198 }
1199
1200 if (driReadPriv == NULL) {
1201 readFb = _mesa_get_incomplete_framebuffer();
1202 } else {
1203 readFb = driReadPriv->driverPrivate;
1204 driContextPriv->dri2.read_stamp = driReadPriv->dri2.stamp - 1;
1205 }
1206
1207 /* The sRGB workaround changes the renderbuffer's format. We must change
1208        * the format before the renderbuffer's miptree gets allocated, otherwise
1209 * the formats of the renderbuffer and its miptree will differ.
1210 */
1211 intel_gles3_srgb_workaround(brw, fb);
1212 intel_gles3_srgb_workaround(brw, readFb);
1213
1214 /* If the context viewport hasn't been initialized, force a call out to
1215 * the loader to get buffers so we have a drawable size for the initial
1216 * viewport. */
1217 if (!brw->ctx.ViewportInitialized)
1218 intel_prepare_render(brw);
1219
1220 _mesa_make_current(ctx, fb, readFb);
1221 } else {
1222 _mesa_make_current(NULL, NULL, NULL);
1223 }
1224
1225 return true;
1226 }
1227
1228 void
1229 intel_resolve_for_dri2_flush(struct brw_context *brw,
1230 __DRIdrawable *drawable)
1231 {
1232 if (brw->gen < 6) {
1233 /* MSAA and fast color clear are not supported, so don't waste time
1234 * checking whether a resolve is needed.
1235 */
1236 return;
1237 }
1238
1239 struct gl_framebuffer *fb = drawable->driverPrivate;
1240 struct intel_renderbuffer *rb;
1241
1242 /* Usually, only the back buffer will need to be downsampled. However,
1243 * the front buffer will also need it if the user has rendered into it.
1244 */
1245 static const gl_buffer_index buffers[2] = {
1246 BUFFER_BACK_LEFT,
1247 BUFFER_FRONT_LEFT,
1248 };
1249
1250 for (int i = 0; i < 2; ++i) {
1251 rb = intel_get_renderbuffer(fb, buffers[i]);
1252 if (rb == NULL || rb->mt == NULL)
1253 continue;
1254 if (rb->mt->num_samples <= 1)
1255 intel_miptree_resolve_color(brw, rb->mt, 0);
1256 else
1257 intel_renderbuffer_downsample(brw, rb);
1258 }
1259 }
1260
1261 static unsigned
1262 intel_bits_per_pixel(const struct intel_renderbuffer *rb)
1263 {
1264 return _mesa_get_format_bytes(intel_rb_format(rb)) * 8;
1265 }
1266
1267 static void
1268 intel_query_dri2_buffers(struct brw_context *brw,
1269 __DRIdrawable *drawable,
1270 __DRIbuffer **buffers,
1271 int *count);
1272
1273 static void
1274 intel_process_dri2_buffer(struct brw_context *brw,
1275 __DRIdrawable *drawable,
1276 __DRIbuffer *buffer,
1277 struct intel_renderbuffer *rb,
1278 const char *buffer_name);
1279
1280 static void
1281 intel_update_image_buffers(struct brw_context *brw, __DRIdrawable *drawable);
1282
1283 static void
1284 intel_update_dri2_buffers(struct brw_context *brw, __DRIdrawable *drawable)
1285 {
1286 struct gl_framebuffer *fb = drawable->driverPrivate;
1287 struct intel_renderbuffer *rb;
1288 __DRIbuffer *buffers = NULL;
1289 int i, count;
1290 const char *region_name;
1291
1292 /* Set this up front, so that in case our buffers get invalidated
1293 * while we're getting new buffers, we don't clobber the stamp and
1294 * thus ignore the invalidate. */
1295 drawable->lastStamp = drawable->dri2.stamp;
1296
1297 if (unlikely(INTEL_DEBUG & DEBUG_DRI))
1298 fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);
1299
1300 intel_query_dri2_buffers(brw, drawable, &buffers, &count);
1301
1302 if (buffers == NULL)
1303 return;
1304
1305 for (i = 0; i < count; i++) {
1306 switch (buffers[i].attachment) {
1307 case __DRI_BUFFER_FRONT_LEFT:
1308 rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
1309 region_name = "dri2 front buffer";
1310 break;
1311
1312 case __DRI_BUFFER_FAKE_FRONT_LEFT:
1313 rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
1314 region_name = "dri2 fake front buffer";
1315 break;
1316
1317 case __DRI_BUFFER_BACK_LEFT:
1318 rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
1319 region_name = "dri2 back buffer";
1320 break;
1321
1322 case __DRI_BUFFER_DEPTH:
1323 case __DRI_BUFFER_HIZ:
1324 case __DRI_BUFFER_DEPTH_STENCIL:
1325 case __DRI_BUFFER_STENCIL:
1326 case __DRI_BUFFER_ACCUM:
1327 default:
1328 fprintf(stderr,
1329 "unhandled buffer attach event, attachment type %d\n",
1330 buffers[i].attachment);
1331 return;
1332 }
1333
1334 intel_process_dri2_buffer(brw, drawable, &buffers[i], rb, region_name);
1335 }
1336
1337 }
1338
1339 void
1340 intel_update_renderbuffers(__DRIcontext *context, __DRIdrawable *drawable)
1341 {
1342 struct brw_context *brw = context->driverPrivate;
1343 __DRIscreen *screen = brw->intelScreen->driScrnPriv;
1344
1345 /* Set this up front, so that in case our buffers get invalidated
1346 * while we're getting new buffers, we don't clobber the stamp and
1347 * thus ignore the invalidate. */
1348 drawable->lastStamp = drawable->dri2.stamp;
1349
1350 if (unlikely(INTEL_DEBUG & DEBUG_DRI))
1351 fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);
1352
1353 if (screen->image.loader)
1354 intel_update_image_buffers(brw, drawable);
1355 else
1356 intel_update_dri2_buffers(brw, drawable);
1357
1358 driUpdateFramebufferSize(&brw->ctx, drawable);
1359 }
1360
1361 /**
1362  * intel_prepare_render should be called anywhere that current read/drawbuffer
1363 * state is required.
1364 */
1365 void
1366 intel_prepare_render(struct brw_context *brw)
1367 {
1368 struct gl_context *ctx = &brw->ctx;
1369 __DRIcontext *driContext = brw->driContext;
1370 __DRIdrawable *drawable;
1371
1372 drawable = driContext->driDrawablePriv;
1373 if (drawable && drawable->dri2.stamp != driContext->dri2.draw_stamp) {
1374 if (drawable->lastStamp != drawable->dri2.stamp)
1375 intel_update_renderbuffers(driContext, drawable);
1376 driContext->dri2.draw_stamp = drawable->dri2.stamp;
1377 }
1378
1379 drawable = driContext->driReadablePriv;
1380 if (drawable && drawable->dri2.stamp != driContext->dri2.read_stamp) {
1381 if (drawable->lastStamp != drawable->dri2.stamp)
1382 intel_update_renderbuffers(driContext, drawable);
1383 driContext->dri2.read_stamp = drawable->dri2.stamp;
1384 }
1385
1386 /* If we're currently rendering to the front buffer, the rendering
1387 * that will happen next will probably dirty the front buffer. So
1388 * mark it as dirty here.
1389 */
1390 if (_mesa_is_front_buffer_drawing(ctx->DrawBuffer))
1391 brw->front_buffer_dirty = true;
1392 }
1393
1394 /**
1395 * \brief Query DRI2 to obtain a DRIdrawable's buffers.
1396 *
1397 * To determine which DRI buffers to request, examine the renderbuffers
1398 * attached to the drawable's framebuffer. Then request the buffers with
1399 * DRI2GetBuffers() or DRI2GetBuffersWithFormat().
1400 *
1401 * This is called from intel_update_renderbuffers().
1402 *
1403 * \param drawable Drawable whose buffers are queried.
1404 * \param buffers [out] List of buffers returned by DRI2 query.
1405 * \param buffer_count [out] Number of buffers returned.
1406 *
1407 * \see intel_update_renderbuffers()
1408 * \see DRI2GetBuffers()
1409 * \see DRI2GetBuffersWithFormat()
1410 */
1411 static void
1412 intel_query_dri2_buffers(struct brw_context *brw,
1413 __DRIdrawable *drawable,
1414 __DRIbuffer **buffers,
1415 int *buffer_count)
1416 {
1417 __DRIscreen *screen = brw->intelScreen->driScrnPriv;
1418 struct gl_framebuffer *fb = drawable->driverPrivate;
1419 int i = 0;
1420 unsigned attachments[8];
1421
1422 struct intel_renderbuffer *front_rb;
1423 struct intel_renderbuffer *back_rb;
1424
1425 front_rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
1426 back_rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
1427
1428 memset(attachments, 0, sizeof(attachments));
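   /* attachments[] is filled with (attachment token, bits-per-pixel) pairs,
    * which is why getBuffersWithFormat() below is passed a count of i / 2.
    */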
1429 if ((_mesa_is_front_buffer_drawing(fb) ||
1430 _mesa_is_front_buffer_reading(fb) ||
1431 !back_rb) && front_rb) {
1432 /* If a fake front buffer is in use, then querying for
1433 * __DRI_BUFFER_FRONT_LEFT will cause the server to copy the image from
1434 * the real front buffer to the fake front buffer. So before doing the
1435 * query, we need to make sure all the pending drawing has landed in the
1436 * real front buffer.
1437 */
1438 intel_batchbuffer_flush(brw);
1439 intel_flush_front(&brw->ctx);
1440
1441 attachments[i++] = __DRI_BUFFER_FRONT_LEFT;
1442 attachments[i++] = intel_bits_per_pixel(front_rb);
1443 } else if (front_rb && brw->front_buffer_dirty) {
1444 /* We have pending front buffer rendering, but we aren't querying for a
1445 * front buffer. If the front buffer we have is a fake front buffer,
1446 * the X server is going to throw it away when it processes the query.
1447 * So before doing the query, make sure all the pending drawing has
1448 * landed in the real front buffer.
1449 */
1450 intel_batchbuffer_flush(brw);
1451 intel_flush_front(&brw->ctx);
1452 }
1453
1454 if (back_rb) {
1455 attachments[i++] = __DRI_BUFFER_BACK_LEFT;
1456 attachments[i++] = intel_bits_per_pixel(back_rb);
1457 }
1458
1459 assert(i <= ARRAY_SIZE(attachments));
1460
1461 *buffers = screen->dri2.loader->getBuffersWithFormat(drawable,
1462 &drawable->w,
1463 &drawable->h,
1464 attachments, i / 2,
1465 buffer_count,
1466 drawable->loaderPrivate);
1467 }
1468
1469 /**
1470 * \brief Assign a DRI buffer's DRM region to a renderbuffer.
1471 *
1472 * This is called from intel_update_renderbuffers().
1473 *
1474 * \par Note:
1475 * DRI buffers whose attachment point is DRI2BufferStencil or
1476 * DRI2BufferDepthStencil are handled as special cases.
1477 *
1478 * \param buffer_name is a human readable name, such as "dri2 front buffer",
1479 * that is passed to drm_intel_bo_gem_create_from_name().
1480 *
1481 * \see intel_update_renderbuffers()
1482 */
1483 static void
1484 intel_process_dri2_buffer(struct brw_context *brw,
1485 __DRIdrawable *drawable,
1486 __DRIbuffer *buffer,
1487 struct intel_renderbuffer *rb,
1488 const char *buffer_name)
1489 {
1490 struct gl_framebuffer *fb = drawable->driverPrivate;
1491 drm_intel_bo *bo;
1492
1493 if (!rb)
1494 return;
1495
1496 unsigned num_samples = rb->Base.Base.NumSamples;
1497
1498 /* We try to avoid closing and reopening the same BO name, because the first
1499 * use of a mapping of the buffer involves a bunch of page faulting which is
1500 * moderately expensive.
1501 */
1502 struct intel_mipmap_tree *last_mt;
1503 if (num_samples == 0)
1504 last_mt = rb->mt;
1505 else
1506 last_mt = rb->singlesample_mt;
1507
1508 uint32_t old_name = 0;
1509 if (last_mt) {
1510 /* The bo already has a name because the miptree was created by a
1511 * previous call to intel_process_dri2_buffer(). If a bo already has a
1512 * name, then drm_intel_bo_flink() is a low-cost getter. It does not
1513 * create a new name.
1514 */
1515 drm_intel_bo_flink(last_mt->bo, &old_name);
1516 }
1517
1518 if (old_name == buffer->name)
1519 return;
1520
1521 if (unlikely(INTEL_DEBUG & DEBUG_DRI)) {
1522 fprintf(stderr,
1523 "attaching buffer %d, at %d, cpp %d, pitch %d\n",
1524 buffer->name, buffer->attachment,
1525 buffer->cpp, buffer->pitch);
1526 }
1527
1528 bo = drm_intel_bo_gem_create_from_name(brw->bufmgr, buffer_name,
1529 buffer->name);
1530 if (!bo) {
1531 fprintf(stderr,
1532 "Failed to open BO for returned DRI2 buffer "
1533 "(%dx%d, %s, named %d).\n"
1534 "This is likely a bug in the X Server that will lead to a "
1535 "crash soon.\n",
1536 drawable->w, drawable->h, buffer_name, buffer->name);
1537 return;
1538 }
1539
1540 intel_update_winsys_renderbuffer_miptree(brw, rb, bo,
1541 drawable->w, drawable->h,
1542 buffer->pitch);
1543
1544 if (_mesa_is_front_buffer_drawing(fb) &&
1545 (buffer->attachment == __DRI_BUFFER_FRONT_LEFT ||
1546 buffer->attachment == __DRI_BUFFER_FAKE_FRONT_LEFT) &&
1547 rb->Base.Base.NumSamples > 1) {
1548 intel_renderbuffer_upsample(brw, rb);
1549 }
1550
1551 assert(rb->mt);
1552
1553 drm_intel_bo_unreference(bo);
1554 }
1555
1556 /**
1557 * \brief Query DRI image loader to obtain a DRIdrawable's buffers.
1558 *
1559 * To determine which DRI buffers to request, examine the renderbuffers
1560 * attached to the drawable's framebuffer. Then request the buffers from
1561 * the image loader
1562  * the image loader.
1563 * This is called from intel_update_renderbuffers().
1564 *
1565 * \param drawable Drawable whose buffers are queried.
1566 * \param buffers [out] List of buffers returned by DRI2 query.
1567 * \param buffer_count [out] Number of buffers returned.
1568 *
1569 * \see intel_update_renderbuffers()
1570 */
1571
1572 static void
1573 intel_update_image_buffer(struct brw_context *intel,
1574 __DRIdrawable *drawable,
1575 struct intel_renderbuffer *rb,
1576 __DRIimage *buffer,
1577 enum __DRIimageBufferMask buffer_type)
1578 {
1579 struct gl_framebuffer *fb = drawable->driverPrivate;
1580
1581 if (!rb || !buffer->bo)
1582 return;
1583
1584 unsigned num_samples = rb->Base.Base.NumSamples;
1585
1586 /* Check and see if we're already bound to the right
1587 * buffer object
1588 */
1589 struct intel_mipmap_tree *last_mt;
1590 if (num_samples == 0)
1591 last_mt = rb->mt;
1592 else
1593 last_mt = rb->singlesample_mt;
1594
1595 if (last_mt && last_mt->bo == buffer->bo)
1596 return;
1597
1598 intel_update_winsys_renderbuffer_miptree(intel, rb, buffer->bo,
1599 buffer->width, buffer->height,
1600 buffer->pitch);
1601
1602 if (_mesa_is_front_buffer_drawing(fb) &&
1603 buffer_type == __DRI_IMAGE_BUFFER_FRONT &&
1604 rb->Base.Base.NumSamples > 1) {
1605 intel_renderbuffer_upsample(intel, rb);
1606 }
1607 }
1608
1609 static void
1610 intel_update_image_buffers(struct brw_context *brw, __DRIdrawable *drawable)
1611 {
1612 struct gl_framebuffer *fb = drawable->driverPrivate;
1613 __DRIscreen *screen = brw->intelScreen->driScrnPriv;
1614 struct intel_renderbuffer *front_rb;
1615 struct intel_renderbuffer *back_rb;
1616 struct __DRIimageList images;
1617 unsigned int format;
1618 uint32_t buffer_mask = 0;
1619
1620 front_rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
1621 back_rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
1622
1623 if (back_rb)
1624 format = intel_rb_format(back_rb);
1625 else if (front_rb)
1626 format = intel_rb_format(front_rb);
1627 else
1628 return;
1629
1630 if (front_rb && (_mesa_is_front_buffer_drawing(fb) ||
1631 _mesa_is_front_buffer_reading(fb) || !back_rb)) {
1632 buffer_mask |= __DRI_IMAGE_BUFFER_FRONT;
1633 }
1634
1635 if (back_rb)
1636 buffer_mask |= __DRI_IMAGE_BUFFER_BACK;
1637
1638 (*screen->image.loader->getBuffers) (drawable,
1639 driGLFormatToImageFormat(format),
1640 &drawable->dri2.stamp,
1641 drawable->loaderPrivate,
1642 buffer_mask,
1643 &images);
1644
1645 if (images.image_mask & __DRI_IMAGE_BUFFER_FRONT) {
1646 drawable->w = images.front->width;
1647 drawable->h = images.front->height;
1648 intel_update_image_buffer(brw,
1649 drawable,
1650 front_rb,
1651 images.front,
1652 __DRI_IMAGE_BUFFER_FRONT);
1653 }
1654 if (images.image_mask & __DRI_IMAGE_BUFFER_BACK) {
1655 drawable->w = images.back->width;
1656 drawable->h = images.back->height;
1657 intel_update_image_buffer(brw,
1658 drawable,
1659 back_rb,
1660 images.back,
1661 __DRI_IMAGE_BUFFER_BACK);
1662 }
1663 }