i965: Don't enable reset notification support on Gen4-5.
[mesa.git] src/mesa/drivers/dri/i965/intel_screen.c
1 /**************************************************************************
2 *
3 * Copyright 2003 VMware, Inc.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28 #include <errno.h>
29 #include <time.h>
30 #include <unistd.h>
31 #include "main/glheader.h"
32 #include "main/context.h"
33 #include "main/framebuffer.h"
34 #include "main/renderbuffer.h"
35 #include "main/texobj.h"
36 #include "main/hash.h"
37 #include "main/fbobject.h"
38 #include "main/version.h"
39 #include "swrast/s_renderbuffer.h"
40 #include "glsl/ralloc.h"
41
42 #include "utils.h"
43 #include "xmlpool.h"
44
45 static const __DRIconfigOptionsExtension brw_config_options = {
46 .base = { __DRI_CONFIG_OPTIONS, 1 },
47 .xml =
48 DRI_CONF_BEGIN
49 DRI_CONF_SECTION_PERFORMANCE
50 DRI_CONF_VBLANK_MODE(DRI_CONF_VBLANK_ALWAYS_SYNC)
51 /* Options correspond to DRI_CONF_BO_REUSE_DISABLED,
52 * DRI_CONF_BO_REUSE_ALL
53 */
54 DRI_CONF_OPT_BEGIN_V(bo_reuse, enum, 1, "0:1")
55 DRI_CONF_DESC_BEGIN(en, "Buffer object reuse")
56 DRI_CONF_ENUM(0, "Disable buffer object reuse")
57 DRI_CONF_ENUM(1, "Enable reuse of all sizes of buffer objects")
58 DRI_CONF_DESC_END
59 DRI_CONF_OPT_END
60
61 DRI_CONF_OPT_BEGIN_B(hiz, "true")
62 DRI_CONF_DESC(en, "Enable Hierarchical Z on gen6+")
63 DRI_CONF_OPT_END
64
65 DRI_CONF_OPT_BEGIN_B(disable_derivative_optimization, "false")
66 DRI_CONF_DESC(en, "Derivatives with finer granularity by default")
67 DRI_CONF_OPT_END
68 DRI_CONF_SECTION_END
69
70 DRI_CONF_SECTION_QUALITY
71 DRI_CONF_FORCE_S3TC_ENABLE("false")
72
73 DRI_CONF_OPT_BEGIN(clamp_max_samples, int, -1)
74 DRI_CONF_DESC(en, "Clamp the value of GL_MAX_SAMPLES to the "
75 "given integer. If negative, then do not clamp.")
76 DRI_CONF_OPT_END
77 DRI_CONF_SECTION_END
78
79 DRI_CONF_SECTION_DEBUG
80 DRI_CONF_NO_RAST("false")
81 DRI_CONF_ALWAYS_FLUSH_BATCH("false")
82 DRI_CONF_ALWAYS_FLUSH_CACHE("false")
83 DRI_CONF_DISABLE_THROTTLING("false")
84 DRI_CONF_FORCE_GLSL_EXTENSIONS_WARN("false")
85 DRI_CONF_DISABLE_GLSL_LINE_CONTINUATIONS("false")
86 DRI_CONF_DISABLE_BLEND_FUNC_EXTENDED("false")
87
88 DRI_CONF_OPT_BEGIN_B(shader_precompile, "true")
89 DRI_CONF_DESC(en, "Perform code generation at shader link time.")
90 DRI_CONF_OPT_END
91 DRI_CONF_SECTION_END
92 DRI_CONF_END
93 };
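/* The options above are exposed through driconf.  Purely as an illustration
 * (not part of the driver), a user could override them from ~/.drirc with a
 * snippet along these lines, using the option names defined in this table:
 *
 *   <driconf>
 *     <device driver="i965">
 *       <application name="Default">
 *         <option name="bo_reuse" value="1"/>
 *         <option name="hiz" value="true"/>
 *       </application>
 *     </device>
 *   </driconf>
 */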
94
95 #include "intel_batchbuffer.h"
96 #include "intel_buffers.h"
97 #include "intel_bufmgr.h"
98 #include "intel_chipset.h"
99 #include "intel_fbo.h"
100 #include "intel_mipmap_tree.h"
101 #include "intel_screen.h"
102 #include "intel_tex.h"
103 #include "intel_regions.h"
104
105 #include "brw_context.h"
106
107 #include "i915_drm.h"
108
109 /**
110 * For debugging purposes, this returns a time in seconds.
111 */
112 double
113 get_time(void)
114 {
115 struct timespec tp;
116
117 clock_gettime(CLOCK_MONOTONIC, &tp);
118
119 return tp.tv_sec + tp.tv_nsec / 1000000000.0;
120 }
121
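/**
 * Dump the current color draw buffers into the AUB trace as BMP images.
 * Only reached when the "aub" debug flag is set (see the INTEL_DEBUG check
 * in intel_dri2_flush_with_flags below).
 */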
122 void
123 aub_dump_bmp(struct gl_context *ctx)
124 {
125 struct gl_framebuffer *fb = ctx->DrawBuffer;
126
127 for (int i = 0; i < fb->_NumColorDrawBuffers; i++) {
128 struct intel_renderbuffer *irb =
129 intel_renderbuffer(fb->_ColorDrawBuffers[i]);
130
131 if (irb && irb->mt) {
132 enum aub_dump_bmp_format format;
133
134 switch (irb->Base.Base.Format) {
135 case MESA_FORMAT_B8G8R8A8_UNORM:
136 case MESA_FORMAT_B8G8R8X8_UNORM:
137 format = AUB_DUMP_BMP_FORMAT_ARGB_8888;
138 break;
139 default:
140 continue;
141 }
142
143 assert(irb->mt->region->pitch % irb->mt->region->cpp == 0);
144 drm_intel_gem_bo_aub_dump_bmp(irb->mt->region->bo,
145 irb->draw_x,
146 irb->draw_y,
147 irb->Base.Base.Width,
148 irb->Base.Base.Height,
149 format,
150 irb->mt->region->pitch,
151 0);
152 }
153 }
154 }
155
156 static const __DRItexBufferExtension intelTexBufferExtension = {
157 .base = { __DRI_TEX_BUFFER, 3 },
158
159 .setTexBuffer = intelSetTexBuffer,
160 .setTexBuffer2 = intelSetTexBuffer2,
161 .releaseTexBuffer = NULL,
162 };
163
164 static void
165 intel_dri2_flush_with_flags(__DRIcontext *cPriv,
166 __DRIdrawable *dPriv,
167 unsigned flags,
168 enum __DRI2throttleReason reason)
169 {
170 struct brw_context *brw = cPriv->driverPrivate;
171
172 if (!brw)
173 return;
174
175 struct gl_context *ctx = &brw->ctx;
176
177 FLUSH_VERTICES(ctx, 0);
178
179 if (flags & __DRI2_FLUSH_DRAWABLE)
180 intel_resolve_for_dri2_flush(brw, dPriv);
181
182 if (reason == __DRI2_THROTTLE_SWAPBUFFER ||
183 reason == __DRI2_THROTTLE_FLUSHFRONT) {
184 brw->need_throttle = true;
185 }
186
187 intel_batchbuffer_flush(brw);
188
189 if (INTEL_DEBUG & DEBUG_AUB) {
190 aub_dump_bmp(ctx);
191 }
192 }
193
194 /**
195 * Provides compatibility with loaders that only support the older (version
196 * 1-3) flush interface.
197 *
198 * That includes libGL up to Mesa 9.0, and the X Server at least up to 1.13.
199 */
200 static void
201 intel_dri2_flush(__DRIdrawable *drawable)
202 {
203 intel_dri2_flush_with_flags(drawable->driContextPriv, drawable,
204 __DRI2_FLUSH_DRAWABLE,
205 __DRI2_THROTTLE_SWAPBUFFER);
206 }
207
208 static const struct __DRI2flushExtensionRec intelFlushExtension = {
209 .base = { __DRI2_FLUSH, 4 },
210
211 .flush = intel_dri2_flush,
212 .invalidate = dri2InvalidateDrawable,
213 .flush_with_flags = intel_dri2_flush_with_flags,
214 };
215
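/* Table of fourcc formats importable through the DRI image extension.  Each
 * entry lists the fourcc, its __DRI_IMAGE_COMPONENTS_* layout, the number of
 * planes, and one tuple per plane.  As consumed by intel_from_planar() and
 * intel_create_image_from_fds() below, a plane tuple is
 * { buffer_index, width_shift, height_shift, dri_format, cpp }: the
 * dimensions of plane n are the image dimensions shifted right by the
 * respective *_shift values.
 */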
216 static struct intel_image_format intel_image_formats[] = {
217 { __DRI_IMAGE_FOURCC_ARGB8888, __DRI_IMAGE_COMPONENTS_RGBA, 1,
218 { { 0, 0, 0, __DRI_IMAGE_FORMAT_ARGB8888, 4 } } },
219
220 { __DRI_IMAGE_FOURCC_SARGB8888, __DRI_IMAGE_COMPONENTS_RGBA, 1,
221 { { 0, 0, 0, __DRI_IMAGE_FORMAT_SARGB8, 4 } } },
222
223 { __DRI_IMAGE_FOURCC_XRGB8888, __DRI_IMAGE_COMPONENTS_RGB, 1,
224 { { 0, 0, 0, __DRI_IMAGE_FORMAT_XRGB8888, 4 }, } },
225
226 { __DRI_IMAGE_FOURCC_RGB565, __DRI_IMAGE_COMPONENTS_RGB, 1,
227 { { 0, 0, 0, __DRI_IMAGE_FORMAT_RGB565, 2 } } },
228
229 { __DRI_IMAGE_FOURCC_YUV410, __DRI_IMAGE_COMPONENTS_Y_U_V, 3,
230 { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
231 { 1, 2, 2, __DRI_IMAGE_FORMAT_R8, 1 },
232 { 2, 2, 2, __DRI_IMAGE_FORMAT_R8, 1 } } },
233
234 { __DRI_IMAGE_FOURCC_YUV411, __DRI_IMAGE_COMPONENTS_Y_U_V, 3,
235 { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
236 { 1, 2, 0, __DRI_IMAGE_FORMAT_R8, 1 },
237 { 2, 2, 0, __DRI_IMAGE_FORMAT_R8, 1 } } },
238
239 { __DRI_IMAGE_FOURCC_YUV420, __DRI_IMAGE_COMPONENTS_Y_U_V, 3,
240 { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
241 { 1, 1, 1, __DRI_IMAGE_FORMAT_R8, 1 },
242 { 2, 1, 1, __DRI_IMAGE_FORMAT_R8, 1 } } },
243
244 { __DRI_IMAGE_FOURCC_YUV422, __DRI_IMAGE_COMPONENTS_Y_U_V, 3,
245 { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
246 { 1, 1, 0, __DRI_IMAGE_FORMAT_R8, 1 },
247 { 2, 1, 0, __DRI_IMAGE_FORMAT_R8, 1 } } },
248
249 { __DRI_IMAGE_FOURCC_YUV444, __DRI_IMAGE_COMPONENTS_Y_U_V, 3,
250 { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
251 { 1, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
252 { 2, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 } } },
253
254 { __DRI_IMAGE_FOURCC_NV12, __DRI_IMAGE_COMPONENTS_Y_UV, 2,
255 { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
256 { 1, 1, 1, __DRI_IMAGE_FORMAT_GR88, 2 } } },
257
258 { __DRI_IMAGE_FOURCC_NV16, __DRI_IMAGE_COMPONENTS_Y_UV, 2,
259 { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
260 { 1, 1, 0, __DRI_IMAGE_FORMAT_GR88, 2 } } },
261
262    /* For YUYV buffers, we set up two overlapping DRI images and treat
263     * them as planar buffers in the compositors.  Plane 0 is GR88 and
264     * samples YU or YV pairs, placing Y into the R component, while
265     * plane 1 is ARGB and samples whole YUYV clusters, placing U into
266     * the G component and V into A.  This lets the texture sampler
267     * interpolate the Y components correctly when sampling from
268     * plane 0, and interpolate U and V correctly when sampling from
269     * plane 1. */
270 { __DRI_IMAGE_FOURCC_YUYV, __DRI_IMAGE_COMPONENTS_Y_XUXV, 2,
271 { { 0, 0, 0, __DRI_IMAGE_FORMAT_GR88, 2 },
272 { 0, 1, 0, __DRI_IMAGE_FORMAT_ARGB8888, 4 } } }
273 };
274
275 static struct intel_image_format *
276 intel_image_format_lookup(int fourcc)
277 {
278 struct intel_image_format *f = NULL;
279
280 for (unsigned i = 0; i < ARRAY_SIZE(intel_image_formats); i++) {
281 if (intel_image_formats[i].fourcc == fourcc) {
282 f = &intel_image_formats[i];
283 break;
284 }
285 }
286
287 return f;
288 }
289
290 static __DRIimage *
291 intel_allocate_image(int dri_format, void *loaderPrivate)
292 {
293 __DRIimage *image;
294
295 image = calloc(1, sizeof *image);
296 if (image == NULL)
297 return NULL;
298
299 image->dri_format = dri_format;
300 image->offset = 0;
301
302 image->format = driImageFormatToGLFormat(dri_format);
303 if (dri_format != __DRI_IMAGE_FORMAT_NONE &&
304 image->format == MESA_FORMAT_NONE) {
305 free(image);
306 return NULL;
307 }
308
309 image->internal_format = _mesa_get_format_base_format(image->format);
310 image->data = loaderPrivate;
311
312 return image;
313 }
314
315 /**
316 * Sets up a DRIImage structure to point to our shared image in a region
317 */
318 static void
319 intel_setup_image_from_mipmap_tree(struct brw_context *brw, __DRIimage *image,
320 struct intel_mipmap_tree *mt, GLuint level,
321 GLuint zoffset)
322 {
323 unsigned int draw_x, draw_y;
324 uint32_t mask_x, mask_y;
325
326 intel_miptree_make_shareable(brw, mt);
327
328 intel_miptree_check_level_layer(mt, level, zoffset);
329
330 intel_region_get_tile_masks(mt->region, &mask_x, &mask_y, false);
331 intel_miptree_get_image_offset(mt, level, zoffset, &draw_x, &draw_y);
332
333 image->width = minify(mt->physical_width0, level - mt->first_level);
334 image->height = minify(mt->physical_height0, level - mt->first_level);
335 image->tile_x = draw_x & mask_x;
336 image->tile_y = draw_y & mask_y;
337
338 image->offset = intel_region_get_aligned_offset(mt->region,
339 draw_x & ~mask_x,
340 draw_y & ~mask_y,
341 false);
342
343 intel_region_reference(&image->region, mt->region);
344 }
345
346 static void
347 intel_setup_image_from_dimensions(__DRIimage *image)
348 {
349 image->width = image->region->width;
350 image->height = image->region->height;
351 image->tile_x = 0;
352 image->tile_y = 0;
353 image->has_depthstencil = false;
354 }
355
356 static __DRIimage *
357 intel_create_image_from_name(__DRIscreen *screen,
358 int width, int height, int format,
359 int name, int pitch, void *loaderPrivate)
360 {
361 struct intel_screen *intelScreen = screen->driverPrivate;
362 __DRIimage *image;
363 int cpp;
364
365 image = intel_allocate_image(format, loaderPrivate);
366 if (image == NULL)
367 return NULL;
368
369 if (image->format == MESA_FORMAT_NONE)
370 cpp = 1;
371 else
372 cpp = _mesa_get_format_bytes(image->format);
373 image->region = intel_region_alloc_for_handle(intelScreen,
374 cpp, width, height,
375 pitch * cpp, name, "image");
376 if (image->region == NULL) {
377 free(image);
378 return NULL;
379 }
380
381 intel_setup_image_from_dimensions(image);
382
383 return image;
384 }
385
386 static __DRIimage *
387 intel_create_image_from_renderbuffer(__DRIcontext *context,
388 int renderbuffer, void *loaderPrivate)
389 {
390 __DRIimage *image;
391 struct brw_context *brw = context->driverPrivate;
392 struct gl_context *ctx = &brw->ctx;
393 struct gl_renderbuffer *rb;
394 struct intel_renderbuffer *irb;
395
396 rb = _mesa_lookup_renderbuffer(ctx, renderbuffer);
397 if (!rb) {
398 _mesa_error(ctx, GL_INVALID_OPERATION, "glRenderbufferExternalMESA");
399 return NULL;
400 }
401
402 irb = intel_renderbuffer(rb);
403 intel_miptree_make_shareable(brw, irb->mt);
404 image = calloc(1, sizeof *image);
405 if (image == NULL)
406 return NULL;
407
408 image->internal_format = rb->InternalFormat;
409 image->format = rb->Format;
410 image->offset = 0;
411 image->data = loaderPrivate;
412 intel_region_reference(&image->region, irb->mt->region);
413 intel_setup_image_from_dimensions(image);
414 image->dri_format = driGLFormatToImageFormat(image->format);
415 image->has_depthstencil = irb->mt->stencil_mt? true : false;
416
417 rb->NeedsFinishRenderTexture = true;
418 return image;
419 }
420
421 static __DRIimage *
422 intel_create_image_from_texture(__DRIcontext *context, int target,
423 unsigned texture, int zoffset,
424 int level,
425 unsigned *error,
426 void *loaderPrivate)
427 {
428 __DRIimage *image;
429 struct brw_context *brw = context->driverPrivate;
430 struct gl_texture_object *obj;
431 struct intel_texture_object *iobj;
432 GLuint face = 0;
433
434 obj = _mesa_lookup_texture(&brw->ctx, texture);
435 if (!obj || obj->Target != target) {
436 *error = __DRI_IMAGE_ERROR_BAD_PARAMETER;
437 return NULL;
438 }
439
440 if (target == GL_TEXTURE_CUBE_MAP)
441 face = zoffset;
442
443 _mesa_test_texobj_completeness(&brw->ctx, obj);
444 iobj = intel_texture_object(obj);
445 if (!obj->_BaseComplete || (level > 0 && !obj->_MipmapComplete)) {
446 *error = __DRI_IMAGE_ERROR_BAD_PARAMETER;
447 return NULL;
448 }
449
450 if (level < obj->BaseLevel || level > obj->_MaxLevel) {
451 *error = __DRI_IMAGE_ERROR_BAD_MATCH;
452 return NULL;
453 }
454
455 if (target == GL_TEXTURE_3D && obj->Image[face][level]->Depth < zoffset) {
456 *error = __DRI_IMAGE_ERROR_BAD_MATCH;
457 return NULL;
458 }
459 image = calloc(1, sizeof *image);
460 if (image == NULL) {
461 *error = __DRI_IMAGE_ERROR_BAD_ALLOC;
462 return NULL;
463 }
464
465 image->internal_format = obj->Image[face][level]->InternalFormat;
466 image->format = obj->Image[face][level]->TexFormat;
467 image->data = loaderPrivate;
468 intel_setup_image_from_mipmap_tree(brw, image, iobj->mt, level, zoffset);
469 image->dri_format = driGLFormatToImageFormat(image->format);
470 image->has_depthstencil = iobj->mt->stencil_mt? true : false;
471 if (image->dri_format == MESA_FORMAT_NONE) {
472 *error = __DRI_IMAGE_ERROR_BAD_PARAMETER;
473 free(image);
474 return NULL;
475 }
476
477 *error = __DRI_IMAGE_ERROR_SUCCESS;
478 return image;
479 }
480
481 static void
482 intel_destroy_image(__DRIimage *image)
483 {
484 intel_region_release(&image->region);
485 free(image);
486 }
487
488 static __DRIimage *
489 intel_create_image(__DRIscreen *screen,
490 int width, int height, int format,
491 unsigned int use,
492 void *loaderPrivate)
493 {
494 __DRIimage *image;
495 struct intel_screen *intelScreen = screen->driverPrivate;
496 uint32_t tiling;
497 int cpp;
498
499 tiling = I915_TILING_X;
500 if (use & __DRI_IMAGE_USE_CURSOR) {
501 if (width != 64 || height != 64)
502 return NULL;
503 tiling = I915_TILING_NONE;
504 }
505
506 if (use & __DRI_IMAGE_USE_LINEAR)
507 tiling = I915_TILING_NONE;
508
509 image = intel_allocate_image(format, loaderPrivate);
510 if (image == NULL)
511 return NULL;
512
513 cpp = _mesa_get_format_bytes(image->format);
514 image->region =
515 intel_region_alloc(intelScreen, tiling, cpp, width, height, true);
516 if (image->region == NULL) {
517 free(image);
518 return NULL;
519 }
520
521 intel_setup_image_from_dimensions(image);
522
523 return image;
524 }
525
526 static GLboolean
527 intel_query_image(__DRIimage *image, int attrib, int *value)
528 {
529 switch (attrib) {
530 case __DRI_IMAGE_ATTRIB_STRIDE:
531 *value = image->region->pitch;
532 return true;
533 case __DRI_IMAGE_ATTRIB_HANDLE:
534 *value = image->region->bo->handle;
535 return true;
536 case __DRI_IMAGE_ATTRIB_NAME:
537 return intel_region_flink(image->region, (uint32_t *) value);
538 case __DRI_IMAGE_ATTRIB_FORMAT:
539 *value = image->dri_format;
540 return true;
541 case __DRI_IMAGE_ATTRIB_WIDTH:
542 *value = image->region->width;
543 return true;
544 case __DRI_IMAGE_ATTRIB_HEIGHT:
545 *value = image->region->height;
546 return true;
547 case __DRI_IMAGE_ATTRIB_COMPONENTS:
548 if (image->planar_format == NULL)
549 return false;
550 *value = image->planar_format->components;
551 return true;
552 case __DRI_IMAGE_ATTRIB_FD:
553 if (drm_intel_bo_gem_export_to_prime(image->region->bo, value) == 0)
554 return true;
555 return false;
556 default:
557 return false;
558 }
559 }
560
561 static __DRIimage *
562 intel_dup_image(__DRIimage *orig_image, void *loaderPrivate)
563 {
564 __DRIimage *image;
565
566 image = calloc(1, sizeof *image);
567 if (image == NULL)
568 return NULL;
569
570 intel_region_reference(&image->region, orig_image->region);
571 if (image->region == NULL) {
572 free(image);
573 return NULL;
574 }
575
576 image->internal_format = orig_image->internal_format;
577 image->planar_format = orig_image->planar_format;
578 image->dri_format = orig_image->dri_format;
579 image->format = orig_image->format;
580 image->offset = orig_image->offset;
581 image->width = orig_image->width;
582 image->height = orig_image->height;
583 image->tile_x = orig_image->tile_x;
584 image->tile_y = orig_image->tile_y;
585 image->has_depthstencil = orig_image->has_depthstencil;
586 image->data = loaderPrivate;
587
588 memcpy(image->strides, orig_image->strides, sizeof(image->strides));
589 memcpy(image->offsets, orig_image->offsets, sizeof(image->offsets));
590
591 return image;
592 }
593
594 static GLboolean
595 intel_validate_usage(__DRIimage *image, unsigned int use)
596 {
597 if (use & __DRI_IMAGE_USE_CURSOR) {
598 if (image->region->width != 64 || image->region->height != 64)
599 return GL_FALSE;
600 }
601
602 return GL_TRUE;
603 }
604
605 static __DRIimage *
606 intel_create_image_from_names(__DRIscreen *screen,
607 int width, int height, int fourcc,
608 int *names, int num_names,
609 int *strides, int *offsets,
610 void *loaderPrivate)
611 {
612 struct intel_image_format *f = NULL;
613 __DRIimage *image;
614 int i, index;
615
616 if (screen == NULL || names == NULL || num_names != 1)
617 return NULL;
618
619 f = intel_image_format_lookup(fourcc);
620 if (f == NULL)
621 return NULL;
622
623 image = intel_create_image_from_name(screen, width, height,
624 __DRI_IMAGE_FORMAT_NONE,
625 names[0], strides[0],
626 loaderPrivate);
627
628 if (image == NULL)
629 return NULL;
630
631 image->planar_format = f;
632 for (i = 0; i < f->nplanes; i++) {
633 index = f->planes[i].buffer_index;
634 image->offsets[index] = offsets[index];
635 image->strides[index] = strides[index];
636 }
637
638 return image;
639 }
640
641 static __DRIimage *
642 intel_create_image_from_fds(__DRIscreen *screen,
643 int width, int height, int fourcc,
644 int *fds, int num_fds, int *strides, int *offsets,
645 void *loaderPrivate)
646 {
647 struct intel_screen *intelScreen = screen->driverPrivate;
648 struct intel_image_format *f;
649 uint32_t mask_x, mask_y;
650 __DRIimage *image;
651 int i, index;
652
653 if (fds == NULL || num_fds != 1)
654 return NULL;
655
656 f = intel_image_format_lookup(fourcc);
657 if (f == NULL)
658 return NULL;
659
660 if (f->nplanes == 1)
661 image = intel_allocate_image(f->planes[0].dri_format, loaderPrivate);
662 else
663 image = intel_allocate_image(__DRI_IMAGE_FORMAT_NONE, loaderPrivate);
664
665 if (image == NULL)
666 return NULL;
667
668 image->region = intel_region_alloc_for_fd(intelScreen,
669 f->planes[0].cpp, width, height, strides[0],
670 height * strides[0], fds[0], "image");
671 if (image->region == NULL) {
672 free(image);
673 return NULL;
674 }
675
676 image->planar_format = f;
677 for (i = 0; i < f->nplanes; i++) {
678 index = f->planes[i].buffer_index;
679 image->offsets[index] = offsets[index];
680 image->strides[index] = strides[index];
681 }
682
683 if (f->nplanes == 1) {
684 image->offset = image->offsets[0];
685 intel_region_get_tile_masks(image->region, &mask_x, &mask_y, false);
686 if (image->offset & mask_x)
687 _mesa_warning(NULL,
688 "intel_create_image_from_fds: offset not on tile boundary");
689 }
690
691 intel_setup_image_from_dimensions(image);
692
693 return image;
694 }
695
696 static __DRIimage *
697 intel_create_image_from_dma_bufs(__DRIscreen *screen,
698 int width, int height, int fourcc,
699 int *fds, int num_fds,
700 int *strides, int *offsets,
701 enum __DRIYUVColorSpace yuv_color_space,
702 enum __DRISampleRange sample_range,
703 enum __DRIChromaSiting horizontal_siting,
704 enum __DRIChromaSiting vertical_siting,
705 unsigned *error,
706 void *loaderPrivate)
707 {
708 __DRIimage *image;
709 struct intel_image_format *f = intel_image_format_lookup(fourcc);
710
711 /* For now only packed formats that have native sampling are supported. */
712 if (!f || f->nplanes != 1) {
713 *error = __DRI_IMAGE_ERROR_BAD_MATCH;
714 return NULL;
715 }
716
717 image = intel_create_image_from_fds(screen, width, height, fourcc, fds,
718 num_fds, strides, offsets,
719 loaderPrivate);
720
721 /*
722    * Invalid parameters and any inconsistencies between them are assumed to
723    * be checked by the caller. Therefore, besides unsupported formats, one
724    * can fail only in allocation.
725 */
726 if (!image) {
727 *error = __DRI_IMAGE_ERROR_BAD_ALLOC;
728 return NULL;
729 }
730
731 image->dma_buf_imported = true;
732 image->yuv_color_space = yuv_color_space;
733 image->sample_range = sample_range;
734 image->horizontal_siting = horizontal_siting;
735 image->vertical_siting = vertical_siting;
736
737 *error = __DRI_IMAGE_ERROR_SUCCESS;
738 return image;
739 }
740
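/**
 * Create a __DRIimage that aliases a single plane of a planar parent image.
 * The new image shares (and takes a reference on) the parent's buffer
 * object and uses the per-plane offset and stride recorded when the parent
 * was imported.
 */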
741 static __DRIimage *
742 intel_from_planar(__DRIimage *parent, int plane, void *loaderPrivate)
743 {
744 int width, height, offset, stride, dri_format, index;
745 struct intel_image_format *f;
746 uint32_t mask_x, mask_y;
747 __DRIimage *image;
748
749 if (parent == NULL || parent->planar_format == NULL)
750 return NULL;
751
752 f = parent->planar_format;
753
754 if (plane >= f->nplanes)
755 return NULL;
756
757 width = parent->region->width >> f->planes[plane].width_shift;
758 height = parent->region->height >> f->planes[plane].height_shift;
759 dri_format = f->planes[plane].dri_format;
760 index = f->planes[plane].buffer_index;
761 offset = parent->offsets[index];
762 stride = parent->strides[index];
763
764 image = intel_allocate_image(dri_format, loaderPrivate);
765 if (image == NULL)
766 return NULL;
767
768 if (offset + height * stride > parent->region->bo->size) {
769 _mesa_warning(NULL, "intel_create_sub_image: subimage out of bounds");
770 free(image);
771 return NULL;
772 }
773
774    image->region = calloc(1, sizeof(*image->region));
775 if (image->region == NULL) {
776 free(image);
777 return NULL;
778 }
779
780 image->region->cpp = _mesa_get_format_bytes(image->format);
781 image->region->width = width;
782 image->region->height = height;
783 image->region->pitch = stride;
784 image->region->refcount = 1;
785 image->region->bo = parent->region->bo;
786 drm_intel_bo_reference(image->region->bo);
787 image->region->tiling = parent->region->tiling;
788 image->offset = offset;
789 intel_setup_image_from_dimensions(image);
790
791 intel_region_get_tile_masks(image->region, &mask_x, &mask_y, false);
792 if (offset & mask_x)
793 _mesa_warning(NULL,
794 "intel_create_sub_image: offset not on tile boundary");
795
796 return image;
797 }
798
799 static const __DRIimageExtension intelImageExtension = {
800 .base = { __DRI_IMAGE, 8 },
801
802 .createImageFromName = intel_create_image_from_name,
803 .createImageFromRenderbuffer = intel_create_image_from_renderbuffer,
804 .destroyImage = intel_destroy_image,
805 .createImage = intel_create_image,
806 .queryImage = intel_query_image,
807 .dupImage = intel_dup_image,
808 .validateUsage = intel_validate_usage,
809 .createImageFromNames = intel_create_image_from_names,
810 .fromPlanar = intel_from_planar,
811 .createImageFromTexture = intel_create_image_from_texture,
812 .createImageFromFds = intel_create_image_from_fds,
813 .createImageFromDmaBufs = intel_create_image_from_dma_bufs
814 };
815
816 static int
817 brw_query_renderer_integer(__DRIscreen *psp, int param, unsigned int *value)
818 {
819 const struct intel_screen *const intelScreen =
820 (struct intel_screen *) psp->driverPrivate;
821
822 switch (param) {
823 case __DRI2_RENDERER_VENDOR_ID:
824 value[0] = 0x8086;
825 return 0;
826 case __DRI2_RENDERER_DEVICE_ID:
827 value[0] = intelScreen->deviceID;
828 return 0;
829 case __DRI2_RENDERER_ACCELERATED:
830 value[0] = 1;
831 return 0;
832 case __DRI2_RENDERER_VIDEO_MEMORY: {
833 /* Once a batch uses more than 75% of the maximum mappable size, we
834 * assume that there's some fragmentation, and we start doing extra
835 * flushing, etc. That's the big cliff apps will care about.
836 */
837 size_t aper_size;
838 size_t mappable_size;
839
840 drm_intel_get_aperture_sizes(psp->fd, &mappable_size, &aper_size);
841
842 const unsigned gpu_mappable_megabytes =
843 (aper_size / (1024 * 1024)) * 3 / 4;
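      /* For example (illustrative numbers only): a 2048 MB aperture yields
       * 2048 * 3 / 4 = 1536 MB here, before the clamp against system memory
       * below.
       */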
844
845 const long system_memory_pages = sysconf(_SC_PHYS_PAGES);
846 const long system_page_size = sysconf(_SC_PAGE_SIZE);
847
848 if (system_memory_pages <= 0 || system_page_size <= 0)
849 return -1;
850
851 const uint64_t system_memory_bytes = (uint64_t) system_memory_pages
852 * (uint64_t) system_page_size;
853
854 const unsigned system_memory_megabytes =
855 (unsigned) (system_memory_bytes / (1024 * 1024));
856
857 value[0] = MIN2(system_memory_megabytes, gpu_mappable_megabytes);
858 return 0;
859 }
860 case __DRI2_RENDERER_UNIFIED_MEMORY_ARCHITECTURE:
861 value[0] = 1;
862 return 0;
863 case __DRI2_RENDERER_PREFERRED_PROFILE:
864 value[0] = (psp->max_gl_core_version != 0)
865 ? (1U << __DRI_API_OPENGL_CORE) : (1U << __DRI_API_OPENGL);
866 return 0;
867 default:
868 return driQueryRendererIntegerCommon(psp, param, value);
869 }
870
871 return -1;
872 }
873
874 static int
875 brw_query_renderer_string(__DRIscreen *psp, int param, const char **value)
876 {
877 const struct intel_screen *intelScreen =
878 (struct intel_screen *) psp->driverPrivate;
879
880 switch (param) {
881 case __DRI2_RENDERER_VENDOR_ID:
882 value[0] = brw_vendor_string;
883 return 0;
884 case __DRI2_RENDERER_DEVICE_ID:
885 value[0] = brw_get_renderer_string(intelScreen->deviceID);
886 return 0;
887 default:
888 break;
889 }
890
891 return -1;
892 }
893
894 static const __DRI2rendererQueryExtension intelRendererQueryExtension = {
895 .base = { __DRI2_RENDERER_QUERY, 1 },
896
897 .queryInteger = brw_query_renderer_integer,
898 .queryString = brw_query_renderer_string
899 };
900
901 static const __DRIrobustnessExtension dri2Robustness = {
902 .base = { __DRI2_ROBUSTNESS, 1 }
903 };
904
905 static const __DRIextension *intelScreenExtensions[] = {
906 &intelTexBufferExtension.base,
907 &intelFlushExtension.base,
908 &intelImageExtension.base,
909 &intelRendererQueryExtension.base,
910 &dri2ConfigQueryExtension.base,
911 NULL
912 };
913
914 static const __DRIextension *intelRobustScreenExtensions[] = {
915 &intelTexBufferExtension.base,
916 &intelFlushExtension.base,
917 &intelImageExtension.base,
918 &intelRendererQueryExtension.base,
919 &dri2ConfigQueryExtension.base,
920 &dri2Robustness.base,
921 NULL
922 };
923
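/**
 * Query a DRM_I915_GETPARAM value from the kernel.  Returns false if the
 * ioctl fails; unknown parameters (-EINVAL) are not warned about.
 */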
924 static bool
925 intel_get_param(__DRIscreen *psp, int param, int *value)
926 {
927 int ret;
928 struct drm_i915_getparam gp;
929
930 memset(&gp, 0, sizeof(gp));
931 gp.param = param;
932 gp.value = value;
933
934 ret = drmCommandWriteRead(psp->fd, DRM_I915_GETPARAM, &gp, sizeof(gp));
935 if (ret) {
936 if (ret != -EINVAL)
937 _mesa_warning(NULL, "drm_i915_getparam: %d", ret);
938 return false;
939 }
940
941 return true;
942 }
943
944 static bool
945 intel_get_boolean(__DRIscreen *psp, int param)
946 {
947 int value = 0;
948 return intel_get_param(psp, param, &value) && value;
949 }
950
951 static void
952 intelDestroyScreen(__DRIscreen * sPriv)
953 {
954 struct intel_screen *intelScreen = sPriv->driverPrivate;
955
956 dri_bufmgr_destroy(intelScreen->bufmgr);
957 driDestroyOptionInfo(&intelScreen->optionCache);
958
959 ralloc_free(intelScreen);
960 sPriv->driverPrivate = NULL;
961 }
962
963
964 /**
965 * This is called when we need to set up GL rendering to a new X window.
966 */
967 static GLboolean
968 intelCreateBuffer(__DRIscreen * driScrnPriv,
969 __DRIdrawable * driDrawPriv,
970 const struct gl_config * mesaVis, GLboolean isPixmap)
971 {
972 struct intel_renderbuffer *rb;
973 struct intel_screen *screen = (struct intel_screen*) driScrnPriv->driverPrivate;
974 mesa_format rgbFormat;
975 unsigned num_samples = intel_quantize_num_samples(screen, mesaVis->samples);
976 struct gl_framebuffer *fb;
977
978 if (isPixmap)
979 return false;
980
981 fb = CALLOC_STRUCT(gl_framebuffer);
982 if (!fb)
983 return false;
984
985 _mesa_initialize_window_framebuffer(fb, mesaVis);
986
987 if (screen->winsys_msaa_samples_override != -1) {
988 num_samples = screen->winsys_msaa_samples_override;
989 fb->Visual.samples = num_samples;
990 }
991
992 if (mesaVis->redBits == 5)
993 rgbFormat = MESA_FORMAT_B5G6R5_UNORM;
994 else if (mesaVis->sRGBCapable)
995 rgbFormat = MESA_FORMAT_B8G8R8A8_SRGB;
996 else if (mesaVis->alphaBits == 0)
997 rgbFormat = MESA_FORMAT_B8G8R8X8_UNORM;
998 else {
999 rgbFormat = MESA_FORMAT_B8G8R8A8_SRGB;
1000 fb->Visual.sRGBCapable = true;
1001 }
1002
1003 /* setup the hardware-based renderbuffers */
1004 rb = intel_create_renderbuffer(rgbFormat, num_samples);
1005 _mesa_add_renderbuffer(fb, BUFFER_FRONT_LEFT, &rb->Base.Base);
1006
1007 if (mesaVis->doubleBufferMode) {
1008 rb = intel_create_renderbuffer(rgbFormat, num_samples);
1009 _mesa_add_renderbuffer(fb, BUFFER_BACK_LEFT, &rb->Base.Base);
1010 }
1011
1012 /*
1013 * Assert here that the gl_config has an expected depth/stencil bit
1014 * combination: one of d24/s8, d16/s0, d0/s0. (See intelInitScreen2(),
1015 * which constructs the advertised configs.)
1016 */
1017 if (mesaVis->depthBits == 24) {
1018 assert(mesaVis->stencilBits == 8);
1019
1020 if (screen->devinfo->has_hiz_and_separate_stencil) {
1021 rb = intel_create_private_renderbuffer(MESA_FORMAT_Z24_UNORM_X8_UINT,
1022 num_samples);
1023 _mesa_add_renderbuffer(fb, BUFFER_DEPTH, &rb->Base.Base);
1024 rb = intel_create_private_renderbuffer(MESA_FORMAT_S_UINT8,
1025 num_samples);
1026 _mesa_add_renderbuffer(fb, BUFFER_STENCIL, &rb->Base.Base);
1027 } else {
1028 /*
1029 * Use combined depth/stencil. Note that the renderbuffer is
1030 * attached to two attachment points.
1031 */
1032 rb = intel_create_private_renderbuffer(MESA_FORMAT_Z24_UNORM_S8_UINT,
1033 num_samples);
1034 _mesa_add_renderbuffer(fb, BUFFER_DEPTH, &rb->Base.Base);
1035 _mesa_add_renderbuffer(fb, BUFFER_STENCIL, &rb->Base.Base);
1036 }
1037 }
1038 else if (mesaVis->depthBits == 16) {
1039 assert(mesaVis->stencilBits == 0);
1040 rb = intel_create_private_renderbuffer(MESA_FORMAT_Z_UNORM16,
1041 num_samples);
1042 _mesa_add_renderbuffer(fb, BUFFER_DEPTH, &rb->Base.Base);
1043 }
1044 else {
1045 assert(mesaVis->depthBits == 0);
1046 assert(mesaVis->stencilBits == 0);
1047 }
1048
1049 /* now add any/all software-based renderbuffers we may need */
1050 _swrast_add_soft_renderbuffers(fb,
1051 false, /* never sw color */
1052 false, /* never sw depth */
1053 false, /* never sw stencil */
1054 mesaVis->accumRedBits > 0,
1055 false, /* never sw alpha */
1056 false /* never sw aux */ );
1057 driDrawPriv->driverPrivate = fb;
1058
1059 return true;
1060 }
1061
1062 static void
1063 intelDestroyBuffer(__DRIdrawable * driDrawPriv)
1064 {
1065 struct gl_framebuffer *fb = driDrawPriv->driverPrivate;
1066
1067 _mesa_reference_framebuffer(&fb, NULL);
1068 }
1069
1070 static bool
1071 intel_init_bufmgr(struct intel_screen *intelScreen)
1072 {
1073 __DRIscreen *spriv = intelScreen->driScrnPriv;
1074
1075 intelScreen->no_hw = getenv("INTEL_NO_HW") != NULL;
1076
1077 intelScreen->bufmgr = intel_bufmgr_gem_init(spriv->fd, BATCH_SZ);
1078 if (intelScreen->bufmgr == NULL) {
1079 fprintf(stderr, "[%s:%u] Error initializing buffer manager.\n",
1080 __func__, __LINE__);
1081 return false;
1082 }
1083
1084 drm_intel_bufmgr_gem_enable_fenced_relocs(intelScreen->bufmgr);
1085
1086 if (!intel_get_boolean(spriv, I915_PARAM_HAS_RELAXED_DELTA)) {
1087 fprintf(stderr, "[%s: %u] Kernel 2.6.39 required.\n", __func__, __LINE__);
1088 return false;
1089 }
1090
1091 return true;
1092 }
1093
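/**
 * Detect whether the kernel applies bit-6 address swizzling to tiled
 * buffers: allocate a small X-tiled BO and ask for its swizzle mode.
 * The result is stored in intel_screen::hw_has_swizzling.
 */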
1094 static bool
1095 intel_detect_swizzling(struct intel_screen *screen)
1096 {
1097 drm_intel_bo *buffer;
1098 unsigned long flags = 0;
1099 unsigned long aligned_pitch;
1100 uint32_t tiling = I915_TILING_X;
1101 uint32_t swizzle_mode = 0;
1102
1103 buffer = drm_intel_bo_alloc_tiled(screen->bufmgr, "swizzle test",
1104 64, 64, 4,
1105 &tiling, &aligned_pitch, flags);
1106 if (buffer == NULL)
1107 return false;
1108
1109 drm_intel_bo_get_tiling(buffer, &tiling, &swizzle_mode);
1110 drm_intel_bo_unreference(buffer);
1111
1112 if (swizzle_mode == I915_BIT_6_SWIZZLE_NONE)
1113 return false;
1114 else
1115 return true;
1116 }
1117
1118 /**
1119  * Return the array of MSAA modes supported by the hardware. The array is
1120  * sorted in decreasing order and terminated by -1.
1121 */
1122 const int*
1123 intel_supported_msaa_modes(const struct intel_screen *screen)
1124 {
1125 static const int gen8_modes[] = {8, 4, 2, 0, -1};
1126 static const int gen7_modes[] = {8, 4, 0, -1};
1127 static const int gen6_modes[] = {4, 0, -1};
1128 static const int gen4_modes[] = {0, -1};
1129
1130 if (screen->devinfo->gen >= 8) {
1131 return gen8_modes;
1132 } else if (screen->devinfo->gen >= 7) {
1133 return gen7_modes;
1134 } else if (screen->devinfo->gen == 6) {
1135 return gen6_modes;
1136 } else {
1137 return gen4_modes;
1138 }
1139 }
1140
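/**
 * Build the set of framebuffer configs advertised to the loader:
 * singlesample configs without an accumulation buffer, a minimal set with
 * accumulation, and (on Gen6+) multisample configs.
 */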
1141 static __DRIconfig**
1142 intel_screen_make_configs(__DRIscreen *dri_screen)
1143 {
1144 static const mesa_format formats[] = {
1145 MESA_FORMAT_B5G6R5_UNORM,
1146 MESA_FORMAT_B8G8R8A8_UNORM
1147 };
1148
1149 /* GLX_SWAP_COPY_OML is not supported due to page flipping. */
1150 static const GLenum back_buffer_modes[] = {
1151 GLX_SWAP_UNDEFINED_OML, GLX_NONE,
1152 };
1153
1154 static const uint8_t singlesample_samples[1] = {0};
1155 static const uint8_t multisample_samples[2] = {4, 8};
1156
1157 struct intel_screen *screen = dri_screen->driverPrivate;
1158 const struct brw_device_info *devinfo = screen->devinfo;
1159 uint8_t depth_bits[4], stencil_bits[4];
1160 __DRIconfig **configs = NULL;
1161
1162 /* Generate singlesample configs without accumulation buffer. */
1163 for (int i = 0; i < ARRAY_SIZE(formats); i++) {
1164 __DRIconfig **new_configs;
1165 int num_depth_stencil_bits = 2;
1166
1167 /* Starting with DRI2 protocol version 1.1 we can request a depth/stencil
1168 * buffer that has a different number of bits per pixel than the color
1169       * buffer; gen >= 6 supports this.
1170 */
1171 depth_bits[0] = 0;
1172 stencil_bits[0] = 0;
1173
1174 if (formats[i] == MESA_FORMAT_B5G6R5_UNORM) {
1175 depth_bits[1] = 16;
1176 stencil_bits[1] = 0;
1177 if (devinfo->gen >= 6) {
1178 depth_bits[2] = 24;
1179 stencil_bits[2] = 8;
1180 num_depth_stencil_bits = 3;
1181 }
1182 } else {
1183 depth_bits[1] = 24;
1184 stencil_bits[1] = 8;
1185 }
1186
1187 new_configs = driCreateConfigs(formats[i],
1188 depth_bits,
1189 stencil_bits,
1190 num_depth_stencil_bits,
1191 back_buffer_modes, 2,
1192 singlesample_samples, 1,
1193 false);
1194 configs = driConcatConfigs(configs, new_configs);
1195 }
1196
1197 /* Generate the minimum possible set of configs that include an
1198 * accumulation buffer.
1199 */
1200 for (int i = 0; i < ARRAY_SIZE(formats); i++) {
1201 __DRIconfig **new_configs;
1202
1203 if (formats[i] == MESA_FORMAT_B5G6R5_UNORM) {
1204 depth_bits[0] = 16;
1205 stencil_bits[0] = 0;
1206 } else {
1207 depth_bits[0] = 24;
1208 stencil_bits[0] = 8;
1209 }
1210
1211 new_configs = driCreateConfigs(formats[i],
1212 depth_bits, stencil_bits, 1,
1213 back_buffer_modes, 1,
1214 singlesample_samples, 1,
1215 true);
1216 configs = driConcatConfigs(configs, new_configs);
1217 }
1218
1219 /* Generate multisample configs.
1220 *
1221 * This loop breaks early, and hence is a no-op, on gen < 6.
1222 *
1223 * Multisample configs must follow the singlesample configs in order to
1224 * work around an X server bug present in 1.12. The X server chooses to
1225     * associate the first listed RGBA8888-Z24S8 config, regardless of its
1226 * sample count, with the 32-bit depth visual used for compositing.
1227 *
1228 * Only doublebuffer configs with GLX_SWAP_UNDEFINED_OML behavior are
1229 * supported. Singlebuffer configs are not supported because no one wants
1230 * them.
1231 */
1232 for (int i = 0; i < ARRAY_SIZE(formats); i++) {
1233 if (devinfo->gen < 6)
1234 break;
1235
1236 __DRIconfig **new_configs;
1237 const int num_depth_stencil_bits = 2;
1238 int num_msaa_modes = 0;
1239
1240 depth_bits[0] = 0;
1241 stencil_bits[0] = 0;
1242
1243 if (formats[i] == MESA_FORMAT_B5G6R5_UNORM) {
1244 depth_bits[1] = 16;
1245 stencil_bits[1] = 0;
1246 } else {
1247 depth_bits[1] = 24;
1248 stencil_bits[1] = 8;
1249 }
1250
1251 if (devinfo->gen >= 7)
1252 num_msaa_modes = 2;
1253 else if (devinfo->gen == 6)
1254 num_msaa_modes = 1;
1255
1256 new_configs = driCreateConfigs(formats[i],
1257 depth_bits,
1258 stencil_bits,
1259 num_depth_stencil_bits,
1260 back_buffer_modes, 1,
1261 multisample_samples,
1262 num_msaa_modes,
1263 false);
1264 configs = driConcatConfigs(configs, new_configs);
1265 }
1266
1267 if (configs == NULL) {
1268 fprintf(stderr, "[%s:%u] Error creating FBConfig!\n", __func__,
1269 __LINE__);
1270 return NULL;
1271 }
1272
1273 return configs;
1274 }
1275
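/**
 * Advertise the maximum core/compat GL and GLES versions that each
 * hardware generation can support.
 */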
1276 static void
1277 set_max_gl_versions(struct intel_screen *screen)
1278 {
1279 __DRIscreen *psp = screen->driScrnPriv;
1280
1281 switch (screen->devinfo->gen) {
1282 case 8:
1283 case 7:
1284 psp->max_gl_core_version = 33;
1285 psp->max_gl_compat_version = 30;
1286 psp->max_gl_es1_version = 11;
1287 psp->max_gl_es2_version = 30;
1288 break;
1289 case 6:
1290 psp->max_gl_core_version = 31;
1291 psp->max_gl_compat_version = 30;
1292 psp->max_gl_es1_version = 11;
1293 psp->max_gl_es2_version = 30;
1294 break;
1295 case 5:
1296 case 4:
1297 psp->max_gl_core_version = 0;
1298 psp->max_gl_compat_version = 21;
1299 psp->max_gl_es1_version = 11;
1300 psp->max_gl_es2_version = 20;
1301 break;
1302 default:
1303 assert(!"unrecognized intel_screen::gen");
1304 break;
1305 }
1306 }
1307
1308 /**
1309 * This is the driver specific part of the createNewScreen entry point.
1310 * Called when using DRI2.
1311 *
1312 * \return the struct gl_config supported by this driver
1313 */
1314 static const
1315 __DRIconfig **intelInitScreen2(__DRIscreen *psp)
1316 {
1317 struct intel_screen *intelScreen;
1318
1319 if (psp->image.loader) {
1320 } else if (psp->dri2.loader->base.version <= 2 ||
1321 psp->dri2.loader->getBuffersWithFormat == NULL) {
1322 fprintf(stderr,
1323 "\nERROR! DRI2 loader with getBuffersWithFormat() "
1324 "support required\n");
1325 return false;
1326 }
1327
1328 /* Allocate the private area */
1329 intelScreen = rzalloc(NULL, struct intel_screen);
1330 if (!intelScreen) {
1331 fprintf(stderr, "\nERROR! Allocating private area failed\n");
1332 return false;
1333 }
1334    /* parse the option information in brw_config_options.xml */
1335 driParseOptionInfo(&intelScreen->optionCache, brw_config_options.xml);
1336
1337 intelScreen->driScrnPriv = psp;
1338 psp->driverPrivate = (void *) intelScreen;
1339
1340 if (!intel_init_bufmgr(intelScreen))
1341 return false;
1342
1343 intelScreen->deviceID = drm_intel_bufmgr_gem_get_devid(intelScreen->bufmgr);
1344 intelScreen->devinfo = brw_get_device_info(intelScreen->deviceID);
1345 if (!intelScreen->devinfo)
1346 return false;
1347
1348 intelScreen->hw_must_use_separate_stencil = intelScreen->devinfo->gen >= 7;
1349
1350 intelScreen->hw_has_swizzling = intel_detect_swizzling(intelScreen);
1351
1352 const char *force_msaa = getenv("INTEL_FORCE_MSAA");
1353 if (force_msaa) {
1354 intelScreen->winsys_msaa_samples_override =
1355 intel_quantize_num_samples(intelScreen, atoi(force_msaa));
1356 printf("Forcing winsys sample count to %d\n",
1357 intelScreen->winsys_msaa_samples_override);
1358 } else {
1359 intelScreen->winsys_msaa_samples_override = -1;
1360 }
1361
1362 set_max_gl_versions(intelScreen);
1363
1364 /* Notification of GPU resets requires hardware contexts and a kernel new
1365 * enough to support DRM_IOCTL_I915_GET_RESET_STATS. If the ioctl is
1366 * supported, calling it with a context of 0 will either generate EPERM or
1367     * no error. If the ioctl is not supported, it always generates EINVAL.
1368 * Use this to determine whether to advertise the __DRI2_ROBUSTNESS
1369 * extension to the loader.
1370 *
1371 * Don't even try on pre-Gen6, since we don't attempt to use contexts there.
1372 */
1373 if (intelScreen->devinfo->gen >= 6) {
1374 struct drm_i915_reset_stats stats;
1375 memset(&stats, 0, sizeof(stats));
1376
1377 const int ret = drmIoctl(psp->fd, DRM_IOCTL_I915_GET_RESET_STATS, &stats);
1378
1379 intelScreen->has_context_reset_notification =
1380 (ret != -1 || errno != EINVAL);
1381 }
1382
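   /* Only advertise the robustness (__DRI2_ROBUSTNESS) screen extension when
    * reset notification is actually available.
    */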
1383 psp->extensions = !intelScreen->has_context_reset_notification
1384 ? intelScreenExtensions : intelRobustScreenExtensions;
1385
1386 brw_fs_alloc_reg_sets(intelScreen);
1387 brw_vec4_alloc_reg_set(intelScreen);
1388
1389 return (const __DRIconfig**) intel_screen_make_configs(psp);
1390 }
1391
1392 struct intel_buffer {
1393 __DRIbuffer base;
1394 struct intel_region *region;
1395 };
1396
1397 static __DRIbuffer *
1398 intelAllocateBuffer(__DRIscreen *screen,
1399 unsigned attachment, unsigned format,
1400 int width, int height)
1401 {
1402 struct intel_buffer *intelBuffer;
1403 struct intel_screen *intelScreen = screen->driverPrivate;
1404
1405 assert(attachment == __DRI_BUFFER_FRONT_LEFT ||
1406 attachment == __DRI_BUFFER_BACK_LEFT);
1407
1408 intelBuffer = calloc(1, sizeof *intelBuffer);
1409 if (intelBuffer == NULL)
1410 return NULL;
1411
1412 /* The front and back buffers are color buffers, which are X tiled. */
1413 intelBuffer->region = intel_region_alloc(intelScreen,
1414 I915_TILING_X,
1415 format / 8,
1416 width,
1417 height,
1418 true);
1419
1420 if (intelBuffer->region == NULL) {
1421 free(intelBuffer);
1422 return NULL;
1423 }
1424
1425 intel_region_flink(intelBuffer->region, &intelBuffer->base.name);
1426
1427 intelBuffer->base.attachment = attachment;
1428 intelBuffer->base.cpp = intelBuffer->region->cpp;
1429 intelBuffer->base.pitch = intelBuffer->region->pitch;
1430
1431 return &intelBuffer->base;
1432 }
1433
1434 static void
1435 intelReleaseBuffer(__DRIscreen *screen, __DRIbuffer *buffer)
1436 {
1437 struct intel_buffer *intelBuffer = (struct intel_buffer *) buffer;
1438
1439 intel_region_release(&intelBuffer->region);
1440 free(intelBuffer);
1441 }
1442
1443 static const struct __DriverAPIRec brw_driver_api = {
1444 .InitScreen = intelInitScreen2,
1445 .DestroyScreen = intelDestroyScreen,
1446 .CreateContext = brwCreateContext,
1447 .DestroyContext = intelDestroyContext,
1448 .CreateBuffer = intelCreateBuffer,
1449 .DestroyBuffer = intelDestroyBuffer,
1450 .MakeCurrent = intelMakeCurrent,
1451 .UnbindContext = intelUnbindContext,
1452 .AllocateBuffer = intelAllocateBuffer,
1453 .ReleaseBuffer = intelReleaseBuffer
1454 };
1455
1456 static const struct __DRIDriverVtableExtensionRec brw_vtable = {
1457 .base = { __DRI_DRIVER_VTABLE, 1 },
1458 .vtable = &brw_driver_api,
1459 };
1460
1461 static const __DRIextension *brw_driver_extensions[] = {
1462 &driCoreExtension.base,
1463 &driImageDriverExtension.base,
1464 &driDRI2Extension.base,
1465 &brw_vtable.base,
1466 &brw_config_options.base,
1467 NULL
1468 };
1469
1470 PUBLIC const __DRIextension **__driDriverGetExtensions_i965(void)
1471 {
1472 globalDriverAPI = &brw_driver_api;
1473
1474 return brw_driver_extensions;
1475 }