i965: Use updated kernel interface for accurate TIMESTAMP reads
[mesa.git] / src / mesa / drivers / dri / i965 / intel_screen.c
1 /**************************************************************************
2 *
3 * Copyright 2003 VMware, Inc.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28 #include <errno.h>
29 #include <time.h>
30 #include <unistd.h>
31 #include "main/glheader.h"
32 #include "main/context.h"
33 #include "main/framebuffer.h"
34 #include "main/renderbuffer.h"
35 #include "main/texobj.h"
36 #include "main/hash.h"
37 #include "main/fbobject.h"
38 #include "main/version.h"
39 #include "swrast/s_renderbuffer.h"
40 #include "util/ralloc.h"
41 #include "brw_shader.h"
42 #include "glsl/nir/nir.h"
43
44 #include "utils.h"
45 #include "xmlpool.h"
46
47 static const __DRIconfigOptionsExtension brw_config_options = {
48 .base = { __DRI_CONFIG_OPTIONS, 1 },
49 .xml =
50 DRI_CONF_BEGIN
51 DRI_CONF_SECTION_PERFORMANCE
52 DRI_CONF_VBLANK_MODE(DRI_CONF_VBLANK_ALWAYS_SYNC)
53 /* Options correspond to DRI_CONF_BO_REUSE_DISABLED,
54 * DRI_CONF_BO_REUSE_ALL
55 */
56 DRI_CONF_OPT_BEGIN_V(bo_reuse, enum, 1, "0:1")
57 DRI_CONF_DESC_BEGIN(en, "Buffer object reuse")
58 DRI_CONF_ENUM(0, "Disable buffer object reuse")
59 DRI_CONF_ENUM(1, "Enable reuse of all sizes of buffer objects")
60 DRI_CONF_DESC_END
61 DRI_CONF_OPT_END
62
63 DRI_CONF_OPT_BEGIN_B(hiz, "true")
64 DRI_CONF_DESC(en, "Enable Hierarchical Z on gen6+")
65 DRI_CONF_OPT_END
66 DRI_CONF_SECTION_END
67
68 DRI_CONF_SECTION_QUALITY
69 DRI_CONF_FORCE_S3TC_ENABLE("false")
70
71 DRI_CONF_OPT_BEGIN(clamp_max_samples, int, -1)
72 DRI_CONF_DESC(en, "Clamp the value of GL_MAX_SAMPLES to the "
73 "given integer. If negative, then do not clamp.")
74 DRI_CONF_OPT_END
75 DRI_CONF_SECTION_END
76
77 DRI_CONF_SECTION_DEBUG
78 DRI_CONF_NO_RAST("false")
79 DRI_CONF_ALWAYS_FLUSH_BATCH("false")
80 DRI_CONF_ALWAYS_FLUSH_CACHE("false")
81 DRI_CONF_DISABLE_THROTTLING("false")
82 DRI_CONF_FORCE_GLSL_EXTENSIONS_WARN("false")
83 DRI_CONF_DISABLE_GLSL_LINE_CONTINUATIONS("false")
84 DRI_CONF_DISABLE_BLEND_FUNC_EXTENDED("false")
85 DRI_CONF_ALLOW_GLSL_EXTENSION_DIRECTIVE_MIDSHADER("false")
86
87 DRI_CONF_OPT_BEGIN_B(shader_precompile, "true")
88 DRI_CONF_DESC(en, "Perform code generation at shader link time.")
89 DRI_CONF_OPT_END
90 DRI_CONF_SECTION_END
91 DRI_CONF_END
92 };
93
94 #include "intel_batchbuffer.h"
95 #include "intel_buffers.h"
96 #include "intel_bufmgr.h"
97 #include "intel_fbo.h"
98 #include "intel_mipmap_tree.h"
99 #include "intel_screen.h"
100 #include "intel_tex.h"
101 #include "intel_image.h"
102
103 #include "brw_context.h"
104
105 #include "i915_drm.h"
106
107 /**
108 * For debugging purposes, this returns a time in seconds.
109 */
110 double
111 get_time(void)
112 {
113 struct timespec tp;
114
115 clock_gettime(CLOCK_MONOTONIC, &tp);
116
117 return tp.tv_sec + tp.tv_nsec / 1000000000.0;
118 }
119
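/**
 * Dump each bound color draw buffer into the AUB trace as a BMP image.
 * Only ARGB8888/XRGB8888 renderbuffers are handled; other formats are
 * skipped.  Called from the flush path when INTEL_DEBUG=aub is set.
 */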
120 void
121 aub_dump_bmp(struct gl_context *ctx)
122 {
123 struct gl_framebuffer *fb = ctx->DrawBuffer;
124
125 for (int i = 0; i < fb->_NumColorDrawBuffers; i++) {
126 struct intel_renderbuffer *irb =
127 intel_renderbuffer(fb->_ColorDrawBuffers[i]);
128
129 if (irb && irb->mt) {
130 enum aub_dump_bmp_format format;
131
132 switch (irb->Base.Base.Format) {
133 case MESA_FORMAT_B8G8R8A8_UNORM:
134 case MESA_FORMAT_B8G8R8X8_UNORM:
135 format = AUB_DUMP_BMP_FORMAT_ARGB_8888;
136 break;
137 default:
138 continue;
139 }
140
141 drm_intel_gem_bo_aub_dump_bmp(irb->mt->bo,
142 irb->draw_x,
143 irb->draw_y,
144 irb->Base.Base.Width,
145 irb->Base.Base.Height,
146 format,
147 irb->mt->pitch,
148 0);
149 }
150 }
151 }
152
153 static const __DRItexBufferExtension intelTexBufferExtension = {
154 .base = { __DRI_TEX_BUFFER, 3 },
155
156 .setTexBuffer = intelSetTexBuffer,
157 .setTexBuffer2 = intelSetTexBuffer2,
158 .releaseTexBuffer = NULL,
159 };
160
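/**
 * Flush rendering for a drawable on behalf of the DRI2 loader: flush any
 * queued vertices, perform the resolves needed before the drawable is
 * handed back when __DRI2_FLUSH_DRAWABLE is set, note whether swap or
 * front-buffer throttling is required, and submit the batchbuffer.
 */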
161 static void
162 intel_dri2_flush_with_flags(__DRIcontext *cPriv,
163 __DRIdrawable *dPriv,
164 unsigned flags,
165 enum __DRI2throttleReason reason)
166 {
167 struct brw_context *brw = cPriv->driverPrivate;
168
169 if (!brw)
170 return;
171
172 struct gl_context *ctx = &brw->ctx;
173
174 FLUSH_VERTICES(ctx, 0);
175
176 if (flags & __DRI2_FLUSH_DRAWABLE)
177 intel_resolve_for_dri2_flush(brw, dPriv);
178
179 if (reason == __DRI2_THROTTLE_SWAPBUFFER)
180 brw->need_swap_throttle = true;
181 if (reason == __DRI2_THROTTLE_FLUSHFRONT)
182 brw->need_flush_throttle = true;
183
184 intel_batchbuffer_flush(brw);
185
186 if (INTEL_DEBUG & DEBUG_AUB) {
187 aub_dump_bmp(ctx);
188 }
189 }
190
191 /**
192 * Provides compatibility with loaders that only support the older (version
193 * 1-3) flush interface.
194 *
195 * That includes libGL up to Mesa 9.0, and the X Server at least up to 1.13.
196 */
197 static void
198 intel_dri2_flush(__DRIdrawable *drawable)
199 {
200 intel_dri2_flush_with_flags(drawable->driContextPriv, drawable,
201 __DRI2_FLUSH_DRAWABLE,
202 __DRI2_THROTTLE_SWAPBUFFER);
203 }
204
205 static const struct __DRI2flushExtensionRec intelFlushExtension = {
206 .base = { __DRI2_FLUSH, 4 },
207
208 .flush = intel_dri2_flush,
209 .invalidate = dri2InvalidateDrawable,
210 .flush_with_flags = intel_dri2_flush_with_flags,
211 };
212
213 static struct intel_image_format intel_image_formats[] = {
214 { __DRI_IMAGE_FOURCC_ARGB8888, __DRI_IMAGE_COMPONENTS_RGBA, 1,
215 { { 0, 0, 0, __DRI_IMAGE_FORMAT_ARGB8888, 4 } } },
216
217 { __DRI_IMAGE_FOURCC_ABGR8888, __DRI_IMAGE_COMPONENTS_RGBA, 1,
218 { { 0, 0, 0, __DRI_IMAGE_FORMAT_ABGR8888, 4 } } },
219
220 { __DRI_IMAGE_FOURCC_SARGB8888, __DRI_IMAGE_COMPONENTS_RGBA, 1,
221 { { 0, 0, 0, __DRI_IMAGE_FORMAT_SARGB8, 4 } } },
222
223 { __DRI_IMAGE_FOURCC_XRGB8888, __DRI_IMAGE_COMPONENTS_RGB, 1,
224 { { 0, 0, 0, __DRI_IMAGE_FORMAT_XRGB8888, 4 }, } },
225
226 { __DRI_IMAGE_FOURCC_XBGR8888, __DRI_IMAGE_COMPONENTS_RGB, 1,
227 { { 0, 0, 0, __DRI_IMAGE_FORMAT_XBGR8888, 4 }, } },
228
229 { __DRI_IMAGE_FOURCC_RGB565, __DRI_IMAGE_COMPONENTS_RGB, 1,
230 { { 0, 0, 0, __DRI_IMAGE_FORMAT_RGB565, 2 } } },
231
232 { __DRI_IMAGE_FOURCC_YUV410, __DRI_IMAGE_COMPONENTS_Y_U_V, 3,
233 { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
234 { 1, 2, 2, __DRI_IMAGE_FORMAT_R8, 1 },
235 { 2, 2, 2, __DRI_IMAGE_FORMAT_R8, 1 } } },
236
237 { __DRI_IMAGE_FOURCC_YUV411, __DRI_IMAGE_COMPONENTS_Y_U_V, 3,
238 { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
239 { 1, 2, 0, __DRI_IMAGE_FORMAT_R8, 1 },
240 { 2, 2, 0, __DRI_IMAGE_FORMAT_R8, 1 } } },
241
242 { __DRI_IMAGE_FOURCC_YUV420, __DRI_IMAGE_COMPONENTS_Y_U_V, 3,
243 { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
244 { 1, 1, 1, __DRI_IMAGE_FORMAT_R8, 1 },
245 { 2, 1, 1, __DRI_IMAGE_FORMAT_R8, 1 } } },
246
247 { __DRI_IMAGE_FOURCC_YUV422, __DRI_IMAGE_COMPONENTS_Y_U_V, 3,
248 { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
249 { 1, 1, 0, __DRI_IMAGE_FORMAT_R8, 1 },
250 { 2, 1, 0, __DRI_IMAGE_FORMAT_R8, 1 } } },
251
252 { __DRI_IMAGE_FOURCC_YUV444, __DRI_IMAGE_COMPONENTS_Y_U_V, 3,
253 { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
254 { 1, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
255 { 2, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 } } },
256
257 { __DRI_IMAGE_FOURCC_NV12, __DRI_IMAGE_COMPONENTS_Y_UV, 2,
258 { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
259 { 1, 1, 1, __DRI_IMAGE_FORMAT_GR88, 2 } } },
260
261 { __DRI_IMAGE_FOURCC_NV16, __DRI_IMAGE_COMPONENTS_Y_UV, 2,
262 { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
263 { 1, 1, 0, __DRI_IMAGE_FORMAT_GR88, 2 } } },
264
265 /* For YUYV buffers, we set up two overlapping DRI images and treat
266 * them as planar buffers in the compositors. Plane 0 is GR88 and
267 * samples YU or YV pairs and places Y into the R component, while
 268      * plane 1 is ARGB and samples whole YUYV clusters, placing U into the
 269      * G component and V into A.  This lets the
270 * texture sampler interpolate the Y components correctly when
271 * sampling from plane 0, and interpolate U and V correctly when
272 * sampling from plane 1. */
273 { __DRI_IMAGE_FOURCC_YUYV, __DRI_IMAGE_COMPONENTS_Y_XUXV, 2,
274 { { 0, 0, 0, __DRI_IMAGE_FORMAT_GR88, 2 },
275 { 0, 1, 0, __DRI_IMAGE_FORMAT_ARGB8888, 4 } } }
276 };
277
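/* Warn when a tiled image's offset is not 4 KiB aligned; tiled surfaces
 * are expected to begin on a tile (page) boundary.
 */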
278 static void
279 intel_image_warn_if_unaligned(__DRIimage *image, const char *func)
280 {
281 uint32_t tiling, swizzle;
282 drm_intel_bo_get_tiling(image->bo, &tiling, &swizzle);
283
284 if (tiling != I915_TILING_NONE && (image->offset & 0xfff)) {
285 _mesa_warning(NULL, "%s: offset 0x%08x not on tile boundary",
286 func, image->offset);
287 }
288 }
289
290 static struct intel_image_format *
291 intel_image_format_lookup(int fourcc)
292 {
293 struct intel_image_format *f = NULL;
294
295 for (unsigned i = 0; i < ARRAY_SIZE(intel_image_formats); i++) {
296 if (intel_image_formats[i].fourcc == fourcc) {
297 f = &intel_image_formats[i];
298 break;
299 }
300 }
301
302 return f;
303 }
304
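/* Reverse lookup: map a single-plane DRI format back to its FourCC by
 * scanning the first plane of each entry in intel_image_formats[].
 */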
305 static boolean intel_lookup_fourcc(int dri_format, int *fourcc)
306 {
307 for (unsigned i = 0; i < ARRAY_SIZE(intel_image_formats); i++) {
308 if (intel_image_formats[i].planes[0].dri_format == dri_format) {
309 *fourcc = intel_image_formats[i].fourcc;
310 return true;
311 }
312 }
313 return false;
314 }
315
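/* Allocate and minimally initialize a __DRIimage for the given DRI format.
 * The caller is responsible for attaching a buffer object and filling in
 * the dimensions and pitch.
 */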
316 static __DRIimage *
317 intel_allocate_image(int dri_format, void *loaderPrivate)
318 {
319 __DRIimage *image;
320
321 image = calloc(1, sizeof *image);
322 if (image == NULL)
323 return NULL;
324
325 image->dri_format = dri_format;
326 image->offset = 0;
327
328 image->format = driImageFormatToGLFormat(dri_format);
329 if (dri_format != __DRI_IMAGE_FORMAT_NONE &&
330 image->format == MESA_FORMAT_NONE) {
331 free(image);
332 return NULL;
333 }
334
335 image->internal_format = _mesa_get_format_base_format(image->format);
336 image->data = loaderPrivate;
337
338 return image;
339 }
340
341 /**
342 * Sets up a DRIImage structure to point to a slice out of a miptree.
343 */
344 static void
345 intel_setup_image_from_mipmap_tree(struct brw_context *brw, __DRIimage *image,
346 struct intel_mipmap_tree *mt, GLuint level,
347 GLuint zoffset)
348 {
349 intel_miptree_make_shareable(brw, mt);
350
351 intel_miptree_check_level_layer(mt, level, zoffset);
352
353 image->width = minify(mt->physical_width0, level - mt->first_level);
354 image->height = minify(mt->physical_height0, level - mt->first_level);
355 image->pitch = mt->pitch;
356
357 image->offset = intel_miptree_get_tile_offsets(mt, level, zoffset,
358 &image->tile_x,
359 &image->tile_y);
360
361 drm_intel_bo_unreference(image->bo);
362 image->bo = mt->bo;
363 drm_intel_bo_reference(mt->bo);
364 }
365
366 static __DRIimage *
367 intel_create_image_from_name(__DRIscreen *screen,
368 int width, int height, int format,
369 int name, int pitch, void *loaderPrivate)
370 {
371 struct intel_screen *intelScreen = screen->driverPrivate;
372 __DRIimage *image;
373 int cpp;
374
375 image = intel_allocate_image(format, loaderPrivate);
376 if (image == NULL)
377 return NULL;
378
379 if (image->format == MESA_FORMAT_NONE)
380 cpp = 1;
381 else
382 cpp = _mesa_get_format_bytes(image->format);
383
384 image->width = width;
385 image->height = height;
386 image->pitch = pitch * cpp;
387 image->bo = drm_intel_bo_gem_create_from_name(intelScreen->bufmgr, "image",
388 name);
389 if (!image->bo) {
390 free(image);
391 return NULL;
392 }
393
394 return image;
395 }
396
397 static __DRIimage *
398 intel_create_image_from_renderbuffer(__DRIcontext *context,
399 int renderbuffer, void *loaderPrivate)
400 {
401 __DRIimage *image;
402 struct brw_context *brw = context->driverPrivate;
403 struct gl_context *ctx = &brw->ctx;
404 struct gl_renderbuffer *rb;
405 struct intel_renderbuffer *irb;
406
407 rb = _mesa_lookup_renderbuffer(ctx, renderbuffer);
408 if (!rb) {
409 _mesa_error(ctx, GL_INVALID_OPERATION, "glRenderbufferExternalMESA");
410 return NULL;
411 }
412
413 irb = intel_renderbuffer(rb);
414 intel_miptree_make_shareable(brw, irb->mt);
415 image = calloc(1, sizeof *image);
416 if (image == NULL)
417 return NULL;
418
419 image->internal_format = rb->InternalFormat;
420 image->format = rb->Format;
421 image->offset = 0;
422 image->data = loaderPrivate;
423 drm_intel_bo_unreference(image->bo);
424 image->bo = irb->mt->bo;
425 drm_intel_bo_reference(irb->mt->bo);
426 image->width = rb->Width;
427 image->height = rb->Height;
428 image->pitch = irb->mt->pitch;
429 image->dri_format = driGLFormatToImageFormat(image->format);
430 image->has_depthstencil = irb->mt->stencil_mt? true : false;
431
432 rb->NeedsFinishRenderTexture = true;
433 return image;
434 }
435
436 static __DRIimage *
437 intel_create_image_from_texture(__DRIcontext *context, int target,
438 unsigned texture, int zoffset,
439 int level,
440 unsigned *error,
441 void *loaderPrivate)
442 {
443 __DRIimage *image;
444 struct brw_context *brw = context->driverPrivate;
445 struct gl_texture_object *obj;
446 struct intel_texture_object *iobj;
447 GLuint face = 0;
448
449 obj = _mesa_lookup_texture(&brw->ctx, texture);
450 if (!obj || obj->Target != target) {
451 *error = __DRI_IMAGE_ERROR_BAD_PARAMETER;
452 return NULL;
453 }
454
455 if (target == GL_TEXTURE_CUBE_MAP)
456 face = zoffset;
457
458 _mesa_test_texobj_completeness(&brw->ctx, obj);
459 iobj = intel_texture_object(obj);
460 if (!obj->_BaseComplete || (level > 0 && !obj->_MipmapComplete)) {
461 *error = __DRI_IMAGE_ERROR_BAD_PARAMETER;
462 return NULL;
463 }
464
465 if (level < obj->BaseLevel || level > obj->_MaxLevel) {
466 *error = __DRI_IMAGE_ERROR_BAD_MATCH;
467 return NULL;
468 }
469
470 if (target == GL_TEXTURE_3D && obj->Image[face][level]->Depth < zoffset) {
471 *error = __DRI_IMAGE_ERROR_BAD_MATCH;
472 return NULL;
473 }
474 image = calloc(1, sizeof *image);
475 if (image == NULL) {
476 *error = __DRI_IMAGE_ERROR_BAD_ALLOC;
477 return NULL;
478 }
479
480 image->internal_format = obj->Image[face][level]->InternalFormat;
481 image->format = obj->Image[face][level]->TexFormat;
482 image->data = loaderPrivate;
483 intel_setup_image_from_mipmap_tree(brw, image, iobj->mt, level, zoffset);
484 image->dri_format = driGLFormatToImageFormat(image->format);
485 image->has_depthstencil = iobj->mt->stencil_mt? true : false;
 486     if (image->dri_format == __DRI_IMAGE_FORMAT_NONE) {
487 *error = __DRI_IMAGE_ERROR_BAD_PARAMETER;
488 free(image);
489 return NULL;
490 }
491
492 *error = __DRI_IMAGE_ERROR_SUCCESS;
493 return image;
494 }
495
496 static void
497 intel_destroy_image(__DRIimage *image)
498 {
499 drm_intel_bo_unreference(image->bo);
500 free(image);
501 }
502
503 static __DRIimage *
504 intel_create_image(__DRIscreen *screen,
505 int width, int height, int format,
506 unsigned int use,
507 void *loaderPrivate)
508 {
509 __DRIimage *image;
510 struct intel_screen *intelScreen = screen->driverPrivate;
511 uint32_t tiling;
512 int cpp;
513 unsigned long pitch;
514
515 tiling = I915_TILING_X;
516 if (use & __DRI_IMAGE_USE_CURSOR) {
517 if (width != 64 || height != 64)
518 return NULL;
519 tiling = I915_TILING_NONE;
520 }
521
522 if (use & __DRI_IMAGE_USE_LINEAR)
523 tiling = I915_TILING_NONE;
524
525 image = intel_allocate_image(format, loaderPrivate);
526 if (image == NULL)
527 return NULL;
528
529
530 cpp = _mesa_get_format_bytes(image->format);
531 image->bo = drm_intel_bo_alloc_tiled(intelScreen->bufmgr, "image",
532 width, height, cpp, &tiling,
533 &pitch, 0);
534 if (image->bo == NULL) {
535 free(image);
536 return NULL;
537 }
538 image->width = width;
539 image->height = height;
540 image->pitch = pitch;
541
542 return image;
543 }
544
545 static GLboolean
546 intel_query_image(__DRIimage *image, int attrib, int *value)
547 {
548 switch (attrib) {
549 case __DRI_IMAGE_ATTRIB_STRIDE:
550 *value = image->pitch;
551 return true;
552 case __DRI_IMAGE_ATTRIB_HANDLE:
553 *value = image->bo->handle;
554 return true;
555 case __DRI_IMAGE_ATTRIB_NAME:
556 return !drm_intel_bo_flink(image->bo, (uint32_t *) value);
557 case __DRI_IMAGE_ATTRIB_FORMAT:
558 *value = image->dri_format;
559 return true;
560 case __DRI_IMAGE_ATTRIB_WIDTH:
561 *value = image->width;
562 return true;
563 case __DRI_IMAGE_ATTRIB_HEIGHT:
564 *value = image->height;
565 return true;
566 case __DRI_IMAGE_ATTRIB_COMPONENTS:
567 if (image->planar_format == NULL)
568 return false;
569 *value = image->planar_format->components;
570 return true;
571 case __DRI_IMAGE_ATTRIB_FD:
572 if (drm_intel_bo_gem_export_to_prime(image->bo, value) == 0)
573 return true;
574 return false;
575 case __DRI_IMAGE_ATTRIB_FOURCC:
576 if (intel_lookup_fourcc(image->dri_format, value))
577 return true;
578 return false;
579 case __DRI_IMAGE_ATTRIB_NUM_PLANES:
580 *value = 1;
581 return true;
582
583 default:
584 return false;
585 }
586 }
587
588 static __DRIimage *
589 intel_dup_image(__DRIimage *orig_image, void *loaderPrivate)
590 {
591 __DRIimage *image;
592
593 image = calloc(1, sizeof *image);
594 if (image == NULL)
595 return NULL;
596
597 drm_intel_bo_reference(orig_image->bo);
598 image->bo = orig_image->bo;
599 image->internal_format = orig_image->internal_format;
600 image->planar_format = orig_image->planar_format;
601 image->dri_format = orig_image->dri_format;
602 image->format = orig_image->format;
603 image->offset = orig_image->offset;
604 image->width = orig_image->width;
605 image->height = orig_image->height;
606 image->pitch = orig_image->pitch;
607 image->tile_x = orig_image->tile_x;
608 image->tile_y = orig_image->tile_y;
609 image->has_depthstencil = orig_image->has_depthstencil;
610 image->data = loaderPrivate;
611
612 memcpy(image->strides, orig_image->strides, sizeof(image->strides));
613 memcpy(image->offsets, orig_image->offsets, sizeof(image->offsets));
614
615 return image;
616 }
617
618 static GLboolean
619 intel_validate_usage(__DRIimage *image, unsigned int use)
620 {
621 if (use & __DRI_IMAGE_USE_CURSOR) {
622 if (image->width != 64 || image->height != 64)
623 return GL_FALSE;
624 }
625
626 return GL_TRUE;
627 }
628
629 static __DRIimage *
630 intel_create_image_from_names(__DRIscreen *screen,
631 int width, int height, int fourcc,
632 int *names, int num_names,
633 int *strides, int *offsets,
634 void *loaderPrivate)
635 {
636 struct intel_image_format *f = NULL;
637 __DRIimage *image;
638 int i, index;
639
640 if (screen == NULL || names == NULL || num_names != 1)
641 return NULL;
642
643 f = intel_image_format_lookup(fourcc);
644 if (f == NULL)
645 return NULL;
646
647 image = intel_create_image_from_name(screen, width, height,
648 __DRI_IMAGE_FORMAT_NONE,
649 names[0], strides[0],
650 loaderPrivate);
651
652 if (image == NULL)
653 return NULL;
654
655 image->planar_format = f;
656 for (i = 0; i < f->nplanes; i++) {
657 index = f->planes[i].buffer_index;
658 image->offsets[index] = offsets[index];
659 image->strides[index] = strides[index];
660 }
661
662 return image;
663 }
664
665 static __DRIimage *
666 intel_create_image_from_fds(__DRIscreen *screen,
667 int width, int height, int fourcc,
668 int *fds, int num_fds, int *strides, int *offsets,
669 void *loaderPrivate)
670 {
671 struct intel_screen *intelScreen = screen->driverPrivate;
672 struct intel_image_format *f;
673 __DRIimage *image;
674 int i, index;
675
676 if (fds == NULL || num_fds != 1)
677 return NULL;
678
679 f = intel_image_format_lookup(fourcc);
680 if (f == NULL)
681 return NULL;
682
683 if (f->nplanes == 1)
684 image = intel_allocate_image(f->planes[0].dri_format, loaderPrivate);
685 else
686 image = intel_allocate_image(__DRI_IMAGE_FORMAT_NONE, loaderPrivate);
687
688 if (image == NULL)
689 return NULL;
690
691 image->bo = drm_intel_bo_gem_create_from_prime(intelScreen->bufmgr,
692 fds[0],
693 height * strides[0]);
694 if (image->bo == NULL) {
695 free(image);
696 return NULL;
697 }
698 image->width = width;
699 image->height = height;
700 image->pitch = strides[0];
701
702 image->planar_format = f;
703 for (i = 0; i < f->nplanes; i++) {
704 index = f->planes[i].buffer_index;
705 image->offsets[index] = offsets[index];
706 image->strides[index] = strides[index];
707 }
708
709 if (f->nplanes == 1) {
710 image->offset = image->offsets[0];
711 intel_image_warn_if_unaligned(image, __func__);
712 }
713
714 return image;
715 }
716
717 static __DRIimage *
718 intel_create_image_from_dma_bufs(__DRIscreen *screen,
719 int width, int height, int fourcc,
720 int *fds, int num_fds,
721 int *strides, int *offsets,
722 enum __DRIYUVColorSpace yuv_color_space,
723 enum __DRISampleRange sample_range,
724 enum __DRIChromaSiting horizontal_siting,
725 enum __DRIChromaSiting vertical_siting,
726 unsigned *error,
727 void *loaderPrivate)
728 {
729 __DRIimage *image;
730 struct intel_image_format *f = intel_image_format_lookup(fourcc);
731
732 /* For now only packed formats that have native sampling are supported. */
733 if (!f || f->nplanes != 1) {
734 *error = __DRI_IMAGE_ERROR_BAD_MATCH;
735 return NULL;
736 }
737
738 image = intel_create_image_from_fds(screen, width, height, fourcc, fds,
739 num_fds, strides, offsets,
740 loaderPrivate);
741
742 /*
 743     * Invalid parameters and any inconsistencies between them are assumed to
 744     * be checked by the caller.  Therefore, besides unsupported formats, the
 745     * only remaining failure mode is allocation.
746 */
747 if (!image) {
748 *error = __DRI_IMAGE_ERROR_BAD_ALLOC;
749 return NULL;
750 }
751
752 image->dma_buf_imported = true;
753 image->yuv_color_space = yuv_color_space;
754 image->sample_range = sample_range;
755 image->horizontal_siting = horizontal_siting;
756 image->vertical_siting = vertical_siting;
757
758 *error = __DRI_IMAGE_ERROR_SUCCESS;
759 return image;
760 }
761
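/* Create a child __DRIimage that references a single plane of a planar
 * parent image.  The child shares the parent's BO and takes its width,
 * height, offset and stride from the parent's per-plane description.
 */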
762 static __DRIimage *
763 intel_from_planar(__DRIimage *parent, int plane, void *loaderPrivate)
764 {
765 int width, height, offset, stride, dri_format, index;
766 struct intel_image_format *f;
767 __DRIimage *image;
768
769 if (parent == NULL || parent->planar_format == NULL)
770 return NULL;
771
772 f = parent->planar_format;
773
774 if (plane >= f->nplanes)
775 return NULL;
776
777 width = parent->width >> f->planes[plane].width_shift;
778 height = parent->height >> f->planes[plane].height_shift;
779 dri_format = f->planes[plane].dri_format;
780 index = f->planes[plane].buffer_index;
781 offset = parent->offsets[index];
782 stride = parent->strides[index];
783
784 image = intel_allocate_image(dri_format, loaderPrivate);
785 if (image == NULL)
786 return NULL;
787
788 if (offset + height * stride > parent->bo->size) {
789 _mesa_warning(NULL, "intel_create_sub_image: subimage out of bounds");
790 free(image);
791 return NULL;
792 }
793
794 image->bo = parent->bo;
795 drm_intel_bo_reference(parent->bo);
796
797 image->width = width;
798 image->height = height;
799 image->pitch = stride;
800 image->offset = offset;
801
802 intel_image_warn_if_unaligned(image, __func__);
803
804 return image;
805 }
806
807 static const __DRIimageExtension intelImageExtension = {
808 .base = { __DRI_IMAGE, 11 },
809
810 .createImageFromName = intel_create_image_from_name,
811 .createImageFromRenderbuffer = intel_create_image_from_renderbuffer,
812 .destroyImage = intel_destroy_image,
813 .createImage = intel_create_image,
814 .queryImage = intel_query_image,
815 .dupImage = intel_dup_image,
816 .validateUsage = intel_validate_usage,
817 .createImageFromNames = intel_create_image_from_names,
818 .fromPlanar = intel_from_planar,
819 .createImageFromTexture = intel_create_image_from_texture,
820 .createImageFromFds = intel_create_image_from_fds,
821 .createImageFromDmaBufs = intel_create_image_from_dma_bufs,
822 .blitImage = NULL,
823 .getCapabilities = NULL
824 };
825
826 static int
827 brw_query_renderer_integer(__DRIscreen *psp, int param, unsigned int *value)
828 {
829 const struct intel_screen *const intelScreen =
830 (struct intel_screen *) psp->driverPrivate;
831
832 switch (param) {
833 case __DRI2_RENDERER_VENDOR_ID:
834 value[0] = 0x8086;
835 return 0;
836 case __DRI2_RENDERER_DEVICE_ID:
837 value[0] = intelScreen->deviceID;
838 return 0;
839 case __DRI2_RENDERER_ACCELERATED:
840 value[0] = 1;
841 return 0;
842 case __DRI2_RENDERER_VIDEO_MEMORY: {
843 /* Once a batch uses more than 75% of the maximum mappable size, we
844 * assume that there's some fragmentation, and we start doing extra
845 * flushing, etc. That's the big cliff apps will care about.
846 */
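      /* For example, a 4096 MB aperture reports 3072 MB here, further
       * clamped to the amount of physical system memory below.
       */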
847 size_t aper_size;
848 size_t mappable_size;
849
850 drm_intel_get_aperture_sizes(psp->fd, &mappable_size, &aper_size);
851
852 const unsigned gpu_mappable_megabytes =
853 (aper_size / (1024 * 1024)) * 3 / 4;
854
855 const long system_memory_pages = sysconf(_SC_PHYS_PAGES);
856 const long system_page_size = sysconf(_SC_PAGE_SIZE);
857
858 if (system_memory_pages <= 0 || system_page_size <= 0)
859 return -1;
860
861 const uint64_t system_memory_bytes = (uint64_t) system_memory_pages
862 * (uint64_t) system_page_size;
863
864 const unsigned system_memory_megabytes =
865 (unsigned) (system_memory_bytes / (1024 * 1024));
866
867 value[0] = MIN2(system_memory_megabytes, gpu_mappable_megabytes);
868 return 0;
869 }
870 case __DRI2_RENDERER_UNIFIED_MEMORY_ARCHITECTURE:
871 value[0] = 1;
872 return 0;
873 default:
874 return driQueryRendererIntegerCommon(psp, param, value);
875 }
876
877 return -1;
878 }
879
880 static int
881 brw_query_renderer_string(__DRIscreen *psp, int param, const char **value)
882 {
883 const struct intel_screen *intelScreen =
884 (struct intel_screen *) psp->driverPrivate;
885
886 switch (param) {
887 case __DRI2_RENDERER_VENDOR_ID:
888 value[0] = brw_vendor_string;
889 return 0;
890 case __DRI2_RENDERER_DEVICE_ID:
891 value[0] = brw_get_renderer_string(intelScreen->deviceID);
892 return 0;
893 default:
894 break;
895 }
896
897 return -1;
898 }
899
900 static const __DRI2rendererQueryExtension intelRendererQueryExtension = {
901 .base = { __DRI2_RENDERER_QUERY, 1 },
902
903 .queryInteger = brw_query_renderer_integer,
904 .queryString = brw_query_renderer_string
905 };
906
907 static const __DRIrobustnessExtension dri2Robustness = {
908 .base = { __DRI2_ROBUSTNESS, 1 }
909 };
910
911 static const __DRIextension *intelScreenExtensions[] = {
912 &intelTexBufferExtension.base,
913 &intelFenceExtension.base,
914 &intelFlushExtension.base,
915 &intelImageExtension.base,
916 &intelRendererQueryExtension.base,
917 &dri2ConfigQueryExtension.base,
918 NULL
919 };
920
921 static const __DRIextension *intelRobustScreenExtensions[] = {
922 &intelTexBufferExtension.base,
923 &intelFenceExtension.base,
924 &intelFlushExtension.base,
925 &intelImageExtension.base,
926 &intelRendererQueryExtension.base,
927 &dri2ConfigQueryExtension.base,
928 &dri2Robustness.base,
929 NULL
930 };
931
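/* Read a single integer I915_PARAM_* value via DRM_I915_GETPARAM.
 * Unknown parameters (-EINVAL) fail silently; any other error is warned
 * about.  Returns true on success.
 */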
932 static bool
933 intel_get_param(__DRIscreen *psp, int param, int *value)
934 {
935 int ret;
936 struct drm_i915_getparam gp;
937
938 memset(&gp, 0, sizeof(gp));
939 gp.param = param;
940 gp.value = value;
941
942 ret = drmCommandWriteRead(psp->fd, DRM_I915_GETPARAM, &gp, sizeof(gp));
943 if (ret) {
944 if (ret != -EINVAL)
945 _mesa_warning(NULL, "drm_i915_getparam: %d", ret);
946 return false;
947 }
948
949 return true;
950 }
951
952 static bool
953 intel_get_boolean(__DRIscreen *psp, int param)
954 {
955 int value = 0;
956 return intel_get_param(psp, param, &value) && value;
957 }
958
959 static void
960 intelDestroyScreen(__DRIscreen * sPriv)
961 {
962 struct intel_screen *intelScreen = sPriv->driverPrivate;
963
964 dri_bufmgr_destroy(intelScreen->bufmgr);
965 driDestroyOptionInfo(&intelScreen->optionCache);
966
967 ralloc_free(intelScreen);
968 sPriv->driverPrivate = NULL;
969 }
970
971
972 /**
973 * This is called when we need to set up GL rendering to a new X window.
974 */
975 static GLboolean
976 intelCreateBuffer(__DRIscreen * driScrnPriv,
977 __DRIdrawable * driDrawPriv,
978 const struct gl_config * mesaVis, GLboolean isPixmap)
979 {
980 struct intel_renderbuffer *rb;
981 struct intel_screen *screen = (struct intel_screen*) driScrnPriv->driverPrivate;
982 mesa_format rgbFormat;
983 unsigned num_samples = intel_quantize_num_samples(screen, mesaVis->samples);
984 struct gl_framebuffer *fb;
985
986 if (isPixmap)
987 return false;
988
989 fb = CALLOC_STRUCT(gl_framebuffer);
990 if (!fb)
991 return false;
992
993 _mesa_initialize_window_framebuffer(fb, mesaVis);
994
995 if (screen->winsys_msaa_samples_override != -1) {
996 num_samples = screen->winsys_msaa_samples_override;
997 fb->Visual.samples = num_samples;
998 }
999
1000 if (mesaVis->redBits == 5)
1001 rgbFormat = MESA_FORMAT_B5G6R5_UNORM;
1002 else if (mesaVis->sRGBCapable)
1003 rgbFormat = MESA_FORMAT_B8G8R8A8_SRGB;
1004 else if (mesaVis->alphaBits == 0)
1005 rgbFormat = MESA_FORMAT_B8G8R8X8_UNORM;
1006 else {
1007 rgbFormat = MESA_FORMAT_B8G8R8A8_SRGB;
1008 fb->Visual.sRGBCapable = true;
1009 }
1010
1011 /* setup the hardware-based renderbuffers */
1012 rb = intel_create_renderbuffer(rgbFormat, num_samples);
1013 _mesa_add_renderbuffer(fb, BUFFER_FRONT_LEFT, &rb->Base.Base);
1014
1015 if (mesaVis->doubleBufferMode) {
1016 rb = intel_create_renderbuffer(rgbFormat, num_samples);
1017 _mesa_add_renderbuffer(fb, BUFFER_BACK_LEFT, &rb->Base.Base);
1018 }
1019
1020 /*
1021 * Assert here that the gl_config has an expected depth/stencil bit
1022 * combination: one of d24/s8, d16/s0, d0/s0. (See intelInitScreen2(),
1023 * which constructs the advertised configs.)
1024 */
1025 if (mesaVis->depthBits == 24) {
1026 assert(mesaVis->stencilBits == 8);
1027
1028 if (screen->devinfo->has_hiz_and_separate_stencil) {
1029 rb = intel_create_private_renderbuffer(MESA_FORMAT_Z24_UNORM_X8_UINT,
1030 num_samples);
1031 _mesa_add_renderbuffer(fb, BUFFER_DEPTH, &rb->Base.Base);
1032 rb = intel_create_private_renderbuffer(MESA_FORMAT_S_UINT8,
1033 num_samples);
1034 _mesa_add_renderbuffer(fb, BUFFER_STENCIL, &rb->Base.Base);
1035 } else {
1036 /*
1037 * Use combined depth/stencil. Note that the renderbuffer is
1038 * attached to two attachment points.
1039 */
1040 rb = intel_create_private_renderbuffer(MESA_FORMAT_Z24_UNORM_S8_UINT,
1041 num_samples);
1042 _mesa_add_renderbuffer(fb, BUFFER_DEPTH, &rb->Base.Base);
1043 _mesa_add_renderbuffer(fb, BUFFER_STENCIL, &rb->Base.Base);
1044 }
1045 }
1046 else if (mesaVis->depthBits == 16) {
1047 assert(mesaVis->stencilBits == 0);
1048 rb = intel_create_private_renderbuffer(MESA_FORMAT_Z_UNORM16,
1049 num_samples);
1050 _mesa_add_renderbuffer(fb, BUFFER_DEPTH, &rb->Base.Base);
1051 }
1052 else {
1053 assert(mesaVis->depthBits == 0);
1054 assert(mesaVis->stencilBits == 0);
1055 }
1056
1057 /* now add any/all software-based renderbuffers we may need */
1058 _swrast_add_soft_renderbuffers(fb,
1059 false, /* never sw color */
1060 false, /* never sw depth */
1061 false, /* never sw stencil */
1062 mesaVis->accumRedBits > 0,
1063 false, /* never sw alpha */
1064 false /* never sw aux */ );
1065 driDrawPriv->driverPrivate = fb;
1066
1067 return true;
1068 }
1069
1070 static void
1071 intelDestroyBuffer(__DRIdrawable * driDrawPriv)
1072 {
1073 struct gl_framebuffer *fb = driDrawPriv->driverPrivate;
1074
1075 _mesa_reference_framebuffer(&fb, NULL);
1076 }
1077
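/* Create the GEM buffer manager for this screen.  Honors the INTEL_NO_HW
 * environment variable and requires relaxed relocation delta support,
 * i.e. a 2.6.39 or newer kernel.
 */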
1078 static bool
1079 intel_init_bufmgr(struct intel_screen *intelScreen)
1080 {
1081 __DRIscreen *spriv = intelScreen->driScrnPriv;
1082
1083 intelScreen->no_hw = getenv("INTEL_NO_HW") != NULL;
1084
1085 intelScreen->bufmgr = intel_bufmgr_gem_init(spriv->fd, BATCH_SZ);
1086 if (intelScreen->bufmgr == NULL) {
1087 fprintf(stderr, "[%s:%u] Error initializing buffer manager.\n",
1088 __func__, __LINE__);
1089 return false;
1090 }
1091
1092 drm_intel_bufmgr_gem_enable_fenced_relocs(intelScreen->bufmgr);
1093
1094 if (!intel_get_boolean(spriv, I915_PARAM_HAS_RELAXED_DELTA)) {
1095 fprintf(stderr, "[%s: %u] Kernel 2.6.39 required.\n", __func__, __LINE__);
1096 return false;
1097 }
1098
1099 return true;
1100 }
1101
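/* Detect whether the kernel applies bit-6 swizzling to tiled buffers by
 * allocating a small X-tiled BO and querying the swizzle mode it was given.
 */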
1102 static bool
1103 intel_detect_swizzling(struct intel_screen *screen)
1104 {
1105 drm_intel_bo *buffer;
1106 unsigned long flags = 0;
1107 unsigned long aligned_pitch;
1108 uint32_t tiling = I915_TILING_X;
1109 uint32_t swizzle_mode = 0;
1110
1111 buffer = drm_intel_bo_alloc_tiled(screen->bufmgr, "swizzle test",
1112 64, 64, 4,
1113 &tiling, &aligned_pitch, flags);
1114 if (buffer == NULL)
1115 return false;
1116
1117 drm_intel_bo_get_tiling(buffer, &tiling, &swizzle_mode);
1118 drm_intel_bo_unreference(buffer);
1119
1120 if (swizzle_mode == I915_BIT_6_SWIZZLE_NONE)
1121 return false;
1122 else
1123 return true;
1124 }
1125
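/* Probe how the TIMESTAMP register can be read on this kernel.  Returns
 * 3 if the full 36-bit register read is available, 2 if the value is read
 * shifted into the upper dword, 1 if it is read unshifted, and 0 if no
 * usable timestamp was found.
 */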
1126 static int
1127 intel_detect_timestamp(struct intel_screen *screen)
1128 {
1129 uint64_t dummy = 0, last = 0;
1130 int upper, lower, loops;
1131
1132 /* On 64bit systems, some old kernels trigger a hw bug resulting in the
1133 * TIMESTAMP register being shifted and the low 32bits always zero.
1134 *
1135 * More recent kernels offer an interface to read the full 36bits
1136 * everywhere.
1137 */
1138 if (drm_intel_reg_read(screen->bufmgr, TIMESTAMP | 1, &dummy) == 0)
1139 return 3;
1140
1141 /* Determine if we have a 32bit or 64bit kernel by inspecting the
1142 * upper 32bits for a rapidly changing timestamp.
1143 */
1144 if (drm_intel_reg_read(screen->bufmgr, TIMESTAMP, &last))
1145 return 0;
1146
1147 upper = lower = 0;
1148 for (loops = 0; loops < 10; loops++) {
1149 /* The TIMESTAMP should change every 80ns, so several round trips
1150 * through the kernel should be enough to advance it.
1151 */
1152 if (drm_intel_reg_read(screen->bufmgr, TIMESTAMP, &dummy))
1153 return 0;
1154
1155 upper += (dummy >> 32) != (last >> 32);
1156 if (upper > 1) /* beware 32bit counter overflow */
1157 return 2; /* upper dword holds the low 32bits of the timestamp */
1158
1159 lower += (dummy & 0xffffffff) != (last & 0xffffffff);
1160 if (lower > 1)
1161 return 1; /* timestamp is unshifted */
1162
1163 last = dummy;
1164 }
1165
1166 /* No advancement? No timestamp! */
1167 return 0;
1168 }
1169
1170 /**
1171 * Return array of MSAA modes supported by the hardware. The array is
1172 * zero-terminated and sorted in decreasing order.
1173 */
1174 const int*
1175 intel_supported_msaa_modes(const struct intel_screen *screen)
1176 {
1177 static const int gen8_modes[] = {8, 4, 2, 0, -1};
1178 static const int gen7_modes[] = {8, 4, 0, -1};
1179 static const int gen6_modes[] = {4, 0, -1};
1180 static const int gen4_modes[] = {0, -1};
1181
1182 if (screen->devinfo->gen >= 8) {
1183 return gen8_modes;
1184 } else if (screen->devinfo->gen >= 7) {
1185 return gen7_modes;
1186 } else if (screen->devinfo->gen == 6) {
1187 return gen6_modes;
1188 } else {
1189 return gen4_modes;
1190 }
1191 }
1192
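/* Build the framebuffer configs advertised to the loader: singlesample
 * configs without an accumulation buffer, a minimal accumulation set, and
 * (on gen6+) multisample configs.
 */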
1193 static __DRIconfig**
1194 intel_screen_make_configs(__DRIscreen *dri_screen)
1195 {
1196 static const mesa_format formats[] = {
1197 MESA_FORMAT_B5G6R5_UNORM,
1198 MESA_FORMAT_B8G8R8A8_UNORM,
1199 MESA_FORMAT_B8G8R8X8_UNORM
1200 };
1201
1202 /* GLX_SWAP_COPY_OML is not supported due to page flipping. */
1203 static const GLenum back_buffer_modes[] = {
1204 GLX_SWAP_UNDEFINED_OML, GLX_NONE,
1205 };
1206
1207 static const uint8_t singlesample_samples[1] = {0};
1208 static const uint8_t multisample_samples[2] = {4, 8};
1209
1210 struct intel_screen *screen = dri_screen->driverPrivate;
1211 const struct brw_device_info *devinfo = screen->devinfo;
1212 uint8_t depth_bits[4], stencil_bits[4];
1213 __DRIconfig **configs = NULL;
1214
1215 /* Generate singlesample configs without accumulation buffer. */
1216 for (int i = 0; i < ARRAY_SIZE(formats); i++) {
1217 __DRIconfig **new_configs;
1218 int num_depth_stencil_bits = 2;
1219
1220 /* Starting with DRI2 protocol version 1.1 we can request a depth/stencil
1221 * buffer that has a different number of bits per pixel than the color
1222        * buffer; gen >= 6 supports this.
1223 */
1224 depth_bits[0] = 0;
1225 stencil_bits[0] = 0;
1226
1227 if (formats[i] == MESA_FORMAT_B5G6R5_UNORM) {
1228 depth_bits[1] = 16;
1229 stencil_bits[1] = 0;
1230 if (devinfo->gen >= 6) {
1231 depth_bits[2] = 24;
1232 stencil_bits[2] = 8;
1233 num_depth_stencil_bits = 3;
1234 }
1235 } else {
1236 depth_bits[1] = 24;
1237 stencil_bits[1] = 8;
1238 }
1239
1240 new_configs = driCreateConfigs(formats[i],
1241 depth_bits,
1242 stencil_bits,
1243 num_depth_stencil_bits,
1244 back_buffer_modes, 2,
1245 singlesample_samples, 1,
1246 false);
1247 configs = driConcatConfigs(configs, new_configs);
1248 }
1249
1250 /* Generate the minimum possible set of configs that include an
1251 * accumulation buffer.
1252 */
1253 for (int i = 0; i < ARRAY_SIZE(formats); i++) {
1254 __DRIconfig **new_configs;
1255
1256 if (formats[i] == MESA_FORMAT_B5G6R5_UNORM) {
1257 depth_bits[0] = 16;
1258 stencil_bits[0] = 0;
1259 } else {
1260 depth_bits[0] = 24;
1261 stencil_bits[0] = 8;
1262 }
1263
1264 new_configs = driCreateConfigs(formats[i],
1265 depth_bits, stencil_bits, 1,
1266 back_buffer_modes, 1,
1267 singlesample_samples, 1,
1268 true);
1269 configs = driConcatConfigs(configs, new_configs);
1270 }
1271
1272 /* Generate multisample configs.
1273 *
1274 * This loop breaks early, and hence is a no-op, on gen < 6.
1275 *
1276 * Multisample configs must follow the singlesample configs in order to
1277 * work around an X server bug present in 1.12. The X server chooses to
1278     * associate the first listed RGBA8888-Z24S8 config, regardless of its
1279 * sample count, with the 32-bit depth visual used for compositing.
1280 *
1281 * Only doublebuffer configs with GLX_SWAP_UNDEFINED_OML behavior are
1282 * supported. Singlebuffer configs are not supported because no one wants
1283 * them.
1284 */
1285 for (int i = 0; i < ARRAY_SIZE(formats); i++) {
1286 if (devinfo->gen < 6)
1287 break;
1288
1289 __DRIconfig **new_configs;
1290 const int num_depth_stencil_bits = 2;
1291 int num_msaa_modes = 0;
1292
1293 depth_bits[0] = 0;
1294 stencil_bits[0] = 0;
1295
1296 if (formats[i] == MESA_FORMAT_B5G6R5_UNORM) {
1297 depth_bits[1] = 16;
1298 stencil_bits[1] = 0;
1299 } else {
1300 depth_bits[1] = 24;
1301 stencil_bits[1] = 8;
1302 }
1303
1304 if (devinfo->gen >= 7)
1305 num_msaa_modes = 2;
1306 else if (devinfo->gen == 6)
1307 num_msaa_modes = 1;
1308
1309 new_configs = driCreateConfigs(formats[i],
1310 depth_bits,
1311 stencil_bits,
1312 num_depth_stencil_bits,
1313 back_buffer_modes, 1,
1314 multisample_samples,
1315 num_msaa_modes,
1316 false);
1317 configs = driConcatConfigs(configs, new_configs);
1318 }
1319
1320 if (configs == NULL) {
1321 fprintf(stderr, "[%s:%u] Error creating FBConfig!\n", __func__,
1322 __LINE__);
1323 return NULL;
1324 }
1325
1326 return configs;
1327 }
1328
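/* Advertise the maximum GL core/compat and GLES versions that may be
 * requested for this hardware generation.
 */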
1329 static void
1330 set_max_gl_versions(struct intel_screen *screen)
1331 {
1332 __DRIscreen *psp = screen->driScrnPriv;
1333
1334 switch (screen->devinfo->gen) {
1335 case 9:
1336 case 8:
1337 case 7:
1338 case 6:
1339 psp->max_gl_core_version = 33;
1340 psp->max_gl_compat_version = 30;
1341 psp->max_gl_es1_version = 11;
1342 psp->max_gl_es2_version = 30;
1343 break;
1344 case 5:
1345 case 4:
1346 psp->max_gl_core_version = 0;
1347 psp->max_gl_compat_version = 21;
1348 psp->max_gl_es1_version = 11;
1349 psp->max_gl_es2_version = 20;
1350 break;
1351 default:
1352 unreachable("unrecognized intel_screen::gen");
1353 }
1354 }
1355
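/* Query the device revision via I915_PARAM_REVISION, returning -1 if the
 * kernel does not expose it.
 */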
1356 static int
1357 brw_get_revision(int fd)
1358 {
1359 struct drm_i915_getparam gp;
1360 int revision;
1361 int ret;
1362
1363 memset(&gp, 0, sizeof(gp));
1364 gp.param = I915_PARAM_REVISION;
1365 gp.value = &revision;
1366
1367 ret = drmCommandWriteRead(fd, DRM_I915_GETPARAM, &gp, sizeof(gp));
1368 if (ret)
1369 revision = -1;
1370
1371 return revision;
1372 }
1373
1374 /* Drop when RS headers get pulled to libdrm */
1375 #ifndef I915_PARAM_HAS_RESOURCE_STREAMER
1376 #define I915_PARAM_HAS_RESOURCE_STREAMER 36
1377 #endif
1378
1379 /**
1380 * This is the driver specific part of the createNewScreen entry point.
1381 * Called when using DRI2.
1382 *
1383 * \return the struct gl_config supported by this driver
1384 */
1385 static const
1386 __DRIconfig **intelInitScreen2(__DRIscreen *psp)
1387 {
1388 struct intel_screen *intelScreen;
1389
1390 if (psp->image.loader) {
1391 } else if (psp->dri2.loader->base.version <= 2 ||
1392 psp->dri2.loader->getBuffersWithFormat == NULL) {
1393 fprintf(stderr,
1394 "\nERROR! DRI2 loader with getBuffersWithFormat() "
1395 "support required\n");
1396 return false;
1397 }
1398
1399 /* Allocate the private area */
1400 intelScreen = rzalloc(NULL, struct intel_screen);
1401 if (!intelScreen) {
1402 fprintf(stderr, "\nERROR! Allocating private area failed\n");
1403 return false;
1404 }
1405 /* parse information in __driConfigOptions */
1406 driParseOptionInfo(&intelScreen->optionCache, brw_config_options.xml);
1407
1408 intelScreen->driScrnPriv = psp;
1409 psp->driverPrivate = (void *) intelScreen;
1410
1411 if (!intel_init_bufmgr(intelScreen))
1412 return false;
1413
1414 intelScreen->deviceID = drm_intel_bufmgr_gem_get_devid(intelScreen->bufmgr);
1415 intelScreen->devinfo = brw_get_device_info(intelScreen->deviceID,
1416 brw_get_revision(psp->fd));
1417 if (!intelScreen->devinfo)
1418 return false;
1419
1420 brw_process_intel_debug_variable(intelScreen);
1421
1422 intelScreen->hw_must_use_separate_stencil = intelScreen->devinfo->gen >= 7;
1423
1424 intelScreen->hw_has_swizzling = intel_detect_swizzling(intelScreen);
1425 intelScreen->hw_has_timestamp = intel_detect_timestamp(intelScreen);
1426
1427 const char *force_msaa = getenv("INTEL_FORCE_MSAA");
1428 if (force_msaa) {
1429 intelScreen->winsys_msaa_samples_override =
1430 intel_quantize_num_samples(intelScreen, atoi(force_msaa));
1431 printf("Forcing winsys sample count to %d\n",
1432 intelScreen->winsys_msaa_samples_override);
1433 } else {
1434 intelScreen->winsys_msaa_samples_override = -1;
1435 }
1436
1437 set_max_gl_versions(intelScreen);
1438
1439 /* Notification of GPU resets requires hardware contexts and a kernel new
1440 * enough to support DRM_IOCTL_I915_GET_RESET_STATS. If the ioctl is
1441 * supported, calling it with a context of 0 will either generate EPERM or
1442     * no error.  If the ioctl is not supported, it always generates EINVAL.
1443 * Use this to determine whether to advertise the __DRI2_ROBUSTNESS
1444 * extension to the loader.
1445 *
1446 * Don't even try on pre-Gen6, since we don't attempt to use contexts there.
1447 */
1448 if (intelScreen->devinfo->gen >= 6) {
1449 struct drm_i915_reset_stats stats;
1450 memset(&stats, 0, sizeof(stats));
1451
1452 const int ret = drmIoctl(psp->fd, DRM_IOCTL_I915_GET_RESET_STATS, &stats);
1453
1454 intelScreen->has_context_reset_notification =
1455 (ret != -1 || errno != EINVAL);
1456 }
1457
1458 struct drm_i915_getparam getparam;
1459 getparam.param = I915_PARAM_CMD_PARSER_VERSION;
1460 getparam.value = &intelScreen->cmd_parser_version;
1461 const int ret = drmIoctl(psp->fd, DRM_IOCTL_I915_GETPARAM, &getparam);
1462 if (ret == -1)
1463 intelScreen->cmd_parser_version = 0;
1464
1465 psp->extensions = !intelScreen->has_context_reset_notification
1466 ? intelScreenExtensions : intelRobustScreenExtensions;
1467
1468 intelScreen->compiler = brw_compiler_create(intelScreen,
1469 intelScreen->devinfo);
1470
1471 if (intelScreen->devinfo->has_resource_streamer) {
1472 int val = -1;
1473 getparam.param = I915_PARAM_HAS_RESOURCE_STREAMER;
1474 getparam.value = &val;
1475
1476 drmIoctl(psp->fd, DRM_IOCTL_I915_GETPARAM, &getparam);
1477 intelScreen->has_resource_streamer = val > 0;
1478 }
1479
1480 return (const __DRIconfig**) intel_screen_make_configs(psp);
1481 }
1482
1483 struct intel_buffer {
1484 __DRIbuffer base;
1485 drm_intel_bo *bo;
1486 };
1487
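/* DRI2 AllocateBuffer entry point: allocate an X-tiled color BO for a
 * front or back attachment and flink it so other processes can reference
 * it by name.
 */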
1488 static __DRIbuffer *
1489 intelAllocateBuffer(__DRIscreen *screen,
1490 unsigned attachment, unsigned format,
1491 int width, int height)
1492 {
1493 struct intel_buffer *intelBuffer;
1494 struct intel_screen *intelScreen = screen->driverPrivate;
1495
1496 assert(attachment == __DRI_BUFFER_FRONT_LEFT ||
1497 attachment == __DRI_BUFFER_BACK_LEFT);
1498
1499 intelBuffer = calloc(1, sizeof *intelBuffer);
1500 if (intelBuffer == NULL)
1501 return NULL;
1502
1503 /* The front and back buffers are color buffers, which are X tiled. */
1504 uint32_t tiling = I915_TILING_X;
1505 unsigned long pitch;
1506 int cpp = format / 8;
1507 intelBuffer->bo = drm_intel_bo_alloc_tiled(intelScreen->bufmgr,
1508 "intelAllocateBuffer",
1509 width,
1510 height,
1511 cpp,
1512 &tiling, &pitch,
1513 BO_ALLOC_FOR_RENDER);
1514
1515 if (intelBuffer->bo == NULL) {
1516 free(intelBuffer);
1517 return NULL;
1518 }
1519
1520 drm_intel_bo_flink(intelBuffer->bo, &intelBuffer->base.name);
1521
1522 intelBuffer->base.attachment = attachment;
1523 intelBuffer->base.cpp = cpp;
1524 intelBuffer->base.pitch = pitch;
1525
1526 return &intelBuffer->base;
1527 }
1528
1529 static void
1530 intelReleaseBuffer(__DRIscreen *screen, __DRIbuffer *buffer)
1531 {
1532 struct intel_buffer *intelBuffer = (struct intel_buffer *) buffer;
1533
1534 drm_intel_bo_unreference(intelBuffer->bo);
1535 free(intelBuffer);
1536 }
1537
1538 static const struct __DriverAPIRec brw_driver_api = {
1539 .InitScreen = intelInitScreen2,
1540 .DestroyScreen = intelDestroyScreen,
1541 .CreateContext = brwCreateContext,
1542 .DestroyContext = intelDestroyContext,
1543 .CreateBuffer = intelCreateBuffer,
1544 .DestroyBuffer = intelDestroyBuffer,
1545 .MakeCurrent = intelMakeCurrent,
1546 .UnbindContext = intelUnbindContext,
1547 .AllocateBuffer = intelAllocateBuffer,
1548 .ReleaseBuffer = intelReleaseBuffer
1549 };
1550
1551 static const struct __DRIDriverVtableExtensionRec brw_vtable = {
1552 .base = { __DRI_DRIVER_VTABLE, 1 },
1553 .vtable = &brw_driver_api,
1554 };
1555
1556 static const __DRIextension *brw_driver_extensions[] = {
1557 &driCoreExtension.base,
1558 &driImageDriverExtension.base,
1559 &driDRI2Extension.base,
1560 &brw_vtable.base,
1561 &brw_config_options.base,
1562 NULL
1563 };
1564
1565 PUBLIC const __DRIextension **__driDriverGetExtensions_i965(void)
1566 {
1567 globalDriverAPI = &brw_driver_api;
1568
1569 return brw_driver_extensions;
1570 }