i965: Support importing R8 and GR88 dma_bufs
[mesa.git] / src / mesa / drivers / dri / i965 / intel_screen.c
1 /**************************************************************************
2 *
3 * Copyright 2003 VMware, Inc.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28 #include <errno.h>
29 #include <time.h>
30 #include <unistd.h>
31 #include "main/glheader.h"
32 #include "main/context.h"
33 #include "main/framebuffer.h"
34 #include "main/renderbuffer.h"
35 #include "main/texobj.h"
36 #include "main/hash.h"
37 #include "main/fbobject.h"
38 #include "main/version.h"
39 #include "swrast/s_renderbuffer.h"
40 #include "util/ralloc.h"
41 #include "brw_shader.h"
42 #include "glsl/nir/nir.h"
43
44 #include "utils.h"
45 #include "xmlpool.h"
46
47 static const __DRIconfigOptionsExtension brw_config_options = {
48 .base = { __DRI_CONFIG_OPTIONS, 1 },
49 .xml =
50 DRI_CONF_BEGIN
51 DRI_CONF_SECTION_PERFORMANCE
52 DRI_CONF_VBLANK_MODE(DRI_CONF_VBLANK_ALWAYS_SYNC)
53 /* Options correspond to DRI_CONF_BO_REUSE_DISABLED,
54 * DRI_CONF_BO_REUSE_ALL
55 */
56 DRI_CONF_OPT_BEGIN_V(bo_reuse, enum, 1, "0:1")
57 DRI_CONF_DESC_BEGIN(en, "Buffer object reuse")
58 DRI_CONF_ENUM(0, "Disable buffer object reuse")
59 DRI_CONF_ENUM(1, "Enable reuse of all sizes of buffer objects")
60 DRI_CONF_DESC_END
61 DRI_CONF_OPT_END
62
63 DRI_CONF_OPT_BEGIN_B(hiz, "true")
64 DRI_CONF_DESC(en, "Enable Hierarchical Z on gen6+")
65 DRI_CONF_OPT_END
66 DRI_CONF_SECTION_END
67
68 DRI_CONF_SECTION_QUALITY
69 DRI_CONF_FORCE_S3TC_ENABLE("false")
70
71 DRI_CONF_OPT_BEGIN(clamp_max_samples, int, -1)
72 DRI_CONF_DESC(en, "Clamp the value of GL_MAX_SAMPLES to the "
73 "given integer. If negative, then do not clamp.")
74 DRI_CONF_OPT_END
75 DRI_CONF_SECTION_END
76
77 DRI_CONF_SECTION_DEBUG
78 DRI_CONF_NO_RAST("false")
79 DRI_CONF_ALWAYS_FLUSH_BATCH("false")
80 DRI_CONF_ALWAYS_FLUSH_CACHE("false")
81 DRI_CONF_DISABLE_THROTTLING("false")
82 DRI_CONF_FORCE_GLSL_EXTENSIONS_WARN("false")
83 DRI_CONF_DISABLE_GLSL_LINE_CONTINUATIONS("false")
84 DRI_CONF_DISABLE_BLEND_FUNC_EXTENDED("false")
85 DRI_CONF_ALLOW_GLSL_EXTENSION_DIRECTIVE_MIDSHADER("false")
86
87 DRI_CONF_OPT_BEGIN_B(shader_precompile, "true")
88 DRI_CONF_DESC(en, "Perform code generation at shader link time.")
89 DRI_CONF_OPT_END
90 DRI_CONF_SECTION_END
91 DRI_CONF_END
92 };
93
94 #include "intel_batchbuffer.h"
95 #include "intel_buffers.h"
96 #include "intel_bufmgr.h"
97 #include "intel_fbo.h"
98 #include "intel_mipmap_tree.h"
99 #include "intel_screen.h"
100 #include "intel_tex.h"
101 #include "intel_image.h"
102
103 #include "brw_context.h"
104
105 #include "i915_drm.h"
106
107 /**
108 * For debugging purposes, this returns a time in seconds.
109 */
110 double
111 get_time(void)
112 {
113 struct timespec tp;
114
115 clock_gettime(CLOCK_MONOTONIC, &tp);
116
117 return tp.tv_sec + tp.tv_nsec / 1000000000.0;
118 }
119
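/* Dump the current color draw buffers into the AUB capture as BMP blocks
 * (only B8G8R8A8/B8G8R8X8 buffers are handled; others are skipped).
 */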
120 void
121 aub_dump_bmp(struct gl_context *ctx)
122 {
123 struct gl_framebuffer *fb = ctx->DrawBuffer;
124
125 for (int i = 0; i < fb->_NumColorDrawBuffers; i++) {
126 struct intel_renderbuffer *irb =
127 intel_renderbuffer(fb->_ColorDrawBuffers[i]);
128
129 if (irb && irb->mt) {
130 enum aub_dump_bmp_format format;
131
132 switch (irb->Base.Base.Format) {
133 case MESA_FORMAT_B8G8R8A8_UNORM:
134 case MESA_FORMAT_B8G8R8X8_UNORM:
135 format = AUB_DUMP_BMP_FORMAT_ARGB_8888;
136 break;
137 default:
138 continue;
139 }
140
141 drm_intel_gem_bo_aub_dump_bmp(irb->mt->bo,
142 irb->draw_x,
143 irb->draw_y,
144 irb->Base.Base.Width,
145 irb->Base.Base.Height,
146 format,
147 irb->mt->pitch,
148 0);
149 }
150 }
151 }
152
153 static const __DRItexBufferExtension intelTexBufferExtension = {
154 .base = { __DRI_TEX_BUFFER, 3 },
155
156 .setTexBuffer = intelSetTexBuffer,
157 .setTexBuffer2 = intelSetTexBuffer2,
158 .releaseTexBuffer = NULL,
159 };
160
161 static void
162 intel_dri2_flush_with_flags(__DRIcontext *cPriv,
163 __DRIdrawable *dPriv,
164 unsigned flags,
165 enum __DRI2throttleReason reason)
166 {
167 struct brw_context *brw = cPriv->driverPrivate;
168
169 if (!brw)
170 return;
171
172 struct gl_context *ctx = &brw->ctx;
173
174 FLUSH_VERTICES(ctx, 0);
175
176 if (flags & __DRI2_FLUSH_DRAWABLE)
177 intel_resolve_for_dri2_flush(brw, dPriv);
178
179 if (reason == __DRI2_THROTTLE_SWAPBUFFER)
180 brw->need_swap_throttle = true;
181 if (reason == __DRI2_THROTTLE_FLUSHFRONT)
182 brw->need_flush_throttle = true;
183
184 intel_batchbuffer_flush(brw);
185
186 if (INTEL_DEBUG & DEBUG_AUB) {
187 aub_dump_bmp(ctx);
188 }
189 }
190
191 /**
192 * Provides compatibility with loaders that only support the older (version
193 * 1-3) flush interface.
194 *
195 * That includes libGL up to Mesa 9.0, and the X Server at least up to 1.13.
196 */
197 static void
198 intel_dri2_flush(__DRIdrawable *drawable)
199 {
200 intel_dri2_flush_with_flags(drawable->driContextPriv, drawable,
201 __DRI2_FLUSH_DRAWABLE,
202 __DRI2_THROTTLE_SWAPBUFFER);
203 }
204
205 static const struct __DRI2flushExtensionRec intelFlushExtension = {
206 .base = { __DRI2_FLUSH, 4 },
207
208 .flush = intel_dri2_flush,
209 .invalidate = dri2InvalidateDrawable,
210 .flush_with_flags = intel_dri2_flush_with_flags,
211 };
212
213 static struct intel_image_format intel_image_formats[] = {
214 { __DRI_IMAGE_FOURCC_ARGB8888, __DRI_IMAGE_COMPONENTS_RGBA, 1,
215 { { 0, 0, 0, __DRI_IMAGE_FORMAT_ARGB8888, 4 } } },
216
217 { __DRI_IMAGE_FOURCC_ABGR8888, __DRI_IMAGE_COMPONENTS_RGBA, 1,
218 { { 0, 0, 0, __DRI_IMAGE_FORMAT_ABGR8888, 4 } } },
219
220 { __DRI_IMAGE_FOURCC_SARGB8888, __DRI_IMAGE_COMPONENTS_RGBA, 1,
221 { { 0, 0, 0, __DRI_IMAGE_FORMAT_SARGB8, 4 } } },
222
223 { __DRI_IMAGE_FOURCC_XRGB8888, __DRI_IMAGE_COMPONENTS_RGB, 1,
224 { { 0, 0, 0, __DRI_IMAGE_FORMAT_XRGB8888, 4 }, } },
225
226 { __DRI_IMAGE_FOURCC_XBGR8888, __DRI_IMAGE_COMPONENTS_RGB, 1,
227 { { 0, 0, 0, __DRI_IMAGE_FORMAT_XBGR8888, 4 }, } },
228
229 { __DRI_IMAGE_FOURCC_RGB565, __DRI_IMAGE_COMPONENTS_RGB, 1,
230 { { 0, 0, 0, __DRI_IMAGE_FORMAT_RGB565, 2 } } },
231
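/* Single- and two-channel formats.  These can be imported as dma_bufs in
 * their own right, e.g. to sample the individual planes of YUV buffers
 * (an R8 Y plane plus a GR88 interleaved UV plane for NV12).
 */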
232 { __DRI_IMAGE_FOURCC_R8, __DRI_IMAGE_COMPONENTS_R, 1,
233 { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 }, } },
234
235 { __DRI_IMAGE_FOURCC_GR88, __DRI_IMAGE_COMPONENTS_RG, 1,
236 { { 0, 0, 0, __DRI_IMAGE_FORMAT_GR88, 2 }, } },
237
238 { __DRI_IMAGE_FOURCC_YUV410, __DRI_IMAGE_COMPONENTS_Y_U_V, 3,
239 { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
240 { 1, 2, 2, __DRI_IMAGE_FORMAT_R8, 1 },
241 { 2, 2, 2, __DRI_IMAGE_FORMAT_R8, 1 } } },
242
243 { __DRI_IMAGE_FOURCC_YUV411, __DRI_IMAGE_COMPONENTS_Y_U_V, 3,
244 { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
245 { 1, 2, 0, __DRI_IMAGE_FORMAT_R8, 1 },
246 { 2, 2, 0, __DRI_IMAGE_FORMAT_R8, 1 } } },
247
248 { __DRI_IMAGE_FOURCC_YUV420, __DRI_IMAGE_COMPONENTS_Y_U_V, 3,
249 { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
250 { 1, 1, 1, __DRI_IMAGE_FORMAT_R8, 1 },
251 { 2, 1, 1, __DRI_IMAGE_FORMAT_R8, 1 } } },
252
253 { __DRI_IMAGE_FOURCC_YUV422, __DRI_IMAGE_COMPONENTS_Y_U_V, 3,
254 { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
255 { 1, 1, 0, __DRI_IMAGE_FORMAT_R8, 1 },
256 { 2, 1, 0, __DRI_IMAGE_FORMAT_R8, 1 } } },
257
258 { __DRI_IMAGE_FOURCC_YUV444, __DRI_IMAGE_COMPONENTS_Y_U_V, 3,
259 { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
260 { 1, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
261 { 2, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 } } },
262
263 { __DRI_IMAGE_FOURCC_NV12, __DRI_IMAGE_COMPONENTS_Y_UV, 2,
264 { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
265 { 1, 1, 1, __DRI_IMAGE_FORMAT_GR88, 2 } } },
266
267 { __DRI_IMAGE_FOURCC_NV16, __DRI_IMAGE_COMPONENTS_Y_UV, 2,
268 { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
269 { 1, 1, 0, __DRI_IMAGE_FORMAT_GR88, 2 } } },
270
271 /* For YUYV buffers, we set up two overlapping DRI images and treat
272 * them as planar buffers in the compositors. Plane 0 is GR88 and
273 * samples YU or YV pairs, placing Y into the R component, while
274 * plane 1 is ARGB and samples YUYV clusters, placing U into the
275 * G component and V into A. This lets the texture sampler
276 * interpolate the Y components correctly when sampling from
277 * plane 0, and interpolate U and V correctly when sampling from
278 * plane 1. */
279 { __DRI_IMAGE_FOURCC_YUYV, __DRI_IMAGE_COMPONENTS_Y_XUXV, 2,
280 { { 0, 0, 0, __DRI_IMAGE_FORMAT_GR88, 2 },
281 { 0, 1, 0, __DRI_IMAGE_FORMAT_ARGB8888, 4 } } }
282 };
283
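/* Warn if a tiled image has an offset that is not on a 4 KiB tile
 * boundary.
 */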
284 static void
285 intel_image_warn_if_unaligned(__DRIimage *image, const char *func)
286 {
287 uint32_t tiling, swizzle;
288 drm_intel_bo_get_tiling(image->bo, &tiling, &swizzle);
289
290 if (tiling != I915_TILING_NONE && (image->offset & 0xfff)) {
291 _mesa_warning(NULL, "%s: offset 0x%08x not on tile boundary",
292 func, image->offset);
293 }
294 }
295
296 static struct intel_image_format *
297 intel_image_format_lookup(int fourcc)
298 {
299 struct intel_image_format *f = NULL;
300
301 for (unsigned i = 0; i < ARRAY_SIZE(intel_image_formats); i++) {
302 if (intel_image_formats[i].fourcc == fourcc) {
303 f = &intel_image_formats[i];
304 break;
305 }
306 }
307
308 return f;
309 }
310
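/* Reverse lookup: find the fourcc whose first plane uses the given DRI
 * format.  Used to answer __DRI_IMAGE_ATTRIB_FOURCC queries.
 */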
311 static boolean intel_lookup_fourcc(int dri_format, int *fourcc)
312 {
313 for (unsigned i = 0; i < ARRAY_SIZE(intel_image_formats); i++) {
314 if (intel_image_formats[i].planes[0].dri_format == dri_format) {
315 *fourcc = intel_image_formats[i].fourcc;
316 return true;
317 }
318 }
319 return false;
320 }
321
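/* Allocate a __DRIimage and fill in the format-derived fields; the caller
 * is responsible for attaching a bo and setting width/height/pitch.
 */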
322 static __DRIimage *
323 intel_allocate_image(int dri_format, void *loaderPrivate)
324 {
325 __DRIimage *image;
326
327 image = calloc(1, sizeof *image);
328 if (image == NULL)
329 return NULL;
330
331 image->dri_format = dri_format;
332 image->offset = 0;
333
334 image->format = driImageFormatToGLFormat(dri_format);
335 if (dri_format != __DRI_IMAGE_FORMAT_NONE &&
336 image->format == MESA_FORMAT_NONE) {
337 free(image);
338 return NULL;
339 }
340
341 image->internal_format = _mesa_get_format_base_format(image->format);
342 image->data = loaderPrivate;
343
344 return image;
345 }
346
347 /**
348 * Sets up a DRIImage structure to point to a slice out of a miptree.
349 */
350 static void
351 intel_setup_image_from_mipmap_tree(struct brw_context *brw, __DRIimage *image,
352 struct intel_mipmap_tree *mt, GLuint level,
353 GLuint zoffset)
354 {
355 intel_miptree_make_shareable(brw, mt);
356
357 intel_miptree_check_level_layer(mt, level, zoffset);
358
359 image->width = minify(mt->physical_width0, level - mt->first_level);
360 image->height = minify(mt->physical_height0, level - mt->first_level);
361 image->pitch = mt->pitch;
362
363 image->offset = intel_miptree_get_tile_offsets(mt, level, zoffset,
364 &image->tile_x,
365 &image->tile_y);
366
367 drm_intel_bo_unreference(image->bo);
368 image->bo = mt->bo;
369 drm_intel_bo_reference(mt->bo);
370 }
371
372 static __DRIimage *
373 intel_create_image_from_name(__DRIscreen *screen,
374 int width, int height, int format,
375 int name, int pitch, void *loaderPrivate)
376 {
377 struct intel_screen *intelScreen = screen->driverPrivate;
378 __DRIimage *image;
379 int cpp;
380
381 image = intel_allocate_image(format, loaderPrivate);
382 if (image == NULL)
383 return NULL;
384
385 if (image->format == MESA_FORMAT_NONE)
386 cpp = 1;
387 else
388 cpp = _mesa_get_format_bytes(image->format);
389
390 image->width = width;
391 image->height = height;
392 image->pitch = pitch * cpp;
393 image->bo = drm_intel_bo_gem_create_from_name(intelScreen->bufmgr, "image",
394 name);
395 if (!image->bo) {
396 free(image);
397 return NULL;
398 }
399
400 return image;
401 }
402
403 static __DRIimage *
404 intel_create_image_from_renderbuffer(__DRIcontext *context,
405 int renderbuffer, void *loaderPrivate)
406 {
407 __DRIimage *image;
408 struct brw_context *brw = context->driverPrivate;
409 struct gl_context *ctx = &brw->ctx;
410 struct gl_renderbuffer *rb;
411 struct intel_renderbuffer *irb;
412
413 rb = _mesa_lookup_renderbuffer(ctx, renderbuffer);
414 if (!rb) {
415 _mesa_error(ctx, GL_INVALID_OPERATION, "glRenderbufferExternalMESA");
416 return NULL;
417 }
418
419 irb = intel_renderbuffer(rb);
420 intel_miptree_make_shareable(brw, irb->mt);
421 image = calloc(1, sizeof *image);
422 if (image == NULL)
423 return NULL;
424
425 image->internal_format = rb->InternalFormat;
426 image->format = rb->Format;
427 image->offset = 0;
428 image->data = loaderPrivate;
429 drm_intel_bo_unreference(image->bo);
430 image->bo = irb->mt->bo;
431 drm_intel_bo_reference(irb->mt->bo);
432 image->width = rb->Width;
433 image->height = rb->Height;
434 image->pitch = irb->mt->pitch;
435 image->dri_format = driGLFormatToImageFormat(image->format);
436 image->has_depthstencil = irb->mt->stencil_mt? true : false;
437
438 rb->NeedsFinishRenderTexture = true;
439 return image;
440 }
441
442 static __DRIimage *
443 intel_create_image_from_texture(__DRIcontext *context, int target,
444 unsigned texture, int zoffset,
445 int level,
446 unsigned *error,
447 void *loaderPrivate)
448 {
449 __DRIimage *image;
450 struct brw_context *brw = context->driverPrivate;
451 struct gl_texture_object *obj;
452 struct intel_texture_object *iobj;
453 GLuint face = 0;
454
455 obj = _mesa_lookup_texture(&brw->ctx, texture);
456 if (!obj || obj->Target != target) {
457 *error = __DRI_IMAGE_ERROR_BAD_PARAMETER;
458 return NULL;
459 }
460
461 if (target == GL_TEXTURE_CUBE_MAP)
462 face = zoffset;
463
464 _mesa_test_texobj_completeness(&brw->ctx, obj);
465 iobj = intel_texture_object(obj);
466 if (!obj->_BaseComplete || (level > 0 && !obj->_MipmapComplete)) {
467 *error = __DRI_IMAGE_ERROR_BAD_PARAMETER;
468 return NULL;
469 }
470
471 if (level < obj->BaseLevel || level > obj->_MaxLevel) {
472 *error = __DRI_IMAGE_ERROR_BAD_MATCH;
473 return NULL;
474 }
475
476 if (target == GL_TEXTURE_3D && obj->Image[face][level]->Depth < zoffset) {
477 *error = __DRI_IMAGE_ERROR_BAD_MATCH;
478 return NULL;
479 }
480 image = calloc(1, sizeof *image);
481 if (image == NULL) {
482 *error = __DRI_IMAGE_ERROR_BAD_ALLOC;
483 return NULL;
484 }
485
486 image->internal_format = obj->Image[face][level]->InternalFormat;
487 image->format = obj->Image[face][level]->TexFormat;
488 image->data = loaderPrivate;
489 intel_setup_image_from_mipmap_tree(brw, image, iobj->mt, level, zoffset);
490 image->dri_format = driGLFormatToImageFormat(image->format);
491 image->has_depthstencil = iobj->mt->stencil_mt? true : false;
492 if (image->dri_format == __DRI_IMAGE_FORMAT_NONE) {
493 *error = __DRI_IMAGE_ERROR_BAD_PARAMETER;
494 free(image);
495 return NULL;
496 }
497
498 *error = __DRI_IMAGE_ERROR_SUCCESS;
499 return image;
500 }
501
502 static void
503 intel_destroy_image(__DRIimage *image)
504 {
505 drm_intel_bo_unreference(image->bo);
506 free(image);
507 }
508
509 static __DRIimage *
510 intel_create_image(__DRIscreen *screen,
511 int width, int height, int format,
512 unsigned int use,
513 void *loaderPrivate)
514 {
515 __DRIimage *image;
516 struct intel_screen *intelScreen = screen->driverPrivate;
517 uint32_t tiling;
518 int cpp;
519 unsigned long pitch;
520
521 tiling = I915_TILING_X;
522 if (use & __DRI_IMAGE_USE_CURSOR) {
523 if (width != 64 || height != 64)
524 return NULL;
525 tiling = I915_TILING_NONE;
526 }
527
528 if (use & __DRI_IMAGE_USE_LINEAR)
529 tiling = I915_TILING_NONE;
530
531 image = intel_allocate_image(format, loaderPrivate);
532 if (image == NULL)
533 return NULL;
534
535
536 cpp = _mesa_get_format_bytes(image->format);
537 image->bo = drm_intel_bo_alloc_tiled(intelScreen->bufmgr, "image",
538 width, height, cpp, &tiling,
539 &pitch, 0);
540 if (image->bo == NULL) {
541 free(image);
542 return NULL;
543 }
544 image->width = width;
545 image->height = height;
546 image->pitch = pitch;
547
548 return image;
549 }
550
551 static GLboolean
552 intel_query_image(__DRIimage *image, int attrib, int *value)
553 {
554 switch (attrib) {
555 case __DRI_IMAGE_ATTRIB_STRIDE:
556 *value = image->pitch;
557 return true;
558 case __DRI_IMAGE_ATTRIB_HANDLE:
559 *value = image->bo->handle;
560 return true;
561 case __DRI_IMAGE_ATTRIB_NAME:
562 return !drm_intel_bo_flink(image->bo, (uint32_t *) value);
563 case __DRI_IMAGE_ATTRIB_FORMAT:
564 *value = image->dri_format;
565 return true;
566 case __DRI_IMAGE_ATTRIB_WIDTH:
567 *value = image->width;
568 return true;
569 case __DRI_IMAGE_ATTRIB_HEIGHT:
570 *value = image->height;
571 return true;
572 case __DRI_IMAGE_ATTRIB_COMPONENTS:
573 if (image->planar_format == NULL)
574 return false;
575 *value = image->planar_format->components;
576 return true;
577 case __DRI_IMAGE_ATTRIB_FD:
578 if (drm_intel_bo_gem_export_to_prime(image->bo, value) == 0)
579 return true;
580 return false;
581 case __DRI_IMAGE_ATTRIB_FOURCC:
582 if (intel_lookup_fourcc(image->dri_format, value))
583 return true;
584 return false;
585 case __DRI_IMAGE_ATTRIB_NUM_PLANES:
586 *value = 1;
587 return true;
588
589 default:
590 return false;
591 }
592 }
593
594 static __DRIimage *
595 intel_dup_image(__DRIimage *orig_image, void *loaderPrivate)
596 {
597 __DRIimage *image;
598
599 image = calloc(1, sizeof *image);
600 if (image == NULL)
601 return NULL;
602
603 drm_intel_bo_reference(orig_image->bo);
604 image->bo = orig_image->bo;
605 image->internal_format = orig_image->internal_format;
606 image->planar_format = orig_image->planar_format;
607 image->dri_format = orig_image->dri_format;
608 image->format = orig_image->format;
609 image->offset = orig_image->offset;
610 image->width = orig_image->width;
611 image->height = orig_image->height;
612 image->pitch = orig_image->pitch;
613 image->tile_x = orig_image->tile_x;
614 image->tile_y = orig_image->tile_y;
615 image->has_depthstencil = orig_image->has_depthstencil;
616 image->data = loaderPrivate;
617
618 memcpy(image->strides, orig_image->strides, sizeof(image->strides));
619 memcpy(image->offsets, orig_image->offsets, sizeof(image->offsets));
620
621 return image;
622 }
623
624 static GLboolean
625 intel_validate_usage(__DRIimage *image, unsigned int use)
626 {
627 if (use & __DRI_IMAGE_USE_CURSOR) {
628 if (image->width != 64 || image->height != 64)
629 return GL_FALSE;
630 }
631
632 return GL_TRUE;
633 }
634
635 static __DRIimage *
636 intel_create_image_from_names(__DRIscreen *screen,
637 int width, int height, int fourcc,
638 int *names, int num_names,
639 int *strides, int *offsets,
640 void *loaderPrivate)
641 {
642 struct intel_image_format *f = NULL;
643 __DRIimage *image;
644 int i, index;
645
646 if (screen == NULL || names == NULL || num_names != 1)
647 return NULL;
648
649 f = intel_image_format_lookup(fourcc);
650 if (f == NULL)
651 return NULL;
652
653 image = intel_create_image_from_name(screen, width, height,
654 __DRI_IMAGE_FORMAT_NONE,
655 names[0], strides[0],
656 loaderPrivate);
657
658 if (image == NULL)
659 return NULL;
660
661 image->planar_format = f;
662 for (i = 0; i < f->nplanes; i++) {
663 index = f->planes[i].buffer_index;
664 image->offsets[index] = offsets[index];
665 image->strides[index] = strides[index];
666 }
667
668 return image;
669 }
670
671 static __DRIimage *
672 intel_create_image_from_fds(__DRIscreen *screen,
673 int width, int height, int fourcc,
674 int *fds, int num_fds, int *strides, int *offsets,
675 void *loaderPrivate)
676 {
677 struct intel_screen *intelScreen = screen->driverPrivate;
678 struct intel_image_format *f;
679 __DRIimage *image;
680 int i, index;
681
682 if (fds == NULL || num_fds != 1)
683 return NULL;
684
685 f = intel_image_format_lookup(fourcc);
686 if (f == NULL)
687 return NULL;
688
689 if (f->nplanes == 1)
690 image = intel_allocate_image(f->planes[0].dri_format, loaderPrivate);
691 else
692 image = intel_allocate_image(__DRI_IMAGE_FORMAT_NONE, loaderPrivate);
693
694 if (image == NULL)
695 return NULL;
696
697 image->bo = drm_intel_bo_gem_create_from_prime(intelScreen->bufmgr,
698 fds[0],
699 height * strides[0]);
700 if (image->bo == NULL) {
701 free(image);
702 return NULL;
703 }
704 image->width = width;
705 image->height = height;
706 image->pitch = strides[0];
707
708 image->planar_format = f;
709 for (i = 0; i < f->nplanes; i++) {
710 index = f->planes[i].buffer_index;
711 image->offsets[index] = offsets[index];
712 image->strides[index] = strides[index];
713 }
714
715 if (f->nplanes == 1) {
716 image->offset = image->offsets[0];
717 intel_image_warn_if_unaligned(image, __func__);
718 }
719
720 return image;
721 }
722
723 static __DRIimage *
724 intel_create_image_from_dma_bufs(__DRIscreen *screen,
725 int width, int height, int fourcc,
726 int *fds, int num_fds,
727 int *strides, int *offsets,
728 enum __DRIYUVColorSpace yuv_color_space,
729 enum __DRISampleRange sample_range,
730 enum __DRIChromaSiting horizontal_siting,
731 enum __DRIChromaSiting vertical_siting,
732 unsigned *error,
733 void *loaderPrivate)
734 {
735 __DRIimage *image;
736 struct intel_image_format *f = intel_image_format_lookup(fourcc);
737
738 /* For now only packed formats that have native sampling are supported. */
739 if (!f || f->nplanes != 1) {
740 *error = __DRI_IMAGE_ERROR_BAD_MATCH;
741 return NULL;
742 }
743
744 image = intel_create_image_from_fds(screen, width, height, fourcc, fds,
745 num_fds, strides, offsets,
746 loaderPrivate);
747
748 /*
749 * Invalid parameters and any inconsistencies between them are assumed to
750 * be checked by the caller.  Therefore, aside from unsupported formats,
751 * one can fail only in allocation.
752 */
753 if (!image) {
754 *error = __DRI_IMAGE_ERROR_BAD_ALLOC;
755 return NULL;
756 }
757
758 image->dma_buf_imported = true;
759 image->yuv_color_space = yuv_color_space;
760 image->sample_range = sample_range;
761 image->horizontal_siting = horizontal_siting;
762 image->vertical_siting = vertical_siting;
763
764 *error = __DRI_IMAGE_ERROR_SUCCESS;
765 return image;
766 }
767
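/* Create a new __DRIimage that refers to a single plane of a planar
 * parent image, applying the per-plane subsampling shifts, offset and
 * stride recorded in the parent's format description.
 */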
768 static __DRIimage *
769 intel_from_planar(__DRIimage *parent, int plane, void *loaderPrivate)
770 {
771 int width, height, offset, stride, dri_format, index;
772 struct intel_image_format *f;
773 __DRIimage *image;
774
775 if (parent == NULL || parent->planar_format == NULL)
776 return NULL;
777
778 f = parent->planar_format;
779
780 if (plane >= f->nplanes)
781 return NULL;
782
783 width = parent->width >> f->planes[plane].width_shift;
784 height = parent->height >> f->planes[plane].height_shift;
785 dri_format = f->planes[plane].dri_format;
786 index = f->planes[plane].buffer_index;
787 offset = parent->offsets[index];
788 stride = parent->strides[index];
789
790 image = intel_allocate_image(dri_format, loaderPrivate);
791 if (image == NULL)
792 return NULL;
793
794 if (offset + height * stride > parent->bo->size) {
795 _mesa_warning(NULL, "intel_create_sub_image: subimage out of bounds");
796 free(image);
797 return NULL;
798 }
799
800 image->bo = parent->bo;
801 drm_intel_bo_reference(parent->bo);
802
803 image->width = width;
804 image->height = height;
805 image->pitch = stride;
806 image->offset = offset;
807
808 intel_image_warn_if_unaligned(image, __func__);
809
810 return image;
811 }
812
813 static const __DRIimageExtension intelImageExtension = {
814 .base = { __DRI_IMAGE, 11 },
815
816 .createImageFromName = intel_create_image_from_name,
817 .createImageFromRenderbuffer = intel_create_image_from_renderbuffer,
818 .destroyImage = intel_destroy_image,
819 .createImage = intel_create_image,
820 .queryImage = intel_query_image,
821 .dupImage = intel_dup_image,
822 .validateUsage = intel_validate_usage,
823 .createImageFromNames = intel_create_image_from_names,
824 .fromPlanar = intel_from_planar,
825 .createImageFromTexture = intel_create_image_from_texture,
826 .createImageFromFds = intel_create_image_from_fds,
827 .createImageFromDmaBufs = intel_create_image_from_dma_bufs,
828 .blitImage = NULL,
829 .getCapabilities = NULL
830 };
831
832 static int
833 brw_query_renderer_integer(__DRIscreen *psp, int param, unsigned int *value)
834 {
835 const struct intel_screen *const intelScreen =
836 (struct intel_screen *) psp->driverPrivate;
837
838 switch (param) {
839 case __DRI2_RENDERER_VENDOR_ID:
840 value[0] = 0x8086;
841 return 0;
842 case __DRI2_RENDERER_DEVICE_ID:
843 value[0] = intelScreen->deviceID;
844 return 0;
845 case __DRI2_RENDERER_ACCELERATED:
846 value[0] = 1;
847 return 0;
848 case __DRI2_RENDERER_VIDEO_MEMORY: {
849 /* Once a batch uses more than 75% of the maximum mappable size, we
850 * assume that there's some fragmentation, and we start doing extra
851 * flushing, etc. That's the big cliff apps will care about.
852 */
853 size_t aper_size;
854 size_t mappable_size;
855
856 drm_intel_get_aperture_sizes(psp->fd, &mappable_size, &aper_size);
857
858 const unsigned gpu_mappable_megabytes =
859 (aper_size / (1024 * 1024)) * 3 / 4;
860
861 const long system_memory_pages = sysconf(_SC_PHYS_PAGES);
862 const long system_page_size = sysconf(_SC_PAGE_SIZE);
863
864 if (system_memory_pages <= 0 || system_page_size <= 0)
865 return -1;
866
867 const uint64_t system_memory_bytes = (uint64_t) system_memory_pages
868 * (uint64_t) system_page_size;
869
870 const unsigned system_memory_megabytes =
871 (unsigned) (system_memory_bytes / (1024 * 1024));
872
873 value[0] = MIN2(system_memory_megabytes, gpu_mappable_megabytes);
874 return 0;
875 }
876 case __DRI2_RENDERER_UNIFIED_MEMORY_ARCHITECTURE:
877 value[0] = 1;
878 return 0;
879 default:
880 return driQueryRendererIntegerCommon(psp, param, value);
881 }
882
883 return -1;
884 }
885
886 static int
887 brw_query_renderer_string(__DRIscreen *psp, int param, const char **value)
888 {
889 const struct intel_screen *intelScreen =
890 (struct intel_screen *) psp->driverPrivate;
891
892 switch (param) {
893 case __DRI2_RENDERER_VENDOR_ID:
894 value[0] = brw_vendor_string;
895 return 0;
896 case __DRI2_RENDERER_DEVICE_ID:
897 value[0] = brw_get_renderer_string(intelScreen->deviceID);
898 return 0;
899 default:
900 break;
901 }
902
903 return -1;
904 }
905
906 static const __DRI2rendererQueryExtension intelRendererQueryExtension = {
907 .base = { __DRI2_RENDERER_QUERY, 1 },
908
909 .queryInteger = brw_query_renderer_integer,
910 .queryString = brw_query_renderer_string
911 };
912
913 static const __DRIrobustnessExtension dri2Robustness = {
914 .base = { __DRI2_ROBUSTNESS, 1 }
915 };
916
917 static const __DRIextension *intelScreenExtensions[] = {
918 &intelTexBufferExtension.base,
919 &intelFenceExtension.base,
920 &intelFlushExtension.base,
921 &intelImageExtension.base,
922 &intelRendererQueryExtension.base,
923 &dri2ConfigQueryExtension.base,
924 NULL
925 };
926
927 static const __DRIextension *intelRobustScreenExtensions[] = {
928 &intelTexBufferExtension.base,
929 &intelFenceExtension.base,
930 &intelFlushExtension.base,
931 &intelImageExtension.base,
932 &intelRendererQueryExtension.base,
933 &dri2ConfigQueryExtension.base,
934 &dri2Robustness.base,
935 NULL
936 };
937
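/* Query a DRM_I915_GETPARAM value.  Returns false on failure; unexpected
 * errors (anything other than -EINVAL) are also reported with a warning.
 */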
938 static bool
939 intel_get_param(__DRIscreen *psp, int param, int *value)
940 {
941 int ret;
942 struct drm_i915_getparam gp;
943
944 memset(&gp, 0, sizeof(gp));
945 gp.param = param;
946 gp.value = value;
947
948 ret = drmCommandWriteRead(psp->fd, DRM_I915_GETPARAM, &gp, sizeof(gp));
949 if (ret) {
950 if (ret != -EINVAL)
951 _mesa_warning(NULL, "drm_i915_getparam: %d", ret);
952 return false;
953 }
954
955 return true;
956 }
957
958 static bool
959 intel_get_boolean(__DRIscreen *psp, int param)
960 {
961 int value = 0;
962 return intel_get_param(psp, param, &value) && value;
963 }
964
965 static void
966 intelDestroyScreen(__DRIscreen * sPriv)
967 {
968 struct intel_screen *intelScreen = sPriv->driverPrivate;
969
970 dri_bufmgr_destroy(intelScreen->bufmgr);
971 driDestroyOptionInfo(&intelScreen->optionCache);
972
973 ralloc_free(intelScreen);
974 sPriv->driverPrivate = NULL;
975 }
976
977
978 /**
979 * This is called when we need to set up GL rendering to a new X window.
980 */
981 static GLboolean
982 intelCreateBuffer(__DRIscreen * driScrnPriv,
983 __DRIdrawable * driDrawPriv,
984 const struct gl_config * mesaVis, GLboolean isPixmap)
985 {
986 struct intel_renderbuffer *rb;
987 struct intel_screen *screen = (struct intel_screen*) driScrnPriv->driverPrivate;
988 mesa_format rgbFormat;
989 unsigned num_samples = intel_quantize_num_samples(screen, mesaVis->samples);
990 struct gl_framebuffer *fb;
991
992 if (isPixmap)
993 return false;
994
995 fb = CALLOC_STRUCT(gl_framebuffer);
996 if (!fb)
997 return false;
998
999 _mesa_initialize_window_framebuffer(fb, mesaVis);
1000
1001 if (screen->winsys_msaa_samples_override != -1) {
1002 num_samples = screen->winsys_msaa_samples_override;
1003 fb->Visual.samples = num_samples;
1004 }
1005
1006 if (mesaVis->redBits == 5)
1007 rgbFormat = MESA_FORMAT_B5G6R5_UNORM;
1008 else if (mesaVis->sRGBCapable)
1009 rgbFormat = MESA_FORMAT_B8G8R8A8_SRGB;
1010 else if (mesaVis->alphaBits == 0)
1011 rgbFormat = MESA_FORMAT_B8G8R8X8_UNORM;
1012 else {
1013 rgbFormat = MESA_FORMAT_B8G8R8A8_SRGB;
1014 fb->Visual.sRGBCapable = true;
1015 }
1016
1017 /* setup the hardware-based renderbuffers */
1018 rb = intel_create_renderbuffer(rgbFormat, num_samples);
1019 _mesa_add_renderbuffer(fb, BUFFER_FRONT_LEFT, &rb->Base.Base);
1020
1021 if (mesaVis->doubleBufferMode) {
1022 rb = intel_create_renderbuffer(rgbFormat, num_samples);
1023 _mesa_add_renderbuffer(fb, BUFFER_BACK_LEFT, &rb->Base.Base);
1024 }
1025
1026 /*
1027 * Assert here that the gl_config has an expected depth/stencil bit
1028 * combination: one of d24/s8, d16/s0, d0/s0. (See intelInitScreen2(),
1029 * which constructs the advertised configs.)
1030 */
1031 if (mesaVis->depthBits == 24) {
1032 assert(mesaVis->stencilBits == 8);
1033
1034 if (screen->devinfo->has_hiz_and_separate_stencil) {
1035 rb = intel_create_private_renderbuffer(MESA_FORMAT_Z24_UNORM_X8_UINT,
1036 num_samples);
1037 _mesa_add_renderbuffer(fb, BUFFER_DEPTH, &rb->Base.Base);
1038 rb = intel_create_private_renderbuffer(MESA_FORMAT_S_UINT8,
1039 num_samples);
1040 _mesa_add_renderbuffer(fb, BUFFER_STENCIL, &rb->Base.Base);
1041 } else {
1042 /*
1043 * Use combined depth/stencil. Note that the renderbuffer is
1044 * attached to two attachment points.
1045 */
1046 rb = intel_create_private_renderbuffer(MESA_FORMAT_Z24_UNORM_S8_UINT,
1047 num_samples);
1048 _mesa_add_renderbuffer(fb, BUFFER_DEPTH, &rb->Base.Base);
1049 _mesa_add_renderbuffer(fb, BUFFER_STENCIL, &rb->Base.Base);
1050 }
1051 }
1052 else if (mesaVis->depthBits == 16) {
1053 assert(mesaVis->stencilBits == 0);
1054 rb = intel_create_private_renderbuffer(MESA_FORMAT_Z_UNORM16,
1055 num_samples);
1056 _mesa_add_renderbuffer(fb, BUFFER_DEPTH, &rb->Base.Base);
1057 }
1058 else {
1059 assert(mesaVis->depthBits == 0);
1060 assert(mesaVis->stencilBits == 0);
1061 }
1062
1063 /* now add any/all software-based renderbuffers we may need */
1064 _swrast_add_soft_renderbuffers(fb,
1065 false, /* never sw color */
1066 false, /* never sw depth */
1067 false, /* never sw stencil */
1068 mesaVis->accumRedBits > 0,
1069 false, /* never sw alpha */
1070 false /* never sw aux */ );
1071 driDrawPriv->driverPrivate = fb;
1072
1073 return true;
1074 }
1075
1076 static void
1077 intelDestroyBuffer(__DRIdrawable * driDrawPriv)
1078 {
1079 struct gl_framebuffer *fb = driDrawPriv->driverPrivate;
1080
1081 _mesa_reference_framebuffer(&fb, NULL);
1082 }
1083
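/* Create the libdrm-intel buffer manager for this screen and check for
 * the kernel features the driver requires (relaxed relocation deltas).
 */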
1084 static bool
1085 intel_init_bufmgr(struct intel_screen *intelScreen)
1086 {
1087 __DRIscreen *spriv = intelScreen->driScrnPriv;
1088
1089 intelScreen->no_hw = getenv("INTEL_NO_HW") != NULL;
1090
1091 intelScreen->bufmgr = intel_bufmgr_gem_init(spriv->fd, BATCH_SZ);
1092 if (intelScreen->bufmgr == NULL) {
1093 fprintf(stderr, "[%s:%u] Error initializing buffer manager.\n",
1094 __func__, __LINE__);
1095 return false;
1096 }
1097
1098 drm_intel_bufmgr_gem_enable_fenced_relocs(intelScreen->bufmgr);
1099
1100 if (!intel_get_boolean(spriv, I915_PARAM_HAS_RELAXED_DELTA)) {
1101 fprintf(stderr, "[%s: %u] Kernel 2.6.39 required.\n", __func__, __LINE__);
1102 return false;
1103 }
1104
1105 return true;
1106 }
1107
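/* Detect whether the kernel applies bit-6 address swizzling by allocating
 * a small X-tiled bo and reading back the swizzle mode it was given.
 */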
1108 static bool
1109 intel_detect_swizzling(struct intel_screen *screen)
1110 {
1111 drm_intel_bo *buffer;
1112 unsigned long flags = 0;
1113 unsigned long aligned_pitch;
1114 uint32_t tiling = I915_TILING_X;
1115 uint32_t swizzle_mode = 0;
1116
1117 buffer = drm_intel_bo_alloc_tiled(screen->bufmgr, "swizzle test",
1118 64, 64, 4,
1119 &tiling, &aligned_pitch, flags);
1120 if (buffer == NULL)
1121 return false;
1122
1123 drm_intel_bo_get_tiling(buffer, &tiling, &swizzle_mode);
1124 drm_intel_bo_unreference(buffer);
1125
1126 if (swizzle_mode == I915_BIT_6_SWIZZLE_NONE)
1127 return false;
1128 else
1129 return true;
1130 }
1131
1132 static int
1133 intel_detect_timestamp(struct intel_screen *screen)
1134 {
1135 uint64_t dummy = 0, last = 0;
1136 int upper, lower, loops;
1137
1138 /* On 64bit systems, some old kernels trigger a hw bug resulting in the
1139 * TIMESTAMP register being shifted and the low 32bits always zero.
1140 *
1141 * More recent kernels offer an interface to read the full 36bits
1142 * everywhere.
1143 */
1144 if (drm_intel_reg_read(screen->bufmgr, TIMESTAMP | 1, &dummy) == 0)
1145 return 3;
1146
1147 /* Determine if we have a 32bit or 64bit kernel by inspecting the
1148 * upper 32bits for a rapidly changing timestamp.
1149 */
1150 if (drm_intel_reg_read(screen->bufmgr, TIMESTAMP, &last))
1151 return 0;
1152
1153 upper = lower = 0;
1154 for (loops = 0; loops < 10; loops++) {
1155 /* The TIMESTAMP should change every 80ns, so several round trips
1156 * through the kernel should be enough to advance it.
1157 */
1158 if (drm_intel_reg_read(screen->bufmgr, TIMESTAMP, &dummy))
1159 return 0;
1160
1161 upper += (dummy >> 32) != (last >> 32);
1162 if (upper > 1) /* beware 32bit counter overflow */
1163 return 2; /* upper dword holds the low 32bits of the timestamp */
1164
1165 lower += (dummy & 0xffffffff) != (last & 0xffffffff);
1166 if (lower > 1)
1167 return 1; /* timestamp is unshifted */
1168
1169 last = dummy;
1170 }
1171
1172 /* No advancement? No timestamp! */
1173 return 0;
1174 }
1175
1176 /**
1177 * Return array of MSAA modes supported by the hardware. The array is
1178 * zero-terminated and sorted in decreasing order.
1179 */
1180 const int*
1181 intel_supported_msaa_modes(const struct intel_screen *screen)
1182 {
1183 static const int gen8_modes[] = {8, 4, 2, 0, -1};
1184 static const int gen7_modes[] = {8, 4, 0, -1};
1185 static const int gen6_modes[] = {4, 0, -1};
1186 static const int gen4_modes[] = {0, -1};
1187
1188 if (screen->devinfo->gen >= 8) {
1189 return gen8_modes;
1190 } else if (screen->devinfo->gen >= 7) {
1191 return gen7_modes;
1192 } else if (screen->devinfo->gen == 6) {
1193 return gen6_modes;
1194 } else {
1195 return gen4_modes;
1196 }
1197 }
1198
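/* Build the framebuffer configs advertised to the loader: singlesample
 * configs, a minimal set with an accumulation buffer, and (on gen6+)
 * multisample configs.
 */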
1199 static __DRIconfig**
1200 intel_screen_make_configs(__DRIscreen *dri_screen)
1201 {
1202 static const mesa_format formats[] = {
1203 MESA_FORMAT_B5G6R5_UNORM,
1204 MESA_FORMAT_B8G8R8A8_UNORM,
1205 MESA_FORMAT_B8G8R8X8_UNORM
1206 };
1207
1208 /* GLX_SWAP_COPY_OML is not supported due to page flipping. */
1209 static const GLenum back_buffer_modes[] = {
1210 GLX_SWAP_UNDEFINED_OML, GLX_NONE,
1211 };
1212
1213 static const uint8_t singlesample_samples[1] = {0};
1214 static const uint8_t multisample_samples[2] = {4, 8};
1215
1216 struct intel_screen *screen = dri_screen->driverPrivate;
1217 const struct brw_device_info *devinfo = screen->devinfo;
1218 uint8_t depth_bits[4], stencil_bits[4];
1219 __DRIconfig **configs = NULL;
1220
1221 /* Generate singlesample configs without accumulation buffer. */
1222 for (int i = 0; i < ARRAY_SIZE(formats); i++) {
1223 __DRIconfig **new_configs;
1224 int num_depth_stencil_bits = 2;
1225
1226 /* Starting with DRI2 protocol version 1.1 we can request a depth/stencil
1227 * buffer that has a different number of bits per pixel than the color
1228 * buffer; gen >= 6 supports this.
1229 */
1230 depth_bits[0] = 0;
1231 stencil_bits[0] = 0;
1232
1233 if (formats[i] == MESA_FORMAT_B5G6R5_UNORM) {
1234 depth_bits[1] = 16;
1235 stencil_bits[1] = 0;
1236 if (devinfo->gen >= 6) {
1237 depth_bits[2] = 24;
1238 stencil_bits[2] = 8;
1239 num_depth_stencil_bits = 3;
1240 }
1241 } else {
1242 depth_bits[1] = 24;
1243 stencil_bits[1] = 8;
1244 }
1245
1246 new_configs = driCreateConfigs(formats[i],
1247 depth_bits,
1248 stencil_bits,
1249 num_depth_stencil_bits,
1250 back_buffer_modes, 2,
1251 singlesample_samples, 1,
1252 false);
1253 configs = driConcatConfigs(configs, new_configs);
1254 }
1255
1256 /* Generate the minimum possible set of configs that include an
1257 * accumulation buffer.
1258 */
1259 for (int i = 0; i < ARRAY_SIZE(formats); i++) {
1260 __DRIconfig **new_configs;
1261
1262 if (formats[i] == MESA_FORMAT_B5G6R5_UNORM) {
1263 depth_bits[0] = 16;
1264 stencil_bits[0] = 0;
1265 } else {
1266 depth_bits[0] = 24;
1267 stencil_bits[0] = 8;
1268 }
1269
1270 new_configs = driCreateConfigs(formats[i],
1271 depth_bits, stencil_bits, 1,
1272 back_buffer_modes, 1,
1273 singlesample_samples, 1,
1274 true);
1275 configs = driConcatConfigs(configs, new_configs);
1276 }
1277
1278 /* Generate multisample configs.
1279 *
1280 * This loop breaks early, and hence is a no-op, on gen < 6.
1281 *
1282 * Multisample configs must follow the singlesample configs in order to
1283 * work around an X server bug present in 1.12. The X server chooses to
1284 * associate the first listed RGBA888-Z24S8 config, regardless of its
1285 * sample count, with the 32-bit depth visual used for compositing.
1286 *
1287 * Only doublebuffer configs with GLX_SWAP_UNDEFINED_OML behavior are
1288 * supported. Singlebuffer configs are not supported because no one wants
1289 * them.
1290 */
1291 for (int i = 0; i < ARRAY_SIZE(formats); i++) {
1292 if (devinfo->gen < 6)
1293 break;
1294
1295 __DRIconfig **new_configs;
1296 const int num_depth_stencil_bits = 2;
1297 int num_msaa_modes = 0;
1298
1299 depth_bits[0] = 0;
1300 stencil_bits[0] = 0;
1301
1302 if (formats[i] == MESA_FORMAT_B5G6R5_UNORM) {
1303 depth_bits[1] = 16;
1304 stencil_bits[1] = 0;
1305 } else {
1306 depth_bits[1] = 24;
1307 stencil_bits[1] = 8;
1308 }
1309
1310 if (devinfo->gen >= 7)
1311 num_msaa_modes = 2;
1312 else if (devinfo->gen == 6)
1313 num_msaa_modes = 1;
1314
1315 new_configs = driCreateConfigs(formats[i],
1316 depth_bits,
1317 stencil_bits,
1318 num_depth_stencil_bits,
1319 back_buffer_modes, 1,
1320 multisample_samples,
1321 num_msaa_modes,
1322 false);
1323 configs = driConcatConfigs(configs, new_configs);
1324 }
1325
1326 if (configs == NULL) {
1327 fprintf(stderr, "[%s:%u] Error creating FBConfig!\n", __func__,
1328 __LINE__);
1329 return NULL;
1330 }
1331
1332 return configs;
1333 }
1334
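/* Advertise the maximum desktop GL and GLES context versions supported on
 * this hardware generation.
 */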
1335 static void
1336 set_max_gl_versions(struct intel_screen *screen)
1337 {
1338 __DRIscreen *psp = screen->driScrnPriv;
1339
1340 switch (screen->devinfo->gen) {
1341 case 9:
1342 case 8:
1343 case 7:
1344 case 6:
1345 psp->max_gl_core_version = 33;
1346 psp->max_gl_compat_version = 30;
1347 psp->max_gl_es1_version = 11;
1348 psp->max_gl_es2_version = 30;
1349 break;
1350 case 5:
1351 case 4:
1352 psp->max_gl_core_version = 0;
1353 psp->max_gl_compat_version = 21;
1354 psp->max_gl_es1_version = 11;
1355 psp->max_gl_es2_version = 20;
1356 break;
1357 default:
1358 unreachable("unrecognized intel_screen::gen");
1359 }
1360 }
1361
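/* Read the device revision via I915_PARAM_REVISION; returns -1 if the
 * query fails.
 */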
1362 static int
1363 brw_get_revision(int fd)
1364 {
1365 struct drm_i915_getparam gp;
1366 int revision;
1367 int ret;
1368
1369 memset(&gp, 0, sizeof(gp));
1370 gp.param = I915_PARAM_REVISION;
1371 gp.value = &revision;
1372
1373 ret = drmCommandWriteRead(fd, DRM_I915_GETPARAM, &gp, sizeof(gp));
1374 if (ret)
1375 revision = -1;
1376
1377 return revision;
1378 }
1379
1380 /* Drop when RS headers get pulled to libdrm */
1381 #ifndef I915_PARAM_HAS_RESOURCE_STREAMER
1382 #define I915_PARAM_HAS_RESOURCE_STREAMER 36
1383 #endif
1384
1385 /**
1386 * This is the driver-specific part of the createNewScreen entry point.
1387 * Called when using DRI2.
1388 *
1389 * \return the list of __DRIconfigs supported by this driver
1390 */
1391 static const
1392 __DRIconfig **intelInitScreen2(__DRIscreen *psp)
1393 {
1394 struct intel_screen *intelScreen;
1395
1396 if (psp->image.loader) {
1397 } else if (psp->dri2.loader->base.version <= 2 ||
1398 psp->dri2.loader->getBuffersWithFormat == NULL) {
1399 fprintf(stderr,
1400 "\nERROR! DRI2 loader with getBuffersWithFormat() "
1401 "support required\n");
1402 return false;
1403 }
1404
1405 /* Allocate the private area */
1406 intelScreen = rzalloc(NULL, struct intel_screen);
1407 if (!intelScreen) {
1408 fprintf(stderr, "\nERROR! Allocating private area failed\n");
1409 return false;
1410 }
1411 /* parse information in __driConfigOptions */
1412 driParseOptionInfo(&intelScreen->optionCache, brw_config_options.xml);
1413
1414 intelScreen->driScrnPriv = psp;
1415 psp->driverPrivate = (void *) intelScreen;
1416
1417 if (!intel_init_bufmgr(intelScreen))
1418 return false;
1419
1420 intelScreen->deviceID = drm_intel_bufmgr_gem_get_devid(intelScreen->bufmgr);
1421 intelScreen->devinfo = brw_get_device_info(intelScreen->deviceID,
1422 brw_get_revision(psp->fd));
1423 if (!intelScreen->devinfo)
1424 return false;
1425
1426 brw_process_intel_debug_variable(intelScreen);
1427
1428 intelScreen->hw_must_use_separate_stencil = intelScreen->devinfo->gen >= 7;
1429
1430 intelScreen->hw_has_swizzling = intel_detect_swizzling(intelScreen);
1431 intelScreen->hw_has_timestamp = intel_detect_timestamp(intelScreen);
1432
1433 const char *force_msaa = getenv("INTEL_FORCE_MSAA");
1434 if (force_msaa) {
1435 intelScreen->winsys_msaa_samples_override =
1436 intel_quantize_num_samples(intelScreen, atoi(force_msaa));
1437 printf("Forcing winsys sample count to %d\n",
1438 intelScreen->winsys_msaa_samples_override);
1439 } else {
1440 intelScreen->winsys_msaa_samples_override = -1;
1441 }
1442
1443 set_max_gl_versions(intelScreen);
1444
1445 /* Notification of GPU resets requires hardware contexts and a kernel new
1446 * enough to support DRM_IOCTL_I915_GET_RESET_STATS. If the ioctl is
1447 * supported, calling it with a context of 0 will either generate EPERM or
1448 * no error. If the ioctl is not supported, it always generates EINVAL.
1449 * Use this to determine whether to advertise the __DRI2_ROBUSTNESS
1450 * extension to the loader.
1451 *
1452 * Don't even try on pre-Gen6, since we don't attempt to use contexts there.
1453 */
1454 if (intelScreen->devinfo->gen >= 6) {
1455 struct drm_i915_reset_stats stats;
1456 memset(&stats, 0, sizeof(stats));
1457
1458 const int ret = drmIoctl(psp->fd, DRM_IOCTL_I915_GET_RESET_STATS, &stats);
1459
1460 intelScreen->has_context_reset_notification =
1461 (ret != -1 || errno != EINVAL);
1462 }
1463
1464 struct drm_i915_getparam getparam;
1465 getparam.param = I915_PARAM_CMD_PARSER_VERSION;
1466 getparam.value = &intelScreen->cmd_parser_version;
1467 const int ret = drmIoctl(psp->fd, DRM_IOCTL_I915_GETPARAM, &getparam);
1468 if (ret == -1)
1469 intelScreen->cmd_parser_version = 0;
1470
1471 psp->extensions = !intelScreen->has_context_reset_notification
1472 ? intelScreenExtensions : intelRobustScreenExtensions;
1473
1474 intelScreen->compiler = brw_compiler_create(intelScreen,
1475 intelScreen->devinfo);
1476
1477 if (intelScreen->devinfo->has_resource_streamer) {
1478 int val = -1;
1479 getparam.param = I915_PARAM_HAS_RESOURCE_STREAMER;
1480 getparam.value = &val;
1481
1482 drmIoctl(psp->fd, DRM_IOCTL_I915_GETPARAM, &getparam);
1483 intelScreen->has_resource_streamer = val > 0;
1484 }
1485
1486 return (const __DRIconfig**) intel_screen_make_configs(psp);
1487 }
1488
1489 struct intel_buffer {
1490 __DRIbuffer base;
1491 drm_intel_bo *bo;
1492 };
1493
1494 static __DRIbuffer *
1495 intelAllocateBuffer(__DRIscreen *screen,
1496 unsigned attachment, unsigned format,
1497 int width, int height)
1498 {
1499 struct intel_buffer *intelBuffer;
1500 struct intel_screen *intelScreen = screen->driverPrivate;
1501
1502 assert(attachment == __DRI_BUFFER_FRONT_LEFT ||
1503 attachment == __DRI_BUFFER_BACK_LEFT);
1504
1505 intelBuffer = calloc(1, sizeof *intelBuffer);
1506 if (intelBuffer == NULL)
1507 return NULL;
1508
1509 /* The front and back buffers are color buffers, which are X tiled. */
1510 uint32_t tiling = I915_TILING_X;
1511 unsigned long pitch;
1512 int cpp = format / 8;
1513 intelBuffer->bo = drm_intel_bo_alloc_tiled(intelScreen->bufmgr,
1514 "intelAllocateBuffer",
1515 width,
1516 height,
1517 cpp,
1518 &tiling, &pitch,
1519 BO_ALLOC_FOR_RENDER);
1520
1521 if (intelBuffer->bo == NULL) {
1522 free(intelBuffer);
1523 return NULL;
1524 }
1525
1526 drm_intel_bo_flink(intelBuffer->bo, &intelBuffer->base.name);
1527
1528 intelBuffer->base.attachment = attachment;
1529 intelBuffer->base.cpp = cpp;
1530 intelBuffer->base.pitch = pitch;
1531
1532 return &intelBuffer->base;
1533 }
1534
1535 static void
1536 intelReleaseBuffer(__DRIscreen *screen, __DRIbuffer *buffer)
1537 {
1538 struct intel_buffer *intelBuffer = (struct intel_buffer *) buffer;
1539
1540 drm_intel_bo_unreference(intelBuffer->bo);
1541 free(intelBuffer);
1542 }
1543
1544 static const struct __DriverAPIRec brw_driver_api = {
1545 .InitScreen = intelInitScreen2,
1546 .DestroyScreen = intelDestroyScreen,
1547 .CreateContext = brwCreateContext,
1548 .DestroyContext = intelDestroyContext,
1549 .CreateBuffer = intelCreateBuffer,
1550 .DestroyBuffer = intelDestroyBuffer,
1551 .MakeCurrent = intelMakeCurrent,
1552 .UnbindContext = intelUnbindContext,
1553 .AllocateBuffer = intelAllocateBuffer,
1554 .ReleaseBuffer = intelReleaseBuffer
1555 };
1556
1557 static const struct __DRIDriverVtableExtensionRec brw_vtable = {
1558 .base = { __DRI_DRIVER_VTABLE, 1 },
1559 .vtable = &brw_driver_api,
1560 };
1561
1562 static const __DRIextension *brw_driver_extensions[] = {
1563 &driCoreExtension.base,
1564 &driImageDriverExtension.base,
1565 &driDRI2Extension.base,
1566 &brw_vtable.base,
1567 &brw_config_options.base,
1568 NULL
1569 };
1570
1571 PUBLIC const __DRIextension **__driDriverGetExtensions_i965(void)
1572 {
1573 globalDriverAPI = &brw_driver_api;
1574
1575 return brw_driver_extensions;
1576 }