Merge remote-tracking branch 'mesa-public/master' into vulkan
[mesa.git] / src / mesa / drivers / dri / i965 / intel_screen.c
1 /**************************************************************************
2 *
3 * Copyright 2003 VMware, Inc.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28 #include <errno.h>
29 #include <time.h>
30 #include <unistd.h>
31 #include "main/glheader.h"
32 #include "main/context.h"
33 #include "main/framebuffer.h"
34 #include "main/renderbuffer.h"
35 #include "main/texobj.h"
36 #include "main/hash.h"
37 #include "main/fbobject.h"
38 #include "main/version.h"
39 #include "swrast/s_renderbuffer.h"
40 #include "util/ralloc.h"
41 #include "brw_shader.h"
42 #include "glsl/nir/nir.h"
43
44 #include "utils.h"
45 #include "xmlpool.h"
46
47 static const __DRIconfigOptionsExtension brw_config_options = {
48 .base = { __DRI_CONFIG_OPTIONS, 1 },
49 .xml =
50 DRI_CONF_BEGIN
51 DRI_CONF_SECTION_PERFORMANCE
52 DRI_CONF_VBLANK_MODE(DRI_CONF_VBLANK_ALWAYS_SYNC)
53 /* Options correspond to DRI_CONF_BO_REUSE_DISABLED,
54 * DRI_CONF_BO_REUSE_ALL
55 */
56 DRI_CONF_OPT_BEGIN_V(bo_reuse, enum, 1, "0:1")
57 DRI_CONF_DESC_BEGIN(en, "Buffer object reuse")
58 DRI_CONF_ENUM(0, "Disable buffer object reuse")
59 DRI_CONF_ENUM(1, "Enable reuse of all sizes of buffer objects")
60 DRI_CONF_DESC_END
61 DRI_CONF_OPT_END
62
63 DRI_CONF_OPT_BEGIN_B(hiz, "true")
64 DRI_CONF_DESC(en, "Enable Hierarchical Z on gen6+")
65 DRI_CONF_OPT_END
66 DRI_CONF_SECTION_END
67
68 DRI_CONF_SECTION_QUALITY
69 DRI_CONF_FORCE_S3TC_ENABLE("false")
70
71 DRI_CONF_OPT_BEGIN(clamp_max_samples, int, -1)
72 DRI_CONF_DESC(en, "Clamp the value of GL_MAX_SAMPLES to the "
73 "given integer. If negative, then do not clamp.")
74 DRI_CONF_OPT_END
75 DRI_CONF_SECTION_END
76
77 DRI_CONF_SECTION_DEBUG
78 DRI_CONF_NO_RAST("false")
79 DRI_CONF_ALWAYS_FLUSH_BATCH("false")
80 DRI_CONF_ALWAYS_FLUSH_CACHE("false")
81 DRI_CONF_DISABLE_THROTTLING("false")
82 DRI_CONF_FORCE_GLSL_EXTENSIONS_WARN("false")
83 DRI_CONF_DISABLE_GLSL_LINE_CONTINUATIONS("false")
84 DRI_CONF_DISABLE_BLEND_FUNC_EXTENDED("false")
85 DRI_CONF_ALLOW_GLSL_EXTENSION_DIRECTIVE_MIDSHADER("false")
86
87 DRI_CONF_OPT_BEGIN_B(shader_precompile, "true")
88 DRI_CONF_DESC(en, "Perform code generation at shader link time.")
89 DRI_CONF_OPT_END
90 DRI_CONF_SECTION_END
91 DRI_CONF_END
92 };
93
94 #include "intel_batchbuffer.h"
95 #include "intel_buffers.h"
96 #include "intel_bufmgr.h"
97 #include "intel_fbo.h"
98 #include "intel_mipmap_tree.h"
99 #include "intel_screen.h"
100 #include "intel_tex.h"
101 #include "intel_image.h"
102
103 #include "brw_context.h"
104
105 #include "i915_drm.h"
106
107 /**
108 * Debug helper: returns the current CLOCK_MONOTONIC time in seconds.
109 */
110 double
111 get_time(void)
112 {
113 struct timespec tp;
114
115 clock_gettime(CLOCK_MONOTONIC, &tp);
116
117 return tp.tv_sec + tp.tv_nsec / 1000000000.0;
118 }
119
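/* Debug helper: writes each bound ARGB/XRGB color draw buffer to the AUB
 * dump via drm_intel_gem_bo_aub_dump_bmp(); buffers in other formats are
 * skipped.
 */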
120 void
121 aub_dump_bmp(struct gl_context *ctx)
122 {
123 struct gl_framebuffer *fb = ctx->DrawBuffer;
124
125 for (int i = 0; i < fb->_NumColorDrawBuffers; i++) {
126 struct intel_renderbuffer *irb =
127 intel_renderbuffer(fb->_ColorDrawBuffers[i]);
128
129 if (irb && irb->mt) {
130 enum aub_dump_bmp_format format;
131
132 switch (irb->Base.Base.Format) {
133 case MESA_FORMAT_B8G8R8A8_UNORM:
134 case MESA_FORMAT_B8G8R8X8_UNORM:
135 format = AUB_DUMP_BMP_FORMAT_ARGB_8888;
136 break;
137 default:
138 continue;
139 }
140
141 drm_intel_gem_bo_aub_dump_bmp(irb->mt->bo,
142 irb->draw_x,
143 irb->draw_y,
144 irb->Base.Base.Width,
145 irb->Base.Base.Height,
146 format,
147 irb->mt->pitch,
148 0);
149 }
150 }
151 }
152
153 static const __DRItexBufferExtension intelTexBufferExtension = {
154 .base = { __DRI_TEX_BUFFER, 3 },
155
156 .setTexBuffer = intelSetTexBuffer,
157 .setTexBuffer2 = intelSetTexBuffer2,
158 .releaseTexBuffer = NULL,
159 };
160
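/* DRI2 flush entry point: flushes queued vertices, resolves the drawable's
 * buffers when __DRI2_FLUSH_DRAWABLE is set, records whether swap or
 * front-buffer throttling is needed, and submits the batchbuffer.
 */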
161 static void
162 intel_dri2_flush_with_flags(__DRIcontext *cPriv,
163 __DRIdrawable *dPriv,
164 unsigned flags,
165 enum __DRI2throttleReason reason)
166 {
167 struct brw_context *brw = cPriv->driverPrivate;
168
169 if (!brw)
170 return;
171
172 struct gl_context *ctx = &brw->ctx;
173
174 FLUSH_VERTICES(ctx, 0);
175
176 if (flags & __DRI2_FLUSH_DRAWABLE)
177 intel_resolve_for_dri2_flush(brw, dPriv);
178
179 if (reason == __DRI2_THROTTLE_SWAPBUFFER)
180 brw->need_swap_throttle = true;
181 if (reason == __DRI2_THROTTLE_FLUSHFRONT)
182 brw->need_flush_throttle = true;
183
184 intel_batchbuffer_flush(brw);
185
186 if (INTEL_DEBUG & DEBUG_AUB) {
187 aub_dump_bmp(ctx);
188 }
189 }
190
191 /**
192 * Provides compatibility with loaders that only support the older (version
193 * 1-3) flush interface.
194 *
195 * That includes libGL up to Mesa 9.0, and the X Server at least up to 1.13.
196 */
197 static void
198 intel_dri2_flush(__DRIdrawable *drawable)
199 {
200 intel_dri2_flush_with_flags(drawable->driContextPriv, drawable,
201 __DRI2_FLUSH_DRAWABLE,
202 __DRI2_THROTTLE_SWAPBUFFER);
203 }
204
205 static const struct __DRI2flushExtensionRec intelFlushExtension = {
206 .base = { __DRI2_FLUSH, 4 },
207
208 .flush = intel_dri2_flush,
209 .invalidate = dri2InvalidateDrawable,
210 .flush_with_flags = intel_dri2_flush_with_flags,
211 };
212
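/* Translation table from DRM fourcc codes to per-plane DRI image formats.
 * Each plane entry lists, in order, the buffer index, width shift, height
 * shift, DRI format, and bytes per pixel (the fields of struct
 * intel_image_format; see intel_from_planar() below for how they are used).
 */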
213 static struct intel_image_format intel_image_formats[] = {
214 { __DRI_IMAGE_FOURCC_ARGB8888, __DRI_IMAGE_COMPONENTS_RGBA, 1,
215 { { 0, 0, 0, __DRI_IMAGE_FORMAT_ARGB8888, 4 } } },
216
217 { __DRI_IMAGE_FOURCC_ABGR8888, __DRI_IMAGE_COMPONENTS_RGBA, 1,
218 { { 0, 0, 0, __DRI_IMAGE_FORMAT_ABGR8888, 4 } } },
219
220 { __DRI_IMAGE_FOURCC_SARGB8888, __DRI_IMAGE_COMPONENTS_RGBA, 1,
221 { { 0, 0, 0, __DRI_IMAGE_FORMAT_SARGB8, 4 } } },
222
223 { __DRI_IMAGE_FOURCC_XRGB8888, __DRI_IMAGE_COMPONENTS_RGB, 1,
224 { { 0, 0, 0, __DRI_IMAGE_FORMAT_XRGB8888, 4 }, } },
225
226 { __DRI_IMAGE_FOURCC_XBGR8888, __DRI_IMAGE_COMPONENTS_RGB, 1,
227 { { 0, 0, 0, __DRI_IMAGE_FORMAT_XBGR8888, 4 }, } },
228
229 { __DRI_IMAGE_FOURCC_RGB565, __DRI_IMAGE_COMPONENTS_RGB, 1,
230 { { 0, 0, 0, __DRI_IMAGE_FORMAT_RGB565, 2 } } },
231
232 { __DRI_IMAGE_FOURCC_YUV410, __DRI_IMAGE_COMPONENTS_Y_U_V, 3,
233 { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
234 { 1, 2, 2, __DRI_IMAGE_FORMAT_R8, 1 },
235 { 2, 2, 2, __DRI_IMAGE_FORMAT_R8, 1 } } },
236
237 { __DRI_IMAGE_FOURCC_YUV411, __DRI_IMAGE_COMPONENTS_Y_U_V, 3,
238 { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
239 { 1, 2, 0, __DRI_IMAGE_FORMAT_R8, 1 },
240 { 2, 2, 0, __DRI_IMAGE_FORMAT_R8, 1 } } },
241
242 { __DRI_IMAGE_FOURCC_YUV420, __DRI_IMAGE_COMPONENTS_Y_U_V, 3,
243 { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
244 { 1, 1, 1, __DRI_IMAGE_FORMAT_R8, 1 },
245 { 2, 1, 1, __DRI_IMAGE_FORMAT_R8, 1 } } },
246
247 { __DRI_IMAGE_FOURCC_YUV422, __DRI_IMAGE_COMPONENTS_Y_U_V, 3,
248 { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
249 { 1, 1, 0, __DRI_IMAGE_FORMAT_R8, 1 },
250 { 2, 1, 0, __DRI_IMAGE_FORMAT_R8, 1 } } },
251
252 { __DRI_IMAGE_FOURCC_YUV444, __DRI_IMAGE_COMPONENTS_Y_U_V, 3,
253 { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
254 { 1, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
255 { 2, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 } } },
256
257 { __DRI_IMAGE_FOURCC_NV12, __DRI_IMAGE_COMPONENTS_Y_UV, 2,
258 { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
259 { 1, 1, 1, __DRI_IMAGE_FORMAT_GR88, 2 } } },
260
261 { __DRI_IMAGE_FOURCC_NV16, __DRI_IMAGE_COMPONENTS_Y_UV, 2,
262 { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
263 { 1, 1, 0, __DRI_IMAGE_FORMAT_GR88, 2 } } },
264
265 /* For YUYV buffers, we set up two overlapping DRI images and treat
266 * them as planar buffers in the compositors. Plane 0 is GR88 and
267 * samples YU or YV pairs, placing Y in the R component, while
268 * plane 1 is ARGB and samples whole YUYV clusters, placing U in the
269 * G component and V in the A component. This lets the texture
270 * sampler interpolate the Y components correctly when sampling from
271 * plane 0, and interpolate U and V correctly when sampling from
272 * plane 1. */
273 { __DRI_IMAGE_FOURCC_YUYV, __DRI_IMAGE_COMPONENTS_Y_XUXV, 2,
274 { { 0, 0, 0, __DRI_IMAGE_FORMAT_GR88, 2 },
275 { 0, 1, 0, __DRI_IMAGE_FORMAT_ARGB8888, 4 } } }
276 };
277
278 static void
279 intel_image_warn_if_unaligned(__DRIimage *image, const char *func)
280 {
281 uint32_t tiling, swizzle;
282 drm_intel_bo_get_tiling(image->bo, &tiling, &swizzle);
283
284 if (tiling != I915_TILING_NONE && (image->offset & 0xfff)) {
285 _mesa_warning(NULL, "%s: offset 0x%08x not on tile boundary",
286 func, image->offset);
287 }
288 }
289
290 static struct intel_image_format *
291 intel_image_format_lookup(int fourcc)
292 {
293 struct intel_image_format *f = NULL;
294
295 for (unsigned i = 0; i < ARRAY_SIZE(intel_image_formats); i++) {
296 if (intel_image_formats[i].fourcc == fourcc) {
297 f = &intel_image_formats[i];
298 break;
299 }
300 }
301
302 return f;
303 }
304
305 static boolean intel_lookup_fourcc(int dri_format, int *fourcc)
306 {
307 for (unsigned i = 0; i < ARRAY_SIZE(intel_image_formats); i++) {
308 if (intel_image_formats[i].planes[0].dri_format == dri_format) {
309 *fourcc = intel_image_formats[i].fourcc;
310 return true;
311 }
312 }
313 return false;
314 }
315
316 static __DRIimage *
317 intel_allocate_image(int dri_format, void *loaderPrivate)
318 {
319 __DRIimage *image;
320
321 image = calloc(1, sizeof *image);
322 if (image == NULL)
323 return NULL;
324
325 image->dri_format = dri_format;
326 image->offset = 0;
327
328 image->format = driImageFormatToGLFormat(dri_format);
329 if (dri_format != __DRI_IMAGE_FORMAT_NONE &&
330 image->format == MESA_FORMAT_NONE) {
331 free(image);
332 return NULL;
333 }
334
335 image->internal_format = _mesa_get_format_base_format(image->format);
336 image->data = loaderPrivate;
337
338 return image;
339 }
340
341 /**
342 * Sets up a DRIImage structure to point to a slice out of a miptree.
343 */
344 static void
345 intel_setup_image_from_mipmap_tree(struct brw_context *brw, __DRIimage *image,
346 struct intel_mipmap_tree *mt, GLuint level,
347 GLuint zoffset)
348 {
349 intel_miptree_make_shareable(brw, mt);
350
351 intel_miptree_check_level_layer(mt, level, zoffset);
352
353 image->width = minify(mt->physical_width0, level - mt->first_level);
354 image->height = minify(mt->physical_height0, level - mt->first_level);
355 image->pitch = mt->pitch;
356
357 image->offset = intel_miptree_get_tile_offsets(mt, level, zoffset,
358 &image->tile_x,
359 &image->tile_y);
360
361 drm_intel_bo_unreference(image->bo);
362 image->bo = mt->bo;
363 drm_intel_bo_reference(mt->bo);
364 }
365
366 static __DRIimage *
367 intel_create_image_from_name(__DRIscreen *screen,
368 int width, int height, int format,
369 int name, int pitch, void *loaderPrivate)
370 {
371 struct intel_screen *intelScreen = screen->driverPrivate;
372 __DRIimage *image;
373 int cpp;
374
375 image = intel_allocate_image(format, loaderPrivate);
376 if (image == NULL)
377 return NULL;
378
379 if (image->format == MESA_FORMAT_NONE)
380 cpp = 1;
381 else
382 cpp = _mesa_get_format_bytes(image->format);
383
384 image->width = width;
385 image->height = height;
386 image->pitch = pitch * cpp;
387 image->bo = drm_intel_bo_gem_create_from_name(intelScreen->bufmgr, "image",
388 name);
389 if (!image->bo) {
390 free(image);
391 return NULL;
392 }
393
394 return image;
395 }
396
397 static __DRIimage *
398 intel_create_image_from_renderbuffer(__DRIcontext *context,
399 int renderbuffer, void *loaderPrivate)
400 {
401 __DRIimage *image;
402 struct brw_context *brw = context->driverPrivate;
403 struct gl_context *ctx = &brw->ctx;
404 struct gl_renderbuffer *rb;
405 struct intel_renderbuffer *irb;
406
407 rb = _mesa_lookup_renderbuffer(ctx, renderbuffer);
408 if (!rb) {
409 _mesa_error(ctx, GL_INVALID_OPERATION, "glRenderbufferExternalMESA");
410 return NULL;
411 }
412
413 irb = intel_renderbuffer(rb);
414 intel_miptree_make_shareable(brw, irb->mt);
415 image = calloc(1, sizeof *image);
416 if (image == NULL)
417 return NULL;
418
419 image->internal_format = rb->InternalFormat;
420 image->format = rb->Format;
421 image->offset = 0;
422 image->data = loaderPrivate;
423 drm_intel_bo_unreference(image->bo);
424 image->bo = irb->mt->bo;
425 drm_intel_bo_reference(irb->mt->bo);
426 image->width = rb->Width;
427 image->height = rb->Height;
428 image->pitch = irb->mt->pitch;
429 image->dri_format = driGLFormatToImageFormat(image->format);
430 image->has_depthstencil = irb->mt->stencil_mt? true : false;
431
432 rb->NeedsFinishRenderTexture = true;
433 return image;
434 }
435
436 static __DRIimage *
437 intel_create_image_from_texture(__DRIcontext *context, int target,
438 unsigned texture, int zoffset,
439 int level,
440 unsigned *error,
441 void *loaderPrivate)
442 {
443 __DRIimage *image;
444 struct brw_context *brw = context->driverPrivate;
445 struct gl_texture_object *obj;
446 struct intel_texture_object *iobj;
447 GLuint face = 0;
448
449 obj = _mesa_lookup_texture(&brw->ctx, texture);
450 if (!obj || obj->Target != target) {
451 *error = __DRI_IMAGE_ERROR_BAD_PARAMETER;
452 return NULL;
453 }
454
455 if (target == GL_TEXTURE_CUBE_MAP)
456 face = zoffset;
457
458 _mesa_test_texobj_completeness(&brw->ctx, obj);
459 iobj = intel_texture_object(obj);
460 if (!obj->_BaseComplete || (level > 0 && !obj->_MipmapComplete)) {
461 *error = __DRI_IMAGE_ERROR_BAD_PARAMETER;
462 return NULL;
463 }
464
465 if (level < obj->BaseLevel || level > obj->_MaxLevel) {
466 *error = __DRI_IMAGE_ERROR_BAD_MATCH;
467 return NULL;
468 }
469
470 if (target == GL_TEXTURE_3D && obj->Image[face][level]->Depth < zoffset) {
471 *error = __DRI_IMAGE_ERROR_BAD_MATCH;
472 return NULL;
473 }
474 image = calloc(1, sizeof *image);
475 if (image == NULL) {
476 *error = __DRI_IMAGE_ERROR_BAD_ALLOC;
477 return NULL;
478 }
479
480 image->internal_format = obj->Image[face][level]->InternalFormat;
481 image->format = obj->Image[face][level]->TexFormat;
482 image->data = loaderPrivate;
483 intel_setup_image_from_mipmap_tree(brw, image, iobj->mt, level, zoffset);
484 image->dri_format = driGLFormatToImageFormat(image->format);
485 image->has_depthstencil = iobj->mt->stencil_mt? true : false;
486 if (image->dri_format == __DRI_IMAGE_FORMAT_NONE) {
487 *error = __DRI_IMAGE_ERROR_BAD_PARAMETER;
488 free(image);
489 return NULL;
490 }
491
492 *error = __DRI_IMAGE_ERROR_SUCCESS;
493 return image;
494 }
495
496 static void
497 intel_destroy_image(__DRIimage *image)
498 {
499 drm_intel_bo_unreference(image->bo);
500 free(image);
501 }
502
503 static __DRIimage *
504 intel_create_image(__DRIscreen *screen,
505 int width, int height, int format,
506 unsigned int use,
507 void *loaderPrivate)
508 {
509 __DRIimage *image;
510 struct intel_screen *intelScreen = screen->driverPrivate;
511 uint32_t tiling;
512 int cpp;
513 unsigned long pitch;
514
515 tiling = I915_TILING_X;
516 if (use & __DRI_IMAGE_USE_CURSOR) {
517 if (width != 64 || height != 64)
518 return NULL;
519 tiling = I915_TILING_NONE;
520 }
521
522 if (use & __DRI_IMAGE_USE_LINEAR)
523 tiling = I915_TILING_NONE;
524
525 image = intel_allocate_image(format, loaderPrivate);
526 if (image == NULL)
527 return NULL;
528
529
530 cpp = _mesa_get_format_bytes(image->format);
531 image->bo = drm_intel_bo_alloc_tiled(intelScreen->bufmgr, "image",
532 width, height, cpp, &tiling,
533 &pitch, 0);
534 if (image->bo == NULL) {
535 free(image);
536 return NULL;
537 }
538 image->width = width;
539 image->height = height;
540 image->pitch = pitch;
541
542 return image;
543 }
544
545 static GLboolean
546 intel_query_image(__DRIimage *image, int attrib, int *value)
547 {
548 switch (attrib) {
549 case __DRI_IMAGE_ATTRIB_STRIDE:
550 *value = image->pitch;
551 return true;
552 case __DRI_IMAGE_ATTRIB_HANDLE:
553 *value = image->bo->handle;
554 return true;
555 case __DRI_IMAGE_ATTRIB_NAME:
556 return !drm_intel_bo_flink(image->bo, (uint32_t *) value);
557 case __DRI_IMAGE_ATTRIB_FORMAT:
558 *value = image->dri_format;
559 return true;
560 case __DRI_IMAGE_ATTRIB_WIDTH:
561 *value = image->width;
562 return true;
563 case __DRI_IMAGE_ATTRIB_HEIGHT:
564 *value = image->height;
565 return true;
566 case __DRI_IMAGE_ATTRIB_COMPONENTS:
567 if (image->planar_format == NULL)
568 return false;
569 *value = image->planar_format->components;
570 return true;
571 case __DRI_IMAGE_ATTRIB_FD:
572 if (drm_intel_bo_gem_export_to_prime(image->bo, value) == 0)
573 return true;
574 return false;
575 case __DRI_IMAGE_ATTRIB_FOURCC:
576 if (intel_lookup_fourcc(image->dri_format, value))
577 return true;
578 return false;
579 case __DRI_IMAGE_ATTRIB_NUM_PLANES:
580 *value = 1;
581 return true;
582
583 default:
584 return false;
585 }
586 }
587
588 static __DRIimage *
589 intel_dup_image(__DRIimage *orig_image, void *loaderPrivate)
590 {
591 __DRIimage *image;
592
593 image = calloc(1, sizeof *image);
594 if (image == NULL)
595 return NULL;
596
597 drm_intel_bo_reference(orig_image->bo);
598 image->bo = orig_image->bo;
599 image->internal_format = orig_image->internal_format;
600 image->planar_format = orig_image->planar_format;
601 image->dri_format = orig_image->dri_format;
602 image->format = orig_image->format;
603 image->offset = orig_image->offset;
604 image->width = orig_image->width;
605 image->height = orig_image->height;
606 image->pitch = orig_image->pitch;
607 image->tile_x = orig_image->tile_x;
608 image->tile_y = orig_image->tile_y;
609 image->has_depthstencil = orig_image->has_depthstencil;
610 image->data = loaderPrivate;
611
612 memcpy(image->strides, orig_image->strides, sizeof(image->strides));
613 memcpy(image->offsets, orig_image->offsets, sizeof(image->offsets));
614
615 return image;
616 }
617
618 static GLboolean
619 intel_validate_usage(__DRIimage *image, unsigned int use)
620 {
621 if (use & __DRI_IMAGE_USE_CURSOR) {
622 if (image->width != 64 || image->height != 64)
623 return GL_FALSE;
624 }
625
626 return GL_TRUE;
627 }
628
629 static __DRIimage *
630 intel_create_image_from_names(__DRIscreen *screen,
631 int width, int height, int fourcc,
632 int *names, int num_names,
633 int *strides, int *offsets,
634 void *loaderPrivate)
635 {
636 struct intel_image_format *f = NULL;
637 __DRIimage *image;
638 int i, index;
639
640 if (screen == NULL || names == NULL || num_names != 1)
641 return NULL;
642
643 f = intel_image_format_lookup(fourcc);
644 if (f == NULL)
645 return NULL;
646
647 image = intel_create_image_from_name(screen, width, height,
648 __DRI_IMAGE_FORMAT_NONE,
649 names[0], strides[0],
650 loaderPrivate);
651
652 if (image == NULL)
653 return NULL;
654
655 image->planar_format = f;
656 for (i = 0; i < f->nplanes; i++) {
657 index = f->planes[i].buffer_index;
658 image->offsets[index] = offsets[index];
659 image->strides[index] = strides[index];
660 }
661
662 return image;
663 }
664
665 static __DRIimage *
666 intel_create_image_from_fds(__DRIscreen *screen,
667 int width, int height, int fourcc,
668 int *fds, int num_fds, int *strides, int *offsets,
669 void *loaderPrivate)
670 {
671 struct intel_screen *intelScreen = screen->driverPrivate;
672 struct intel_image_format *f;
673 __DRIimage *image;
674 int i, index;
675
676 if (fds == NULL || num_fds != 1)
677 return NULL;
678
679 f = intel_image_format_lookup(fourcc);
680 if (f == NULL)
681 return NULL;
682
683 if (f->nplanes == 1)
684 image = intel_allocate_image(f->planes[0].dri_format, loaderPrivate);
685 else
686 image = intel_allocate_image(__DRI_IMAGE_FORMAT_NONE, loaderPrivate);
687
688 if (image == NULL)
689 return NULL;
690
691 image->bo = drm_intel_bo_gem_create_from_prime(intelScreen->bufmgr,
692 fds[0],
693 height * strides[0]);
694 if (image->bo == NULL) {
695 free(image);
696 return NULL;
697 }
698 image->width = width;
699 image->height = height;
700 image->pitch = strides[0];
701
702 image->planar_format = f;
703 for (i = 0; i < f->nplanes; i++) {
704 index = f->planes[i].buffer_index;
705 image->offsets[index] = offsets[index];
706 image->strides[index] = strides[index];
707 }
708
709 if (f->nplanes == 1) {
710 image->offset = image->offsets[0];
711 intel_image_warn_if_unaligned(image, __func__);
712 }
713
714 return image;
715 }
716
717 static __DRIimage *
718 intel_create_image_from_dma_bufs(__DRIscreen *screen,
719 int width, int height, int fourcc,
720 int *fds, int num_fds,
721 int *strides, int *offsets,
722 enum __DRIYUVColorSpace yuv_color_space,
723 enum __DRISampleRange sample_range,
724 enum __DRIChromaSiting horizontal_siting,
725 enum __DRIChromaSiting vertical_siting,
726 unsigned *error,
727 void *loaderPrivate)
728 {
729 __DRIimage *image;
730 struct intel_image_format *f = intel_image_format_lookup(fourcc);
731
732 /* For now only packed formats that have native sampling are supported. */
733 if (!f || f->nplanes != 1) {
734 *error = __DRI_IMAGE_ERROR_BAD_MATCH;
735 return NULL;
736 }
737
738 image = intel_create_image_from_fds(screen, width, height, fourcc, fds,
739 num_fds, strides, offsets,
740 loaderPrivate);
741
742 /*
743 * Invalid parameters and any inconsistencies between them are assumed to
744 * be checked by the caller. Therefore, besides unsupported formats, the
745 * only remaining failure mode is allocation.
746 */
747 if (!image) {
748 *error = __DRI_IMAGE_ERROR_BAD_ALLOC;
749 return NULL;
750 }
751
752 image->dma_buf_imported = true;
753 image->yuv_color_space = yuv_color_space;
754 image->sample_range = sample_range;
755 image->horizontal_siting = horizontal_siting;
756 image->vertical_siting = vertical_siting;
757
758 *error = __DRI_IMAGE_ERROR_SUCCESS;
759 return image;
760 }
761
762 static __DRIimage *
763 intel_from_planar(__DRIimage *parent, int plane, void *loaderPrivate)
764 {
765 int width, height, offset, stride, dri_format, index;
766 struct intel_image_format *f;
767 __DRIimage *image;
768
769 if (parent == NULL || parent->planar_format == NULL)
770 return NULL;
771
772 f = parent->planar_format;
773
774 if (plane >= f->nplanes)
775 return NULL;
776
777 width = parent->width >> f->planes[plane].width_shift;
778 height = parent->height >> f->planes[plane].height_shift;
779 dri_format = f->planes[plane].dri_format;
780 index = f->planes[plane].buffer_index;
781 offset = parent->offsets[index];
782 stride = parent->strides[index];
783
784 image = intel_allocate_image(dri_format, loaderPrivate);
785 if (image == NULL)
786 return NULL;
787
788 if (offset + height * stride > parent->bo->size) {
789 _mesa_warning(NULL, "%s: subimage out of bounds", __func__);
790 free(image);
791 return NULL;
792 }
793
794 image->bo = parent->bo;
795 drm_intel_bo_reference(parent->bo);
796
797 image->width = width;
798 image->height = height;
799 image->pitch = stride;
800 image->offset = offset;
801
802 intel_image_warn_if_unaligned(image, __func__);
803
804 return image;
805 }
806
807 static const __DRIimageExtension intelImageExtension = {
808 .base = { __DRI_IMAGE, 11 },
809
810 .createImageFromName = intel_create_image_from_name,
811 .createImageFromRenderbuffer = intel_create_image_from_renderbuffer,
812 .destroyImage = intel_destroy_image,
813 .createImage = intel_create_image,
814 .queryImage = intel_query_image,
815 .dupImage = intel_dup_image,
816 .validateUsage = intel_validate_usage,
817 .createImageFromNames = intel_create_image_from_names,
818 .fromPlanar = intel_from_planar,
819 .createImageFromTexture = intel_create_image_from_texture,
820 .createImageFromFds = intel_create_image_from_fds,
821 .createImageFromDmaBufs = intel_create_image_from_dma_bufs,
822 .blitImage = NULL,
823 .getCapabilities = NULL
824 };
825
826 static int
827 brw_query_renderer_integer(__DRIscreen *psp, int param, unsigned int *value)
828 {
829 const struct intel_screen *const intelScreen =
830 (struct intel_screen *) psp->driverPrivate;
831
832 switch (param) {
833 case __DRI2_RENDERER_VENDOR_ID:
834 value[0] = 0x8086;
835 return 0;
836 case __DRI2_RENDERER_DEVICE_ID:
837 value[0] = intelScreen->deviceID;
838 return 0;
839 case __DRI2_RENDERER_ACCELERATED:
840 value[0] = 1;
841 return 0;
842 case __DRI2_RENDERER_VIDEO_MEMORY: {
843 /* Once a batch uses more than 75% of the maximum mappable size, we
844 * assume that there's some fragmentation, and we start doing extra
845 * flushing, etc. That's the big cliff apps will care about.
846 */
847 size_t aper_size;
848 size_t mappable_size;
849
850 drm_intel_get_aperture_sizes(psp->fd, &mappable_size, &aper_size);
851
852 const unsigned gpu_mappable_megabytes =
853 (aper_size / (1024 * 1024)) * 3 / 4;
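/* Worked example: a 512 MiB mappable aperture reports 384 MiB here,
 * which is then clamped against total system memory below.
 */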
854
855 const long system_memory_pages = sysconf(_SC_PHYS_PAGES);
856 const long system_page_size = sysconf(_SC_PAGE_SIZE);
857
858 if (system_memory_pages <= 0 || system_page_size <= 0)
859 return -1;
860
861 const uint64_t system_memory_bytes = (uint64_t) system_memory_pages
862 * (uint64_t) system_page_size;
863
864 const unsigned system_memory_megabytes =
865 (unsigned) (system_memory_bytes / (1024 * 1024));
866
867 value[0] = MIN2(system_memory_megabytes, gpu_mappable_megabytes);
868 return 0;
869 }
870 case __DRI2_RENDERER_UNIFIED_MEMORY_ARCHITECTURE:
871 value[0] = 1;
872 return 0;
873 default:
874 return driQueryRendererIntegerCommon(psp, param, value);
875 }
876
877 return -1;
878 }
879
880 static int
881 brw_query_renderer_string(__DRIscreen *psp, int param, const char **value)
882 {
883 const struct intel_screen *intelScreen =
884 (struct intel_screen *) psp->driverPrivate;
885
886 switch (param) {
887 case __DRI2_RENDERER_VENDOR_ID:
888 value[0] = brw_vendor_string;
889 return 0;
890 case __DRI2_RENDERER_DEVICE_ID:
891 value[0] = brw_get_renderer_string(intelScreen->deviceID);
892 return 0;
893 default:
894 break;
895 }
896
897 return -1;
898 }
899
900 static const __DRI2rendererQueryExtension intelRendererQueryExtension = {
901 .base = { __DRI2_RENDERER_QUERY, 1 },
902
903 .queryInteger = brw_query_renderer_integer,
904 .queryString = brw_query_renderer_string
905 };
906
907 static const __DRIrobustnessExtension dri2Robustness = {
908 .base = { __DRI2_ROBUSTNESS, 1 }
909 };
910
911 static const __DRIextension *intelScreenExtensions[] = {
912 &intelTexBufferExtension.base,
913 &intelFenceExtension.base,
914 &intelFlushExtension.base,
915 &intelImageExtension.base,
916 &intelRendererQueryExtension.base,
917 &dri2ConfigQueryExtension.base,
918 NULL
919 };
920
921 static const __DRIextension *intelRobustScreenExtensions[] = {
922 &intelTexBufferExtension.base,
923 &intelFenceExtension.base,
924 &intelFlushExtension.base,
925 &intelImageExtension.base,
926 &intelRendererQueryExtension.base,
927 &dri2ConfigQueryExtension.base,
928 &dri2Robustness.base,
929 NULL
930 };
931
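/* Thin wrapper around the I915_GETPARAM ioctl; returns false if the query
 * fails, warning unless the kernel simply does not know the parameter
 * (EINVAL).
 */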
932 static bool
933 intel_get_param(__DRIscreen *psp, int param, int *value)
934 {
935 int ret;
936 struct drm_i915_getparam gp;
937
938 memset(&gp, 0, sizeof(gp));
939 gp.param = param;
940 gp.value = value;
941
942 ret = drmCommandWriteRead(psp->fd, DRM_I915_GETPARAM, &gp, sizeof(gp));
943 if (ret) {
944 if (ret != -EINVAL)
945 _mesa_warning(NULL, "drm_i915_getparam: %d", ret);
946 return false;
947 }
948
949 return true;
950 }
951
952 static bool
953 intel_get_boolean(__DRIscreen *psp, int param)
954 {
955 int value = 0;
956 return intel_get_param(psp, param, &value) && value;
957 }
958
959 static void
960 intelDestroyScreen(__DRIscreen * sPriv)
961 {
962 struct intel_screen *intelScreen = sPriv->driverPrivate;
963
964 dri_bufmgr_destroy(intelScreen->bufmgr);
965 driDestroyOptionInfo(&intelScreen->optionCache);
966
967 ralloc_free(intelScreen);
968 sPriv->driverPrivate = NULL;
969 }
970
971
972 /**
973 * This is called when we need to set up GL rendering to a new X window.
974 */
975 static GLboolean
976 intelCreateBuffer(__DRIscreen * driScrnPriv,
977 __DRIdrawable * driDrawPriv,
978 const struct gl_config * mesaVis, GLboolean isPixmap)
979 {
980 struct intel_renderbuffer *rb;
981 struct intel_screen *screen = (struct intel_screen*) driScrnPriv->driverPrivate;
982 mesa_format rgbFormat;
983 unsigned num_samples = intel_quantize_num_samples(screen, mesaVis->samples);
984 struct gl_framebuffer *fb;
985
986 if (isPixmap)
987 return false;
988
989 fb = CALLOC_STRUCT(gl_framebuffer);
990 if (!fb)
991 return false;
992
993 _mesa_initialize_window_framebuffer(fb, mesaVis);
994
995 if (screen->winsys_msaa_samples_override != -1) {
996 num_samples = screen->winsys_msaa_samples_override;
997 fb->Visual.samples = num_samples;
998 }
999
1000 if (mesaVis->redBits == 5)
1001 rgbFormat = MESA_FORMAT_B5G6R5_UNORM;
1002 else if (mesaVis->sRGBCapable)
1003 rgbFormat = MESA_FORMAT_B8G8R8A8_SRGB;
1004 else if (mesaVis->alphaBits == 0)
1005 rgbFormat = MESA_FORMAT_B8G8R8X8_UNORM;
1006 else {
1007 rgbFormat = MESA_FORMAT_B8G8R8A8_SRGB;
1008 fb->Visual.sRGBCapable = true;
1009 }
1010
1011 /* Set up the hardware-based renderbuffers. */
1012 rb = intel_create_renderbuffer(rgbFormat, num_samples);
1013 _mesa_add_renderbuffer(fb, BUFFER_FRONT_LEFT, &rb->Base.Base);
1014
1015 if (mesaVis->doubleBufferMode) {
1016 rb = intel_create_renderbuffer(rgbFormat, num_samples);
1017 _mesa_add_renderbuffer(fb, BUFFER_BACK_LEFT, &rb->Base.Base);
1018 }
1019
1020 /*
1021 * Assert here that the gl_config has an expected depth/stencil bit
1022 * combination: one of d24/s8, d16/s0, d0/s0. (See intelInitScreen2(),
1023 * which constructs the advertised configs.)
1024 */
1025 if (mesaVis->depthBits == 24) {
1026 assert(mesaVis->stencilBits == 8);
1027
1028 if (screen->devinfo->has_hiz_and_separate_stencil) {
1029 rb = intel_create_private_renderbuffer(MESA_FORMAT_Z24_UNORM_X8_UINT,
1030 num_samples);
1031 _mesa_add_renderbuffer(fb, BUFFER_DEPTH, &rb->Base.Base);
1032 rb = intel_create_private_renderbuffer(MESA_FORMAT_S_UINT8,
1033 num_samples);
1034 _mesa_add_renderbuffer(fb, BUFFER_STENCIL, &rb->Base.Base);
1035 } else {
1036 /*
1037 * Use combined depth/stencil. Note that the renderbuffer is
1038 * attached to two attachment points.
1039 */
1040 rb = intel_create_private_renderbuffer(MESA_FORMAT_Z24_UNORM_S8_UINT,
1041 num_samples);
1042 _mesa_add_renderbuffer(fb, BUFFER_DEPTH, &rb->Base.Base);
1043 _mesa_add_renderbuffer(fb, BUFFER_STENCIL, &rb->Base.Base);
1044 }
1045 }
1046 else if (mesaVis->depthBits == 16) {
1047 assert(mesaVis->stencilBits == 0);
1048 rb = intel_create_private_renderbuffer(MESA_FORMAT_Z_UNORM16,
1049 num_samples);
1050 _mesa_add_renderbuffer(fb, BUFFER_DEPTH, &rb->Base.Base);
1051 }
1052 else {
1053 assert(mesaVis->depthBits == 0);
1054 assert(mesaVis->stencilBits == 0);
1055 }
1056
1057 /* now add any/all software-based renderbuffers we may need */
1058 _swrast_add_soft_renderbuffers(fb,
1059 false, /* never sw color */
1060 false, /* never sw depth */
1061 false, /* never sw stencil */
1062 mesaVis->accumRedBits > 0,
1063 false, /* never sw alpha */
1064 false /* never sw aux */ );
1065 driDrawPriv->driverPrivate = fb;
1066
1067 return true;
1068 }
1069
1070 static void
1071 intelDestroyBuffer(__DRIdrawable * driDrawPriv)
1072 {
1073 struct gl_framebuffer *fb = driDrawPriv->driverPrivate;
1074
1075 _mesa_reference_framebuffer(&fb, NULL);
1076 }
1077
1078 static bool
1079 intel_init_bufmgr(struct intel_screen *intelScreen)
1080 {
1081 __DRIscreen *spriv = intelScreen->driScrnPriv;
1082
1083 intelScreen->no_hw = getenv("INTEL_NO_HW") != NULL;
1084
1085 intelScreen->bufmgr = intel_bufmgr_gem_init(spriv->fd, BATCH_SZ);
1086 if (intelScreen->bufmgr == NULL) {
1087 fprintf(stderr, "[%s:%u] Error initializing buffer manager.\n",
1088 __func__, __LINE__);
1089 return false;
1090 }
1091
1092 drm_intel_bufmgr_gem_enable_fenced_relocs(intelScreen->bufmgr);
1093
1094 if (!intel_get_boolean(spriv, I915_PARAM_HAS_RELAXED_DELTA)) {
1095 fprintf(stderr, "[%s: %u] Kernel 2.6.39 required.\n", __func__, __LINE__);
1096 return false;
1097 }
1098
1099 return true;
1100 }
1101
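/* Detect whether the kernel applies bit-6 address swizzling by allocating
 * a small X-tiled buffer and querying the swizzle mode it was assigned.
 */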
1102 static bool
1103 intel_detect_swizzling(struct intel_screen *screen)
1104 {
1105 drm_intel_bo *buffer;
1106 unsigned long flags = 0;
1107 unsigned long aligned_pitch;
1108 uint32_t tiling = I915_TILING_X;
1109 uint32_t swizzle_mode = 0;
1110
1111 buffer = drm_intel_bo_alloc_tiled(screen->bufmgr, "swizzle test",
1112 64, 64, 4,
1113 &tiling, &aligned_pitch, flags);
1114 if (buffer == NULL)
1115 return false;
1116
1117 drm_intel_bo_get_tiling(buffer, &tiling, &swizzle_mode);
1118 drm_intel_bo_unreference(buffer);
1119
1120 if (swizzle_mode == I915_BIT_6_SWIZZLE_NONE)
1121 return false;
1122 else
1123 return true;
1124 }
1125
1126 /**
1127 * Return the array of MSAA modes supported by the hardware. The array is
1128 * terminated by -1 and sorted in decreasing order.
1129 */
1130 const int*
1131 intel_supported_msaa_modes(const struct intel_screen *screen)
1132 {
1133 static const int gen8_modes[] = {8, 4, 2, 0, -1};
1134 static const int gen7_modes[] = {8, 4, 0, -1};
1135 static const int gen6_modes[] = {4, 0, -1};
1136 static const int gen4_modes[] = {0, -1};
1137
1138 if (screen->devinfo->gen >= 8) {
1139 return gen8_modes;
1140 } else if (screen->devinfo->gen >= 7) {
1141 return gen7_modes;
1142 } else if (screen->devinfo->gen == 6) {
1143 return gen6_modes;
1144 } else {
1145 return gen4_modes;
1146 }
1147 }
1148
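/* Build the advertised framebuffer configs in three passes: plain
 * singlesample configs, a minimal set with an accumulation buffer, and
 * (on gen6+) multisample configs, which must come last to work around the
 * X server bug described below.
 */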
1149 static __DRIconfig**
1150 intel_screen_make_configs(__DRIscreen *dri_screen)
1151 {
1152 static const mesa_format formats[] = {
1153 MESA_FORMAT_B5G6R5_UNORM,
1154 MESA_FORMAT_B8G8R8A8_UNORM,
1155 MESA_FORMAT_B8G8R8X8_UNORM
1156 };
1157
1158 /* GLX_SWAP_COPY_OML is not supported due to page flipping. */
1159 static const GLenum back_buffer_modes[] = {
1160 GLX_SWAP_UNDEFINED_OML, GLX_NONE,
1161 };
1162
1163 static const uint8_t singlesample_samples[1] = {0};
1164 static const uint8_t multisample_samples[2] = {4, 8};
1165
1166 struct intel_screen *screen = dri_screen->driverPrivate;
1167 const struct brw_device_info *devinfo = screen->devinfo;
1168 uint8_t depth_bits[4], stencil_bits[4];
1169 __DRIconfig **configs = NULL;
1170
1171 /* Generate singlesample configs without accumulation buffer. */
1172 for (int i = 0; i < ARRAY_SIZE(formats); i++) {
1173 __DRIconfig **new_configs;
1174 int num_depth_stencil_bits = 2;
1175
1176 /* Starting with DRI2 protocol version 1.1 we can request a depth/stencil
1177 * buffer that has a different number of bits per pixel than the color
1178 * buffer; gen >= 6 supports this.
1179 */
1180 depth_bits[0] = 0;
1181 stencil_bits[0] = 0;
1182
1183 if (formats[i] == MESA_FORMAT_B5G6R5_UNORM) {
1184 depth_bits[1] = 16;
1185 stencil_bits[1] = 0;
1186 if (devinfo->gen >= 6) {
1187 depth_bits[2] = 24;
1188 stencil_bits[2] = 8;
1189 num_depth_stencil_bits = 3;
1190 }
1191 } else {
1192 depth_bits[1] = 24;
1193 stencil_bits[1] = 8;
1194 }
1195
1196 new_configs = driCreateConfigs(formats[i],
1197 depth_bits,
1198 stencil_bits,
1199 num_depth_stencil_bits,
1200 back_buffer_modes, 2,
1201 singlesample_samples, 1,
1202 false);
1203 configs = driConcatConfigs(configs, new_configs);
1204 }
1205
1206 /* Generate the minimum possible set of configs that include an
1207 * accumulation buffer.
1208 */
1209 for (int i = 0; i < ARRAY_SIZE(formats); i++) {
1210 __DRIconfig **new_configs;
1211
1212 if (formats[i] == MESA_FORMAT_B5G6R5_UNORM) {
1213 depth_bits[0] = 16;
1214 stencil_bits[0] = 0;
1215 } else {
1216 depth_bits[0] = 24;
1217 stencil_bits[0] = 8;
1218 }
1219
1220 new_configs = driCreateConfigs(formats[i],
1221 depth_bits, stencil_bits, 1,
1222 back_buffer_modes, 1,
1223 singlesample_samples, 1,
1224 true);
1225 configs = driConcatConfigs(configs, new_configs);
1226 }
1227
1228 /* Generate multisample configs.
1229 *
1230 * This loop breaks early, and hence is a no-op, on gen < 6.
1231 *
1232 * Multisample configs must follow the singlesample configs in order to
1233 * work around an X server bug present in 1.12. The X server chooses to
1234 * associate the first listed RGBA888-Z24S8 config, regardless of its
1235 * sample count, with the 32-bit depth visual used for compositing.
1236 *
1237 * Only doublebuffer configs with GLX_SWAP_UNDEFINED_OML behavior are
1238 * supported. Singlebuffer configs are not supported because no one wants
1239 * them.
1240 */
1241 for (int i = 0; i < ARRAY_SIZE(formats); i++) {
1242 if (devinfo->gen < 6)
1243 break;
1244
1245 __DRIconfig **new_configs;
1246 const int num_depth_stencil_bits = 2;
1247 int num_msaa_modes = 0;
1248
1249 depth_bits[0] = 0;
1250 stencil_bits[0] = 0;
1251
1252 if (formats[i] == MESA_FORMAT_B5G6R5_UNORM) {
1253 depth_bits[1] = 16;
1254 stencil_bits[1] = 0;
1255 } else {
1256 depth_bits[1] = 24;
1257 stencil_bits[1] = 8;
1258 }
1259
1260 if (devinfo->gen >= 7)
1261 num_msaa_modes = 2;
1262 else if (devinfo->gen == 6)
1263 num_msaa_modes = 1;
1264
1265 new_configs = driCreateConfigs(formats[i],
1266 depth_bits,
1267 stencil_bits,
1268 num_depth_stencil_bits,
1269 back_buffer_modes, 1,
1270 multisample_samples,
1271 num_msaa_modes,
1272 false);
1273 configs = driConcatConfigs(configs, new_configs);
1274 }
1275
1276 if (configs == NULL) {
1277 fprintf(stderr, "[%s:%u] Error creating FBConfig!\n", __func__,
1278 __LINE__);
1279 return NULL;
1280 }
1281
1282 return configs;
1283 }
1284
1285 static void
1286 set_max_gl_versions(struct intel_screen *screen)
1287 {
1288 __DRIscreen *psp = screen->driScrnPriv;
1289
1290 switch (screen->devinfo->gen) {
1291 case 9:
1292 case 8:
1293 case 7:
1294 case 6:
1295 psp->max_gl_core_version = 33;
1296 psp->max_gl_compat_version = 30;
1297 psp->max_gl_es1_version = 11;
1298 psp->max_gl_es2_version = 30;
1299 break;
1300 case 5:
1301 case 4:
1302 psp->max_gl_core_version = 0;
1303 psp->max_gl_compat_version = 21;
1304 psp->max_gl_es1_version = 11;
1305 psp->max_gl_es2_version = 20;
1306 break;
1307 default:
1308 unreachable("unrecognized intel_screen::gen");
1309 }
1310 }
1311
1312 /* drop when libdrm 2.4.61 is released */
1313 #ifndef I915_PARAM_REVISION
1314 #define I915_PARAM_REVISION 32
1315 #endif
1316
1317 static int
1318 brw_get_revision(int fd)
1319 {
1320 struct drm_i915_getparam gp;
1321 int revision;
1322 int ret;
1323
1324 memset(&gp, 0, sizeof(gp));
1325 gp.param = I915_PARAM_REVISION;
1326 gp.value = &revision;
1327
1328 ret = drmCommandWriteRead(fd, DRM_I915_GETPARAM, &gp, sizeof(gp));
1329 if (ret)
1330 revision = -1;
1331
1332 return revision;
1333 }
1334
1335 /**
1336 * This is the driver specific part of the createNewScreen entry point.
1337 * Called when using DRI2.
1338 *
1339 * \return the struct gl_config supported by this driver
1340 */
1341 static const
1342 __DRIconfig **intelInitScreen2(__DRIscreen *psp)
1343 {
1344 struct intel_screen *intelScreen;
1345
1346 if (psp->image.loader) {
1347 } else if (psp->dri2.loader->base.version <= 2 ||
1348 psp->dri2.loader->getBuffersWithFormat == NULL) {
1349 fprintf(stderr,
1350 "\nERROR! DRI2 loader with getBuffersWithFormat() "
1351 "support required\n");
1352 return false;
1353 }
1354
1355 /* Allocate the private area */
1356 intelScreen = rzalloc(NULL, struct intel_screen);
1357 if (!intelScreen) {
1358 fprintf(stderr, "\nERROR! Allocating private area failed\n");
1359 return false;
1360 }
1361 /* parse information in __driConfigOptions */
1362 driParseOptionInfo(&intelScreen->optionCache, brw_config_options.xml);
1363
1364 intelScreen->driScrnPriv = psp;
1365 psp->driverPrivate = (void *) intelScreen;
1366
1367 if (!intel_init_bufmgr(intelScreen))
1368 return false;
1369
1370 intelScreen->deviceID = drm_intel_bufmgr_gem_get_devid(intelScreen->bufmgr);
1371 intelScreen->devinfo = brw_get_device_info(intelScreen->deviceID,
1372 brw_get_revision(psp->fd));
1373 if (!intelScreen->devinfo)
1374 return false;
1375
1376 brw_process_intel_debug_variable(intelScreen);
1377
1378 intelScreen->hw_must_use_separate_stencil = intelScreen->devinfo->gen >= 7;
1379
1380 intelScreen->hw_has_swizzling = intel_detect_swizzling(intelScreen);
1381
1382 const char *force_msaa = getenv("INTEL_FORCE_MSAA");
1383 if (force_msaa) {
1384 intelScreen->winsys_msaa_samples_override =
1385 intel_quantize_num_samples(intelScreen, atoi(force_msaa));
1386 printf("Forcing winsys sample count to %d\n",
1387 intelScreen->winsys_msaa_samples_override);
1388 } else {
1389 intelScreen->winsys_msaa_samples_override = -1;
1390 }
1391
1392 set_max_gl_versions(intelScreen);
1393
1394 /* Notification of GPU resets requires hardware contexts and a kernel new
1395 * enough to support DRM_IOCTL_I915_GET_RESET_STATS. If the ioctl is
1396 * supported, calling it with a context of 0 will either generate EPERM or
1397 * no error. If the ioctl is not supported, it always generates EINVAL.
1398 * Use this to determine whether to advertise the __DRI2_ROBUSTNESS
1399 * extension to the loader.
1400 *
1401 * Don't even try on pre-Gen6, since we don't attempt to use contexts there.
1402 */
1403 if (intelScreen->devinfo->gen >= 6) {
1404 struct drm_i915_reset_stats stats;
1405 memset(&stats, 0, sizeof(stats));
1406
1407 const int ret = drmIoctl(psp->fd, DRM_IOCTL_I915_GET_RESET_STATS, &stats);
1408
1409 intelScreen->has_context_reset_notification =
1410 (ret != -1 || errno != EINVAL);
1411 }
1412
1413 struct drm_i915_getparam getparam;
1414 getparam.param = I915_PARAM_CMD_PARSER_VERSION;
1415 getparam.value = &intelScreen->cmd_parser_version;
1416 const int ret = drmIoctl(psp->fd, DRM_IOCTL_I915_GETPARAM, &getparam);
1417 if (ret == -1)
1418 intelScreen->cmd_parser_version = 0;
1419
1420 psp->extensions = !intelScreen->has_context_reset_notification
1421 ? intelScreenExtensions : intelRobustScreenExtensions;
1422
1423 intelScreen->compiler = brw_compiler_create(intelScreen,
1424 intelScreen->devinfo);
1425
1426 return (const __DRIconfig**) intel_screen_make_configs(psp);
1427 }
1428
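/* Standalone screen creation (no DRI loader): wraps the fd in a minimal
 * fake __DRIscreen, runs the normal DRI2 init path, and discards the
 * generated configs.  Presumably intended for the Vulkan driver on this
 * branch.
 */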
1429 struct intel_screen *
1430 intel_screen_create(int fd)
1431 {
1432 __DRIscreen *psp;
1433 __DRIconfig **configs;
1434 int i;
1435
1436 psp = malloc(sizeof(*psp));
1437 if (psp == NULL)
1438 return NULL;
1439
1440 psp->image.loader = (void *) 1; /* Don't complain about this being NULL */
1441 psp->fd = fd;
1442 psp->dri2.useInvalidate = (void *) 1;
1443
1444 configs = (__DRIconfig **) intelInitScreen2(psp);
1445 for (i = 0; configs[i]; i++)
1446 free(configs[i]);
1447 free(configs);
1448
1449 return psp->driverPrivate;
1450 }
1451
1452 void
1453 intel_screen_destroy(struct intel_screen *screen)
1454 {
1455 __DRIscreen *psp;
1456
1457 psp = screen->driScrnPriv;
1458 intelDestroyScreen(screen->driScrnPriv);
1459 free(psp);
1460 }
1461
1462
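/* Standalone context creation: builds a minimal fake __DRIcontext and asks
 * brwCreateContext() for an API_OPENGL_CORE context (version 3.0 requested),
 * again presumably for use outside a windowing system.
 */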
1463 struct brw_context *
1464 intel_context_create(struct intel_screen *screen)
1465 {
1466 __DRIcontext *driContextPriv;
1467 struct brw_context *brw;
1468 unsigned error;
1469
1470 driContextPriv = malloc(sizeof(*driContextPriv));
1471 if (driContextPriv == NULL)
1472 return NULL;
1473
1474 driContextPriv->driScreenPriv = screen->driScrnPriv;
1475
1476 brwCreateContext(API_OPENGL_CORE,
1477 NULL, /* visual */
1478 driContextPriv,
1479 3, 0,
1480 0, /* flags */
1481 false, /* notify_reset */
1482 &error,
1483 NULL);
1484
1485 brw = driContextPriv->driverPrivate;
1486 brw->ctx.FirstTimeCurrent = false;
1487
1488 return driContextPriv->driverPrivate;
1489 }
1490
1491 void
1492 intel_context_destroy(struct brw_context *brw)
1493 {
1494 __DRIcontext *driContextPriv;
1495
1496 driContextPriv = brw->driContext;
1497 intelDestroyContext(driContextPriv);
1498 free(driContextPriv);
1499 }
1500
1501 struct intel_buffer {
1502 __DRIbuffer base;
1503 drm_intel_bo *bo;
1504 };
1505
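/* DRI2 AllocateBuffer hook: allocates an X-tiled color buffer for the
 * front-left or back-left attachment and publishes it through a flink name.
 */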
1506 static __DRIbuffer *
1507 intelAllocateBuffer(__DRIscreen *screen,
1508 unsigned attachment, unsigned format,
1509 int width, int height)
1510 {
1511 struct intel_buffer *intelBuffer;
1512 struct intel_screen *intelScreen = screen->driverPrivate;
1513
1514 assert(attachment == __DRI_BUFFER_FRONT_LEFT ||
1515 attachment == __DRI_BUFFER_BACK_LEFT);
1516
1517 intelBuffer = calloc(1, sizeof *intelBuffer);
1518 if (intelBuffer == NULL)
1519 return NULL;
1520
1521 /* The front and back buffers are color buffers, which are X tiled. */
1522 uint32_t tiling = I915_TILING_X;
1523 unsigned long pitch;
1524 int cpp = format / 8;
1525 intelBuffer->bo = drm_intel_bo_alloc_tiled(intelScreen->bufmgr,
1526 "intelAllocateBuffer",
1527 width,
1528 height,
1529 cpp,
1530 &tiling, &pitch,
1531 BO_ALLOC_FOR_RENDER);
1532
1533 if (intelBuffer->bo == NULL) {
1534 free(intelBuffer);
1535 return NULL;
1536 }
1537
1538 drm_intel_bo_flink(intelBuffer->bo, &intelBuffer->base.name);
1539
1540 intelBuffer->base.attachment = attachment;
1541 intelBuffer->base.cpp = cpp;
1542 intelBuffer->base.pitch = pitch;
1543
1544 return &intelBuffer->base;
1545 }
1546
1547 static void
1548 intelReleaseBuffer(__DRIscreen *screen, __DRIbuffer *buffer)
1549 {
1550 struct intel_buffer *intelBuffer = (struct intel_buffer *) buffer;
1551
1552 drm_intel_bo_unreference(intelBuffer->bo);
1553 free(intelBuffer);
1554 }
1555
1556 static const struct __DriverAPIRec brw_driver_api = {
1557 .InitScreen = intelInitScreen2,
1558 .DestroyScreen = intelDestroyScreen,
1559 .CreateContext = brwCreateContext,
1560 .DestroyContext = intelDestroyContext,
1561 .CreateBuffer = intelCreateBuffer,
1562 .DestroyBuffer = intelDestroyBuffer,
1563 .MakeCurrent = intelMakeCurrent,
1564 .UnbindContext = intelUnbindContext,
1565 .AllocateBuffer = intelAllocateBuffer,
1566 .ReleaseBuffer = intelReleaseBuffer
1567 };
1568
1569 static const struct __DRIDriverVtableExtensionRec brw_vtable = {
1570 .base = { __DRI_DRIVER_VTABLE, 1 },
1571 .vtable = &brw_driver_api,
1572 };
1573
1574 static const __DRIextension *brw_driver_extensions[] = {
1575 &driCoreExtension.base,
1576 &driImageDriverExtension.base,
1577 &driDRI2Extension.base,
1578 &brw_vtable.base,
1579 &brw_config_options.base,
1580 NULL
1581 };
1582
1583 PUBLIC const __DRIextension **__driDriverGetExtensions_i965(void)
1584 {
1585 globalDriverAPI = &brw_driver_api;
1586
1587 return brw_driver_extensions;
1588 }