mesa.git: src/mesa/drivers/dri/i965/intel_screen.c
1 /**************************************************************************
2 *
3 * Copyright 2003 VMware, Inc.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28 #include <errno.h>
29 #include <time.h>
30 #include <unistd.h>
31 #include "main/glheader.h"
32 #include "main/context.h"
33 #include "main/framebuffer.h"
34 #include "main/renderbuffer.h"
35 #include "main/texobj.h"
36 #include "main/hash.h"
37 #include "main/fbobject.h"
38 #include "main/version.h"
39 #include "swrast/s_renderbuffer.h"
40
41 #include "utils.h"
42 #include "xmlpool.h"
43
44 static const __DRIconfigOptionsExtension brw_config_options = {
45 .base = { __DRI_CONFIG_OPTIONS, 1 },
46 .xml =
47 DRI_CONF_BEGIN
48 DRI_CONF_SECTION_PERFORMANCE
49 DRI_CONF_VBLANK_MODE(DRI_CONF_VBLANK_ALWAYS_SYNC)
50 /* Options correspond to DRI_CONF_BO_REUSE_DISABLED,
51 * DRI_CONF_BO_REUSE_ALL
52 */
53 DRI_CONF_OPT_BEGIN_V(bo_reuse, enum, 1, "0:1")
54 DRI_CONF_DESC_BEGIN(en, "Buffer object reuse")
55 DRI_CONF_ENUM(0, "Disable buffer object reuse")
56 DRI_CONF_ENUM(1, "Enable reuse of all sizes of buffer objects")
57 DRI_CONF_DESC_END
58 DRI_CONF_OPT_END
59
60 DRI_CONF_OPT_BEGIN_B(hiz, "true")
61 DRI_CONF_DESC(en, "Enable Hierarchical Z on gen6+")
62 DRI_CONF_OPT_END
63
64 DRI_CONF_OPT_BEGIN_B(disable_derivative_optimization, "false")
65 DRI_CONF_DESC(en, "Derivatives with finer granularity by default")
66 DRI_CONF_OPT_END
67 DRI_CONF_SECTION_END
68
69 DRI_CONF_SECTION_QUALITY
70 DRI_CONF_FORCE_S3TC_ENABLE("false")
71
72 DRI_CONF_OPT_BEGIN(clamp_max_samples, int, -1)
73 DRI_CONF_DESC(en, "Clamp the value of GL_MAX_SAMPLES to the "
74 "given integer. If negative, then do not clamp.")
75 DRI_CONF_OPT_END
76 DRI_CONF_SECTION_END
77
78 DRI_CONF_SECTION_DEBUG
79 DRI_CONF_NO_RAST("false")
80 DRI_CONF_ALWAYS_FLUSH_BATCH("false")
81 DRI_CONF_ALWAYS_FLUSH_CACHE("false")
82 DRI_CONF_DISABLE_THROTTLING("false")
83 DRI_CONF_FORCE_GLSL_EXTENSIONS_WARN("false")
84 DRI_CONF_DISABLE_GLSL_LINE_CONTINUATIONS("false")
85 DRI_CONF_DISABLE_BLEND_FUNC_EXTENDED("false")
86
87 DRI_CONF_OPT_BEGIN_B(shader_precompile, "true")
88 DRI_CONF_DESC(en, "Perform code generation at shader link time.")
89 DRI_CONF_OPT_END
90 DRI_CONF_SECTION_END
91 DRI_CONF_END
92 };
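/* The XML above is what the driver reports through the driconf mechanism.
 * As a usage sketch (the file location and schema shown here follow the
 * conventional drirc layout and are assumed, not taken from this file), a
 * user could override an option such as bo_reuse from ~/.drirc:
 *
 *    <driconf>
 *       <device driver="i965">
 *          <application name="Default">
 *             <option name="bo_reuse" value="1" />
 *             <option name="hiz" value="true" />
 *          </application>
 *       </device>
 *    </driconf>
 *
 * Option names must match the DRI_CONF_OPT_BEGIN* declarations above.
 */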
93
94 #include "intel_batchbuffer.h"
95 #include "intel_buffers.h"
96 #include "intel_bufmgr.h"
97 #include "intel_chipset.h"
98 #include "intel_fbo.h"
99 #include "intel_mipmap_tree.h"
100 #include "intel_screen.h"
101 #include "intel_tex.h"
102 #include "intel_regions.h"
103
104 #include "brw_context.h"
105
106 #include "i915_drm.h"
107
108 #ifdef USE_NEW_INTERFACE
109 static PFNGLXCREATECONTEXTMODES create_context_modes = NULL;
110 #endif /*USE_NEW_INTERFACE */
111
112 /**
113 * For debugging purposes, this returns a time in seconds.
114 */
115 double
116 get_time(void)
117 {
118 struct timespec tp;
119
120 clock_gettime(CLOCK_MONOTONIC, &tp);
121
122 return tp.tv_sec + tp.tv_nsec / 1000000000.0;
123 }
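/* Usage sketch: bracket the code being measured and subtract, e.g.
 *
 *    double start = get_time();
 *    do_something();
 *    fprintf(stderr, "took %.3f s\n", get_time() - start);
 *
 * (do_something() is a placeholder here, not a function in this file.)
 */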
124
125 void
126 aub_dump_bmp(struct gl_context *ctx)
127 {
128 struct gl_framebuffer *fb = ctx->DrawBuffer;
129
130 for (int i = 0; i < fb->_NumColorDrawBuffers; i++) {
131 struct intel_renderbuffer *irb =
132 intel_renderbuffer(fb->_ColorDrawBuffers[i]);
133
134 if (irb && irb->mt) {
135 enum aub_dump_bmp_format format;
136
137 switch (irb->Base.Base.Format) {
138 case MESA_FORMAT_ARGB8888:
139 case MESA_FORMAT_XRGB8888:
140 format = AUB_DUMP_BMP_FORMAT_ARGB_8888;
141 break;
142 default:
143 continue;
144 }
145
146 assert(irb->mt->region->pitch % irb->mt->region->cpp == 0);
147 drm_intel_gem_bo_aub_dump_bmp(irb->mt->region->bo,
148 irb->draw_x,
149 irb->draw_y,
150 irb->Base.Base.Width,
151 irb->Base.Base.Height,
152 format,
153 irb->mt->region->pitch,
154 0);
155 }
156 }
157 }
158
159 static const __DRItexBufferExtension intelTexBufferExtension = {
160 .base = { __DRI_TEX_BUFFER, __DRI_TEX_BUFFER_VERSION },
161
162 .setTexBuffer = intelSetTexBuffer,
163 .setTexBuffer2 = intelSetTexBuffer2,
164 .releaseTexBuffer = NULL,
165 };
166
167 static void
168 intel_dri2_flush_with_flags(__DRIcontext *cPriv,
169 __DRIdrawable *dPriv,
170 unsigned flags,
171 enum __DRI2throttleReason reason)
172 {
173 struct brw_context *brw = cPriv->driverPrivate;
174
175 if (!brw)
176 return;
177
178 struct gl_context *ctx = &brw->ctx;
179
180 FLUSH_VERTICES(ctx, 0);
181
182 if (flags & __DRI2_FLUSH_DRAWABLE)
183 intel_resolve_for_dri2_flush(brw, dPriv);
184
185 if (reason == __DRI2_THROTTLE_SWAPBUFFER ||
186 reason == __DRI2_THROTTLE_FLUSHFRONT) {
187 brw->need_throttle = true;
188 }
189
190 intel_batchbuffer_flush(brw);
191
192 if (INTEL_DEBUG & DEBUG_AUB) {
193 aub_dump_bmp(ctx);
194 }
195 }
196
197 /**
198 * Provides compatibility with loaders that only support the older (version
199 * 1-3) flush interface.
200 *
201 * That includes libGL up to Mesa 9.0, and the X Server at least up to 1.13.
202 */
203 static void
204 intel_dri2_flush(__DRIdrawable *drawable)
205 {
206 intel_dri2_flush_with_flags(drawable->driContextPriv, drawable,
207 __DRI2_FLUSH_DRAWABLE,
208 __DRI2_THROTTLE_SWAPBUFFER);
209 }
210
211 static const struct __DRI2flushExtensionRec intelFlushExtension = {
212 .base = { __DRI2_FLUSH, 4 },
213
214 .flush = intel_dri2_flush,
215 .invalidate = dri2InvalidateDrawable,
216 .flush_with_flags = intel_dri2_flush_with_flags,
217 };
218
219 static struct intel_image_format intel_image_formats[] = {
220 { __DRI_IMAGE_FOURCC_ARGB8888, __DRI_IMAGE_COMPONENTS_RGBA, 1,
221 { { 0, 0, 0, __DRI_IMAGE_FORMAT_ARGB8888, 4 } } },
222
223 { __DRI_IMAGE_FOURCC_XRGB8888, __DRI_IMAGE_COMPONENTS_RGB, 1,
224 { { 0, 0, 0, __DRI_IMAGE_FORMAT_XRGB8888, 4 }, } },
225
226 { __DRI_IMAGE_FOURCC_RGB565, __DRI_IMAGE_COMPONENTS_RGB, 1,
227 { { 0, 0, 0, __DRI_IMAGE_FORMAT_RGB565, 2 } } },
228
229 { __DRI_IMAGE_FOURCC_YUV410, __DRI_IMAGE_COMPONENTS_Y_U_V, 3,
230 { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
231 { 1, 2, 2, __DRI_IMAGE_FORMAT_R8, 1 },
232 { 2, 2, 2, __DRI_IMAGE_FORMAT_R8, 1 } } },
233
234 { __DRI_IMAGE_FOURCC_YUV411, __DRI_IMAGE_COMPONENTS_Y_U_V, 3,
235 { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
236 { 1, 2, 0, __DRI_IMAGE_FORMAT_R8, 1 },
237 { 2, 2, 0, __DRI_IMAGE_FORMAT_R8, 1 } } },
238
239 { __DRI_IMAGE_FOURCC_YUV420, __DRI_IMAGE_COMPONENTS_Y_U_V, 3,
240 { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
241 { 1, 1, 1, __DRI_IMAGE_FORMAT_R8, 1 },
242 { 2, 1, 1, __DRI_IMAGE_FORMAT_R8, 1 } } },
243
244 { __DRI_IMAGE_FOURCC_YUV422, __DRI_IMAGE_COMPONENTS_Y_U_V, 3,
245 { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
246 { 1, 1, 0, __DRI_IMAGE_FORMAT_R8, 1 },
247 { 2, 1, 0, __DRI_IMAGE_FORMAT_R8, 1 } } },
248
249 { __DRI_IMAGE_FOURCC_YUV444, __DRI_IMAGE_COMPONENTS_Y_U_V, 3,
250 { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
251 { 1, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
252 { 2, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 } } },
253
254 { __DRI_IMAGE_FOURCC_NV12, __DRI_IMAGE_COMPONENTS_Y_UV, 2,
255 { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
256 { 1, 1, 1, __DRI_IMAGE_FORMAT_GR88, 2 } } },
257
258 { __DRI_IMAGE_FOURCC_NV16, __DRI_IMAGE_COMPONENTS_Y_UV, 2,
259 { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
260 { 1, 1, 0, __DRI_IMAGE_FORMAT_GR88, 2 } } },
261
262 /* For YUYV buffers, we set up two overlapping DRI images and treat
263 * them as planar buffers in the compositors. Plane 0 is GR88 and
264 * samples YU or YV pairs and places Y into the R component, while
265 * plane 1 is ARGB and samples whole YUYV clusters, placing U into
266 * the G component and V into A. This lets the
267 * texture sampler interpolate the Y components correctly when
268 * sampling from plane 0, and interpolate U and V correctly when
269 * sampling from plane 1. */
270 { __DRI_IMAGE_FOURCC_YUYV, __DRI_IMAGE_COMPONENTS_Y_XUXV, 2,
271 { { 0, 0, 0, __DRI_IMAGE_FORMAT_GR88, 2 },
272 { 0, 1, 0, __DRI_IMAGE_FORMAT_ARGB8888, 4 } } }
273 };
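/* Reading the table: each plane entry is { buffer_index, width_shift,
 * height_shift, dri_format, cpp }, and intel_from_planar() below sizes a
 * plane as the parent dimension >> shift.  Worked example for a hypothetical
 * 256x256 NV12 buffer: plane 0 is 256x256 R8 (Y), and plane 1 is
 * (256 >> 1) x (256 >> 1) = 128x128 GR88, i.e. interleaved U/V at half
 * resolution in both dimensions.
 */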
274
275 static struct intel_image_format *
276 intel_image_format_lookup(int fourcc)
277 {
278 struct intel_image_format *f = NULL;
279
280 for (unsigned i = 0; i < ARRAY_SIZE(intel_image_formats); i++) {
281 if (intel_image_formats[i].fourcc == fourcc) {
282 f = &intel_image_formats[i];
283 break;
284 }
285 }
286
287 return f;
288 }
289
290 static __DRIimage *
291 intel_allocate_image(int dri_format, void *loaderPrivate)
292 {
293 __DRIimage *image;
294
295 image = calloc(1, sizeof *image);
296 if (image == NULL)
297 return NULL;
298
299 image->dri_format = dri_format;
300 image->offset = 0;
301
302 image->format = driImageFormatToGLFormat(dri_format);
303 if (dri_format != __DRI_IMAGE_FORMAT_NONE &&
304 image->format == MESA_FORMAT_NONE) {
305 free(image);
306 return NULL;
307 }
308
309 image->internal_format = _mesa_get_format_base_format(image->format);
310 image->data = loaderPrivate;
311
312 return image;
313 }
314
315 /**
316 * Sets up a DRIImage structure to point to our shared image in a region
317 */
318 static void
319 intel_setup_image_from_mipmap_tree(struct brw_context *brw, __DRIimage *image,
320 struct intel_mipmap_tree *mt, GLuint level,
321 GLuint zoffset)
322 {
323 unsigned int draw_x, draw_y;
324 uint32_t mask_x, mask_y;
325
326 intel_miptree_make_shareable(brw, mt);
327
328 intel_miptree_check_level_layer(mt, level, zoffset);
329
330 intel_region_get_tile_masks(mt->region, &mask_x, &mask_y, false);
331 intel_miptree_get_image_offset(mt, level, zoffset, &draw_x, &draw_y);
332
333 image->width = mt->level[level].width;
334 image->height = mt->level[level].height;
335 image->tile_x = draw_x & mask_x;
336 image->tile_y = draw_y & mask_y;
337
338 image->offset = intel_region_get_aligned_offset(mt->region,
339 draw_x & ~mask_x,
340 draw_y & ~mask_y,
341 false);
342
343 intel_region_reference(&image->region, mt->region);
344 }
345
346 static void
347 intel_setup_image_from_dimensions(__DRIimage *image)
348 {
349 image->width = image->region->width;
350 image->height = image->region->height;
351 image->tile_x = 0;
352 image->tile_y = 0;
353 image->has_depthstencil = false;
354 }
355
356 static __DRIimage *
357 intel_create_image_from_name(__DRIscreen *screen,
358 int width, int height, int format,
359 int name, int pitch, void *loaderPrivate)
360 {
361 struct intel_screen *intelScreen = screen->driverPrivate;
362 __DRIimage *image;
363 int cpp;
364
365 image = intel_allocate_image(format, loaderPrivate);
366 if (image == NULL)
367 return NULL;
368
369 if (image->format == MESA_FORMAT_NONE)
370 cpp = 1;
371 else
372 cpp = _mesa_get_format_bytes(image->format);
373 image->region = intel_region_alloc_for_handle(intelScreen,
374 cpp, width, height,
375 pitch * cpp, name, "image");
376 if (image->region == NULL) {
377 free(image);
378 return NULL;
379 }
380
381 intel_setup_image_from_dimensions(image);
382
383 return image;
384 }
385
386 static __DRIimage *
387 intel_create_image_from_renderbuffer(__DRIcontext *context,
388 int renderbuffer, void *loaderPrivate)
389 {
390 __DRIimage *image;
391 struct brw_context *brw = context->driverPrivate;
392 struct gl_context *ctx = &brw->ctx;
393 struct gl_renderbuffer *rb;
394 struct intel_renderbuffer *irb;
395
396 rb = _mesa_lookup_renderbuffer(ctx, renderbuffer);
397 if (!rb) {
398 _mesa_error(ctx, GL_INVALID_OPERATION, "glRenderbufferExternalMESA");
399 return NULL;
400 }
401
402 irb = intel_renderbuffer(rb);
403 intel_miptree_make_shareable(brw, irb->mt);
404 image = calloc(1, sizeof *image);
405 if (image == NULL)
406 return NULL;
407
408 image->internal_format = rb->InternalFormat;
409 image->format = rb->Format;
410 image->offset = 0;
411 image->data = loaderPrivate;
412 intel_region_reference(&image->region, irb->mt->region);
413 intel_setup_image_from_dimensions(image);
414 image->dri_format = driGLFormatToImageFormat(image->format);
415 image->has_depthstencil = irb->mt->stencil_mt? true : false;
416
417 rb->NeedsFinishRenderTexture = true;
418 return image;
419 }
420
421 static __DRIimage *
422 intel_create_image_from_texture(__DRIcontext *context, int target,
423 unsigned texture, int zoffset,
424 int level,
425 unsigned *error,
426 void *loaderPrivate)
427 {
428 __DRIimage *image;
429 struct brw_context *brw = context->driverPrivate;
430 struct gl_texture_object *obj;
431 struct intel_texture_object *iobj;
432 GLuint face = 0;
433
434 obj = _mesa_lookup_texture(&brw->ctx, texture);
435 if (!obj || obj->Target != target) {
436 *error = __DRI_IMAGE_ERROR_BAD_PARAMETER;
437 return NULL;
438 }
439
440 if (target == GL_TEXTURE_CUBE_MAP)
441 face = zoffset;
442
443 _mesa_test_texobj_completeness(&brw->ctx, obj);
444 iobj = intel_texture_object(obj);
445 if (!obj->_BaseComplete || (level > 0 && !obj->_MipmapComplete)) {
446 *error = __DRI_IMAGE_ERROR_BAD_PARAMETER;
447 return NULL;
448 }
449
450 if (level < obj->BaseLevel || level > obj->_MaxLevel) {
451 *error = __DRI_IMAGE_ERROR_BAD_MATCH;
452 return NULL;
453 }
454
455 if (target == GL_TEXTURE_3D && obj->Image[face][level]->Depth < zoffset) {
456 *error = __DRI_IMAGE_ERROR_BAD_MATCH;
457 return NULL;
458 }
459 image = calloc(1, sizeof *image);
460 if (image == NULL) {
461 *error = __DRI_IMAGE_ERROR_BAD_ALLOC;
462 return NULL;
463 }
464
465 image->internal_format = obj->Image[face][level]->InternalFormat;
466 image->format = obj->Image[face][level]->TexFormat;
467 image->data = loaderPrivate;
468 intel_setup_image_from_mipmap_tree(brw, image, iobj->mt, level, zoffset);
469 image->dri_format = driGLFormatToImageFormat(image->format);
470 image->has_depthstencil = iobj->mt->stencil_mt? true : false;
471    if (image->dri_format == __DRI_IMAGE_FORMAT_NONE) {
472 *error = __DRI_IMAGE_ERROR_BAD_PARAMETER;
473 free(image);
474 return NULL;
475 }
476
477 *error = __DRI_IMAGE_ERROR_SUCCESS;
478 return image;
479 }
480
481 static void
482 intel_destroy_image(__DRIimage *image)
483 {
484 intel_region_release(&image->region);
485 free(image);
486 }
487
488 static __DRIimage *
489 intel_create_image(__DRIscreen *screen,
490 int width, int height, int format,
491 unsigned int use,
492 void *loaderPrivate)
493 {
494 __DRIimage *image;
495 struct intel_screen *intelScreen = screen->driverPrivate;
496 uint32_t tiling;
497 int cpp;
498
499 tiling = I915_TILING_X;
500 if (use & __DRI_IMAGE_USE_CURSOR) {
501 if (width != 64 || height != 64)
502 return NULL;
503 tiling = I915_TILING_NONE;
504 }
505
506 if (use & __DRI_IMAGE_USE_LINEAR)
507 tiling = I915_TILING_NONE;
508
509 image = intel_allocate_image(format, loaderPrivate);
510 if (image == NULL)
511 return NULL;
512
513 cpp = _mesa_get_format_bytes(image->format);
514 image->region =
515 intel_region_alloc(intelScreen, tiling, cpp, width, height, true);
516 if (image->region == NULL) {
517 free(image);
518 return NULL;
519 }
520
521 intel_setup_image_from_dimensions(image);
522
523 return image;
524 }
525
526 static GLboolean
527 intel_query_image(__DRIimage *image, int attrib, int *value)
528 {
529 switch (attrib) {
530 case __DRI_IMAGE_ATTRIB_STRIDE:
531 *value = image->region->pitch;
532 return true;
533 case __DRI_IMAGE_ATTRIB_HANDLE:
534 *value = image->region->bo->handle;
535 return true;
536 case __DRI_IMAGE_ATTRIB_NAME:
537 return intel_region_flink(image->region, (uint32_t *) value);
538 case __DRI_IMAGE_ATTRIB_FORMAT:
539 *value = image->dri_format;
540 return true;
541 case __DRI_IMAGE_ATTRIB_WIDTH:
542 *value = image->region->width;
543 return true;
544 case __DRI_IMAGE_ATTRIB_HEIGHT:
545 *value = image->region->height;
546 return true;
547 case __DRI_IMAGE_ATTRIB_COMPONENTS:
548 if (image->planar_format == NULL)
549 return false;
550 *value = image->planar_format->components;
551 return true;
552 case __DRI_IMAGE_ATTRIB_FD:
553 if (drm_intel_bo_gem_export_to_prime(image->region->bo, value) == 0)
554 return true;
555 return false;
556 default:
557 return false;
558 }
559 }
560
561 static __DRIimage *
562 intel_dup_image(__DRIimage *orig_image, void *loaderPrivate)
563 {
564 __DRIimage *image;
565
566 image = calloc(1, sizeof *image);
567 if (image == NULL)
568 return NULL;
569
570 intel_region_reference(&image->region, orig_image->region);
571 if (image->region == NULL) {
572 free(image);
573 return NULL;
574 }
575
576 image->internal_format = orig_image->internal_format;
577 image->planar_format = orig_image->planar_format;
578 image->dri_format = orig_image->dri_format;
579 image->format = orig_image->format;
580 image->offset = orig_image->offset;
581 image->width = orig_image->width;
582 image->height = orig_image->height;
583 image->tile_x = orig_image->tile_x;
584 image->tile_y = orig_image->tile_y;
585 image->has_depthstencil = orig_image->has_depthstencil;
586 image->data = loaderPrivate;
587
588 memcpy(image->strides, orig_image->strides, sizeof(image->strides));
589 memcpy(image->offsets, orig_image->offsets, sizeof(image->offsets));
590
591 return image;
592 }
593
594 static GLboolean
595 intel_validate_usage(__DRIimage *image, unsigned int use)
596 {
597 if (use & __DRI_IMAGE_USE_CURSOR) {
598 if (image->region->width != 64 || image->region->height != 64)
599 return GL_FALSE;
600 }
601
602 return GL_TRUE;
603 }
604
605 static __DRIimage *
606 intel_create_image_from_names(__DRIscreen *screen,
607 int width, int height, int fourcc,
608 int *names, int num_names,
609 int *strides, int *offsets,
610 void *loaderPrivate)
611 {
612 struct intel_image_format *f = NULL;
613 __DRIimage *image;
614 int i, index;
615
616 if (screen == NULL || names == NULL || num_names != 1)
617 return NULL;
618
619 f = intel_image_format_lookup(fourcc);
620 if (f == NULL)
621 return NULL;
622
623 image = intel_create_image_from_name(screen, width, height,
624 __DRI_IMAGE_FORMAT_NONE,
625 names[0], strides[0],
626 loaderPrivate);
627
628 if (image == NULL)
629 return NULL;
630
631 image->planar_format = f;
632 for (i = 0; i < f->nplanes; i++) {
633 index = f->planes[i].buffer_index;
634 image->offsets[index] = offsets[index];
635 image->strides[index] = strides[index];
636 }
637
638 return image;
639 }
640
641 static __DRIimage *
642 intel_create_image_from_fds(__DRIscreen *screen,
643 int width, int height, int fourcc,
644 int *fds, int num_fds, int *strides, int *offsets,
645 void *loaderPrivate)
646 {
647 struct intel_screen *intelScreen = screen->driverPrivate;
648 struct intel_image_format *f;
649 __DRIimage *image;
650 int i, index;
651
652 if (fds == NULL || num_fds != 1)
653 return NULL;
654
655 f = intel_image_format_lookup(fourcc);
656 if (f == NULL)
657 return NULL;
658
659 if (f->nplanes == 1)
660 image = intel_allocate_image(f->planes[0].dri_format, loaderPrivate);
661 else
662 image = intel_allocate_image(__DRI_IMAGE_FORMAT_NONE, loaderPrivate);
663
664 if (image == NULL)
665 return NULL;
666
667 image->region = intel_region_alloc_for_fd(intelScreen,
668 f->planes[0].cpp, width, height, strides[0],
669 height * strides[0], fds[0], "image");
670 if (image->region == NULL) {
671 free(image);
672 return NULL;
673 }
674
675 image->planar_format = f;
676 for (i = 0; i < f->nplanes; i++) {
677 index = f->planes[i].buffer_index;
678 image->offsets[index] = offsets[index];
679 image->strides[index] = strides[index];
680 }
681
682 intel_setup_image_from_dimensions(image);
683
684 return image;
685 }
686
687 static __DRIimage *
688 intel_create_image_from_dma_bufs(__DRIscreen *screen,
689 int width, int height, int fourcc,
690 int *fds, int num_fds,
691 int *strides, int *offsets,
692 enum __DRIYUVColorSpace yuv_color_space,
693 enum __DRISampleRange sample_range,
694 enum __DRIChromaSiting horizontal_siting,
695 enum __DRIChromaSiting vertical_siting,
696 unsigned *error,
697 void *loaderPrivate)
698 {
699 __DRIimage *image;
700 struct intel_image_format *f = intel_image_format_lookup(fourcc);
701
702 /* For now only packed formats that have native sampling are supported. */
703 if (!f || f->nplanes != 1) {
704 *error = __DRI_IMAGE_ERROR_BAD_MATCH;
705 return NULL;
706 }
707
708 image = intel_create_image_from_fds(screen, width, height, fourcc, fds,
709 num_fds, strides, offsets,
710 loaderPrivate);
711
712 /*
713 * Invalid parameters and any inconsistencies between them are assumed to
714 * be checked by the caller. Therefore, besides unsupported formats, this
715 * can fail only in allocation.
716 */
717 if (!image) {
718 *error = __DRI_IMAGE_ERROR_BAD_ALLOC;
719 return NULL;
720 }
721
722 image->dma_buf_imported = true;
723 image->yuv_color_space = yuv_color_space;
724 image->sample_range = sample_range;
725 image->horizontal_siting = horizontal_siting;
726 image->vertical_siting = vertical_siting;
727
728 *error = __DRI_IMAGE_ERROR_SUCCESS;
729 return image;
730 }
731
732 static __DRIimage *
733 intel_from_planar(__DRIimage *parent, int plane, void *loaderPrivate)
734 {
735 int width, height, offset, stride, dri_format, index;
736 struct intel_image_format *f;
737 uint32_t mask_x, mask_y;
738 __DRIimage *image;
739
740 if (parent == NULL || parent->planar_format == NULL)
741 return NULL;
742
743 f = parent->planar_format;
744
745 if (plane >= f->nplanes)
746 return NULL;
747
748 width = parent->region->width >> f->planes[plane].width_shift;
749 height = parent->region->height >> f->planes[plane].height_shift;
750 dri_format = f->planes[plane].dri_format;
751 index = f->planes[plane].buffer_index;
752 offset = parent->offsets[index];
753 stride = parent->strides[index];
754
755 image = intel_allocate_image(dri_format, loaderPrivate);
756 if (image == NULL)
757 return NULL;
758
759 if (offset + height * stride > parent->region->bo->size) {
760 _mesa_warning(NULL, "intel_create_sub_image: subimage out of bounds");
761 free(image);
762 return NULL;
763 }
764
765 image->region = calloc(sizeof(*image->region), 1);
766 if (image->region == NULL) {
767 free(image);
768 return NULL;
769 }
770
771 image->region->cpp = _mesa_get_format_bytes(image->format);
772 image->region->width = width;
773 image->region->height = height;
774 image->region->pitch = stride;
775 image->region->refcount = 1;
776 image->region->bo = parent->region->bo;
777 drm_intel_bo_reference(image->region->bo);
778 image->region->tiling = parent->region->tiling;
779 image->offset = offset;
780 intel_setup_image_from_dimensions(image);
781
782 intel_region_get_tile_masks(image->region, &mask_x, &mask_y, false);
783 if (offset & mask_x)
784 _mesa_warning(NULL,
785 "intel_create_sub_image: offset not on tile boundary");
786
787 return image;
788 }
789
790 static struct __DRIimageExtensionRec intelImageExtension = {
791 .base = { __DRI_IMAGE, 8 },
792
793 .createImageFromName = intel_create_image_from_name,
794 .createImageFromRenderbuffer = intel_create_image_from_renderbuffer,
795 .destroyImage = intel_destroy_image,
796 .createImage = intel_create_image,
797 .queryImage = intel_query_image,
798 .dupImage = intel_dup_image,
799 .validateUsage = intel_validate_usage,
800 .createImageFromNames = intel_create_image_from_names,
801 .fromPlanar = intel_from_planar,
802 .createImageFromTexture = intel_create_image_from_texture,
803 .createImageFromFds = intel_create_image_from_fds,
804 .createImageFromDmaBufs = intel_create_image_from_dma_bufs
805 };
806
807 static int
808 brw_query_renderer_integer(__DRIscreen *psp, int param, unsigned int *value)
809 {
810 const struct intel_screen *const intelScreen =
811 (struct intel_screen *) psp->driverPrivate;
812
813 switch (param) {
814 case __DRI2_RENDERER_VENDOR_ID:
815 value[0] = 0x8086;
816 return 0;
817 case __DRI2_RENDERER_DEVICE_ID:
818 value[0] = intelScreen->deviceID;
819 return 0;
820 case __DRI2_RENDERER_ACCELERATED:
821 value[0] = 1;
822 return 0;
823 case __DRI2_RENDERER_VIDEO_MEMORY: {
824 /* Once a batch uses more than 75% of the maximum mappable size, we
825 * assume that there's some fragmentation, and we start doing extra
826 * flushing, etc. That's the big cliff apps will care about.
827 */
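      /* Worked example: with a hypothetical 2 GiB aperture,
       * (2048 MiB) * 3 / 4 = 1536 MiB would be reported below, further
       * clamped by MIN2() to the amount of physical system memory.
       */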
828 size_t aper_size;
829 size_t mappable_size;
830
831 drm_intel_get_aperture_sizes(psp->fd, &mappable_size, &aper_size);
832
833 const unsigned gpu_mappable_megabytes =
834 (aper_size / (1024 * 1024)) * 3 / 4;
835
836 const long system_memory_pages = sysconf(_SC_PHYS_PAGES);
837 const long system_page_size = sysconf(_SC_PAGE_SIZE);
838
839 if (system_memory_pages <= 0 || system_page_size <= 0)
840 return -1;
841
842 const uint64_t system_memory_bytes = (uint64_t) system_memory_pages
843 * (uint64_t) system_page_size;
844
845 const unsigned system_memory_megabytes =
846          (unsigned) (system_memory_bytes / (1024 * 1024));
847
848 value[0] = MIN2(system_memory_megabytes, gpu_mappable_megabytes);
849 return 0;
850 }
851 case __DRI2_RENDERER_UNIFIED_MEMORY_ARCHITECTURE:
852 value[0] = 1;
853 return 0;
854 case __DRI2_RENDERER_PREFERRED_PROFILE:
855 value[0] = (psp->max_gl_core_version != 0)
856 ? (1U << __DRI_API_OPENGL_CORE) : (1U << __DRI_API_OPENGL);
857 return 0;
858 default:
859 return driQueryRendererIntegerCommon(psp, param, value);
860 }
861
862 return -1;
863 }
864
865 static int
866 brw_query_renderer_string(__DRIscreen *psp, int param, const char **value)
867 {
868 const struct intel_screen *intelScreen =
869 (struct intel_screen *) psp->driverPrivate;
870
871 switch (param) {
872 case __DRI2_RENDERER_VENDOR_ID:
873 value[0] = brw_vendor_string;
874 return 0;
875 case __DRI2_RENDERER_DEVICE_ID:
876 value[0] = brw_get_renderer_string(intelScreen->deviceID);
877 return 0;
878 default:
879 break;
880 }
881
882 return -1;
883 }
884
885 static struct __DRI2rendererQueryExtensionRec intelRendererQueryExtension = {
886 .base = { __DRI2_RENDERER_QUERY, 1 },
887
888 .queryInteger = brw_query_renderer_integer,
889 .queryString = brw_query_renderer_string
890 };
891
892 static const struct __DRIrobustnessExtensionRec dri2Robustness = {
893 { __DRI2_ROBUSTNESS, 1 }
894 };
895
896 static const __DRIextension *intelScreenExtensions[] = {
897 &intelTexBufferExtension.base,
898 &intelFlushExtension.base,
899 &intelImageExtension.base,
900 &intelRendererQueryExtension.base,
901 &dri2ConfigQueryExtension.base,
902 NULL
903 };
904
905 static const __DRIextension *intelRobustScreenExtensions[] = {
906 &intelTexBufferExtension.base,
907 &intelFlushExtension.base,
908 &intelImageExtension.base,
909 &intelRendererQueryExtension.base,
910 &dri2ConfigQueryExtension.base,
911 &dri2Robustness.base,
912 NULL
913 };
914
915 static bool
916 intel_get_param(__DRIscreen *psp, int param, int *value)
917 {
918 int ret;
919 struct drm_i915_getparam gp;
920
921 memset(&gp, 0, sizeof(gp));
922 gp.param = param;
923 gp.value = value;
924
925 ret = drmCommandWriteRead(psp->fd, DRM_I915_GETPARAM, &gp, sizeof(gp));
926 if (ret) {
927 if (ret != -EINVAL)
928 _mesa_warning(NULL, "drm_i915_getparam: %d", ret);
929 return false;
930 }
931
932 return true;
933 }
934
935 static bool
936 intel_get_boolean(__DRIscreen *psp, int param)
937 {
938 int value = 0;
939 return intel_get_param(psp, param, &value) && value;
940 }
941
942 static void
943 intelDestroyScreen(__DRIscreen * sPriv)
944 {
945 struct intel_screen *intelScreen = sPriv->driverPrivate;
946
947 dri_bufmgr_destroy(intelScreen->bufmgr);
948 driDestroyOptionInfo(&intelScreen->optionCache);
949
950 free(intelScreen);
951 sPriv->driverPrivate = NULL;
952 }
953
954
955 /**
956 * This is called when we need to set up GL rendering to a new X window.
957 */
958 static GLboolean
959 intelCreateBuffer(__DRIscreen * driScrnPriv,
960 __DRIdrawable * driDrawPriv,
961 const struct gl_config * mesaVis, GLboolean isPixmap)
962 {
963 struct intel_renderbuffer *rb;
964 struct intel_screen *screen = (struct intel_screen*) driScrnPriv->driverPrivate;
965 gl_format rgbFormat;
966 unsigned num_samples = intel_quantize_num_samples(screen, mesaVis->samples);
967 struct gl_framebuffer *fb;
968
969 if (isPixmap)
970 return false;
971
972 fb = CALLOC_STRUCT(gl_framebuffer);
973 if (!fb)
974 return false;
975
976 _mesa_initialize_window_framebuffer(fb, mesaVis);
977
978 if (mesaVis->redBits == 5)
979 rgbFormat = MESA_FORMAT_RGB565;
980 else if (mesaVis->sRGBCapable)
981 rgbFormat = MESA_FORMAT_SARGB8;
982 else if (mesaVis->alphaBits == 0)
983 rgbFormat = MESA_FORMAT_XRGB8888;
984 else {
985 rgbFormat = MESA_FORMAT_SARGB8;
986 fb->Visual.sRGBCapable = true;
987 }
988
989 /* setup the hardware-based renderbuffers */
990 rb = intel_create_renderbuffer(rgbFormat, num_samples);
991 _mesa_add_renderbuffer(fb, BUFFER_FRONT_LEFT, &rb->Base.Base);
992
993 if (mesaVis->doubleBufferMode) {
994 rb = intel_create_renderbuffer(rgbFormat, num_samples);
995 _mesa_add_renderbuffer(fb, BUFFER_BACK_LEFT, &rb->Base.Base);
996 }
997
998 /*
999 * Assert here that the gl_config has an expected depth/stencil bit
1000 * combination: one of d24/s8, d16/s0, d0/s0. (See intelInitScreen2(),
1001 * which constructs the advertised configs.)
1002 */
1003 if (mesaVis->depthBits == 24) {
1004 assert(mesaVis->stencilBits == 8);
1005
1006 if (screen->devinfo->has_hiz_and_separate_stencil) {
1007 rb = intel_create_private_renderbuffer(MESA_FORMAT_X8_Z24,
1008 num_samples);
1009 _mesa_add_renderbuffer(fb, BUFFER_DEPTH, &rb->Base.Base);
1010 rb = intel_create_private_renderbuffer(MESA_FORMAT_S8,
1011 num_samples);
1012 _mesa_add_renderbuffer(fb, BUFFER_STENCIL, &rb->Base.Base);
1013 } else {
1014 /*
1015 * Use combined depth/stencil. Note that the renderbuffer is
1016 * attached to two attachment points.
1017 */
1018 rb = intel_create_private_renderbuffer(MESA_FORMAT_S8_Z24,
1019 num_samples);
1020 _mesa_add_renderbuffer(fb, BUFFER_DEPTH, &rb->Base.Base);
1021 _mesa_add_renderbuffer(fb, BUFFER_STENCIL, &rb->Base.Base);
1022 }
1023 }
1024 else if (mesaVis->depthBits == 16) {
1025 assert(mesaVis->stencilBits == 0);
1026 rb = intel_create_private_renderbuffer(MESA_FORMAT_Z16,
1027 num_samples);
1028 _mesa_add_renderbuffer(fb, BUFFER_DEPTH, &rb->Base.Base);
1029 }
1030 else {
1031 assert(mesaVis->depthBits == 0);
1032 assert(mesaVis->stencilBits == 0);
1033 }
1034
1035 /* now add any/all software-based renderbuffers we may need */
1036 _swrast_add_soft_renderbuffers(fb,
1037 false, /* never sw color */
1038 false, /* never sw depth */
1039 false, /* never sw stencil */
1040 mesaVis->accumRedBits > 0,
1041 false, /* never sw alpha */
1042 false /* never sw aux */ );
1043 driDrawPriv->driverPrivate = fb;
1044
1045 return true;
1046 }
1047
1048 static void
1049 intelDestroyBuffer(__DRIdrawable * driDrawPriv)
1050 {
1051 struct gl_framebuffer *fb = driDrawPriv->driverPrivate;
1052
1053 _mesa_reference_framebuffer(&fb, NULL);
1054 }
1055
1056 static bool
1057 intel_init_bufmgr(struct intel_screen *intelScreen)
1058 {
1059 __DRIscreen *spriv = intelScreen->driScrnPriv;
1060
1061 intelScreen->no_hw = getenv("INTEL_NO_HW") != NULL;
1062
1063 intelScreen->bufmgr = intel_bufmgr_gem_init(spriv->fd, BATCH_SZ);
1064 if (intelScreen->bufmgr == NULL) {
1065 fprintf(stderr, "[%s:%u] Error initializing buffer manager.\n",
1066 __func__, __LINE__);
1067 return false;
1068 }
1069
1070 drm_intel_bufmgr_gem_enable_fenced_relocs(intelScreen->bufmgr);
1071
1072 if (!intel_get_boolean(spriv, I915_PARAM_HAS_RELAXED_DELTA)) {
1073 fprintf(stderr, "[%s: %u] Kernel 2.6.39 required.\n", __func__, __LINE__);
1074 return false;
1075 }
1076
1077 return true;
1078 }
1079
1080 static bool
1081 intel_detect_swizzling(struct intel_screen *screen)
1082 {
1083 drm_intel_bo *buffer;
1084 unsigned long flags = 0;
1085 unsigned long aligned_pitch;
1086 uint32_t tiling = I915_TILING_X;
1087 uint32_t swizzle_mode = 0;
1088
1089 buffer = drm_intel_bo_alloc_tiled(screen->bufmgr, "swizzle test",
1090 64, 64, 4,
1091 &tiling, &aligned_pitch, flags);
1092 if (buffer == NULL)
1093 return false;
1094
1095 drm_intel_bo_get_tiling(buffer, &tiling, &swizzle_mode);
1096 drm_intel_bo_unreference(buffer);
1097
1098 if (swizzle_mode == I915_BIT_6_SWIZZLE_NONE)
1099 return false;
1100 else
1101 return true;
1102 }
1103
1104 /**
1105 * Return array of MSAA modes supported by the hardware. The array is
1106 * sorted in decreasing order and terminated by -1.
1107 */
1108 const int*
1109 intel_supported_msaa_modes(const struct intel_screen *screen)
1110 {
1111 static const int gen7_modes[] = {8, 4, 0, -1};
1112 static const int gen6_modes[] = {4, 0, -1};
1113 static const int gen4_modes[] = {0, -1};
1114
1115 if (screen->devinfo->gen >= 7) {
1116 return gen7_modes;
1117 } else if (screen->devinfo->gen == 6) {
1118 return gen6_modes;
1119 } else {
1120 return gen4_modes;
1121 }
1122 }
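/* The -1 sentinel lets callers walk the list without knowing its length,
 * along these lines:
 *
 *    const int *modes = intel_supported_msaa_modes(screen);
 *    for (int i = 0; modes[i] >= 0; i++)
 *       printf("%d sample(s) supported\n", modes[i]);
 */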
1123
1124 static __DRIconfig**
1125 intel_screen_make_configs(__DRIscreen *dri_screen)
1126 {
1127 static const gl_format formats[] = {
1128 MESA_FORMAT_RGB565,
1129 MESA_FORMAT_ARGB8888
1130 };
1131
1132 /* GLX_SWAP_COPY_OML is not supported due to page flipping. */
1133 static const GLenum back_buffer_modes[] = {
1134 GLX_SWAP_UNDEFINED_OML, GLX_NONE,
1135 };
1136
1137 static const uint8_t singlesample_samples[1] = {0};
1138 static const uint8_t multisample_samples[2] = {4, 8};
1139
1140 struct intel_screen *screen = dri_screen->driverPrivate;
1141 const struct brw_device_info *devinfo = screen->devinfo;
1142 uint8_t depth_bits[4], stencil_bits[4];
1143 __DRIconfig **configs = NULL;
1144
1145 /* Generate singlesample configs without accumulation buffer. */
1146 for (int i = 0; i < ARRAY_SIZE(formats); i++) {
1147 __DRIconfig **new_configs;
1148 int num_depth_stencil_bits = 2;
1149
1150 /* Starting with DRI2 protocol version 1.1 we can request a depth/stencil
1151 * buffer that has a different number of bits per pixel than the color
1152 * buffer; gen >= 6 supports this.
1153 */
1154 depth_bits[0] = 0;
1155 stencil_bits[0] = 0;
1156
1157 if (formats[i] == MESA_FORMAT_RGB565) {
1158 depth_bits[1] = 16;
1159 stencil_bits[1] = 0;
1160 if (devinfo->gen >= 6) {
1161 depth_bits[2] = 24;
1162 stencil_bits[2] = 8;
1163 num_depth_stencil_bits = 3;
1164 }
1165 } else {
1166 depth_bits[1] = 24;
1167 stencil_bits[1] = 8;
1168 }
1169
1170 new_configs = driCreateConfigs(formats[i],
1171 depth_bits,
1172 stencil_bits,
1173 num_depth_stencil_bits,
1174 back_buffer_modes, 2,
1175 singlesample_samples, 1,
1176 false);
1177 configs = driConcatConfigs(configs, new_configs);
1178 }
1179
1180 /* Generate the minimum possible set of configs that include an
1181 * accumulation buffer.
1182 */
1183 for (int i = 0; i < ARRAY_SIZE(formats); i++) {
1184 __DRIconfig **new_configs;
1185
1186 if (formats[i] == MESA_FORMAT_RGB565) {
1187 depth_bits[0] = 16;
1188 stencil_bits[0] = 0;
1189 } else {
1190 depth_bits[0] = 24;
1191 stencil_bits[0] = 8;
1192 }
1193
1194 new_configs = driCreateConfigs(formats[i],
1195 depth_bits, stencil_bits, 1,
1196 back_buffer_modes, 1,
1197 singlesample_samples, 1,
1198 true);
1199 configs = driConcatConfigs(configs, new_configs);
1200 }
1201
1202 /* Generate multisample configs.
1203 *
1204 * This loop breaks early, and hence is a no-op, on gen < 6.
1205 *
1206 * Multisample configs must follow the singlesample configs in order to
1207 * work around an X server bug present in 1.12. The X server chooses to
1208 * associate the first listed RGBA888-Z24S8 config, regardless of its
1209 * sample count, with the 32-bit depth visual used for compositing.
1210 *
1211 * Only doublebuffer configs with GLX_SWAP_UNDEFINED_OML behavior are
1212 * supported. Singlebuffer configs are not supported because no one wants
1213 * them.
1214 */
1215 for (int i = 0; i < ARRAY_SIZE(formats); i++) {
1216 if (devinfo->gen < 6)
1217 break;
1218
1219 __DRIconfig **new_configs;
1220 const int num_depth_stencil_bits = 2;
1221 int num_msaa_modes = 0;
1222
1223 depth_bits[0] = 0;
1224 stencil_bits[0] = 0;
1225
1226 if (formats[i] == MESA_FORMAT_RGB565) {
1227 depth_bits[1] = 16;
1228 stencil_bits[1] = 0;
1229 } else {
1230 depth_bits[1] = 24;
1231 stencil_bits[1] = 8;
1232 }
1233
1234 if (devinfo->gen >= 7)
1235 num_msaa_modes = 2;
1236 else if (devinfo->gen == 6)
1237 num_msaa_modes = 1;
1238
1239 new_configs = driCreateConfigs(formats[i],
1240 depth_bits,
1241 stencil_bits,
1242 num_depth_stencil_bits,
1243 back_buffer_modes, 1,
1244 multisample_samples,
1245 num_msaa_modes,
1246 false);
1247 configs = driConcatConfigs(configs, new_configs);
1248 }
1249
1250 if (configs == NULL) {
1251 fprintf(stderr, "[%s:%u] Error creating FBConfig!\n", __func__,
1252 __LINE__);
1253 return NULL;
1254 }
1255
1256 return configs;
1257 }
1258
1259 static void
1260 set_max_gl_versions(struct intel_screen *screen)
1261 {
1262 __DRIscreen *psp = screen->driScrnPriv;
1263
1264 switch (screen->devinfo->gen) {
1265 case 8:
1266 case 7:
1267 psp->max_gl_core_version = 33;
1268 psp->max_gl_compat_version = 30;
1269 psp->max_gl_es1_version = 11;
1270 psp->max_gl_es2_version = 30;
1271 break;
1272 case 6:
1273 psp->max_gl_core_version = 31;
1274 psp->max_gl_compat_version = 30;
1275 psp->max_gl_es1_version = 11;
1276 psp->max_gl_es2_version = 30;
1277 break;
1278 case 5:
1279 case 4:
1280 psp->max_gl_core_version = 0;
1281 psp->max_gl_compat_version = 21;
1282 psp->max_gl_es1_version = 11;
1283 psp->max_gl_es2_version = 20;
1284 break;
1285 default:
1286 assert(!"unrecognized intel_screen::gen");
1287 break;
1288 }
1289 }
1290
1291 /**
1292 * This is the driver specific part of the createNewScreen entry point.
1293 * Called when using DRI2.
1294 *
1295 * \return the struct gl_config supported by this driver
1296 */
1297 static const
1298 __DRIconfig **intelInitScreen2(__DRIscreen *psp)
1299 {
1300 struct intel_screen *intelScreen;
1301
1302 if (psp->image.loader) {
1303 } else if (psp->dri2.loader->base.version <= 2 ||
1304 psp->dri2.loader->getBuffersWithFormat == NULL) {
1305 fprintf(stderr,
1306 "\nERROR! DRI2 loader with getBuffersWithFormat() "
1307 "support required\n");
1308 return false;
1309 }
1310
1311 /* Allocate the private area */
1312 intelScreen = calloc(1, sizeof *intelScreen);
1313 if (!intelScreen) {
1314 fprintf(stderr, "\nERROR! Allocating private area failed\n");
1315 return false;
1316 }
1317 /* parse information in __driConfigOptions */
1318 driParseOptionInfo(&intelScreen->optionCache, brw_config_options.xml);
1319
1320 intelScreen->driScrnPriv = psp;
1321 psp->driverPrivate = (void *) intelScreen;
1322
1323 if (!intel_init_bufmgr(intelScreen))
1324 return false;
1325
1326 intelScreen->deviceID = drm_intel_bufmgr_gem_get_devid(intelScreen->bufmgr);
1327 intelScreen->devinfo = brw_get_device_info(intelScreen->deviceID);
1328
1329 intelScreen->hw_must_use_separate_stencil = intelScreen->devinfo->gen >= 7;
1330
1331 intelScreen->hw_has_swizzling = intel_detect_swizzling(intelScreen);
1332
1333 set_max_gl_versions(intelScreen);
1334
1335 /* Notification of GPU resets requires hardware contexts and a kernel new
1336 * enough to support DRM_IOCTL_I915_GET_RESET_STATS. If the ioctl is
1337 * supported, calling it with a context of 0 will either generate EPERM or
1338 * no error. If the ioctl is not supported, it always generates EINVAL.
1339 * Use this to determine whether to advertise the __DRI2_ROBUSTNESS
1340 * extension to the loader.
1341 */
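   /* In other words: success, or failure with any errno other than EINVAL
    * (e.g. EPERM for context 0), means the kernel supports reset stats;
    * only EINVAL means it does not.
    */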
1342 struct drm_i915_reset_stats stats;
1343 memset(&stats, 0, sizeof(stats));
1344
1345 const int ret = drmIoctl(psp->fd, DRM_IOCTL_I915_GET_RESET_STATS, &stats);
1346
1347 intelScreen->has_context_reset_notification = (ret != -1 || errno != EINVAL);
1348
1349 psp->extensions = !intelScreen->has_context_reset_notification
1350 ? intelScreenExtensions : intelRobustScreenExtensions;
1351
1352 return (const __DRIconfig**) intel_screen_make_configs(psp);
1353 }
1354
1355 struct intel_buffer {
1356 __DRIbuffer base;
1357 struct intel_region *region;
1358 };
1359
1360 static __DRIbuffer *
1361 intelAllocateBuffer(__DRIscreen *screen,
1362 unsigned attachment, unsigned format,
1363 int width, int height)
1364 {
1365 struct intel_buffer *intelBuffer;
1366 struct intel_screen *intelScreen = screen->driverPrivate;
1367
1368 assert(attachment == __DRI_BUFFER_FRONT_LEFT ||
1369 attachment == __DRI_BUFFER_BACK_LEFT);
1370
1371 intelBuffer = calloc(1, sizeof *intelBuffer);
1372 if (intelBuffer == NULL)
1373 return NULL;
1374
1375 /* The front and back buffers are color buffers, which are X tiled. */
1376 intelBuffer->region = intel_region_alloc(intelScreen,
1377 I915_TILING_X,
1378 format / 8,
1379 width,
1380 height,
1381 true);
1382
1383 if (intelBuffer->region == NULL) {
1384 free(intelBuffer);
1385 return NULL;
1386 }
1387
1388 intel_region_flink(intelBuffer->region, &intelBuffer->base.name);
1389
1390 intelBuffer->base.attachment = attachment;
1391 intelBuffer->base.cpp = intelBuffer->region->cpp;
1392 intelBuffer->base.pitch = intelBuffer->region->pitch;
1393
1394 return &intelBuffer->base;
1395 }
1396
1397 static void
1398 intelReleaseBuffer(__DRIscreen *screen, __DRIbuffer *buffer)
1399 {
1400 struct intel_buffer *intelBuffer = (struct intel_buffer *) buffer;
1401
1402 intel_region_release(&intelBuffer->region);
1403 free(intelBuffer);
1404 }
1405
1406 static const struct __DriverAPIRec brw_driver_api = {
1407 .InitScreen = intelInitScreen2,
1408 .DestroyScreen = intelDestroyScreen,
1409 .CreateContext = brwCreateContext,
1410 .DestroyContext = intelDestroyContext,
1411 .CreateBuffer = intelCreateBuffer,
1412 .DestroyBuffer = intelDestroyBuffer,
1413 .MakeCurrent = intelMakeCurrent,
1414 .UnbindContext = intelUnbindContext,
1415 .AllocateBuffer = intelAllocateBuffer,
1416 .ReleaseBuffer = intelReleaseBuffer
1417 };
1418
1419 static const struct __DRIDriverVtableExtensionRec brw_vtable = {
1420 .base = { __DRI_DRIVER_VTABLE, 1 },
1421 .vtable = &brw_driver_api,
1422 };
1423
1424 static const __DRIextension *brw_driver_extensions[] = {
1425 &driCoreExtension.base,
1426 &driImageDriverExtension.base,
1427 &driDRI2Extension.base,
1428 &brw_vtable.base,
1429 &brw_config_options.base,
1430 NULL
1431 };
1432
1433 PUBLIC const __DRIextension **__driDriverGetExtensions_i965(void)
1434 {
1435 globalDriverAPI = &brw_driver_api;
1436
1437 return brw_driver_extensions;
1438 }