/*
 * Copyright 2003 VMware, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "main/context.h"
#include "main/framebuffer.h"
#include "main/renderbuffer.h"
#include "main/texobj.h"
#include "main/hash.h"
#include "main/fbobject.h"
#include "main/version.h"
#include "swrast/s_renderbuffer.h"
#include "util/ralloc.h"
#include "brw_defines.h"
#include "compiler/nir/nir.h"

static const __DRIconfigOptionsExtension brw_config_options = {
   .base = { __DRI_CONFIG_OPTIONS, 1 },
   .xml =
DRI_CONF_BEGIN
   DRI_CONF_SECTION_PERFORMANCE
      DRI_CONF_VBLANK_MODE(DRI_CONF_VBLANK_ALWAYS_SYNC)
      /* Options correspond to DRI_CONF_BO_REUSE_DISABLED,
       * DRI_CONF_BO_REUSE_ALL
       */
      DRI_CONF_OPT_BEGIN_V(bo_reuse, enum, 1, "0:1")
         DRI_CONF_DESC_BEGIN(en, "Buffer object reuse")
            DRI_CONF_ENUM(0, "Disable buffer object reuse")
            DRI_CONF_ENUM(1, "Enable reuse of all sizes of buffer objects")
         DRI_CONF_DESC_END
      DRI_CONF_OPT_END
   DRI_CONF_SECTION_END

   DRI_CONF_SECTION_QUALITY
      DRI_CONF_FORCE_S3TC_ENABLE("false")

      DRI_CONF_PRECISE_TRIG("false")

      DRI_CONF_OPT_BEGIN(clamp_max_samples, int, -1)
         DRI_CONF_DESC(en, "Clamp the value of GL_MAX_SAMPLES to the "
                       "given integer. If negative, then do not clamp.")
      DRI_CONF_OPT_END
   DRI_CONF_SECTION_END

   DRI_CONF_SECTION_DEBUG
      DRI_CONF_NO_RAST("false")
      DRI_CONF_ALWAYS_FLUSH_BATCH("false")
      DRI_CONF_ALWAYS_FLUSH_CACHE("false")
      DRI_CONF_DISABLE_THROTTLING("false")
      DRI_CONF_FORCE_GLSL_EXTENSIONS_WARN("false")
      DRI_CONF_FORCE_GLSL_VERSION(0)
      DRI_CONF_DISABLE_GLSL_LINE_CONTINUATIONS("false")
      DRI_CONF_DISABLE_BLEND_FUNC_EXTENDED("false")
      DRI_CONF_DUAL_COLOR_BLEND_BY_LOCATION("false")
      DRI_CONF_ALLOW_GLSL_EXTENSION_DIRECTIVE_MIDSHADER("false")
      DRI_CONF_ALLOW_HIGHER_COMPAT_VERSION("false")

      DRI_CONF_OPT_BEGIN_B(shader_precompile, "true")
         DRI_CONF_DESC(en, "Perform code generation at shader link time.")
      DRI_CONF_OPT_END
   DRI_CONF_SECTION_END

   DRI_CONF_SECTION_MISCELLANEOUS
      DRI_CONF_GLSL_ZERO_INIT("false")
   DRI_CONF_SECTION_END
DRI_CONF_END
};

#include "intel_batchbuffer.h"
#include "intel_buffers.h"
#include "intel_bufmgr.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
#include "intel_screen.h"
#include "intel_tex.h"
#include "intel_image.h"

#include "brw_context.h"

#include "i915_drm.h"

/**
 * For debugging purposes, this returns a time in seconds.
 */
static double
get_time(void)
{
   struct timespec tp;

   clock_gettime(CLOCK_MONOTONIC, &tp);

   return tp.tv_sec + tp.tv_nsec / 1000000000.0;
}

static const __DRItexBufferExtension intelTexBufferExtension = {
   .base = { __DRI_TEX_BUFFER, 3 },

   .setTexBuffer        = intelSetTexBuffer,
   .setTexBuffer2       = intelSetTexBuffer2,
   .releaseTexBuffer    = NULL,
};

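/*
 * Flush rendering for a drawable and update throttling state. The function
 * flushes any pending vertices, performs the resolves needed before the
 * drawable's contents are handed out when __DRI2_FLUSH_DRAWABLE is set, and
 * records whether the next batch should throttle on swap or on a
 * front-buffer flush, based on the given reason.
 */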
static void
intel_dri2_flush_with_flags(__DRIcontext *cPriv,
                            __DRIdrawable *dPriv,
                            unsigned flags,
                            enum __DRI2throttleReason reason)
{
   struct brw_context *brw = cPriv->driverPrivate;
   struct gl_context *ctx = &brw->ctx;

   FLUSH_VERTICES(ctx, 0);

   if (flags & __DRI2_FLUSH_DRAWABLE)
      intel_resolve_for_dri2_flush(brw, dPriv);

   if (reason == __DRI2_THROTTLE_SWAPBUFFER)
      brw->need_swap_throttle = true;
   if (reason == __DRI2_THROTTLE_FLUSHFRONT)
      brw->need_flush_throttle = true;

   intel_batchbuffer_flush(brw);
}

/**
 * Provides compatibility with loaders that only support the older (version
 * 1-3) flush interface.
 *
 * That includes libGL up to Mesa 9.0, and the X Server at least up to 1.13.
 */
static void
intel_dri2_flush(__DRIdrawable *drawable)
{
   intel_dri2_flush_with_flags(drawable->driContextPriv, drawable,
                               __DRI2_FLUSH_DRAWABLE,
                               __DRI2_THROTTLE_SWAPBUFFER);
}

static const struct __DRI2flushExtensionRec intelFlushExtension = {
   .base = { __DRI2_FLUSH, 4 },

   .flush              = intel_dri2_flush,
   .invalidate         = dri2InvalidateDrawable,
   .flush_with_flags   = intel_dri2_flush_with_flags,
};

static struct intel_image_format intel_image_formats[] = {
   { __DRI_IMAGE_FOURCC_ARGB8888, __DRI_IMAGE_COMPONENTS_RGBA, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_ARGB8888, 4 } } },

   { __DRI_IMAGE_FOURCC_ABGR8888, __DRI_IMAGE_COMPONENTS_RGBA, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_ABGR8888, 4 } } },

   { __DRI_IMAGE_FOURCC_SARGB8888, __DRI_IMAGE_COMPONENTS_RGBA, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_SARGB8, 4 } } },

   { __DRI_IMAGE_FOURCC_XRGB8888, __DRI_IMAGE_COMPONENTS_RGB, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_XRGB8888, 4 }, } },

   { __DRI_IMAGE_FOURCC_XBGR8888, __DRI_IMAGE_COMPONENTS_RGB, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_XBGR8888, 4 }, } },

   { __DRI_IMAGE_FOURCC_ARGB1555, __DRI_IMAGE_COMPONENTS_RGBA, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_ARGB1555, 2 } } },

   { __DRI_IMAGE_FOURCC_RGB565, __DRI_IMAGE_COMPONENTS_RGB, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_RGB565, 2 } } },

   { __DRI_IMAGE_FOURCC_R8, __DRI_IMAGE_COMPONENTS_R, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 }, } },

   { __DRI_IMAGE_FOURCC_R16, __DRI_IMAGE_COMPONENTS_R, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R16, 1 }, } },

   { __DRI_IMAGE_FOURCC_GR88, __DRI_IMAGE_COMPONENTS_RG, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_GR88, 2 }, } },

   { __DRI_IMAGE_FOURCC_GR1616, __DRI_IMAGE_COMPONENTS_RG, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_GR1616, 2 }, } },

   { __DRI_IMAGE_FOURCC_YUV410, __DRI_IMAGE_COMPONENTS_Y_U_V, 3,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
       { 1, 2, 2, __DRI_IMAGE_FORMAT_R8, 1 },
       { 2, 2, 2, __DRI_IMAGE_FORMAT_R8, 1 } } },

   { __DRI_IMAGE_FOURCC_YUV411, __DRI_IMAGE_COMPONENTS_Y_U_V, 3,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
       { 1, 2, 0, __DRI_IMAGE_FORMAT_R8, 1 },
       { 2, 2, 0, __DRI_IMAGE_FORMAT_R8, 1 } } },

   { __DRI_IMAGE_FOURCC_YUV420, __DRI_IMAGE_COMPONENTS_Y_U_V, 3,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
       { 1, 1, 1, __DRI_IMAGE_FORMAT_R8, 1 },
       { 2, 1, 1, __DRI_IMAGE_FORMAT_R8, 1 } } },

   { __DRI_IMAGE_FOURCC_YUV422, __DRI_IMAGE_COMPONENTS_Y_U_V, 3,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
       { 1, 1, 0, __DRI_IMAGE_FORMAT_R8, 1 },
       { 2, 1, 0, __DRI_IMAGE_FORMAT_R8, 1 } } },

   { __DRI_IMAGE_FOURCC_YUV444, __DRI_IMAGE_COMPONENTS_Y_U_V, 3,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
       { 1, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
       { 2, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 } } },

   { __DRI_IMAGE_FOURCC_YVU410, __DRI_IMAGE_COMPONENTS_Y_U_V, 3,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
       { 2, 2, 2, __DRI_IMAGE_FORMAT_R8, 1 },
       { 1, 2, 2, __DRI_IMAGE_FORMAT_R8, 1 } } },

   { __DRI_IMAGE_FOURCC_YVU411, __DRI_IMAGE_COMPONENTS_Y_U_V, 3,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
       { 2, 2, 0, __DRI_IMAGE_FORMAT_R8, 1 },
       { 1, 2, 0, __DRI_IMAGE_FORMAT_R8, 1 } } },

   { __DRI_IMAGE_FOURCC_YVU420, __DRI_IMAGE_COMPONENTS_Y_U_V, 3,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
       { 2, 1, 1, __DRI_IMAGE_FORMAT_R8, 1 },
       { 1, 1, 1, __DRI_IMAGE_FORMAT_R8, 1 } } },

   { __DRI_IMAGE_FOURCC_YVU422, __DRI_IMAGE_COMPONENTS_Y_U_V, 3,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
       { 2, 1, 0, __DRI_IMAGE_FORMAT_R8, 1 },
       { 1, 1, 0, __DRI_IMAGE_FORMAT_R8, 1 } } },

   { __DRI_IMAGE_FOURCC_YVU444, __DRI_IMAGE_COMPONENTS_Y_U_V, 3,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
       { 2, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
       { 1, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 } } },

   { __DRI_IMAGE_FOURCC_NV12, __DRI_IMAGE_COMPONENTS_Y_UV, 2,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
       { 1, 1, 1, __DRI_IMAGE_FORMAT_GR88, 2 } } },

   { __DRI_IMAGE_FOURCC_NV16, __DRI_IMAGE_COMPONENTS_Y_UV, 2,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
       { 1, 1, 0, __DRI_IMAGE_FORMAT_GR88, 2 } } },

   /* For YUYV buffers, we set up two overlapping DRI images and treat
    * them as planar buffers in the compositors.  Plane 0 is GR88 and
    * samples YU or YV pairs, placing Y into the R component, while
    * plane 1 is ARGB and samples YUYV clusters, placing U into the G
    * component and V into A.  This lets the texture sampler interpolate
    * the Y components correctly when sampling from plane 0, and
    * interpolate U and V correctly when sampling from plane 1.
    */
   { __DRI_IMAGE_FOURCC_YUYV, __DRI_IMAGE_COMPONENTS_Y_XUXV, 2,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_GR88, 2 },
       { 0, 1, 0, __DRI_IMAGE_FORMAT_ARGB8888, 4 } } }
};

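/*
 * Warn when a tiled image's starting offset is not 4 KiB aligned; the check
 * mirrors the tile-boundary requirement for non-linear buffer objects.
 */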
static void
intel_image_warn_if_unaligned(__DRIimage *image, const char *func)
{
   uint32_t tiling, swizzle;
   drm_intel_bo_get_tiling(image->bo, &tiling, &swizzle);

   if (tiling != I915_TILING_NONE && (image->offset & 0xfff)) {
      _mesa_warning(NULL, "%s: offset 0x%08x not on tile boundary",
                    func, image->offset);
   }
}

static struct intel_image_format *
intel_image_format_lookup(int fourcc)
{
   struct intel_image_format *f = NULL;

   for (unsigned i = 0; i < ARRAY_SIZE(intel_image_formats); i++) {
      if (intel_image_formats[i].fourcc == fourcc) {
         f = &intel_image_formats[i];
         break;
      }
   }

   return f;
}

static boolean intel_lookup_fourcc(int dri_format, int *fourcc)
{
   for (unsigned i = 0; i < ARRAY_SIZE(intel_image_formats); i++) {
      if (intel_image_formats[i].planes[0].dri_format == dri_format) {
         *fourcc = intel_image_formats[i].fourcc;
         return true;
      }
   }
   return false;
}

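/*
 * Allocate a __DRIimage and fill in the format-derived fields. Returns NULL
 * when the DRI format has no corresponding mesa_format.
 */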
static __DRIimage *
intel_allocate_image(int dri_format, void *loaderPrivate)
{
   __DRIimage *image;

   image = calloc(1, sizeof *image);
   if (image == NULL)
      return NULL;

   image->dri_format = dri_format;

   image->format = driImageFormatToGLFormat(dri_format);
   if (dri_format != __DRI_IMAGE_FORMAT_NONE &&
       image->format == MESA_FORMAT_NONE) {
      free(image);
      return NULL;
   }

   image->internal_format = _mesa_get_format_base_format(image->format);
   image->data = loaderPrivate;

   return image;
}

/**
 * Sets up a DRIImage structure to point to a slice out of a miptree.
 */
static void
intel_setup_image_from_mipmap_tree(struct brw_context *brw, __DRIimage *image,
                                   struct intel_mipmap_tree *mt, GLuint level,
                                   GLuint zoffset)
{
   intel_miptree_make_shareable(brw, mt);

   intel_miptree_check_level_layer(mt, level, zoffset);

   image->width = minify(mt->physical_width0, level - mt->first_level);
   image->height = minify(mt->physical_height0, level - mt->first_level);
   image->pitch = mt->pitch;

   image->offset = intel_miptree_get_tile_offsets(mt, level, zoffset,
                                                  &image->tile_x,
                                                  &image->tile_y);

   drm_intel_bo_unreference(image->bo);
   image->bo = mt->bo;
   drm_intel_bo_reference(mt->bo);
}

static __DRIimage *
intel_create_image_from_name(__DRIscreen *dri_screen,
                             int width, int height, int format,
                             int name, int pitch, void *loaderPrivate)
{
   struct intel_screen *screen = dri_screen->driverPrivate;
   __DRIimage *image;
   int cpp;

   image = intel_allocate_image(format, loaderPrivate);
   if (image == NULL)
      return NULL;

   if (image->format == MESA_FORMAT_NONE)
      cpp = 1;
   else
      cpp = _mesa_get_format_bytes(image->format);

   image->width = width;
   image->height = height;
   image->pitch = pitch * cpp;
   image->bo = drm_intel_bo_gem_create_from_name(screen->bufmgr, "image",
                                                 name);
   if (image->bo == NULL) {
      free(image);
      return NULL;
   }

   return image;
}

static __DRIimage *
intel_create_image_from_renderbuffer(__DRIcontext *context,
                                     int renderbuffer, void *loaderPrivate)
{
   __DRIimage *image;
   struct brw_context *brw = context->driverPrivate;
   struct gl_context *ctx = &brw->ctx;
   struct gl_renderbuffer *rb;
   struct intel_renderbuffer *irb;

   rb = _mesa_lookup_renderbuffer(ctx, renderbuffer);
   if (!rb) {
      _mesa_error(ctx, GL_INVALID_OPERATION, "glRenderbufferExternalMESA");
      return NULL;
   }

   irb = intel_renderbuffer(rb);
   intel_miptree_make_shareable(brw, irb->mt);
   image = calloc(1, sizeof *image);
   if (image == NULL)
      return NULL;

   image->internal_format = rb->InternalFormat;
   image->format = rb->Format;
   image->data = loaderPrivate;
   drm_intel_bo_unreference(image->bo);
   image->bo = irb->mt->bo;
   drm_intel_bo_reference(irb->mt->bo);
   image->width = rb->Width;
   image->height = rb->Height;
   image->pitch = irb->mt->pitch;
   image->dri_format = driGLFormatToImageFormat(image->format);
   image->has_depthstencil = irb->mt->stencil_mt ? true : false;

   rb->NeedsFinishRenderTexture = true;
   return image;
}

static __DRIimage *
intel_create_image_from_texture(__DRIcontext *context, int target,
                                unsigned texture, int zoffset,
                                int level,
                                unsigned *error,
                                void *loaderPrivate)
{
   __DRIimage *image;
   struct brw_context *brw = context->driverPrivate;
   struct gl_texture_object *obj;
   struct intel_texture_object *iobj;
   GLuint face = 0;

   obj = _mesa_lookup_texture(&brw->ctx, texture);
   if (!obj || obj->Target != target) {
      *error = __DRI_IMAGE_ERROR_BAD_PARAMETER;
      return NULL;
   }

   if (target == GL_TEXTURE_CUBE_MAP)
      face = zoffset;

   _mesa_test_texobj_completeness(&brw->ctx, obj);
   iobj = intel_texture_object(obj);
   if (!obj->_BaseComplete || (level > 0 && !obj->_MipmapComplete)) {
      *error = __DRI_IMAGE_ERROR_BAD_PARAMETER;
      return NULL;
   }

   if (level < obj->BaseLevel || level > obj->_MaxLevel) {
      *error = __DRI_IMAGE_ERROR_BAD_MATCH;
      return NULL;
   }

   if (target == GL_TEXTURE_3D && obj->Image[face][level]->Depth < zoffset) {
      *error = __DRI_IMAGE_ERROR_BAD_MATCH;
      return NULL;
   }

   image = calloc(1, sizeof *image);
   if (image == NULL) {
      *error = __DRI_IMAGE_ERROR_BAD_ALLOC;
      return NULL;
   }

   image->internal_format = obj->Image[face][level]->InternalFormat;
   image->format = obj->Image[face][level]->TexFormat;
   image->data = loaderPrivate;
   intel_setup_image_from_mipmap_tree(brw, image, iobj->mt, level, zoffset);
   image->dri_format = driGLFormatToImageFormat(image->format);
   image->has_depthstencil = iobj->mt->stencil_mt ? true : false;
   if (image->dri_format == MESA_FORMAT_NONE) {
      *error = __DRI_IMAGE_ERROR_BAD_PARAMETER;
      free(image);
      return NULL;
   }

   *error = __DRI_IMAGE_ERROR_SUCCESS;
   return image;
}

static void
intel_destroy_image(__DRIimage *image)
{
   drm_intel_bo_unreference(image->bo);
   free(image);
}

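/*
 * Tiling selection for newly created images: X tiling by default, linear for
 * anything the caller wants to use as a cursor (which must also be 64x64) or
 * explicitly requests as linear.
 */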
static __DRIimage *
intel_create_image(__DRIscreen *dri_screen,
                   int width, int height, int format,
                   unsigned int use,
                   void *loaderPrivate)
{
   __DRIimage *image;
   struct intel_screen *screen = dri_screen->driverPrivate;
   uint32_t tiling;
   unsigned long pitch;
   int cpp;

   tiling = I915_TILING_X;
   if (use & __DRI_IMAGE_USE_CURSOR) {
      if (width != 64 || height != 64)
         return NULL;
      tiling = I915_TILING_NONE;
   }

   if (use & __DRI_IMAGE_USE_LINEAR)
      tiling = I915_TILING_NONE;

   image = intel_allocate_image(format, loaderPrivate);
   if (image == NULL)
      return NULL;

   cpp = _mesa_get_format_bytes(image->format);
   image->bo = drm_intel_bo_alloc_tiled(screen->bufmgr, "image",
                                        width, height, cpp, &tiling,
                                        &pitch, 0);
   if (image->bo == NULL) {
      free(image);
      return NULL;
   }
   image->width = width;
   image->height = height;
   image->pitch = pitch;

   return image;
}

static GLboolean
intel_query_image(__DRIimage *image, int attrib, int *value)
{
   switch (attrib) {
   case __DRI_IMAGE_ATTRIB_STRIDE:
      *value = image->pitch;
      return true;
   case __DRI_IMAGE_ATTRIB_HANDLE:
      *value = image->bo->handle;
      return true;
   case __DRI_IMAGE_ATTRIB_NAME:
      return !drm_intel_bo_flink(image->bo, (uint32_t *) value);
   case __DRI_IMAGE_ATTRIB_FORMAT:
      *value = image->dri_format;
      return true;
   case __DRI_IMAGE_ATTRIB_WIDTH:
      *value = image->width;
      return true;
   case __DRI_IMAGE_ATTRIB_HEIGHT:
      *value = image->height;
      return true;
   case __DRI_IMAGE_ATTRIB_COMPONENTS:
      if (image->planar_format == NULL)
         return false;
      *value = image->planar_format->components;
      return true;
   case __DRI_IMAGE_ATTRIB_FD:
      return !drm_intel_bo_gem_export_to_prime(image->bo, value);
   case __DRI_IMAGE_ATTRIB_FOURCC:
      return intel_lookup_fourcc(image->dri_format, value);
   case __DRI_IMAGE_ATTRIB_NUM_PLANES:
      *value = 1;
      return true;
   case __DRI_IMAGE_ATTRIB_OFFSET:
      *value = image->offset;
      return true;

   default:
      return false;
   }
}

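/*
 * Duplicate an image, sharing (and re-referencing) the underlying buffer
 * object while copying the per-image metadata.
 */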
static __DRIimage *
intel_dup_image(__DRIimage *orig_image, void *loaderPrivate)
{
   __DRIimage *image;

   image = calloc(1, sizeof *image);
   if (image == NULL)
      return NULL;

   drm_intel_bo_reference(orig_image->bo);
   image->bo              = orig_image->bo;
   image->internal_format = orig_image->internal_format;
   image->planar_format   = orig_image->planar_format;
   image->dri_format      = orig_image->dri_format;
   image->format          = orig_image->format;
   image->offset          = orig_image->offset;
   image->width           = orig_image->width;
   image->height          = orig_image->height;
   image->pitch           = orig_image->pitch;
   image->tile_x          = orig_image->tile_x;
   image->tile_y          = orig_image->tile_y;
   image->has_depthstencil = orig_image->has_depthstencil;
   image->data            = loaderPrivate;

   memcpy(image->strides, orig_image->strides, sizeof(image->strides));
   memcpy(image->offsets, orig_image->offsets, sizeof(image->offsets));

   return image;
}

static GLboolean
intel_validate_usage(__DRIimage *image, unsigned int use)
{
   if (use & __DRI_IMAGE_USE_CURSOR) {
      if (image->width != 64 || image->height != 64)
         return GL_FALSE;
   }

   return GL_TRUE;
}

static __DRIimage *
intel_create_image_from_names(__DRIscreen *dri_screen,
                              int width, int height, int fourcc,
                              int *names, int num_names,
                              int *strides, int *offsets,
                              void *loaderPrivate)
{
   struct intel_image_format *f = NULL;
   __DRIimage *image;
   int i, index;

   if (dri_screen == NULL || names == NULL || num_names != 1)
      return NULL;

   f = intel_image_format_lookup(fourcc);
   if (f == NULL)
      return NULL;

   image = intel_create_image_from_name(dri_screen, width, height,
                                        __DRI_IMAGE_FORMAT_NONE,
                                        names[0], strides[0],
                                        loaderPrivate);
   if (image == NULL)
      return NULL;

   image->planar_format = f;
   for (i = 0; i < f->nplanes; i++) {
      index = f->planes[i].buffer_index;
      image->offsets[index] = offsets[index];
      image->strides[index] = strides[index];
   }

   return image;
}

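/*
 * Import an image from a set of dma-buf file descriptors. All planes must
 * refer to the same buffer object; the per-plane strides and offsets are
 * recorded on the image for later use by fromPlanar().
 */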
static __DRIimage *
intel_create_image_from_fds(__DRIscreen *dri_screen,
                            int width, int height, int fourcc,
                            int *fds, int num_fds, int *strides, int *offsets,
                            void *loaderPrivate)
{
   struct intel_screen *screen = dri_screen->driverPrivate;
   struct intel_image_format *f;
   __DRIimage *image;
   int i, index;

   if (fds == NULL || num_fds < 1)
      return NULL;

   /* We only support all planes from the same bo */
   for (i = 0; i < num_fds; i++)
      if (fds[0] != fds[i])
         return NULL;

   f = intel_image_format_lookup(fourcc);
   if (f == NULL)
      return NULL;

   if (f->nplanes == 1)
      image = intel_allocate_image(f->planes[0].dri_format, loaderPrivate);
   else
      image = intel_allocate_image(__DRI_IMAGE_FORMAT_NONE, loaderPrivate);

   if (image == NULL)
      return NULL;

   image->width = width;
   image->height = height;
   image->pitch = strides[0];

   image->planar_format = f;

   int size = 0;
   for (i = 0; i < f->nplanes; i++) {
      index = f->planes[i].buffer_index;
      image->offsets[index] = offsets[index];
      image->strides[index] = strides[index];

      const int plane_height = height >> f->planes[i].height_shift;
      const int end = offsets[index] + plane_height * strides[index];
      if (size < end)
         size = end;
   }

   image->bo = drm_intel_bo_gem_create_from_prime(screen->bufmgr,
                                                  fds[0], size);
   if (image->bo == NULL) {
      free(image);
      return NULL;
   }

   if (f->nplanes == 1) {
      image->offset = image->offsets[0];
      intel_image_warn_if_unaligned(image, __func__);
   }

   return image;
}

static __DRIimage *
intel_create_image_from_dma_bufs(__DRIscreen *dri_screen,
                                 int width, int height, int fourcc,
                                 int *fds, int num_fds,
                                 int *strides, int *offsets,
                                 enum __DRIYUVColorSpace yuv_color_space,
                                 enum __DRISampleRange sample_range,
                                 enum __DRIChromaSiting horizontal_siting,
                                 enum __DRIChromaSiting vertical_siting,
                                 unsigned *error,
                                 void *loaderPrivate)
{
   __DRIimage *image;
   struct intel_image_format *f = intel_image_format_lookup(fourcc);

   if (!f) {
      *error = __DRI_IMAGE_ERROR_BAD_MATCH;
      return NULL;
   }

   image = intel_create_image_from_fds(dri_screen, width, height, fourcc, fds,
                                       num_fds, strides, offsets,
                                       loaderPrivate);

   /*
    * Invalid parameters and any inconsistencies between them are assumed to
    * be checked by the caller. Therefore, besides unsupported formats, we
    * can fail only on allocation.
    */
   if (image == NULL) {
      *error = __DRI_IMAGE_ERROR_BAD_ALLOC;
      return NULL;
   }

   image->dma_buf_imported = true;
   image->yuv_color_space = yuv_color_space;
   image->sample_range = sample_range;
   image->horizontal_siting = horizontal_siting;
   image->vertical_siting = vertical_siting;

   *error = __DRI_IMAGE_ERROR_SUCCESS;
   return image;
}

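/*
 * Create a sub-image for a single plane of a planar image, sharing the
 * parent's buffer object and taking the plane's stride and offset from the
 * parent image.
 */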
static __DRIimage *
intel_from_planar(__DRIimage *parent, int plane, void *loaderPrivate)
{
   int width, height, offset, stride, dri_format, index;
   struct intel_image_format *f;
   __DRIimage *image;

   if (parent == NULL || parent->planar_format == NULL)
      return NULL;

   f = parent->planar_format;

   if (plane >= f->nplanes)
      return NULL;

   width = parent->width >> f->planes[plane].width_shift;
   height = parent->height >> f->planes[plane].height_shift;
   dri_format = f->planes[plane].dri_format;
   index = f->planes[plane].buffer_index;
   offset = parent->offsets[index];
   stride = parent->strides[index];

   image = intel_allocate_image(dri_format, loaderPrivate);
   if (image == NULL)
      return NULL;

   if (offset + height * stride > parent->bo->size) {
      _mesa_warning(NULL, "intel_create_sub_image: subimage out of bounds");
      free(image);
      return NULL;
   }

   image->bo = parent->bo;
   drm_intel_bo_reference(parent->bo);

   image->width = width;
   image->height = height;
   image->pitch = stride;
   image->offset = offset;

   intel_image_warn_if_unaligned(image, __func__);

   return image;
}

static const __DRIimageExtension intelImageExtension = {
    .base = { __DRI_IMAGE, 13 },

    .createImageFromName                = intel_create_image_from_name,
    .createImageFromRenderbuffer        = intel_create_image_from_renderbuffer,
    .destroyImage                       = intel_destroy_image,
    .createImage                        = intel_create_image,
    .queryImage                         = intel_query_image,
    .dupImage                           = intel_dup_image,
    .validateUsage                      = intel_validate_usage,
    .createImageFromNames               = intel_create_image_from_names,
    .fromPlanar                         = intel_from_planar,
    .createImageFromTexture             = intel_create_image_from_texture,
    .createImageFromFds                 = intel_create_image_from_fds,
    .createImageFromDmaBufs             = intel_create_image_from_dma_bufs,
    .getCapabilities                    = NULL,
};

static int
brw_query_renderer_integer(__DRIscreen *dri_screen,
                           int param, unsigned int *value)
{
   const struct intel_screen *const screen =
      (struct intel_screen *) dri_screen->driverPrivate;

   switch (param) {
   case __DRI2_RENDERER_VENDOR_ID:
      value[0] = 0x8086;
      return 0;
   case __DRI2_RENDERER_DEVICE_ID:
      value[0] = screen->deviceID;
      return 0;
   case __DRI2_RENDERER_ACCELERATED:
      value[0] = 1;
      return 0;
   case __DRI2_RENDERER_VIDEO_MEMORY: {
      /* Once a batch uses more than 75% of the maximum mappable size, we
       * assume that there's some fragmentation, and we start doing extra
       * flushing, etc.  That's the big cliff apps will care about.
       */
      size_t aper_size;
      size_t mappable_size;

      drm_intel_get_aperture_sizes(dri_screen->fd, &mappable_size, &aper_size);

      const unsigned gpu_mappable_megabytes =
         (aper_size / (1024 * 1024)) * 3 / 4;

      const long system_memory_pages = sysconf(_SC_PHYS_PAGES);
      const long system_page_size = sysconf(_SC_PAGE_SIZE);

      if (system_memory_pages <= 0 || system_page_size <= 0)
         return -1;

      const uint64_t system_memory_bytes = (uint64_t) system_memory_pages
         * (uint64_t) system_page_size;

      const unsigned system_memory_megabytes =
         (unsigned) (system_memory_bytes / (1024 * 1024));

      value[0] = MIN2(system_memory_megabytes, gpu_mappable_megabytes);
      return 0;
   }
   case __DRI2_RENDERER_UNIFIED_MEMORY_ARCHITECTURE:
      value[0] = 1;
      return 0;
   case __DRI2_RENDERER_HAS_TEXTURE_3D:
      value[0] = 1;
      return 0;
   default:
      return driQueryRendererIntegerCommon(dri_screen, param, value);
   }
}

static int
brw_query_renderer_string(__DRIscreen *dri_screen,
                          int param, const char **value)
{
   const struct intel_screen *screen =
      (struct intel_screen *) dri_screen->driverPrivate;

   switch (param) {
   case __DRI2_RENDERER_VENDOR_ID:
      value[0] = brw_vendor_string;
      return 0;
   case __DRI2_RENDERER_DEVICE_ID:
      value[0] = brw_get_renderer_string(screen);
      return 0;
   default:
      break;
   }

   return -1;
}

static const __DRI2rendererQueryExtension intelRendererQueryExtension = {
   .base = { __DRI2_RENDERER_QUERY, 1 },

   .queryInteger = brw_query_renderer_integer,
   .queryString = brw_query_renderer_string
};

static const __DRIrobustnessExtension dri2Robustness = {
   .base = { __DRI2_ROBUSTNESS, 1 }
};

static const __DRIextension *screenExtensions[] = {
    &intelTexBufferExtension.base,
    &intelFenceExtension.base,
    &intelFlushExtension.base,
    &intelImageExtension.base,
    &intelRendererQueryExtension.base,
    &dri2ConfigQueryExtension.base,
    NULL
};

static const __DRIextension *intelRobustScreenExtensions[] = {
    &intelTexBufferExtension.base,
    &intelFenceExtension.base,
    &intelFlushExtension.base,
    &intelImageExtension.base,
    &intelRendererQueryExtension.base,
    &dri2ConfigQueryExtension.base,
    &dri2Robustness.base,
    NULL
};

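/*
 * Thin wrapper around DRM_IOCTL_I915_GETPARAM: returns 0 on success or a
 * negative errno, warning on unexpected failures.
 */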
static int
intel_get_param(struct intel_screen *screen, int param, int *value)
{
   int ret = 0;
   struct drm_i915_getparam gp;

   memset(&gp, 0, sizeof(gp));
   gp.param = param;
   gp.value = value;

   if (drmIoctl(screen->driScrnPriv->fd, DRM_IOCTL_I915_GETPARAM, &gp) == -1) {
      ret = -errno;
      if (ret != -EINVAL)
         _mesa_warning(NULL, "drm_i915_getparam: %d", ret);
   }

   return ret;
}

static bool
intel_get_boolean(struct intel_screen *screen, int param)
{
   int value = 0;
   return (intel_get_param(screen, param, &value) == 0) && value;
}

static int
intel_get_integer(struct intel_screen *screen, int param)
{
   int value = -1;

   if (intel_get_param(screen, param, &value) == 0)
      return value;

   return -1;
}

static void
intelDestroyScreen(__DRIscreen *sPriv)
{
   struct intel_screen *screen = sPriv->driverPrivate;

   dri_bufmgr_destroy(screen->bufmgr);
   driDestroyOptionInfo(&screen->optionCache);

   ralloc_free(screen);
   sPriv->driverPrivate = NULL;
}

/**
 * This is called when we need to set up GL rendering to a new X window.
 */
static GLboolean
intelCreateBuffer(__DRIscreen *dri_screen,
                  __DRIdrawable *driDrawPriv,
                  const struct gl_config *mesaVis, GLboolean isPixmap)
{
   struct intel_renderbuffer *rb;
   struct intel_screen *screen = (struct intel_screen *)
      dri_screen->driverPrivate;
   mesa_format rgbFormat;
   unsigned num_samples =
      intel_quantize_num_samples(screen, mesaVis->samples);
   struct gl_framebuffer *fb;

   if (isPixmap)
      return false;

   fb = CALLOC_STRUCT(gl_framebuffer);
   if (!fb)
      return false;

   _mesa_initialize_window_framebuffer(fb, mesaVis);

   if (screen->winsys_msaa_samples_override != -1) {
      num_samples = screen->winsys_msaa_samples_override;
      fb->Visual.samples = num_samples;
   }

   if (mesaVis->redBits == 5) {
      rgbFormat = mesaVis->redMask == 0x1f ? MESA_FORMAT_R5G6B5_UNORM
                                           : MESA_FORMAT_B5G6R5_UNORM;
   } else if (mesaVis->sRGBCapable) {
      rgbFormat = mesaVis->redMask == 0xff ? MESA_FORMAT_R8G8B8A8_SRGB
                                           : MESA_FORMAT_B8G8R8A8_SRGB;
   } else if (mesaVis->alphaBits == 0) {
      rgbFormat = mesaVis->redMask == 0xff ? MESA_FORMAT_R8G8B8X8_UNORM
                                           : MESA_FORMAT_B8G8R8X8_UNORM;
   } else {
      rgbFormat = mesaVis->redMask == 0xff ? MESA_FORMAT_R8G8B8A8_SRGB
                                           : MESA_FORMAT_B8G8R8A8_SRGB;
      fb->Visual.sRGBCapable = true;
   }

   /* setup the hardware-based renderbuffers */
   rb = intel_create_renderbuffer(rgbFormat, num_samples);
   _mesa_add_renderbuffer(fb, BUFFER_FRONT_LEFT, &rb->Base.Base);

   if (mesaVis->doubleBufferMode) {
      rb = intel_create_renderbuffer(rgbFormat, num_samples);
      _mesa_add_renderbuffer(fb, BUFFER_BACK_LEFT, &rb->Base.Base);
   }

   /*
    * Assert here that the gl_config has an expected depth/stencil bit
    * combination: one of d24/s8, d16/s0, d0/s0. (See intelInitScreen2(),
    * which constructs the advertised configs.)
    */
   if (mesaVis->depthBits == 24) {
      assert(mesaVis->stencilBits == 8);

      if (screen->devinfo.has_hiz_and_separate_stencil) {
         rb = intel_create_private_renderbuffer(MESA_FORMAT_Z24_UNORM_X8_UINT,
                                                num_samples);
         _mesa_add_renderbuffer(fb, BUFFER_DEPTH, &rb->Base.Base);
         rb = intel_create_private_renderbuffer(MESA_FORMAT_S_UINT8,
                                                num_samples);
         _mesa_add_renderbuffer(fb, BUFFER_STENCIL, &rb->Base.Base);
      } else {
         /*
          * Use combined depth/stencil. Note that the renderbuffer is
          * attached to two attachment points.
          */
         rb = intel_create_private_renderbuffer(MESA_FORMAT_Z24_UNORM_S8_UINT,
                                                num_samples);
         _mesa_add_renderbuffer(fb, BUFFER_DEPTH, &rb->Base.Base);
         _mesa_add_renderbuffer(fb, BUFFER_STENCIL, &rb->Base.Base);
      }
   }
   else if (mesaVis->depthBits == 16) {
      assert(mesaVis->stencilBits == 0);
      rb = intel_create_private_renderbuffer(MESA_FORMAT_Z_UNORM16,
                                             num_samples);
      _mesa_add_renderbuffer(fb, BUFFER_DEPTH, &rb->Base.Base);
   }
   else {
      assert(mesaVis->depthBits == 0);
      assert(mesaVis->stencilBits == 0);
   }

   /* now add any/all software-based renderbuffers we may need */
   _swrast_add_soft_renderbuffers(fb,
                                  false, /* never sw color */
                                  false, /* never sw depth */
                                  false, /* never sw stencil */
                                  mesaVis->accumRedBits > 0,
                                  false, /* never sw alpha */
                                  false /* never sw aux */ );
   driDrawPriv->driverPrivate = fb;

   return true;
}

static void
intelDestroyBuffer(__DRIdrawable *driDrawPriv)
{
   struct gl_framebuffer *fb = driDrawPriv->driverPrivate;

   _mesa_reference_framebuffer(&fb, NULL);
}

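/*
 * Query the subslice and EU totals from the kernel (I915_PARAM_SUBSLICE_TOTAL
 * and I915_PARAM_EU_TOTAL). On kernels that lack these parameters the counts
 * are left at -1 and a warning is emitted.
 */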
static void
intel_detect_sseu(struct intel_screen *screen)
{
   assert(screen->devinfo.gen >= 8);
   int ret;

   screen->subslice_total = -1;
   screen->eu_total = -1;

   ret = intel_get_param(screen, I915_PARAM_SUBSLICE_TOTAL,
                         &screen->subslice_total);
   if (ret < 0 && ret != -EINVAL)
      goto err_out;

   ret = intel_get_param(screen,
                         I915_PARAM_EU_TOTAL, &screen->eu_total);
   if (ret < 0 && ret != -EINVAL)
      goto err_out;

   /* Without this information, we cannot get the right Braswell brandstrings,
    * and we have to use conservative numbers for GPGPU on many platforms, but
    * otherwise, things will just work.
    */
   if (screen->subslice_total < 1 || screen->eu_total < 1)
      _mesa_warning(NULL,
                    "Kernel 4.1 required to properly query GPU properties.\n");

   return;

err_out:
   screen->subslice_total = -1;
   screen->eu_total = -1;
   _mesa_warning(NULL, "Failed to query GPU properties (%s).\n", strerror(-ret));
}

static bool
intel_init_bufmgr(struct intel_screen *screen)
{
   __DRIscreen *dri_screen = screen->driScrnPriv;

   screen->no_hw = getenv("INTEL_NO_HW") != NULL;

   screen->bufmgr = intel_bufmgr_gem_init(dri_screen->fd, BATCH_SZ);
   if (screen->bufmgr == NULL) {
      fprintf(stderr, "[%s:%u] Error initializing buffer manager.\n",
              __func__, __LINE__);
      return false;
   }

   drm_intel_bufmgr_gem_enable_fenced_relocs(screen->bufmgr);

   if (!intel_get_boolean(screen, I915_PARAM_HAS_RELAXED_DELTA)) {
      fprintf(stderr, "[%s: %u] Kernel 2.6.39 required.\n", __func__, __LINE__);
      return false;
   }

   return true;
}

static bool
intel_detect_swizzling(struct intel_screen *screen)
{
   drm_intel_bo *buffer;
   unsigned long flags = 0;
   unsigned long aligned_pitch;
   uint32_t tiling = I915_TILING_X;
   uint32_t swizzle_mode = 0;

   buffer = drm_intel_bo_alloc_tiled(screen->bufmgr, "swizzle test",
                                     64, 64, 4,
                                     &tiling, &aligned_pitch, flags);
   if (buffer == NULL)
      return false;

   drm_intel_bo_get_tiling(buffer, &tiling, &swizzle_mode);
   drm_intel_bo_unreference(buffer);

   if (swizzle_mode == I915_BIT_6_SWIZZLE_NONE)
      return false;
   else
      return true;
}

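/*
 * Detect how the TIMESTAMP register can be read on this kernel and CPU
 * combination. The return value distinguishes the cases handled below:
 * 0 means no usable timestamp, 1 means the register reads back unshifted,
 * 2 means the low 32 bits land in the upper dword, and 3 means the kernel
 * can read the full register directly.
 */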
static int
intel_detect_timestamp(struct intel_screen *screen)
{
   uint64_t dummy = 0, last = 0;
   int upper, lower, loops;

   /* On 64bit systems, some old kernels trigger a hw bug resulting in the
    * TIMESTAMP register being shifted and the low 32bits always zero.
    *
    * More recent kernels offer an interface to read the full 36bits
    * everywhere.
    */
   if (drm_intel_reg_read(screen->bufmgr, TIMESTAMP | 1, &dummy) == 0)
      return 3;

   /* Determine if we have a 32bit or 64bit kernel by inspecting the
    * upper 32bits for a rapidly changing timestamp.
    */
   if (drm_intel_reg_read(screen->bufmgr, TIMESTAMP, &last))
      return 0;

   upper = lower = 0;
   for (loops = 0; loops < 10; loops++) {
      /* The TIMESTAMP should change every 80ns, so several round trips
       * through the kernel should be enough to advance it.
       */
      if (drm_intel_reg_read(screen->bufmgr, TIMESTAMP, &dummy))
         return 0;

      upper += (dummy >> 32) != (last >> 32);
      if (upper > 1) /* beware 32bit counter overflow */
         return 2; /* upper dword holds the low 32bits of the timestamp */

      lower += (dummy & 0xffffffff) != (last & 0xffffffff);
      if (lower > 1)
         return 1; /* timestamp is unshifted */

      last = dummy;
   }

   /* No advancement? No timestamp! */
   return 0;
}

/**
 * Test if we can use MI_LOAD_REGISTER_MEM from an untrusted batchbuffer.
 *
 * Some combinations of hardware and kernel versions allow this feature,
 * while others don't.  Instead of trying to enumerate every case, just
 * try and write a register and see if it works.
 */
static bool
intel_detect_pipelined_register(struct intel_screen *screen,
                                int reg, uint32_t expected_value, bool reset)
{
   drm_intel_bo *results, *bo;
   uint32_t *batch;
   uint32_t offset = 0;
   bool success = false;

   /* Create a zero'ed temporary buffer for reading our results */
   results = drm_intel_bo_alloc(screen->bufmgr, "registers", 4096, 0);
   if (results == NULL)
      goto err;

   bo = drm_intel_bo_alloc(screen->bufmgr, "batchbuffer", 4096, 0);
   if (bo == NULL)
      goto err_results;

   if (drm_intel_bo_map(bo, 1))
      goto err_batch;

   batch = bo->virtual;

   /* Write the register. */
   *batch++ = MI_LOAD_REGISTER_IMM | (3 - 2);
   *batch++ = reg;
   *batch++ = expected_value;

   /* Save the register's value back to the buffer. */
   *batch++ = MI_STORE_REGISTER_MEM | (3 - 2);
   *batch++ = reg;
   drm_intel_bo_emit_reloc(bo, (char *)batch - (char *)bo->virtual,
                           results, offset*sizeof(uint32_t),
                           I915_GEM_DOMAIN_INSTRUCTION,
                           I915_GEM_DOMAIN_INSTRUCTION);
   *batch++ = results->offset + offset*sizeof(uint32_t);

   /* And afterwards clear the register */
   if (reset) {
      *batch++ = MI_LOAD_REGISTER_IMM | (3 - 2);
      *batch++ = reg;
      *batch++ = 0;
   }

   *batch++ = MI_BATCH_BUFFER_END;

   drm_intel_bo_mrb_exec(bo, ALIGN((char *)batch - (char *)bo->virtual, 8),
                         NULL, 0, 0,
                         I915_EXEC_RENDER);

   /* Check whether the value got written. */
   if (drm_intel_bo_map(results, false) == 0) {
      success = *((uint32_t *)results->virtual + offset) == expected_value;
      drm_intel_bo_unmap(results);
   }

err_batch:
   drm_intel_bo_unreference(bo);
err_results:
   drm_intel_bo_unreference(results);
err:
   return success;
}

static bool
intel_detect_pipelined_so(struct intel_screen *screen)
{
   /* Supposedly, Broadwell just works. */
   if (screen->devinfo.gen >= 8)
      return true;

   if (screen->devinfo.gen <= 6)
      return false;

   /* We use SO_WRITE_OFFSET0 since you're supposed to write it (unlike the
    * statistics registers), and we already reset it to zero before using it.
    */
   return intel_detect_pipelined_register(screen,
                                          GEN7_SO_WRITE_OFFSET(0),
                                          random(),
                                          true);
}

/**
 * Return array of MSAA modes supported by the hardware. The array is
 * zero-terminated and sorted in decreasing order.
 */
const int*
intel_supported_msaa_modes(const struct intel_screen *screen)
{
   static const int gen9_modes[] = {16, 8, 4, 2, 0, -1};
   static const int gen8_modes[] = {8, 4, 2, 0, -1};
   static const int gen7_modes[] = {8, 4, 0, -1};
   static const int gen6_modes[] = {4, 0, -1};
   static const int gen4_modes[] = {0, -1};

   if (screen->devinfo.gen >= 9) {
      return gen9_modes;
   } else if (screen->devinfo.gen >= 8) {
      return gen8_modes;
   } else if (screen->devinfo.gen >= 7) {
      return gen7_modes;
   } else if (screen->devinfo.gen == 6) {
      return gen6_modes;
   } else {
      return gen4_modes;
   }
}

static __DRIconfig**
intel_screen_make_configs(__DRIscreen *dri_screen)
{
   static const mesa_format formats[] = {
      MESA_FORMAT_B5G6R5_UNORM,
      MESA_FORMAT_B8G8R8A8_UNORM,
      MESA_FORMAT_B8G8R8X8_UNORM
   };

   /* GLX_SWAP_COPY_OML is not supported due to page flipping. */
   static const GLenum back_buffer_modes[] = {
      GLX_SWAP_UNDEFINED_OML, GLX_NONE,
   };

   static const uint8_t singlesample_samples[1] = {0};
   static const uint8_t multisample_samples[2]  = {4, 8};

   struct intel_screen *screen = dri_screen->driverPrivate;
   const struct gen_device_info *devinfo = &screen->devinfo;
   uint8_t depth_bits[4], stencil_bits[4];
   __DRIconfig **configs = NULL;

   /* Generate singlesample configs without accumulation buffer. */
   for (unsigned i = 0; i < ARRAY_SIZE(formats); i++) {
      __DRIconfig **new_configs;
      int num_depth_stencil_bits = 2;

      /* Starting with DRI2 protocol version 1.1 we can request a depth/stencil
       * buffer that has a different number of bits per pixel than the color
       * buffer; gen >= 6 supports this.
       */
      depth_bits[0] = 0;
      stencil_bits[0] = 0;

      if (formats[i] == MESA_FORMAT_B5G6R5_UNORM) {
         depth_bits[1] = 16;
         stencil_bits[1] = 0;
         if (devinfo->gen >= 6) {
            depth_bits[2] = 24;
            stencil_bits[2] = 8;
            num_depth_stencil_bits = 3;
         }
      } else {
         depth_bits[1] = 24;
         stencil_bits[1] = 8;
      }

      new_configs = driCreateConfigs(formats[i],
                                     depth_bits,
                                     stencil_bits,
                                     num_depth_stencil_bits,
                                     back_buffer_modes, 2,
                                     singlesample_samples, 1,
                                     false, false);
      configs = driConcatConfigs(configs, new_configs);
   }

   /* Generate the minimum possible set of configs that include an
    * accumulation buffer.
    */
   for (unsigned i = 0; i < ARRAY_SIZE(formats); i++) {
      __DRIconfig **new_configs;

      if (formats[i] == MESA_FORMAT_B5G6R5_UNORM) {
         depth_bits[0] = 16;
         stencil_bits[0] = 0;
      } else {
         depth_bits[0] = 24;
         stencil_bits[0] = 8;
      }

      new_configs = driCreateConfigs(formats[i],
                                     depth_bits, stencil_bits, 1,
                                     back_buffer_modes, 1,
                                     singlesample_samples, 1,
                                     true, false);
      configs = driConcatConfigs(configs, new_configs);
   }

   /* Generate multisample configs.
    *
    * This loop breaks early, and hence is a no-op, on gen < 6.
    *
    * Multisample configs must follow the singlesample configs in order to
    * work around an X server bug present in 1.12. The X server chooses to
    * associate the first listed RGBA888-Z24S8 config, regardless of its
    * sample count, with the 32-bit depth visual used for compositing.
    *
    * Only doublebuffer configs with GLX_SWAP_UNDEFINED_OML behavior are
    * supported.  Singlebuffer configs are not supported because no one wants
    * them.
    */
   for (unsigned i = 0; i < ARRAY_SIZE(formats); i++) {
      if (devinfo->gen < 6)
         break;

      __DRIconfig **new_configs;
      const int num_depth_stencil_bits = 2;
      int num_msaa_modes = 0;

      depth_bits[0] = 0;
      stencil_bits[0] = 0;

      if (formats[i] == MESA_FORMAT_B5G6R5_UNORM) {
         depth_bits[1] = 16;
         stencil_bits[1] = 0;
      } else {
         depth_bits[1] = 24;
         stencil_bits[1] = 8;
      }

      if (devinfo->gen >= 7)
         num_msaa_modes = 2;
      else if (devinfo->gen == 6)
         num_msaa_modes = 1;

      new_configs = driCreateConfigs(formats[i],
                                     depth_bits,
                                     stencil_bits,
                                     num_depth_stencil_bits,
                                     back_buffer_modes, 1,
                                     multisample_samples,
                                     num_msaa_modes,
                                     false, false);
      configs = driConcatConfigs(configs, new_configs);
   }

   if (configs == NULL) {
      fprintf(stderr, "[%s:%u] Error creating FBConfig!\n", __func__,
              __LINE__);
      return NULL;
   }

   return configs;
}

static void
set_max_gl_versions(struct intel_screen *screen)
{
   __DRIscreen *dri_screen = screen->driScrnPriv;
   const bool has_astc = screen->devinfo.gen >= 9;

   switch (screen->devinfo.gen) {
   case 9:
   case 8:
      dri_screen->max_gl_core_version = 45;
      dri_screen->max_gl_compat_version = 30;
      dri_screen->max_gl_es1_version = 11;
      dri_screen->max_gl_es2_version = has_astc ? 32 : 31;
      break;
   case 7:
      dri_screen->max_gl_core_version = screen->devinfo.is_haswell &&
         can_do_pipelined_register_writes(screen) ? 45 : 33;
      dri_screen->max_gl_compat_version = 30;
      dri_screen->max_gl_es1_version = 11;
      dri_screen->max_gl_es2_version = screen->devinfo.is_haswell ? 31 : 30;
      break;
   case 6:
      dri_screen->max_gl_core_version = 33;
      dri_screen->max_gl_compat_version = 30;
      dri_screen->max_gl_es1_version = 11;
      dri_screen->max_gl_es2_version = 30;
      break;
   case 5:
   case 4:
      dri_screen->max_gl_core_version = 0;
      dri_screen->max_gl_compat_version = 21;
      dri_screen->max_gl_es1_version = 11;
      dri_screen->max_gl_es2_version = 20;
      break;
   default:
      unreachable("unrecognized intel_screen::gen");
   }
}

/**
 * Return the revision (generally the revid field of the PCI header) of the
 * graphics device.
 *
 * XXX: This function is useful to keep around even if it is not currently in
 * use. It is necessary for new platforms and revision specific workarounds or
 * features. Please don't remove it so that we know it at least continues to
 * build.
 */
static __attribute__((__unused__)) int
brw_get_revision(int fd)
{
   struct drm_i915_getparam gp;
   int revision;
   int ret;

   memset(&gp, 0, sizeof(gp));
   gp.param = I915_PARAM_REVISION;
   gp.value = &revision;

   ret = drmCommandWriteRead(fd, DRM_I915_GETPARAM, &gp, sizeof(gp));
   if (ret)
      revision = -1;

   return revision;
}

static void
shader_debug_log_mesa(void *data, const char *fmt, ...)
{
   struct brw_context *brw = (struct brw_context *)data;
   va_list args;

   va_start(args, fmt);
   GLuint msg_id = 0;
   _mesa_gl_vdebug(&brw->ctx, &msg_id,
                   MESA_DEBUG_SOURCE_SHADER_COMPILER,
                   MESA_DEBUG_TYPE_OTHER,
                   MESA_DEBUG_SEVERITY_NOTIFICATION, fmt, args);
   va_end(args);
}

static void
shader_perf_log_mesa(void *data, const char *fmt, ...)
{
   struct brw_context *brw = (struct brw_context *)data;

   va_list args;
   va_start(args, fmt);

   if (unlikely(INTEL_DEBUG & DEBUG_PERF)) {
      va_list args_copy;
      va_copy(args_copy, args);
      vfprintf(stderr, fmt, args_copy);
      va_end(args_copy);
   }

   if (brw->perf_debug) {
      GLuint msg_id = 0;
      _mesa_gl_vdebug(&brw->ctx, &msg_id,
                      MESA_DEBUG_SOURCE_SHADER_COMPILER,
                      MESA_DEBUG_TYPE_PERFORMANCE,
                      MESA_DEBUG_SEVERITY_MEDIUM, fmt, args);
   }
   va_end(args);
}

/**
 * This is the driver specific part of the createNewScreen entry point.
 * Called when using DRI2.
 *
 * \return the struct gl_config supported by this driver
 */
static const
__DRIconfig **intelInitScreen2(__DRIscreen *dri_screen)
{
   struct intel_screen *screen;

   if (dri_screen->image.loader) {
   } else if (dri_screen->dri2.loader->base.version <= 2 ||
              dri_screen->dri2.loader->getBuffersWithFormat == NULL) {
      fprintf(stderr,
              "\nERROR! DRI2 loader with getBuffersWithFormat() "
              "support required\n");
      return NULL;
   }

   /* Allocate the private area */
   screen = rzalloc(NULL, struct intel_screen);
   if (!screen) {
      fprintf(stderr, "\nERROR! Allocating private area failed\n");
      return NULL;
   }
   /* parse information in __driConfigOptions */
   driParseOptionInfo(&screen->optionCache, brw_config_options.xml);

   screen->driScrnPriv = dri_screen;
   dri_screen->driverPrivate = (void *) screen;

   if (!intel_init_bufmgr(screen))
      return NULL;

   screen->deviceID = drm_intel_bufmgr_gem_get_devid(screen->bufmgr);
   if (!gen_get_device_info(screen->deviceID, &screen->devinfo))
      return NULL;

   const struct gen_device_info *devinfo = &screen->devinfo;

   brw_process_intel_debug_variable();

   if (INTEL_DEBUG & DEBUG_BUFMGR)
      dri_bufmgr_set_debug(screen->bufmgr, true);

   if ((INTEL_DEBUG & DEBUG_SHADER_TIME) && devinfo->gen < 7) {
      fprintf(stderr,
              "shader_time debugging requires gen7 (Ivybridge) or better.\n");
      INTEL_DEBUG &= ~DEBUG_SHADER_TIME;
   }

   if (intel_get_integer(screen, I915_PARAM_MMAP_GTT_VERSION) >= 1) {
      /* Theoretically unlimited! At least for individual objects...
       *
       * Currently the entire (global) address space for all GTT maps is
       * limited to 64bits. That is, all objects on the system that are
       * set up for GTT mmapping must fit within 64bits. An attempt to use
       * one that exceeds the limit will fail in drm_intel_bo_map_gtt().
       *
       * Long before we hit that limit, we will be practically limited by
       * the fact that any single object must fit in physical memory (RAM).
       * The upper limit on the CPU's address space is currently 48bits
       * (Skylake), of which only 39bits can be physical memory. (The GPU
       * itself also has a 48bit addressable virtual space.) We can fit over
       * 32 million objects of the current maximum allocable size before
       * running out of mmap space.
       */
      screen->max_gtt_map_object_size = UINT64_MAX;
   } else {
      /* Estimate the size of the mappable aperture into the GTT.  There's an
       * ioctl to get the whole GTT size, but not one to get the mappable subset.
       * It turns out it's basically always 256MB, though some ancient hardware
       * was smaller.
       */
      uint32_t gtt_size = 256 * 1024 * 1024;

      /* We don't want to map two objects such that a memcpy between them would
       * just fault one mapping in and then the other over and over forever.  So
       * we would need to divide the GTT size by 2.  Additionally, some GTT is
       * taken up by things like the framebuffer and the ringbuffer and such, so
       * be more conservative.
       */
      screen->max_gtt_map_object_size = gtt_size / 4;
   }

   screen->hw_has_swizzling = intel_detect_swizzling(screen);
   screen->hw_has_timestamp = intel_detect_timestamp(screen);

   /* GENs prior to 8 do not support EU/Subslice info */
   if (devinfo->gen >= 8) {
      intel_detect_sseu(screen);
   } else if (devinfo->gen == 7) {
      screen->subslice_total = 1 << (devinfo->gt - 1);
   }

   if (intel_detect_pipelined_so(screen))
      screen->kernel_features |= KERNEL_ALLOWS_SOL_OFFSET_WRITES;

   const char *force_msaa = getenv("INTEL_FORCE_MSAA");
   if (force_msaa) {
      screen->winsys_msaa_samples_override =
         intel_quantize_num_samples(screen, atoi(force_msaa));
      printf("Forcing winsys sample count to %d\n",
             screen->winsys_msaa_samples_override);
   } else {
      screen->winsys_msaa_samples_override = -1;
   }

   set_max_gl_versions(screen);

   /* Notification of GPU resets requires hardware contexts and a kernel new
    * enough to support DRM_IOCTL_I915_GET_RESET_STATS.  If the ioctl is
    * supported, calling it with a context of 0 will either generate EPERM or
    * no error.  If the ioctl is not supported, it always generates EINVAL.
    * Use this to determine whether to advertise the __DRI2_ROBUSTNESS
    * extension to the loader.
    *
    * Don't even try on pre-Gen6, since we don't attempt to use contexts there.
    */
   if (devinfo->gen >= 6) {
      struct drm_i915_reset_stats stats;
      memset(&stats, 0, sizeof(stats));

      const int ret = drmIoctl(dri_screen->fd, DRM_IOCTL_I915_GET_RESET_STATS, &stats);

      screen->has_context_reset_notification =
         (ret != -1 || errno != EINVAL);
   }

   if (intel_get_param(screen, I915_PARAM_CMD_PARSER_VERSION,
                       &screen->cmd_parser_version) < 0) {
      screen->cmd_parser_version = 0;
   }

   if (devinfo->gen >= 8 || screen->cmd_parser_version >= 2)
      screen->kernel_features |= KERNEL_ALLOWS_PREDICATE_WRITES;

   /* Haswell requires command parser version 4 in order to have L3
    * atomic scratch1 and chicken3 bits
    */
   if (devinfo->is_haswell && screen->cmd_parser_version >= 4) {
      screen->kernel_features |=
         KERNEL_ALLOWS_HSW_SCRATCH1_AND_ROW_CHICKEN3;
   }

   /* Haswell requires command parser version 6 in order to write to the
    * MI_MATH GPR registers, and version 7 in order to use
    * MI_LOAD_REGISTER_REG (which all users of MI_MATH use).
    */
   if (devinfo->gen >= 8 ||
       (devinfo->is_haswell && screen->cmd_parser_version >= 7)) {
      screen->kernel_features |= KERNEL_ALLOWS_MI_MATH_AND_LRR;
   }

   /* Gen7 needs at least command parser version 5 to support compute */
   if (devinfo->gen >= 8 || screen->cmd_parser_version >= 5)
      screen->kernel_features |= KERNEL_ALLOWS_COMPUTE_DISPATCH;

   dri_screen->extensions = !screen->has_context_reset_notification
      ? screenExtensions : intelRobustScreenExtensions;

   screen->compiler = brw_compiler_create(screen, devinfo);
   screen->compiler->shader_debug_log = shader_debug_log_mesa;
   screen->compiler->shader_perf_log = shader_perf_log_mesa;
   screen->program_id = 1;

   screen->has_exec_fence =
      intel_get_boolean(screen, I915_PARAM_HAS_EXEC_FENCE);

   return (const __DRIconfig**) intel_screen_make_configs(dri_screen);
}

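/*
 * Wrapper used by the DRI2 AllocateBuffer/ReleaseBuffer entry points below:
 * pairs the loader-visible __DRIbuffer with its backing libdrm buffer object.
 */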
struct intel_buffer {
   __DRIbuffer base;
   drm_intel_bo *bo;
};

static __DRIbuffer *
intelAllocateBuffer(__DRIscreen *dri_screen,
                    unsigned attachment, unsigned format,
                    int width, int height)
{
   struct intel_buffer *intelBuffer;
   struct intel_screen *screen = dri_screen->driverPrivate;

   assert(attachment == __DRI_BUFFER_FRONT_LEFT ||
          attachment == __DRI_BUFFER_BACK_LEFT);

   intelBuffer = calloc(1, sizeof *intelBuffer);
   if (intelBuffer == NULL)
      return NULL;

   /* The front and back buffers are color buffers, which are X tiled. */
   uint32_t tiling = I915_TILING_X;
   unsigned long pitch;
   int cpp = format / 8;
   intelBuffer->bo = drm_intel_bo_alloc_tiled(screen->bufmgr,
                                              "intelAllocateBuffer",
                                              width,
                                              height,
                                              cpp,
                                              &tiling, &pitch,
                                              BO_ALLOC_FOR_RENDER);

   if (intelBuffer->bo == NULL) {
      free(intelBuffer);
      return NULL;
   }

   drm_intel_bo_flink(intelBuffer->bo, &intelBuffer->base.name);

   intelBuffer->base.attachment = attachment;
   intelBuffer->base.cpp = cpp;
   intelBuffer->base.pitch = pitch;

   return &intelBuffer->base;
}

static void
intelReleaseBuffer(__DRIscreen *dri_screen, __DRIbuffer *buffer)
{
   struct intel_buffer *intelBuffer = (struct intel_buffer *) buffer;

   drm_intel_bo_unreference(intelBuffer->bo);
   free(intelBuffer);
}

static const struct __DriverAPIRec brw_driver_api = {
   .InitScreen           = intelInitScreen2,
   .DestroyScreen        = intelDestroyScreen,
   .CreateContext        = brwCreateContext,
   .DestroyContext       = intelDestroyContext,
   .CreateBuffer         = intelCreateBuffer,
   .DestroyBuffer        = intelDestroyBuffer,
   .MakeCurrent          = intelMakeCurrent,
   .UnbindContext        = intelUnbindContext,
   .AllocateBuffer       = intelAllocateBuffer,
   .ReleaseBuffer        = intelReleaseBuffer
};

static const struct __DRIDriverVtableExtensionRec brw_vtable = {
   .base = { __DRI_DRIVER_VTABLE, 1 },
   .vtable = &brw_driver_api,
};

static const __DRIextension *brw_driver_extensions[] = {
   &driCoreExtension.base,
   &driImageDriverExtension.base,
   &driDRI2Extension.base,
   &brw_vtable.base,
   &brw_config_options.base,
   NULL
};

PUBLIC const __DRIextension **__driDriverGetExtensions_i965(void)
{
   globalDriverAPI = &brw_driver_api;

   return brw_driver_extensions;
}