/*
 * Copyright 2003 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "main/context.h"
#include "main/framebuffer.h"
#include "main/renderbuffer.h"
#include "main/texobj.h"
#include "main/hash.h"
#include "main/fbobject.h"
#include "main/version.h"
#include "swrast/s_renderbuffer.h"
#include "util/ralloc.h"
#include "brw_defines.h"
#include "compiler/nir/nir.h"
static const __DRIconfigOptionsExtension brw_config_options = {
   .base = { __DRI_CONFIG_OPTIONS, 1 },

   DRI_CONF_SECTION_PERFORMANCE
      DRI_CONF_VBLANK_MODE(DRI_CONF_VBLANK_ALWAYS_SYNC)
      /* Options correspond to DRI_CONF_BO_REUSE_DISABLED,
       * DRI_CONF_BO_REUSE_ALL
       */
      DRI_CONF_OPT_BEGIN_V(bo_reuse, enum, 1, "0:1")
         DRI_CONF_DESC_BEGIN(en, "Buffer object reuse")
            DRI_CONF_ENUM(0, "Disable buffer object reuse")
            DRI_CONF_ENUM(1, "Enable reuse of all sizes of buffer objects")

      DRI_CONF_OPT_BEGIN_B(hiz, "true")
         DRI_CONF_DESC(en, "Enable Hierarchical Z on gen6+")

   DRI_CONF_SECTION_QUALITY
      DRI_CONF_FORCE_S3TC_ENABLE("false")
      DRI_CONF_PRECISE_TRIG("false")

      DRI_CONF_OPT_BEGIN(clamp_max_samples, int, -1)
         DRI_CONF_DESC(en, "Clamp the value of GL_MAX_SAMPLES to the "
                       "given integer. If negative, then do not clamp.")

   DRI_CONF_SECTION_DEBUG
      DRI_CONF_NO_RAST("false")
      DRI_CONF_ALWAYS_FLUSH_BATCH("false")
      DRI_CONF_ALWAYS_FLUSH_CACHE("false")
      DRI_CONF_DISABLE_THROTTLING("false")
      DRI_CONF_FORCE_GLSL_EXTENSIONS_WARN("false")
      DRI_CONF_FORCE_GLSL_VERSION(0)
      DRI_CONF_DISABLE_GLSL_LINE_CONTINUATIONS("false")
      DRI_CONF_DISABLE_BLEND_FUNC_EXTENDED("false")
      DRI_CONF_DUAL_COLOR_BLEND_BY_LOCATION("false")
      DRI_CONF_ALLOW_GLSL_EXTENSION_DIRECTIVE_MIDSHADER("false")
      DRI_CONF_ALLOW_HIGHER_COMPAT_VERSION("false")

      DRI_CONF_OPT_BEGIN_B(shader_precompile, "true")
         DRI_CONF_DESC(en, "Perform code generation at shader link time.")

   DRI_CONF_SECTION_MISCELLANEOUS
      DRI_CONF_GLSL_ZERO_INIT("false")
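/* Illustrative sketch (not part of the original file): once
 * driParseOptionInfo()/driParseConfigFiles() have filled an option cache,
 * the option values declared above are typically read back through the
 * xmlconfig query helpers.  The cache variable below is an assumption, not
 * a name taken from this file:
 *
 *    int  bo_reuse = driQueryOptioni(&screen->optionCache, "bo_reuse");
 *    bool hiz      = driQueryOptionb(&screen->optionCache, "hiz");
 */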
#include "intel_batchbuffer.h"
#include "intel_buffers.h"
#include "intel_bufmgr.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
#include "intel_screen.h"
#include "intel_tex.h"
#include "intel_image.h"

#include "brw_context.h"

#include "i915_drm.h"

/**
 * For debugging purposes, this returns a time in seconds.
 */
   clock_gettime(CLOCK_MONOTONIC, &tp);

   return tp.tv_sec + tp.tv_nsec / 1000000000.0;
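/* Illustrative sketch (not part of the original file), assuming a timing
 * helper with this shape wraps the two statements above; the name
 * get_time_seconds() is hypothetical:
 *
 *    static double get_time_seconds(void)
 *    {
 *       struct timespec tp;
 *       clock_gettime(CLOCK_MONOTONIC, &tp);
 *       return tp.tv_sec + tp.tv_nsec / 1000000000.0;
 *    }
 *
 *    double t0 = get_time_seconds();
 *    ...work being measured...
 *    fprintf(stderr, "took %.3f ms\n", (get_time_seconds() - t0) * 1000.0);
 *
 * CLOCK_MONOTONIC is used so wall-clock adjustments cannot skew the result.
 */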
static const __DRItexBufferExtension intelTexBufferExtension = {
   .base = { __DRI_TEX_BUFFER, 3 },

   .setTexBuffer     = intelSetTexBuffer,
   .setTexBuffer2    = intelSetTexBuffer2,
   .releaseTexBuffer = NULL,

intel_dri2_flush_with_flags(__DRIcontext *cPriv,
                            __DRIdrawable *dPriv,
                            enum __DRI2throttleReason reason)
   struct brw_context *brw = cPriv->driverPrivate;

   struct gl_context *ctx = &brw->ctx;

   FLUSH_VERTICES(ctx, 0);

   if (flags & __DRI2_FLUSH_DRAWABLE)
      intel_resolve_for_dri2_flush(brw, dPriv);

   if (reason == __DRI2_THROTTLE_SWAPBUFFER)
      brw->need_swap_throttle = true;
   if (reason == __DRI2_THROTTLE_FLUSHFRONT)
      brw->need_flush_throttle = true;

   intel_batchbuffer_flush(brw);

/**
 * Provides compatibility with loaders that only support the older (version
 * 1-3) flush interface.
 *
 * That includes libGL up to Mesa 9.0, and the X Server at least up to 1.13.
 */
intel_dri2_flush(__DRIdrawable *drawable)
   intel_dri2_flush_with_flags(drawable->driContextPriv, drawable,
                               __DRI2_FLUSH_DRAWABLE,
                               __DRI2_THROTTLE_SWAPBUFFER);

static const struct __DRI2flushExtensionRec intelFlushExtension = {
    .base = { __DRI2_FLUSH, 4 },

    .flush            = intel_dri2_flush,
    .invalidate       = dri2InvalidateDrawable,
    .flush_with_flags = intel_dri2_flush_with_flags,
static struct intel_image_format intel_image_formats[] = {
   { __DRI_IMAGE_FOURCC_ARGB8888, __DRI_IMAGE_COMPONENTS_RGBA, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_ARGB8888, 4 } } },

   { __DRI_IMAGE_FOURCC_ABGR8888, __DRI_IMAGE_COMPONENTS_RGBA, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_ABGR8888, 4 } } },

   { __DRI_IMAGE_FOURCC_SARGB8888, __DRI_IMAGE_COMPONENTS_RGBA, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_SARGB8, 4 } } },

   { __DRI_IMAGE_FOURCC_XRGB8888, __DRI_IMAGE_COMPONENTS_RGB, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_XRGB8888, 4 }, } },

   { __DRI_IMAGE_FOURCC_XBGR8888, __DRI_IMAGE_COMPONENTS_RGB, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_XBGR8888, 4 }, } },

   { __DRI_IMAGE_FOURCC_ARGB1555, __DRI_IMAGE_COMPONENTS_RGBA, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_ARGB1555, 2 } } },

   { __DRI_IMAGE_FOURCC_RGB565, __DRI_IMAGE_COMPONENTS_RGB, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_RGB565, 2 } } },

   { __DRI_IMAGE_FOURCC_R8, __DRI_IMAGE_COMPONENTS_R, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 }, } },

   { __DRI_IMAGE_FOURCC_R16, __DRI_IMAGE_COMPONENTS_R, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R16, 1 }, } },

   { __DRI_IMAGE_FOURCC_GR88, __DRI_IMAGE_COMPONENTS_RG, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_GR88, 2 }, } },

   { __DRI_IMAGE_FOURCC_GR1616, __DRI_IMAGE_COMPONENTS_RG, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_GR1616, 2 }, } },

   { __DRI_IMAGE_FOURCC_YUV410, __DRI_IMAGE_COMPONENTS_Y_U_V, 3,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
       { 1, 2, 2, __DRI_IMAGE_FORMAT_R8, 1 },
       { 2, 2, 2, __DRI_IMAGE_FORMAT_R8, 1 } } },

   { __DRI_IMAGE_FOURCC_YUV411, __DRI_IMAGE_COMPONENTS_Y_U_V, 3,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
       { 1, 2, 0, __DRI_IMAGE_FORMAT_R8, 1 },
       { 2, 2, 0, __DRI_IMAGE_FORMAT_R8, 1 } } },

   { __DRI_IMAGE_FOURCC_YUV420, __DRI_IMAGE_COMPONENTS_Y_U_V, 3,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
       { 1, 1, 1, __DRI_IMAGE_FORMAT_R8, 1 },
       { 2, 1, 1, __DRI_IMAGE_FORMAT_R8, 1 } } },

   { __DRI_IMAGE_FOURCC_YUV422, __DRI_IMAGE_COMPONENTS_Y_U_V, 3,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
       { 1, 1, 0, __DRI_IMAGE_FORMAT_R8, 1 },
       { 2, 1, 0, __DRI_IMAGE_FORMAT_R8, 1 } } },

   { __DRI_IMAGE_FOURCC_YUV444, __DRI_IMAGE_COMPONENTS_Y_U_V, 3,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
       { 1, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
       { 2, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 } } },

   { __DRI_IMAGE_FOURCC_YVU410, __DRI_IMAGE_COMPONENTS_Y_U_V, 3,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
       { 2, 2, 2, __DRI_IMAGE_FORMAT_R8, 1 },
       { 1, 2, 2, __DRI_IMAGE_FORMAT_R8, 1 } } },

   { __DRI_IMAGE_FOURCC_YVU411, __DRI_IMAGE_COMPONENTS_Y_U_V, 3,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
       { 2, 2, 0, __DRI_IMAGE_FORMAT_R8, 1 },
       { 1, 2, 0, __DRI_IMAGE_FORMAT_R8, 1 } } },

   { __DRI_IMAGE_FOURCC_YVU420, __DRI_IMAGE_COMPONENTS_Y_U_V, 3,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
       { 2, 1, 1, __DRI_IMAGE_FORMAT_R8, 1 },
       { 1, 1, 1, __DRI_IMAGE_FORMAT_R8, 1 } } },

   { __DRI_IMAGE_FOURCC_YVU422, __DRI_IMAGE_COMPONENTS_Y_U_V, 3,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
       { 2, 1, 0, __DRI_IMAGE_FORMAT_R8, 1 },
       { 1, 1, 0, __DRI_IMAGE_FORMAT_R8, 1 } } },

   { __DRI_IMAGE_FOURCC_YVU444, __DRI_IMAGE_COMPONENTS_Y_U_V, 3,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
       { 2, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
       { 1, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 } } },

   { __DRI_IMAGE_FOURCC_NV12, __DRI_IMAGE_COMPONENTS_Y_UV, 2,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
       { 1, 1, 1, __DRI_IMAGE_FORMAT_GR88, 2 } } },

   { __DRI_IMAGE_FOURCC_NV16, __DRI_IMAGE_COMPONENTS_Y_UV, 2,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
       { 1, 1, 0, __DRI_IMAGE_FORMAT_GR88, 2 } } },
   /* For YUYV buffers, we set up two overlapping DRI images and treat
    * them as planar buffers in the compositors.  Plane 0 is GR88 and
    * samples YU or YV pairs and places Y into the R component, while
    * plane 1 is ARGB and samples YUYV clusters, placing U into the G
    * component and V into A.  This lets the texture sampler interpolate
    * the Y components correctly when sampling from plane 0, and
    * interpolate U and V correctly when sampling from plane 1.
    */
   { __DRI_IMAGE_FOURCC_YUYV, __DRI_IMAGE_COMPONENTS_Y_XUXV, 2,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_GR88, 2 },
       { 0, 1, 0, __DRI_IMAGE_FORMAT_ARGB8888, 4 } } }
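/* Illustrative worked example (not part of the original file): in each plane
 * descriptor above the fields are { buffer_index, width_shift, height_shift,
 * dri_format, cpp }.  For a 1920x1080 NV12 image, plane 0 is sampled as R8
 * at the full 1920x1080 resolution (the Y values), while plane 1 has
 * width_shift = height_shift = 1 and is sampled as GR88 at 960x540, each
 * texel holding one interleaved U/V pair.  These shifts and indices are
 * consumed by intel_from_planar() further down in this file.
 */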
intel_image_warn_if_unaligned(__DRIimage *image, const char *func)
   uint32_t tiling, swizzle;
   drm_intel_bo_get_tiling(image->bo, &tiling, &swizzle);

   if (tiling != I915_TILING_NONE && (image->offset & 0xfff)) {
      _mesa_warning(NULL, "%s: offset 0x%08x not on tile boundary",
                    func, image->offset);

static struct intel_image_format *
intel_image_format_lookup(int fourcc)
   struct intel_image_format *f = NULL;

   for (unsigned i = 0; i < ARRAY_SIZE(intel_image_formats); i++) {
      if (intel_image_formats[i].fourcc == fourcc) {
         f = &intel_image_formats[i];

static boolean
intel_lookup_fourcc(int dri_format, int *fourcc)
   for (unsigned i = 0; i < ARRAY_SIZE(intel_image_formats); i++) {
      if (intel_image_formats[i].planes[0].dri_format == dri_format) {
         *fourcc = intel_image_formats[i].fourcc;

intel_allocate_image(int dri_format, void *loaderPrivate)
   image = calloc(1, sizeof *image);

   image->dri_format = dri_format;

   image->format = driImageFormatToGLFormat(dri_format);
   if (dri_format != __DRI_IMAGE_FORMAT_NONE &&
       image->format == MESA_FORMAT_NONE) {

   image->internal_format = _mesa_get_format_base_format(image->format);
   image->data = loaderPrivate;

/**
 * Sets up a DRIImage structure to point to a slice out of a miptree.
 */
intel_setup_image_from_mipmap_tree(struct brw_context *brw, __DRIimage *image,
                                   struct intel_mipmap_tree *mt, GLuint level,
   intel_miptree_make_shareable(brw, mt);

   intel_miptree_check_level_layer(mt, level, zoffset);

   image->width = minify(mt->physical_width0, level - mt->first_level);
   image->height = minify(mt->physical_height0, level - mt->first_level);
   image->pitch = mt->pitch;

   image->offset = intel_miptree_get_tile_offsets(mt, level, zoffset,

   drm_intel_bo_unreference(image->bo);

   drm_intel_bo_reference(mt->bo);
intel_create_image_from_name(__DRIscreen *dri_screen,
                             int width, int height, int format,
                             int name, int pitch, void *loaderPrivate)
   struct intel_screen *screen = dri_screen->driverPrivate;

   image = intel_allocate_image(format, loaderPrivate);

   if (image->format == MESA_FORMAT_NONE)

   cpp = _mesa_get_format_bytes(image->format);

   image->width = width;
   image->height = height;
   image->pitch = pitch * cpp;
   image->bo = drm_intel_bo_gem_create_from_name(screen->bufmgr, "image",

intel_create_image_from_renderbuffer(__DRIcontext *context,
                                     int renderbuffer, void *loaderPrivate)
   struct brw_context *brw = context->driverPrivate;
   struct gl_context *ctx = &brw->ctx;
   struct gl_renderbuffer *rb;
   struct intel_renderbuffer *irb;

   rb = _mesa_lookup_renderbuffer(ctx, renderbuffer);
      _mesa_error(ctx, GL_INVALID_OPERATION, "glRenderbufferExternalMESA");

   irb = intel_renderbuffer(rb);
   intel_miptree_make_shareable(brw, irb->mt);
   image = calloc(1, sizeof *image);

   image->internal_format = rb->InternalFormat;
   image->format = rb->Format;

   image->data = loaderPrivate;
   drm_intel_bo_unreference(image->bo);
   image->bo = irb->mt->bo;
   drm_intel_bo_reference(irb->mt->bo);
   image->width = rb->Width;
   image->height = rb->Height;
   image->pitch = irb->mt->pitch;
   image->dri_format = driGLFormatToImageFormat(image->format);
   image->has_depthstencil = irb->mt->stencil_mt ? true : false;

   rb->NeedsFinishRenderTexture = true;

intel_create_image_from_texture(__DRIcontext *context, int target,
                                unsigned texture, int zoffset,
   struct brw_context *brw = context->driverPrivate;
   struct gl_texture_object *obj;
   struct intel_texture_object *iobj;

   obj = _mesa_lookup_texture(&brw->ctx, texture);
   if (!obj || obj->Target != target) {
      *error = __DRI_IMAGE_ERROR_BAD_PARAMETER;

   if (target == GL_TEXTURE_CUBE_MAP)

   _mesa_test_texobj_completeness(&brw->ctx, obj);
   iobj = intel_texture_object(obj);
   if (!obj->_BaseComplete || (level > 0 && !obj->_MipmapComplete)) {
      *error = __DRI_IMAGE_ERROR_BAD_PARAMETER;

   if (level < obj->BaseLevel || level > obj->_MaxLevel) {
      *error = __DRI_IMAGE_ERROR_BAD_MATCH;

   if (target == GL_TEXTURE_3D && obj->Image[face][level]->Depth < zoffset) {
      *error = __DRI_IMAGE_ERROR_BAD_MATCH;

   image = calloc(1, sizeof *image);
      *error = __DRI_IMAGE_ERROR_BAD_ALLOC;

   image->internal_format = obj->Image[face][level]->InternalFormat;
   image->format = obj->Image[face][level]->TexFormat;
   image->data = loaderPrivate;
   intel_setup_image_from_mipmap_tree(brw, image, iobj->mt, level, zoffset);
   image->dri_format = driGLFormatToImageFormat(image->format);
   image->has_depthstencil = iobj->mt->stencil_mt ? true : false;
   if (image->dri_format == MESA_FORMAT_NONE) {
      *error = __DRI_IMAGE_ERROR_BAD_PARAMETER;

   *error = __DRI_IMAGE_ERROR_SUCCESS;
intel_destroy_image(__DRIimage *image)
   drm_intel_bo_unreference(image->bo);

intel_create_image(__DRIscreen *dri_screen,
                   int width, int height, int format,
   struct intel_screen *screen = dri_screen->driverPrivate;

   tiling = I915_TILING_X;
   if (use & __DRI_IMAGE_USE_CURSOR) {
      if (width != 64 || height != 64)
      tiling = I915_TILING_NONE;

   if (use & __DRI_IMAGE_USE_LINEAR)
      tiling = I915_TILING_NONE;

   image = intel_allocate_image(format, loaderPrivate);

   cpp = _mesa_get_format_bytes(image->format);
   image->bo = drm_intel_bo_alloc_tiled(screen->bufmgr, "image",
                                        width, height, cpp, &tiling,
   if (image->bo == NULL) {

   image->width = width;
   image->height = height;
   image->pitch = pitch;

intel_query_image(__DRIimage *image, int attrib, int *value)
   case __DRI_IMAGE_ATTRIB_STRIDE:
      *value = image->pitch;
   case __DRI_IMAGE_ATTRIB_HANDLE:
      *value = image->bo->handle;
   case __DRI_IMAGE_ATTRIB_NAME:
      return !drm_intel_bo_flink(image->bo, (uint32_t *) value);
   case __DRI_IMAGE_ATTRIB_FORMAT:
      *value = image->dri_format;
   case __DRI_IMAGE_ATTRIB_WIDTH:
      *value = image->width;
   case __DRI_IMAGE_ATTRIB_HEIGHT:
      *value = image->height;
   case __DRI_IMAGE_ATTRIB_COMPONENTS:
      if (image->planar_format == NULL)
      *value = image->planar_format->components;
   case __DRI_IMAGE_ATTRIB_FD:
      return !drm_intel_bo_gem_export_to_prime(image->bo, value);
   case __DRI_IMAGE_ATTRIB_FOURCC:
      return intel_lookup_fourcc(image->dri_format, value);
   case __DRI_IMAGE_ATTRIB_NUM_PLANES:
   case __DRI_IMAGE_ATTRIB_OFFSET:
      *value = image->offset;

intel_dup_image(__DRIimage *orig_image, void *loaderPrivate)
   image = calloc(1, sizeof *image);

   drm_intel_bo_reference(orig_image->bo);
   image->bo               = orig_image->bo;
   image->internal_format  = orig_image->internal_format;
   image->planar_format    = orig_image->planar_format;
   image->dri_format       = orig_image->dri_format;
   image->format           = orig_image->format;
   image->offset           = orig_image->offset;
   image->width            = orig_image->width;
   image->height           = orig_image->height;
   image->pitch            = orig_image->pitch;
   image->tile_x           = orig_image->tile_x;
   image->tile_y           = orig_image->tile_y;
   image->has_depthstencil = orig_image->has_depthstencil;
   image->data             = loaderPrivate;

   memcpy(image->strides, orig_image->strides, sizeof(image->strides));
   memcpy(image->offsets, orig_image->offsets, sizeof(image->offsets));

intel_validate_usage(__DRIimage *image, unsigned int use)
   if (use & __DRI_IMAGE_USE_CURSOR) {
      if (image->width != 64 || image->height != 64)
intel_create_image_from_names(__DRIscreen *dri_screen,
                              int width, int height, int fourcc,
                              int *names, int num_names,
                              int *strides, int *offsets,
   struct intel_image_format *f = NULL;

   if (dri_screen == NULL || names == NULL || num_names != 1)

   f = intel_image_format_lookup(fourcc);

   image = intel_create_image_from_name(dri_screen, width, height,
                                        __DRI_IMAGE_FORMAT_NONE,
                                        names[0], strides[0],

   image->planar_format = f;
   for (i = 0; i < f->nplanes; i++) {
      index = f->planes[i].buffer_index;
      image->offsets[index] = offsets[index];
      image->strides[index] = strides[index];

intel_create_image_from_fds(__DRIscreen *dri_screen,
                            int width, int height, int fourcc,
                            int *fds, int num_fds, int *strides, int *offsets,
   struct intel_screen *screen = dri_screen->driverPrivate;
   struct intel_image_format *f;

   if (fds == NULL || num_fds < 1)

   /* We only support all planes from the same bo */
   for (i = 0; i < num_fds; i++)
      if (fds[0] != fds[i])

   f = intel_image_format_lookup(fourcc);

   image = intel_allocate_image(f->planes[0].dri_format, loaderPrivate);
   image = intel_allocate_image(__DRI_IMAGE_FORMAT_NONE, loaderPrivate);

   image->width = width;
   image->height = height;
   image->pitch = strides[0];

   image->planar_format = f;

   for (i = 0; i < f->nplanes; i++) {
      index = f->planes[i].buffer_index;
      image->offsets[index] = offsets[index];
      image->strides[index] = strides[index];

      const int plane_height = height >> f->planes[i].height_shift;
      const int end = offsets[index] + plane_height * strides[index];

   image->bo = drm_intel_bo_gem_create_from_prime(screen->bufmgr,

   if (image->bo == NULL) {

   if (f->nplanes == 1) {
      image->offset = image->offsets[0];
      intel_image_warn_if_unaligned(image, __func__);

intel_create_image_from_dma_bufs(__DRIscreen *dri_screen,
                                 int width, int height, int fourcc,
                                 int *fds, int num_fds,
                                 int *strides, int *offsets,
                                 enum __DRIYUVColorSpace yuv_color_space,
                                 enum __DRISampleRange sample_range,
                                 enum __DRIChromaSiting horizontal_siting,
                                 enum __DRIChromaSiting vertical_siting,
   struct intel_image_format *f = intel_image_format_lookup(fourcc);

      *error = __DRI_IMAGE_ERROR_BAD_MATCH;

   image = intel_create_image_from_fds(dri_screen, width, height, fourcc, fds,
                                       num_fds, strides, offsets,
   /*
    * Invalid parameters and any inconsistencies between them are assumed to
    * be checked by the caller.  Therefore, besides unsupported formats, this
    * can only fail in allocation.
    */
      *error = __DRI_IMAGE_ERROR_BAD_ALLOC;

   image->dma_buf_imported = true;
   image->yuv_color_space = yuv_color_space;
   image->sample_range = sample_range;
   image->horizontal_siting = horizontal_siting;
   image->vertical_siting = vertical_siting;

   *error = __DRI_IMAGE_ERROR_SUCCESS;
intel_from_planar(__DRIimage *parent, int plane, void *loaderPrivate)
   int width, height, offset, stride, dri_format, index;
   struct intel_image_format *f;

   if (parent == NULL || parent->planar_format == NULL)

   f = parent->planar_format;

   if (plane >= f->nplanes)

   width = parent->width >> f->planes[plane].width_shift;
   height = parent->height >> f->planes[plane].height_shift;
   dri_format = f->planes[plane].dri_format;
   index = f->planes[plane].buffer_index;
   offset = parent->offsets[index];
   stride = parent->strides[index];

   image = intel_allocate_image(dri_format, loaderPrivate);

   if (offset + height * stride > parent->bo->size) {
      _mesa_warning(NULL, "intel_create_sub_image: subimage out of bounds");

   image->bo = parent->bo;
   drm_intel_bo_reference(parent->bo);

   image->width = width;
   image->height = height;
   image->pitch = stride;
   image->offset = offset;

   intel_image_warn_if_unaligned(image, __func__);

static const __DRIimageExtension intelImageExtension = {
    .base = { __DRI_IMAGE, 13 },

    .createImageFromName          = intel_create_image_from_name,
    .createImageFromRenderbuffer  = intel_create_image_from_renderbuffer,
    .destroyImage                 = intel_destroy_image,
    .createImage                  = intel_create_image,
    .queryImage                   = intel_query_image,
    .dupImage                     = intel_dup_image,
    .validateUsage                = intel_validate_usage,
    .createImageFromNames         = intel_create_image_from_names,
    .fromPlanar                   = intel_from_planar,
    .createImageFromTexture       = intel_create_image_from_texture,
    .createImageFromFds           = intel_create_image_from_fds,
    .createImageFromDmaBufs       = intel_create_image_from_dma_bufs,

    .getCapabilities              = NULL,
brw_query_renderer_integer(__DRIscreen *dri_screen,
                           int param, unsigned int *value)
   const struct intel_screen *const screen =
      (struct intel_screen *) dri_screen->driverPrivate;

   case __DRI2_RENDERER_VENDOR_ID:
   case __DRI2_RENDERER_DEVICE_ID:
      value[0] = screen->deviceID;
   case __DRI2_RENDERER_ACCELERATED:
   case __DRI2_RENDERER_VIDEO_MEMORY: {
      /* Once a batch uses more than 75% of the maximum mappable size, we
       * assume that there's some fragmentation, and we start doing extra
       * flushing, etc.  That's the big cliff apps will care about.
       */
      size_t mappable_size;

      drm_intel_get_aperture_sizes(dri_screen->fd, &mappable_size, &aper_size);

      const unsigned gpu_mappable_megabytes =
         (aper_size / (1024 * 1024)) * 3 / 4;

      const long system_memory_pages = sysconf(_SC_PHYS_PAGES);
      const long system_page_size = sysconf(_SC_PAGE_SIZE);

      if (system_memory_pages <= 0 || system_page_size <= 0)

      const uint64_t system_memory_bytes = (uint64_t) system_memory_pages
         * (uint64_t) system_page_size;

      const unsigned system_memory_megabytes =
         (unsigned) (system_memory_bytes / (1024 * 1024));

      value[0] = MIN2(system_memory_megabytes, gpu_mappable_megabytes);
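      /* Illustrative arithmetic (not part of the original file): with a 2 GiB
       * GTT aperture, gpu_mappable_megabytes = 2048 * 3 / 4 = 1536 MB; on a
       * machine with 8 GiB of RAM, system_memory_megabytes = 8192, so the
       * value reported here is MIN2(8192, 1536) = 1536 MB.
       */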
   case __DRI2_RENDERER_UNIFIED_MEMORY_ARCHITECTURE:
   case __DRI2_RENDERER_HAS_TEXTURE_3D:

   return driQueryRendererIntegerCommon(dri_screen, param, value);

brw_query_renderer_string(__DRIscreen *dri_screen,
                          int param, const char **value)
   const struct intel_screen *screen =
      (struct intel_screen *) dri_screen->driverPrivate;

   case __DRI2_RENDERER_VENDOR_ID:
      value[0] = brw_vendor_string;
   case __DRI2_RENDERER_DEVICE_ID:
      value[0] = brw_get_renderer_string(screen);

static const __DRI2rendererQueryExtension intelRendererQueryExtension = {
   .base = { __DRI2_RENDERER_QUERY, 1 },

   .queryInteger = brw_query_renderer_integer,
   .queryString  = brw_query_renderer_string

static const __DRIrobustnessExtension dri2Robustness = {
   .base = { __DRI2_ROBUSTNESS, 1 }

static const __DRIextension *screenExtensions[] = {
   &intelTexBufferExtension.base,
   &intelFenceExtension.base,
   &intelFlushExtension.base,
   &intelImageExtension.base,
   &intelRendererQueryExtension.base,
   &dri2ConfigQueryExtension.base,

static const __DRIextension *intelRobustScreenExtensions[] = {
   &intelTexBufferExtension.base,
   &intelFenceExtension.base,
   &intelFlushExtension.base,
   &intelImageExtension.base,
   &intelRendererQueryExtension.base,
   &dri2ConfigQueryExtension.base,
   &dri2Robustness.base,
intel_get_param(struct intel_screen *screen, int param, int *value)
   struct drm_i915_getparam gp;

   memset(&gp, 0, sizeof(gp));

   if (drmIoctl(screen->driScrnPriv->fd, DRM_IOCTL_I915_GETPARAM, &gp) == -1) {
      _mesa_warning(NULL, "drm_i915_getparam: %d", ret);

intel_get_boolean(struct intel_screen *screen, int param)
   return (intel_get_param(screen, param, &value) == 0) && value;

intel_get_integer(struct intel_screen *screen, int param)
   if (intel_get_param(screen, param, &value) == 0)

intelDestroyScreen(__DRIscreen * sPriv)
   struct intel_screen *screen = sPriv->driverPrivate;

   dri_bufmgr_destroy(screen->bufmgr);
   driDestroyOptionInfo(&screen->optionCache);

   ralloc_free(screen);
   sPriv->driverPrivate = NULL;
/**
 * This is called when we need to set up GL rendering to a new X window.
 */
intelCreateBuffer(__DRIscreen *dri_screen,
                  __DRIdrawable * driDrawPriv,
                  const struct gl_config * mesaVis, GLboolean isPixmap)
   struct intel_renderbuffer *rb;
   struct intel_screen *screen = (struct intel_screen *)
      dri_screen->driverPrivate;
   mesa_format rgbFormat;
   unsigned num_samples =
      intel_quantize_num_samples(screen, mesaVis->samples);
   struct gl_framebuffer *fb;

   fb = CALLOC_STRUCT(gl_framebuffer);

   _mesa_initialize_window_framebuffer(fb, mesaVis);

   if (screen->winsys_msaa_samples_override != -1) {
      num_samples = screen->winsys_msaa_samples_override;
      fb->Visual.samples = num_samples;

   if (mesaVis->redBits == 5) {
      rgbFormat = mesaVis->redMask == 0x1f ? MESA_FORMAT_R5G6B5_UNORM
                                           : MESA_FORMAT_B5G6R5_UNORM;
   } else if (mesaVis->sRGBCapable) {
      rgbFormat = mesaVis->redMask == 0xff ? MESA_FORMAT_R8G8B8A8_SRGB
                                           : MESA_FORMAT_B8G8R8A8_SRGB;
   } else if (mesaVis->alphaBits == 0) {
      rgbFormat = mesaVis->redMask == 0xff ? MESA_FORMAT_R8G8B8X8_UNORM
                                           : MESA_FORMAT_B8G8R8X8_UNORM;
      rgbFormat = mesaVis->redMask == 0xff ? MESA_FORMAT_R8G8B8A8_SRGB
                                           : MESA_FORMAT_B8G8R8A8_SRGB;
      fb->Visual.sRGBCapable = true;

   /* setup the hardware-based renderbuffers */
   rb = intel_create_renderbuffer(rgbFormat, num_samples);
   _mesa_add_renderbuffer(fb, BUFFER_FRONT_LEFT, &rb->Base.Base);

   if (mesaVis->doubleBufferMode) {
      rb = intel_create_renderbuffer(rgbFormat, num_samples);
      _mesa_add_renderbuffer(fb, BUFFER_BACK_LEFT, &rb->Base.Base);

   /*
    * Assert here that the gl_config has an expected depth/stencil bit
    * combination: one of d24/s8, d16/s0, d0/s0. (See intelInitScreen2(),
    * which constructs the advertised configs.)
    */
   if (mesaVis->depthBits == 24) {
      assert(mesaVis->stencilBits == 8);

      if (screen->devinfo.has_hiz_and_separate_stencil) {
         rb = intel_create_private_renderbuffer(MESA_FORMAT_Z24_UNORM_X8_UINT,
         _mesa_add_renderbuffer(fb, BUFFER_DEPTH, &rb->Base.Base);
         rb = intel_create_private_renderbuffer(MESA_FORMAT_S_UINT8,
         _mesa_add_renderbuffer(fb, BUFFER_STENCIL, &rb->Base.Base);
         /*
          * Use combined depth/stencil. Note that the renderbuffer is
          * attached to two attachment points.
          */
         rb = intel_create_private_renderbuffer(MESA_FORMAT_Z24_UNORM_S8_UINT,
         _mesa_add_renderbuffer(fb, BUFFER_DEPTH, &rb->Base.Base);
         _mesa_add_renderbuffer(fb, BUFFER_STENCIL, &rb->Base.Base);
   else if (mesaVis->depthBits == 16) {
      assert(mesaVis->stencilBits == 0);
      rb = intel_create_private_renderbuffer(MESA_FORMAT_Z_UNORM16,
      _mesa_add_renderbuffer(fb, BUFFER_DEPTH, &rb->Base.Base);
      assert(mesaVis->depthBits == 0);
      assert(mesaVis->stencilBits == 0);

   /* now add any/all software-based renderbuffers we may need */
   _swrast_add_soft_renderbuffers(fb,
                                  false, /* never sw color */
                                  false, /* never sw depth */
                                  false, /* never sw stencil */
                                  mesaVis->accumRedBits > 0,
                                  false, /* never sw alpha */
                                  false /* never sw aux */ );
   driDrawPriv->driverPrivate = fb;

intelDestroyBuffer(__DRIdrawable * driDrawPriv)
   struct gl_framebuffer *fb = driDrawPriv->driverPrivate;

   _mesa_reference_framebuffer(&fb, NULL);
intel_detect_sseu(struct intel_screen *screen)
   assert(screen->devinfo.gen >= 8);

   screen->subslice_total = -1;
   screen->eu_total = -1;

   ret = intel_get_param(screen, I915_PARAM_SUBSLICE_TOTAL,
                         &screen->subslice_total);
   if (ret < 0 && ret != -EINVAL)

   ret = intel_get_param(screen,
                         I915_PARAM_EU_TOTAL, &screen->eu_total);
   if (ret < 0 && ret != -EINVAL)

   /* Without this information, we cannot get the right Braswell brandstrings,
    * and we have to use conservative numbers for GPGPU on many platforms, but
    * otherwise, things will just work.
    */
   if (screen->subslice_total < 1 || screen->eu_total < 1)
              "Kernel 4.1 required to properly query GPU properties.\n");

   screen->subslice_total = -1;
   screen->eu_total = -1;
   _mesa_warning(NULL, "Failed to query GPU properties (%s).\n", strerror(-ret));

intel_init_bufmgr(struct intel_screen *screen)
   __DRIscreen *dri_screen = screen->driScrnPriv;

   screen->no_hw = getenv("INTEL_NO_HW") != NULL;

   screen->bufmgr = intel_bufmgr_gem_init(dri_screen->fd, BATCH_SZ);
   if (screen->bufmgr == NULL) {
      fprintf(stderr, "[%s:%u] Error initializing buffer manager.\n",
              __func__, __LINE__);

   drm_intel_bufmgr_gem_enable_fenced_relocs(screen->bufmgr);

   if (!intel_get_boolean(screen, I915_PARAM_HAS_RELAXED_DELTA)) {
      fprintf(stderr, "[%s: %u] Kernel 2.6.39 required.\n", __func__, __LINE__);

intel_detect_swizzling(struct intel_screen *screen)
   drm_intel_bo *buffer;
   unsigned long flags = 0;
   unsigned long aligned_pitch;
   uint32_t tiling = I915_TILING_X;
   uint32_t swizzle_mode = 0;

   buffer = drm_intel_bo_alloc_tiled(screen->bufmgr, "swizzle test",
                                     &tiling, &aligned_pitch, flags);

   drm_intel_bo_get_tiling(buffer, &tiling, &swizzle_mode);
   drm_intel_bo_unreference(buffer);

   if (swizzle_mode == I915_BIT_6_SWIZZLE_NONE)
intel_detect_timestamp(struct intel_screen *screen)
   uint64_t dummy = 0, last = 0;
   int upper, lower, loops;

   /* On 64bit systems, some old kernels trigger a hw bug resulting in the
    * TIMESTAMP register being shifted and the low 32bits always zero.
    *
    * More recent kernels offer an interface to read the full 36bits
    */
   if (drm_intel_reg_read(screen->bufmgr, TIMESTAMP | 1, &dummy) == 0)

   /* Determine if we have a 32bit or 64bit kernel by inspecting the
    * upper 32bits for a rapidly changing timestamp.
    */
   if (drm_intel_reg_read(screen->bufmgr, TIMESTAMP, &last))

   for (loops = 0; loops < 10; loops++) {
      /* The TIMESTAMP should change every 80ns, so several round trips
       * through the kernel should be enough to advance it.
       */
      if (drm_intel_reg_read(screen->bufmgr, TIMESTAMP, &dummy))

      upper += (dummy >> 32) != (last >> 32);
      if (upper > 1) /* beware 32bit counter overflow */
         return 2; /* upper dword holds the low 32bits of the timestamp */

      lower += (dummy & 0xffffffff) != (last & 0xffffffff);
         return 1; /* timestamp is unshifted */

   /* No advancement? No timestamp! */
/**
 * Test if we can use MI_LOAD_REGISTER_MEM from an untrusted batchbuffer.
 *
 * Some combinations of hardware and kernel versions allow this feature,
 * while others don't.  Instead of trying to enumerate every case, just
 * try to write a register and see if it works.
 */
intel_detect_pipelined_register(struct intel_screen *screen,
                                int reg, uint32_t expected_value, bool reset)
   drm_intel_bo *results, *bo;

   uint32_t offset = 0;
   bool success = false;

   /* Create a zero'ed temporary buffer for reading our results */
   results = drm_intel_bo_alloc(screen->bufmgr, "registers", 4096, 0);
   if (results == NULL)

   bo = drm_intel_bo_alloc(screen->bufmgr, "batchbuffer", 4096, 0);

   if (drm_intel_bo_map(bo, 1))

   batch = bo->virtual;

   /* Write the register. */
   *batch++ = MI_LOAD_REGISTER_IMM | (3 - 2);
   *batch++ = expected_value;

   /* Save the register's value back to the buffer. */
   *batch++ = MI_STORE_REGISTER_MEM | (3 - 2);
   drm_intel_bo_emit_reloc(bo, (char *)batch - (char *)bo->virtual,
                           results, offset * sizeof(uint32_t),
                           I915_GEM_DOMAIN_INSTRUCTION,
                           I915_GEM_DOMAIN_INSTRUCTION);
   *batch++ = results->offset + offset * sizeof(uint32_t);

   /* And afterwards clear the register */
   *batch++ = MI_LOAD_REGISTER_IMM | (3 - 2);

   *batch++ = MI_BATCH_BUFFER_END;

   drm_intel_bo_mrb_exec(bo, ALIGN((char *)batch - (char *)bo->virtual, 8),

   /* Check whether the value got written. */
   if (drm_intel_bo_map(results, false) == 0) {
      success = *((uint32_t *)results->virtual + offset) == expected_value;
      drm_intel_bo_unmap(results);

   drm_intel_bo_unreference(bo);

   drm_intel_bo_unreference(results);

intel_detect_pipelined_so(struct intel_screen *screen)
   /* Supposedly, Broadwell just works. */
   if (screen->devinfo.gen >= 8)

   if (screen->devinfo.gen <= 6)

   /* We use SO_WRITE_OFFSET0 since you're supposed to write it (unlike the
    * statistics registers), and we already reset it to zero before using it.
    */
   return intel_detect_pipelined_register(screen,
                                          GEN7_SO_WRITE_OFFSET(0),
1337 * zero-terminated and sorted in decreasing order.
1340 intel_supported_msaa_modes(const struct intel_screen
*screen
)
1342 static const int gen9_modes
[] = {16, 8, 4, 2, 0, -1};
1343 static const int gen8_modes
[] = {8, 4, 2, 0, -1};
1344 static const int gen7_modes
[] = {8, 4, 0, -1};
1345 static const int gen6_modes
[] = {4, 0, -1};
1346 static const int gen4_modes
[] = {0, -1};
1348 if (screen
->devinfo
.gen
>= 9) {
1350 } else if (screen
->devinfo
.gen
>= 8) {
1352 } else if (screen
->devinfo
.gen
>= 7) {
1354 } else if (screen
->devinfo
.gen
== 6) {
1361 static __DRIconfig
**
1362 intel_screen_make_configs(__DRIscreen
*dri_screen
)
1364 static const mesa_format formats
[] = {
1365 MESA_FORMAT_B5G6R5_UNORM
,
1366 MESA_FORMAT_B8G8R8A8_UNORM
,
1367 MESA_FORMAT_B8G8R8X8_UNORM
1370 /* GLX_SWAP_COPY_OML is not supported due to page flipping. */
1371 static const GLenum back_buffer_modes
[] = {
1372 GLX_SWAP_UNDEFINED_OML
, GLX_NONE
,
1375 static const uint8_t singlesample_samples
[1] = {0};
1376 static const uint8_t multisample_samples
[2] = {4, 8};
1378 struct intel_screen
*screen
= dri_screen
->driverPrivate
;
1379 const struct gen_device_info
*devinfo
= &screen
->devinfo
;
1380 uint8_t depth_bits
[4], stencil_bits
[4];
1381 __DRIconfig
**configs
= NULL
;
1383 /* Generate singlesample configs without accumulation buffer. */
1384 for (unsigned i
= 0; i
< ARRAY_SIZE(formats
); i
++) {
1385 __DRIconfig
**new_configs
;
1386 int num_depth_stencil_bits
= 2;
1388 /* Starting with DRI2 protocol version 1.1 we can request a depth/stencil
1389 * buffer that has a different number of bits per pixel than the color
1390 * buffer, gen >= 6 supports this.
1393 stencil_bits
[0] = 0;
1395 if (formats
[i
] == MESA_FORMAT_B5G6R5_UNORM
) {
1397 stencil_bits
[1] = 0;
1398 if (devinfo
->gen
>= 6) {
1400 stencil_bits
[2] = 8;
1401 num_depth_stencil_bits
= 3;
1405 stencil_bits
[1] = 8;
1408 new_configs
= driCreateConfigs(formats
[i
],
1411 num_depth_stencil_bits
,
1412 back_buffer_modes
, 2,
1413 singlesample_samples
, 1,
1415 configs
= driConcatConfigs(configs
, new_configs
);
1418 /* Generate the minimum possible set of configs that include an
1419 * accumulation buffer.
1421 for (unsigned i
= 0; i
< ARRAY_SIZE(formats
); i
++) {
1422 __DRIconfig
**new_configs
;
1424 if (formats
[i
] == MESA_FORMAT_B5G6R5_UNORM
) {
1426 stencil_bits
[0] = 0;
1429 stencil_bits
[0] = 8;
1432 new_configs
= driCreateConfigs(formats
[i
],
1433 depth_bits
, stencil_bits
, 1,
1434 back_buffer_modes
, 1,
1435 singlesample_samples
, 1,
1437 configs
= driConcatConfigs(configs
, new_configs
);
1440 /* Generate multisample configs.
1442 * This loop breaks early, and hence is a no-op, on gen < 6.
1444 * Multisample configs must follow the singlesample configs in order to
1445 * work around an X server bug present in 1.12. The X server chooses to
1446 * associate the first listed RGBA888-Z24S8 config, regardless of its
1447 * sample count, with the 32-bit depth visual used for compositing.
1449 * Only doublebuffer configs with GLX_SWAP_UNDEFINED_OML behavior are
1450 * supported. Singlebuffer configs are not supported because no one wants
1453 for (unsigned i
= 0; i
< ARRAY_SIZE(formats
); i
++) {
1454 if (devinfo
->gen
< 6)
1457 __DRIconfig
**new_configs
;
1458 const int num_depth_stencil_bits
= 2;
1459 int num_msaa_modes
= 0;
1462 stencil_bits
[0] = 0;
1464 if (formats
[i
] == MESA_FORMAT_B5G6R5_UNORM
) {
1466 stencil_bits
[1] = 0;
1469 stencil_bits
[1] = 8;
1472 if (devinfo
->gen
>= 7)
1474 else if (devinfo
->gen
== 6)
1477 new_configs
= driCreateConfigs(formats
[i
],
1480 num_depth_stencil_bits
,
1481 back_buffer_modes
, 1,
1482 multisample_samples
,
1485 configs
= driConcatConfigs(configs
, new_configs
);
1488 if (configs
== NULL
) {
1489 fprintf(stderr
, "[%s:%u] Error creating FBConfig!\n", __func__
,
set_max_gl_versions(struct intel_screen *screen)
   __DRIscreen *dri_screen = screen->driScrnPriv;
   const bool has_astc = screen->devinfo.gen >= 9;

   switch (screen->devinfo.gen) {
      dri_screen->max_gl_core_version = 45;
      dri_screen->max_gl_compat_version = 30;
      dri_screen->max_gl_es1_version = 11;
      dri_screen->max_gl_es2_version = has_astc ? 32 : 31;

      dri_screen->max_gl_core_version = screen->devinfo.is_haswell &&
         can_do_pipelined_register_writes(screen) ? 45 : 33;
      dri_screen->max_gl_compat_version = 30;
      dri_screen->max_gl_es1_version = 11;
      dri_screen->max_gl_es2_version = screen->devinfo.is_haswell ? 31 : 30;

      dri_screen->max_gl_core_version = 33;
      dri_screen->max_gl_compat_version = 30;
      dri_screen->max_gl_es1_version = 11;
      dri_screen->max_gl_es2_version = 30;

      dri_screen->max_gl_core_version = 0;
      dri_screen->max_gl_compat_version = 21;
      dri_screen->max_gl_es1_version = 11;
      dri_screen->max_gl_es2_version = 20;

      unreachable("unrecognized intel_screen::gen");

/**
 * Return the revision (generally the revid field of the PCI header) of the
 *
 * XXX: This function is useful to keep around even if it is not currently in
 * use. It is necessary for new platforms and revision specific workarounds or
 * features. Please don't remove it so that we know it at least continues to
 */
static __attribute__((__unused__)) int
brw_get_revision(int fd)
   struct drm_i915_getparam gp;

   memset(&gp, 0, sizeof(gp));
   gp.param = I915_PARAM_REVISION;
   gp.value = &revision;

   ret = drmCommandWriteRead(fd, DRM_I915_GETPARAM, &gp, sizeof(gp));
shader_debug_log_mesa(void *data, const char *fmt, ...)
   struct brw_context *brw = (struct brw_context *)data;

   va_start(args, fmt);
   _mesa_gl_vdebug(&brw->ctx, &msg_id,
                   MESA_DEBUG_SOURCE_SHADER_COMPILER,
                   MESA_DEBUG_TYPE_OTHER,
                   MESA_DEBUG_SEVERITY_NOTIFICATION, fmt, args);

shader_perf_log_mesa(void *data, const char *fmt, ...)
   struct brw_context *brw = (struct brw_context *)data;

   va_start(args, fmt);

   if (unlikely(INTEL_DEBUG & DEBUG_PERF)) {
      va_copy(args_copy, args);
      vfprintf(stderr, fmt, args_copy);

   if (brw->perf_debug) {
      _mesa_gl_vdebug(&brw->ctx, &msg_id,
                      MESA_DEBUG_SOURCE_SHADER_COMPILER,
                      MESA_DEBUG_TYPE_PERFORMANCE,
                      MESA_DEBUG_SEVERITY_MEDIUM, fmt, args);
/**
 * This is the driver specific part of the createNewScreen entry point.
 * Called when using DRI2.
 *
 * \return the struct gl_config supported by this driver
 */
__DRIconfig **intelInitScreen2(__DRIscreen *dri_screen)
   struct intel_screen *screen;

   if (dri_screen->image.loader) {
   } else if (dri_screen->dri2.loader->base.version <= 2 ||
              dri_screen->dri2.loader->getBuffersWithFormat == NULL) {
              "\nERROR! DRI2 loader with getBuffersWithFormat() "
              "support required\n");

   /* Allocate the private area */
   screen = rzalloc(NULL, struct intel_screen);
      fprintf(stderr, "\nERROR! Allocating private area failed\n");

   /* parse information in __driConfigOptions */
   driParseOptionInfo(&screen->optionCache, brw_config_options.xml);

   screen->driScrnPriv = dri_screen;
   dri_screen->driverPrivate = (void *) screen;

   if (!intel_init_bufmgr(screen))

   screen->deviceID = drm_intel_bufmgr_gem_get_devid(screen->bufmgr);
   if (!gen_get_device_info(screen->deviceID, &screen->devinfo))

   const struct gen_device_info *devinfo = &screen->devinfo;

   brw_process_intel_debug_variable();

   if (INTEL_DEBUG & DEBUG_BUFMGR)
      dri_bufmgr_set_debug(screen->bufmgr, true);

   if ((INTEL_DEBUG & DEBUG_SHADER_TIME) && devinfo->gen < 7) {
              "shader_time debugging requires gen7 (Ivybridge) or better.\n");
      INTEL_DEBUG &= ~DEBUG_SHADER_TIME;

   if (intel_get_integer(screen, I915_PARAM_MMAP_GTT_VERSION) >= 1) {
      /* Theoretically unlimited! At least for individual objects...
       *
       * Currently the entire (global) address space for all GTT maps is
       * limited to 64bits.  That is, all objects on the system that are
       * set up for GTT mmapping must fit within 64bits.  An attempt to use
       * one that exceeds the limit will fail in drm_intel_bo_map_gtt().
       *
       * Long before we hit that limit, we will be practically limited by
       * the fact that any single object must fit in physical memory (RAM).
       * The upper limit on the CPU's address space is currently 48bits
       * (Skylake), of which only 39bits can be physical memory. (The GPU
       * itself also has a 48bit addressable virtual space.)  We can fit
       * over 32 million objects of the current maximum allocable size
       * before running out
       */
      screen->max_gtt_map_object_size = UINT64_MAX;

      /* Estimate the size of the mappable aperture into the GTT.  There's an
       * ioctl to get the whole GTT size, but not one to get the mappable subset.
       * It turns out it's basically always 256MB, though some ancient hardware
       */
      uint32_t gtt_size = 256 * 1024 * 1024;

      /* We don't want to map two objects such that a memcpy between them would
       * just fault one mapping in and then the other over and over forever.  So
       * we would need to divide the GTT size by 2.  Additionally, some GTT is
       * taken up by things like the framebuffer and the ringbuffer and such, so
       * be more conservative.
       */
      screen->max_gtt_map_object_size = gtt_size / 4;
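      /* Illustrative arithmetic (not part of the original file): with the
       * 256 MB mappable-aperture estimate above, the per-object cap works
       * out to 256 MB / 4 = 64 MB, i.e. roughly a quarter of the aperture
       * is the largest single object this path treats as safe to GTT-map.
       */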
   screen->hw_has_swizzling = intel_detect_swizzling(screen);
   screen->hw_has_timestamp = intel_detect_timestamp(screen);

   /* GENs prior to 8 do not support EU/Subslice info */
   if (devinfo->gen >= 8) {
      intel_detect_sseu(screen);
   } else if (devinfo->gen == 7) {
      screen->subslice_total = 1 << (devinfo->gt - 1);

   if (intel_detect_pipelined_so(screen))
      screen->kernel_features |= KERNEL_ALLOWS_SOL_OFFSET_WRITES;

   const char *force_msaa = getenv("INTEL_FORCE_MSAA");
      screen->winsys_msaa_samples_override =
         intel_quantize_num_samples(screen, atoi(force_msaa));
      printf("Forcing winsys sample count to %d\n",
             screen->winsys_msaa_samples_override);
      screen->winsys_msaa_samples_override = -1;

   set_max_gl_versions(screen);

   /* Notification of GPU resets requires hardware contexts and a kernel new
    * enough to support DRM_IOCTL_I915_GET_RESET_STATS.  If the ioctl is
    * supported, calling it with a context of 0 will either generate EPERM or
    * no error.  If the ioctl is not supported, it always generates EINVAL.
    * Use this to determine whether to advertise the __DRI2_ROBUSTNESS
    * extension to the loader.
    *
    * Don't even try on pre-Gen6, since we don't attempt to use contexts there.
    */
   if (devinfo->gen >= 6) {
      struct drm_i915_reset_stats stats;
      memset(&stats, 0, sizeof(stats));

      const int ret = drmIoctl(dri_screen->fd, DRM_IOCTL_I915_GET_RESET_STATS, &stats);

      screen->has_context_reset_notification =
         (ret != -1 || errno != EINVAL);

   if (intel_get_param(screen, I915_PARAM_CMD_PARSER_VERSION,
                       &screen->cmd_parser_version) < 0) {
      screen->cmd_parser_version = 0;

   if (devinfo->gen >= 8 || screen->cmd_parser_version >= 2)
      screen->kernel_features |= KERNEL_ALLOWS_PREDICATE_WRITES;

   /* Haswell requires command parser version 4 in order to have L3
    * atomic scratch1 and chicken3 bits
    */
   if (devinfo->is_haswell && screen->cmd_parser_version >= 4) {
      screen->kernel_features |=
         KERNEL_ALLOWS_HSW_SCRATCH1_AND_ROW_CHICKEN3;

   /* Haswell requires command parser version 6 in order to write to the
    * MI_MATH GPR registers, and version 7 in order to use
    * MI_LOAD_REGISTER_REG (which all users of MI_MATH use).
    */
   if (devinfo->gen >= 8 ||
       (devinfo->is_haswell && screen->cmd_parser_version >= 7)) {
      screen->kernel_features |= KERNEL_ALLOWS_MI_MATH_AND_LRR;

   /* Gen7 needs at least command parser version 5 to support compute */
   if (devinfo->gen >= 8 || screen->cmd_parser_version >= 5)
      screen->kernel_features |= KERNEL_ALLOWS_COMPUTE_DISPATCH;

   dri_screen->extensions = !screen->has_context_reset_notification
      ? screenExtensions : intelRobustScreenExtensions;

   screen->compiler = brw_compiler_create(screen, devinfo);
   screen->compiler->shader_debug_log = shader_debug_log_mesa;
   screen->compiler->shader_perf_log = shader_perf_log_mesa;
   screen->program_id = 1;

   screen->has_exec_fence =
      intel_get_boolean(screen, I915_PARAM_HAS_EXEC_FENCE);

   return (const __DRIconfig **) intel_screen_make_configs(dri_screen);
struct intel_buffer {

static __DRIbuffer *
intelAllocateBuffer(__DRIscreen *dri_screen,
                    unsigned attachment, unsigned format,
                    int width, int height)
   struct intel_buffer *intelBuffer;
   struct intel_screen *screen = dri_screen->driverPrivate;

   assert(attachment == __DRI_BUFFER_FRONT_LEFT ||
          attachment == __DRI_BUFFER_BACK_LEFT);

   intelBuffer = calloc(1, sizeof *intelBuffer);
   if (intelBuffer == NULL)

   /* The front and back buffers are color buffers, which are X tiled. */
   uint32_t tiling = I915_TILING_X;
   unsigned long pitch;
   int cpp = format / 8;
   intelBuffer->bo = drm_intel_bo_alloc_tiled(screen->bufmgr,
                                              "intelAllocateBuffer",
                                              BO_ALLOC_FOR_RENDER);

   if (intelBuffer->bo == NULL) {

   drm_intel_bo_flink(intelBuffer->bo, &intelBuffer->base.name);

   intelBuffer->base.attachment = attachment;
   intelBuffer->base.cpp = cpp;
   intelBuffer->base.pitch = pitch;

   return &intelBuffer->base;

intelReleaseBuffer(__DRIscreen *dri_screen, __DRIbuffer *buffer)
   struct intel_buffer *intelBuffer = (struct intel_buffer *) buffer;

   drm_intel_bo_unreference(intelBuffer->bo);

static const struct __DriverAPIRec brw_driver_api = {
   .InitScreen     = intelInitScreen2,
   .DestroyScreen  = intelDestroyScreen,
   .CreateContext  = brwCreateContext,
   .DestroyContext = intelDestroyContext,
   .CreateBuffer   = intelCreateBuffer,
   .DestroyBuffer  = intelDestroyBuffer,
   .MakeCurrent    = intelMakeCurrent,
   .UnbindContext  = intelUnbindContext,
   .AllocateBuffer = intelAllocateBuffer,
   .ReleaseBuffer  = intelReleaseBuffer

static const struct __DRIDriverVtableExtensionRec brw_vtable = {
   .base = { __DRI_DRIVER_VTABLE, 1 },
   .vtable = &brw_driver_api,

static const __DRIextension *brw_driver_extensions[] = {
   &driCoreExtension.base,
   &driImageDriverExtension.base,
   &driDRI2Extension.base,
   &brw_config_options.base,

PUBLIC const __DRIextension **__driDriverGetExtensions_i965(void)
   globalDriverAPI = &brw_driver_api;

   return brw_driver_extensions;
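/* Illustrative note (not part of the original file): the DRI loader resolves
 * this entry point by dlopen()ing the driver (e.g. i965_dri.so) and looking
 * up the symbol "__driDriverGetExtensions_" followed by the driver name.  A
 * minimal sketch of the loader side, assuming libdl is available, is:
 *
 *    void *handle = dlopen("i965_dri.so", RTLD_NOW | RTLD_GLOBAL);
 *    const __DRIextension **(*get_ext)(void) =
 *       (const __DRIextension **(*)(void))
 *          dlsym(handle, "__driDriverGetExtensions_i965");
 *    const __DRIextension **extensions = get_ext();
 *
 * The returned list is then scanned for entries such as __DRI_CORE and
 * __DRI_DRIVER_VTABLE, which is how brw_driver_extensions above reaches the
 * loader.
 */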