i965/screen: Return false for unsupported formats in query_modifiers
[mesa.git] / src / mesa / drivers / dri / i965 / intel_screen.c
1 /*
2 * Copyright 2003 VMware, Inc.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial portions
15 * of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
18 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
21 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
22 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
23 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 */
25
26 #include <drm_fourcc.h>
27 #include <errno.h>
28 #include <time.h>
29 #include <unistd.h>
30 #include "main/context.h"
31 #include "main/framebuffer.h"
32 #include "main/renderbuffer.h"
33 #include "main/texobj.h"
34 #include "main/hash.h"
35 #include "main/fbobject.h"
36 #include "main/version.h"
37 #include "swrast/s_renderbuffer.h"
38 #include "util/ralloc.h"
39 #include "util/disk_cache.h"
40 #include "brw_defines.h"
41 #include "brw_state.h"
42 #include "compiler/nir/nir.h"
43
44 #include "utils.h"
45 #include "util/disk_cache.h"
46 #include "util/xmlpool.h"
47
48 #include "common/gen_defines.h"
49
/* Driver option table published to loaders through the __DRI_CONFIG_OPTIONS
 * extension; each option can be overridden per application via drirc.
 */
static const __DRIconfigOptionsExtension brw_config_options = {
   .base = { __DRI_CONFIG_OPTIONS, 1 },
   .xml =
DRI_CONF_BEGIN
   DRI_CONF_SECTION_PERFORMANCE
      /* Options correspond to DRI_CONF_BO_REUSE_DISABLED,
       * DRI_CONF_BO_REUSE_ALL
       */
      DRI_CONF_OPT_BEGIN_V(bo_reuse, enum, 1, "0:1")
         DRI_CONF_DESC_BEGIN(en, "Buffer object reuse")
            DRI_CONF_ENUM(0, "Disable buffer object reuse")
            DRI_CONF_ENUM(1, "Enable reuse of all sizes of buffer objects")
         DRI_CONF_DESC_END
      DRI_CONF_OPT_END
      DRI_CONF_MESA_NO_ERROR("false")
   DRI_CONF_SECTION_END

   DRI_CONF_SECTION_QUALITY
      DRI_CONF_PRECISE_TRIG("false")

      DRI_CONF_OPT_BEGIN(clamp_max_samples, int, -1)
              DRI_CONF_DESC(en, "Clamp the value of GL_MAX_SAMPLES to the "
                            "given integer. If negative, then do not clamp.")
      DRI_CONF_OPT_END
   DRI_CONF_SECTION_END

   DRI_CONF_SECTION_DEBUG
      DRI_CONF_NO_RAST("false")
      DRI_CONF_ALWAYS_FLUSH_BATCH("false")
      DRI_CONF_ALWAYS_FLUSH_CACHE("false")
      DRI_CONF_DISABLE_THROTTLING("false")
      DRI_CONF_FORCE_GLSL_EXTENSIONS_WARN("false")
      DRI_CONF_FORCE_GLSL_VERSION(0)
      DRI_CONF_DISABLE_GLSL_LINE_CONTINUATIONS("false")
      DRI_CONF_DISABLE_BLEND_FUNC_EXTENDED("false")
      DRI_CONF_DUAL_COLOR_BLEND_BY_LOCATION("false")
      DRI_CONF_ALLOW_GLSL_EXTENSION_DIRECTIVE_MIDSHADER("false")
      DRI_CONF_ALLOW_GLSL_BUILTIN_VARIABLE_REDECLARATION("false")
      DRI_CONF_ALLOW_GLSL_CROSS_STAGE_INTERPOLATION_MISMATCH("false")
      DRI_CONF_ALLOW_HIGHER_COMPAT_VERSION("false")
      DRI_CONF_FORCE_GLSL_ABS_SQRT("false")

      DRI_CONF_OPT_BEGIN_B(shader_precompile, "true")
         DRI_CONF_DESC(en, "Perform code generation at shader link time.")
      DRI_CONF_OPT_END
   DRI_CONF_SECTION_END

   DRI_CONF_SECTION_MISCELLANEOUS
      DRI_CONF_GLSL_ZERO_INIT("false")
      DRI_CONF_ALLOW_RGB10_CONFIGS("false")
   DRI_CONF_SECTION_END
DRI_CONF_END
};
103
104 #include "intel_batchbuffer.h"
105 #include "intel_buffers.h"
106 #include "brw_bufmgr.h"
107 #include "intel_fbo.h"
108 #include "intel_mipmap_tree.h"
109 #include "intel_screen.h"
110 #include "intel_tex.h"
111 #include "intel_image.h"
112
113 #include "brw_context.h"
114
115 #include "i915_drm.h"
116
117 /**
118 * For debugging purposes, this returns a time in seconds.
119 */
120 double
121 get_time(void)
122 {
123 struct timespec tp;
124
125 clock_gettime(CLOCK_MONOTONIC, &tp);
126
127 return tp.tv_sec + tp.tv_nsec / 1000000000.0;
128 }
129
/* __DRI_TEX_BUFFER (v3) entry points: GLX_EXT_texture_from_pixmap support
 * handed to the loader.
 */
static const __DRItexBufferExtension intelTexBufferExtension = {
   .base = { __DRI_TEX_BUFFER, 3 },

   .setTexBuffer = intelSetTexBuffer,
   .setTexBuffer2 = intelSetTexBuffer2,
   .releaseTexBuffer = intelReleaseTexBuffer,
};
137
/**
 * Flush rendering for the given context/drawable and submit the batch.
 *
 * \param cPriv   DRI context; if it has no driver context yet, this is a
 *                no-op.
 * \param dPriv   drawable whose surfaces are resolved when
 *                __DRI2_FLUSH_DRAWABLE is set in \p flags.
 * \param flags   bitmask of __DRI2_FLUSH_* flags.
 * \param reason  why the loader requested the flush; SWAPBUFFER and
 *                FLUSHFRONT set the matching throttle flag on the context
 *                so the next submission can throttle appropriately.
 */
static void
intel_dri2_flush_with_flags(__DRIcontext *cPriv,
                            __DRIdrawable *dPriv,
                            unsigned flags,
                            enum __DRI2throttleReason reason)
{
   struct brw_context *brw = cPriv->driverPrivate;

   /* The loader may flush before a context is made current. */
   if (!brw)
      return;

   struct gl_context *ctx = &brw->ctx;

   FLUSH_VERTICES(ctx, 0);

   if (flags & __DRI2_FLUSH_DRAWABLE)
      intel_resolve_for_dri2_flush(brw, dPriv);

   if (reason == __DRI2_THROTTLE_SWAPBUFFER)
      brw->need_swap_throttle = true;
   if (reason == __DRI2_THROTTLE_FLUSHFRONT)
      brw->need_flush_throttle = true;

   intel_batchbuffer_flush(brw);
}
163
/**
 * Provides compatibility with loaders that only support the older (version
 * 1-3) flush interface.
 *
 * That includes libGL up to Mesa 9.0, and the X Server at least up to 1.13.
 */
static void
intel_dri2_flush(__DRIdrawable *drawable)
{
   /* Equivalent to a full flush-and-resolve with swapbuffer throttling. */
   intel_dri2_flush_with_flags(drawable->driContextPriv, drawable,
                               __DRI2_FLUSH_DRAWABLE,
                               __DRI2_THROTTLE_SWAPBUFFER);
}
177
/* __DRI2_FLUSH (v4) entry points handed to the loader. */
static const struct __DRI2flushExtensionRec intelFlushExtension = {
    .base = { __DRI2_FLUSH, 4 },

    .flush = intel_dri2_flush,
    .invalidate = dri2InvalidateDrawable,
    .flush_with_flags = intel_dri2_flush_with_flags,
};
185
/* Table of the DRM fourcc formats the DRIimage extension can import and
 * export, with their per-plane layout. Each plane entry is
 * { buffer_index, width_shift, height_shift, dri_format, cpp }; see the
 * buffer_index / width_shift uses in the create-from-fds path below.
 */
static const struct intel_image_format intel_image_formats[] = {
   { __DRI_IMAGE_FOURCC_ARGB2101010, __DRI_IMAGE_COMPONENTS_RGBA, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_ARGB2101010, 4 } } },

   { __DRI_IMAGE_FOURCC_XRGB2101010, __DRI_IMAGE_COMPONENTS_RGB, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_XRGB2101010, 4 } } },

   { __DRI_IMAGE_FOURCC_ABGR2101010, __DRI_IMAGE_COMPONENTS_RGBA, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_ABGR2101010, 4 } } },

   { __DRI_IMAGE_FOURCC_XBGR2101010, __DRI_IMAGE_COMPONENTS_RGB, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_XBGR2101010, 4 } } },

   { __DRI_IMAGE_FOURCC_ARGB8888, __DRI_IMAGE_COMPONENTS_RGBA, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_ARGB8888, 4 } } },

   { __DRI_IMAGE_FOURCC_ABGR8888, __DRI_IMAGE_COMPONENTS_RGBA, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_ABGR8888, 4 } } },

   { __DRI_IMAGE_FOURCC_SARGB8888, __DRI_IMAGE_COMPONENTS_RGBA, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_SARGB8, 4 } } },

   { __DRI_IMAGE_FOURCC_XRGB8888, __DRI_IMAGE_COMPONENTS_RGB, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_XRGB8888, 4 }, } },

   { __DRI_IMAGE_FOURCC_XBGR8888, __DRI_IMAGE_COMPONENTS_RGB, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_XBGR8888, 4 }, } },

   { __DRI_IMAGE_FOURCC_ARGB1555, __DRI_IMAGE_COMPONENTS_RGBA, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_ARGB1555, 2 } } },

   { __DRI_IMAGE_FOURCC_RGB565, __DRI_IMAGE_COMPONENTS_RGB, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_RGB565, 2 } } },

   { __DRI_IMAGE_FOURCC_R8, __DRI_IMAGE_COMPONENTS_R, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 }, } },

   { __DRI_IMAGE_FOURCC_R16, __DRI_IMAGE_COMPONENTS_R, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R16, 1 }, } },

   { __DRI_IMAGE_FOURCC_GR88, __DRI_IMAGE_COMPONENTS_RG, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_GR88, 2 }, } },

   { __DRI_IMAGE_FOURCC_GR1616, __DRI_IMAGE_COMPONENTS_RG, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_GR1616, 2 }, } },

   /* Planar YUV: each plane is sampled as R8, with chroma planes
    * subsampled according to the width/height shifts. */
   { __DRI_IMAGE_FOURCC_YUV410, __DRI_IMAGE_COMPONENTS_Y_U_V, 3,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
       { 1, 2, 2, __DRI_IMAGE_FORMAT_R8, 1 },
       { 2, 2, 2, __DRI_IMAGE_FORMAT_R8, 1 } } },

   { __DRI_IMAGE_FOURCC_YUV411, __DRI_IMAGE_COMPONENTS_Y_U_V, 3,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
       { 1, 2, 0, __DRI_IMAGE_FORMAT_R8, 1 },
       { 2, 2, 0, __DRI_IMAGE_FORMAT_R8, 1 } } },

   { __DRI_IMAGE_FOURCC_YUV420, __DRI_IMAGE_COMPONENTS_Y_U_V, 3,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
       { 1, 1, 1, __DRI_IMAGE_FORMAT_R8, 1 },
       { 2, 1, 1, __DRI_IMAGE_FORMAT_R8, 1 } } },

   { __DRI_IMAGE_FOURCC_YUV422, __DRI_IMAGE_COMPONENTS_Y_U_V, 3,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
       { 1, 1, 0, __DRI_IMAGE_FORMAT_R8, 1 },
       { 2, 1, 0, __DRI_IMAGE_FORMAT_R8, 1 } } },

   { __DRI_IMAGE_FOURCC_YUV444, __DRI_IMAGE_COMPONENTS_Y_U_V, 3,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
       { 1, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
       { 2, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 } } },

   /* YVU variants: same as YUV but with the U/V buffer indices swapped. */
   { __DRI_IMAGE_FOURCC_YVU410, __DRI_IMAGE_COMPONENTS_Y_U_V, 3,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
       { 2, 2, 2, __DRI_IMAGE_FORMAT_R8, 1 },
       { 1, 2, 2, __DRI_IMAGE_FORMAT_R8, 1 } } },

   { __DRI_IMAGE_FOURCC_YVU411, __DRI_IMAGE_COMPONENTS_Y_U_V, 3,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
       { 2, 2, 0, __DRI_IMAGE_FORMAT_R8, 1 },
       { 1, 2, 0, __DRI_IMAGE_FORMAT_R8, 1 } } },

   { __DRI_IMAGE_FOURCC_YVU420, __DRI_IMAGE_COMPONENTS_Y_U_V, 3,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
       { 2, 1, 1, __DRI_IMAGE_FORMAT_R8, 1 },
       { 1, 1, 1, __DRI_IMAGE_FORMAT_R8, 1 } } },

   { __DRI_IMAGE_FOURCC_YVU422, __DRI_IMAGE_COMPONENTS_Y_U_V, 3,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
       { 2, 1, 0, __DRI_IMAGE_FORMAT_R8, 1 },
       { 1, 1, 0, __DRI_IMAGE_FORMAT_R8, 1 } } },

   { __DRI_IMAGE_FOURCC_YVU444, __DRI_IMAGE_COMPONENTS_Y_U_V, 3,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
       { 2, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
       { 1, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 } } },

   { __DRI_IMAGE_FOURCC_NV12, __DRI_IMAGE_COMPONENTS_Y_UV, 2,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
       { 1, 1, 1, __DRI_IMAGE_FORMAT_GR88, 2 } } },

   { __DRI_IMAGE_FOURCC_NV16, __DRI_IMAGE_COMPONENTS_Y_UV, 2,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
       { 1, 1, 0, __DRI_IMAGE_FORMAT_GR88, 2 } } },

   /* For YUYV and UYVY buffers, we set up two overlapping DRI images
    * and treat them as planar buffers in the compositors.
    * Plane 0 is GR88 and samples YU or YV pairs and places Y into
    * the R component, while plane 1 is ARGB/ABGR and samples YUYV/UYVY
    * clusters and places pairs and places U into the G component and
    * V into A. This lets the texture sampler interpolate the Y
    * components correctly when sampling from plane 0, and interpolate
    * U and V correctly when sampling from plane 1. */
   { __DRI_IMAGE_FOURCC_YUYV, __DRI_IMAGE_COMPONENTS_Y_XUXV, 2,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_GR88, 2 },
       { 0, 1, 0, __DRI_IMAGE_FORMAT_ARGB8888, 4 } } },
   { __DRI_IMAGE_FOURCC_UYVY, __DRI_IMAGE_COMPONENTS_Y_UXVX, 2,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_GR88, 2 },
       { 0, 1, 0, __DRI_IMAGE_FORMAT_ABGR8888, 4 } } }
};
305
/* Format modifiers this driver supports, with the first hardware
 * generation on which each is usable.
 */
static const struct {
   uint64_t modifier;
   unsigned since_gen;
} supported_modifiers[] = {
   { .modifier = DRM_FORMAT_MOD_LINEAR       , .since_gen = 1 },
   { .modifier = I915_FORMAT_MOD_X_TILED     , .since_gen = 1 },
   { .modifier = I915_FORMAT_MOD_Y_TILED     , .since_gen = 6 },
   { .modifier = I915_FORMAT_MOD_Y_TILED_CCS , .since_gen = 9 },
};
315
316 static bool
317 modifier_is_supported(const struct gen_device_info *devinfo,
318 const struct intel_image_format *fmt, int dri_format,
319 uint64_t modifier)
320 {
321 const struct isl_drm_modifier_info *modinfo =
322 isl_drm_modifier_get_info(modifier);
323 int i;
324
325 /* ISL had better know about the modifier */
326 if (!modinfo)
327 return false;
328
329 if (modinfo->aux_usage == ISL_AUX_USAGE_CCS_E) {
330 /* If INTEL_DEBUG=norbc is set, don't support any CCS_E modifiers */
331 if (unlikely(INTEL_DEBUG & DEBUG_NO_RBC))
332 return false;
333
334 /* CCS_E is not supported for planar images */
335 if (fmt && fmt->nplanes > 1)
336 return false;
337
338 if (fmt) {
339 assert(dri_format == 0);
340 dri_format = fmt->planes[0].dri_format;
341 }
342
343 mesa_format format = driImageFormatToGLFormat(dri_format);
344 format = _mesa_get_srgb_format_linear(format);
345 if (!isl_format_supports_ccs_e(devinfo,
346 brw_isl_format_for_mesa_format(format)))
347 return false;
348 }
349
350 for (i = 0; i < ARRAY_SIZE(supported_modifiers); i++) {
351 if (supported_modifiers[i].modifier != modifier)
352 continue;
353
354 return supported_modifiers[i].since_gen <= devinfo->gen;
355 }
356
357 return false;
358 }
359
360 static uint64_t
361 tiling_to_modifier(uint32_t tiling)
362 {
363 static const uint64_t map[] = {
364 [I915_TILING_NONE] = DRM_FORMAT_MOD_LINEAR,
365 [I915_TILING_X] = I915_FORMAT_MOD_X_TILED,
366 [I915_TILING_Y] = I915_FORMAT_MOD_Y_TILED,
367 };
368
369 assert(tiling < ARRAY_SIZE(map));
370
371 return map[tiling];
372 }
373
/* Debug aid: warn when a tiled image's byte offset is not 4K-aligned,
 * since tiled surfaces are expected to start on a tile boundary.
 */
static void
intel_image_warn_if_unaligned(__DRIimage *image, const char *func)
{
   uint32_t tiling, swizzle;
   brw_bo_get_tiling(image->bo, &tiling, &swizzle);

   if (tiling != I915_TILING_NONE && (image->offset & 0xfff)) {
      _mesa_warning(NULL, "%s: offset 0x%08x not on tile boundary",
                    func, image->offset);
   }
}
385
386 static const struct intel_image_format *
387 intel_image_format_lookup(int fourcc)
388 {
389 for (unsigned i = 0; i < ARRAY_SIZE(intel_image_formats); i++) {
390 if (intel_image_formats[i].fourcc == fourcc)
391 return &intel_image_formats[i];
392 }
393
394 return NULL;
395 }
396
397 static boolean
398 intel_image_get_fourcc(__DRIimage *image, int *fourcc)
399 {
400 if (image->planar_format) {
401 *fourcc = image->planar_format->fourcc;
402 return true;
403 }
404
405 for (unsigned i = 0; i < ARRAY_SIZE(intel_image_formats); i++) {
406 if (intel_image_formats[i].planes[0].dri_format == image->dri_format) {
407 *fourcc = intel_image_formats[i].fourcc;
408 return true;
409 }
410 }
411 return false;
412 }
413
414 static __DRIimage *
415 intel_allocate_image(struct intel_screen *screen, int dri_format,
416 void *loaderPrivate)
417 {
418 __DRIimage *image;
419
420 image = calloc(1, sizeof *image);
421 if (image == NULL)
422 return NULL;
423
424 image->screen = screen;
425 image->dri_format = dri_format;
426 image->offset = 0;
427
428 image->format = driImageFormatToGLFormat(dri_format);
429 if (dri_format != __DRI_IMAGE_FORMAT_NONE &&
430 image->format == MESA_FORMAT_NONE) {
431 free(image);
432 return NULL;
433 }
434
435 image->internal_format = _mesa_get_format_base_format(image->format);
436 image->data = loaderPrivate;
437
438 return image;
439 }
440
/**
 * Sets up a DRIImage structure to point to a slice out of a miptree.
 *
 * The miptree is first made shareable, then the image takes its own
 * reference on the miptree's BO and records the level/slice location.
 */
static void
intel_setup_image_from_mipmap_tree(struct brw_context *brw, __DRIimage *image,
                                   struct intel_mipmap_tree *mt, GLuint level,
                                   GLuint zoffset)
{
   intel_miptree_make_shareable(brw, mt);

   intel_miptree_check_level_layer(mt, level, zoffset);

   /* Dimensions of the requested mip level, derived from physical level 0. */
   image->width = minify(mt->surf.phys_level0_sa.width,
                         level - mt->first_level);
   image->height = minify(mt->surf.phys_level0_sa.height,
                          level - mt->first_level);
   image->pitch = mt->surf.row_pitch;

   /* Locate the slice as a tile-aligned byte offset plus intra-tile x/y. */
   image->offset = intel_miptree_get_tile_offsets(mt, level, zoffset,
                                                  &image->tile_x,
                                                  &image->tile_y);

   /* Swap the image's BO reference for the miptree's BO. */
   brw_bo_unreference(image->bo);
   image->bo = mt->bo;
   brw_bo_reference(mt->bo);
}
467
/**
 * Create a __DRIimage by importing a GEM flink name.
 *
 * \param format  __DRI_IMAGE_FORMAT_* code; may be NONE for planar
 *                imports whose planes are filled in by the caller.
 * \param pitch   row pitch in pixels; converted to bytes here using the
 *                format's cpp (treated as bytes when format is NONE).
 *
 * Returns NULL on lookup or allocation failure.
 */
static __DRIimage *
intel_create_image_from_name(__DRIscreen *dri_screen,
                             int width, int height, int format,
                             int name, int pitch, void *loaderPrivate)
{
   struct intel_screen *screen = dri_screen->driverPrivate;
   __DRIimage *image;
   int cpp;

   image = intel_allocate_image(screen, format, loaderPrivate);
   if (image == NULL)
      return NULL;

   /* MESA_FORMAT_NONE has no bytes-per-pixel; leave pitch as-is. */
   if (image->format == MESA_FORMAT_NONE)
      cpp = 1;
   else
      cpp = _mesa_get_format_bytes(image->format);

   image->width = width;
   image->height = height;
   image->pitch = pitch * cpp;
   image->bo = brw_bo_gem_create_from_name(screen->bufmgr, "image",
                                           name);
   if (!image->bo) {
      free(image);
      return NULL;
   }
   /* Derive the modifier from the BO's kernel-reported tiling mode. */
   image->modifier = tiling_to_modifier(image->bo->tiling_mode);

   return image;
}
499
/**
 * Create a __DRIimage that aliases a GL renderbuffer's storage
 * (EGL_KHR_gl_renderbuffer_image path). The renderbuffer's miptree is
 * made shareable and the image takes a reference on its BO.
 *
 * Returns NULL (with GL_INVALID_OPERATION recorded for a bad renderbuffer
 * name) on failure.
 */
static __DRIimage *
intel_create_image_from_renderbuffer(__DRIcontext *context,
                                     int renderbuffer, void *loaderPrivate)
{
   __DRIimage *image;
   struct brw_context *brw = context->driverPrivate;
   struct gl_context *ctx = &brw->ctx;
   struct gl_renderbuffer *rb;
   struct intel_renderbuffer *irb;

   rb = _mesa_lookup_renderbuffer(ctx, renderbuffer);
   if (!rb) {
      _mesa_error(ctx, GL_INVALID_OPERATION, "glRenderbufferExternalMESA");
      return NULL;
   }

   /* NOTE(review): intel_renderbuffer() result is not NULL-checked before
    * irb->mt is dereferenced — presumably a looked-up renderbuffer always
    * has driver storage here; confirm against callers. */
   irb = intel_renderbuffer(rb);
   intel_miptree_make_shareable(brw, irb->mt);
   image = calloc(1, sizeof *image);
   if (image == NULL)
      return NULL;

   image->internal_format = rb->InternalFormat;
   image->format = rb->Format;
   image->modifier = tiling_to_modifier(
                        isl_tiling_to_i915_tiling(irb->mt->surf.tiling));
   image->offset = 0;
   image->data = loaderPrivate;
   /* image was calloc'd, so this unreference is a no-op; kept for symmetry. */
   brw_bo_unreference(image->bo);
   image->bo = irb->mt->bo;
   brw_bo_reference(irb->mt->bo);
   image->width = rb->Width;
   image->height = rb->Height;
   image->pitch = irb->mt->surf.row_pitch;
   image->dri_format = driGLFormatToImageFormat(image->format);
   image->has_depthstencil = irb->mt->stencil_mt? true : false;

   rb->NeedsFinishRenderTexture = true;
   return image;
}
540
/**
 * Create a __DRIimage that aliases one level/slice of a GL texture
 * (EGL_KHR_gl_texture_2D_image and friends).
 *
 * \param target   GL texture target; must match the texture object's.
 * \param zoffset  slice for 3D textures, face for cube maps.
 * \param error    out: one of __DRI_IMAGE_ERROR_*.
 *
 * Returns NULL on any validation or allocation failure, with \p error
 * set accordingly.
 */
static __DRIimage *
intel_create_image_from_texture(__DRIcontext *context, int target,
                                unsigned texture, int zoffset,
                                int level,
                                unsigned *error,
                                void *loaderPrivate)
{
   __DRIimage *image;
   struct brw_context *brw = context->driverPrivate;
   struct gl_texture_object *obj;
   struct intel_texture_object *iobj;
   GLuint face = 0;

   obj = _mesa_lookup_texture(&brw->ctx, texture);
   if (!obj || obj->Target != target) {
      *error = __DRI_IMAGE_ERROR_BAD_PARAMETER;
      return NULL;
   }

   /* For cube maps, zoffset selects the face. */
   if (target == GL_TEXTURE_CUBE_MAP)
      face = zoffset;

   _mesa_test_texobj_completeness(&brw->ctx, obj);
   iobj = intel_texture_object(obj);
   if (!obj->_BaseComplete || (level > 0 && !obj->_MipmapComplete)) {
      *error = __DRI_IMAGE_ERROR_BAD_PARAMETER;
      return NULL;
   }

   if (level < obj->BaseLevel || level > obj->_MaxLevel) {
      *error = __DRI_IMAGE_ERROR_BAD_MATCH;
      return NULL;
   }

   if (target == GL_TEXTURE_3D && obj->Image[face][level]->Depth < zoffset) {
      *error = __DRI_IMAGE_ERROR_BAD_MATCH;
      return NULL;
   }
   image = calloc(1, sizeof *image);
   if (image == NULL) {
      *error = __DRI_IMAGE_ERROR_BAD_ALLOC;
      return NULL;
   }

   image->internal_format = obj->Image[face][level]->InternalFormat;
   image->format = obj->Image[face][level]->TexFormat;
   image->modifier = tiling_to_modifier(
                        isl_tiling_to_i915_tiling(iobj->mt->surf.tiling));
   image->data = loaderPrivate;
   intel_setup_image_from_mipmap_tree(brw, image, iobj->mt, level, zoffset);
   image->dri_format = driGLFormatToImageFormat(image->format);
   image->has_depthstencil = iobj->mt->stencil_mt? true : false;
   image->planar_format = iobj->planar_format;
   /* Texture formats with no DRI image equivalent cannot be exported. */
   if (image->dri_format == __DRI_IMAGE_FORMAT_NONE) {
      *error = __DRI_IMAGE_ERROR_BAD_PARAMETER;
      free(image);
      return NULL;
   }

   *error = __DRI_IMAGE_ERROR_SUCCESS;
   return image;
}
603
/* Drop the image's BO reference and free the image itself. */
static void
intel_destroy_image(__DRIimage *image)
{
   brw_bo_unreference(image->bo);
   free(image);
}
610
611 enum modifier_priority {
612 MODIFIER_PRIORITY_INVALID = 0,
613 MODIFIER_PRIORITY_LINEAR,
614 MODIFIER_PRIORITY_X,
615 MODIFIER_PRIORITY_Y,
616 MODIFIER_PRIORITY_Y_CCS,
617 };
618
619 const uint64_t priority_to_modifier[] = {
620 [MODIFIER_PRIORITY_INVALID] = DRM_FORMAT_MOD_INVALID,
621 [MODIFIER_PRIORITY_LINEAR] = DRM_FORMAT_MOD_LINEAR,
622 [MODIFIER_PRIORITY_X] = I915_FORMAT_MOD_X_TILED,
623 [MODIFIER_PRIORITY_Y] = I915_FORMAT_MOD_Y_TILED,
624 [MODIFIER_PRIORITY_Y_CCS] = I915_FORMAT_MOD_Y_TILED_CCS,
625 };
626
627 static uint64_t
628 select_best_modifier(struct gen_device_info *devinfo,
629 int dri_format,
630 const uint64_t *modifiers,
631 const unsigned count)
632 {
633 enum modifier_priority prio = MODIFIER_PRIORITY_INVALID;
634
635 for (int i = 0; i < count; i++) {
636 if (!modifier_is_supported(devinfo, NULL, dri_format, modifiers[i]))
637 continue;
638
639 switch (modifiers[i]) {
640 case I915_FORMAT_MOD_Y_TILED_CCS:
641 prio = MAX2(prio, MODIFIER_PRIORITY_Y_CCS);
642 break;
643 case I915_FORMAT_MOD_Y_TILED:
644 prio = MAX2(prio, MODIFIER_PRIORITY_Y);
645 break;
646 case I915_FORMAT_MOD_X_TILED:
647 prio = MAX2(prio, MODIFIER_PRIORITY_X);
648 break;
649 case DRM_FORMAT_MOD_LINEAR:
650 prio = MAX2(prio, MODIFIER_PRIORITY_LINEAR);
651 break;
652 case DRM_FORMAT_MOD_INVALID:
653 default:
654 break;
655 }
656 }
657
658 return priority_to_modifier[prio];
659 }
660
/**
 * Shared implementation for createImage and createImageWithModifiers:
 * choose a modifier, lay out the surface (and CCS aux surface, if any)
 * with ISL, and allocate a zeroed BO to back it.
 *
 * Returns NULL if no acceptable modifier exists, the surface cannot be
 * laid out, or allocation fails.
 */
static __DRIimage *
intel_create_image_common(__DRIscreen *dri_screen,
                          int width, int height, int format,
                          unsigned int use,
                          const uint64_t *modifiers,
                          unsigned count,
                          void *loaderPrivate)
{
   __DRIimage *image;
   struct intel_screen *screen = dri_screen->driverPrivate;
   uint64_t modifier = DRM_FORMAT_MOD_INVALID;
   bool ok;

   /* Callers of this may specify a modifier, or a dri usage, but not both.
    * The newer modifier interface deprecates the older usage flags.
    */
   assert(!(use && count));

   /* Hardware cursors must be 64x64 linear. */
   if (use & __DRI_IMAGE_USE_CURSOR) {
      if (width != 64 || height != 64)
         return NULL;
      modifier = DRM_FORMAT_MOD_LINEAR;
   }

   if (use & __DRI_IMAGE_USE_LINEAR)
      modifier = DRM_FORMAT_MOD_LINEAR;

   if (modifier == DRM_FORMAT_MOD_INVALID) {
      if (modifiers) {
         /* User requested specific modifiers */
         modifier = select_best_modifier(&screen->devinfo, format,
                                         modifiers, count);
         if (modifier == DRM_FORMAT_MOD_INVALID)
            return NULL;
      } else {
         /* Historically, X-tiled was the default, and so lack of modifier means
          * X-tiled.
          */
         modifier = I915_FORMAT_MOD_X_TILED;
      }
   }

   image = intel_allocate_image(screen, format, loaderPrivate);
   if (image == NULL)
      return NULL;

   const struct isl_drm_modifier_info *mod_info =
      isl_drm_modifier_get_info(modifier);

   /* Let ISL compute the surface layout for the chosen tiling. */
   struct isl_surf surf;
   ok = isl_surf_init(&screen->isl_dev, &surf,
                      .dim = ISL_SURF_DIM_2D,
                      .format = brw_isl_format_for_mesa_format(image->format),
                      .width = width,
                      .height = height,
                      .depth = 1,
                      .levels = 1,
                      .array_len = 1,
                      .samples = 1,
                      .usage = ISL_SURF_USAGE_RENDER_TARGET_BIT |
                               ISL_SURF_USAGE_TEXTURE_BIT |
                               ISL_SURF_USAGE_STORAGE_BIT,
                      .tiling_flags = (1 << mod_info->tiling));
   /* Layout failure is unexpected (assert in debug) but handled anyway. */
   assert(ok);
   if (!ok) {
      free(image);
      return NULL;
   }

   /* CCS modifiers need an auxiliary surface appended after the main one. */
   struct isl_surf aux_surf;
   if (mod_info->aux_usage == ISL_AUX_USAGE_CCS_E) {
      ok = isl_surf_get_ccs_surf(&screen->isl_dev, &surf, &aux_surf, 0);
      if (!ok) {
         free(image);
         return NULL;
      }
   } else {
      assert(mod_info->aux_usage == ISL_AUX_USAGE_NONE);
      aux_surf.size = 0;
   }

   /* We request that the bufmgr zero the buffer for us for two reasons:
    *
    * 1) If a buffer gets re-used from the pool, we don't want to leak random
    *    garbage from our process to some other.
    *
    * 2) For images with CCS_E, we want to ensure that the CCS starts off in
    *    a valid state.  A CCS value of 0 indicates that the given block is
    *    in the pass-through state which is what we want.
    */
   image->bo = brw_bo_alloc_tiled(screen->bufmgr, "image",
                                  surf.size + aux_surf.size,
                                  BRW_MEMZONE_OTHER,
                                  isl_tiling_to_i915_tiling(mod_info->tiling),
                                  surf.row_pitch, BO_ALLOC_ZEROED);
   if (image->bo == NULL) {
      free(image);
      return NULL;
   }
   image->width = width;
   image->height = height;
   image->pitch = surf.row_pitch;
   image->modifier = modifier;

   /* Record where the aux surface lives within the shared BO. */
   if (aux_surf.size) {
      image->aux_offset = surf.size;
      image->aux_pitch = aux_surf.row_pitch;
      image->aux_size = aux_surf.size;
   }

   return image;
}
774
/* __DRIimageExtension::createImage — legacy entry point without a
 * modifier list; layout is chosen from the "use" flags instead.
 */
static __DRIimage *
intel_create_image(__DRIscreen *dri_screen,
                   int width, int height, int format,
                   unsigned int use,
                   void *loaderPrivate)
{
   return intel_create_image_common(dri_screen, width, height, format, use, NULL, 0,
                                    loaderPrivate);
}
784
/**
 * Map a rectangular region of an image into CPU-accessible memory.
 *
 * \param flags     DRI map flags (same values as GL_MAP_*_BIT); internal
 *                  flags are rejected.
 * \param stride    out: row stride in bytes of the returned mapping.
 * \param map_info  out: opaque handle (the referenced BO) that must be
 *                  passed back to intel_unmap_image(); must be NULL on
 *                  entry.
 *
 * Returns a pointer to the first requested pixel, or NULL on any
 * validation or mapping failure.
 */
static void *
intel_map_image(__DRIcontext *context, __DRIimage *image,
                int x0, int y0, int width, int height,
                unsigned int flags, int *stride, void **map_info)
{
   struct brw_context *brw = NULL;
   struct brw_bo *bo = NULL;
   void *raw_data = NULL;
   GLuint pix_w = 1;
   GLuint pix_h = 1;
   GLint pix_bytes = 1;

   /* Reject bad arguments and double-maps (*map_info already set). */
   if (!context || !image || !stride || !map_info || *map_info)
      return NULL;

   /* The requested rectangle must lie fully inside the image. */
   if (x0 < 0 || x0 >= image->width || width > image->width - x0)
      return NULL;

   if (y0 < 0 || y0 >= image->height || height > image->height - y0)
      return NULL;

   if (flags & MAP_INTERNAL_MASK)
      return NULL;

   brw = context->driverPrivate;
   bo = image->bo;

   assert(brw);
   assert(bo);

   /* DRI flags and GL_MAP.*_BIT flags are the same, so just pass them on. */
   raw_data = brw_bo_map(brw, bo, flags);
   if (!raw_data)
      return NULL;

   /* Account for compressed formats: offsets are in block units. */
   _mesa_get_format_block_size(image->format, &pix_w, &pix_h);
   pix_bytes = _mesa_get_format_bytes(image->format);

   assert(pix_w);
   assert(pix_h);
   assert(pix_bytes > 0);

   /* Note: arithmetic on void* is a GCC/Clang extension (byte-sized). */
   raw_data += (x0 / pix_w) * pix_bytes + (y0 / pix_h) * image->pitch;

   /* Hold a BO reference for the duration of the map; released in unmap. */
   brw_bo_reference(bo);

   *stride = image->pitch;
   *map_info = bo;

   return raw_data;
}
836
/* Unmap a region mapped by intel_map_image(); map_info is the BO that the
 * map call referenced.
 */
static void
intel_unmap_image(__DRIcontext *context, __DRIimage *image, void *map_info)
{
   struct brw_bo *bo = map_info;

   brw_bo_unmap(bo);
   brw_bo_unreference(bo);
}
845
/* __DRIimageExtension::createImageWithModifiers — modifier-based entry
 * point; usage flags are not used (the modifier list decides the layout).
 */
static __DRIimage *
intel_create_image_with_modifiers(__DRIscreen *dri_screen,
                                  int width, int height, int format,
                                  const uint64_t *modifiers,
                                  const unsigned count,
                                  void *loaderPrivate)
{
   return intel_create_image_common(dri_screen, width, height, format, 0,
                                    modifiers, count, loaderPrivate);
}
856
/**
 * __DRIimageExtension::queryImage — report one __DRI_IMAGE_ATTRIB_* value
 * for an image. Returns false for unknown attributes or when the value
 * cannot be produced (e.g. flink/prime export failure).
 */
static GLboolean
intel_query_image(__DRIimage *image, int attrib, int *value)
{
   switch (attrib) {
   case __DRI_IMAGE_ATTRIB_STRIDE:
      *value = image->pitch;
      return true;
   case __DRI_IMAGE_ATTRIB_HANDLE:
      *value = brw_bo_export_gem_handle(image->bo);
      return true;
   case __DRI_IMAGE_ATTRIB_NAME:
      /* brw_bo_flink returns non-zero on failure. */
      return !brw_bo_flink(image->bo, (uint32_t *) value);
   case __DRI_IMAGE_ATTRIB_FORMAT:
      *value = image->dri_format;
      return true;
   case __DRI_IMAGE_ATTRIB_WIDTH:
      *value = image->width;
      return true;
   case __DRI_IMAGE_ATTRIB_HEIGHT:
      *value = image->height;
      return true;
   case __DRI_IMAGE_ATTRIB_COMPONENTS:
      /* Only images created with a known fourcc carry component info. */
      if (image->planar_format == NULL)
         return false;
      *value = image->planar_format->components;
      return true;
   case __DRI_IMAGE_ATTRIB_FD:
      return !brw_bo_gem_export_to_prime(image->bo, value);
   case __DRI_IMAGE_ATTRIB_FOURCC:
      return intel_image_get_fourcc(image, value);
   case __DRI_IMAGE_ATTRIB_NUM_PLANES:
      /* Images with an aux (CCS) surface expose it as a second plane. */
      if (isl_drm_modifier_has_aux(image->modifier)) {
         assert(!image->planar_format || image->planar_format->nplanes == 1);
         *value = 2;
      } else if (image->planar_format) {
         *value = image->planar_format->nplanes;
      } else {
         *value = 1;
      }
      return true;
   case __DRI_IMAGE_ATTRIB_OFFSET:
      *value = image->offset;
      return true;
   /* The 64-bit modifier is returned as two 32-bit halves. */
   case __DRI_IMAGE_ATTRIB_MODIFIER_LOWER:
      *value = (image->modifier & 0xffffffff);
      return true;
   case __DRI_IMAGE_ATTRIB_MODIFIER_UPPER:
      *value = ((image->modifier >> 32) & 0xffffffff);
      return true;

   default:
      return false;
   }
}
911
912 static GLboolean
913 intel_query_format_modifier_attribs(__DRIscreen *dri_screen,
914 uint32_t fourcc, uint64_t modifier,
915 int attrib, uint64_t *value)
916 {
917 struct intel_screen *screen = dri_screen->driverPrivate;
918 const struct intel_image_format *f = intel_image_format_lookup(fourcc);
919
920 if (!modifier_is_supported(&screen->devinfo, f, 0, modifier))
921 return false;
922
923 switch (attrib) {
924 case __DRI_IMAGE_FORMAT_MODIFIER_ATTRIB_PLANE_COUNT:
925 *value = isl_drm_modifier_has_aux(modifier) ? 2 : f->nplanes;
926 return true;
927
928 default:
929 return false;
930 }
931 }
932
933 static __DRIimage *
934 intel_dup_image(__DRIimage *orig_image, void *loaderPrivate)
935 {
936 __DRIimage *image;
937
938 image = calloc(1, sizeof *image);
939 if (image == NULL)
940 return NULL;
941
942 brw_bo_reference(orig_image->bo);
943 image->bo = orig_image->bo;
944 image->internal_format = orig_image->internal_format;
945 image->planar_format = orig_image->planar_format;
946 image->dri_format = orig_image->dri_format;
947 image->format = orig_image->format;
948 image->modifier = orig_image->modifier;
949 image->offset = orig_image->offset;
950 image->width = orig_image->width;
951 image->height = orig_image->height;
952 image->pitch = orig_image->pitch;
953 image->tile_x = orig_image->tile_x;
954 image->tile_y = orig_image->tile_y;
955 image->has_depthstencil = orig_image->has_depthstencil;
956 image->data = loaderPrivate;
957 image->dma_buf_imported = orig_image->dma_buf_imported;
958 image->aux_offset = orig_image->aux_offset;
959 image->aux_pitch = orig_image->aux_pitch;
960
961 memcpy(image->strides, orig_image->strides, sizeof(image->strides));
962 memcpy(image->offsets, orig_image->offsets, sizeof(image->offsets));
963
964 return image;
965 }
966
967 static GLboolean
968 intel_validate_usage(__DRIimage *image, unsigned int use)
969 {
970 if (use & __DRI_IMAGE_USE_CURSOR) {
971 if (image->width != 64 || image->height != 64)
972 return GL_FALSE;
973 }
974
975 return GL_TRUE;
976 }
977
/**
 * Create a planar __DRIimage from flink names (DRI2 path). Only a single
 * name shared by all planes is supported; per-plane strides/offsets are
 * recorded on the image.
 *
 * NOTE(review): strides/offsets are dereferenced without a NULL check —
 * presumably the loader always supplies them alongside names; confirm
 * against callers.
 */
static __DRIimage *
intel_create_image_from_names(__DRIscreen *dri_screen,
                              int width, int height, int fourcc,
                              int *names, int num_names,
                              int *strides, int *offsets,
                              void *loaderPrivate)
{
   const struct intel_image_format *f = NULL;
   __DRIimage *image;
   int i, index;

   if (dri_screen == NULL || names == NULL || num_names != 1)
      return NULL;

   f = intel_image_format_lookup(fourcc);
   if (f == NULL)
      return NULL;

   /* FORMAT_NONE: the real format is described per-plane below. */
   image = intel_create_image_from_name(dri_screen, width, height,
                                        __DRI_IMAGE_FORMAT_NONE,
                                        names[0], strides[0],
                                        loaderPrivate);

   if (image == NULL)
      return NULL;

   image->planar_format = f;
   for (i = 0; i < f->nplanes; i++) {
      index = f->planes[i].buffer_index;
      image->offsets[index] = offsets[index];
      image->strides[index] = strides[index];
   }

   return image;
}
1013
/**
 * Import a dma-buf (one fd per plane entry, but all fds must resolve to the
 * same GEM bo) into a __DRIimage.
 *
 * 'modifier' is either a concrete DRM format modifier, in which case it must
 * be supported for this format/device, or DRM_FORMAT_MOD_INVALID, in which
 * case the layout is deduced from the bo's kernel-reported tiling.
 *
 * Returns NULL on any validation or allocation failure; on failure the bo
 * reference (if taken) is released and the image freed.
 */
static __DRIimage *
intel_create_image_from_fds_common(__DRIscreen *dri_screen,
                                   int width, int height, int fourcc,
                                   uint64_t modifier, int *fds, int num_fds,
                                   int *strides, int *offsets,
                                   void *loaderPrivate)
{
   struct intel_screen *screen = dri_screen->driverPrivate;
   const struct intel_image_format *f;
   __DRIimage *image;
   int i, index;
   bool ok;

   if (fds == NULL || num_fds < 1)
      return NULL;

   f = intel_image_format_lookup(fourcc);
   if (f == NULL)
      return NULL;

   /* An explicit modifier must be one we actually support for this format. */
   if (modifier != DRM_FORMAT_MOD_INVALID &&
       !modifier_is_supported(&screen->devinfo, f, 0, modifier))
      return NULL;

   /* Multi-planar images carry FORMAT_NONE at the top level; each plane's
    * real format is resolved later via intel_from_planar().
    */
   if (f->nplanes == 1)
      image = intel_allocate_image(screen, f->planes[0].dri_format,
                                   loaderPrivate);
   else
      image = intel_allocate_image(screen, __DRI_IMAGE_FORMAT_NONE,
                                   loaderPrivate);

   if (image == NULL)
      return NULL;

   image->width = width;
   image->height = height;
   image->pitch = strides[0];

   image->planar_format = f;

   /* With an explicit modifier, force the bo's tiling to match; otherwise
    * just import it and read the tiling back from the kernel below.
    */
   if (modifier != DRM_FORMAT_MOD_INVALID) {
      const struct isl_drm_modifier_info *mod_info =
         isl_drm_modifier_get_info(modifier);
      uint32_t tiling = isl_tiling_to_i915_tiling(mod_info->tiling);
      image->bo = brw_bo_gem_create_from_prime_tiled(screen->bufmgr, fds[0],
                                                     tiling, strides[0]);
   } else {
      image->bo = brw_bo_gem_create_from_prime(screen->bufmgr, fds[0]);
   }

   if (image->bo == NULL) {
      free(image);
      return NULL;
   }

   /* We only support all planes from the same bo.
    * brw_bo_gem_create_from_prime() should return the same pointer for all
    * fds received here */
   for (i = 1; i < num_fds; i++) {
      struct brw_bo *aux = brw_bo_gem_create_from_prime(screen->bufmgr, fds[i]);
      brw_bo_unreference(aux);
      if (aux != image->bo) {
         brw_bo_unreference(image->bo);
         free(image);
         return NULL;
      }
   }

   if (modifier != DRM_FORMAT_MOD_INVALID)
      image->modifier = modifier;
   else
      image->modifier = tiling_to_modifier(image->bo->tiling_mode);

   const struct isl_drm_modifier_info *mod_info =
      isl_drm_modifier_get_info(image->modifier);

   /* Track the highest byte the image reaches within the bo so we can
    * validate the bo is big enough.  NOTE(review): 'size' and 'end' are
    * plain int; extremely large surfaces could overflow — presumably
    * bounded elsewhere by max surface dims, but worth confirming.
    */
   int size = 0;
   struct isl_surf surf;
   for (i = 0; i < f->nplanes; i++) {
      index = f->planes[i].buffer_index;
      image->offsets[index] = offsets[index];
      image->strides[index] = strides[index];

      mesa_format format = driImageFormatToGLFormat(f->planes[i].dri_format);

      /* Build an ISL surface for the plane purely to learn its size and to
       * validate that the stride/tiling combination is legal.
       */
      ok = isl_surf_init(&screen->isl_dev, &surf,
                         .dim = ISL_SURF_DIM_2D,
                         .format = brw_isl_format_for_mesa_format(format),
                         .width = image->width >> f->planes[i].width_shift,
                         .height = image->height >> f->planes[i].height_shift,
                         .depth = 1,
                         .levels = 1,
                         .array_len = 1,
                         .samples = 1,
                         .row_pitch = strides[index],
                         .usage = ISL_SURF_USAGE_RENDER_TARGET_BIT |
                                  ISL_SURF_USAGE_TEXTURE_BIT |
                                  ISL_SURF_USAGE_STORAGE_BIT,
                         .tiling_flags = (1 << mod_info->tiling));
      if (!ok) {
         brw_bo_unreference(image->bo);
         free(image);
         return NULL;
      }

      const int end = offsets[index] + surf.size;
      if (size < end)
         size = end;
   }

   if (mod_info->aux_usage == ISL_AUX_USAGE_CCS_E) {
      /* Even though we initialize surf in the loop above, we know that
       * anything with CCS_E will have exactly one plane so surf is properly
       * initialized when we get here.
       */
      assert(f->nplanes == 1);

      /* The CCS is described as a second "plane" by the caller. */
      image->aux_offset = offsets[1];
      image->aux_pitch = strides[1];

      /* Scanout hardware requires that the CCS be placed after the main
       * surface in memory. We consider any CCS that is placed any earlier in
       * memory to be invalid and reject it.
       *
       * At some point in the future, this restriction may be relaxed if the
       * hardware becomes less strict but we may need a new modifier for that.
       */
      assert(size > 0);
      if (image->aux_offset < size) {
         brw_bo_unreference(image->bo);
         free(image);
         return NULL;
      }

      struct isl_surf aux_surf;
      ok = isl_surf_get_ccs_surf(&screen->isl_dev, &surf, &aux_surf,
                                 image->aux_pitch);
      if (!ok) {
         brw_bo_unreference(image->bo);
         free(image);
         return NULL;
      }

      image->aux_size = aux_surf.size;

      const int end = image->aux_offset + aux_surf.size;
      if (size < end)
         size = end;
   } else {
      assert(mod_info->aux_usage == ISL_AUX_USAGE_NONE);
   }

   /* Check that the requested image actually fits within the BO. 'size'
    * is already relative to the offsets, so we don't need to add that. */
   if (image->bo->size == 0) {
      image->bo->size = size;
   } else if (size > image->bo->size) {
      brw_bo_unreference(image->bo);
      free(image);
      return NULL;
   }

   if (f->nplanes == 1) {
      image->offset = image->offsets[0];
      intel_image_warn_if_unaligned(image, __func__);
   }

   return image;
}
1183
1184 static __DRIimage *
1185 intel_create_image_from_fds(__DRIscreen *dri_screen,
1186 int width, int height, int fourcc,
1187 int *fds, int num_fds, int *strides, int *offsets,
1188 void *loaderPrivate)
1189 {
1190 return intel_create_image_from_fds_common(dri_screen, width, height, fourcc,
1191 DRM_FORMAT_MOD_INVALID,
1192 fds, num_fds, strides, offsets,
1193 loaderPrivate);
1194 }
1195
1196 static __DRIimage *
1197 intel_create_image_from_dma_bufs2(__DRIscreen *dri_screen,
1198 int width, int height,
1199 int fourcc, uint64_t modifier,
1200 int *fds, int num_fds,
1201 int *strides, int *offsets,
1202 enum __DRIYUVColorSpace yuv_color_space,
1203 enum __DRISampleRange sample_range,
1204 enum __DRIChromaSiting horizontal_siting,
1205 enum __DRIChromaSiting vertical_siting,
1206 unsigned *error,
1207 void *loaderPrivate)
1208 {
1209 __DRIimage *image;
1210 const struct intel_image_format *f = intel_image_format_lookup(fourcc);
1211
1212 if (!f) {
1213 *error = __DRI_IMAGE_ERROR_BAD_MATCH;
1214 return NULL;
1215 }
1216
1217 image = intel_create_image_from_fds_common(dri_screen, width, height,
1218 fourcc, modifier,
1219 fds, num_fds, strides, offsets,
1220 loaderPrivate);
1221
1222 /*
1223 * Invalid parameters and any inconsistencies between are assumed to be
1224 * checked by the caller. Therefore besides unsupported formats one can fail
1225 * only in allocation.
1226 */
1227 if (!image) {
1228 *error = __DRI_IMAGE_ERROR_BAD_ALLOC;
1229 return NULL;
1230 }
1231
1232 image->dma_buf_imported = true;
1233 image->yuv_color_space = yuv_color_space;
1234 image->sample_range = sample_range;
1235 image->horizontal_siting = horizontal_siting;
1236 image->vertical_siting = vertical_siting;
1237
1238 *error = __DRI_IMAGE_ERROR_SUCCESS;
1239 return image;
1240 }
1241
1242 static __DRIimage *
1243 intel_create_image_from_dma_bufs(__DRIscreen *dri_screen,
1244 int width, int height, int fourcc,
1245 int *fds, int num_fds,
1246 int *strides, int *offsets,
1247 enum __DRIYUVColorSpace yuv_color_space,
1248 enum __DRISampleRange sample_range,
1249 enum __DRIChromaSiting horizontal_siting,
1250 enum __DRIChromaSiting vertical_siting,
1251 unsigned *error,
1252 void *loaderPrivate)
1253 {
1254 return intel_create_image_from_dma_bufs2(dri_screen, width, height,
1255 fourcc, DRM_FORMAT_MOD_INVALID,
1256 fds, num_fds, strides, offsets,
1257 yuv_color_space,
1258 sample_range,
1259 horizontal_siting,
1260 vertical_siting,
1261 error,
1262 loaderPrivate);
1263 }
1264
1265 static bool
1266 intel_image_format_is_supported(const struct intel_image_format *fmt)
1267 {
1268 if (fmt->fourcc == __DRI_IMAGE_FOURCC_SARGB8888 ||
1269 fmt->fourcc == __DRI_IMAGE_FOURCC_SABGR8888)
1270 return false;
1271
1272 return true;
1273 }
1274
1275 static GLboolean
1276 intel_query_dma_buf_formats(__DRIscreen *screen, int max,
1277 int *formats, int *count)
1278 {
1279 int num_formats = 0, i;
1280
1281 for (i = 0; i < ARRAY_SIZE(intel_image_formats); i++) {
1282 if (!intel_image_format_is_supported(&intel_image_formats[i]))
1283 continue;
1284
1285 num_formats++;
1286 if (max == 0)
1287 continue;
1288
1289 formats[num_formats - 1] = intel_image_formats[i].fourcc;
1290 if (num_formats >= max)
1291 break;
1292 }
1293
1294 *count = num_formats;
1295 return true;
1296 }
1297
/**
 * Report the DRM format modifiers supported for 'fourcc' on this device.
 *
 * With max == 0 only *count is set; otherwise up to 'max' modifiers are
 * written and the matching external_only flags (if requested) indicate
 * whether the format may only be sampled as an external texture.
 *
 * Returns false for unknown or unsupported (sRGB) fourccs.
 */
static GLboolean
intel_query_dma_buf_modifiers(__DRIscreen *_screen, int fourcc, int max,
                              uint64_t *modifiers,
                              unsigned int *external_only,
                              int *count)
{
   struct intel_screen *screen = _screen->driverPrivate;
   const struct intel_image_format *f;
   int num_mods = 0, i;

   f = intel_image_format_lookup(fourcc);
   if (f == NULL)
      return false;

   if (!intel_image_format_is_supported(f))
      return false;

   for (i = 0; i < ARRAY_SIZE(supported_modifiers); i++) {
      uint64_t modifier = supported_modifiers[i].modifier;
      if (!modifier_is_supported(&screen->devinfo, f, 0, modifier))
         continue;

      /* Count every supported modifier; only store when max > 0. */
      num_mods++;
      if (max == 0)
         continue;

      modifiers[num_mods - 1] = modifier;
      if (num_mods >= max)
         break;
   }

   if (external_only != NULL) {
      /* Subsampled YUV layouts can only be sampled via
       * GL_OES_EGL_image_external; mark those entries external-only.
       */
      for (i = 0; i < num_mods && i < max; i++) {
         if (f->components == __DRI_IMAGE_COMPONENTS_Y_U_V ||
             f->components == __DRI_IMAGE_COMPONENTS_Y_UV ||
             f->components == __DRI_IMAGE_COMPONENTS_Y_XUXV) {
            external_only[i] = GL_TRUE;
         }
         else {
            external_only[i] = GL_FALSE;
         }
      }
   }

   *count = num_mods;
   return true;
}
1345
/**
 * Create a __DRIimage view of one plane of 'parent'.
 *
 * Three cases are handled: a real plane of a planar format, plane 0 of a
 * non-planar image (a straight copy of the parent's layout), and plane 1
 * of a modifier with an auxiliary (CCS) surface.  The child shares the
 * parent's bo (reference taken), it does not copy pixels.
 */
static __DRIimage *
intel_from_planar(__DRIimage *parent, int plane, void *loaderPrivate)
{
   int width, height, offset, stride, size, dri_format;
   __DRIimage *image;

   if (parent == NULL)
      return NULL;

   width = parent->width;
   height = parent->height;

   const struct intel_image_format *f = parent->planar_format;

   if (f && plane < f->nplanes) {
      /* Use the planar format definition. */
      width >>= f->planes[plane].width_shift;
      height >>= f->planes[plane].height_shift;
      dri_format = f->planes[plane].dri_format;
      int index = f->planes[plane].buffer_index;
      offset = parent->offsets[index];
      stride = parent->strides[index];
      size = height * stride;
   } else if (plane == 0) {
      /* The only plane of a non-planar image: copy the parent definition
       * directly. */
      dri_format = parent->dri_format;
      offset = parent->offset;
      stride = parent->pitch;
      size = height * stride;
   } else if (plane == 1 && parent->modifier != DRM_FORMAT_MOD_INVALID &&
              isl_drm_modifier_has_aux(parent->modifier)) {
      /* Auxiliary plane */
      dri_format = parent->dri_format;
      offset = parent->aux_offset;
      stride = parent->aux_pitch;
      size = parent->aux_size;
   } else {
      return NULL;
   }

   /* Reject a plane that would read past the end of the shared bo. */
   if (offset + size > parent->bo->size) {
      _mesa_warning(NULL, "intel_from_planar: subimage out of bounds");
      return NULL;
   }

   image = intel_allocate_image(parent->screen, dri_format, loaderPrivate);
   if (image == NULL)
      return NULL;

   /* Share the parent's storage; the child holds its own bo reference. */
   image->bo = parent->bo;
   brw_bo_reference(parent->bo);
   image->modifier = parent->modifier;

   image->width = width;
   image->height = height;
   image->pitch = stride;
   image->offset = offset;

   intel_image_warn_if_unaligned(image, __func__);

   return image;
}
1409
/* __DRI_IMAGE extension: the entry points loaders use to create, import
 * (names/fds/dma-bufs), query, map, and subdivide __DRIimages.  Unset
 * members (blitImage, getCapabilities) advertise those features as
 * unsupported.
 */
static const __DRIimageExtension intelImageExtension = {
    .base = { __DRI_IMAGE, 16 },

    .createImageFromName                = intel_create_image_from_name,
    .createImageFromRenderbuffer        = intel_create_image_from_renderbuffer,
    .destroyImage                       = intel_destroy_image,
    .createImage                        = intel_create_image,
    .queryImage                         = intel_query_image,
    .dupImage                           = intel_dup_image,
    .validateUsage                      = intel_validate_usage,
    .createImageFromNames               = intel_create_image_from_names,
    .fromPlanar                         = intel_from_planar,
    .createImageFromTexture             = intel_create_image_from_texture,
    .createImageFromFds                 = intel_create_image_from_fds,
    .createImageFromDmaBufs             = intel_create_image_from_dma_bufs,
    .blitImage                          = NULL,
    .getCapabilities                    = NULL,
    .mapImage                           = intel_map_image,
    .unmapImage                         = intel_unmap_image,
    .createImageWithModifiers           = intel_create_image_with_modifiers,
    .createImageFromDmaBufs2            = intel_create_image_from_dma_bufs2,
    .queryDmaBufFormats                 = intel_query_dma_buf_formats,
    .queryDmaBufModifiers               = intel_query_dma_buf_modifiers,
    .queryDmaBufFormatModifierAttribs   = intel_query_format_modifier_attribs,
};
1435
1436 static uint64_t
1437 get_aperture_size(int fd)
1438 {
1439 struct drm_i915_gem_get_aperture aperture;
1440
1441 if (drmIoctl(fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture) != 0)
1442 return 0;
1443
1444 return aperture.aper_size;
1445 }
1446
/**
 * GLX_MESA_query_renderer integer query.
 *
 * Returns 0 on success with the result in value[0], -1 on failure, and
 * falls back to driQueryRendererIntegerCommon() for params not handled
 * here.
 */
static int
brw_query_renderer_integer(__DRIscreen *dri_screen,
                           int param, unsigned int *value)
{
   const struct intel_screen *const screen =
      (struct intel_screen *) dri_screen->driverPrivate;

   switch (param) {
   case __DRI2_RENDERER_VENDOR_ID:
      value[0] = 0x8086;   /* Intel's PCI vendor id */
      return 0;
   case __DRI2_RENDERER_DEVICE_ID:
      value[0] = screen->deviceID;
      return 0;
   case __DRI2_RENDERER_ACCELERATED:
      value[0] = 1;
      return 0;
   case __DRI2_RENDERER_VIDEO_MEMORY: {
      /* Once a batch uses more than 75% of the maximum mappable size, we
       * assume that there's some fragmentation, and we start doing extra
       * flushing, etc. That's the big cliff apps will care about.
       */
      const unsigned gpu_mappable_megabytes =
         screen->aperture_threshold / (1024 * 1024);

      const long system_memory_pages = sysconf(_SC_PHYS_PAGES);
      const long system_page_size = sysconf(_SC_PAGE_SIZE);

      if (system_memory_pages <= 0 || system_page_size <= 0)
         return -1;

      const uint64_t system_memory_bytes = (uint64_t) system_memory_pages
         * (uint64_t) system_page_size;

      const unsigned system_memory_megabytes =
         (unsigned) (system_memory_bytes / (1024 * 1024));

      /* Report whichever is smaller: system RAM or the mappable aperture. */
      value[0] = MIN2(system_memory_megabytes, gpu_mappable_megabytes);
      return 0;
   }
   case __DRI2_RENDERER_UNIFIED_MEMORY_ARCHITECTURE:
      value[0] = 1;
      return 0;
   case __DRI2_RENDERER_HAS_TEXTURE_3D:
      value[0] = 1;
      return 0;
   case __DRI2_RENDERER_HAS_CONTEXT_PRIORITY:
      /* Probe each priority level by actually trying to set it on a
       * context; build a bitmask of the levels the kernel accepts.
       */
      value[0] = 0;
      if (brw_hw_context_set_priority(screen->bufmgr,
                                      0, GEN_CONTEXT_HIGH_PRIORITY) == 0)
         value[0] |= __DRI2_RENDERER_HAS_CONTEXT_PRIORITY_HIGH;
      if (brw_hw_context_set_priority(screen->bufmgr,
                                      0, GEN_CONTEXT_LOW_PRIORITY) == 0)
         value[0] |= __DRI2_RENDERER_HAS_CONTEXT_PRIORITY_LOW;
      /* reset to default last, just in case */
      if (brw_hw_context_set_priority(screen->bufmgr,
                                      0, GEN_CONTEXT_MEDIUM_PRIORITY) == 0)
         value[0] |= __DRI2_RENDERER_HAS_CONTEXT_PRIORITY_MEDIUM;
      return 0;
   case __DRI2_RENDERER_HAS_FRAMEBUFFER_SRGB:
      value[0] = 1;
      return 0;
   default:
      return driQueryRendererIntegerCommon(dri_screen, param, value);
   }

   /* Unreachable: every switch case (including default) returns. */
   return -1;
}
1515
1516 static int
1517 brw_query_renderer_string(__DRIscreen *dri_screen,
1518 int param, const char **value)
1519 {
1520 const struct intel_screen *screen =
1521 (struct intel_screen *) dri_screen->driverPrivate;
1522
1523 switch (param) {
1524 case __DRI2_RENDERER_VENDOR_ID:
1525 value[0] = brw_vendor_string;
1526 return 0;
1527 case __DRI2_RENDERER_DEVICE_ID:
1528 value[0] = brw_get_renderer_string(screen);
1529 return 0;
1530 default:
1531 break;
1532 }
1533
1534 return -1;
1535 }
1536
1537 static void
1538 brw_set_cache_funcs(__DRIscreen *dri_screen,
1539 __DRIblobCacheSet set, __DRIblobCacheGet get)
1540 {
1541 const struct intel_screen *const screen =
1542 (struct intel_screen *) dri_screen->driverPrivate;
1543
1544 if (!screen->disk_cache)
1545 return;
1546
1547 disk_cache_set_callbacks(screen->disk_cache, set, get);
1548 }
1549
/* GLX_MESA_query_renderer entry points. */
static const __DRI2rendererQueryExtension intelRendererQueryExtension = {
   .base = { __DRI2_RENDERER_QUERY, 1 },

   .queryInteger = brw_query_renderer_integer,
   .queryString = brw_query_renderer_string
};

/* Marker extension: advertises robust-context (reset notification)
 * support; it carries no entry points of its own.
 */
static const __DRIrobustnessExtension dri2Robustness = {
   .base = { __DRI2_ROBUSTNESS, 1 }
};

/* Lets the loader install disk-cache (blob) callbacks. */
static const __DRI2blobExtension intelBlobExtension = {
   .base = { __DRI2_BLOB, 1 },
   .set_cache_funcs = brw_set_cache_funcs
};

/* Extension list advertised for ordinary (non-robust) screens. */
static const __DRIextension *screenExtensions[] = {
    &intelTexBufferExtension.base,
    &intelFenceExtension.base,
    &intelFlushExtension.base,
    &intelImageExtension.base,
    &intelRendererQueryExtension.base,
    &dri2ConfigQueryExtension.base,
    &dri2NoErrorExtension.base,
    &intelBlobExtension.base,
    NULL
};

/* Same list plus dri2Robustness, used when robust contexts are available. */
static const __DRIextension *intelRobustScreenExtensions[] = {
    &intelTexBufferExtension.base,
    &intelFenceExtension.base,
    &intelFlushExtension.base,
    &intelImageExtension.base,
    &intelRendererQueryExtension.base,
    &dri2ConfigQueryExtension.base,
    &dri2Robustness.base,
    &dri2NoErrorExtension.base,
    &intelBlobExtension.base,
    NULL
};
1590
1591 static int
1592 intel_get_param(struct intel_screen *screen, int param, int *value)
1593 {
1594 int ret = 0;
1595 struct drm_i915_getparam gp;
1596
1597 memset(&gp, 0, sizeof(gp));
1598 gp.param = param;
1599 gp.value = value;
1600
1601 if (drmIoctl(screen->driScrnPriv->fd, DRM_IOCTL_I915_GETPARAM, &gp) == -1) {
1602 ret = -errno;
1603 if (ret != -EINVAL)
1604 _mesa_warning(NULL, "drm_i915_getparam: %d", ret);
1605 }
1606
1607 return ret;
1608 }
1609
1610 static bool
1611 intel_get_boolean(struct intel_screen *screen, int param)
1612 {
1613 int value = 0;
1614 return (intel_get_param(screen, param, &value) == 0) && value;
1615 }
1616
static int
intel_get_integer(struct intel_screen *screen, int param)
{
   /* Integer GETPARAM query; -1 signals failure (callers treat negative
    * values as "unavailable").
    */
   int value = -1;

   if (intel_get_param(screen, param, &value) != 0)
      return -1;

   return value;
}
1627
1628 static void
1629 intelDestroyScreen(__DRIscreen * sPriv)
1630 {
1631 struct intel_screen *screen = sPriv->driverPrivate;
1632
1633 brw_bufmgr_destroy(screen->bufmgr);
1634 driDestroyOptionInfo(&screen->optionCache);
1635
1636 disk_cache_destroy(screen->disk_cache);
1637
1638 ralloc_free(screen);
1639 sPriv->driverPrivate = NULL;
1640 }
1641
1642
1643 /**
1644 * Create a gl_framebuffer and attach it to __DRIdrawable::driverPrivate.
1645 *
 * This implements driDriverAPI::createNewDrawable, which the DRI layer calls
1647 * when creating a EGLSurface, GLXDrawable, or GLXPixmap. Despite the name,
1648 * this does not allocate GPU memory.
1649 */
static GLboolean
intelCreateBuffer(__DRIscreen *dri_screen,
                  __DRIdrawable * driDrawPriv,
                  const struct gl_config * mesaVis, GLboolean isPixmap)
{
   struct intel_renderbuffer *rb;
   struct intel_screen *screen = (struct intel_screen *)
      dri_screen->driverPrivate;
   mesa_format rgbFormat;
   /* Clamp the requested sample count to one the hardware supports. */
   unsigned num_samples =
      intel_quantize_num_samples(screen, mesaVis->samples);

   if (isPixmap)
      return false;

   struct gl_framebuffer *fb = CALLOC_STRUCT(gl_framebuffer);
   if (!fb)
      return false;

   _mesa_initialize_window_framebuffer(fb, mesaVis);

   /* Debug override (e.g. INTEL_FORCE_MSAA) wins over the visual. */
   if (screen->winsys_msaa_samples_override != -1) {
      num_samples = screen->winsys_msaa_samples_override;
      fb->Visual.samples = num_samples;
   }

   /* Select the winsys color format from the visual's channel sizes and
    * masks.  The red mask disambiguates BGRA-style vs RGBA-style channel
    * order.  Note the final else allocates an sRGB format even for a
    * non-sRGB visual and marks the visual sRGB-capable.
    */
   if (mesaVis->redBits == 10 && mesaVis->alphaBits > 0) {
      rgbFormat = mesaVis->redMask == 0x3ff00000 ? MESA_FORMAT_B10G10R10A2_UNORM
                                                 : MESA_FORMAT_R10G10B10A2_UNORM;
   } else if (mesaVis->redBits == 10) {
      rgbFormat = mesaVis->redMask == 0x3ff00000 ? MESA_FORMAT_B10G10R10X2_UNORM
                                                 : MESA_FORMAT_R10G10B10X2_UNORM;
   } else if (mesaVis->redBits == 5) {
      rgbFormat = mesaVis->redMask == 0x1f ? MESA_FORMAT_R5G6B5_UNORM
                                           : MESA_FORMAT_B5G6R5_UNORM;
   } else if (mesaVis->sRGBCapable) {
      rgbFormat = mesaVis->redMask == 0xff ? MESA_FORMAT_R8G8B8A8_SRGB
                                           : MESA_FORMAT_B8G8R8A8_SRGB;
   } else if (mesaVis->alphaBits == 0) {
      rgbFormat = mesaVis->redMask == 0xff ? MESA_FORMAT_R8G8B8X8_UNORM
                                           : MESA_FORMAT_B8G8R8X8_UNORM;
   } else {
      rgbFormat = mesaVis->redMask == 0xff ? MESA_FORMAT_R8G8B8A8_SRGB
                                           : MESA_FORMAT_B8G8R8A8_SRGB;
      fb->Visual.sRGBCapable = true;
   }

   /* mesaVis->sRGBCapable was set, user is asking for sRGB */
   bool srgb_cap_set = mesaVis->redBits >= 8 && mesaVis->sRGBCapable;

   /* setup the hardware-based renderbuffers */
   rb = intel_create_winsys_renderbuffer(screen, rgbFormat, num_samples);
   _mesa_attach_and_own_rb(fb, BUFFER_FRONT_LEFT, &rb->Base.Base);
   rb->need_srgb = srgb_cap_set;

   if (mesaVis->doubleBufferMode) {
      rb = intel_create_winsys_renderbuffer(screen, rgbFormat, num_samples);
      _mesa_attach_and_own_rb(fb, BUFFER_BACK_LEFT, &rb->Base.Base);
      rb->need_srgb = srgb_cap_set;
   }

   /*
    * Assert here that the gl_config has an expected depth/stencil bit
    * combination: one of d24/s8, d16/s0, d0/s0. (See intelInitScreen2(),
    * which constructs the advertised configs.)
    */
   if (mesaVis->depthBits == 24) {
      assert(mesaVis->stencilBits == 8);

      if (screen->devinfo.has_hiz_and_separate_stencil) {
         /* Separate depth (Z24X8) and stencil (S8) buffers for HiZ. */
         rb = intel_create_private_renderbuffer(screen,
                                                MESA_FORMAT_Z24_UNORM_X8_UINT,
                                                num_samples);
         _mesa_attach_and_own_rb(fb, BUFFER_DEPTH, &rb->Base.Base);
         rb = intel_create_private_renderbuffer(screen, MESA_FORMAT_S_UINT8,
                                                num_samples);
         _mesa_attach_and_own_rb(fb, BUFFER_STENCIL, &rb->Base.Base);
      } else {
         /*
          * Use combined depth/stencil. Note that the renderbuffer is
          * attached to two attachment points.
          */
         rb = intel_create_private_renderbuffer(screen,
                                                MESA_FORMAT_Z24_UNORM_S8_UINT,
                                                num_samples);
         _mesa_attach_and_own_rb(fb, BUFFER_DEPTH, &rb->Base.Base);
         _mesa_attach_and_reference_rb(fb, BUFFER_STENCIL, &rb->Base.Base);
      }
   }
   else if (mesaVis->depthBits == 16) {
      assert(mesaVis->stencilBits == 0);
      rb = intel_create_private_renderbuffer(screen, MESA_FORMAT_Z_UNORM16,
                                             num_samples);
      _mesa_attach_and_own_rb(fb, BUFFER_DEPTH, &rb->Base.Base);
   }
   else {
      assert(mesaVis->depthBits == 0);
      assert(mesaVis->stencilBits == 0);
   }

   /* now add any/all software-based renderbuffers we may need */
   _swrast_add_soft_renderbuffers(fb,
                                  false, /* never sw color */
                                  false, /* never sw depth */
                                  false, /* never sw stencil */
                                  mesaVis->accumRedBits > 0,
                                  false, /* never sw alpha */
                                  false /* never sw aux */ );
   driDrawPriv->driverPrivate = fb;

   return true;
}
1762
1763 static void
1764 intelDestroyBuffer(__DRIdrawable * driDrawPriv)
1765 {
1766 struct gl_framebuffer *fb = driDrawPriv->driverPrivate;
1767
1768 _mesa_reference_framebuffer(&fb, NULL);
1769 }
1770
1771 static void
1772 intel_cs_timestamp_frequency(struct intel_screen *screen)
1773 {
1774 /* We shouldn't need to update gen_device_info.timestamp_frequency prior to
1775 * gen10, PCI-id is enough to figure it out.
1776 */
1777 assert(screen->devinfo.gen >= 10);
1778
1779 int ret, freq;
1780
1781 ret = intel_get_param(screen, I915_PARAM_CS_TIMESTAMP_FREQUENCY,
1782 &freq);
1783 if (ret < 0) {
1784 _mesa_warning(NULL,
1785 "Kernel 4.15 required to read the CS timestamp frequency.\n");
1786 return;
1787 }
1788
1789 screen->devinfo.timestamp_frequency = freq;
1790 }
1791
/**
 * Query subslice and EU counts from the kernel (gen8+ only).
 *
 * Leaves screen->subslice_total / screen->eu_total at -1 when the kernel
 * cannot report them; -EINVAL is tolerated (pre-4.1 kernels simply don't
 * know these parameters), any other error resets both and warns.
 */
static void
intel_detect_sseu(struct intel_screen *screen)
{
   assert(screen->devinfo.gen >= 8);
   int ret;

   screen->subslice_total = -1;
   screen->eu_total = -1;

   ret = intel_get_param(screen, I915_PARAM_SUBSLICE_TOTAL,
                         &screen->subslice_total);
   if (ret < 0 && ret != -EINVAL)
      goto err_out;

   ret = intel_get_param(screen,
                         I915_PARAM_EU_TOTAL, &screen->eu_total);
   if (ret < 0 && ret != -EINVAL)
      goto err_out;

   /* Without this information, we cannot get the right Braswell brandstrings,
    * and we have to use conservative numbers for GPGPU on many platforms, but
    * otherwise, things will just work.
    */
   if (screen->subslice_total < 1 || screen->eu_total < 1)
      _mesa_warning(NULL,
                    "Kernel 4.1 required to properly query GPU properties.\n");

   return;

 err_out:
   screen->subslice_total = -1;
   screen->eu_total = -1;
   _mesa_warning(NULL, "Failed to query GPU properties (%s).\n", strerror(-ret));
}
1826
1827 static bool
1828 intel_init_bufmgr(struct intel_screen *screen)
1829 {
1830 __DRIscreen *dri_screen = screen->driScrnPriv;
1831
1832 if (getenv("INTEL_NO_HW") != NULL)
1833 screen->no_hw = true;
1834
1835 screen->bufmgr = brw_bufmgr_init(&screen->devinfo, dri_screen->fd);
1836 if (screen->bufmgr == NULL) {
1837 fprintf(stderr, "[%s:%u] Error initializing buffer manager.\n",
1838 __func__, __LINE__);
1839 return false;
1840 }
1841
1842 if (!intel_get_boolean(screen, I915_PARAM_HAS_EXEC_NO_RELOC)) {
1843 fprintf(stderr, "[%s: %u] Kernel 3.9 required.\n", __func__, __LINE__);
1844 return false;
1845 }
1846
1847 return true;
1848 }
1849
1850 static bool
1851 intel_detect_swizzling(struct intel_screen *screen)
1852 {
1853 uint32_t tiling = I915_TILING_X;
1854 uint32_t swizzle_mode = 0;
1855 struct brw_bo *buffer =
1856 brw_bo_alloc_tiled(screen->bufmgr, "swizzle test", 32768,
1857 BRW_MEMZONE_OTHER, tiling, 512, 0);
1858 if (buffer == NULL)
1859 return false;
1860
1861 brw_bo_get_tiling(buffer, &tiling, &swizzle_mode);
1862 brw_bo_unreference(buffer);
1863
1864 return swizzle_mode != I915_BIT_6_SWIZZLE_NONE;
1865 }
1866
/**
 * Probe how the kernel exposes the TIMESTAMP register.
 *
 * Returns: 3 = full 36-bit read supported, 2 = value shifted into the
 * upper dword (buggy 64-bit kernels), 1 = unshifted, 0 = no usable
 * timestamp.
 */
static int
intel_detect_timestamp(struct intel_screen *screen)
{
   uint64_t dummy = 0, last = 0;
   int upper, lower, loops;

   /* On 64bit systems, some old kernels trigger a hw bug resulting in the
    * TIMESTAMP register being shifted and the low 32bits always zero.
    *
    * More recent kernels offer an interface to read the full 36bits
    * everywhere.
    */
   if (brw_reg_read(screen->bufmgr, TIMESTAMP | 1, &dummy) == 0)
      return 3;

   /* Determine if we have a 32bit or 64bit kernel by inspecting the
    * upper 32bits for a rapidly changing timestamp.
    */
   if (brw_reg_read(screen->bufmgr, TIMESTAMP, &last))
      return 0;

   upper = lower = 0;
   for (loops = 0; loops < 10; loops++) {
      /* The TIMESTAMP should change every 80ns, so several round trips
       * through the kernel should be enough to advance it.
       */
      if (brw_reg_read(screen->bufmgr, TIMESTAMP, &dummy))
         return 0;

      /* Count which half of the 64-bit read is advancing between samples. */
      upper += (dummy >> 32) != (last >> 32);
      if (upper > 1) /* beware 32bit counter overflow */
         return 2; /* upper dword holds the low 32bits of the timestamp */

      lower += (dummy & 0xffffffff) != (last & 0xffffffff);
      if (lower > 1)
         return 1; /* timestamp is unshifted */

      last = dummy;
   }

   /* No advancement? No timestamp! */
   return 0;
}
1910
1911 /**
1912 * Test if we can use MI_LOAD_REGISTER_MEM from an untrusted batchbuffer.
1913 *
1914 * Some combinations of hardware and kernel versions allow this feature,
1915 * while others don't. Instead of trying to enumerate every case, just
1916 * try and write a register and see if works.
1917 */
static bool
intel_detect_pipelined_register(struct intel_screen *screen,
                                int reg, uint32_t expected_value, bool reset)
{
   /* Probe whether 'reg' can be written from an (untrusted) batchbuffer:
    * LRI the expected value into the register, SRM it back out into a
    * scratch bo, and check what landed there.  Returns true only if the
    * round-tripped value matches.
    */
   if (screen->no_hw)
      return false;

   struct brw_bo *results, *bo;
   uint32_t *batch;
   uint32_t offset = 0;
   void *map;
   bool success = false;

   /* Create a zero'ed temporary buffer for reading our results */
   results = brw_bo_alloc(screen->bufmgr, "registers", 4096, BRW_MEMZONE_OTHER);
   if (results == NULL)
      goto err;

   bo = brw_bo_alloc(screen->bufmgr, "batchbuffer", 4096, BRW_MEMZONE_OTHER);
   if (bo == NULL)
      goto err_results;

   map = brw_bo_map(NULL, bo, MAP_WRITE);
   if (!map)
      goto err_batch;

   batch = map;

   /* Write the register. */
   *batch++ = MI_LOAD_REGISTER_IMM | (3 - 2);
   *batch++ = reg;
   *batch++ = expected_value;

   /* Save the register's value back to the buffer. */
   *batch++ = MI_STORE_REGISTER_MEM | (3 - 2);
   *batch++ = reg;
   /* Relocation so the SRM destination address points into 'results'. */
   struct drm_i915_gem_relocation_entry reloc = {
      .offset = (char *) batch - (char *) map,
      .delta = offset * sizeof(uint32_t),
      .target_handle = results->gem_handle,
      .read_domains = I915_GEM_DOMAIN_INSTRUCTION,
      .write_domain = I915_GEM_DOMAIN_INSTRUCTION,
   };
   *batch++ = reloc.presumed_offset + reloc.delta;

   /* And afterwards clear the register */
   if (reset) {
      *batch++ = MI_LOAD_REGISTER_IMM | (3 - 2);
      *batch++ = reg;
      *batch++ = 0;
   }

   *batch++ = MI_BATCH_BUFFER_END;

   struct drm_i915_gem_exec_object2 exec_objects[2] = {
      {
         .handle = results->gem_handle,
      },
      {
         .handle = bo->gem_handle,
         .relocation_count = 1,
         .relocs_ptr = (uintptr_t) &reloc,
      }
   };

   struct drm_i915_gem_execbuffer2 execbuf = {
      .buffers_ptr = (uintptr_t) exec_objects,
      .buffer_count = 2,
      .batch_len = ALIGN((char *) batch - (char *) map, 8),
      .flags = I915_EXEC_RENDER,
   };

   /* Don't bother with error checking - if the execbuf fails, the
    * value won't be written and we'll just report that there's no access.
    */
   __DRIscreen *dri_screen = screen->driScrnPriv;
   drmIoctl(dri_screen->fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);

   /* Check whether the value got written. */
   void *results_map = brw_bo_map(NULL, results, MAP_READ);
   if (results_map) {
      success = *((uint32_t *)results_map + offset) == expected_value;
      brw_bo_unmap(results);
   }

   /* Success path falls through the labels below to unreference both bos. */
 err_batch:
   brw_bo_unreference(bo);
 err_results:
   brw_bo_unreference(results);
 err:
   return success;
}
2010
2011 static bool
2012 intel_detect_pipelined_so(struct intel_screen *screen)
2013 {
2014 const struct gen_device_info *devinfo = &screen->devinfo;
2015
2016 /* Supposedly, Broadwell just works. */
2017 if (devinfo->gen >= 8)
2018 return true;
2019
2020 if (devinfo->gen <= 6)
2021 return false;
2022
2023 /* See the big explanation about command parser versions below */
2024 if (screen->cmd_parser_version >= (devinfo->is_haswell ? 7 : 2))
2025 return true;
2026
2027 /* We use SO_WRITE_OFFSET0 since you're supposed to write it (unlike the
2028 * statistics registers), and we already reset it to zero before using it.
2029 */
2030 return intel_detect_pipelined_register(screen,
2031 GEN7_SO_WRITE_OFFSET(0),
2032 0x1337d0d0,
2033 false);
2034 }
2035
2036 /**
2037 * Return array of MSAA modes supported by the hardware. The array is
2038 * zero-terminated and sorted in decreasing order.
2039 */
2040 const int*
2041 intel_supported_msaa_modes(const struct intel_screen *screen)
2042 {
2043 static const int gen9_modes[] = {16, 8, 4, 2, 0, -1};
2044 static const int gen8_modes[] = {8, 4, 2, 0, -1};
2045 static const int gen7_modes[] = {8, 4, 0, -1};
2046 static const int gen6_modes[] = {4, 0, -1};
2047 static const int gen4_modes[] = {0, -1};
2048
2049 if (screen->devinfo.gen >= 9) {
2050 return gen9_modes;
2051 } else if (screen->devinfo.gen >= 8) {
2052 return gen8_modes;
2053 } else if (screen->devinfo.gen >= 7) {
2054 return gen7_modes;
2055 } else if (screen->devinfo.gen == 6) {
2056 return gen6_modes;
2057 } else {
2058 return gen4_modes;
2059 }
2060 }
2061
2062 static unsigned
2063 intel_loader_get_cap(const __DRIscreen *dri_screen, enum dri_loader_cap cap)
2064 {
2065 if (dri_screen->dri2.loader && dri_screen->dri2.loader->base.version >= 4 &&
2066 dri_screen->dri2.loader->getCapability)
2067 return dri_screen->dri2.loader->getCapability(dri_screen->loaderPrivate, cap);
2068
2069 if (dri_screen->image.loader && dri_screen->image.loader->base.version >= 2 &&
2070 dri_screen->image.loader->getCapability)
2071 return dri_screen->image.loader->getCapability(dri_screen->loaderPrivate, cap);
2072
2073 return 0;
2074 }
2075
/**
 * Build the list of framebuffer configs exposed by this driver.
 *
 * Three groups are generated, in a deliberate order:
 *   1. singlesample configs without an accumulation buffer,
 *   2. a minimal set of singlesample configs with an accumulation buffer,
 *   3. multisample configs (gen6+ only).
 * The ordering within and between groups matters — see the comments below.
 *
 * Returns a NULL-terminated array of configs, or NULL on failure.
 */
static __DRIconfig**
intel_screen_make_configs(__DRIscreen *dri_screen)
{
   static const mesa_format formats[] = {
      MESA_FORMAT_B5G6R5_UNORM,
      MESA_FORMAT_B8G8R8A8_UNORM,
      MESA_FORMAT_B8G8R8X8_UNORM,

      MESA_FORMAT_B8G8R8A8_SRGB,

      /* For 10 bpc, 30 bit depth framebuffers. */
      MESA_FORMAT_B10G10R10A2_UNORM,
      MESA_FORMAT_B10G10R10X2_UNORM,

      /* The 32-bit RGBA format must not precede the 32-bit BGRA format.
       * Likewise for RGBX and BGRX. Otherwise, the GLX client and the GLX
       * server may disagree on which format the GLXFBConfig represents,
       * resulting in swapped color channels.
       *
       * The problem, as of 2017-05-30:
       * When matching a GLXFBConfig to a __DRIconfig, GLX ignores the channel
       * order and chooses the first __DRIconfig with the expected channel
       * sizes. Specifically, GLX compares the GLXFBConfig's and __DRIconfig's
       * __DRI_ATTRIB_{CHANNEL}_SIZE but ignores __DRI_ATTRIB_{CHANNEL}_MASK.
       *
       * EGL does not suffer from this problem. It correctly compares the
       * channel masks when matching EGLConfig to __DRIconfig.
       */

      /* Required by Android, for HAL_PIXEL_FORMAT_RGBA_8888. */
      MESA_FORMAT_R8G8B8A8_UNORM,

      /* Required by Android, for HAL_PIXEL_FORMAT_RGBX_8888. */
      MESA_FORMAT_R8G8B8X8_UNORM,

      MESA_FORMAT_R8G8B8A8_SRGB,
   };

   /* GLX_SWAP_COPY_OML is not supported due to page flipping. */
   static const GLenum back_buffer_modes[] = {
      __DRI_ATTRIB_SWAP_UNDEFINED, __DRI_ATTRIB_SWAP_NONE
   };

   static const uint8_t singlesample_samples[1] = {0};

   struct intel_screen *screen = dri_screen->driverPrivate;
   const struct gen_device_info *devinfo = &screen->devinfo;
   /* Index 0 of each array is the "no depth/stencil" case; further entries
    * are filled in per color format below.
    */
   uint8_t depth_bits[4], stencil_bits[4];
   __DRIconfig **configs = NULL;

   /* Expose only BGRA ordering if the loader doesn't support RGBA ordering. */
   unsigned num_formats;
   if (intel_loader_get_cap(dri_screen, DRI_LOADER_CAP_RGBA_ORDERING))
      num_formats = ARRAY_SIZE(formats);
   else
      num_formats = ARRAY_SIZE(formats) - 3; /* all - RGBA_ORDERING formats */

   /* Shall we expose 10 bpc formats? */
   bool allow_rgb10_configs = driQueryOptionb(&screen->optionCache,
                                              "allow_rgb10_configs");

   /* Generate singlesample configs without accumulation buffer. */
   for (unsigned i = 0; i < num_formats; i++) {
      __DRIconfig **new_configs;
      int num_depth_stencil_bits = 2;

      if (!allow_rgb10_configs &&
          (formats[i] == MESA_FORMAT_B10G10R10A2_UNORM ||
           formats[i] == MESA_FORMAT_B10G10R10X2_UNORM))
         continue;

      /* Starting with DRI2 protocol version 1.1 we can request a depth/stencil
       * buffer that has a different number of bits per pixel than the color
       * buffer, gen >= 6 supports this.
       */
      depth_bits[0] = 0;
      stencil_bits[0] = 0;

      if (formats[i] == MESA_FORMAT_B5G6R5_UNORM) {
         depth_bits[1] = 16;
         stencil_bits[1] = 0;
         if (devinfo->gen >= 6) {
            depth_bits[2] = 24;
            stencil_bits[2] = 8;
            num_depth_stencil_bits = 3;
         }
      } else {
         depth_bits[1] = 24;
         stencil_bits[1] = 8;
      }

      new_configs = driCreateConfigs(formats[i],
                                     depth_bits,
                                     stencil_bits,
                                     num_depth_stencil_bits,
                                     back_buffer_modes, 2,
                                     singlesample_samples, 1,
                                     false, false);
      configs = driConcatConfigs(configs, new_configs);
   }

   /* Generate the minimum possible set of configs that include an
    * accumulation buffer.
    */
   for (unsigned i = 0; i < num_formats; i++) {
      __DRIconfig **new_configs;

      if (!allow_rgb10_configs &&
          (formats[i] == MESA_FORMAT_B10G10R10A2_UNORM ||
           formats[i] == MESA_FORMAT_B10G10R10X2_UNORM))
         continue;

      if (formats[i] == MESA_FORMAT_B5G6R5_UNORM) {
         depth_bits[0] = 16;
         stencil_bits[0] = 0;
      } else {
         depth_bits[0] = 24;
         stencil_bits[0] = 8;
      }

      new_configs = driCreateConfigs(formats[i],
                                     depth_bits, stencil_bits, 1,
                                     back_buffer_modes, 1,
                                     singlesample_samples, 1,
                                     true, false);
      configs = driConcatConfigs(configs, new_configs);
   }

   /* Generate multisample configs.
    *
    * This loop breaks early, and hence is a no-op, on gen < 6.
    *
    * Multisample configs must follow the singlesample configs in order to
    * work around an X server bug present in 1.12. The X server chooses to
    * associate the first listed RGBA888-Z24S8 config, regardless of its
    * sample count, with the 32-bit depth visual used for compositing.
    *
    * Only doublebuffer configs with GLX_SWAP_UNDEFINED_OML behavior are
    * supported. Singlebuffer configs are not supported because no one wants
    * them.
    */
   for (unsigned i = 0; i < num_formats; i++) {
      if (devinfo->gen < 6)
         break;

      if (!allow_rgb10_configs &&
          (formats[i] == MESA_FORMAT_B10G10R10A2_UNORM ||
           formats[i] == MESA_FORMAT_B10G10R10X2_UNORM))
         continue;

      __DRIconfig **new_configs;
      const int num_depth_stencil_bits = 2;
      int num_msaa_modes = 0;
      const uint8_t *multisample_samples = NULL;

      depth_bits[0] = 0;
      stencil_bits[0] = 0;

      if (formats[i] == MESA_FORMAT_B5G6R5_UNORM) {
         depth_bits[1] = 16;
         stencil_bits[1] = 0;
      } else {
         depth_bits[1] = 24;
         stencil_bits[1] = 8;
      }

      /* Sample counts advertised here must agree with
       * intel_supported_msaa_modes() above.
       */
      if (devinfo->gen >= 9) {
         static const uint8_t multisample_samples_gen9[] = {2, 4, 8, 16};
         multisample_samples = multisample_samples_gen9;
         num_msaa_modes = ARRAY_SIZE(multisample_samples_gen9);
      } else if (devinfo->gen == 8) {
         static const uint8_t multisample_samples_gen8[] = {2, 4, 8};
         multisample_samples = multisample_samples_gen8;
         num_msaa_modes = ARRAY_SIZE(multisample_samples_gen8);
      } else if (devinfo->gen == 7) {
         static const uint8_t multisample_samples_gen7[] = {4, 8};
         multisample_samples = multisample_samples_gen7;
         num_msaa_modes = ARRAY_SIZE(multisample_samples_gen7);
      } else if (devinfo->gen == 6) {
         static const uint8_t multisample_samples_gen6[] = {4};
         multisample_samples = multisample_samples_gen6;
         num_msaa_modes = ARRAY_SIZE(multisample_samples_gen6);
      }

      new_configs = driCreateConfigs(formats[i],
                                     depth_bits,
                                     stencil_bits,
                                     num_depth_stencil_bits,
                                     back_buffer_modes, 1,
                                     multisample_samples,
                                     num_msaa_modes,
                                     false, false);
      configs = driConcatConfigs(configs, new_configs);
   }

   if (configs == NULL) {
      fprintf(stderr, "[%s:%u] Error creating FBConfig!\n", __func__,
              __LINE__);
      return NULL;
   }

   return configs;
}
2279
2280 static void
2281 set_max_gl_versions(struct intel_screen *screen)
2282 {
2283 __DRIscreen *dri_screen = screen->driScrnPriv;
2284 const bool has_astc = screen->devinfo.gen >= 9;
2285
2286 switch (screen->devinfo.gen) {
2287 case 11:
2288 case 10:
2289 case 9:
2290 case 8:
2291 dri_screen->max_gl_core_version = 45;
2292 dri_screen->max_gl_compat_version = 30;
2293 dri_screen->max_gl_es1_version = 11;
2294 dri_screen->max_gl_es2_version = has_astc ? 32 : 31;
2295 break;
2296 case 7:
2297 dri_screen->max_gl_core_version = 33;
2298 if (can_do_pipelined_register_writes(screen)) {
2299 dri_screen->max_gl_core_version = 42;
2300 if (screen->devinfo.is_haswell && can_do_compute_dispatch(screen))
2301 dri_screen->max_gl_core_version = 43;
2302 if (screen->devinfo.is_haswell && can_do_mi_math_and_lrr(screen))
2303 dri_screen->max_gl_core_version = 45;
2304 }
2305 dri_screen->max_gl_compat_version = 30;
2306 dri_screen->max_gl_es1_version = 11;
2307 dri_screen->max_gl_es2_version = screen->devinfo.is_haswell ? 31 : 30;
2308 break;
2309 case 6:
2310 dri_screen->max_gl_core_version = 33;
2311 dri_screen->max_gl_compat_version = 30;
2312 dri_screen->max_gl_es1_version = 11;
2313 dri_screen->max_gl_es2_version = 30;
2314 break;
2315 case 5:
2316 case 4:
2317 dri_screen->max_gl_core_version = 0;
2318 dri_screen->max_gl_compat_version = 21;
2319 dri_screen->max_gl_es1_version = 11;
2320 dri_screen->max_gl_es2_version = 20;
2321 break;
2322 default:
2323 unreachable("unrecognized intel_screen::gen");
2324 }
2325 }
2326
2327 /**
2328 * Return the revision (generally the revid field of the PCI header) of the
2329 * graphics device.
2330 */
2331 int
2332 intel_device_get_revision(int fd)
2333 {
2334 struct drm_i915_getparam gp;
2335 int revision;
2336 int ret;
2337
2338 memset(&gp, 0, sizeof(gp));
2339 gp.param = I915_PARAM_REVISION;
2340 gp.value = &revision;
2341
2342 ret = drmCommandWriteRead(fd, DRM_I915_GETPARAM, &gp, sizeof(gp));
2343 if (ret)
2344 revision = -1;
2345
2346 return revision;
2347 }
2348
2349 static void
2350 shader_debug_log_mesa(void *data, const char *fmt, ...)
2351 {
2352 struct brw_context *brw = (struct brw_context *)data;
2353 va_list args;
2354
2355 va_start(args, fmt);
2356 GLuint msg_id = 0;
2357 _mesa_gl_vdebug(&brw->ctx, &msg_id,
2358 MESA_DEBUG_SOURCE_SHADER_COMPILER,
2359 MESA_DEBUG_TYPE_OTHER,
2360 MESA_DEBUG_SEVERITY_NOTIFICATION, fmt, args);
2361 va_end(args);
2362 }
2363
2364 static void
2365 shader_perf_log_mesa(void *data, const char *fmt, ...)
2366 {
2367 struct brw_context *brw = (struct brw_context *)data;
2368
2369 va_list args;
2370 va_start(args, fmt);
2371
2372 if (unlikely(INTEL_DEBUG & DEBUG_PERF)) {
2373 va_list args_copy;
2374 va_copy(args_copy, args);
2375 vfprintf(stderr, fmt, args_copy);
2376 va_end(args_copy);
2377 }
2378
2379 if (brw->perf_debug) {
2380 GLuint msg_id = 0;
2381 _mesa_gl_vdebug(&brw->ctx, &msg_id,
2382 MESA_DEBUG_SOURCE_SHADER_COMPILER,
2383 MESA_DEBUG_TYPE_PERFORMANCE,
2384 MESA_DEBUG_SEVERITY_MEDIUM, fmt, args);
2385 }
2386 va_end(args);
2387 }
2388
/**
 * This is the driver specific part of the createNewScreen entry point.
 * Called when using DRI2.
 *
 * Detects the device, queries kernel features, and configures the screen's
 * compiler and extension list.
 *
 * \return the struct gl_config supported by this driver, or NULL on failure
 */
static const
__DRIconfig **intelInitScreen2(__DRIscreen *dri_screen)
{
   struct intel_screen *screen;

   /* The image loader needs no version check; a plain DRI2 loader must be
    * new enough to supply getBuffersWithFormat().
    */
   if (dri_screen->image.loader) {
   } else if (dri_screen->dri2.loader->base.version <= 2 ||
       dri_screen->dri2.loader->getBuffersWithFormat == NULL) {
      fprintf(stderr,
              "\nERROR! DRI2 loader with getBuffersWithFormat() "
              "support required\n");
      return NULL;
   }

   /* Allocate the private area */
   screen = rzalloc(NULL, struct intel_screen);
   if (!screen) {
      fprintf(stderr, "\nERROR! Allocating private area failed\n");
      return NULL;
   }
   /* parse information in __driConfigOptions */
   driOptionCache options;
   memset(&options, 0, sizeof(options));

   driParseOptionInfo(&options, brw_config_options.xml);
   driParseConfigFiles(&screen->optionCache, &options, dri_screen->myNum, "i965");
   driDestroyOptionCache(&options);

   screen->driScrnPriv = dri_screen;
   dri_screen->driverPrivate = (void *) screen;

   /* A PCI-ID override implies there is no real hardware to submit to. */
   screen->deviceID = gen_get_pci_device_id_override();
   if (screen->deviceID < 0)
      screen->deviceID = intel_get_integer(screen, I915_PARAM_CHIPSET_ID);
   else
      screen->no_hw = true;

   /* NOTE(review): this and the next early return leak `screen` (ralloc'd
    * with a NULL context above) — confirm whether the DRI core runs
    * DestroyScreen when InitScreen returns NULL.
    */
   if (!gen_get_device_info(screen->deviceID, &screen->devinfo))
      return NULL;

   if (!intel_init_bufmgr(screen))
      return NULL;

   const struct gen_device_info *devinfo = &screen->devinfo;

   brw_process_intel_debug_variable();

   /* Shader-time instrumentation is only implemented for gen7+. */
   if ((INTEL_DEBUG & DEBUG_SHADER_TIME) && devinfo->gen < 7) {
      fprintf(stderr,
              "shader_time debugging requires gen7 (Ivybridge) or better.\n");
      INTEL_DEBUG &= ~DEBUG_SHADER_TIME;
   }

   if (intel_get_integer(screen, I915_PARAM_MMAP_GTT_VERSION) >= 1) {
      /* Theoretically unlimited! At least for individual objects...
       *
       * Currently the entire (global) address space for all GTT maps is
       * limited to 64bits. That is all objects on the system that are
       * setup for GTT mmapping must fit within 64bits. An attempt to use
       * one that exceeds the limit will fail in brw_bo_map_gtt().
       *
       * Long before we hit that limit, we will be practically limited by
       * that any single object must fit in physical memory (RAM). The upper
       * limit on the CPU's address space is currently 48bits (Skylake), of
       * which only 39bits can be physical memory. (The GPU itself also has
       * a 48bit addressable virtual space.) We can fit over 32 million
       * objects of the current maximum allocable size before running out
       * of mmap space.
       */
      screen->max_gtt_map_object_size = UINT64_MAX;
   } else {
      /* Estimate the size of the mappable aperture into the GTT. There's an
       * ioctl to get the whole GTT size, but not one to get the mappable subset.
       * It turns out it's basically always 256MB, though some ancient hardware
       * was smaller.
       */
      uint32_t gtt_size = 256 * 1024 * 1024;

      /* We don't want to map two objects such that a memcpy between them would
       * just fault one mapping in and then the other over and over forever. So
       * we would need to divide the GTT size by 2. Additionally, some GTT is
       * taken up by things like the framebuffer and the ringbuffer and such, so
       * be more conservative.
       */
      screen->max_gtt_map_object_size = gtt_size / 4;
   }

   screen->aperture_threshold = get_aperture_size(dri_screen->fd) * 3 / 4;

   screen->hw_has_swizzling = intel_detect_swizzling(screen);
   screen->hw_has_timestamp = intel_detect_timestamp(screen);

   isl_device_init(&screen->isl_dev, &screen->devinfo,
                   screen->hw_has_swizzling);

   if (devinfo->gen >= 10)
      intel_cs_timestamp_frequency(screen);

   /* GENs prior to 8 do not support EU/Subslice info */
   if (devinfo->gen >= 8) {
      intel_detect_sseu(screen);
   } else if (devinfo->gen == 7) {
      screen->subslice_total = 1 << (devinfo->gt - 1);
   }

   /* Gen7-7.5 kernel requirements / command parser saga:
    *
    * - pre-v3.16:
    * Haswell and Baytrail cannot use any privileged batchbuffer features.
    *
    * Ivybridge has aliasing PPGTT on by default, which accidentally marks
    * all batches secure, allowing them to use any feature with no checking.
    * This is effectively equivalent to a command parser version of
    * \infinity - everything is possible.
    *
    * The command parser does not exist, and querying the version will
    * return -EINVAL.
    *
    * - v3.16:
    * The kernel enables the command parser by default, for systems with
    * aliasing PPGTT enabled (Ivybridge and Haswell). However, the
    * hardware checker is still enabled, so Haswell and Baytrail cannot
    * do anything.
    *
    * Ivybridge goes from "everything is possible" to "only what the
    * command parser allows" (if the user boots with i915.cmd_parser=0,
    * then everything is possible again). We can only safely use features
    * allowed by the supported command parser version.
    *
    * Annoyingly, I915_PARAM_CMD_PARSER_VERSION reports the static version
    * implemented by the kernel, even if it's turned off. So, checking
    * for version > 0 does not mean that you can write registers. We have
    * to try it and see. The version does, however, indicate the age of
    * the kernel.
    *
    * Instead of matching the hardware checker's behavior of converting
    * privileged commands to MI_NOOP, it makes execbuf2 start returning
    * -EINVAL, making it dangerous to try and use privileged features.
    *
    * Effective command parser versions:
    * - Haswell: 0 (reporting 1, writes don't work)
    * - Baytrail: 0 (reporting 1, writes don't work)
    * - Ivybridge: 1 (enabled) or infinite (disabled)
    *
    * - v3.17:
    * Baytrail aliasing PPGTT is enabled, making it like Ivybridge:
    * effectively version 1 (enabled) or infinite (disabled).
    *
    * - v3.19: f1f55cc0556031c8ee3fe99dae7251e78b9b653b
    * Command parser v2 supports predicate writes.
    *
    * - Haswell: 0 (reporting 1, writes don't work)
    * - Baytrail: 2 (enabled) or infinite (disabled)
    * - Ivybridge: 2 (enabled) or infinite (disabled)
    *
    * So version >= 2 is enough to know that Ivybridge and Baytrail
    * will work. Haswell still can't do anything.
    *
    * - v4.0: Version 3 happened. Largely not relevant.
    *
    * - v4.1: 6702cf16e0ba8b0129f5aa1b6609d4e9c70bc13b
    * L3 config registers are properly saved and restored as part
    * of the hardware context. We can approximately detect this point
    * in time by checking if I915_PARAM_REVISION is recognized - it
    * landed in a later commit, but in the same release cycle.
    *
    * - v4.2: 245054a1fe33c06ad233e0d58a27ec7b64db9284
    * Command parser finally gains secure batch promotion. On Haswell,
    * the hardware checker gets disabled, which finally allows it to do
    * privileged commands.
    *
    * I915_PARAM_CMD_PARSER_VERSION reports 3. Effective versions:
    * - Haswell: 3 (enabled) or 0 (disabled)
    * - Baytrail: 3 (enabled) or infinite (disabled)
    * - Ivybridge: 3 (enabled) or infinite (disabled)
    *
    * Unfortunately, detecting this point in time is tricky, because
    * no version bump happened when this important change occurred.
    * On Haswell, if we can write any register, then the kernel is at
    * least this new, and we can start trusting the version number.
    *
    * - v4.4: 2bbe6bbb0dc94fd4ce287bdac9e1bd184e23057b and
    * Command parser reaches version 4, allowing access to Haswell
    * atomic scratch and chicken3 registers. If version >= 4, we know
    * the kernel is new enough to support privileged features on all
    * hardware. However, the user might have disabled it...and the
    * kernel will still report version 4. So we still have to guess
    * and check.
    *
    * - v4.4: 7b9748cb513a6bef4af87b79f0da3ff7e8b56cd8
    * Command parser v5 whitelists indirect compute shader dispatch
    * registers, needed for OpenGL 4.3 and later.
    *
    * - v4.8:
    * Command parser v7 lets us use MI_MATH on Haswell.
    *
    * Additionally, the kernel begins reporting version 0 when
    * the command parser is disabled, allowing us to skip the
    * guess-and-check step on Haswell. Unfortunately, this also
    * means that we can no longer use it as an indicator of the
    * age of the kernel.
    */
   if (intel_get_param(screen, I915_PARAM_CMD_PARSER_VERSION,
                       &screen->cmd_parser_version) < 0) {
      /* Command parser does not exist - getparam is unrecognized */
      screen->cmd_parser_version = 0;
   }

   /* Kernel 4.13 required for exec object capture */
   if (intel_get_boolean(screen, I915_PARAM_HAS_EXEC_CAPTURE)) {
      screen->kernel_features |= KERNEL_ALLOWS_EXEC_CAPTURE;
   }

   if (intel_get_boolean(screen, I915_PARAM_HAS_EXEC_BATCH_FIRST)) {
      screen->kernel_features |= KERNEL_ALLOWS_EXEC_BATCH_FIRST;
   }

   if (!intel_detect_pipelined_so(screen)) {
      /* We can't do anything, so the effective version is 0. */
      screen->cmd_parser_version = 0;
   } else {
      screen->kernel_features |= KERNEL_ALLOWS_SOL_OFFSET_WRITES;
   }

   if (devinfo->gen >= 8 || screen->cmd_parser_version >= 2)
      screen->kernel_features |= KERNEL_ALLOWS_PREDICATE_WRITES;

   /* Haswell requires command parser version 4 in order to have L3
    * atomic scratch1 and chicken3 bits
    */
   if (devinfo->is_haswell && screen->cmd_parser_version >= 4) {
      screen->kernel_features |=
         KERNEL_ALLOWS_HSW_SCRATCH1_AND_ROW_CHICKEN3;
   }

   /* Haswell requires command parser version 6 in order to write to the
    * MI_MATH GPR registers, and version 7 in order to use
    * MI_LOAD_REGISTER_REG (which all users of MI_MATH use).
    */
   if (devinfo->gen >= 8 ||
       (devinfo->is_haswell && screen->cmd_parser_version >= 7)) {
      screen->kernel_features |= KERNEL_ALLOWS_MI_MATH_AND_LRR;
   }

   /* Gen7 needs at least command parser version 5 to support compute */
   if (devinfo->gen >= 8 || screen->cmd_parser_version >= 5)
      screen->kernel_features |= KERNEL_ALLOWS_COMPUTE_DISPATCH;

   if (intel_get_boolean(screen, I915_PARAM_HAS_CONTEXT_ISOLATION))
      screen->kernel_features |= KERNEL_ALLOWS_CONTEXT_ISOLATION;

   /* Debug override for the window-system sample count. */
   const char *force_msaa = getenv("INTEL_FORCE_MSAA");
   if (force_msaa) {
      screen->winsys_msaa_samples_override =
         intel_quantize_num_samples(screen, atoi(force_msaa));
      printf("Forcing winsys sample count to %d\n",
             screen->winsys_msaa_samples_override);
   } else {
      screen->winsys_msaa_samples_override = -1;
   }

   set_max_gl_versions(screen);

   /* Notification of GPU resets requires hardware contexts and a kernel new
    * enough to support DRM_IOCTL_I915_GET_RESET_STATS. If the ioctl is
    * supported, calling it with a context of 0 will either generate EPERM or
    * no error. If the ioctl is not supported, it always generates EINVAL.
    * Use this to determine whether to advertise the __DRI2_ROBUSTNESS
    * extension to the loader.
    *
    * Don't even try on pre-Gen6, since we don't attempt to use contexts there.
    */
   if (devinfo->gen >= 6) {
      struct drm_i915_reset_stats stats;
      memset(&stats, 0, sizeof(stats));

      const int ret = drmIoctl(dri_screen->fd, DRM_IOCTL_I915_GET_RESET_STATS, &stats);

      screen->has_context_reset_notification =
         (ret != -1 || errno != EINVAL);
   }

   dri_screen->extensions = !screen->has_context_reset_notification
      ? screenExtensions : intelRobustScreenExtensions;

   screen->compiler = brw_compiler_create(screen, devinfo);
   screen->compiler->shader_debug_log = shader_debug_log_mesa;
   screen->compiler->shader_perf_log = shader_perf_log_mesa;

   /* Changing the meaning of constant buffer pointers from a dynamic state
    * offset to an absolute address is only safe if the kernel isolates other
    * contexts from our changes.
    */
   screen->compiler->constant_buffer_0_is_relative = devinfo->gen < 8 ||
      !(screen->kernel_features & KERNEL_ALLOWS_CONTEXT_ISOLATION);

   screen->compiler->supports_pull_constants = true;

   screen->has_exec_fence =
      intel_get_boolean(screen, I915_PARAM_HAS_EXEC_FENCE);

   intel_screen_init_surface_formats(screen);

   if (INTEL_DEBUG & (DEBUG_BATCH | DEBUG_SUBMIT)) {
      unsigned int caps = intel_get_integer(screen, I915_PARAM_HAS_SCHEDULER);
      if (caps) {
         fprintf(stderr, "Kernel scheduler detected: %08x\n", caps);
         if (caps & I915_SCHEDULER_CAP_PRIORITY)
            fprintf(stderr, " - User priority sorting enabled\n");
         if (caps & I915_SCHEDULER_CAP_PREEMPTION)
            fprintf(stderr, " - Preemption enabled\n");
      }
   }

   brw_disk_cache_init(screen);

   return (const __DRIconfig**) intel_screen_make_configs(dri_screen);
}
2713
/** A loader-visible buffer allocated by intelAllocateBuffer(). */
struct intel_buffer {
   __DRIbuffer base;   /**< Part handed back to the loader (name/pitch/cpp). */
   struct brw_bo *bo;  /**< Backing buffer object. */
};
2718
2719 static __DRIbuffer *
2720 intelAllocateBuffer(__DRIscreen *dri_screen,
2721 unsigned attachment, unsigned format,
2722 int width, int height)
2723 {
2724 struct intel_buffer *intelBuffer;
2725 struct intel_screen *screen = dri_screen->driverPrivate;
2726
2727 assert(attachment == __DRI_BUFFER_FRONT_LEFT ||
2728 attachment == __DRI_BUFFER_BACK_LEFT);
2729
2730 intelBuffer = calloc(1, sizeof *intelBuffer);
2731 if (intelBuffer == NULL)
2732 return NULL;
2733
2734 /* The front and back buffers are color buffers, which are X tiled. GEN9+
2735 * supports Y tiled and compressed buffers, but there is no way to plumb that
2736 * through to here. */
2737 uint32_t pitch;
2738 int cpp = format / 8;
2739 intelBuffer->bo = brw_bo_alloc_tiled_2d(screen->bufmgr,
2740 "intelAllocateBuffer",
2741 width,
2742 height,
2743 cpp,
2744 BRW_MEMZONE_OTHER,
2745 I915_TILING_X, &pitch,
2746 BO_ALLOC_BUSY);
2747
2748 if (intelBuffer->bo == NULL) {
2749 free(intelBuffer);
2750 return NULL;
2751 }
2752
2753 brw_bo_flink(intelBuffer->bo, &intelBuffer->base.name);
2754
2755 intelBuffer->base.attachment = attachment;
2756 intelBuffer->base.cpp = cpp;
2757 intelBuffer->base.pitch = pitch;
2758
2759 return &intelBuffer->base;
2760 }
2761
2762 static void
2763 intelReleaseBuffer(__DRIscreen *dri_screen, __DRIbuffer *buffer)
2764 {
2765 struct intel_buffer *intelBuffer = (struct intel_buffer *) buffer;
2766
2767 brw_bo_unreference(intelBuffer->bo);
2768 free(intelBuffer);
2769 }
2770
/** Driver vtable handed to the DRI core (see brw_vtable below). */
static const struct __DriverAPIRec brw_driver_api = {
   .InitScreen = intelInitScreen2,
   .DestroyScreen = intelDestroyScreen,
   .CreateContext = brwCreateContext,
   .DestroyContext = intelDestroyContext,
   .CreateBuffer = intelCreateBuffer,
   .DestroyBuffer = intelDestroyBuffer,
   .MakeCurrent = intelMakeCurrent,
   .UnbindContext = intelUnbindContext,
   .AllocateBuffer = intelAllocateBuffer,
   .ReleaseBuffer = intelReleaseBuffer
};
2783
/** Extension wrapper exposing brw_driver_api through the DRI vtable extension. */
static const struct __DRIDriverVtableExtensionRec brw_vtable = {
   .base = { __DRI_DRIVER_VTABLE, 1 },
   .vtable = &brw_driver_api,
};
2788
/* NULL-terminated list of driver-level extensions returned to the loader. */
static const __DRIextension *brw_driver_extensions[] = {
    &driCoreExtension.base,
    &driImageDriverExtension.base,
    &driDRI2Extension.base,
    &brw_vtable.base,
    &brw_config_options.base,
    NULL
};
2797
/**
 * Loader entry point: publish the i965 driver's extension list.
 * Also installs brw_driver_api as the global driver API for legacy paths.
 */
PUBLIC const __DRIextension **__driDriverGetExtensions_i965(void)
{
   globalDriverAPI = &brw_driver_api;

   return brw_driver_extensions;
}