ab6c003b0fe21aaebd9d3af24dc4a01953fe1313
[mesa.git] / src / mesa / drivers / dri / i965 / intel_screen.c
1 /*
2 * Copyright 2003 VMware, Inc.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial portions
15 * of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
18 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
21 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
22 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
23 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 */
25
26 #include <drm_fourcc.h>
27 #include <errno.h>
28 #include <time.h>
29 #include <unistd.h>
30 #include "main/context.h"
31 #include "main/framebuffer.h"
32 #include "main/renderbuffer.h"
33 #include "main/texobj.h"
34 #include "main/hash.h"
35 #include "main/fbobject.h"
36 #include "main/version.h"
37 #include "swrast/s_renderbuffer.h"
38 #include "util/ralloc.h"
39 #include "util/disk_cache.h"
40 #include "brw_defines.h"
41 #include "brw_state.h"
42 #include "compiler/nir/nir.h"
43
44 #include "utils.h"
45 #include "util/disk_cache.h"
46 #include "util/xmlpool.h"
47
48 #include "common/gen_defines.h"
49
/* driconf options exposed by the i965 driver.  Each DRI_CONF_* macro
 * expands to a fragment of the XML string that the loader's xmlconfig
 * machinery parses; the defaults given here can be overridden per-user
 * or per-application via drirc.
 */
static const __DRIconfigOptionsExtension brw_config_options = {
   .base = { __DRI_CONFIG_OPTIONS, 1 },
   .xml =
DRI_CONF_BEGIN
   DRI_CONF_SECTION_PERFORMANCE
      /* Options correspond to DRI_CONF_BO_REUSE_DISABLED,
       * DRI_CONF_BO_REUSE_ALL
       */
      DRI_CONF_OPT_BEGIN_V(bo_reuse, enum, 1, "0:1")
         DRI_CONF_DESC_BEGIN(en, "Buffer object reuse")
            DRI_CONF_ENUM(0, "Disable buffer object reuse")
            DRI_CONF_ENUM(1, "Enable reuse of all sizes of buffer objects")
         DRI_CONF_DESC_END
      DRI_CONF_OPT_END
      DRI_CONF_MESA_NO_ERROR("false")
   DRI_CONF_SECTION_END

   DRI_CONF_SECTION_QUALITY
      DRI_CONF_PRECISE_TRIG("false")

      /* -1 (the default) leaves GL_MAX_SAMPLES at the hardware maximum. */
      DRI_CONF_OPT_BEGIN(clamp_max_samples, int, -1)
         DRI_CONF_DESC(en, "Clamp the value of GL_MAX_SAMPLES to the "
                       "given integer. If negative, then do not clamp.")
      DRI_CONF_OPT_END
   DRI_CONF_SECTION_END

   DRI_CONF_SECTION_DEBUG
      DRI_CONF_NO_RAST("false")
      DRI_CONF_ALWAYS_FLUSH_BATCH("false")
      DRI_CONF_ALWAYS_FLUSH_CACHE("false")
      DRI_CONF_DISABLE_THROTTLING("false")
      DRI_CONF_FORCE_GLSL_EXTENSIONS_WARN("false")
      DRI_CONF_FORCE_GLSL_VERSION(0)
      DRI_CONF_DISABLE_GLSL_LINE_CONTINUATIONS("false")
      DRI_CONF_DISABLE_BLEND_FUNC_EXTENDED("false")
      DRI_CONF_DUAL_COLOR_BLEND_BY_LOCATION("false")
      DRI_CONF_ALLOW_GLSL_EXTENSION_DIRECTIVE_MIDSHADER("false")
      DRI_CONF_ALLOW_GLSL_BUILTIN_VARIABLE_REDECLARATION("false")
      DRI_CONF_ALLOW_GLSL_CROSS_STAGE_INTERPOLATION_MISMATCH("false")
      DRI_CONF_ALLOW_HIGHER_COMPAT_VERSION("false")
      DRI_CONF_FORCE_GLSL_ABS_SQRT("false")

      DRI_CONF_OPT_BEGIN_B(shader_precompile, "true")
         DRI_CONF_DESC(en, "Perform code generation at shader link time.")
      DRI_CONF_OPT_END
   DRI_CONF_SECTION_END

   DRI_CONF_SECTION_MISCELLANEOUS
      DRI_CONF_GLSL_ZERO_INIT("false")
      DRI_CONF_ALLOW_RGB10_CONFIGS("false")
   DRI_CONF_SECTION_END
DRI_CONF_END
};
103
104 #include "intel_batchbuffer.h"
105 #include "intel_buffers.h"
106 #include "brw_bufmgr.h"
107 #include "intel_fbo.h"
108 #include "intel_mipmap_tree.h"
109 #include "intel_screen.h"
110 #include "intel_tex.h"
111 #include "intel_image.h"
112
113 #include "brw_context.h"
114
115 #include "i915_drm.h"
116
117 /**
118 * For debugging purposes, this returns a time in seconds.
119 */
120 double
121 get_time(void)
122 {
123 struct timespec tp;
124
125 clock_gettime(CLOCK_MONOTONIC, &tp);
126
127 return tp.tv_sec + tp.tv_nsec / 1000000000.0;
128 }
129
/* GLX_EXT_texture_from_pixmap support: binds a drawable's buffer as a
 * GL texture.  Version 3 adds releaseTexBuffer.
 */
static const __DRItexBufferExtension intelTexBufferExtension = {
   .base = { __DRI_TEX_BUFFER, 3 },

   .setTexBuffer = intelSetTexBuffer,
   .setTexBuffer2 = intelSetTexBuffer2,
   .releaseTexBuffer = intelReleaseTexBuffer,
};
137
/* DRI2 flush entry point (version 4, flush_with_flags).
 *
 * Flushes pending GL work, resolves the drawable if requested, records
 * the throttle reason for later frame throttling, and submits the batch.
 * Silently does nothing if the context has no driver private (e.g. it
 * was never made current).
 */
static void
intel_dri2_flush_with_flags(__DRIcontext *cPriv,
                            __DRIdrawable *dPriv,
                            unsigned flags,
                            enum __DRI2throttleReason reason)
{
   struct brw_context *brw = cPriv->driverPrivate;

   if (!brw)
      return;

   struct gl_context *ctx = &brw->ctx;

   /* Emit any vertices queued in core Mesa before we resolve/flush. */
   FLUSH_VERTICES(ctx, 0);

   if (flags & __DRI2_FLUSH_DRAWABLE)
      intel_resolve_for_dri2_flush(brw, dPriv);

   /* Throttling itself happens later; just note why it will be needed. */
   if (reason == __DRI2_THROTTLE_SWAPBUFFER)
      brw->need_swap_throttle = true;
   if (reason == __DRI2_THROTTLE_FLUSHFRONT)
      brw->need_flush_throttle = true;

   intel_batchbuffer_flush(brw);
}
163
/**
 * Provides compatibility with loaders that only support the older (version
 * 1-3) flush interface.
 *
 * That includes libGL up to Mesa 9.0, and the X Server at least up to 1.13.
 *
 * Forwards to intel_dri2_flush_with_flags() with the flags the old
 * interface implied: flush the drawable and throttle as for a swap.
 */
static void
intel_dri2_flush(__DRIdrawable *drawable)
{
   intel_dri2_flush_with_flags(drawable->driContextPriv, drawable,
                               __DRI2_FLUSH_DRAWABLE,
                               __DRI2_THROTTLE_SWAPBUFFER);
}
177
/* __DRI2_FLUSH extension vtable exported to the loader. */
static const struct __DRI2flushExtensionRec intelFlushExtension = {
    .base = { __DRI2_FLUSH, 4 },

    .flush              = intel_dri2_flush,
    .invalidate         = dri2InvalidateDrawable,
    .flush_with_flags   = intel_dri2_flush_with_flags,
};
185
/* Table of fourcc formats the driver can import/export as __DRIimages.
 *
 * Each entry lists the fourcc, the __DRI_IMAGE_COMPONENTS_* class, the
 * plane count, and per-plane data of the form
 *   { buffer_index, width_shift, height_shift, dri_format, cpp }
 * where the shifts give the chroma subsampling relative to plane 0
 * (e.g. shift 1,1 = half width and height, as in YUV420).
 * NOTE(review): field meaning inferred from the subsampling patterns
 * below — confirm against struct intel_image_format in intel_image.h.
 */
static const struct intel_image_format intel_image_formats[] = {
   /* Single-plane RGB(A) formats. */
   { __DRI_IMAGE_FOURCC_ARGB2101010, __DRI_IMAGE_COMPONENTS_RGBA, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_ARGB2101010, 4 } } },

   { __DRI_IMAGE_FOURCC_XRGB2101010, __DRI_IMAGE_COMPONENTS_RGB, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_XRGB2101010, 4 } } },

   { __DRI_IMAGE_FOURCC_ABGR2101010, __DRI_IMAGE_COMPONENTS_RGBA, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_ABGR2101010, 4 } } },

   { __DRI_IMAGE_FOURCC_XBGR2101010, __DRI_IMAGE_COMPONENTS_RGB, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_XBGR2101010, 4 } } },

   { __DRI_IMAGE_FOURCC_ARGB8888, __DRI_IMAGE_COMPONENTS_RGBA, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_ARGB8888, 4 } } },

   { __DRI_IMAGE_FOURCC_ABGR8888, __DRI_IMAGE_COMPONENTS_RGBA, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_ABGR8888, 4 } } },

   { __DRI_IMAGE_FOURCC_SARGB8888, __DRI_IMAGE_COMPONENTS_RGBA, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_SARGB8, 4 } } },

   { __DRI_IMAGE_FOURCC_XRGB8888, __DRI_IMAGE_COMPONENTS_RGB, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_XRGB8888, 4 }, } },

   { __DRI_IMAGE_FOURCC_XBGR8888, __DRI_IMAGE_COMPONENTS_RGB, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_XBGR8888, 4 }, } },

   { __DRI_IMAGE_FOURCC_ARGB1555, __DRI_IMAGE_COMPONENTS_RGBA, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_ARGB1555, 2 } } },

   { __DRI_IMAGE_FOURCC_RGB565, __DRI_IMAGE_COMPONENTS_RGB, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_RGB565, 2 } } },

   /* Single- and dual-channel formats. */
   { __DRI_IMAGE_FOURCC_R8, __DRI_IMAGE_COMPONENTS_R, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 }, } },

   { __DRI_IMAGE_FOURCC_R16, __DRI_IMAGE_COMPONENTS_R, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R16, 1 }, } },

   { __DRI_IMAGE_FOURCC_GR88, __DRI_IMAGE_COMPONENTS_RG, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_GR88, 2 }, } },

   { __DRI_IMAGE_FOURCC_GR1616, __DRI_IMAGE_COMPONENTS_RG, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_GR1616, 2 }, } },

   /* Three-plane YUV: plane order Y, U, V; the YVU variants swap the
    * buffer_index of the chroma planes.
    */
   { __DRI_IMAGE_FOURCC_YUV410, __DRI_IMAGE_COMPONENTS_Y_U_V, 3,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
       { 1, 2, 2, __DRI_IMAGE_FORMAT_R8, 1 },
       { 2, 2, 2, __DRI_IMAGE_FORMAT_R8, 1 } } },

   { __DRI_IMAGE_FOURCC_YUV411, __DRI_IMAGE_COMPONENTS_Y_U_V, 3,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
       { 1, 2, 0, __DRI_IMAGE_FORMAT_R8, 1 },
       { 2, 2, 0, __DRI_IMAGE_FORMAT_R8, 1 } } },

   { __DRI_IMAGE_FOURCC_YUV420, __DRI_IMAGE_COMPONENTS_Y_U_V, 3,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
       { 1, 1, 1, __DRI_IMAGE_FORMAT_R8, 1 },
       { 2, 1, 1, __DRI_IMAGE_FORMAT_R8, 1 } } },

   { __DRI_IMAGE_FOURCC_YUV422, __DRI_IMAGE_COMPONENTS_Y_U_V, 3,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
       { 1, 1, 0, __DRI_IMAGE_FORMAT_R8, 1 },
       { 2, 1, 0, __DRI_IMAGE_FORMAT_R8, 1 } } },

   { __DRI_IMAGE_FOURCC_YUV444, __DRI_IMAGE_COMPONENTS_Y_U_V, 3,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
       { 1, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
       { 2, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 } } },

   { __DRI_IMAGE_FOURCC_YVU410, __DRI_IMAGE_COMPONENTS_Y_U_V, 3,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
       { 2, 2, 2, __DRI_IMAGE_FORMAT_R8, 1 },
       { 1, 2, 2, __DRI_IMAGE_FORMAT_R8, 1 } } },

   { __DRI_IMAGE_FOURCC_YVU411, __DRI_IMAGE_COMPONENTS_Y_U_V, 3,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
       { 2, 2, 0, __DRI_IMAGE_FORMAT_R8, 1 },
       { 1, 2, 0, __DRI_IMAGE_FORMAT_R8, 1 } } },

   { __DRI_IMAGE_FOURCC_YVU420, __DRI_IMAGE_COMPONENTS_Y_U_V, 3,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
       { 2, 1, 1, __DRI_IMAGE_FORMAT_R8, 1 },
       { 1, 1, 1, __DRI_IMAGE_FORMAT_R8, 1 } } },

   { __DRI_IMAGE_FOURCC_YVU422, __DRI_IMAGE_COMPONENTS_Y_U_V, 3,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
       { 2, 1, 0, __DRI_IMAGE_FORMAT_R8, 1 },
       { 1, 1, 0, __DRI_IMAGE_FORMAT_R8, 1 } } },

   { __DRI_IMAGE_FOURCC_YVU444, __DRI_IMAGE_COMPONENTS_Y_U_V, 3,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
       { 2, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
       { 1, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 } } },

   /* Two-plane formats: full-res Y plane plus an interleaved UV plane. */
   { __DRI_IMAGE_FOURCC_NV12, __DRI_IMAGE_COMPONENTS_Y_UV, 2,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
       { 1, 1, 1, __DRI_IMAGE_FORMAT_GR88, 2 } } },

   { __DRI_IMAGE_FOURCC_NV16, __DRI_IMAGE_COMPONENTS_Y_UV, 2,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
       { 1, 1, 0, __DRI_IMAGE_FORMAT_GR88, 2 } } },

   /* For YUYV and UYVY buffers, we set up two overlapping DRI images
    * and treat them as planar buffers in the compositors.
    * Plane 0 is GR88 and samples YU or YV pairs and places Y into
    * the R component, while plane 1 is ARGB/ABGR and samples YUYV/UYVY
    * clusters and places pairs and places U into the G component and
    * V into A.  This lets the texture sampler interpolate the Y
    * components correctly when sampling from plane 0, and interpolate
    * U and V correctly when sampling from plane 1. */
   { __DRI_IMAGE_FOURCC_YUYV, __DRI_IMAGE_COMPONENTS_Y_XUXV, 2,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_GR88, 2 },
       { 0, 1, 0, __DRI_IMAGE_FORMAT_ARGB8888, 4 } } },
   { __DRI_IMAGE_FOURCC_UYVY, __DRI_IMAGE_COMPONENTS_Y_UXVX, 2,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_GR88, 2 },
       { 0, 1, 0, __DRI_IMAGE_FORMAT_ABGR8888, 4 } } }
};
305
/* DRM format modifiers this driver supports, with the first hardware
 * generation that supports each.  Consulted by modifier_is_supported().
 */
static const struct {
   uint64_t modifier;
   unsigned since_gen;
} supported_modifiers[] = {
   { .modifier = DRM_FORMAT_MOD_LINEAR       , .since_gen = 1 },
   { .modifier = I915_FORMAT_MOD_X_TILED     , .since_gen = 1 },
   { .modifier = I915_FORMAT_MOD_Y_TILED     , .since_gen = 6 },
   { .modifier = I915_FORMAT_MOD_Y_TILED_CCS , .since_gen = 9 },
};
315
/**
 * Return true if \p modifier is usable on this device, optionally for a
 * particular image format.
 *
 * \param fmt        the image format, or NULL when only a DRI format code
 *                   is known
 * \param dri_format DRI format code; callers pass 0 when \p fmt is given
 *                   (asserted below in the CCS_E path)
 */
static bool
modifier_is_supported(const struct gen_device_info *devinfo,
                      const struct intel_image_format *fmt, int dri_format,
                      uint64_t modifier)
{
   const struct isl_drm_modifier_info *modinfo =
      isl_drm_modifier_get_info(modifier);
   int i;

   /* ISL had better know about the modifier */
   if (!modinfo)
      return false;

   /* CCS_E (lossless render compression) has extra constraints beyond the
    * generation check below.
    */
   if (modinfo->aux_usage == ISL_AUX_USAGE_CCS_E) {
      /* If INTEL_DEBUG=norbc is set, don't support any CCS_E modifiers */
      if (unlikely(INTEL_DEBUG & DEBUG_NO_RBC))
         return false;

      /* CCS_E is not supported for planar images */
      if (fmt && fmt->nplanes > 1)
         return false;

      if (fmt) {
         assert(dri_format == 0);
         dri_format = fmt->planes[0].dri_format;
      }

      /* Query ISL on the linear equivalent of any sRGB format. */
      mesa_format format = driImageFormatToGLFormat(dri_format);
      format = _mesa_get_srgb_format_linear(format);
      if (!isl_format_supports_ccs_e(devinfo,
                                     brw_isl_format_for_mesa_format(format)))
         return false;
   }

   /* Finally, check against the per-generation support table. */
   for (i = 0; i < ARRAY_SIZE(supported_modifiers); i++) {
      if (supported_modifiers[i].modifier != modifier)
         continue;

      return supported_modifiers[i].since_gen <= devinfo->gen;
   }

   return false;
}
359
360 static uint64_t
361 tiling_to_modifier(uint32_t tiling)
362 {
363 static const uint64_t map[] = {
364 [I915_TILING_NONE] = DRM_FORMAT_MOD_LINEAR,
365 [I915_TILING_X] = I915_FORMAT_MOD_X_TILED,
366 [I915_TILING_Y] = I915_FORMAT_MOD_Y_TILED,
367 };
368
369 assert(tiling < ARRAY_SIZE(map));
370
371 return map[tiling];
372 }
373
374 static void
375 intel_image_warn_if_unaligned(__DRIimage *image, const char *func)
376 {
377 uint32_t tiling, swizzle;
378 brw_bo_get_tiling(image->bo, &tiling, &swizzle);
379
380 if (tiling != I915_TILING_NONE && (image->offset & 0xfff)) {
381 _mesa_warning(NULL, "%s: offset 0x%08x not on tile boundary",
382 func, image->offset);
383 }
384 }
385
386 static const struct intel_image_format *
387 intel_image_format_lookup(int fourcc)
388 {
389 for (unsigned i = 0; i < ARRAY_SIZE(intel_image_formats); i++) {
390 if (intel_image_formats[i].fourcc == fourcc)
391 return &intel_image_formats[i];
392 }
393
394 return NULL;
395 }
396
397 static boolean
398 intel_image_get_fourcc(__DRIimage *image, int *fourcc)
399 {
400 if (image->planar_format) {
401 *fourcc = image->planar_format->fourcc;
402 return true;
403 }
404
405 for (unsigned i = 0; i < ARRAY_SIZE(intel_image_formats); i++) {
406 if (intel_image_formats[i].planes[0].dri_format == image->dri_format) {
407 *fourcc = intel_image_formats[i].fourcc;
408 return true;
409 }
410 }
411 return false;
412 }
413
414 static __DRIimage *
415 intel_allocate_image(struct intel_screen *screen, int dri_format,
416 void *loaderPrivate)
417 {
418 __DRIimage *image;
419
420 image = calloc(1, sizeof *image);
421 if (image == NULL)
422 return NULL;
423
424 image->screen = screen;
425 image->dri_format = dri_format;
426 image->offset = 0;
427
428 image->format = driImageFormatToGLFormat(dri_format);
429 if (dri_format != __DRI_IMAGE_FORMAT_NONE &&
430 image->format == MESA_FORMAT_NONE) {
431 free(image);
432 return NULL;
433 }
434
435 image->internal_format = _mesa_get_format_base_format(image->format);
436 image->data = loaderPrivate;
437
438 return image;
439 }
440
/**
 * Sets up a DRIImage structure to point to a slice out of a miptree.
 *
 * The miptree is first made shareable (flushing any internal aux state),
 * then the image's geometry, tile offsets, and BO reference are filled
 * in from the requested level/slice.  Takes its own reference on the
 * miptree's BO, dropping any BO the image previously held.
 */
static void
intel_setup_image_from_mipmap_tree(struct brw_context *brw, __DRIimage *image,
                                   struct intel_mipmap_tree *mt, GLuint level,
                                   GLuint zoffset)
{
   intel_miptree_make_shareable(brw, mt);

   intel_miptree_check_level_layer(mt, level, zoffset);

   /* Level dimensions are derived from the physical base level. */
   image->width = minify(mt->surf.phys_level0_sa.width,
                         level - mt->first_level);
   image->height = minify(mt->surf.phys_level0_sa.height,
                          level - mt->first_level);
   image->pitch = mt->surf.row_pitch;

   image->offset = intel_miptree_get_tile_offsets(mt, level, zoffset,
                                                  &image->tile_x,
                                                  &image->tile_y);

   /* Swap any previously-held BO for a fresh reference on the miptree's. */
   brw_bo_unreference(image->bo);
   image->bo = mt->bo;
   brw_bo_reference(mt->bo);
}
467
/**
 * Import a __DRIimage from a GEM flink name.
 *
 * \param pitch  pitch in pixels; converted to bytes using the format's cpp
 *               (cpp falls back to 1 for __DRI_IMAGE_FORMAT_NONE imports).
 * Returns NULL on allocation or import failure.
 */
static __DRIimage *
intel_create_image_from_name(__DRIscreen *dri_screen,
                             int width, int height, int format,
                             int name, int pitch, void *loaderPrivate)
{
   struct intel_screen *screen = dri_screen->driverPrivate;
   __DRIimage *image;
   int cpp;

   image = intel_allocate_image(screen, format, loaderPrivate);
   if (image == NULL)
      return NULL;

   if (image->format == MESA_FORMAT_NONE)
      cpp = 1;
   else
      cpp = _mesa_get_format_bytes(image->format);

   image->width = width;
   image->height = height;
   image->pitch = pitch * cpp;
   image->bo = brw_bo_gem_create_from_name(screen->bufmgr, "image",
                                           name);
   if (!image->bo) {
      free(image);
      return NULL;
   }
   /* Flink import only carries a tiling mode; derive the modifier. */
   image->modifier = tiling_to_modifier(image->bo->tiling_mode);

   return image;
}
499
/**
 * Wrap an existing GL renderbuffer as a __DRIimage (EGLImage source).
 *
 * Records a GL error and returns NULL if the renderbuffer name does not
 * resolve.  The renderbuffer's miptree is made shareable and its BO is
 * referenced by the new image.
 *
 * NOTE(review): irb is assumed non-NULL with a valid mt for any looked-up
 * renderbuffer — confirm callers cannot reach this with an unallocated rb.
 */
static __DRIimage *
intel_create_image_from_renderbuffer(__DRIcontext *context,
                                     int renderbuffer, void *loaderPrivate)
{
   __DRIimage *image;
   struct brw_context *brw = context->driverPrivate;
   struct gl_context *ctx = &brw->ctx;
   struct gl_renderbuffer *rb;
   struct intel_renderbuffer *irb;

   rb = _mesa_lookup_renderbuffer(ctx, renderbuffer);
   if (!rb) {
      _mesa_error(ctx, GL_INVALID_OPERATION, "glRenderbufferExternalMESA");
      return NULL;
   }

   irb = intel_renderbuffer(rb);
   intel_miptree_make_shareable(brw, irb->mt);
   image = calloc(1, sizeof *image);
   if (image == NULL)
      return NULL;

   image->internal_format = rb->InternalFormat;
   image->format = rb->Format;
   image->modifier = tiling_to_modifier(
                        isl_tiling_to_i915_tiling(irb->mt->surf.tiling));
   image->offset = 0;
   image->data = loaderPrivate;
   /* image was calloc'd, so bo is NULL here; unreference is a no-op kept
    * for symmetry with other image setup paths.
    */
   brw_bo_unreference(image->bo);
   image->bo = irb->mt->bo;
   brw_bo_reference(irb->mt->bo);
   image->width = rb->Width;
   image->height = rb->Height;
   image->pitch = irb->mt->surf.row_pitch;
   image->dri_format = driGLFormatToImageFormat(image->format);
   image->has_depthstencil = irb->mt->stencil_mt? true : false;

   rb->NeedsFinishRenderTexture = true;
   return image;
}
540
541 static __DRIimage *
542 intel_create_image_from_texture(__DRIcontext *context, int target,
543 unsigned texture, int zoffset,
544 int level,
545 unsigned *error,
546 void *loaderPrivate)
547 {
548 __DRIimage *image;
549 struct brw_context *brw = context->driverPrivate;
550 struct gl_texture_object *obj;
551 struct intel_texture_object *iobj;
552 GLuint face = 0;
553
554 obj = _mesa_lookup_texture(&brw->ctx, texture);
555 if (!obj || obj->Target != target) {
556 *error = __DRI_IMAGE_ERROR_BAD_PARAMETER;
557 return NULL;
558 }
559
560 if (target == GL_TEXTURE_CUBE_MAP)
561 face = zoffset;
562
563 _mesa_test_texobj_completeness(&brw->ctx, obj);
564 iobj = intel_texture_object(obj);
565 if (!obj->_BaseComplete || (level > 0 && !obj->_MipmapComplete)) {
566 *error = __DRI_IMAGE_ERROR_BAD_PARAMETER;
567 return NULL;
568 }
569
570 if (level < obj->BaseLevel || level > obj->_MaxLevel) {
571 *error = __DRI_IMAGE_ERROR_BAD_MATCH;
572 return NULL;
573 }
574
575 if (target == GL_TEXTURE_3D && obj->Image[face][level]->Depth < zoffset) {
576 *error = __DRI_IMAGE_ERROR_BAD_MATCH;
577 return NULL;
578 }
579 image = calloc(1, sizeof *image);
580 if (image == NULL) {
581 *error = __DRI_IMAGE_ERROR_BAD_ALLOC;
582 return NULL;
583 }
584
585 image->internal_format = obj->Image[face][level]->InternalFormat;
586 image->format = obj->Image[face][level]->TexFormat;
587 image->modifier = tiling_to_modifier(
588 isl_tiling_to_i915_tiling(iobj->mt->surf.tiling));
589 image->data = loaderPrivate;
590 intel_setup_image_from_mipmap_tree(brw, image, iobj->mt, level, zoffset);
591 image->dri_format = driGLFormatToImageFormat(image->format);
592 image->has_depthstencil = iobj->mt->stencil_mt? true : false;
593 image->planar_format = iobj->planar_format;
594 if (image->dri_format == MESA_FORMAT_NONE) {
595 *error = __DRI_IMAGE_ERROR_BAD_PARAMETER;
596 free(image);
597 return NULL;
598 }
599
600 *error = __DRI_IMAGE_ERROR_SUCCESS;
601 return image;
602 }
603
/* Release an image's BO reference and free the image itself. */
static void
intel_destroy_image(__DRIimage *image)
{
   brw_bo_unreference(image->bo);
   free(image);
}
610
/* Relative desirability of modifiers when the caller offers a choice;
 * higher enumerators are preferred (CCS > Y > X > linear).
 */
enum modifier_priority {
   MODIFIER_PRIORITY_INVALID = 0,
   MODIFIER_PRIORITY_LINEAR,
   MODIFIER_PRIORITY_X,
   MODIFIER_PRIORITY_Y,
   MODIFIER_PRIORITY_Y_CCS,
};
618
619 const uint64_t priority_to_modifier[] = {
620 [MODIFIER_PRIORITY_INVALID] = DRM_FORMAT_MOD_INVALID,
621 [MODIFIER_PRIORITY_LINEAR] = DRM_FORMAT_MOD_LINEAR,
622 [MODIFIER_PRIORITY_X] = I915_FORMAT_MOD_X_TILED,
623 [MODIFIER_PRIORITY_Y] = I915_FORMAT_MOD_Y_TILED,
624 [MODIFIER_PRIORITY_Y_CCS] = I915_FORMAT_MOD_Y_TILED_CCS,
625 };
626
627 static uint64_t
628 select_best_modifier(struct gen_device_info *devinfo,
629 int dri_format,
630 const uint64_t *modifiers,
631 const unsigned count)
632 {
633 enum modifier_priority prio = MODIFIER_PRIORITY_INVALID;
634
635 for (int i = 0; i < count; i++) {
636 if (!modifier_is_supported(devinfo, NULL, dri_format, modifiers[i]))
637 continue;
638
639 switch (modifiers[i]) {
640 case I915_FORMAT_MOD_Y_TILED_CCS:
641 prio = MAX2(prio, MODIFIER_PRIORITY_Y_CCS);
642 break;
643 case I915_FORMAT_MOD_Y_TILED:
644 prio = MAX2(prio, MODIFIER_PRIORITY_Y);
645 break;
646 case I915_FORMAT_MOD_X_TILED:
647 prio = MAX2(prio, MODIFIER_PRIORITY_X);
648 break;
649 case DRM_FORMAT_MOD_LINEAR:
650 prio = MAX2(prio, MODIFIER_PRIORITY_LINEAR);
651 break;
652 case DRM_FORMAT_MOD_INVALID:
653 default:
654 break;
655 }
656 }
657
658 return priority_to_modifier[prio];
659 }
660
/**
 * Shared implementation for createImage and createImageWithModifiers.
 *
 * Exactly one of \p use (legacy usage flags) or \p modifiers/\p count may
 * be non-zero.  The modifier is chosen from the caller's list (or implied
 * by the usage flags, defaulting to X-tiled), an ISL surface is laid out
 * for it, and a zeroed BO large enough for the main surface plus any CCS
 * aux surface is allocated.  Returns NULL on any failure.
 */
static __DRIimage *
intel_create_image_common(__DRIscreen *dri_screen,
                          int width, int height, int format,
                          unsigned int use,
                          const uint64_t *modifiers,
                          unsigned count,
                          void *loaderPrivate)
{
   __DRIimage *image;
   struct intel_screen *screen = dri_screen->driverPrivate;
   uint64_t modifier = DRM_FORMAT_MOD_INVALID;
   bool ok;

   /* Callers of this may specify a modifier, or a dri usage, but not both. The
    * newer modifier interface deprecates the older usage flags newer modifier
    * interface deprecates the older usage flags.
    */
   assert(!(use && count));

   if (use & __DRI_IMAGE_USE_CURSOR) {
      /* Hardware cursors must be 64x64 and linear. */
      if (width != 64 || height != 64)
         return NULL;
      modifier = DRM_FORMAT_MOD_LINEAR;
   }

   if (use & __DRI_IMAGE_USE_LINEAR)
      modifier = DRM_FORMAT_MOD_LINEAR;

   if (modifier == DRM_FORMAT_MOD_INVALID) {
      if (modifiers) {
         /* User requested specific modifiers */
         modifier = select_best_modifier(&screen->devinfo, format,
                                         modifiers, count);
         if (modifier == DRM_FORMAT_MOD_INVALID)
            return NULL;
      } else {
         /* Historically, X-tiled was the default, and so lack of modifier means
          * X-tiled.
          */
         modifier = I915_FORMAT_MOD_X_TILED;
      }
   }

   image = intel_allocate_image(screen, format, loaderPrivate);
   if (image == NULL)
      return NULL;

   /* modifier is valid here, so mod_info is non-NULL. */
   const struct isl_drm_modifier_info *mod_info =
      isl_drm_modifier_get_info(modifier);

   struct isl_surf surf;
   ok = isl_surf_init(&screen->isl_dev, &surf,
                      .dim = ISL_SURF_DIM_2D,
                      .format = brw_isl_format_for_mesa_format(image->format),
                      .width = width,
                      .height = height,
                      .depth = 1,
                      .levels = 1,
                      .array_len = 1,
                      .samples = 1,
                      .usage = ISL_SURF_USAGE_RENDER_TARGET_BIT |
                               ISL_SURF_USAGE_TEXTURE_BIT |
                               ISL_SURF_USAGE_STORAGE_BIT,
                      .tiling_flags = (1 << mod_info->tiling));
   assert(ok);
   if (!ok) {
      free(image);
      return NULL;
   }

   /* Lay out the CCS aux surface when the modifier requires one. */
   struct isl_surf aux_surf;
   if (mod_info->aux_usage == ISL_AUX_USAGE_CCS_E) {
      ok = isl_surf_get_ccs_surf(&screen->isl_dev, &surf, &aux_surf, 0);
      if (!ok) {
         free(image);
         return NULL;
      }
   } else {
      assert(mod_info->aux_usage == ISL_AUX_USAGE_NONE);
      aux_surf.size = 0;
   }

   /* We request that the bufmgr zero the buffer for us for two reasons:
    *
    * 1) If a buffer gets re-used from the pool, we don't want to leak random
    *    garbage from our process to some other.
    *
    * 2) For images with CCS_E, we want to ensure that the CCS starts off in
    *    a valid state.  A CCS value of 0 indicates that the given block is
    *    in the pass-through state which is what we want.
    */
   image->bo = brw_bo_alloc_tiled(screen->bufmgr, "image",
                                  surf.size + aux_surf.size,
                                  BRW_MEMZONE_OTHER,
                                  isl_tiling_to_i915_tiling(mod_info->tiling),
                                  surf.row_pitch, BO_ALLOC_ZEROED);
   if (image->bo == NULL) {
      free(image);
      return NULL;
   }
   image->width = width;
   image->height = height;
   image->pitch = surf.row_pitch;
   image->modifier = modifier;

   /* The aux surface lives in the same BO, directly after the main one. */
   if (aux_surf.size) {
      image->aux_offset = surf.size;
      image->aux_pitch = aux_surf.row_pitch;
      image->aux_size = aux_surf.size;
   }

   return image;
}
774
/* Legacy createImage entry point: usage flags only, no modifier list. */
static __DRIimage *
intel_create_image(__DRIscreen *dri_screen,
                   int width, int height, int format,
                   unsigned int use,
                   void *loaderPrivate)
{
   return intel_create_image_common(dri_screen, width, height, format, use, NULL, 0,
                                    loaderPrivate);
}
784
/**
 * Map a sub-rectangle of an image for CPU access.
 *
 * \param flags     GL_MAP_*_BIT-compatible access flags (DRI and GL values
 *                  coincide); internal-only bits are rejected
 * \param stride    out: row pitch in bytes of the returned mapping
 * \param map_info  out: opaque token for intel_unmap_image(); must point
 *                  to NULL on entry (no nested maps)
 * Returns a pointer to the first requested pixel, or NULL on any
 * validation or mapping failure.  Takes a BO reference that
 * intel_unmap_image() releases.
 */
static void *
intel_map_image(__DRIcontext *context, __DRIimage *image,
                int x0, int y0, int width, int height,
                unsigned int flags, int *stride, void **map_info)
{
   struct brw_context *brw = NULL;
   struct brw_bo *bo = NULL;
   void *raw_data = NULL;
   GLuint pix_w = 1;
   GLuint pix_h = 1;
   GLint pix_bytes = 1;

   if (!context || !image || !stride || !map_info || *map_info)
      return NULL;

   /* The requested rectangle must lie fully inside the image. */
   if (x0 < 0 || x0 >= image->width || width > image->width - x0)
      return NULL;

   if (y0 < 0 || y0 >= image->height || height > image->height - y0)
      return NULL;

   if (flags & MAP_INTERNAL_MASK)
      return NULL;

   brw = context->driverPrivate;
   bo = image->bo;

   assert(brw);
   assert(bo);

   /* DRI flags and GL_MAP.*_BIT flags are the same, so just pass them on. */
   raw_data = brw_bo_map(brw, bo, flags);
   if (!raw_data)
      return NULL;

   _mesa_get_format_block_size(image->format, &pix_w, &pix_h);
   pix_bytes = _mesa_get_format_bytes(image->format);

   assert(pix_w);
   assert(pix_h);
   assert(pix_bytes > 0);

   /* Offset to (x0, y0) in block units.  Note: arithmetic on void* is a
    * GCC extension that Mesa relies on.
    */
   raw_data += (x0 / pix_w) * pix_bytes + (y0 / pix_h) * image->pitch;

   brw_bo_reference(bo);

   *stride = image->pitch;
   *map_info = bo;

   return raw_data;
}
836
/* Undo intel_map_image(): unmap the BO and drop the reference the map
 * took.  \p map_info is the token produced by intel_map_image().
 */
static void
intel_unmap_image(__DRIcontext *context, __DRIimage *image, void *map_info)
{
   struct brw_bo *bo = map_info;

   brw_bo_unmap(bo);
   brw_bo_unreference(bo);
}
845
/* Modifier-based createImage entry point: candidate modifier list, no
 * legacy usage flags.
 */
static __DRIimage *
intel_create_image_with_modifiers(__DRIscreen *dri_screen,
                                  int width, int height, int format,
                                  const uint64_t *modifiers,
                                  const unsigned count,
                                  void *loaderPrivate)
{
   return intel_create_image_common(dri_screen, width, height, format, 0,
                                    modifiers, count, loaderPrivate);
}
856
/**
 * queryImage entry point: report one __DRI_IMAGE_ATTRIB_* value for an
 * image.  Returns false for unknown attributes or when the value cannot
 * be produced (e.g. flink/prime export failure, no planar format).
 */
static GLboolean
intel_query_image(__DRIimage *image, int attrib, int *value)
{
   switch (attrib) {
   case __DRI_IMAGE_ATTRIB_STRIDE:
      *value = image->pitch;
      return true;
   case __DRI_IMAGE_ATTRIB_HANDLE:
      *value = brw_bo_export_gem_handle(image->bo);
      return true;
   case __DRI_IMAGE_ATTRIB_NAME:
      /* Flink name export; false on failure. */
      return !brw_bo_flink(image->bo, (uint32_t *) value);
   case __DRI_IMAGE_ATTRIB_FORMAT:
      *value = image->dri_format;
      return true;
   case __DRI_IMAGE_ATTRIB_WIDTH:
      *value = image->width;
      return true;
   case __DRI_IMAGE_ATTRIB_HEIGHT:
      *value = image->height;
      return true;
   case __DRI_IMAGE_ATTRIB_COMPONENTS:
      if (image->planar_format == NULL)
         return false;
      *value = image->planar_format->components;
      return true;
   case __DRI_IMAGE_ATTRIB_FD:
      /* dma-buf fd export; false on failure. */
      return !brw_bo_gem_export_to_prime(image->bo, value);
   case __DRI_IMAGE_ATTRIB_FOURCC:
      return intel_image_get_fourcc(image, value);
   case __DRI_IMAGE_ATTRIB_NUM_PLANES:
      if (isl_drm_modifier_has_aux(image->modifier)) {
         /* Aux-carrying modifiers expose the CCS as a second "plane". */
         assert(!image->planar_format || image->planar_format->nplanes == 1);
         *value = 2;
      } else if (image->planar_format) {
         *value = image->planar_format->nplanes;
      } else {
         *value = 1;
      }
      return true;
   case __DRI_IMAGE_ATTRIB_OFFSET:
      *value = image->offset;
      return true;
   case __DRI_IMAGE_ATTRIB_MODIFIER_LOWER:
      /* 64-bit modifier is reported as two 32-bit halves. */
      *value = (image->modifier & 0xffffffff);
      return true;
   case __DRI_IMAGE_ATTRIB_MODIFIER_UPPER:
      *value = ((image->modifier >> 32) & 0xffffffff);
      return true;

  default:
      return false;
   }
}
911
912 static GLboolean
913 intel_query_format_modifier_attribs(__DRIscreen *dri_screen,
914 uint32_t fourcc, uint64_t modifier,
915 int attrib, uint64_t *value)
916 {
917 struct intel_screen *screen = dri_screen->driverPrivate;
918 const struct intel_image_format *f = intel_image_format_lookup(fourcc);
919
920 if (!modifier_is_supported(&screen->devinfo, f, 0, modifier))
921 return false;
922
923 switch (attrib) {
924 case __DRI_IMAGE_FORMAT_MODIFIER_ATTRIB_PLANE_COUNT:
925 *value = isl_drm_modifier_has_aux(modifier) ? 2 : f->nplanes;
926 return true;
927
928 default:
929 return false;
930 }
931 }
932
/**
 * dupImage entry point: create a new __DRIimage sharing the original's
 * BO (an extra reference is taken) and copying its metadata, with a new
 * loaderPrivate.
 *
 * NOTE(review): aux_offset and aux_pitch are copied but aux_size is not —
 * verify whether the duplicate needs it (compare intel_create_image_common,
 * which sets all three).
 */
static __DRIimage *
intel_dup_image(__DRIimage *orig_image, void *loaderPrivate)
{
   __DRIimage *image;

   image = calloc(1, sizeof *image);
   if (image == NULL)
      return NULL;

   brw_bo_reference(orig_image->bo);
   image->bo              = orig_image->bo;
   image->internal_format = orig_image->internal_format;
   image->planar_format   = orig_image->planar_format;
   image->dri_format      = orig_image->dri_format;
   image->format          = orig_image->format;
   image->modifier        = orig_image->modifier;
   image->offset          = orig_image->offset;
   image->width           = orig_image->width;
   image->height          = orig_image->height;
   image->pitch           = orig_image->pitch;
   image->tile_x          = orig_image->tile_x;
   image->tile_y          = orig_image->tile_y;
   image->has_depthstencil = orig_image->has_depthstencil;
   image->data            = loaderPrivate;
   image->dma_buf_imported = orig_image->dma_buf_imported;
   image->aux_offset      = orig_image->aux_offset;
   image->aux_pitch       = orig_image->aux_pitch;

   memcpy(image->strides, orig_image->strides, sizeof(image->strides));
   memcpy(image->offsets, orig_image->offsets, sizeof(image->offsets));

   return image;
}
966
967 static GLboolean
968 intel_validate_usage(__DRIimage *image, unsigned int use)
969 {
970 if (use & __DRI_IMAGE_USE_CURSOR) {
971 if (image->width != 64 || image->height != 64)
972 return GL_FALSE;
973 }
974
975 return GL_TRUE;
976 }
977
/**
 * createImageFromNames entry point: import a (possibly planar) image from
 * a single GEM flink name, then record per-plane strides and offsets.
 *
 * Only single-name imports are supported (num_names must be 1).
 * NOTE(review): strides/offsets are dereferenced without a NULL check —
 * the loader contract presumably guarantees them; confirm.
 */
static __DRIimage *
intel_create_image_from_names(__DRIscreen *dri_screen,
                              int width, int height, int fourcc,
                              int *names, int num_names,
                              int *strides, int *offsets,
                              void *loaderPrivate)
{
   const struct intel_image_format *f = NULL;
   __DRIimage *image;
   int i, index;

   if (dri_screen == NULL || names == NULL || num_names != 1)
      return NULL;

   f = intel_image_format_lookup(fourcc);
   if (f == NULL)
      return NULL;

   /* Import with FORMAT_NONE; the real format comes from the fourcc. */
   image = intel_create_image_from_name(dri_screen, width, height,
                                        __DRI_IMAGE_FORMAT_NONE,
                                        names[0], strides[0],
                                        loaderPrivate);

   if (image == NULL)
      return NULL;

   image->planar_format = f;
   for (i = 0; i < f->nplanes; i++) {
      index = f->planes[i].buffer_index;
      image->offsets[index] = offsets[index];
      image->strides[index] = strides[index];
   }

   return image;
}
1013
/**
 * Build a __DRIimage from imported dma-buf fds.
 *
 * Validates the (optional) DRM format modifier against what we advertise,
 * imports the BO(s), records the per-plane offsets/strides, and verifies
 * via isl that every plane (and any CCS aux surface) actually fits inside
 * the imported BO.  Returns NULL on any validation or allocation failure.
 */
static __DRIimage *
intel_create_image_from_fds_common(__DRIscreen *dri_screen,
                                   int width, int height, int fourcc,
                                   uint64_t modifier, int *fds, int num_fds,
                                   int *strides, int *offsets,
                                   void *loaderPrivate)
{
   struct intel_screen *screen = dri_screen->driverPrivate;
   const struct intel_image_format *f;
   __DRIimage *image;
   int i, index;
   bool ok;

   if (fds == NULL || num_fds < 1)
      return NULL;

   f = intel_image_format_lookup(fourcc);
   if (f == NULL)
      return NULL;

   /* An explicitly-requested modifier must be one we support for this
    * format; DRM_FORMAT_MOD_INVALID means "derive from BO tiling" below.
    */
   if (modifier != DRM_FORMAT_MOD_INVALID &&
       !modifier_is_supported(&screen->devinfo, f, 0, modifier))
      return NULL;

   if (f->nplanes == 1)
      image = intel_allocate_image(screen, f->planes[0].dri_format,
                                   loaderPrivate);
   else
      image = intel_allocate_image(screen, __DRI_IMAGE_FORMAT_NONE,
                                   loaderPrivate);

   if (image == NULL)
      return NULL;

   image->width = width;
   image->height = height;
   image->pitch = strides[0];

   image->planar_format = f;

   if (modifier != DRM_FORMAT_MOD_INVALID) {
      /* The modifier dictates the tiling, so import the BO with that
       * tiling set explicitly rather than trusting the kernel's record.
       */
      const struct isl_drm_modifier_info *mod_info =
         isl_drm_modifier_get_info(modifier);
      uint32_t tiling = isl_tiling_to_i915_tiling(mod_info->tiling);
      image->bo = brw_bo_gem_create_from_prime_tiled(screen->bufmgr, fds[0],
                                                     tiling, strides[0]);
   } else {
      image->bo = brw_bo_gem_create_from_prime(screen->bufmgr, fds[0]);
   }

   if (image->bo == NULL) {
      free(image);
      return NULL;
   }

   /* We only support all planes from the same bo.
    * brw_bo_gem_create_from_prime() should return the same pointer for all
    * fds received here */
   for (i = 1; i < num_fds; i++) {
      struct brw_bo *aux = brw_bo_gem_create_from_prime(screen->bufmgr, fds[i]);
      brw_bo_unreference(aux);
      if (aux != image->bo) {
         brw_bo_unreference(image->bo);
         free(image);
         return NULL;
      }
   }

   if (modifier != DRM_FORMAT_MOD_INVALID)
      image->modifier = modifier;
   else
      image->modifier = tiling_to_modifier(image->bo->tiling_mode);

   const struct isl_drm_modifier_info *mod_info =
      isl_drm_modifier_get_info(image->modifier);

   int size = 0;
   struct isl_surf surf;
   for (i = 0; i < f->nplanes; i++) {
      index = f->planes[i].buffer_index;
      image->offsets[index] = offsets[index];
      image->strides[index] = strides[index];

      mesa_format format = driImageFormatToGLFormat(f->planes[i].dri_format);

      /* Validate the plane's layout by constructing the isl surface the
       * rest of the driver will use for it.
       */
      ok = isl_surf_init(&screen->isl_dev, &surf,
                         .dim = ISL_SURF_DIM_2D,
                         .format = brw_isl_format_for_mesa_format(format),
                         .width = image->width >> f->planes[i].width_shift,
                         .height = image->height >> f->planes[i].height_shift,
                         .depth = 1,
                         .levels = 1,
                         .array_len = 1,
                         .samples = 1,
                         .row_pitch = strides[index],
                         .usage = ISL_SURF_USAGE_RENDER_TARGET_BIT |
                                  ISL_SURF_USAGE_TEXTURE_BIT |
                                  ISL_SURF_USAGE_STORAGE_BIT,
                         .tiling_flags = (1 << mod_info->tiling));
      if (!ok) {
         brw_bo_unreference(image->bo);
         free(image);
         return NULL;
      }

      /* Track the furthest extent any plane reaches into the BO. */
      const int end = offsets[index] + surf.size;
      if (size < end)
         size = end;
   }

   if (mod_info->aux_usage == ISL_AUX_USAGE_CCS_E) {
      /* Even though we initialize surf in the loop above, we know that
       * anything with CCS_E will have exactly one plane so surf is properly
       * initialized when we get here.
       */
      assert(f->nplanes == 1);

      /* The CCS aux data arrives as a second "plane" in slot 1. */
      image->aux_offset = offsets[1];
      image->aux_pitch = strides[1];

      /* Scanout hardware requires that the CCS be placed after the main
       * surface in memory. We consider any CCS that is placed any earlier in
       * memory to be invalid and reject it.
       *
       * At some point in the future, this restriction may be relaxed if the
       * hardware becomes less strict but we may need a new modifier for that.
       */
      assert(size > 0);
      if (image->aux_offset < size) {
         brw_bo_unreference(image->bo);
         free(image);
         return NULL;
      }

      struct isl_surf aux_surf;
      ok = isl_surf_get_ccs_surf(&screen->isl_dev, &surf, &aux_surf,
                                 image->aux_pitch);
      if (!ok) {
         brw_bo_unreference(image->bo);
         free(image);
         return NULL;
      }

      image->aux_size = aux_surf.size;

      const int end = image->aux_offset + aux_surf.size;
      if (size < end)
         size = end;
   } else {
      assert(mod_info->aux_usage == ISL_AUX_USAGE_NONE);
   }

   /* Check that the requested image actually fits within the BO. 'size'
    * is already relative to the offsets, so we don't need to add that. */
   if (image->bo->size == 0) {
      image->bo->size = size;
   } else if (size > image->bo->size) {
      brw_bo_unreference(image->bo);
      free(image);
      return NULL;
   }

   if (f->nplanes == 1) {
      image->offset = image->offsets[0];
      intel_image_warn_if_unaligned(image, __func__);
   }

   return image;
}
1183
1184 static __DRIimage *
1185 intel_create_image_from_fds(__DRIscreen *dri_screen,
1186 int width, int height, int fourcc,
1187 int *fds, int num_fds, int *strides, int *offsets,
1188 void *loaderPrivate)
1189 {
1190 return intel_create_image_from_fds_common(dri_screen, width, height, fourcc,
1191 DRM_FORMAT_MOD_INVALID,
1192 fds, num_fds, strides, offsets,
1193 loaderPrivate);
1194 }
1195
1196 static __DRIimage *
1197 intel_create_image_from_dma_bufs2(__DRIscreen *dri_screen,
1198 int width, int height,
1199 int fourcc, uint64_t modifier,
1200 int *fds, int num_fds,
1201 int *strides, int *offsets,
1202 enum __DRIYUVColorSpace yuv_color_space,
1203 enum __DRISampleRange sample_range,
1204 enum __DRIChromaSiting horizontal_siting,
1205 enum __DRIChromaSiting vertical_siting,
1206 unsigned *error,
1207 void *loaderPrivate)
1208 {
1209 __DRIimage *image;
1210 const struct intel_image_format *f = intel_image_format_lookup(fourcc);
1211
1212 if (!f) {
1213 *error = __DRI_IMAGE_ERROR_BAD_MATCH;
1214 return NULL;
1215 }
1216
1217 image = intel_create_image_from_fds_common(dri_screen, width, height,
1218 fourcc, modifier,
1219 fds, num_fds, strides, offsets,
1220 loaderPrivate);
1221
1222 /*
1223 * Invalid parameters and any inconsistencies between are assumed to be
1224 * checked by the caller. Therefore besides unsupported formats one can fail
1225 * only in allocation.
1226 */
1227 if (!image) {
1228 *error = __DRI_IMAGE_ERROR_BAD_ALLOC;
1229 return NULL;
1230 }
1231
1232 image->dma_buf_imported = true;
1233 image->yuv_color_space = yuv_color_space;
1234 image->sample_range = sample_range;
1235 image->horizontal_siting = horizontal_siting;
1236 image->vertical_siting = vertical_siting;
1237
1238 *error = __DRI_IMAGE_ERROR_SUCCESS;
1239 return image;
1240 }
1241
1242 static __DRIimage *
1243 intel_create_image_from_dma_bufs(__DRIscreen *dri_screen,
1244 int width, int height, int fourcc,
1245 int *fds, int num_fds,
1246 int *strides, int *offsets,
1247 enum __DRIYUVColorSpace yuv_color_space,
1248 enum __DRISampleRange sample_range,
1249 enum __DRIChromaSiting horizontal_siting,
1250 enum __DRIChromaSiting vertical_siting,
1251 unsigned *error,
1252 void *loaderPrivate)
1253 {
1254 return intel_create_image_from_dma_bufs2(dri_screen, width, height,
1255 fourcc, DRM_FORMAT_MOD_INVALID,
1256 fds, num_fds, strides, offsets,
1257 yuv_color_space,
1258 sample_range,
1259 horizontal_siting,
1260 vertical_siting,
1261 error,
1262 loaderPrivate);
1263 }
1264
1265 static GLboolean
1266 intel_query_dma_buf_formats(__DRIscreen *screen, int max,
1267 int *formats, int *count)
1268 {
1269 int i, j = 0;
1270
1271 if (max == 0) {
1272 /* Note, sRGB formats not included. */
1273 *count = ARRAY_SIZE(intel_image_formats) - 2;
1274 return true;
1275 }
1276
1277 for (i = 0; i < (ARRAY_SIZE(intel_image_formats)) && j < max; i++) {
1278 if (intel_image_formats[i].fourcc == __DRI_IMAGE_FOURCC_SARGB8888 ||
1279 intel_image_formats[i].fourcc == __DRI_IMAGE_FOURCC_SABGR8888)
1280 continue;
1281 formats[j++] = intel_image_formats[i].fourcc;
1282 }
1283
1284 *count = j;
1285 return true;
1286 }
1287
/**
 * List the DRM format modifiers we support for a given fourcc.
 *
 * With max == 0 this only counts the supported modifiers (nothing is
 * written to 'modifiers'); otherwise up to 'max' entries are written.
 * 'external_only' (if non-NULL) marks modifiers whose format can only be
 * sampled as GL_TEXTURE_EXTERNAL_OES (the YUV component layouts).
 * Returns false if the fourcc is unknown.
 */
static GLboolean
intel_query_dma_buf_modifiers(__DRIscreen *_screen, int fourcc, int max,
                              uint64_t *modifiers,
                              unsigned int *external_only,
                              int *count)
{
   struct intel_screen *screen = _screen->driverPrivate;
   const struct intel_image_format *f;
   int num_mods = 0, i;

   f = intel_image_format_lookup(fourcc);
   if (f == NULL)
      return false;

   for (i = 0; i < ARRAY_SIZE(supported_modifiers); i++) {
      uint64_t modifier = supported_modifiers[i].modifier;
      if (!modifier_is_supported(&screen->devinfo, f, 0, modifier))
         continue;

      num_mods++;
      /* Counting pass only: don't write through 'modifiers'. */
      if (max == 0)
         continue;

      modifiers[num_mods - 1] = modifier;
      if (num_mods >= max)
         break;
   }

   if (external_only != NULL) {
      for (i = 0; i < num_mods && i < max; i++) {
         /* Subsampled YUV layouts can only be used via EXTERNAL sampling. */
         if (f->components == __DRI_IMAGE_COMPONENTS_Y_U_V ||
             f->components == __DRI_IMAGE_COMPONENTS_Y_UV ||
             f->components == __DRI_IMAGE_COMPONENTS_Y_XUXV) {
            external_only[i] = GL_TRUE;
         }
         else {
            external_only[i] = GL_FALSE;
         }
      }
   }

   *count = num_mods;
   return true;
}
1332
/**
 * Create a sub-image for one plane of 'parent'.
 *
 * Handles three cases: a plane of a planar format, plane 0 of a
 * non-planar image, and plane 1 as the CCS auxiliary surface of a
 * modifier with aux data.  The child shares (and references) the
 * parent's BO.  Returns NULL for an invalid plane or if the plane's
 * extent would fall outside the parent BO.
 */
static __DRIimage *
intel_from_planar(__DRIimage *parent, int plane, void *loaderPrivate)
{
   int width, height, offset, stride, size, dri_format;
   __DRIimage *image;

   if (parent == NULL)
      return NULL;

   width = parent->width;
   height = parent->height;

   const struct intel_image_format *f = parent->planar_format;

   if (f && plane < f->nplanes) {
      /* Use the planar format definition. */
      width >>= f->planes[plane].width_shift;
      height >>= f->planes[plane].height_shift;
      dri_format = f->planes[plane].dri_format;
      int index = f->planes[plane].buffer_index;
      offset = parent->offsets[index];
      stride = parent->strides[index];
      size = height * stride;
   } else if (plane == 0) {
      /* The only plane of a non-planar image: copy the parent definition
       * directly. */
      dri_format = parent->dri_format;
      offset = parent->offset;
      stride = parent->pitch;
      size = height * stride;
   } else if (plane == 1 && parent->modifier != DRM_FORMAT_MOD_INVALID &&
              isl_drm_modifier_has_aux(parent->modifier)) {
      /* Auxiliary plane */
      dri_format = parent->dri_format;
      offset = parent->aux_offset;
      stride = parent->aux_pitch;
      size = parent->aux_size;
   } else {
      return NULL;
   }

   if (offset + size > parent->bo->size) {
      _mesa_warning(NULL, "intel_from_planar: subimage out of bounds");
      return NULL;
   }

   image = intel_allocate_image(parent->screen, dri_format, loaderPrivate);
   if (image == NULL)
      return NULL;

   /* Share the parent's BO; the extra reference keeps it alive for the
    * lifetime of the child image. */
   image->bo = parent->bo;
   brw_bo_reference(parent->bo);
   image->modifier = parent->modifier;

   image->width = width;
   image->height = height;
   image->pitch = stride;
   image->offset = offset;

   intel_image_warn_if_unaligned(image, __func__);

   return image;
}
1396
/* __DRI_IMAGE (v16) dispatch table wiring the loader-visible image API to
 * the intel_* implementations above.  blitImage and getCapabilities are
 * deliberately unimplemented.
 */
static const __DRIimageExtension intelImageExtension = {
    .base = { __DRI_IMAGE, 16 },

    .createImageFromName                = intel_create_image_from_name,
    .createImageFromRenderbuffer        = intel_create_image_from_renderbuffer,
    .destroyImage                       = intel_destroy_image,
    .createImage                        = intel_create_image,
    .queryImage                         = intel_query_image,
    .dupImage                           = intel_dup_image,
    .validateUsage                      = intel_validate_usage,
    .createImageFromNames               = intel_create_image_from_names,
    .fromPlanar                         = intel_from_planar,
    .createImageFromTexture             = intel_create_image_from_texture,
    .createImageFromFds                 = intel_create_image_from_fds,
    .createImageFromDmaBufs             = intel_create_image_from_dma_bufs,
    .blitImage                          = NULL,
    .getCapabilities                    = NULL,
    .mapImage                           = intel_map_image,
    .unmapImage                         = intel_unmap_image,
    .createImageWithModifiers           = intel_create_image_with_modifiers,
    .createImageFromDmaBufs2            = intel_create_image_from_dma_bufs2,
    .queryDmaBufFormats                 = intel_query_dma_buf_formats,
    .queryDmaBufModifiers               = intel_query_dma_buf_modifiers,
    .queryDmaBufFormatModifierAttribs   = intel_query_format_modifier_attribs,
};
1422
1423 static uint64_t
1424 get_aperture_size(int fd)
1425 {
1426 struct drm_i915_gem_get_aperture aperture;
1427
1428 if (drmIoctl(fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture) != 0)
1429 return 0;
1430
1431 return aperture.aper_size;
1432 }
1433
/**
 * __DRI2rendererQueryExtension::queryInteger implementation.
 *
 * Fills value[0] for the requested __DRI2_RENDERER_* token and returns 0;
 * unknown tokens fall through to driQueryRendererIntegerCommon().
 */
static int
brw_query_renderer_integer(__DRIscreen *dri_screen,
                           int param, unsigned int *value)
{
   const struct intel_screen *const screen =
      (struct intel_screen *) dri_screen->driverPrivate;

   switch (param) {
   case __DRI2_RENDERER_VENDOR_ID:
      value[0] = 0x8086;   /* Intel's PCI vendor id */
      return 0;
   case __DRI2_RENDERER_DEVICE_ID:
      value[0] = screen->deviceID;
      return 0;
   case __DRI2_RENDERER_ACCELERATED:
      value[0] = 1;
      return 0;
   case __DRI2_RENDERER_VIDEO_MEMORY: {
      /* Once a batch uses more than 75% of the maximum mappable size, we
       * assume that there's some fragmentation, and we start doing extra
       * flushing, etc. That's the big cliff apps will care about.
       */
      const unsigned gpu_mappable_megabytes =
         screen->aperture_threshold / (1024 * 1024);

      const long system_memory_pages = sysconf(_SC_PHYS_PAGES);
      const long system_page_size = sysconf(_SC_PAGE_SIZE);

      if (system_memory_pages <= 0 || system_page_size <= 0)
         return -1;

      const uint64_t system_memory_bytes = (uint64_t) system_memory_pages
         * (uint64_t) system_page_size;

      const unsigned system_memory_megabytes =
         (unsigned) (system_memory_bytes / (1024 * 1024));

      /* Report whichever is smaller: system RAM or the mappable aperture. */
      value[0] = MIN2(system_memory_megabytes, gpu_mappable_megabytes);
      return 0;
   }
   case __DRI2_RENDERER_UNIFIED_MEMORY_ARCHITECTURE:
      value[0] = 1;
      return 0;
   case __DRI2_RENDERER_HAS_TEXTURE_3D:
      value[0] = 1;
      return 0;
   case __DRI2_RENDERER_HAS_CONTEXT_PRIORITY:
      /* Probe which context priorities the kernel will let us set by
       * actually trying each one on context 0.
       */
      value[0] = 0;
      if (brw_hw_context_set_priority(screen->bufmgr,
				      0, GEN_CONTEXT_HIGH_PRIORITY) == 0)
         value[0] |= __DRI2_RENDERER_HAS_CONTEXT_PRIORITY_HIGH;
      if (brw_hw_context_set_priority(screen->bufmgr,
				      0, GEN_CONTEXT_LOW_PRIORITY) == 0)
         value[0] |= __DRI2_RENDERER_HAS_CONTEXT_PRIORITY_LOW;
      /* reset to default last, just in case */
      if (brw_hw_context_set_priority(screen->bufmgr,
				      0, GEN_CONTEXT_MEDIUM_PRIORITY) == 0)
         value[0] |= __DRI2_RENDERER_HAS_CONTEXT_PRIORITY_MEDIUM;
      return 0;
   case __DRI2_RENDERER_HAS_FRAMEBUFFER_SRGB:
      value[0] = 1;
      return 0;
   default:
      return driQueryRendererIntegerCommon(dri_screen, param, value);
   }

   /* Unreachable: every case above returns. */
   return -1;
}
1502
1503 static int
1504 brw_query_renderer_string(__DRIscreen *dri_screen,
1505 int param, const char **value)
1506 {
1507 const struct intel_screen *screen =
1508 (struct intel_screen *) dri_screen->driverPrivate;
1509
1510 switch (param) {
1511 case __DRI2_RENDERER_VENDOR_ID:
1512 value[0] = brw_vendor_string;
1513 return 0;
1514 case __DRI2_RENDERER_DEVICE_ID:
1515 value[0] = brw_get_renderer_string(screen);
1516 return 0;
1517 default:
1518 break;
1519 }
1520
1521 return -1;
1522 }
1523
1524 static void
1525 brw_set_cache_funcs(__DRIscreen *dri_screen,
1526 __DRIblobCacheSet set, __DRIblobCacheGet get)
1527 {
1528 const struct intel_screen *const screen =
1529 (struct intel_screen *) dri_screen->driverPrivate;
1530
1531 if (!screen->disk_cache)
1532 return;
1533
1534 disk_cache_set_callbacks(screen->disk_cache, set, get);
1535 }
1536
/* GLX_MESA_query_renderer backing: integer and string renderer queries. */
static const __DRI2rendererQueryExtension intelRendererQueryExtension = {
   .base = { __DRI2_RENDERER_QUERY, 1 },

   .queryInteger = brw_query_renderer_integer,
   .queryString = brw_query_renderer_string
};
1543
/* Marker extension: advertises GL_ARB_robustness support (no entry points). */
static const __DRIrobustnessExtension dri2Robustness = {
   .base = { __DRI2_ROBUSTNESS, 1 }
};
1547
/* Lets the loader install set/get callbacks for the shader disk cache. */
static const __DRI2blobExtension intelBlobExtension = {
   .base = { __DRI2_BLOB, 1 },
   .set_cache_funcs = brw_set_cache_funcs
};
1552
/* Extension list advertised by default (no robust-context support). */
static const __DRIextension *screenExtensions[] = {
    &intelTexBufferExtension.base,
    &intelFenceExtension.base,
    &intelFlushExtension.base,
    &intelImageExtension.base,
    &intelRendererQueryExtension.base,
    &dri2ConfigQueryExtension.base,
    &dri2NoErrorExtension.base,
    &intelBlobExtension.base,
    NULL
};
1564
/* Same as screenExtensions plus __DRI2_ROBUSTNESS, used when the kernel
 * supports robust contexts.
 */
static const __DRIextension *intelRobustScreenExtensions[] = {
    &intelTexBufferExtension.base,
    &intelFenceExtension.base,
    &intelFlushExtension.base,
    &intelImageExtension.base,
    &intelRendererQueryExtension.base,
    &dri2ConfigQueryExtension.base,
    &dri2Robustness.base,
    &dri2NoErrorExtension.base,
    &intelBlobExtension.base,
    NULL
};
1577
1578 static int
1579 intel_get_param(struct intel_screen *screen, int param, int *value)
1580 {
1581 int ret = 0;
1582 struct drm_i915_getparam gp;
1583
1584 memset(&gp, 0, sizeof(gp));
1585 gp.param = param;
1586 gp.value = value;
1587
1588 if (drmIoctl(screen->driScrnPriv->fd, DRM_IOCTL_I915_GETPARAM, &gp) == -1) {
1589 ret = -errno;
1590 if (ret != -EINVAL)
1591 _mesa_warning(NULL, "drm_i915_getparam: %d", ret);
1592 }
1593
1594 return ret;
1595 }
1596
1597 static bool
1598 intel_get_boolean(struct intel_screen *screen, int param)
1599 {
1600 int value = 0;
1601 return (intel_get_param(screen, param, &value) == 0) && value;
1602 }
1603
static int
intel_get_integer(struct intel_screen *screen, int param)
{
   /* Returns the queried value, or -1 when the parameter query fails. */
   int value = -1;
   return intel_get_param(screen, param, &value) == 0 ? value : -1;
}
1614
1615 static void
1616 intelDestroyScreen(__DRIscreen * sPriv)
1617 {
1618 struct intel_screen *screen = sPriv->driverPrivate;
1619
1620 brw_bufmgr_destroy(screen->bufmgr);
1621 driDestroyOptionInfo(&screen->optionCache);
1622
1623 disk_cache_destroy(screen->disk_cache);
1624
1625 ralloc_free(screen);
1626 sPriv->driverPrivate = NULL;
1627 }
1628
1629
/**
 * Create a gl_framebuffer and attach it to __DRIdrawable::driverPrivate.
 *
 * This implements driDriverAPI::createNewDrawable, which the DRI layer calls
 * when creating an EGLSurface, GLXDrawable, or GLXPixmap. Despite the name,
 * this does not allocate GPU memory.
 */
static GLboolean
intelCreateBuffer(__DRIscreen *dri_screen,
                  __DRIdrawable * driDrawPriv,
                  const struct gl_config * mesaVis, GLboolean isPixmap)
{
   struct intel_renderbuffer *rb;
   struct intel_screen *screen = (struct intel_screen *)
      dri_screen->driverPrivate;
   mesa_format rgbFormat;
   /* Clamp the requested sample count to something the hardware supports. */
   unsigned num_samples =
      intel_quantize_num_samples(screen, mesaVis->samples);

   if (isPixmap)
      return false;

   struct gl_framebuffer *fb = CALLOC_STRUCT(gl_framebuffer);
   if (!fb)
      return false;

   _mesa_initialize_window_framebuffer(fb, mesaVis);

   /* Debug override (e.g. INTEL_FORCE_MSAA) for winsys buffer MSAA. */
   if (screen->winsys_msaa_samples_override != -1) {
      num_samples = screen->winsys_msaa_samples_override;
      fb->Visual.samples = num_samples;
   }

   /* Pick the winsys color format from the visual's channel sizes; the
    * red mask distinguishes BGRA-ordered from RGBA-ordered layouts.
    */
   if (mesaVis->redBits == 10 && mesaVis->alphaBits > 0) {
      rgbFormat = mesaVis->redMask == 0x3ff00000 ? MESA_FORMAT_B10G10R10A2_UNORM
                                                 : MESA_FORMAT_R10G10B10A2_UNORM;
   } else if (mesaVis->redBits == 10) {
      rgbFormat = mesaVis->redMask == 0x3ff00000 ? MESA_FORMAT_B10G10R10X2_UNORM
                                                 : MESA_FORMAT_R10G10B10X2_UNORM;
   } else if (mesaVis->redBits == 5) {
      rgbFormat = mesaVis->redMask == 0x1f ? MESA_FORMAT_R5G6B5_UNORM
                                           : MESA_FORMAT_B5G6R5_UNORM;
   } else if (mesaVis->sRGBCapable) {
      rgbFormat = mesaVis->redMask == 0xff ? MESA_FORMAT_R8G8B8A8_SRGB
                                           : MESA_FORMAT_B8G8R8A8_SRGB;
   } else if (mesaVis->alphaBits == 0) {
      rgbFormat = mesaVis->redMask == 0xff ? MESA_FORMAT_R8G8B8X8_UNORM
                                           : MESA_FORMAT_B8G8R8X8_UNORM;
   } else {
      /* 8-bit RGBA visual without explicit sRGB: use an sRGB format and
       * mark the visual sRGB-capable.
       */
      rgbFormat = mesaVis->redMask == 0xff ? MESA_FORMAT_R8G8B8A8_SRGB
                                           : MESA_FORMAT_B8G8R8A8_SRGB;
      fb->Visual.sRGBCapable = true;
   }

   /* mesaVis->sRGBCapable was set, user is asking for sRGB */
   bool srgb_cap_set = mesaVis->redBits >= 8 && mesaVis->sRGBCapable;

   /* setup the hardware-based renderbuffers */
   rb = intel_create_winsys_renderbuffer(screen, rgbFormat, num_samples);
   _mesa_attach_and_own_rb(fb, BUFFER_FRONT_LEFT, &rb->Base.Base);
   rb->need_srgb = srgb_cap_set;

   if (mesaVis->doubleBufferMode) {
      rb = intel_create_winsys_renderbuffer(screen, rgbFormat, num_samples);
      _mesa_attach_and_own_rb(fb, BUFFER_BACK_LEFT, &rb->Base.Base);
      rb->need_srgb = srgb_cap_set;
   }

   /*
    * Assert here that the gl_config has an expected depth/stencil bit
    * combination: one of d24/s8, d16/s0, d0/s0. (See intelInitScreen2(),
    * which constructs the advertised configs.)
    */
   if (mesaVis->depthBits == 24) {
      assert(mesaVis->stencilBits == 8);

      if (screen->devinfo.has_hiz_and_separate_stencil) {
         /* Separate depth (Z24X8) and stencil (S8) buffers. */
         rb = intel_create_private_renderbuffer(screen,
                                                MESA_FORMAT_Z24_UNORM_X8_UINT,
                                                num_samples);
         _mesa_attach_and_own_rb(fb, BUFFER_DEPTH, &rb->Base.Base);
         rb = intel_create_private_renderbuffer(screen, MESA_FORMAT_S_UINT8,
                                                num_samples);
         _mesa_attach_and_own_rb(fb, BUFFER_STENCIL, &rb->Base.Base);
      } else {
         /*
          * Use combined depth/stencil. Note that the renderbuffer is
          * attached to two attachment points.
          */
         rb = intel_create_private_renderbuffer(screen,
                                                MESA_FORMAT_Z24_UNORM_S8_UINT,
                                                num_samples);
         _mesa_attach_and_own_rb(fb, BUFFER_DEPTH, &rb->Base.Base);
         _mesa_attach_and_reference_rb(fb, BUFFER_STENCIL, &rb->Base.Base);
      }
   }
   else if (mesaVis->depthBits == 16) {
      assert(mesaVis->stencilBits == 0);
      rb = intel_create_private_renderbuffer(screen, MESA_FORMAT_Z_UNORM16,
                                             num_samples);
      _mesa_attach_and_own_rb(fb, BUFFER_DEPTH, &rb->Base.Base);
   }
   else {
      assert(mesaVis->depthBits == 0);
      assert(mesaVis->stencilBits == 0);
   }

   /* now add any/all software-based renderbuffers we may need */
   _swrast_add_soft_renderbuffers(fb,
                                  false, /* never sw color */
                                  false, /* never sw depth */
                                  false, /* never sw stencil */
                                  mesaVis->accumRedBits > 0,
                                  false, /* never sw alpha */
                                  false /* never sw aux */ );
   driDrawPriv->driverPrivate = fb;

   return true;
}
1749
1750 static void
1751 intelDestroyBuffer(__DRIdrawable * driDrawPriv)
1752 {
1753 struct gl_framebuffer *fb = driDrawPriv->driverPrivate;
1754
1755 _mesa_reference_framebuffer(&fb, NULL);
1756 }
1757
1758 static void
1759 intel_cs_timestamp_frequency(struct intel_screen *screen)
1760 {
1761 /* We shouldn't need to update gen_device_info.timestamp_frequency prior to
1762 * gen10, PCI-id is enough to figure it out.
1763 */
1764 assert(screen->devinfo.gen >= 10);
1765
1766 int ret, freq;
1767
1768 ret = intel_get_param(screen, I915_PARAM_CS_TIMESTAMP_FREQUENCY,
1769 &freq);
1770 if (ret < 0) {
1771 _mesa_warning(NULL,
1772 "Kernel 4.15 required to read the CS timestamp frequency.\n");
1773 return;
1774 }
1775
1776 screen->devinfo.timestamp_frequency = freq;
1777 }
1778
/**
 * Query subslice and EU counts from the kernel (gen8+).
 *
 * On failure both counts are left at -1; -EINVAL (old kernel that doesn't
 * know the parameter) is tolerated silently here and only triggers the
 * "Kernel 4.1 required" warning below.
 */
static void
intel_detect_sseu(struct intel_screen *screen)
{
   assert(screen->devinfo.gen >= 8);
   int ret;

   screen->subslice_total = -1;
   screen->eu_total = -1;

   ret = intel_get_param(screen, I915_PARAM_SUBSLICE_TOTAL,
                         &screen->subslice_total);
   if (ret < 0 && ret != -EINVAL)
      goto err_out;

   ret = intel_get_param(screen,
                         I915_PARAM_EU_TOTAL, &screen->eu_total);
   if (ret < 0 && ret != -EINVAL)
      goto err_out;

   /* Without this information, we cannot get the right Braswell brandstrings,
    * and we have to use conservative numbers for GPGPU on many platforms, but
    * otherwise, things will just work.
    */
   if (screen->subslice_total < 1 || screen->eu_total < 1)
      _mesa_warning(NULL,
                    "Kernel 4.1 required to properly query GPU properties.\n");

   return;

err_out:
   screen->subslice_total = -1;
   screen->eu_total = -1;
   _mesa_warning(NULL, "Failed to query GPU properties (%s).\n", strerror(-ret));
}
1813
/**
 * Create the screen's buffer manager and check minimum kernel features.
 *
 * Honors INTEL_NO_HW (never submit batches) and requires EXEC_NO_RELOC
 * (kernel >= 3.9).  Returns false on failure; note the bufmgr is not torn
 * down on the NO_RELOC failure path — screen teardown handles cleanup.
 */
static bool
intel_init_bufmgr(struct intel_screen *screen)
{
   __DRIscreen *dri_screen = screen->driScrnPriv;

   if (getenv("INTEL_NO_HW") != NULL)
      screen->no_hw = true;

   screen->bufmgr = brw_bufmgr_init(&screen->devinfo, dri_screen->fd);
   if (screen->bufmgr == NULL) {
      fprintf(stderr, "[%s:%u] Error initializing buffer manager.\n",
	      __func__, __LINE__);
      return false;
   }

   if (!intel_get_boolean(screen, I915_PARAM_HAS_EXEC_NO_RELOC)) {
      fprintf(stderr, "[%s: %u] Kernel 3.9 required.\n", __func__, __LINE__);
      return false;
   }

   return true;
}
1836
1837 static bool
1838 intel_detect_swizzling(struct intel_screen *screen)
1839 {
1840 uint32_t tiling = I915_TILING_X;
1841 uint32_t swizzle_mode = 0;
1842 struct brw_bo *buffer =
1843 brw_bo_alloc_tiled(screen->bufmgr, "swizzle test", 32768,
1844 BRW_MEMZONE_OTHER, tiling, 512, 0);
1845 if (buffer == NULL)
1846 return false;
1847
1848 brw_bo_get_tiling(buffer, &tiling, &swizzle_mode);
1849 brw_bo_unreference(buffer);
1850
1851 return swizzle_mode != I915_BIT_6_SWIZZLE_NONE;
1852 }
1853
/**
 * Probe how the kernel exposes the TIMESTAMP register.
 *
 * Returns: 3 = kernel supports full 36-bit reads (TIMESTAMP | 1),
 *          2 = value is shifted (low 32 bits appear in the upper dword),
 *          1 = value is unshifted, 0 = no usable timestamp.
 */
static int
intel_detect_timestamp(struct intel_screen *screen)
{
   uint64_t dummy = 0, last = 0;
   int upper, lower, loops;

   /* On 64bit systems, some old kernels trigger a hw bug resulting in the
    * TIMESTAMP register being shifted and the low 32bits always zero.
    *
    * More recent kernels offer an interface to read the full 36bits
    * everywhere.
    */
   if (brw_reg_read(screen->bufmgr, TIMESTAMP | 1, &dummy) == 0)
      return 3;

   /* Determine if we have a 32bit or 64bit kernel by inspecting the
    * upper 32bits for a rapidly changing timestamp.
    */
   if (brw_reg_read(screen->bufmgr, TIMESTAMP, &last))
      return 0;

   upper = lower = 0;
   for (loops = 0; loops < 10; loops++) {
      /* The TIMESTAMP should change every 80ns, so several round trips
       * through the kernel should be enough to advance it.
       */
      if (brw_reg_read(screen->bufmgr, TIMESTAMP, &dummy))
         return 0;

      upper += (dummy >> 32) != (last >> 32);
      if (upper > 1) /* beware 32bit counter overflow */
         return 2; /* upper dword holds the low 32bits of the timestamp */

      lower += (dummy & 0xffffffff) != (last & 0xffffffff);
      if (lower > 1)
         return 1; /* timestamp is unshifted */

      last = dummy;
   }

   /* No advancement? No timestamp! */
   return 0;
}
1897
/**
 * Test if we can use MI_LOAD_REGISTER_MEM from an untrusted batchbuffer.
 *
 * Some combinations of hardware and kernel versions allow this feature,
 * while others don't.  Instead of trying to enumerate every case, just
 * try and write a register and see if works.
 *
 * Builds a tiny batch by hand (LRI to write the register, SRM to dump it
 * into a results BO, optional LRI reset), submits it with execbuf2, and
 * reads the results BO back to see whether the write landed.
 */
static bool
intel_detect_pipelined_register(struct intel_screen *screen,
                                int reg, uint32_t expected_value, bool reset)
{
   if (screen->no_hw)
      return false;

   struct brw_bo *results, *bo;
   uint32_t *batch;
   uint32_t offset = 0;
   void *map;
   bool success = false;

   /* Create a zero'ed temporary buffer for reading our results */
   results = brw_bo_alloc(screen->bufmgr, "registers", 4096, BRW_MEMZONE_OTHER);
   if (results == NULL)
      goto err;

   bo = brw_bo_alloc(screen->bufmgr, "batchbuffer", 4096, BRW_MEMZONE_OTHER);
   if (bo == NULL)
      goto err_results;

   map = brw_bo_map(NULL, bo, MAP_WRITE);
   if (!map)
      goto err_batch;

   batch = map;

   /* Write the register. */
   *batch++ = MI_LOAD_REGISTER_IMM | (3 - 2);
   *batch++ = reg;
   *batch++ = expected_value;

   /* Save the register's value back to the buffer. */
   *batch++ = MI_STORE_REGISTER_MEM | (3 - 2);
   *batch++ = reg;
   /* Relocation for the SRM destination address within 'results'. */
   struct drm_i915_gem_relocation_entry reloc = {
      .offset = (char *) batch - (char *) map,
      .delta = offset * sizeof(uint32_t),
      .target_handle = results->gem_handle,
      .read_domains = I915_GEM_DOMAIN_INSTRUCTION,
      .write_domain = I915_GEM_DOMAIN_INSTRUCTION,
   };
   *batch++ = reloc.presumed_offset + reloc.delta;

   /* And afterwards clear the register */
   if (reset) {
      *batch++ = MI_LOAD_REGISTER_IMM | (3 - 2);
      *batch++ = reg;
      *batch++ = 0;
   }

   *batch++ = MI_BATCH_BUFFER_END;

   struct drm_i915_gem_exec_object2 exec_objects[2] = {
      {
         .handle = results->gem_handle,
      },
      {
         .handle = bo->gem_handle,
         .relocation_count = 1,
         .relocs_ptr = (uintptr_t) &reloc,
      }
   };

   struct drm_i915_gem_execbuffer2 execbuf = {
      .buffers_ptr = (uintptr_t) exec_objects,
      .buffer_count = 2,
      .batch_len = ALIGN((char *) batch - (char *) map, 8),
      .flags = I915_EXEC_RENDER,
   };

   /* Don't bother with error checking - if the execbuf fails, the
    * value won't be written and we'll just report that there's no access.
    */
   __DRIscreen *dri_screen = screen->driScrnPriv;
   drmIoctl(dri_screen->fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);

   /* Check whether the value got written. */
   void *results_map = brw_bo_map(NULL, results, MAP_READ);
   if (results_map) {
      success = *((uint32_t *)results_map + offset) == expected_value;
      brw_bo_unmap(results);
   }

err_batch:
   brw_bo_unreference(bo);
err_results:
   brw_bo_unreference(results);
err:
   return success;
}
1997
1998 static bool
1999 intel_detect_pipelined_so(struct intel_screen *screen)
2000 {
2001 const struct gen_device_info *devinfo = &screen->devinfo;
2002
2003 /* Supposedly, Broadwell just works. */
2004 if (devinfo->gen >= 8)
2005 return true;
2006
2007 if (devinfo->gen <= 6)
2008 return false;
2009
2010 /* See the big explanation about command parser versions below */
2011 if (screen->cmd_parser_version >= (devinfo->is_haswell ? 7 : 2))
2012 return true;
2013
2014 /* We use SO_WRITE_OFFSET0 since you're supposed to write it (unlike the
2015 * statistics registers), and we already reset it to zero before using it.
2016 */
2017 return intel_detect_pipelined_register(screen,
2018 GEN7_SO_WRITE_OFFSET(0),
2019 0x1337d0d0,
2020 false);
2021 }
2022
2023 /**
2024 * Return array of MSAA modes supported by the hardware. The array is
2025 * zero-terminated and sorted in decreasing order.
2026 */
2027 const int*
2028 intel_supported_msaa_modes(const struct intel_screen *screen)
2029 {
2030 static const int gen9_modes[] = {16, 8, 4, 2, 0, -1};
2031 static const int gen8_modes[] = {8, 4, 2, 0, -1};
2032 static const int gen7_modes[] = {8, 4, 0, -1};
2033 static const int gen6_modes[] = {4, 0, -1};
2034 static const int gen4_modes[] = {0, -1};
2035
2036 if (screen->devinfo.gen >= 9) {
2037 return gen9_modes;
2038 } else if (screen->devinfo.gen >= 8) {
2039 return gen8_modes;
2040 } else if (screen->devinfo.gen >= 7) {
2041 return gen7_modes;
2042 } else if (screen->devinfo.gen == 6) {
2043 return gen6_modes;
2044 } else {
2045 return gen4_modes;
2046 }
2047 }
2048
2049 static unsigned
2050 intel_loader_get_cap(const __DRIscreen *dri_screen, enum dri_loader_cap cap)
2051 {
2052 if (dri_screen->dri2.loader && dri_screen->dri2.loader->base.version >= 4 &&
2053 dri_screen->dri2.loader->getCapability)
2054 return dri_screen->dri2.loader->getCapability(dri_screen->loaderPrivate, cap);
2055
2056 if (dri_screen->image.loader && dri_screen->image.loader->base.version >= 2 &&
2057 dri_screen->image.loader->getCapability)
2058 return dri_screen->image.loader->getCapability(dri_screen->loaderPrivate, cap);
2059
2060 return 0;
2061 }
2062
/* Build the full list of framebuffer configs advertised to the loader:
 * first singlesample configs (no accum), then a minimal accum-buffer set,
 * then multisample configs.  The ordering of the three passes and of the
 * format table itself is load-bearing — see the comments below.
 * Returns a driConcatConfigs-built array, or NULL on failure.
 */
static __DRIconfig**
intel_screen_make_configs(__DRIscreen *dri_screen)
{
   static const mesa_format formats[] = {
      MESA_FORMAT_B5G6R5_UNORM,
      MESA_FORMAT_B8G8R8A8_UNORM,
      MESA_FORMAT_B8G8R8X8_UNORM,

      MESA_FORMAT_B8G8R8A8_SRGB,

      /* For 10 bpc, 30 bit depth framebuffers. */
      MESA_FORMAT_B10G10R10A2_UNORM,
      MESA_FORMAT_B10G10R10X2_UNORM,

      /* The 32-bit RGBA format must not precede the 32-bit BGRA format.
       * Likewise for RGBX and BGRX. Otherwise, the GLX client and the GLX
       * server may disagree on which format the GLXFBConfig represents,
       * resulting in swapped color channels.
       *
       * The problem, as of 2017-05-30:
       * When matching a GLXFBConfig to a __DRIconfig, GLX ignores the channel
       * order and chooses the first __DRIconfig with the expected channel
       * sizes. Specifically, GLX compares the GLXFBConfig's and __DRIconfig's
       * __DRI_ATTRIB_{CHANNEL}_SIZE but ignores __DRI_ATTRIB_{CHANNEL}_MASK.
       *
       * EGL does not suffer from this problem. It correctly compares the
       * channel masks when matching EGLConfig to __DRIconfig.
       */

      /* Required by Android, for HAL_PIXEL_FORMAT_RGBA_8888. */
      MESA_FORMAT_R8G8B8A8_UNORM,

      /* Required by Android, for HAL_PIXEL_FORMAT_RGBX_8888. */
      MESA_FORMAT_R8G8B8X8_UNORM,

      MESA_FORMAT_R8G8B8A8_SRGB,
   };

   /* GLX_SWAP_COPY_OML is not supported due to page flipping. */
   static const GLenum back_buffer_modes[] = {
      __DRI_ATTRIB_SWAP_UNDEFINED, __DRI_ATTRIB_SWAP_NONE
   };

   static const uint8_t singlesample_samples[1] = {0};

   struct intel_screen *screen = dri_screen->driverPrivate;
   const struct gen_device_info *devinfo = &screen->devinfo;
   uint8_t depth_bits[4], stencil_bits[4];
   __DRIconfig **configs = NULL;

   /* Expose only BGRA ordering if the loader doesn't support RGBA ordering.
    * The three RGBA-ordered entries sit at the tail of formats[] so they can
    * be dropped just by shrinking the count. */
   unsigned num_formats;
   if (intel_loader_get_cap(dri_screen, DRI_LOADER_CAP_RGBA_ORDERING))
      num_formats = ARRAY_SIZE(formats);
   else
      num_formats = ARRAY_SIZE(formats) - 3; /* all - RGBA_ORDERING formats */

   /* Shall we expose 10 bpc formats? */
   bool allow_rgb10_configs = driQueryOptionb(&screen->optionCache,
                                              "allow_rgb10_configs");

   /* Generate singlesample configs without accumulation buffer. */
   for (unsigned i = 0; i < num_formats; i++) {
      __DRIconfig **new_configs;
      int num_depth_stencil_bits = 2;

      if (!allow_rgb10_configs &&
          (formats[i] == MESA_FORMAT_B10G10R10A2_UNORM ||
           formats[i] == MESA_FORMAT_B10G10R10X2_UNORM))
         continue;

      /* Starting with DRI2 protocol version 1.1 we can request a depth/stencil
       * buffer that has a different number of bits per pixel than the color
       * buffer, gen >= 6 supports this.
       */
      depth_bits[0] = 0;
      stencil_bits[0] = 0;

      if (formats[i] == MESA_FORMAT_B5G6R5_UNORM) {
         depth_bits[1] = 16;
         stencil_bits[1] = 0;
         if (devinfo->gen >= 6) {
            depth_bits[2] = 24;
            stencil_bits[2] = 8;
            num_depth_stencil_bits = 3;
         }
      } else {
         depth_bits[1] = 24;
         stencil_bits[1] = 8;
      }

      /* back_buffer_modes count of 2 yields both double-buffered
       * (SWAP_UNDEFINED) and single-buffered (SWAP_NONE) variants. */
      new_configs = driCreateConfigs(formats[i],
                                     depth_bits,
                                     stencil_bits,
                                     num_depth_stencil_bits,
                                     back_buffer_modes, 2,
                                     singlesample_samples, 1,
                                     false, false);
      configs = driConcatConfigs(configs, new_configs);
   }

   /* Generate the minimum possible set of configs that include an
    * accumulation buffer.
    */
   for (unsigned i = 0; i < num_formats; i++) {
      __DRIconfig **new_configs;

      if (!allow_rgb10_configs &&
          (formats[i] == MESA_FORMAT_B10G10R10A2_UNORM ||
           formats[i] == MESA_FORMAT_B10G10R10X2_UNORM))
         continue;

      if (formats[i] == MESA_FORMAT_B5G6R5_UNORM) {
         depth_bits[0] = 16;
         stencil_bits[0] = 0;
      } else {
         depth_bits[0] = 24;
         stencil_bits[0] = 8;
      }

      /* Only one depth/stencil combo and one swap mode here; accum configs
       * are rarely used, so keep the set minimal. */
      new_configs = driCreateConfigs(formats[i],
                                     depth_bits, stencil_bits, 1,
                                     back_buffer_modes, 1,
                                     singlesample_samples, 1,
                                     true, false);
      configs = driConcatConfigs(configs, new_configs);
   }

   /* Generate multisample configs.
    *
    * This loop breaks early, and hence is a no-op, on gen < 6.
    *
    * Multisample configs must follow the singlesample configs in order to
    * work around an X server bug present in 1.12. The X server chooses to
    * associate the first listed RGBA888-Z24S8 config, regardless of its
    * sample count, with the 32-bit depth visual used for compositing.
    *
    * Only doublebuffer configs with GLX_SWAP_UNDEFINED_OML behavior are
    * supported. Singlebuffer configs are not supported because no one wants
    * them.
    */
   for (unsigned i = 0; i < num_formats; i++) {
      if (devinfo->gen < 6)
         break;

      if (!allow_rgb10_configs &&
          (formats[i] == MESA_FORMAT_B10G10R10A2_UNORM ||
           formats[i] == MESA_FORMAT_B10G10R10X2_UNORM))
         continue;

      __DRIconfig **new_configs;
      const int num_depth_stencil_bits = 2;
      int num_msaa_modes = 0;
      const uint8_t *multisample_samples = NULL;

      depth_bits[0] = 0;
      stencil_bits[0] = 0;

      if (formats[i] == MESA_FORMAT_B5G6R5_UNORM) {
         depth_bits[1] = 16;
         stencil_bits[1] = 0;
      } else {
         depth_bits[1] = 24;
         stencil_bits[1] = 8;
      }

      /* Pick the MSAA sample counts this generation supports; compare with
       * intel_supported_msaa_modes() above (0 and -1 entries excluded). */
      if (devinfo->gen >= 9) {
         static const uint8_t multisample_samples_gen9[] = {2, 4, 8, 16};
         multisample_samples = multisample_samples_gen9;
         num_msaa_modes = ARRAY_SIZE(multisample_samples_gen9);
      } else if (devinfo->gen == 8) {
         static const uint8_t multisample_samples_gen8[] = {2, 4, 8};
         multisample_samples = multisample_samples_gen8;
         num_msaa_modes = ARRAY_SIZE(multisample_samples_gen8);
      } else if (devinfo->gen == 7) {
         static const uint8_t multisample_samples_gen7[] = {4, 8};
         multisample_samples = multisample_samples_gen7;
         num_msaa_modes = ARRAY_SIZE(multisample_samples_gen7);
      } else if (devinfo->gen == 6) {
         static const uint8_t multisample_samples_gen6[] = {4};
         multisample_samples = multisample_samples_gen6;
         num_msaa_modes = ARRAY_SIZE(multisample_samples_gen6);
      }

      new_configs = driCreateConfigs(formats[i],
                                     depth_bits,
                                     stencil_bits,
                                     num_depth_stencil_bits,
                                     back_buffer_modes, 1,
                                     multisample_samples,
                                     num_msaa_modes,
                                     false, false);
      configs = driConcatConfigs(configs, new_configs);
   }

   if (configs == NULL) {
      fprintf(stderr, "[%s:%u] Error creating FBConfig!\n", __func__,
              __LINE__);
      return NULL;
   }

   return configs;
}
2266
/* Advertise to the DRI core the maximum GL core/compat and GLES context
 * versions this hardware generation (and kernel) can support.  The loader
 * reads these fields when validating context-creation requests.
 */
static void
set_max_gl_versions(struct intel_screen *screen)
{
   __DRIscreen *dri_screen = screen->driScrnPriv;
   /* Gen9+ gains ASTC texture support, which is what gates ES 3.2 below. */
   const bool has_astc = screen->devinfo.gen >= 9;

   switch (screen->devinfo.gen) {
   case 11:
   case 10:
   case 9:
   case 8:
      dri_screen->max_gl_core_version = 45;
      dri_screen->max_gl_compat_version = 30;
      dri_screen->max_gl_es1_version = 11;
      dri_screen->max_gl_es2_version = has_astc ? 32 : 31;
      break;
   case 7:
      dri_screen->max_gl_core_version = 33;
      /* Higher core versions on gen7 need privileged register writes,
       * compute dispatch, and MI_MATH/LRR, all of which depend on the
       * kernel command parser (see the feature probing in intelInitScreen2).
       */
      if (can_do_pipelined_register_writes(screen)) {
         dri_screen->max_gl_core_version = 42;
         if (screen->devinfo.is_haswell && can_do_compute_dispatch(screen))
            dri_screen->max_gl_core_version = 43;
         if (screen->devinfo.is_haswell && can_do_mi_math_and_lrr(screen))
            dri_screen->max_gl_core_version = 45;
      }
      dri_screen->max_gl_compat_version = 30;
      dri_screen->max_gl_es1_version = 11;
      dri_screen->max_gl_es2_version = screen->devinfo.is_haswell ? 31 : 30;
      break;
   case 6:
      dri_screen->max_gl_core_version = 33;
      dri_screen->max_gl_compat_version = 30;
      dri_screen->max_gl_es1_version = 11;
      dri_screen->max_gl_es2_version = 30;
      break;
   case 5:
   case 4:
      /* Pre-gen6 has no core-profile support at all. */
      dri_screen->max_gl_core_version = 0;
      dri_screen->max_gl_compat_version = 21;
      dri_screen->max_gl_es1_version = 11;
      dri_screen->max_gl_es2_version = 20;
      break;
   default:
      unreachable("unrecognized intel_screen::gen");
   }
}
2313
2314 /**
2315 * Return the revision (generally the revid field of the PCI header) of the
2316 * graphics device.
2317 */
2318 int
2319 intel_device_get_revision(int fd)
2320 {
2321 struct drm_i915_getparam gp;
2322 int revision;
2323 int ret;
2324
2325 memset(&gp, 0, sizeof(gp));
2326 gp.param = I915_PARAM_REVISION;
2327 gp.value = &revision;
2328
2329 ret = drmCommandWriteRead(fd, DRM_I915_GETPARAM, &gp, sizeof(gp));
2330 if (ret)
2331 revision = -1;
2332
2333 return revision;
2334 }
2335
2336 static void
2337 shader_debug_log_mesa(void *data, const char *fmt, ...)
2338 {
2339 struct brw_context *brw = (struct brw_context *)data;
2340 va_list args;
2341
2342 va_start(args, fmt);
2343 GLuint msg_id = 0;
2344 _mesa_gl_vdebug(&brw->ctx, &msg_id,
2345 MESA_DEBUG_SOURCE_SHADER_COMPILER,
2346 MESA_DEBUG_TYPE_OTHER,
2347 MESA_DEBUG_SEVERITY_NOTIFICATION, fmt, args);
2348 va_end(args);
2349 }
2350
2351 static void
2352 shader_perf_log_mesa(void *data, const char *fmt, ...)
2353 {
2354 struct brw_context *brw = (struct brw_context *)data;
2355
2356 va_list args;
2357 va_start(args, fmt);
2358
2359 if (unlikely(INTEL_DEBUG & DEBUG_PERF)) {
2360 va_list args_copy;
2361 va_copy(args_copy, args);
2362 vfprintf(stderr, fmt, args_copy);
2363 va_end(args_copy);
2364 }
2365
2366 if (brw->perf_debug) {
2367 GLuint msg_id = 0;
2368 _mesa_gl_vdebug(&brw->ctx, &msg_id,
2369 MESA_DEBUG_SOURCE_SHADER_COMPILER,
2370 MESA_DEBUG_TYPE_PERFORMANCE,
2371 MESA_DEBUG_SEVERITY_MEDIUM, fmt, args);
2372 }
2373 va_end(args);
2374 }
2375
/**
 * This is the driver specific part of the createNewScreen entry point.
 * Called when using DRI2.
 *
 * Probes the device and kernel for capabilities, fills in the
 * intel_screen, and builds the advertised framebuffer configs.
 *
 * \return the struct gl_config supported by this driver
 */
static const
__DRIconfig **intelInitScreen2(__DRIscreen *dri_screen)
{
   struct intel_screen *screen;

   /* An image loader needs nothing extra; a DRI2 loader must be new enough
    * to provide getBuffersWithFormat(). */
   if (dri_screen->image.loader) {
   } else if (dri_screen->dri2.loader->base.version <= 2 ||
       dri_screen->dri2.loader->getBuffersWithFormat == NULL) {
      fprintf(stderr,
              "\nERROR! DRI2 loader with getBuffersWithFormat() "
              "support required\n");
      return NULL;
   }

   /* Allocate the private area */
   screen = rzalloc(NULL, struct intel_screen);
   if (!screen) {
      fprintf(stderr, "\nERROR! Allocating private area failed\n");
      return NULL;
   }
   /* parse information in __driConfigOptions */
   driOptionCache options;
   memset(&options, 0, sizeof(options));

   driParseOptionInfo(&options, brw_config_options.xml);
   driParseConfigFiles(&screen->optionCache, &options, dri_screen->myNum, "i965");
   driDestroyOptionCache(&options);

   screen->driScrnPriv = dri_screen;
   dri_screen->driverPrivate = (void *) screen;

   /* INTEL_DEVID_OVERRIDE forces a fake device and disables HW submission. */
   screen->deviceID = gen_get_pci_device_id_override();
   if (screen->deviceID < 0)
      screen->deviceID = intel_get_integer(screen, I915_PARAM_CHIPSET_ID);
   else
      screen->no_hw = true;

   /* NOTE(review): this and the bufmgr failure below return NULL without
    * freeing the rzalloc'd screen, and leave dri_screen->driverPrivate
    * pointing at it — confirm the loader never touches driverPrivate after
    * a NULL return. */
   if (!gen_get_device_info(screen->deviceID, &screen->devinfo))
      return NULL;

   if (!intel_init_bufmgr(screen))
      return NULL;

   const struct gen_device_info *devinfo = &screen->devinfo;

   brw_process_intel_debug_variable();

   if ((INTEL_DEBUG & DEBUG_SHADER_TIME) && devinfo->gen < 7) {
      fprintf(stderr,
              "shader_time debugging requires gen7 (Ivybridge) or better.\n");
      INTEL_DEBUG &= ~DEBUG_SHADER_TIME;
   }

   if (intel_get_integer(screen, I915_PARAM_MMAP_GTT_VERSION) >= 1) {
      /* Theoretically unlimited! At least for individual objects...
       *
       * Currently the entire (global) address space for all GTT maps is
       * limited to 64bits. That is all objects on the system that are
       * setup for GTT mmapping must fit within 64bits. An attempt to use
       * one that exceeds the limit will fail in brw_bo_map_gtt().
       *
       * Long before we hit that limit, we will be practically limited by
       * that any single object must fit in physical memory (RAM). The upper
       * limit on the CPU's address space is currently 48bits (Skylake), of
       * which only 39bits can be physical memory. (The GPU itself also has
       * a 48bit addressable virtual space.) We can fit over 32 million
       * objects of the current maximum allocable size before running out
       * of mmap space.
       */
      screen->max_gtt_map_object_size = UINT64_MAX;
   } else {
      /* Estimate the size of the mappable aperture into the GTT. There's an
       * ioctl to get the whole GTT size, but not one to get the mappable subset.
       * It turns out it's basically always 256MB, though some ancient hardware
       * was smaller.
       */
      uint32_t gtt_size = 256 * 1024 * 1024;

      /* We don't want to map two objects such that a memcpy between them would
       * just fault one mapping in and then the other over and over forever. So
       * we would need to divide the GTT size by 2. Additionally, some GTT is
       * taken up by things like the framebuffer and the ringbuffer and such, so
       * be more conservative.
       */
      screen->max_gtt_map_object_size = gtt_size / 4;
   }

   screen->aperture_threshold = get_aperture_size(dri_screen->fd) * 3 / 4;

   screen->hw_has_swizzling = intel_detect_swizzling(screen);
   screen->hw_has_timestamp = intel_detect_timestamp(screen);

   isl_device_init(&screen->isl_dev, &screen->devinfo,
                   screen->hw_has_swizzling);

   if (devinfo->gen >= 10)
      intel_cs_timestamp_frequency(screen);

   /* GENs prior to 8 do not support EU/Subslice info */
   if (devinfo->gen >= 8) {
      intel_detect_sseu(screen);
   } else if (devinfo->gen == 7) {
      screen->subslice_total = 1 << (devinfo->gt - 1);
   }

   /* Gen7-7.5 kernel requirements / command parser saga:
    *
    * - pre-v3.16:
    *   Haswell and Baytrail cannot use any privileged batchbuffer features.
    *
    *   Ivybridge has aliasing PPGTT on by default, which accidentally marks
    *   all batches secure, allowing them to use any feature with no checking.
    *   This is effectively equivalent to a command parser version of
    *   \infinity - everything is possible.
    *
    *   The command parser does not exist, and querying the version will
    *   return -EINVAL.
    *
    * - v3.16:
    *   The kernel enables the command parser by default, for systems with
    *   aliasing PPGTT enabled (Ivybridge and Haswell). However, the
    *   hardware checker is still enabled, so Haswell and Baytrail cannot
    *   do anything.
    *
    *   Ivybridge goes from "everything is possible" to "only what the
    *   command parser allows" (if the user boots with i915.cmd_parser=0,
    *   then everything is possible again). We can only safely use features
    *   allowed by the supported command parser version.
    *
    *   Annoyingly, I915_PARAM_CMD_PARSER_VERSION reports the static version
    *   implemented by the kernel, even if it's turned off. So, checking
    *   for version > 0 does not mean that you can write registers. We have
    *   to try it and see. The version does, however, indicate the age of
    *   the kernel.
    *
    *   Instead of matching the hardware checker's behavior of converting
    *   privileged commands to MI_NOOP, it makes execbuf2 start returning
    *   -EINVAL, making it dangerous to try and use privileged features.
    *
    *   Effective command parser versions:
    *   - Haswell:   0 (reporting 1, writes don't work)
    *   - Baytrail:  0 (reporting 1, writes don't work)
    *   - Ivybridge: 1 (enabled) or infinite (disabled)
    *
    * - v3.17:
    *   Baytrail aliasing PPGTT is enabled, making it like Ivybridge:
    *   effectively version 1 (enabled) or infinite (disabled).
    *
    * - v3.19: f1f55cc0556031c8ee3fe99dae7251e78b9b653b
    *   Command parser v2 supports predicate writes.
    *
    *   - Haswell:   0 (reporting 1, writes don't work)
    *   - Baytrail:  2 (enabled) or infinite (disabled)
    *   - Ivybridge: 2 (enabled) or infinite (disabled)
    *
    *   So version >= 2 is enough to know that Ivybridge and Baytrail
    *   will work. Haswell still can't do anything.
    *
    * - v4.0: Version 3 happened. Largely not relevant.
    *
    * - v4.1: 6702cf16e0ba8b0129f5aa1b6609d4e9c70bc13b
    *   L3 config registers are properly saved and restored as part
    *   of the hardware context. We can approximately detect this point
    *   in time by checking if I915_PARAM_REVISION is recognized - it
    *   landed in a later commit, but in the same release cycle.
    *
    * - v4.2: 245054a1fe33c06ad233e0d58a27ec7b64db9284
    *   Command parser finally gains secure batch promotion. On Haswell,
    *   the hardware checker gets disabled, which finally allows it to do
    *   privileged commands.
    *
    *   I915_PARAM_CMD_PARSER_VERSION reports 3. Effective versions:
    *   - Haswell:   3 (enabled) or 0 (disabled)
    *   - Baytrail:  3 (enabled) or infinite (disabled)
    *   - Ivybridge: 3 (enabled) or infinite (disabled)
    *
    *   Unfortunately, detecting this point in time is tricky, because
    *   no version bump happened when this important change occurred.
    *   On Haswell, if we can write any register, then the kernel is at
    *   least this new, and we can start trusting the version number.
    *
    * - v4.4: 2bbe6bbb0dc94fd4ce287bdac9e1bd184e23057b and
    *   Command parser reaches version 4, allowing access to Haswell
    *   atomic scratch and chicken3 registers. If version >= 4, we know
    *   the kernel is new enough to support privileged features on all
    *   hardware. However, the user might have disabled it...and the
    *   kernel will still report version 4. So we still have to guess
    *   and check.
    *
    * - v4.4: 7b9748cb513a6bef4af87b79f0da3ff7e8b56cd8
    *   Command parser v5 whitelists indirect compute shader dispatch
    *   registers, needed for OpenGL 4.3 and later.
    *
    * - v4.8:
    *   Command parser v7 lets us use MI_MATH on Haswell.
    *
    *   Additionally, the kernel begins reporting version 0 when
    *   the command parser is disabled, allowing us to skip the
    *   guess-and-check step on Haswell. Unfortunately, this also
    *   means that we can no longer use it as an indicator of the
    *   age of the kernel.
    */
   if (intel_get_param(screen, I915_PARAM_CMD_PARSER_VERSION,
                       &screen->cmd_parser_version) < 0) {
      /* Command parser does not exist - getparam is unrecognized */
      screen->cmd_parser_version = 0;
   }

   /* Kernel 4.13 required for exec object capture */
   if (intel_get_boolean(screen, I915_PARAM_HAS_EXEC_CAPTURE)) {
      screen->kernel_features |= KERNEL_ALLOWS_EXEC_CAPTURE;
   }

   if (intel_get_boolean(screen, I915_PARAM_HAS_EXEC_BATCH_FIRST)) {
      screen->kernel_features |= KERNEL_ALLOWS_EXEC_BATCH_FIRST;
   }

   if (!intel_detect_pipelined_so(screen)) {
      /* We can't do anything, so the effective version is 0. */
      screen->cmd_parser_version = 0;
   } else {
      screen->kernel_features |= KERNEL_ALLOWS_SOL_OFFSET_WRITES;
   }

   if (devinfo->gen >= 8 || screen->cmd_parser_version >= 2)
      screen->kernel_features |= KERNEL_ALLOWS_PREDICATE_WRITES;

   /* Haswell requires command parser version 4 in order to have L3
    * atomic scratch1 and chicken3 bits
    */
   if (devinfo->is_haswell && screen->cmd_parser_version >= 4) {
      screen->kernel_features |=
         KERNEL_ALLOWS_HSW_SCRATCH1_AND_ROW_CHICKEN3;
   }

   /* Haswell requires command parser version 6 in order to write to the
    * MI_MATH GPR registers, and version 7 in order to use
    * MI_LOAD_REGISTER_REG (which all users of MI_MATH use).
    */
   if (devinfo->gen >= 8 ||
       (devinfo->is_haswell && screen->cmd_parser_version >= 7)) {
      screen->kernel_features |= KERNEL_ALLOWS_MI_MATH_AND_LRR;
   }

   /* Gen7 needs at least command parser version 5 to support compute */
   if (devinfo->gen >= 8 || screen->cmd_parser_version >= 5)
      screen->kernel_features |= KERNEL_ALLOWS_COMPUTE_DISPATCH;

   if (intel_get_boolean(screen, I915_PARAM_HAS_CONTEXT_ISOLATION))
      screen->kernel_features |= KERNEL_ALLOWS_CONTEXT_ISOLATION;

   /* Debug override: force every winsys buffer to a fixed sample count. */
   const char *force_msaa = getenv("INTEL_FORCE_MSAA");
   if (force_msaa) {
      screen->winsys_msaa_samples_override =
         intel_quantize_num_samples(screen, atoi(force_msaa));
      printf("Forcing winsys sample count to %d\n",
             screen->winsys_msaa_samples_override);
   } else {
      screen->winsys_msaa_samples_override = -1;
   }

   set_max_gl_versions(screen);

   /* Notification of GPU resets requires hardware contexts and a kernel new
    * enough to support DRM_IOCTL_I915_GET_RESET_STATS. If the ioctl is
    * supported, calling it with a context of 0 will either generate EPERM or
    * no error. If the ioctl is not supported, it always generates EINVAL.
    * Use this to determine whether to advertise the __DRI2_ROBUSTNESS
    * extension to the loader.
    *
    * Don't even try on pre-Gen6, since we don't attempt to use contexts there.
    */
   if (devinfo->gen >= 6) {
      struct drm_i915_reset_stats stats;
      memset(&stats, 0, sizeof(stats));

      const int ret = drmIoctl(dri_screen->fd, DRM_IOCTL_I915_GET_RESET_STATS, &stats);

      screen->has_context_reset_notification =
         (ret != -1 || errno != EINVAL);
   }

   dri_screen->extensions = !screen->has_context_reset_notification
      ? screenExtensions : intelRobustScreenExtensions;

   screen->compiler = brw_compiler_create(screen, devinfo);
   screen->compiler->shader_debug_log = shader_debug_log_mesa;
   screen->compiler->shader_perf_log = shader_perf_log_mesa;

   /* Changing the meaning of constant buffer pointers from a dynamic state
    * offset to an absolute address is only safe if the kernel isolates other
    * contexts from our changes.
    */
   screen->compiler->constant_buffer_0_is_relative = devinfo->gen < 8 ||
      !(screen->kernel_features & KERNEL_ALLOWS_CONTEXT_ISOLATION);

   screen->compiler->supports_pull_constants = true;

   screen->has_exec_fence =
      intel_get_boolean(screen, I915_PARAM_HAS_EXEC_FENCE);

   intel_screen_init_surface_formats(screen);

   /* Purely informational: report kernel scheduler capabilities when batch
    * debugging is on. */
   if (INTEL_DEBUG & (DEBUG_BATCH | DEBUG_SUBMIT)) {
      unsigned int caps = intel_get_integer(screen, I915_PARAM_HAS_SCHEDULER);
      if (caps) {
         fprintf(stderr, "Kernel scheduler detected: %08x\n", caps);
         if (caps & I915_SCHEDULER_CAP_PRIORITY)
            fprintf(stderr, " - User priority sorting enabled\n");
         if (caps & I915_SCHEDULER_CAP_PREEMPTION)
            fprintf(stderr, " - Preemption enabled\n");
      }
   }

   brw_disk_cache_init(screen);

   return (const __DRIconfig**) intel_screen_make_configs(dri_screen);
}
2700
/* Pairs the loader-visible __DRIbuffer description with the GEM buffer
 * object backing it; allocated by intelAllocateBuffer and torn down by
 * intelReleaseBuffer.
 */
struct intel_buffer {
   __DRIbuffer base;
   struct brw_bo *bo;
};
2705
2706 static __DRIbuffer *
2707 intelAllocateBuffer(__DRIscreen *dri_screen,
2708 unsigned attachment, unsigned format,
2709 int width, int height)
2710 {
2711 struct intel_buffer *intelBuffer;
2712 struct intel_screen *screen = dri_screen->driverPrivate;
2713
2714 assert(attachment == __DRI_BUFFER_FRONT_LEFT ||
2715 attachment == __DRI_BUFFER_BACK_LEFT);
2716
2717 intelBuffer = calloc(1, sizeof *intelBuffer);
2718 if (intelBuffer == NULL)
2719 return NULL;
2720
2721 /* The front and back buffers are color buffers, which are X tiled. GEN9+
2722 * supports Y tiled and compressed buffers, but there is no way to plumb that
2723 * through to here. */
2724 uint32_t pitch;
2725 int cpp = format / 8;
2726 intelBuffer->bo = brw_bo_alloc_tiled_2d(screen->bufmgr,
2727 "intelAllocateBuffer",
2728 width,
2729 height,
2730 cpp,
2731 BRW_MEMZONE_OTHER,
2732 I915_TILING_X, &pitch,
2733 BO_ALLOC_BUSY);
2734
2735 if (intelBuffer->bo == NULL) {
2736 free(intelBuffer);
2737 return NULL;
2738 }
2739
2740 brw_bo_flink(intelBuffer->bo, &intelBuffer->base.name);
2741
2742 intelBuffer->base.attachment = attachment;
2743 intelBuffer->base.cpp = cpp;
2744 intelBuffer->base.pitch = pitch;
2745
2746 return &intelBuffer->base;
2747 }
2748
2749 static void
2750 intelReleaseBuffer(__DRIscreen *dri_screen, __DRIbuffer *buffer)
2751 {
2752 struct intel_buffer *intelBuffer = (struct intel_buffer *) buffer;
2753
2754 brw_bo_unreference(intelBuffer->bo);
2755 free(intelBuffer);
2756 }
2757
/* The i965 driver's entry points for the DRI core, published both through
 * the __DRI_DRIVER_VTABLE extension (brw_vtable) and the legacy
 * globalDriverAPI pointer.
 */
static const struct __DriverAPIRec brw_driver_api = {
   .InitScreen = intelInitScreen2,
   .DestroyScreen = intelDestroyScreen,
   .CreateContext = brwCreateContext,
   .DestroyContext = intelDestroyContext,
   .CreateBuffer = intelCreateBuffer,
   .DestroyBuffer = intelDestroyBuffer,
   .MakeCurrent = intelMakeCurrent,
   .UnbindContext = intelUnbindContext,
   .AllocateBuffer = intelAllocateBuffer,
   .ReleaseBuffer = intelReleaseBuffer
};
2770
/* Wraps brw_driver_api in the __DRI_DRIVER_VTABLE extension (version 1). */
static const struct __DRIDriverVtableExtensionRec brw_vtable = {
   .base = { __DRI_DRIVER_VTABLE, 1 },
   .vtable = &brw_driver_api,
};
2775
/* NULL-terminated list of screen-level extensions handed to the loader. */
static const __DRIextension *brw_driver_extensions[] = {
    &driCoreExtension.base,
    &driImageDriverExtension.base,
    &driDRI2Extension.base,
    &brw_vtable.base,
    &brw_config_options.base,
    NULL
};
2784
/* Loader entry point for the i965 driver: installs the driver API for
 * loaders that predate the vtable extension, then returns the extension
 * list above.
 */
PUBLIC const __DRIextension **__driDriverGetExtensions_i965(void)
{
   globalDriverAPI = &brw_driver_api;

   return brw_driver_extensions;
}