DRI_CONF_DISABLE_BLEND_FUNC_EXTENDED("false")
DRI_CONF_DUAL_COLOR_BLEND_BY_LOCATION("false")
DRI_CONF_ALLOW_GLSL_EXTENSION_DIRECTIVE_MIDSHADER("false")
+ DRI_CONF_ALLOW_GLSL_BUILTIN_VARIABLE_REDECLARATION("false")
DRI_CONF_ALLOW_HIGHER_COMPAT_VERSION("false")
DRI_CONF_FORCE_GLSL_ABS_SQRT("false")
{ 0, 1, 0, __DRI_IMAGE_FORMAT_ARGB8888, 4 } } }
};
+/* Table mapping i915 GEM tiling modes to their DRM format modifiers, together
+ * with the first hardware generation on which the driver exposes each mode
+ * (since_gen) and the vertical alignment, in rows, that a surface of that
+ * tiling requires (height_align).  Used by the modifier/tiling conversion
+ * helpers below and by intel_query_dma_buf_modifiers(). */
+static const struct {
+ uint32_t tiling;
+ uint64_t modifier;
+ unsigned since_gen;
+ unsigned height_align;
+} tiling_modifier_map[] = {
+ { .tiling = I915_TILING_NONE, .modifier = DRM_FORMAT_MOD_LINEAR,
+ .since_gen = 1, .height_align = 1 },
+ { .tiling = I915_TILING_X, .modifier = I915_FORMAT_MOD_X_TILED,
+ .since_gen = 1, .height_align = 8 },
+ { .tiling = I915_TILING_Y, .modifier = I915_FORMAT_MOD_Y_TILED,
+ .since_gen = 6, .height_align = 32 },
+};
+
+/* Return true iff @modifier is one the driver knows how to handle, i.e. it
+ * appears in tiling_modifier_map.  Note this does not check the hardware
+ * generation; callers that care about gen use tiling_modifier_map directly. */
+static bool
+modifier_is_supported(uint64_t modifier)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tiling_modifier_map); i++) {
+ if (tiling_modifier_map[i].modifier == modifier)
+ return true;
+ }
+
+ return false;
+}
+
+/* Translate a DRM format modifier to the corresponding i915 GEM tiling mode.
+ * The caller must have validated the modifier first (e.g. with
+ * modifier_is_supported()); an unknown modifier hits unreachable(). */
+static uint32_t
+modifier_to_tiling(uint64_t modifier)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tiling_modifier_map); i++) {
+ if (tiling_modifier_map[i].modifier == modifier)
+ return tiling_modifier_map[i].tiling;
+ }
+
+ unreachable("modifier_to_tiling should only receive known modifiers");
+}
+
+/* Translate an i915 GEM tiling mode (as reported by the kernel, e.g.
+ * bo->tiling_mode) to its DRM format modifier.  Every tiling mode the kernel
+ * can report for our BOs must be present in tiling_modifier_map; anything
+ * else hits unreachable(). */
+static uint64_t
+tiling_to_modifier(uint32_t tiling)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tiling_modifier_map); i++) {
+ if (tiling_modifier_map[i].tiling == tiling)
+ return tiling_modifier_map[i].modifier;
+ }
+
+ unreachable("tiling_to_modifier received unknown tiling mode");
+}
+
+/* Round @height up to the tile-row alignment required by @modifier
+ * (height_align in tiling_modifier_map: 1 for linear, 8 for X-tiled,
+ * 32 for Y-tiled).  Used when sizing BO allocations and when computing
+ * per-plane extents for imported dma-bufs.  Unknown modifiers hit
+ * unreachable(); validate the modifier before calling. */
+static unsigned
+get_tiled_height(uint64_t modifier, unsigned height)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tiling_modifier_map); i++) {
+ if (tiling_modifier_map[i].modifier == modifier)
+ return ALIGN(height, tiling_modifier_map[i].height_align);
+ }
+
+ unreachable("get_tiled_height received unknown tiling mode");
+}
+
static void
intel_image_warn_if_unaligned(__DRIimage *image, const char *func)
{
free(image);
return NULL;
}
+ image->modifier = tiling_to_modifier(image->bo->tiling_mode);
return image;
}
image->internal_format = rb->InternalFormat;
image->format = rb->Format;
+ image->modifier = tiling_to_modifier(irb->mt->tiling);
image->offset = 0;
image->data = loaderPrivate;
brw_bo_unreference(image->bo);
image->internal_format = obj->Image[face][level]->InternalFormat;
image->format = obj->Image[face][level]->TexFormat;
+ image->modifier = tiling_to_modifier(iobj->mt->tiling);
image->data = loaderPrivate;
intel_setup_image_from_mipmap_tree(brw, image, iobj->mt, level, zoffset);
image->dri_format = driGLFormatToImageFormat(image->format);
{
__DRIimage *image;
struct intel_screen *screen = dri_screen->driverPrivate;
- /* Historically, X-tiled was the default, and so lack of modifier means
- * X-tiled.
- */
- uint32_t tiling = I915_TILING_X;
+ uint32_t tiling;
+ uint64_t modifier = DRM_FORMAT_MOD_INVALID;
+ unsigned tiled_height;
int cpp;
- unsigned long pitch;
/* Callers of this may specify a modifier, or a dri usage, but not both. The
* newer modifier interface deprecates the older usage flags newer modifier
*/
assert(!(use && count));
- uint64_t modifier = select_best_modifier(&screen->devinfo, modifiers, count);
- switch (modifier) {
- case I915_FORMAT_MOD_X_TILED:
- assert(tiling == I915_TILING_X);
- break;
- case DRM_FORMAT_MOD_LINEAR:
- tiling = I915_TILING_NONE;
- break;
- case I915_FORMAT_MOD_Y_TILED:
- tiling = I915_TILING_Y;
- break;
- case DRM_FORMAT_MOD_INVALID:
- if (modifiers)
- return NULL;
- default:
- break;
- }
-
if (use & __DRI_IMAGE_USE_CURSOR) {
if (width != 64 || height != 64)
return NULL;
- tiling = I915_TILING_NONE;
+ modifier = DRM_FORMAT_MOD_LINEAR;
}
if (use & __DRI_IMAGE_USE_LINEAR)
- tiling = I915_TILING_NONE;
+ modifier = DRM_FORMAT_MOD_LINEAR;
+
+ if (modifier == DRM_FORMAT_MOD_INVALID) {
+ if (modifiers) {
+ /* User requested specific modifiers */
+ modifier = select_best_modifier(&screen->devinfo, modifiers, count);
+ if (modifier == DRM_FORMAT_MOD_INVALID)
+ return NULL;
+ } else {
+ /* Historically, X-tiled was the default, and so lack of modifier means
+ * X-tiled.
+ */
+ modifier = I915_FORMAT_MOD_X_TILED;
+ }
+ }
+ tiling = modifier_to_tiling(modifier);
+ tiled_height = get_tiled_height(modifier, height);
image = intel_allocate_image(screen, format, loaderPrivate);
if (image == NULL)
cpp = _mesa_get_format_bytes(image->format);
image->bo = brw_bo_alloc_tiled(screen->bufmgr, "image",
- width, height, cpp, &tiling,
- &pitch, 0);
+ width, tiled_height, cpp, tiling,
+ &image->pitch, 0);
if (image->bo == NULL) {
free(image);
return NULL;
}
image->width = width;
image->height = height;
- image->pitch = pitch;
image->modifier = modifier;
return image;
image->planar_format = orig_image->planar_format;
image->dri_format = orig_image->dri_format;
image->format = orig_image->format;
+ image->modifier = orig_image->modifier;
image->offset = orig_image->offset;
image->width = orig_image->width;
image->height = orig_image->height;
}
static __DRIimage *
-intel_create_image_from_fds(__DRIscreen *dri_screen,
- int width, int height, int fourcc,
- int *fds, int num_fds, int *strides, int *offsets,
- void *loaderPrivate)
+intel_create_image_from_fds_common(__DRIscreen *dri_screen,
+ int width, int height, int fourcc,
+ uint64_t modifier, int *fds, int num_fds,
+ int *strides, int *offsets,
+ void *loaderPrivate)
{
struct intel_screen *screen = dri_screen->driverPrivate;
struct intel_image_format *f;
__DRIimage *image;
+ unsigned tiled_height;
int i, index;
if (fds == NULL || num_fds < 1)
return NULL;
- /* We only support all planes from the same bo */
- for (i = 0; i < num_fds; i++)
- if (fds[0] != fds[i])
- return NULL;
-
f = intel_image_format_lookup(fourcc);
if (f == NULL)
return NULL;
+ if (modifier != DRM_FORMAT_MOD_INVALID && !modifier_is_supported(modifier))
+ return NULL;
+
if (f->nplanes == 1)
image = intel_allocate_image(screen, f->planes[0].dri_format,
loaderPrivate);
image->pitch = strides[0];
image->planar_format = f;
+
+ image->bo = brw_bo_gem_create_from_prime(screen->bufmgr, fds[0]);
+ if (image->bo == NULL) {
+ free(image);
+ return NULL;
+ }
+
+ /* We only support all planes from the same bo.
+ * brw_bo_gem_create_from_prime() should return the same pointer for all
+ * fds received here */
+ for (i = 1; i < num_fds; i++) {
+ struct brw_bo *aux = brw_bo_gem_create_from_prime(screen->bufmgr, fds[i]);
+ brw_bo_unreference(aux);
+ if (aux != image->bo) {
+ brw_bo_unreference(image->bo);
+ free(image);
+ return NULL;
+ }
+ }
+
+ if (modifier != DRM_FORMAT_MOD_INVALID)
+ image->modifier = modifier;
+ else
+ image->modifier = tiling_to_modifier(image->bo->tiling_mode);
+ tiled_height = get_tiled_height(image->modifier, height);
+
int size = 0;
for (i = 0; i < f->nplanes; i++) {
index = f->planes[i].buffer_index;
image->offsets[index] = offsets[index];
image->strides[index] = strides[index];
- const int plane_height = height >> f->planes[i].height_shift;
+ const int plane_height = tiled_height >> f->planes[i].height_shift;
const int end = offsets[index] + plane_height * strides[index];
if (size < end)
size = end;
}
- image->bo = brw_bo_gem_create_from_prime(screen->bufmgr,
- fds[0], size);
- if (image->bo == NULL) {
+ /* Check that the requested image actually fits within the BO. 'size'
+ * is already relative to the offsets, so we don't need to add that. */
+ if (image->bo->size == 0) {
+ image->bo->size = size;
+ } else if (size > image->bo->size) {
+ brw_bo_unreference(image->bo);
free(image);
return NULL;
}
}
static __DRIimage *
-intel_create_image_from_dma_bufs(__DRIscreen *dri_screen,
- int width, int height, int fourcc,
- int *fds, int num_fds,
- int *strides, int *offsets,
- enum __DRIYUVColorSpace yuv_color_space,
- enum __DRISampleRange sample_range,
- enum __DRIChromaSiting horizontal_siting,
- enum __DRIChromaSiting vertical_siting,
- unsigned *error,
- void *loaderPrivate)
+/* Legacy (pre-modifier) fd-import entry point.  Delegates to the common
+ * implementation with DRM_FORMAT_MOD_INVALID, which means "no modifier
+ * supplied" — the common code then derives the modifier from the BO's
+ * kernel-reported tiling mode. */
+static __DRIimage *
+intel_create_image_from_fds(__DRIscreen *dri_screen,
+ int width, int height, int fourcc,
+ int *fds, int num_fds, int *strides, int *offsets,
+ void *loaderPrivate)
+{
+ return intel_create_image_from_fds_common(dri_screen, width, height, fourcc,
+ DRM_FORMAT_MOD_INVALID,
+ fds, num_fds, strides, offsets,
+ loaderPrivate);
+}
+
+static __DRIimage *
+intel_create_image_from_dma_bufs2(__DRIscreen *dri_screen,
+ int width, int height,
+ int fourcc, uint64_t modifier,
+ int *fds, int num_fds,
+ int *strides, int *offsets,
+ enum __DRIYUVColorSpace yuv_color_space,
+ enum __DRISampleRange sample_range,
+ enum __DRIChromaSiting horizontal_siting,
+ enum __DRIChromaSiting vertical_siting,
+ unsigned *error,
+ void *loaderPrivate)
{
__DRIimage *image;
struct intel_image_format *f = intel_image_format_lookup(fourcc);
return NULL;
}
- image = intel_create_image_from_fds(dri_screen, width, height, fourcc, fds,
- num_fds, strides, offsets,
- loaderPrivate);
+ image = intel_create_image_from_fds_common(dri_screen, width, height,
+ fourcc, modifier,
+ fds, num_fds, strides, offsets,
+ loaderPrivate);
/*
* Invalid parameters and any inconsistencies between are assumed to be
return image;
}
+/* Legacy (__DRI_IMAGE < 15) dma-buf import entry point.  Forwards to the
+ * modifier-aware createImageFromDmaBufs2 implementation, passing
+ * DRM_FORMAT_MOD_INVALID so the modifier is derived from the BO's tiling. */
+static __DRIimage *
+intel_create_image_from_dma_bufs(__DRIscreen *dri_screen,
+ int width, int height, int fourcc,
+ int *fds, int num_fds,
+ int *strides, int *offsets,
+ enum __DRIYUVColorSpace yuv_color_space,
+ enum __DRISampleRange sample_range,
+ enum __DRIChromaSiting horizontal_siting,
+ enum __DRIChromaSiting vertical_siting,
+ unsigned *error,
+ void *loaderPrivate)
+{
+ return intel_create_image_from_dma_bufs2(dri_screen, width, height,
+ fourcc, DRM_FORMAT_MOD_INVALID,
+ fds, num_fds, strides, offsets,
+ yuv_color_space,
+ sample_range,
+ horizontal_siting,
+ vertical_siting,
+ error,
+ loaderPrivate);
+}
+
+/* __DRI_IMAGE v15 queryDmaBufFormats hook.  With max == 0, only reports the
+ * number of supported fourcc formats via *count (SARGB8888 excluded, as it
+ * is not a real dma-buf format).  Otherwise fills @formats with up to @max
+ * fourccs and sets *count to the number written. */
+static GLboolean
+intel_query_dma_buf_formats(__DRIscreen *screen, int max,
+ int *formats, int *count)
+{
+ int i, j = 0;
+
+ if (max == 0) {
+ *count = ARRAY_SIZE(intel_image_formats) - 1; /* not SARGB */
+ return true;
+ }
+
+ /* Skip SARGB8888 here too, to stay consistent with the max == 0 count. */
+ for (i = 0; i < (ARRAY_SIZE(intel_image_formats)) && j < max; i++) {
+ if (intel_image_formats[i].fourcc == __DRI_IMAGE_FOURCC_SARGB8888)
+ continue;
+ formats[j++] = intel_image_formats[i].fourcc;
+ }
+
+ *count = j;
+ return true;
+}
+
+/* __DRI_IMAGE v15 queryDmaBufModifiers hook.  Reports, for @fourcc, the
+ * modifiers supported on this hardware generation (filtered by since_gen in
+ * tiling_modifier_map).  With max == 0 only *count is written; otherwise up
+ * to @max modifiers are stored.  For YUV-ish formats every modifier is
+ * flagged external-only (GL_TRUE), since such images can only be sampled
+ * through GL_TEXTURE_EXTERNAL_OES. */
+static GLboolean
+intel_query_dma_buf_modifiers(__DRIscreen *_screen, int fourcc, int max,
+ uint64_t *modifiers,
+ unsigned int *external_only,
+ int *count)
+{
+ struct intel_screen *screen = _screen->driverPrivate;
+ struct intel_image_format *f;
+ int num_mods = 0, i;
+
+ f = intel_image_format_lookup(fourcc);
+ if (f == NULL)
+ return false;
+
+ for (i = 0; i < ARRAY_SIZE(tiling_modifier_map); i++) {
+ /* Skip modifiers the current hardware generation cannot use. */
+ if (screen->devinfo.gen < tiling_modifier_map[i].since_gen)
+ continue;
+
+ num_mods++;
+ if (max == 0)
+ continue;
+
+ modifiers[num_mods - 1] = tiling_modifier_map[i].modifier;
+ if (num_mods >= max)
+ break;
+ }
+
+ if (external_only != NULL) {
+ for (i = 0; i < num_mods && i < max; i++) {
+ if (f->components == __DRI_IMAGE_COMPONENTS_Y_U_V ||
+ f->components == __DRI_IMAGE_COMPONENTS_Y_UV ||
+ f->components == __DRI_IMAGE_COMPONENTS_Y_XUXV) {
+ external_only[i] = GL_TRUE;
+ }
+ else {
+ external_only[i] = GL_FALSE;
+ }
+ }
+ }
+
+ *count = num_mods;
+ return true;
+}
+
static __DRIimage *
intel_from_planar(__DRIimage *parent, int plane, void *loaderPrivate)
{
image->bo = parent->bo;
brw_bo_reference(parent->bo);
+ image->modifier = parent->modifier;
image->width = width;
image->height = height;
}
static const __DRIimageExtension intelImageExtension = {
- .base = { __DRI_IMAGE, 14 },
+ .base = { __DRI_IMAGE, 15 },
.createImageFromName = intel_create_image_from_name,
.createImageFromRenderbuffer = intel_create_image_from_renderbuffer,
.mapImage = NULL,
.unmapImage = NULL,
.createImageWithModifiers = intel_create_image_with_modifiers,
+ .createImageFromDmaBufs2 = intel_create_image_from_dma_bufs2,
+ .queryDmaBufFormats = intel_query_dma_buf_formats,
+ .queryDmaBufModifiers = intel_query_dma_buf_modifiers,
};
static uint64_t
/**
- * This is called when we need to set up GL rendering to a new X window.
+ * Create a gl_framebuffer and attach it to __DRIdrawable::driverPrivate.
+ *
+ * This implements driDriverAPI::createNewDrawable, which the DRI layer calls
+ * when creating an EGLSurface, GLXDrawable, or GLXPixmap. Despite the name,
+ * this does not allocate GPU memory.
*/
static GLboolean
intelCreateBuffer(__DRIscreen *dri_screen,
mesa_format rgbFormat;
unsigned num_samples =
intel_quantize_num_samples(screen, mesaVis->samples);
- struct gl_framebuffer *fb;
if (isPixmap)
return false;
- fb = CALLOC_STRUCT(gl_framebuffer);
+ struct gl_framebuffer *fb = CALLOC_STRUCT(gl_framebuffer);
if (!fb)
return false;
}
/* setup the hardware-based renderbuffers */
- rb = intel_create_renderbuffer(rgbFormat, num_samples);
- _mesa_add_renderbuffer_without_ref(fb, BUFFER_FRONT_LEFT, &rb->Base.Base);
+ rb = intel_create_winsys_renderbuffer(rgbFormat, num_samples);
+ _mesa_attach_and_own_rb(fb, BUFFER_FRONT_LEFT, &rb->Base.Base);
if (mesaVis->doubleBufferMode) {
- rb = intel_create_renderbuffer(rgbFormat, num_samples);
- _mesa_add_renderbuffer_without_ref(fb, BUFFER_BACK_LEFT, &rb->Base.Base);
+ rb = intel_create_winsys_renderbuffer(rgbFormat, num_samples);
+ _mesa_attach_and_own_rb(fb, BUFFER_BACK_LEFT, &rb->Base.Base);
}
/*
if (screen->devinfo.has_hiz_and_separate_stencil) {
rb = intel_create_private_renderbuffer(MESA_FORMAT_Z24_UNORM_X8_UINT,
num_samples);
- _mesa_add_renderbuffer_without_ref(fb, BUFFER_DEPTH, &rb->Base.Base);
+ _mesa_attach_and_own_rb(fb, BUFFER_DEPTH, &rb->Base.Base);
rb = intel_create_private_renderbuffer(MESA_FORMAT_S_UINT8,
num_samples);
- _mesa_add_renderbuffer_without_ref(fb, BUFFER_STENCIL,
- &rb->Base.Base);
+ _mesa_attach_and_own_rb(fb, BUFFER_STENCIL, &rb->Base.Base);
} else {
/*
* Use combined depth/stencil. Note that the renderbuffer is
*/
rb = intel_create_private_renderbuffer(MESA_FORMAT_Z24_UNORM_S8_UINT,
num_samples);
- _mesa_add_renderbuffer_without_ref(fb, BUFFER_DEPTH, &rb->Base.Base);
- _mesa_add_renderbuffer(fb, BUFFER_STENCIL, &rb->Base.Base);
+ _mesa_attach_and_own_rb(fb, BUFFER_DEPTH, &rb->Base.Base);
+ _mesa_attach_and_reference_rb(fb, BUFFER_STENCIL, &rb->Base.Base);
}
}
else if (mesaVis->depthBits == 16) {
assert(mesaVis->stencilBits == 0);
rb = intel_create_private_renderbuffer(MESA_FORMAT_Z_UNORM16,
num_samples);
- _mesa_add_renderbuffer_without_ref(fb, BUFFER_DEPTH, &rb->Base.Base);
+ _mesa_attach_and_own_rb(fb, BUFFER_DEPTH, &rb->Base.Base);
}
else {
assert(mesaVis->depthBits == 0);
intel_detect_swizzling(struct intel_screen *screen)
{
struct brw_bo *buffer;
- unsigned long flags = 0;
- unsigned long aligned_pitch;
+ unsigned flags = 0;
+ uint32_t aligned_pitch;
uint32_t tiling = I915_TILING_X;
uint32_t swizzle_mode = 0;
buffer = brw_bo_alloc_tiled(screen->bufmgr, "swizzle test",
- 64, 64, 4, &tiling, &aligned_pitch, flags);
+ 64, 64, 4, tiling, &aligned_pitch, flags);
if (buffer == NULL)
return false;
struct brw_bo *results, *bo;
uint32_t *batch;
uint32_t offset = 0;
+ void *map;
bool success = false;
/* Create a zero'ed temporary buffer for reading our results */
if (bo == NULL)
goto err_results;
- if (brw_bo_map(NULL, bo, 1))
+ map = brw_bo_map(NULL, bo, MAP_WRITE);
+ if (!map)
goto err_batch;
- batch = bo->virtual;
+ batch = map;
/* Write the register. */
*batch++ = MI_LOAD_REGISTER_IMM | (3 - 2);
*batch++ = MI_STORE_REGISTER_MEM | (3 - 2);
*batch++ = reg;
struct drm_i915_gem_relocation_entry reloc = {
- .offset = (char *) batch - (char *) bo->virtual,
+ .offset = (char *) batch - (char *) map,
.delta = offset * sizeof(uint32_t),
.target_handle = results->gem_handle,
.read_domains = I915_GEM_DOMAIN_INSTRUCTION,
struct drm_i915_gem_execbuffer2 execbuf = {
.buffers_ptr = (uintptr_t) exec_objects,
.buffer_count = 2,
- .batch_len = ALIGN((char *) batch - (char *) bo->virtual, 8),
+ .batch_len = ALIGN((char *) batch - (char *) map, 8),
.flags = I915_EXEC_RENDER,
};
drmIoctl(dri_screen->fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
/* Check whether the value got written. */
- if (brw_bo_map(NULL, results, false) == 0) {
- success = *((uint32_t *)results->virtual + offset) == expected_value;
+ void *results_map = brw_bo_map(NULL, results, MAP_READ);
+ if (results_map) {
+ success = *((uint32_t *)results_map + offset) == expected_value;
brw_bo_unmap(results);
}
break;
case 7:
dri_screen->max_gl_core_version = 33;
- if (screen->devinfo.is_haswell &&
- can_do_pipelined_register_writes(screen)) {
+ if (can_do_pipelined_register_writes(screen)) {
dri_screen->max_gl_core_version = 42;
- if (can_do_compute_dispatch(screen))
+ if (screen->devinfo.is_haswell && can_do_compute_dispatch(screen))
dri_screen->max_gl_core_version = 43;
- if (can_do_mi_math_and_lrr(screen))
+ if (screen->devinfo.is_haswell && can_do_mi_math_and_lrr(screen))
dri_screen->max_gl_core_version = 45;
}
dri_screen->max_gl_compat_version = 30;
screen->cmd_parser_version = 0;
}
+ /* Kernel 4.13 required for exec object capture */
+#ifndef I915_PARAM_HAS_EXEC_CAPTURE
+#define I915_PARAM_HAS_EXEC_CAPTURE 45
+#endif
+ if (intel_get_boolean(screen, I915_PARAM_HAS_EXEC_CAPTURE)) {
+ screen->kernel_features |= KERNEL_ALLOWS_EXEC_CAPTURE;
+ }
+
if (!intel_detect_pipelined_so(screen)) {
/* We can't do anything, so the effective version is 0. */
screen->cmd_parser_version = 0;
screen->kernel_features |= KERNEL_ALLOWS_SOL_OFFSET_WRITES;
}
+ if (devinfo->gen >= 8 || screen->cmd_parser_version >= 2)
+ screen->kernel_features |= KERNEL_ALLOWS_PREDICATE_WRITES;
+
+ /* Haswell requires command parser version 4 in order to have L3
+ * atomic scratch1 and chicken3 bits
+ */
+ if (devinfo->is_haswell && screen->cmd_parser_version >= 4) {
+ screen->kernel_features |=
+ KERNEL_ALLOWS_HSW_SCRATCH1_AND_ROW_CHICKEN3;
+ }
+
+ /* Haswell requires command parser version 6 in order to write to the
+ * MI_MATH GPR registers, and version 7 in order to use
+ * MI_LOAD_REGISTER_REG (which all users of MI_MATH use).
+ */
+ if (devinfo->gen >= 8 ||
+ (devinfo->is_haswell && screen->cmd_parser_version >= 7)) {
+ screen->kernel_features |= KERNEL_ALLOWS_MI_MATH_AND_LRR;
+ }
+
+ /* Gen7 needs at least command parser version 5 to support compute */
+ if (devinfo->gen >= 8 || screen->cmd_parser_version >= 5)
+ screen->kernel_features |= KERNEL_ALLOWS_COMPUTE_DISPATCH;
+
const char *force_msaa = getenv("INTEL_FORCE_MSAA");
if (force_msaa) {
screen->winsys_msaa_samples_override =
(ret != -1 || errno != EINVAL);
}
- if (devinfo->gen >= 8 || screen->cmd_parser_version >= 2)
- screen->kernel_features |= KERNEL_ALLOWS_PREDICATE_WRITES;
-
- /* Haswell requires command parser version 4 in order to have L3
- * atomic scratch1 and chicken3 bits
- */
- if (devinfo->is_haswell && screen->cmd_parser_version >= 4) {
- screen->kernel_features |=
- KERNEL_ALLOWS_HSW_SCRATCH1_AND_ROW_CHICKEN3;
- }
-
- /* Haswell requires command parser version 6 in order to write to the
- * MI_MATH GPR registers, and version 7 in order to use
- * MI_LOAD_REGISTER_REG (which all users of MI_MATH use).
- */
- if (devinfo->gen >= 8 ||
- (devinfo->is_haswell && screen->cmd_parser_version >= 7)) {
- screen->kernel_features |= KERNEL_ALLOWS_MI_MATH_AND_LRR;
- }
-
- /* Gen7 needs at least command parser version 5 to support compute */
- if (devinfo->gen >= 8 || screen->cmd_parser_version >= 5)
- screen->kernel_features |= KERNEL_ALLOWS_COMPUTE_DISPATCH;
-
dri_screen->extensions = !screen->has_context_reset_notification
? screenExtensions : intelRobustScreenExtensions;
/* The front and back buffers are color buffers, which are X tiled. GEN9+
* supports Y tiled and compressed buffers, but there is no way to plumb that
* through to here. */
- uint32_t tiling = I915_TILING_X;
- unsigned long pitch;
+ uint32_t pitch;
int cpp = format / 8;
intelBuffer->bo = brw_bo_alloc_tiled(screen->bufmgr,
"intelAllocateBuffer",
width,
height,
cpp,
- &tiling, &pitch,
+ I915_TILING_X, &pitch,
BO_ALLOC_FOR_RENDER);
if (intelBuffer->bo == NULL) {