#include "swrast/s_renderbuffer.h"
#include "util/ralloc.h"
#include "brw_defines.h"
+#include "brw_state.h"
#include "compiler/nir/nir.h"
#include "utils.h"
-#include "xmlpool.h"
-
-#ifndef DRM_FORMAT_MOD_INVALID
-#define DRM_FORMAT_MOD_INVALID ((1ULL<<56) - 1)
-#endif
-
-#ifndef DRM_FORMAT_MOD_LINEAR
-#define DRM_FORMAT_MOD_LINEAR 0
-#endif
+#include "util/xmlpool.h"
static const __DRIconfigOptionsExtension brw_config_options = {
.base = { __DRI_CONFIG_OPTIONS, 1 },
.xml =
DRI_CONF_BEGIN
DRI_CONF_SECTION_PERFORMANCE
- DRI_CONF_VBLANK_MODE(DRI_CONF_VBLANK_ALWAYS_SYNC)
/* Options correspond to DRI_CONF_BO_REUSE_DISABLED,
* DRI_CONF_BO_REUSE_ALL
*/
DRI_CONF_ENUM(1, "Enable reuse of all sizes of buffer objects")
DRI_CONF_DESC_END
DRI_CONF_OPT_END
+ DRI_CONF_MESA_NO_ERROR("false")
DRI_CONF_SECTION_END
DRI_CONF_SECTION_QUALITY
DRI_CONF_DISABLE_BLEND_FUNC_EXTENDED("false")
DRI_CONF_DUAL_COLOR_BLEND_BY_LOCATION("false")
DRI_CONF_ALLOW_GLSL_EXTENSION_DIRECTIVE_MIDSHADER("false")
+ DRI_CONF_ALLOW_GLSL_BUILTIN_VARIABLE_REDECLARATION("false")
DRI_CONF_ALLOW_HIGHER_COMPAT_VERSION("false")
DRI_CONF_FORCE_GLSL_ABS_SQRT("false")
.flush_with_flags = intel_dri2_flush_with_flags,
};
-static struct intel_image_format intel_image_formats[] = {
+static const struct intel_image_format intel_image_formats[] = {
{ __DRI_IMAGE_FOURCC_ARGB8888, __DRI_IMAGE_COMPONENTS_RGBA, 1,
{ { 0, 0, 0, __DRI_IMAGE_FORMAT_ARGB8888, 4 } } },
{ { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
{ 1, 1, 0, __DRI_IMAGE_FORMAT_GR88, 2 } } },
- /* For YUYV buffers, we set up two overlapping DRI images and treat
- * them as planar buffers in the compositors. Plane 0 is GR88 and
- * samples YU or YV pairs and places Y into the R component, while
- * plane 1 is ARGB and samples YUYV clusters and places pairs and
- * places U into the G component and V into A. This lets the
- * texture sampler interpolate the Y components correctly when
- * sampling from plane 0, and interpolate U and V correctly when
- * sampling from plane 1. */
+   /* For YUYV and UYVY buffers, we set up two overlapping DRI images
+    * and treat them as planar buffers in the compositors.
+    * Plane 0 is GR88 and samples YU or YV pairs and places Y into
+    * the R component, while plane 1 is ARGB/ABGR and samples YUYV/UYVY
+    * clusters and places U into the G component and V into A. This
+    * lets the texture sampler interpolate the Y components correctly
+    * when sampling from plane 0, and interpolate U and V correctly
+    * when sampling from plane 1. */
{ __DRI_IMAGE_FOURCC_YUYV, __DRI_IMAGE_COMPONENTS_Y_XUXV, 2,
{ { 0, 0, 0, __DRI_IMAGE_FORMAT_GR88, 2 },
- { 0, 1, 0, __DRI_IMAGE_FORMAT_ARGB8888, 4 } } }
+ { 0, 1, 0, __DRI_IMAGE_FORMAT_ARGB8888, 4 } } },
+ { __DRI_IMAGE_FOURCC_UYVY, __DRI_IMAGE_COMPONENTS_Y_UXVX, 2,
+ { { 0, 0, 0, __DRI_IMAGE_FORMAT_GR88, 2 },
+ { 0, 1, 0, __DRI_IMAGE_FORMAT_ABGR8888, 4 } } }
+};
+
+/* All format modifiers this driver can ever support, together with the
+ * first hardware generation each one becomes available on.
+ * modifier_is_supported() consults this table after its format-specific
+ * checks. */
+static const struct {
+   uint64_t modifier;
+   unsigned since_gen;
+} supported_modifiers[] = {
+   { .modifier = DRM_FORMAT_MOD_LINEAR , .since_gen = 1 },
+   { .modifier = I915_FORMAT_MOD_X_TILED , .since_gen = 1 },
+   { .modifier = I915_FORMAT_MOD_Y_TILED , .since_gen = 6 },
+   { .modifier = I915_FORMAT_MOD_Y_TILED_CCS , .since_gen = 9 },
};
+/* Return true if 'modifier' is usable on this device for the given format.
+ *
+ * The format is described either by 'fmt' (an intel_image_format; in that
+ * case dri_format must be 0 and plane 0's format is used) or, when fmt is
+ * NULL, by 'dri_format' alone.
+ *
+ * CCS_E modifiers get extra checks: they are rejected when
+ * INTEL_DEBUG=norbc is set, for planar images, and for formats that ISL
+ * cannot compress.  Finally, the modifier must appear in
+ * supported_modifiers[] with since_gen at or below the device generation.
+ */
+static bool
+modifier_is_supported(const struct gen_device_info *devinfo,
+                      const struct intel_image_format *fmt, int dri_format,
+                      uint64_t modifier)
+{
+   const struct isl_drm_modifier_info *modinfo =
+      isl_drm_modifier_get_info(modifier);
+   int i;
+
+   /* ISL had better know about the modifier */
+   if (!modinfo)
+      return false;
+
+   if (modinfo->aux_usage == ISL_AUX_USAGE_CCS_E) {
+      /* If INTEL_DEBUG=norbc is set, don't support any CCS_E modifiers */
+      if (unlikely(INTEL_DEBUG & DEBUG_NO_RBC))
+         return false;
+
+      /* CCS_E is not supported for planar images */
+      if (fmt && fmt->nplanes > 1)
+         return false;
+
+      if (fmt) {
+         assert(dri_format == 0);
+         dri_format = fmt->planes[0].dri_format;
+      }
+
+      /* CCS_E support is keyed off the linear variant of the format. */
+      mesa_format format = driImageFormatToGLFormat(dri_format);
+      format = _mesa_get_srgb_format_linear(format);
+      if (!isl_format_supports_ccs_e(devinfo,
+                                     brw_isl_format_for_mesa_format(format)))
+         return false;
+   }
+
+   /* Gate the modifier on the hardware generation it first appeared in. */
+   for (i = 0; i < ARRAY_SIZE(supported_modifiers); i++) {
+      if (supported_modifiers[i].modifier != modifier)
+         continue;
+
+      return supported_modifiers[i].since_gen <= devinfo->gen;
+   }
+
+   return false;
+}
+
+/* Translate an i915 GEM tiling mode (I915_TILING_*) into the equivalent
+ * DRM format modifier.  Only NONE/X/Y tilings are representable; any
+ * other value trips the assert. */
+static uint64_t
+tiling_to_modifier(uint32_t tiling)
+{
+   static const uint64_t map[] = {
+      [I915_TILING_NONE]   = DRM_FORMAT_MOD_LINEAR,
+      [I915_TILING_X]      = I915_FORMAT_MOD_X_TILED,
+      [I915_TILING_Y]      = I915_FORMAT_MOD_Y_TILED,
+   };
+
+   assert(tiling < ARRAY_SIZE(map));
+
+   return map[tiling];
+}
+
static void
intel_image_warn_if_unaligned(__DRIimage *image, const char *func)
{
uint32_t tiling, swizzle;
- drm_bacon_bo_get_tiling(image->bo, &tiling, &swizzle);
+ brw_bo_get_tiling(image->bo, &tiling, &swizzle);
if (tiling != I915_TILING_NONE && (image->offset & 0xfff)) {
_mesa_warning(NULL, "%s: offset 0x%08x not on tile boundary",
}
}
-static struct intel_image_format *
+/* Look up the intel_image_format describing a DRM FourCC code, or NULL
+ * if the format is not in intel_image_formats[]. */
+static const struct intel_image_format *
intel_image_format_lookup(int fourcc)
{
-   struct intel_image_format *f = NULL;
-
   for (unsigned i = 0; i < ARRAY_SIZE(intel_image_formats); i++) {
-      if (intel_image_formats[i].fourcc == fourcc) {
-         f = &intel_image_formats[i];
-         break;
-      }
+      if (intel_image_formats[i].fourcc == fourcc)
+         return &intel_image_formats[i];
   }
-   return f;
+   return NULL;
}
static boolean intel_lookup_fourcc(int dri_format, int *fourcc)
intel_miptree_check_level_layer(mt, level, zoffset);
- image->width = minify(mt->physical_width0, level - mt->first_level);
- image->height = minify(mt->physical_height0, level - mt->first_level);
- image->pitch = mt->pitch;
+ image->width = minify(mt->surf.phys_level0_sa.width,
+ level - mt->first_level);
+ image->height = minify(mt->surf.phys_level0_sa.height,
+ level - mt->first_level);
+ image->pitch = mt->surf.row_pitch;
image->offset = intel_miptree_get_tile_offsets(mt, level, zoffset,
&image->tile_x,
&image->tile_y);
- drm_bacon_bo_unreference(image->bo);
+ brw_bo_unreference(image->bo);
image->bo = mt->bo;
- drm_bacon_bo_reference(mt->bo);
+ brw_bo_reference(mt->bo);
}
static __DRIimage *
image->width = width;
image->height = height;
image->pitch = pitch * cpp;
- image->bo = drm_bacon_bo_gem_create_from_name(screen->bufmgr, "image",
+ image->bo = brw_bo_gem_create_from_name(screen->bufmgr, "image",
name);
if (!image->bo) {
free(image);
return NULL;
}
+ image->modifier = tiling_to_modifier(image->bo->tiling_mode);
return image;
}
image->internal_format = rb->InternalFormat;
image->format = rb->Format;
+ image->modifier = tiling_to_modifier(
+ isl_tiling_to_i915_tiling(irb->mt->surf.tiling));
image->offset = 0;
image->data = loaderPrivate;
- drm_bacon_bo_unreference(image->bo);
+ brw_bo_unreference(image->bo);
image->bo = irb->mt->bo;
- drm_bacon_bo_reference(irb->mt->bo);
+ brw_bo_reference(irb->mt->bo);
image->width = rb->Width;
image->height = rb->Height;
- image->pitch = irb->mt->pitch;
+ image->pitch = irb->mt->surf.row_pitch;
image->dri_format = driGLFormatToImageFormat(image->format);
image->has_depthstencil = irb->mt->stencil_mt? true : false;
image->internal_format = obj->Image[face][level]->InternalFormat;
image->format = obj->Image[face][level]->TexFormat;
+ image->modifier = tiling_to_modifier(
+ isl_tiling_to_i915_tiling(iobj->mt->surf.tiling));
image->data = loaderPrivate;
intel_setup_image_from_mipmap_tree(brw, image, iobj->mt, level, zoffset);
image->dri_format = driGLFormatToImageFormat(image->format);
static void
intel_destroy_image(__DRIimage *image)
{
-   drm_bacon_bo_unreference(image->bo);
+   /* Drop the image's reference on its backing buffer object, then free
+    * the image struct itself. */
+   brw_bo_unreference(image->bo);
   free(image);
}
MODIFIER_PRIORITY_LINEAR,
MODIFIER_PRIORITY_X,
MODIFIER_PRIORITY_Y,
+ MODIFIER_PRIORITY_Y_CCS,
};
const uint64_t priority_to_modifier[] = {
[MODIFIER_PRIORITY_LINEAR] = DRM_FORMAT_MOD_LINEAR,
[MODIFIER_PRIORITY_X] = I915_FORMAT_MOD_X_TILED,
[MODIFIER_PRIORITY_Y] = I915_FORMAT_MOD_Y_TILED,
+ [MODIFIER_PRIORITY_Y_CCS] = I915_FORMAT_MOD_Y_TILED_CCS,
};
static uint64_t
select_best_modifier(struct gen_device_info *devinfo,
+ int dri_format,
const uint64_t *modifiers,
const unsigned count)
{
enum modifier_priority prio = MODIFIER_PRIORITY_INVALID;
for (int i = 0; i < count; i++) {
+ if (!modifier_is_supported(devinfo, NULL, dri_format, modifiers[i]))
+ continue;
+
switch (modifiers[i]) {
+ case I915_FORMAT_MOD_Y_TILED_CCS:
+ prio = MAX2(prio, MODIFIER_PRIORITY_Y_CCS);
+ break;
case I915_FORMAT_MOD_Y_TILED:
prio = MAX2(prio, MODIFIER_PRIORITY_Y);
break;
{
__DRIimage *image;
struct intel_screen *screen = dri_screen->driverPrivate;
- /* Historically, X-tiled was the default, and so lack of modifier means
- * X-tiled.
- */
- uint32_t tiling = I915_TILING_X;
- int cpp;
- unsigned long pitch;
+ uint64_t modifier = DRM_FORMAT_MOD_INVALID;
+ bool ok;
/* Callers of this may specify a modifier, or a dri usage, but not both. The
* newer modifier interface deprecates the older usage flags newer modifier
*/
assert(!(use && count));
- uint64_t modifier = select_best_modifier(&screen->devinfo, modifiers, count);
- switch (modifier) {
- case I915_FORMAT_MOD_X_TILED:
- assert(tiling == I915_TILING_X);
- break;
- case DRM_FORMAT_MOD_LINEAR:
- tiling = I915_TILING_NONE;
- break;
- case I915_FORMAT_MOD_Y_TILED:
- tiling = I915_TILING_Y;
- break;
- case DRM_FORMAT_MOD_INVALID:
- if (modifiers)
- return NULL;
- default:
- break;
- }
-
if (use & __DRI_IMAGE_USE_CURSOR) {
if (width != 64 || height != 64)
return NULL;
- tiling = I915_TILING_NONE;
+ modifier = DRM_FORMAT_MOD_LINEAR;
}
if (use & __DRI_IMAGE_USE_LINEAR)
- tiling = I915_TILING_NONE;
+ modifier = DRM_FORMAT_MOD_LINEAR;
+
+ if (modifier == DRM_FORMAT_MOD_INVALID) {
+ if (modifiers) {
+ /* User requested specific modifiers */
+ modifier = select_best_modifier(&screen->devinfo, format,
+ modifiers, count);
+ if (modifier == DRM_FORMAT_MOD_INVALID)
+ return NULL;
+ } else {
+ /* Historically, X-tiled was the default, and so lack of modifier means
+ * X-tiled.
+ */
+ modifier = I915_FORMAT_MOD_X_TILED;
+ }
+ }
image = intel_allocate_image(screen, format, loaderPrivate);
if (image == NULL)
return NULL;
- cpp = _mesa_get_format_bytes(image->format);
- image->bo = drm_bacon_bo_alloc_tiled(screen->bufmgr, "image",
- width, height, cpp, &tiling,
- &pitch, 0);
+ const struct isl_drm_modifier_info *mod_info =
+ isl_drm_modifier_get_info(modifier);
+
+ struct isl_surf surf;
+ ok = isl_surf_init(&screen->isl_dev, &surf,
+ .dim = ISL_SURF_DIM_2D,
+ .format = brw_isl_format_for_mesa_format(image->format),
+ .width = width,
+ .height = height,
+ .depth = 1,
+ .levels = 1,
+ .array_len = 1,
+ .samples = 1,
+ .usage = ISL_SURF_USAGE_RENDER_TARGET_BIT |
+ ISL_SURF_USAGE_TEXTURE_BIT |
+ ISL_SURF_USAGE_STORAGE_BIT,
+ .tiling_flags = (1 << mod_info->tiling));
+ assert(ok);
+ if (!ok) {
+ free(image);
+ return NULL;
+ }
+
+ struct isl_surf aux_surf;
+ if (mod_info->aux_usage == ISL_AUX_USAGE_CCS_E) {
+ ok = isl_surf_get_ccs_surf(&screen->isl_dev, &surf, &aux_surf, 0);
+ if (!ok) {
+ free(image);
+ return NULL;
+ }
+ } else {
+ assert(mod_info->aux_usage == ISL_AUX_USAGE_NONE);
+ aux_surf.size = 0;
+ }
+
+ /* We request that the bufmgr zero the buffer for us for two reasons:
+ *
+ * 1) If a buffer gets re-used from the pool, we don't want to leak random
+ * garbage from our process to some other.
+ *
+ * 2) For images with CCS_E, we want to ensure that the CCS starts off in
+ * a valid state. A CCS value of 0 indicates that the given block is
+ * in the pass-through state which is what we want.
+ */
+ image->bo = brw_bo_alloc_tiled(screen->bufmgr, "image",
+ surf.size + aux_surf.size,
+ isl_tiling_to_i915_tiling(mod_info->tiling),
+ surf.row_pitch, BO_ALLOC_ZEROED);
if (image->bo == NULL) {
free(image);
return NULL;
}
image->width = width;
image->height = height;
- image->pitch = pitch;
+ image->pitch = surf.row_pitch;
image->modifier = modifier;
+ if (aux_surf.size) {
+ image->aux_offset = surf.size;
+ image->aux_pitch = aux_surf.row_pitch;
+ }
+
return image;
}
*value = image->pitch;
return true;
case __DRI_IMAGE_ATTRIB_HANDLE:
- *value = image->bo->handle;
+ *value = image->bo->gem_handle;
return true;
case __DRI_IMAGE_ATTRIB_NAME:
- return !drm_bacon_bo_flink(image->bo, (uint32_t *) value);
+ return !brw_bo_flink(image->bo, (uint32_t *) value);
case __DRI_IMAGE_ATTRIB_FORMAT:
*value = image->dri_format;
return true;
*value = image->planar_format->components;
return true;
case __DRI_IMAGE_ATTRIB_FD:
- return !drm_bacon_bo_gem_export_to_prime(image->bo, value);
+ return !brw_bo_gem_export_to_prime(image->bo, value);
case __DRI_IMAGE_ATTRIB_FOURCC:
return intel_lookup_fourcc(image->dri_format, value);
case __DRI_IMAGE_ATTRIB_NUM_PLANES:
- *value = 1;
+ *value = isl_drm_modifier_has_aux(image->modifier) ? 2 : 1;
return true;
case __DRI_IMAGE_ATTRIB_OFFSET:
*value = image->offset;
if (image == NULL)
return NULL;
- drm_bacon_bo_reference(orig_image->bo);
+ brw_bo_reference(orig_image->bo);
image->bo = orig_image->bo;
image->internal_format = orig_image->internal_format;
image->planar_format = orig_image->planar_format;
image->dri_format = orig_image->dri_format;
image->format = orig_image->format;
+ image->modifier = orig_image->modifier;
image->offset = orig_image->offset;
image->width = orig_image->width;
image->height = orig_image->height;
int *strides, int *offsets,
void *loaderPrivate)
{
- struct intel_image_format *f = NULL;
+ const struct intel_image_format *f = NULL;
__DRIimage *image;
int i, index;
}
static __DRIimage *
-intel_create_image_from_fds(__DRIscreen *dri_screen,
- int width, int height, int fourcc,
- int *fds, int num_fds, int *strides, int *offsets,
- void *loaderPrivate)
+intel_create_image_from_fds_common(__DRIscreen *dri_screen,
+ int width, int height, int fourcc,
+ uint64_t modifier, int *fds, int num_fds,
+ int *strides, int *offsets,
+ void *loaderPrivate)
{
struct intel_screen *screen = dri_screen->driverPrivate;
- struct intel_image_format *f;
+ const struct intel_image_format *f;
__DRIimage *image;
int i, index;
+ bool ok;
if (fds == NULL || num_fds < 1)
return NULL;
- /* We only support all planes from the same bo */
- for (i = 0; i < num_fds; i++)
- if (fds[0] != fds[i])
- return NULL;
-
f = intel_image_format_lookup(fourcc);
if (f == NULL)
return NULL;
+ if (modifier != DRM_FORMAT_MOD_INVALID &&
+ !modifier_is_supported(&screen->devinfo, f, 0, modifier))
+ return NULL;
+
if (f->nplanes == 1)
image = intel_allocate_image(screen, f->planes[0].dri_format,
loaderPrivate);
image->pitch = strides[0];
image->planar_format = f;
+
+ image->bo = brw_bo_gem_create_from_prime(screen->bufmgr, fds[0]);
+ if (image->bo == NULL) {
+ free(image);
+ return NULL;
+ }
+
+   /* We only support the case where all planes come from the same bo;
+    * brw_bo_gem_create_from_prime() should return the same pointer for all
+    * fds received here. */
+ for (i = 1; i < num_fds; i++) {
+ struct brw_bo *aux = brw_bo_gem_create_from_prime(screen->bufmgr, fds[i]);
+ brw_bo_unreference(aux);
+ if (aux != image->bo) {
+ brw_bo_unreference(image->bo);
+ free(image);
+ return NULL;
+ }
+ }
+
+ if (modifier != DRM_FORMAT_MOD_INVALID)
+ image->modifier = modifier;
+ else
+ image->modifier = tiling_to_modifier(image->bo->tiling_mode);
+
+ const struct isl_drm_modifier_info *mod_info =
+ isl_drm_modifier_get_info(image->modifier);
+
int size = 0;
+ struct isl_surf surf;
for (i = 0; i < f->nplanes; i++) {
index = f->planes[i].buffer_index;
image->offsets[index] = offsets[index];
image->strides[index] = strides[index];
- const int plane_height = height >> f->planes[i].height_shift;
- const int end = offsets[index] + plane_height * strides[index];
+ mesa_format format = driImageFormatToGLFormat(f->planes[i].dri_format);
+
+ ok = isl_surf_init(&screen->isl_dev, &surf,
+ .dim = ISL_SURF_DIM_2D,
+ .format = brw_isl_format_for_mesa_format(format),
+ .width = image->width >> f->planes[i].width_shift,
+ .height = image->height >> f->planes[i].height_shift,
+ .depth = 1,
+ .levels = 1,
+ .array_len = 1,
+ .samples = 1,
+ .row_pitch = strides[index],
+ .usage = ISL_SURF_USAGE_RENDER_TARGET_BIT |
+ ISL_SURF_USAGE_TEXTURE_BIT |
+ ISL_SURF_USAGE_STORAGE_BIT,
+ .tiling_flags = (1 << mod_info->tiling));
+ if (!ok) {
+ brw_bo_unreference(image->bo);
+ free(image);
+ return NULL;
+ }
+
+ const int end = offsets[index] + surf.size;
if (size < end)
size = end;
}
- image->bo = drm_bacon_bo_gem_create_from_prime(screen->bufmgr,
- fds[0], size);
- if (image->bo == NULL) {
+ if (mod_info->aux_usage == ISL_AUX_USAGE_CCS_E) {
+ /* Even though we initialize surf in the loop above, we know that
+ * anything with CCS_E will have exactly one plane so surf is properly
+ * initialized when we get here.
+ */
+ assert(f->nplanes == 1);
+
+ image->aux_offset = offsets[1];
+ image->aux_pitch = strides[1];
+
+ /* Scanout hardware requires that the CCS be placed after the main
+ * surface in memory. We consider any CCS that is placed any earlier in
+ * memory to be invalid and reject it.
+ *
+ * At some point in the future, this restriction may be relaxed if the
+ * hardware becomes less strict but we may need a new modifier for that.
+ */
+ assert(size > 0);
+ if (image->aux_offset < size) {
+ brw_bo_unreference(image->bo);
+ free(image);
+ return NULL;
+ }
+
+ struct isl_surf aux_surf;
+ ok = isl_surf_get_ccs_surf(&screen->isl_dev, &surf, &aux_surf,
+ image->aux_pitch);
+ if (!ok) {
+ brw_bo_unreference(image->bo);
+ free(image);
+ return NULL;
+ }
+
+ const int end = image->aux_offset + aux_surf.size;
+ if (size < end)
+ size = end;
+ } else {
+ assert(mod_info->aux_usage == ISL_AUX_USAGE_NONE);
+ }
+
+ /* Check that the requested image actually fits within the BO. 'size'
+ * is already relative to the offsets, so we don't need to add that. */
+ if (image->bo->size == 0) {
+ image->bo->size = size;
+ } else if (size > image->bo->size) {
+ brw_bo_unreference(image->bo);
free(image);
return NULL;
}
}
static __DRIimage *
-intel_create_image_from_dma_bufs(__DRIscreen *dri_screen,
-                                 int width, int height, int fourcc,
-                                 int *fds, int num_fds,
-                                 int *strides, int *offsets,
-                                 enum __DRIYUVColorSpace yuv_color_space,
-                                 enum __DRISampleRange sample_range,
-                                 enum __DRIChromaSiting horizontal_siting,
-                                 enum __DRIChromaSiting vertical_siting,
-                                 unsigned *error,
-                                 void *loaderPrivate)
+intel_create_image_from_fds(__DRIscreen *dri_screen,
+                            int width, int height, int fourcc,
+                            int *fds, int num_fds, int *strides, int *offsets,
+                            void *loaderPrivate)
+{
+   /* Legacy (modifier-less) entry point: pass DRM_FORMAT_MOD_INVALID so
+    * the common path derives the layout from the BO's tiling mode
+    * instead. */
+   return intel_create_image_from_fds_common(dri_screen, width, height, fourcc,
+                                             DRM_FORMAT_MOD_INVALID,
+                                             fds, num_fds, strides, offsets,
+                                             loaderPrivate);
+}
+
+static __DRIimage *
+intel_create_image_from_dma_bufs2(__DRIscreen *dri_screen,
+ int width, int height,
+ int fourcc, uint64_t modifier,
+ int *fds, int num_fds,
+ int *strides, int *offsets,
+ enum __DRIYUVColorSpace yuv_color_space,
+ enum __DRISampleRange sample_range,
+ enum __DRIChromaSiting horizontal_siting,
+ enum __DRIChromaSiting vertical_siting,
+ unsigned *error,
+ void *loaderPrivate)
{
__DRIimage *image;
- struct intel_image_format *f = intel_image_format_lookup(fourcc);
+ const struct intel_image_format *f = intel_image_format_lookup(fourcc);
if (!f) {
*error = __DRI_IMAGE_ERROR_BAD_MATCH;
return NULL;
}
- image = intel_create_image_from_fds(dri_screen, width, height, fourcc, fds,
- num_fds, strides, offsets,
- loaderPrivate);
+ image = intel_create_image_from_fds_common(dri_screen, width, height,
+ fourcc, modifier,
+ fds, num_fds, strides, offsets,
+ loaderPrivate);
/*
* Invalid parameters and any inconsistencies between are assumed to be
}
static __DRIimage *
-intel_from_planar(__DRIimage *parent, int plane, void *loaderPrivate)
+intel_create_image_from_dma_bufs(__DRIscreen *dri_screen,
+                                 int width, int height, int fourcc,
+                                 int *fds, int num_fds,
+                                 int *strides, int *offsets,
+                                 enum __DRIYUVColorSpace yuv_color_space,
+                                 enum __DRISampleRange sample_range,
+                                 enum __DRIChromaSiting horizontal_siting,
+                                 enum __DRIChromaSiting vertical_siting,
+                                 unsigned *error,
+                                 void *loaderPrivate)
{
-   int width, height, offset, stride, dri_format, index;
-   struct intel_image_format *f;
-   __DRIimage *image;
+   /* Legacy dma-buf import without an explicit modifier: forward to the
+    * modifier-aware variant with DRM_FORMAT_MOD_INVALID. */
+   return intel_create_image_from_dma_bufs2(dri_screen, width, height,
+                                            fourcc, DRM_FORMAT_MOD_INVALID,
+                                            fds, num_fds, strides, offsets,
+                                            yuv_color_space,
+                                            sample_range,
+                                            horizontal_siting,
+                                            vertical_siting,
+                                            error,
+                                            loaderPrivate);
+}
- if (parent == NULL || parent->planar_format == NULL)
- return NULL;
+/* queryDmaBufFormats hook: write up to 'max' importable FourCC codes into
+ * 'formats' and report how many were written via '*count'.  With max == 0
+ * only the total count is reported.  SARGB8888 is excluded from the
+ * list. */
+static GLboolean
+intel_query_dma_buf_formats(__DRIscreen *screen, int max,
+                            int *formats, int *count)
+{
+   int i, j = 0;
+
+   if (max == 0) {
+      *count = ARRAY_SIZE(intel_image_formats) - 1; /* not SARGB */
+      return true;
+   }
-   f = parent->planar_format;
+   for (i = 0; i < (ARRAY_SIZE(intel_image_formats)) && j < max; i++) {
+      if (intel_image_formats[i].fourcc == __DRI_IMAGE_FOURCC_SARGB8888)
+         continue;
+      formats[j++] = intel_image_formats[i].fourcc;
+   }
-   if (plane >= f->nplanes)
-      return NULL;
+   *count = j;
+   return true;
+}
+
+/* queryDmaBufModifiers hook: for the given FourCC, write up to 'max'
+ * device-supported format modifiers into 'modifiers' and set '*count' to
+ * the number found.  With max == 0 only the count is reported.  When
+ * 'external_only' is non-NULL, its entries parallel 'modifiers'; entries
+ * for the subsampled YUV component layouts (Y_U_V, Y_UV, Y_XUXV) are set
+ * to GL_TRUE -- presumably because those can only be sampled as external
+ * textures; confirm against the GL_OES_EGL_image_external path. */
+static GLboolean
+intel_query_dma_buf_modifiers(__DRIscreen *_screen, int fourcc, int max,
+                              uint64_t *modifiers,
+                              unsigned int *external_only,
+                              int *count)
+{
+   struct intel_screen *screen = _screen->driverPrivate;
+   const struct intel_image_format *f;
+   int num_mods = 0, i;
-   width = parent->width >> f->planes[plane].width_shift;
-   height = parent->height >> f->planes[plane].height_shift;
-   dri_format = f->planes[plane].dri_format;
-   index = f->planes[plane].buffer_index;
-   offset = parent->offsets[index];
-   stride = parent->strides[index];
+   f = intel_image_format_lookup(fourcc);
+   if (f == NULL)
+      return false;
-   image = intel_allocate_image(parent->screen, dri_format, loaderPrivate);
-   if (image == NULL)
-      return NULL;
+   /* Filter the static modifier table down to what this device accepts. */
+   for (i = 0; i < ARRAY_SIZE(supported_modifiers); i++) {
+      uint64_t modifier = supported_modifiers[i].modifier;
+      if (!modifier_is_supported(&screen->devinfo, f, 0, modifier))
+         continue;
-   if (offset + height * stride > parent->bo->size) {
-      _mesa_warning(NULL, "intel_create_sub_image: subimage out of bounds");
-      free(image);
+      num_mods++;
+      if (max == 0)
+         continue;
+
+      modifiers[num_mods - 1] = modifier;
+      if (num_mods >= max)
+         break;
+   }
+
+   if (external_only != NULL) {
+      for (i = 0; i < num_mods && i < max; i++) {
+         if (f->components == __DRI_IMAGE_COMPONENTS_Y_U_V ||
+             f->components == __DRI_IMAGE_COMPONENTS_Y_UV ||
+             f->components == __DRI_IMAGE_COMPONENTS_Y_XUXV) {
+            external_only[i] = GL_TRUE;
+         }
+         else {
+            external_only[i] = GL_FALSE;
+         }
+      }
+   }
+
+   *count = num_mods;
+   return true;
+}
+
+static __DRIimage *
+intel_from_planar(__DRIimage *parent, int plane, void *loaderPrivate)
+{
+ int width, height, offset, stride, dri_format, index;
+ const struct intel_image_format *f;
+ __DRIimage *image;
+
+ if (parent == NULL) {
return NULL;
+ } else if (parent->planar_format == NULL) {
+ const bool is_aux =
+ isl_drm_modifier_has_aux(parent->modifier) && plane == 1;
+ if (!is_aux)
+ return NULL;
+
+ width = parent->width;
+ height = parent->height;
+ dri_format = parent->dri_format;
+ offset = parent->aux_offset;
+ stride = parent->aux_pitch;
+ } else {
+ /* Planar formats don't support aux buffers/images */
+ assert(!isl_drm_modifier_has_aux(parent->modifier));
+ f = parent->planar_format;
+
+ if (plane >= f->nplanes)
+ return NULL;
+
+ width = parent->width >> f->planes[plane].width_shift;
+ height = parent->height >> f->planes[plane].height_shift;
+ dri_format = f->planes[plane].dri_format;
+ index = f->planes[plane].buffer_index;
+ offset = parent->offsets[index];
+ stride = parent->strides[index];
+
+ if (offset + height * stride > parent->bo->size) {
+ _mesa_warning(NULL, "intel_create_sub_image: subimage out of bounds");
+ return NULL;
+ }
}
+ image = intel_allocate_image(parent->screen, dri_format, loaderPrivate);
+ if (image == NULL)
+ return NULL;
+
image->bo = parent->bo;
- drm_bacon_bo_reference(parent->bo);
+ brw_bo_reference(parent->bo);
+ image->modifier = parent->modifier;
image->width = width;
image->height = height;
}
static const __DRIimageExtension intelImageExtension = {
- .base = { __DRI_IMAGE, 14 },
+ .base = { __DRI_IMAGE, 15 },
.createImageFromName = intel_create_image_from_name,
.createImageFromRenderbuffer = intel_create_image_from_renderbuffer,
.mapImage = NULL,
.unmapImage = NULL,
.createImageWithModifiers = intel_create_image_with_modifiers,
+ .createImageFromDmaBufs2 = intel_create_image_from_dma_bufs2,
+ .queryDmaBufFormats = intel_query_dma_buf_formats,
+ .queryDmaBufModifiers = intel_query_dma_buf_modifiers,
};
static uint64_t
&intelImageExtension.base,
&intelRendererQueryExtension.base,
&dri2ConfigQueryExtension.base,
+ &dri2NoErrorExtension.base,
NULL
};
&intelRendererQueryExtension.base,
&dri2ConfigQueryExtension.base,
&dri2Robustness.base,
+ &dri2NoErrorExtension.base,
NULL
};
{
struct intel_screen *screen = sPriv->driverPrivate;
- drm_bacon_bufmgr_destroy(screen->bufmgr);
+ brw_bufmgr_destroy(screen->bufmgr);
driDestroyOptionInfo(&screen->optionCache);
ralloc_free(screen);
/**
- * This is called when we need to set up GL rendering to a new X window.
+ * Create a gl_framebuffer and attach it to __DRIdrawable::driverPrivate.
+ *
+ * This implements driDriverAPI::createNewDrawable, which the DRI layer calls
+ * when creating an EGLSurface, GLXDrawable, or GLXPixmap. Despite the name,
+ * this does not allocate GPU memory.
*/
static GLboolean
intelCreateBuffer(__DRIscreen *dri_screen,
mesa_format rgbFormat;
unsigned num_samples =
intel_quantize_num_samples(screen, mesaVis->samples);
- struct gl_framebuffer *fb;
if (isPixmap)
return false;
- fb = CALLOC_STRUCT(gl_framebuffer);
+ struct gl_framebuffer *fb = CALLOC_STRUCT(gl_framebuffer);
if (!fb)
return false;
}
/* setup the hardware-based renderbuffers */
- rb = intel_create_renderbuffer(rgbFormat, num_samples);
- _mesa_add_renderbuffer_without_ref(fb, BUFFER_FRONT_LEFT, &rb->Base.Base);
+ rb = intel_create_winsys_renderbuffer(screen, rgbFormat, num_samples);
+ _mesa_attach_and_own_rb(fb, BUFFER_FRONT_LEFT, &rb->Base.Base);
if (mesaVis->doubleBufferMode) {
- rb = intel_create_renderbuffer(rgbFormat, num_samples);
- _mesa_add_renderbuffer_without_ref(fb, BUFFER_BACK_LEFT, &rb->Base.Base);
+ rb = intel_create_winsys_renderbuffer(screen, rgbFormat, num_samples);
+ _mesa_attach_and_own_rb(fb, BUFFER_BACK_LEFT, &rb->Base.Base);
}
/*
assert(mesaVis->stencilBits == 8);
if (screen->devinfo.has_hiz_and_separate_stencil) {
- rb = intel_create_private_renderbuffer(MESA_FORMAT_Z24_UNORM_X8_UINT,
+ rb = intel_create_private_renderbuffer(screen,
+ MESA_FORMAT_Z24_UNORM_X8_UINT,
num_samples);
- _mesa_add_renderbuffer_without_ref(fb, BUFFER_DEPTH, &rb->Base.Base);
- rb = intel_create_private_renderbuffer(MESA_FORMAT_S_UINT8,
+ _mesa_attach_and_own_rb(fb, BUFFER_DEPTH, &rb->Base.Base);
+ rb = intel_create_private_renderbuffer(screen, MESA_FORMAT_S_UINT8,
num_samples);
- _mesa_add_renderbuffer_without_ref(fb, BUFFER_STENCIL,
- &rb->Base.Base);
+ _mesa_attach_and_own_rb(fb, BUFFER_STENCIL, &rb->Base.Base);
} else {
/*
* Use combined depth/stencil. Note that the renderbuffer is
* attached to two attachment points.
*/
- rb = intel_create_private_renderbuffer(MESA_FORMAT_Z24_UNORM_S8_UINT,
+ rb = intel_create_private_renderbuffer(screen,
+ MESA_FORMAT_Z24_UNORM_S8_UINT,
num_samples);
- _mesa_add_renderbuffer_without_ref(fb, BUFFER_DEPTH, &rb->Base.Base);
- _mesa_add_renderbuffer(fb, BUFFER_STENCIL, &rb->Base.Base);
+ _mesa_attach_and_own_rb(fb, BUFFER_DEPTH, &rb->Base.Base);
+ _mesa_attach_and_reference_rb(fb, BUFFER_STENCIL, &rb->Base.Base);
}
}
else if (mesaVis->depthBits == 16) {
assert(mesaVis->stencilBits == 0);
- rb = intel_create_private_renderbuffer(MESA_FORMAT_Z_UNORM16,
+ rb = intel_create_private_renderbuffer(screen, MESA_FORMAT_Z_UNORM16,
num_samples);
- _mesa_add_renderbuffer_without_ref(fb, BUFFER_DEPTH, &rb->Base.Base);
+ _mesa_attach_and_own_rb(fb, BUFFER_DEPTH, &rb->Base.Base);
}
else {
assert(mesaVis->depthBits == 0);
if (getenv("INTEL_NO_HW") != NULL)
screen->no_hw = true;
- screen->bufmgr = drm_bacon_bufmgr_gem_init(&screen->devinfo,
- dri_screen->fd, BATCH_SZ);
+ screen->bufmgr = brw_bufmgr_init(&screen->devinfo, dri_screen->fd, BATCH_SZ);
if (screen->bufmgr == NULL) {
fprintf(stderr, "[%s:%u] Error initializing buffer manager.\n",
__func__, __LINE__);
static bool
intel_detect_swizzling(struct intel_screen *screen)
{
- drm_bacon_bo *buffer;
- unsigned long flags = 0;
- unsigned long aligned_pitch;
+ struct brw_bo *buffer;
+ unsigned flags = 0;
+ uint32_t aligned_pitch;
uint32_t tiling = I915_TILING_X;
uint32_t swizzle_mode = 0;
- buffer = drm_bacon_bo_alloc_tiled(screen->bufmgr, "swizzle test",
- 64, 64, 4,
- &tiling, &aligned_pitch, flags);
+ buffer = brw_bo_alloc_tiled_2d(screen->bufmgr, "swizzle test",
+ 64, 64, 4, tiling, &aligned_pitch, flags);
if (buffer == NULL)
return false;
- drm_bacon_bo_get_tiling(buffer, &tiling, &swizzle_mode);
- drm_bacon_bo_unreference(buffer);
+ brw_bo_get_tiling(buffer, &tiling, &swizzle_mode);
+ brw_bo_unreference(buffer);
if (swizzle_mode == I915_BIT_6_SWIZZLE_NONE)
return false;
* More recent kernels offer an interface to read the full 36bits
* everywhere.
*/
- if (drm_bacon_reg_read(screen->bufmgr, TIMESTAMP | 1, &dummy) == 0)
+ if (brw_reg_read(screen->bufmgr, TIMESTAMP | 1, &dummy) == 0)
return 3;
/* Determine if we have a 32bit or 64bit kernel by inspecting the
* upper 32bits for a rapidly changing timestamp.
*/
- if (drm_bacon_reg_read(screen->bufmgr, TIMESTAMP, &last))
+ if (brw_reg_read(screen->bufmgr, TIMESTAMP, &last))
return 0;
upper = lower = 0;
/* The TIMESTAMP should change every 80ns, so several round trips
* through the kernel should be enough to advance it.
*/
- if (drm_bacon_reg_read(screen->bufmgr, TIMESTAMP, &dummy))
+ if (brw_reg_read(screen->bufmgr, TIMESTAMP, &dummy))
return 0;
upper += (dummy >> 32) != (last >> 32);
intel_detect_pipelined_register(struct intel_screen *screen,
int reg, uint32_t expected_value, bool reset)
{
- drm_bacon_bo *results, *bo;
+ if (screen->no_hw)
+ return false;
+
+ struct brw_bo *results, *bo;
uint32_t *batch;
uint32_t offset = 0;
+ void *map;
bool success = false;
/* Create a zero'ed temporary buffer for reading our results */
- results = drm_bacon_bo_alloc(screen->bufmgr, "registers", 4096, 0);
+ results = brw_bo_alloc(screen->bufmgr, "registers", 4096, 0);
if (results == NULL)
goto err;
- bo = drm_bacon_bo_alloc(screen->bufmgr, "batchbuffer", 4096, 0);
+ bo = brw_bo_alloc(screen->bufmgr, "batchbuffer", 4096, 0);
if (bo == NULL)
goto err_results;
- if (drm_bacon_bo_map(bo, 1))
+ map = brw_bo_map(NULL, bo, MAP_WRITE);
+ if (!map)
goto err_batch;
- batch = bo->virtual;
+ batch = map;
/* Write the register. */
*batch++ = MI_LOAD_REGISTER_IMM | (3 - 2);
/* Save the register's value back to the buffer. */
*batch++ = MI_STORE_REGISTER_MEM | (3 - 2);
*batch++ = reg;
- drm_bacon_bo_emit_reloc(bo, (char *)batch -(char *)bo->virtual,
- results, offset*sizeof(uint32_t),
- I915_GEM_DOMAIN_INSTRUCTION,
- I915_GEM_DOMAIN_INSTRUCTION);
- *batch++ = ((uint32_t) results->offset64) + offset*sizeof(uint32_t);
+ struct drm_i915_gem_relocation_entry reloc = {
+ .offset = (char *) batch - (char *) map,
+ .delta = offset * sizeof(uint32_t),
+ .target_handle = results->gem_handle,
+ .read_domains = I915_GEM_DOMAIN_INSTRUCTION,
+ .write_domain = I915_GEM_DOMAIN_INSTRUCTION,
+ };
+ *batch++ = reloc.presumed_offset + reloc.delta;
/* And afterwards clear the register */
if (reset) {
*batch++ = MI_BATCH_BUFFER_END;
- drm_bacon_bo_mrb_exec(bo, ALIGN((char *)batch - (char *)bo->virtual, 8),
- I915_EXEC_RENDER);
+ struct drm_i915_gem_exec_object2 exec_objects[2] = {
+ {
+ .handle = results->gem_handle,
+ },
+ {
+ .handle = bo->gem_handle,
+ .relocation_count = 1,
+ .relocs_ptr = (uintptr_t) &reloc,
+ }
+ };
+
+ struct drm_i915_gem_execbuffer2 execbuf = {
+ .buffers_ptr = (uintptr_t) exec_objects,
+ .buffer_count = 2,
+ .batch_len = ALIGN((char *) batch - (char *) map, 8),
+ .flags = I915_EXEC_RENDER,
+ };
+
+ /* Don't bother with error checking - if the execbuf fails, the
+ * value won't be written and we'll just report that there's no access.
+ */
+ __DRIscreen *dri_screen = screen->driScrnPriv;
+ drmIoctl(dri_screen->fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
/* Check whether the value got written. */
- if (drm_bacon_bo_map(results, false) == 0) {
- success = *((uint32_t *)results->virtual + offset) == expected_value;
- drm_bacon_bo_unmap(results);
+ void *results_map = brw_bo_map(NULL, results, MAP_READ);
+ if (results_map) {
+ success = *((uint32_t *)results_map + offset) == expected_value;
+ brw_bo_unmap(results);
}
err_batch:
- drm_bacon_bo_unreference(bo);
+ brw_bo_unreference(bo);
err_results:
- drm_bacon_bo_unreference(results);
+ brw_bo_unreference(results);
err:
return success;
}
static const mesa_format formats[] = {
MESA_FORMAT_B5G6R5_UNORM,
MESA_FORMAT_B8G8R8A8_UNORM,
- MESA_FORMAT_B8G8R8X8_UNORM
+ MESA_FORMAT_B8G8R8X8_UNORM,
+
+ /* The 32-bit RGBA format must not precede the 32-bit BGRA format.
+ * Likewise for RGBX and BGRX. Otherwise, the GLX client and the GLX
+ * server may disagree on which format the GLXFBConfig represents,
+ * resulting in swapped color channels.
+ *
+ * The problem, as of 2017-05-30:
+ * When matching a GLXFBConfig to a __DRIconfig, GLX ignores the channel
+ * order and chooses the first __DRIconfig with the expected channel
+ * sizes. Specifically, GLX compares the GLXFBConfig's and __DRIconfig's
+ * __DRI_ATTRIB_{CHANNEL}_SIZE but ignores __DRI_ATTRIB_{CHANNEL}_MASK.
+ *
+ * EGL does not suffer from this problem. It correctly compares the
+ * channel masks when matching EGLConfig to __DRIconfig.
+ */
+
+ /* Required by Android, for HAL_PIXEL_FORMAT_RGBA_8888. */
+ MESA_FORMAT_R8G8B8A8_UNORM,
+
+ /* Required by Android, for HAL_PIXEL_FORMAT_RGBX_8888. */
+ MESA_FORMAT_R8G8B8X8_UNORM,
};
/* GLX_SWAP_COPY_OML is not supported due to page flipping. */
static const GLenum back_buffer_modes[] = {
- GLX_SWAP_UNDEFINED_OML, GLX_NONE,
+ __DRI_ATTRIB_SWAP_UNDEFINED, __DRI_ATTRIB_SWAP_NONE
};
static const uint8_t singlesample_samples[1] = {0};
- static const uint8_t multisample_samples[2] = {4, 8};
struct intel_screen *screen = dri_screen->driverPrivate;
const struct gen_device_info *devinfo = &screen->devinfo;
__DRIconfig **new_configs;
const int num_depth_stencil_bits = 2;
int num_msaa_modes = 0;
+ const uint8_t *multisample_samples = NULL;
depth_bits[0] = 0;
stencil_bits[0] = 0;
stencil_bits[1] = 8;
}
- if (devinfo->gen >= 7)
- num_msaa_modes = 2;
- else if (devinfo->gen == 6)
- num_msaa_modes = 1;
+ if (devinfo->gen >= 9) {
+ static const uint8_t multisample_samples_gen9[] = {2, 4, 8, 16};
+ multisample_samples = multisample_samples_gen9;
+ num_msaa_modes = ARRAY_SIZE(multisample_samples_gen9);
+ } else if (devinfo->gen == 8) {
+ static const uint8_t multisample_samples_gen8[] = {2, 4, 8};
+ multisample_samples = multisample_samples_gen8;
+ num_msaa_modes = ARRAY_SIZE(multisample_samples_gen8);
+ } else if (devinfo->gen == 7) {
+ static const uint8_t multisample_samples_gen7[] = {4, 8};
+ multisample_samples = multisample_samples_gen7;
+ num_msaa_modes = ARRAY_SIZE(multisample_samples_gen7);
+ } else if (devinfo->gen == 6) {
+ static const uint8_t multisample_samples_gen6[] = {4};
+ multisample_samples = multisample_samples_gen6;
+ num_msaa_modes = ARRAY_SIZE(multisample_samples_gen6);
+ }
new_configs = driCreateConfigs(formats[i],
depth_bits,
const bool has_astc = screen->devinfo.gen >= 9;
switch (screen->devinfo.gen) {
+ case 10:
case 9:
case 8:
dri_screen->max_gl_core_version = 45;
break;
case 7:
dri_screen->max_gl_core_version = 33;
- if (screen->devinfo.is_haswell &&
- can_do_pipelined_register_writes(screen)) {
+ if (can_do_pipelined_register_writes(screen)) {
dri_screen->max_gl_core_version = 42;
- if (can_do_compute_dispatch(screen))
+ if (screen->devinfo.is_haswell && can_do_compute_dispatch(screen))
dri_screen->max_gl_core_version = 43;
- if (can_do_mi_math_and_lrr(screen))
+ if (screen->devinfo.is_haswell && can_do_mi_math_and_lrr(screen))
dri_screen->max_gl_core_version = 45;
}
dri_screen->max_gl_compat_version = 30;
{ "bdw", 0x162e },
{ "skl", 0x1912 },
{ "kbl", 0x5912 },
+ { "cnl", 0x5a52 },
};
for (unsigned i = 0; i < ARRAY_SIZE(name_map); i++) {
return name_map[i].pci_id;
}
- return strtod(devid_override, NULL);
+ return strtol(devid_override, NULL, 0);
}
/**
* Currently the entire (global) address space for all GTT maps is
* limited to 64bits. That is all objects on the system that are
* setup for GTT mmapping must fit within 64bits. An attempt to use
- * one that exceeds the limit with fail in drm_bacon_bo_map_gtt().
+ * one that exceeds the limit will fail in brw_bo_map_gtt().
*
* Long before we hit that limit, we will be practically limited by
* that any single object must fit in physical memory (RAM). The upper
screen->hw_has_swizzling = intel_detect_swizzling(screen);
screen->hw_has_timestamp = intel_detect_timestamp(screen);
+ isl_device_init(&screen->isl_dev, &screen->devinfo,
+ screen->hw_has_swizzling);
+
/* GENs prior to 8 do not support EU/Subslice info */
if (devinfo->gen >= 8) {
intel_detect_sseu(screen);
screen->cmd_parser_version = 0;
}
+ /* Kernel 4.13 required for exec object capture */
+ if (intel_get_boolean(screen, I915_PARAM_HAS_EXEC_CAPTURE)) {
+ screen->kernel_features |= KERNEL_ALLOWS_EXEC_CAPTURE;
+ }
+
+ if (intel_get_boolean(screen, I915_PARAM_HAS_EXEC_BATCH_FIRST)) {
+ screen->kernel_features |= KERNEL_ALLOWS_EXEC_BATCH_FIRST;
+ }
+
if (!intel_detect_pipelined_so(screen)) {
/* We can't do anything, so the effective version is 0. */
screen->cmd_parser_version = 0;
screen->kernel_features |= KERNEL_ALLOWS_SOL_OFFSET_WRITES;
}
+ if (devinfo->gen >= 8 || screen->cmd_parser_version >= 2)
+ screen->kernel_features |= KERNEL_ALLOWS_PREDICATE_WRITES;
+
+ /* Haswell requires command parser version 4 in order to have L3
+ * atomic scratch1 and chicken3 bits
+ */
+ if (devinfo->is_haswell && screen->cmd_parser_version >= 4) {
+ screen->kernel_features |=
+ KERNEL_ALLOWS_HSW_SCRATCH1_AND_ROW_CHICKEN3;
+ }
+
+ /* Haswell requires command parser version 6 in order to write to the
+ * MI_MATH GPR registers, and version 7 in order to use
+ * MI_LOAD_REGISTER_REG (which all users of MI_MATH use).
+ */
+ if (devinfo->gen >= 8 ||
+ (devinfo->is_haswell && screen->cmd_parser_version >= 7)) {
+ screen->kernel_features |= KERNEL_ALLOWS_MI_MATH_AND_LRR;
+ }
+
+ /* Gen7 needs at least command parser version 5 to support compute */
+ if (devinfo->gen >= 8 || screen->cmd_parser_version >= 5)
+ screen->kernel_features |= KERNEL_ALLOWS_COMPUTE_DISPATCH;
+
const char *force_msaa = getenv("INTEL_FORCE_MSAA");
if (force_msaa) {
screen->winsys_msaa_samples_override =
(ret != -1 || errno != EINVAL);
}
- if (devinfo->gen >= 8 || screen->cmd_parser_version >= 2)
- screen->kernel_features |= KERNEL_ALLOWS_PREDICATE_WRITES;
-
- /* Haswell requires command parser version 4 in order to have L3
- * atomic scratch1 and chicken3 bits
- */
- if (devinfo->is_haswell && screen->cmd_parser_version >= 4) {
- screen->kernel_features |=
- KERNEL_ALLOWS_HSW_SCRATCH1_AND_ROW_CHICKEN3;
- }
-
- /* Haswell requires command parser version 6 in order to write to the
- * MI_MATH GPR registers, and version 7 in order to use
- * MI_LOAD_REGISTER_REG (which all users of MI_MATH use).
- */
- if (devinfo->gen >= 8 ||
- (devinfo->is_haswell && screen->cmd_parser_version >= 7)) {
- screen->kernel_features |= KERNEL_ALLOWS_MI_MATH_AND_LRR;
- }
-
- /* Gen7 needs at least command parser version 5 to support compute */
- if (devinfo->gen >= 8 || screen->cmd_parser_version >= 5)
- screen->kernel_features |= KERNEL_ALLOWS_COMPUTE_DISPATCH;
-
dri_screen->extensions = !screen->has_context_reset_notification
? screenExtensions : intelRobustScreenExtensions;
screen->compiler = brw_compiler_create(screen, devinfo);
screen->compiler->shader_debug_log = shader_debug_log_mesa;
screen->compiler->shader_perf_log = shader_perf_log_mesa;
+ screen->compiler->constant_buffer_0_is_relative = devinfo->gen < 8;
screen->program_id = 1;
screen->has_exec_fence =
intel_get_boolean(screen, I915_PARAM_HAS_EXEC_FENCE);
+ intel_screen_init_surface_formats(screen);
+
return (const __DRIconfig**) intel_screen_make_configs(dri_screen);
}
struct intel_buffer {
__DRIbuffer base;
- drm_bacon_bo *bo;
+ struct brw_bo *bo;
};
static __DRIbuffer *
/* The front and back buffers are color buffers, which are X tiled. GEN9+
* supports Y tiled and compressed buffers, but there is no way to plumb that
* through to here. */
- uint32_t tiling = I915_TILING_X;
- unsigned long pitch;
+ uint32_t pitch;
int cpp = format / 8;
- intelBuffer->bo = drm_bacon_bo_alloc_tiled(screen->bufmgr,
- "intelAllocateBuffer",
- width,
- height,
- cpp,
- &tiling, &pitch,
- BO_ALLOC_FOR_RENDER);
+ intelBuffer->bo = brw_bo_alloc_tiled_2d(screen->bufmgr,
+ "intelAllocateBuffer",
+ width,
+ height,
+ cpp,
+ I915_TILING_X, &pitch,
+ BO_ALLOC_BUSY);
if (intelBuffer->bo == NULL) {
free(intelBuffer);
return NULL;
}
- drm_bacon_bo_flink(intelBuffer->bo, &intelBuffer->base.name);
+ brw_bo_flink(intelBuffer->bo, &intelBuffer->base.name);
intelBuffer->base.attachment = attachment;
intelBuffer->base.cpp = cpp;
{
struct intel_buffer *intelBuffer = (struct intel_buffer *) buffer;
- drm_bacon_bo_unreference(intelBuffer->bo);
+ brw_bo_unreference(intelBuffer->bo);
free(intelBuffer);
}