DRI_CONF_DISABLE_BLEND_FUNC_EXTENDED("false")
DRI_CONF_DUAL_COLOR_BLEND_BY_LOCATION("false")
DRI_CONF_ALLOW_GLSL_EXTENSION_DIRECTIVE_MIDSHADER("false")
+ DRI_CONF_ALLOW_GLSL_BUILTIN_VARIABLE_REDECLARATION("false")
DRI_CONF_ALLOW_HIGHER_COMPAT_VERSION("false")
DRI_CONF_FORCE_GLSL_ABS_SQRT("false")
{ 0, 1, 0, __DRI_IMAGE_FORMAT_ARGB8888, 4 } } }
};
+/* Table pairing each i915 kernel tiling mode with the DRM format modifier
+ * that describes it, plus the row-height alignment that tiling layout
+ * requires when sizing a buffer.
+ */
+static const struct {
+ uint32_t tiling;
+ uint64_t modifier;
+ unsigned height_align;
+} tiling_modifier_map[] = {
+ { .tiling = I915_TILING_NONE, .modifier = DRM_FORMAT_MOD_LINEAR,
+ .height_align = 1 },
+ { .tiling = I915_TILING_X, .modifier = I915_FORMAT_MOD_X_TILED,
+ .height_align = 8 },
+ { .tiling = I915_TILING_Y, .modifier = I915_FORMAT_MOD_Y_TILED,
+ .height_align = 32 },
+};
+
+/* Translate a DRM format modifier to the i915 tiling mode it implies.
+ * The modifier must be one listed in tiling_modifier_map; anything else
+ * is a caller bug (hence unreachable(), not an error return).
+ */
+static uint32_t
+modifier_to_tiling(uint64_t modifier)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tiling_modifier_map); i++) {
+ if (tiling_modifier_map[i].modifier == modifier)
+ return tiling_modifier_map[i].tiling;
+ }
+
+ unreachable("modifier_to_tiling should only receive known modifiers");
+}
+
+/* Translate an i915 tiling mode to the DRM format modifier describing it.
+ * The tiling mode must be one listed in tiling_modifier_map; anything else
+ * is a caller bug (hence unreachable(), not an error return).
+ */
+static uint64_t
+tiling_to_modifier(uint32_t tiling)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tiling_modifier_map); i++) {
+ if (tiling_modifier_map[i].tiling == tiling)
+ return tiling_modifier_map[i].modifier;
+ }
+
+ unreachable("tiling_to_modifier received unknown tiling mode");
+}
+
+/* Return 'height' rounded up to the row-height alignment required by the
+ * tiling layout named by 'modifier'.  The modifier must be one listed in
+ * tiling_modifier_map; anything else is a caller bug.
+ */
+static unsigned
+get_tiled_height(uint64_t modifier, unsigned height)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tiling_modifier_map); i++) {
+ if (tiling_modifier_map[i].modifier == modifier)
+ return ALIGN(height, tiling_modifier_map[i].height_align);
+ }
+
+ /* Fixed copy-pasted message: this function receives a modifier, not a
+  * tiling mode. */
+ unreachable("get_tiled_height received unknown modifier");
+}
+
static void
intel_image_warn_if_unaligned(__DRIimage *image, const char *func)
{
uint32_t tiling, swizzle;
- drm_bacon_bo_get_tiling(image->bo, &tiling, &swizzle);
+ brw_bo_get_tiling(image->bo, &tiling, &swizzle);
if (tiling != I915_TILING_NONE && (image->offset & 0xfff)) {
_mesa_warning(NULL, "%s: offset 0x%08x not on tile boundary",
&image->tile_x,
&image->tile_y);
- drm_bacon_bo_unreference(image->bo);
+ brw_bo_unreference(image->bo);
image->bo = mt->bo;
- drm_bacon_bo_reference(mt->bo);
+ brw_bo_reference(mt->bo);
}
static __DRIimage *
image->width = width;
image->height = height;
image->pitch = pitch * cpp;
- image->bo = drm_bacon_bo_gem_create_from_name(screen->bufmgr, "image",
+ image->bo = brw_bo_gem_create_from_name(screen->bufmgr, "image",
name);
if (!image->bo) {
free(image);
return NULL;
}
+ image->modifier = tiling_to_modifier(image->bo->tiling_mode);
return image;
}
image->internal_format = rb->InternalFormat;
image->format = rb->Format;
+ image->modifier = tiling_to_modifier(irb->mt->tiling);
image->offset = 0;
image->data = loaderPrivate;
- drm_bacon_bo_unreference(image->bo);
+ brw_bo_unreference(image->bo);
image->bo = irb->mt->bo;
- drm_bacon_bo_reference(irb->mt->bo);
+ brw_bo_reference(irb->mt->bo);
image->width = rb->Width;
image->height = rb->Height;
image->pitch = irb->mt->pitch;
image->internal_format = obj->Image[face][level]->InternalFormat;
image->format = obj->Image[face][level]->TexFormat;
+ image->modifier = tiling_to_modifier(iobj->mt->tiling);
image->data = loaderPrivate;
intel_setup_image_from_mipmap_tree(brw, image, iobj->mt, level, zoffset);
image->dri_format = driGLFormatToImageFormat(image->format);
static void
intel_destroy_image(__DRIimage *image)
{
- drm_bacon_bo_unreference(image->bo);
+ brw_bo_unreference(image->bo);
free(image);
}
{
__DRIimage *image;
struct intel_screen *screen = dri_screen->driverPrivate;
- /* Historically, X-tiled was the default, and so lack of modifier means
- * X-tiled.
- */
- uint32_t tiling = I915_TILING_X;
+ uint32_t tiling;
+ uint64_t modifier = DRM_FORMAT_MOD_INVALID;
+ unsigned tiled_height;
int cpp;
- unsigned long pitch;
/* Callers of this may specify a modifier, or a dri usage, but not both. The
* newer modifier interface deprecates the older usage flags.
*/
assert(!(use && count));
- uint64_t modifier = select_best_modifier(&screen->devinfo, modifiers, count);
- switch (modifier) {
- case I915_FORMAT_MOD_X_TILED:
- assert(tiling == I915_TILING_X);
- break;
- case DRM_FORMAT_MOD_LINEAR:
- tiling = I915_TILING_NONE;
- break;
- case I915_FORMAT_MOD_Y_TILED:
- tiling = I915_TILING_Y;
- break;
- case DRM_FORMAT_MOD_INVALID:
- if (modifiers)
- return NULL;
- default:
- break;
- }
-
if (use & __DRI_IMAGE_USE_CURSOR) {
if (width != 64 || height != 64)
return NULL;
- tiling = I915_TILING_NONE;
+ modifier = DRM_FORMAT_MOD_LINEAR;
}
if (use & __DRI_IMAGE_USE_LINEAR)
- tiling = I915_TILING_NONE;
+ modifier = DRM_FORMAT_MOD_LINEAR;
+
+ if (modifier == DRM_FORMAT_MOD_INVALID) {
+ if (modifiers) {
+ /* User requested specific modifiers */
+ modifier = select_best_modifier(&screen->devinfo, modifiers, count);
+ if (modifier == DRM_FORMAT_MOD_INVALID)
+ return NULL;
+ } else {
+ /* Historically, X-tiled was the default, and so lack of modifier means
+ * X-tiled.
+ */
+ modifier = I915_FORMAT_MOD_X_TILED;
+ }
+ }
+ tiling = modifier_to_tiling(modifier);
+ tiled_height = get_tiled_height(modifier, height);
image = intel_allocate_image(screen, format, loaderPrivate);
if (image == NULL)
return NULL;
cpp = _mesa_get_format_bytes(image->format);
- image->bo = drm_bacon_bo_alloc_tiled(screen->bufmgr, "image",
- width, height, cpp, &tiling,
- &pitch, 0);
+ image->bo = brw_bo_alloc_tiled(screen->bufmgr, "image",
+ width, tiled_height, cpp, tiling,
+ &image->pitch, 0);
if (image->bo == NULL) {
free(image);
return NULL;
}
image->width = width;
image->height = height;
- image->pitch = pitch;
image->modifier = modifier;
return image;
*value = image->bo->gem_handle;
return true;
case __DRI_IMAGE_ATTRIB_NAME:
- return !drm_bacon_bo_flink(image->bo, (uint32_t *) value);
+ return !brw_bo_flink(image->bo, (uint32_t *) value);
case __DRI_IMAGE_ATTRIB_FORMAT:
*value = image->dri_format;
return true;
*value = image->planar_format->components;
return true;
case __DRI_IMAGE_ATTRIB_FD:
- return !drm_bacon_bo_gem_export_to_prime(image->bo, value);
+ return !brw_bo_gem_export_to_prime(image->bo, value);
case __DRI_IMAGE_ATTRIB_FOURCC:
return intel_lookup_fourcc(image->dri_format, value);
case __DRI_IMAGE_ATTRIB_NUM_PLANES:
if (image == NULL)
return NULL;
- drm_bacon_bo_reference(orig_image->bo);
+ brw_bo_reference(orig_image->bo);
image->bo = orig_image->bo;
image->internal_format = orig_image->internal_format;
image->planar_format = orig_image->planar_format;
image->dri_format = orig_image->dri_format;
image->format = orig_image->format;
+ image->modifier = orig_image->modifier;
image->offset = orig_image->offset;
image->width = orig_image->width;
image->height = orig_image->height;
struct intel_screen *screen = dri_screen->driverPrivate;
struct intel_image_format *f;
__DRIimage *image;
+ unsigned tiled_height;
int i, index;
if (fds == NULL || num_fds < 1)
image->pitch = strides[0];
image->planar_format = f;
+
+ image->bo = brw_bo_gem_create_from_prime(screen->bufmgr, fds[0]);
+ if (image->bo == NULL) {
+ free(image);
+ return NULL;
+ }
+
+ image->modifier = tiling_to_modifier(image->bo->tiling_mode);
+ tiled_height = get_tiled_height(image->modifier, height);
+
int size = 0;
for (i = 0; i < f->nplanes; i++) {
index = f->planes[i].buffer_index;
image->offsets[index] = offsets[index];
image->strides[index] = strides[index];
- const int plane_height = height >> f->planes[i].height_shift;
+ const int plane_height = tiled_height >> f->planes[i].height_shift;
const int end = offsets[index] + plane_height * strides[index];
if (size < end)
size = end;
}
- image->bo = drm_bacon_bo_gem_create_from_prime(screen->bufmgr,
- fds[0], size);
- if (image->bo == NULL) {
+ /* Check that the requested image actually fits within the BO. 'size'
+ * is already relative to the offsets, so we don't need to add that. */
+ if (image->bo->size == 0) {
+ image->bo->size = size;
+ } else if (size > image->bo->size) {
+ brw_bo_unreference(image->bo);
free(image);
return NULL;
}
}
image->bo = parent->bo;
- drm_bacon_bo_reference(parent->bo);
+ brw_bo_reference(parent->bo);
+ image->modifier = parent->modifier;
image->width = width;
image->height = height;
{
struct intel_screen *screen = sPriv->driverPrivate;
- drm_bacon_bufmgr_destroy(screen->bufmgr);
+ brw_bufmgr_destroy(screen->bufmgr);
driDestroyOptionInfo(&screen->optionCache);
ralloc_free(screen);
/**
- * This is called when we need to set up GL rendering to a new X window.
+ * Create a gl_framebuffer and attach it to __DRIdrawable::driverPrivate.
+ *
* This implements driDriverAPI::createNewDrawable, which the DRI layer calls
* when creating an EGLSurface, GLXDrawable, or GLXPixmap. Despite the name,
+ * this does not allocate GPU memory.
*/
static GLboolean
intelCreateBuffer(__DRIscreen *dri_screen,
mesa_format rgbFormat;
unsigned num_samples =
intel_quantize_num_samples(screen, mesaVis->samples);
- struct gl_framebuffer *fb;
if (isPixmap)
return false;
- fb = CALLOC_STRUCT(gl_framebuffer);
+ struct gl_framebuffer *fb = CALLOC_STRUCT(gl_framebuffer);
if (!fb)
return false;
}
/* setup the hardware-based renderbuffers */
- rb = intel_create_renderbuffer(rgbFormat, num_samples);
- _mesa_add_renderbuffer_without_ref(fb, BUFFER_FRONT_LEFT, &rb->Base.Base);
+ rb = intel_create_winsys_renderbuffer(rgbFormat, num_samples);
+ _mesa_attach_and_own_rb(fb, BUFFER_FRONT_LEFT, &rb->Base.Base);
if (mesaVis->doubleBufferMode) {
- rb = intel_create_renderbuffer(rgbFormat, num_samples);
- _mesa_add_renderbuffer_without_ref(fb, BUFFER_BACK_LEFT, &rb->Base.Base);
+ rb = intel_create_winsys_renderbuffer(rgbFormat, num_samples);
+ _mesa_attach_and_own_rb(fb, BUFFER_BACK_LEFT, &rb->Base.Base);
}
/*
if (screen->devinfo.has_hiz_and_separate_stencil) {
rb = intel_create_private_renderbuffer(MESA_FORMAT_Z24_UNORM_X8_UINT,
num_samples);
- _mesa_add_renderbuffer_without_ref(fb, BUFFER_DEPTH, &rb->Base.Base);
+ _mesa_attach_and_own_rb(fb, BUFFER_DEPTH, &rb->Base.Base);
rb = intel_create_private_renderbuffer(MESA_FORMAT_S_UINT8,
num_samples);
- _mesa_add_renderbuffer_without_ref(fb, BUFFER_STENCIL,
- &rb->Base.Base);
+ _mesa_attach_and_own_rb(fb, BUFFER_STENCIL, &rb->Base.Base);
} else {
/*
* Use combined depth/stencil. Note that the renderbuffer is
*/
rb = intel_create_private_renderbuffer(MESA_FORMAT_Z24_UNORM_S8_UINT,
num_samples);
- _mesa_add_renderbuffer_without_ref(fb, BUFFER_DEPTH, &rb->Base.Base);
- _mesa_add_renderbuffer(fb, BUFFER_STENCIL, &rb->Base.Base);
+ _mesa_attach_and_own_rb(fb, BUFFER_DEPTH, &rb->Base.Base);
+ _mesa_attach_and_reference_rb(fb, BUFFER_STENCIL, &rb->Base.Base);
}
}
else if (mesaVis->depthBits == 16) {
assert(mesaVis->stencilBits == 0);
rb = intel_create_private_renderbuffer(MESA_FORMAT_Z_UNORM16,
num_samples);
- _mesa_add_renderbuffer_without_ref(fb, BUFFER_DEPTH, &rb->Base.Base);
+ _mesa_attach_and_own_rb(fb, BUFFER_DEPTH, &rb->Base.Base);
}
else {
assert(mesaVis->depthBits == 0);
if (getenv("INTEL_NO_HW") != NULL)
screen->no_hw = true;
- screen->bufmgr = drm_bacon_bufmgr_gem_init(&screen->devinfo,
- dri_screen->fd, BATCH_SZ);
+ screen->bufmgr = brw_bufmgr_init(&screen->devinfo, dri_screen->fd, BATCH_SZ);
if (screen->bufmgr == NULL) {
fprintf(stderr, "[%s:%u] Error initializing buffer manager.\n",
__func__, __LINE__);
static bool
intel_detect_swizzling(struct intel_screen *screen)
{
- drm_bacon_bo *buffer;
- unsigned long flags = 0;
- unsigned long aligned_pitch;
+ struct brw_bo *buffer;
+ unsigned flags = 0;
+ uint32_t aligned_pitch;
uint32_t tiling = I915_TILING_X;
uint32_t swizzle_mode = 0;
- buffer = drm_bacon_bo_alloc_tiled(screen->bufmgr, "swizzle test",
- 64, 64, 4,
- &tiling, &aligned_pitch, flags);
+ buffer = brw_bo_alloc_tiled(screen->bufmgr, "swizzle test",
+ 64, 64, 4, tiling, &aligned_pitch, flags);
if (buffer == NULL)
return false;
- drm_bacon_bo_get_tiling(buffer, &tiling, &swizzle_mode);
- drm_bacon_bo_unreference(buffer);
+ brw_bo_get_tiling(buffer, &tiling, &swizzle_mode);
+ brw_bo_unreference(buffer);
if (swizzle_mode == I915_BIT_6_SWIZZLE_NONE)
return false;
* More recent kernels offer an interface to read the full 36bits
* everywhere.
*/
- if (drm_bacon_reg_read(screen->bufmgr, TIMESTAMP | 1, &dummy) == 0)
+ if (brw_reg_read(screen->bufmgr, TIMESTAMP | 1, &dummy) == 0)
return 3;
/* Determine if we have a 32bit or 64bit kernel by inspecting the
* upper 32bits for a rapidly changing timestamp.
*/
- if (drm_bacon_reg_read(screen->bufmgr, TIMESTAMP, &last))
+ if (brw_reg_read(screen->bufmgr, TIMESTAMP, &last))
return 0;
upper = lower = 0;
/* The TIMESTAMP should change every 80ns, so several round trips
* through the kernel should be enough to advance it.
*/
- if (drm_bacon_reg_read(screen->bufmgr, TIMESTAMP, &dummy))
+ if (brw_reg_read(screen->bufmgr, TIMESTAMP, &dummy))
return 0;
upper += (dummy >> 32) != (last >> 32);
if (screen->no_hw)
return false;
- drm_bacon_bo *results, *bo;
+ struct brw_bo *results, *bo;
uint32_t *batch;
uint32_t offset = 0;
+ void *map;
bool success = false;
/* Create a zero'ed temporary buffer for reading our results */
- results = drm_bacon_bo_alloc(screen->bufmgr, "registers", 4096, 0);
+ results = brw_bo_alloc(screen->bufmgr, "registers", 4096, 0);
if (results == NULL)
goto err;
- bo = drm_bacon_bo_alloc(screen->bufmgr, "batchbuffer", 4096, 0);
+ bo = brw_bo_alloc(screen->bufmgr, "batchbuffer", 4096, 0);
if (bo == NULL)
goto err_results;
- if (drm_bacon_bo_map(bo, 1))
+ map = brw_bo_map(NULL, bo, MAP_WRITE);
+ if (!map)
goto err_batch;
- batch = bo->virtual;
+ batch = map;
/* Write the register. */
*batch++ = MI_LOAD_REGISTER_IMM | (3 - 2);
*batch++ = MI_STORE_REGISTER_MEM | (3 - 2);
*batch++ = reg;
struct drm_i915_gem_relocation_entry reloc = {
- .offset = (char *) batch - (char *) bo->virtual,
+ .offset = (char *) batch - (char *) map,
.delta = offset * sizeof(uint32_t),
.target_handle = results->gem_handle,
.read_domains = I915_GEM_DOMAIN_INSTRUCTION,
struct drm_i915_gem_execbuffer2 execbuf = {
.buffers_ptr = (uintptr_t) exec_objects,
.buffer_count = 2,
- .batch_len = ALIGN((char *) batch - (char *) bo->virtual, 8),
+ .batch_len = ALIGN((char *) batch - (char *) map, 8),
.flags = I915_EXEC_RENDER,
};
drmIoctl(dri_screen->fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
/* Check whether the value got written. */
- if (drm_bacon_bo_map(results, false) == 0) {
- success = *((uint32_t *)results->virtual + offset) == expected_value;
- drm_bacon_bo_unmap(results);
+ void *results_map = brw_bo_map(NULL, results, MAP_READ);
+ if (results_map) {
+ success = *((uint32_t *)results_map + offset) == expected_value;
+ brw_bo_unmap(results);
}
err_batch:
- drm_bacon_bo_unreference(bo);
+ brw_bo_unreference(bo);
err_results:
- drm_bacon_bo_unreference(results);
+ brw_bo_unreference(results);
err:
return success;
}
break;
case 7:
dri_screen->max_gl_core_version = 33;
- if (screen->devinfo.is_haswell &&
- can_do_pipelined_register_writes(screen)) {
+ if (can_do_pipelined_register_writes(screen)) {
dri_screen->max_gl_core_version = 42;
- if (can_do_compute_dispatch(screen))
+ if (screen->devinfo.is_haswell && can_do_compute_dispatch(screen))
dri_screen->max_gl_core_version = 43;
- if (can_do_mi_math_and_lrr(screen))
+ if (screen->devinfo.is_haswell && can_do_mi_math_and_lrr(screen))
dri_screen->max_gl_core_version = 45;
}
dri_screen->max_gl_compat_version = 30;
* Currently the entire (global) address space for all GTT maps is
* limited to 64bits. That is all objects on the system that are
* setup for GTT mmapping must fit within 64bits. An attempt to use
- * one that exceeds the limit with fail in drm_bacon_bo_map_gtt().
+ * one that exceeds the limit with fail in brw_bo_map_gtt().
*
* Long before we hit that limit, we will be practically limited by
* that any single object must fit in physical memory (RAM). The upper
screen->cmd_parser_version = 0;
}
+ /* Kernel 4.13 required for exec object capture */
+#ifndef I915_PARAM_HAS_EXEC_CAPTURE
+#define I915_PARAM_HAS_EXEC_CAPTURE 45
+#endif
+ if (intel_get_boolean(screen, I915_PARAM_HAS_EXEC_CAPTURE)) {
+ screen->kernel_features |= KERNEL_ALLOWS_EXEC_CAPTURE;
+ }
+
if (!intel_detect_pipelined_so(screen)) {
/* We can't do anything, so the effective version is 0. */
screen->cmd_parser_version = 0;
screen->kernel_features |= KERNEL_ALLOWS_SOL_OFFSET_WRITES;
}
+ if (devinfo->gen >= 8 || screen->cmd_parser_version >= 2)
+ screen->kernel_features |= KERNEL_ALLOWS_PREDICATE_WRITES;
+
+ /* Haswell requires command parser version 4 in order to have L3
+ * atomic scratch1 and chicken3 bits
+ */
+ if (devinfo->is_haswell && screen->cmd_parser_version >= 4) {
+ screen->kernel_features |=
+ KERNEL_ALLOWS_HSW_SCRATCH1_AND_ROW_CHICKEN3;
+ }
+
+ /* Haswell requires command parser version 6 in order to write to the
+ * MI_MATH GPR registers, and version 7 in order to use
+ * MI_LOAD_REGISTER_REG (which all users of MI_MATH use).
+ */
+ if (devinfo->gen >= 8 ||
+ (devinfo->is_haswell && screen->cmd_parser_version >= 7)) {
+ screen->kernel_features |= KERNEL_ALLOWS_MI_MATH_AND_LRR;
+ }
+
+ /* Gen7 needs at least command parser version 5 to support compute */
+ if (devinfo->gen >= 8 || screen->cmd_parser_version >= 5)
+ screen->kernel_features |= KERNEL_ALLOWS_COMPUTE_DISPATCH;
+
const char *force_msaa = getenv("INTEL_FORCE_MSAA");
if (force_msaa) {
screen->winsys_msaa_samples_override =
(ret != -1 || errno != EINVAL);
}
- if (devinfo->gen >= 8 || screen->cmd_parser_version >= 2)
- screen->kernel_features |= KERNEL_ALLOWS_PREDICATE_WRITES;
-
- /* Haswell requires command parser version 4 in order to have L3
- * atomic scratch1 and chicken3 bits
- */
- if (devinfo->is_haswell && screen->cmd_parser_version >= 4) {
- screen->kernel_features |=
- KERNEL_ALLOWS_HSW_SCRATCH1_AND_ROW_CHICKEN3;
- }
-
- /* Haswell requires command parser version 6 in order to write to the
- * MI_MATH GPR registers, and version 7 in order to use
- * MI_LOAD_REGISTER_REG (which all users of MI_MATH use).
- */
- if (devinfo->gen >= 8 ||
- (devinfo->is_haswell && screen->cmd_parser_version >= 7)) {
- screen->kernel_features |= KERNEL_ALLOWS_MI_MATH_AND_LRR;
- }
-
- /* Gen7 needs at least command parser version 5 to support compute */
- if (devinfo->gen >= 8 || screen->cmd_parser_version >= 5)
- screen->kernel_features |= KERNEL_ALLOWS_COMPUTE_DISPATCH;
-
dri_screen->extensions = !screen->has_context_reset_notification
? screenExtensions : intelRobustScreenExtensions;
struct intel_buffer {
__DRIbuffer base;
- drm_bacon_bo *bo;
+ struct brw_bo *bo;
};
static __DRIbuffer *
/* The front and back buffers are color buffers, which are X tiled. GEN9+
* supports Y tiled and compressed buffers, but there is no way to plumb that
* through to here. */
- uint32_t tiling = I915_TILING_X;
- unsigned long pitch;
+ uint32_t pitch;
int cpp = format / 8;
- intelBuffer->bo = drm_bacon_bo_alloc_tiled(screen->bufmgr,
- "intelAllocateBuffer",
- width,
- height,
- cpp,
- &tiling, &pitch,
- BO_ALLOC_FOR_RENDER);
+ intelBuffer->bo = brw_bo_alloc_tiled(screen->bufmgr,
+ "intelAllocateBuffer",
+ width,
+ height,
+ cpp,
+ I915_TILING_X, &pitch,
+ BO_ALLOC_FOR_RENDER);
if (intelBuffer->bo == NULL) {
free(intelBuffer);
return NULL;
}
- drm_bacon_bo_flink(intelBuffer->bo, &intelBuffer->base.name);
+ brw_bo_flink(intelBuffer->bo, &intelBuffer->base.name);
intelBuffer->base.attachment = attachment;
intelBuffer->base.cpp = cpp;
{
struct intel_buffer *intelBuffer = (struct intel_buffer *) buffer;
- drm_bacon_bo_unreference(intelBuffer->bo);
+ brw_bo_unreference(intelBuffer->bo);
free(intelBuffer);
}