uint64_t size,
unsigned flags,
uint32_t tiling_mode,
- uint32_t stride, uint64_t alignment)
+ uint32_t stride)
{
struct brw_bo *bo;
unsigned int page_size = getpagesize();
bo = LIST_ENTRY(struct brw_bo, bucket->head.prev, head);
list_del(&bo->head);
alloc_from_cache = true;
- bo->align = alignment;
+ bo->align = 0;
} else {
- assert(alignment == 0);
/* For non-render-target BOs (where we're probably
* going to map it first thing in order to fill it
* with data), check if the last BO in the cache is
bo->gem_handle = create.handle;
bo->bufmgr = bufmgr;
- bo->align = alignment;
+ bo->align = 0;
bo->tiling_mode = I915_TILING_NONE;
bo->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
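
For reference, the post-patch shape of the internal helper, pieced together from the parameter fragments above and the call sites below, is roughly the following sketch; the leading bufmgr/name parameters and the static qualifier fall outside these hunks and are assumed:

    /* Sketch only: the bufmgr and name parameters and the static storage
     * class are inferred from the call sites below, not shown in the hunks. */
    static struct brw_bo *
    bo_alloc_internal(struct brw_bufmgr *bufmgr,
                      const char *name,
                      uint64_t size,
                      unsigned flags,
                      uint32_t tiling_mode,
                      uint32_t stride);
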
brw_bo_alloc(struct brw_bufmgr *bufmgr,
const char *name, uint64_t size, uint64_t alignment)
{
- return bo_alloc_internal(bufmgr, name, size, 0, I915_TILING_NONE, 0, 0);
+ return bo_alloc_internal(bufmgr, name, size, 0, I915_TILING_NONE, 0);
}
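
Callers of brw_bo_alloc are unaffected: its signature above still carries an alignment parameter, and the hunk shows it was already passing a literal 0 down rather than forwarding that value. A hypothetical call, with the buffer name and size chosen purely for illustration:

    /* Hypothetical caller: a 4 KiB linear allocation; the trailing alignment
     * argument was ignored by the wrapper even before this patch. */
    struct brw_bo *scratch = brw_bo_alloc(bufmgr, "example scratch", 4096, 0);
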
struct brw_bo *
uint64_t size, uint32_t tiling_mode, uint32_t pitch,
unsigned flags)
{
- return bo_alloc_internal(bufmgr, name, size, flags, tiling_mode, pitch, 0);
+ return bo_alloc_internal(bufmgr, name, size, flags, tiling_mode, pitch);
}
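
The tiled wrapper's public parameter list (size, tiling_mode, pitch, flags) is likewise untouched; only the trailing 0 passed to bo_alloc_internal goes away. A hypothetical call is sketched below, assuming the wrapper is brw_bo_alloc_tiled (its name sits outside the hunk) and using illustrative size and pitch values:

    /* Hypothetical caller: the wrapper name, size and X-tiled pitch are assumed
     * for illustration; the patch only drops bo_alloc_internal's last argument. */
    struct brw_bo *image = brw_bo_alloc_tiled(bufmgr, "example image", 64 * 1024,
                                              I915_TILING_X, 512, 0);
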
struct brw_bo *
if (tiling == I915_TILING_NONE)
stride = 0;
- return bo_alloc_internal(bufmgr, name, size, flags, tiling, stride, 0);
+ return bo_alloc_internal(bufmgr, name, size, flags, tiling, stride);
}
/**