    if (!slab->entries)
        goto fail_buffer;
-   LIST_INITHEAD(&slab->base.free);
+   list_inithead(&slab->base.free);
    base_hash = __sync_fetch_and_add(&ws->next_bo_hash, slab->base.num_entries);
        bo->u.slab.entry.group_index = group_index;
        bo->u.slab.real = slab->buffer;
-       LIST_ADDTAIL(&bo->u.slab.entry.head, &slab->base.free);
+       list_addtail(&bo->u.slab.entry.head, &slab->base.free);
    }
    return &slab->base;
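/*
 * Illustrative sketch of the lowercase util/list.h helpers the hunk above
 * switches to (standalone example with made-up names, not part of this patch):
 * list_inithead() turns a list_head into an empty sentinel and list_addtail()
 * appends an entry at the end, which is how the slab's free list is populated.
 */
#include "util/list.h"

struct example_entry {
    unsigned index;
    struct list_head head;   /* link into the free list */
};

static void example_build_free_list(struct example_entry *entries,
                                    unsigned num_entries)
{
    struct list_head free_list;
    unsigned i;

    list_inithead(&free_list);   /* empty list: sentinel points at itself */

    /* Append entries in order, mirroring the loop in the hunk above. */
    for (i = 0; i < num_entries; i++)
        list_addtail(&entries[i].head, &free_list);
}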
static struct pb_buffer *radeon_winsys_bo_from_handle(struct radeon_winsys *rws,
                                                       struct winsys_handle *whandle,
-                                                      unsigned vm_alignment,
-                                                      unsigned *stride,
-                                                      unsigned *offset)
+                                                      unsigned vm_alignment)
{
    struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
    struct radeon_bo *bo;
    unsigned handle;
    uint64_t size = 0;
-   if (!offset && whandle->offset != 0) {
-       fprintf(stderr, "attempt to import unsupported winsys offset %u\n",
-               whandle->offset);
-       return NULL;
-   }
-
    /* We must maintain a list of pairs <handle, bo>, so that we always return
     * the same BO for one particular handle. If we didn't do that and created
     * more than one BO for the same handle and then relocated them in a CS,
done:
    mtx_unlock(&ws->bo_handles_mutex);
-   if (stride)
-       *stride = whandle->stride;
-   if (offset)
-       *offset = whandle->offset;
-
    if (ws->info.r600_has_virtual_memory && !bo->va) {
        struct drm_radeon_gem_va va;
    return NULL;
}
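/*
 * Illustrative sketch of the rule stated in the comment above (made-up names,
 * C11 <threads.h>, not part of this patch): keep a mutex-guarded handle -> BO
 * map, reuse an existing BO on a hit and only create a new one on a miss, so
 * importing the same handle twice can never produce two distinct BOs.
 */
#include <stdlib.h>
#include <threads.h>

struct example_bo {
    unsigned handle;
    int refcount;
    struct example_bo *next;
};

struct example_winsys {
    mtx_t bo_map_mutex;
    struct example_bo *bo_map;   /* toy stand-in for the real hash table */
};

static struct example_bo *example_import(struct example_winsys *ws, unsigned handle)
{
    struct example_bo *bo;

    mtx_lock(&ws->bo_map_mutex);

    for (bo = ws->bo_map; bo; bo = bo->next) {
        if (bo->handle == handle) {
            bo->refcount++;           /* already imported: return the same BO */
            mtx_unlock(&ws->bo_map_mutex);
            return bo;
        }
    }

    bo = calloc(1, sizeof(*bo));      /* first import of this handle */
    if (bo) {
        bo->handle = handle;
        bo->refcount = 1;
        bo->next = ws->bo_map;
        ws->bo_map = bo;
    }

    mtx_unlock(&ws->bo_map_mutex);
    return bo;
}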
-static bool radeon_winsys_bo_get_handle(struct pb_buffer *buffer,
-                                        unsigned stride, unsigned offset,
-                                        unsigned slice_size,
+static bool radeon_winsys_bo_get_handle(struct radeon_winsys *rws,
+                                        struct pb_buffer *buffer,
                                         struct winsys_handle *whandle)
{
    struct drm_gem_flink flink;
            return false;
    }
-   whandle->stride = stride;
-   whandle->offset = offset;
-   whandle->offset += slice_size * whandle->layer;
-
    return true;
}
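/*
 * Illustrative caller-side sketch of the narrowed export interface (made-up
 * helper name, include paths omitted; assumes the radeon_winsys vtable entry
 * buffer_get_handle mirrors the static function above, and that stride/offset
 * remain fields of struct winsys_handle): the driver fills the layout
 * metadata itself and the winsys only converts the buffer into a kernel handle.
 */
static bool example_export_texture(struct radeon_winsys *ws,
                                   struct pb_buffer *buf,
                                   unsigned stride, unsigned offset,
                                   struct winsys_handle *whandle)
{
    /* Metadata the winsys used to copy in is now the caller's job. */
    whandle->stride = stride;
    whandle->offset = offset;

    return ws->buffer_get_handle(ws, buf, whandle);
}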