return NULL;
}
- bo = rzalloc(dev->memctx, struct panfrost_bo);
- assert(bo);
+ bo = pan_lookup_bo(dev, create_bo.handle);
+ assert(!memcmp(bo, &((struct panfrost_bo){}), sizeof(*bo)));
+
bo->size = create_bo.size;
bo->gpu = create_bo.offset;
bo->gem_handle = create_bo.handle;
assert(0);
}
- ralloc_free(bo);
+ /* BO will be freed with the sparse array, but zero to indicate free */
+ memset(bo, 0, sizeof(*bo));
}
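
Not shown in this excerpt: pan_lookup_bo(). It is presumably a thin wrapper over util_sparse_array_get(), indexed by the GEM handle; a minimal sketch, assuming that layout:

/* Sketch only: util_sparse_array_get() lazily allocates the element and
 * returns it zero-filled on first use, which is what the !memcmp() assert
 * above and the memset() in panfrost_bo_free() rely on. */
static struct panfrost_bo *
pan_lookup_bo(struct panfrost_device *dev, uint32_t gem_handle)
{
        return util_sparse_array_get(&dev->bo_map, gem_handle);
}
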
/* Returns true if the BO is ready, false otherwise.
p_atomic_set(&bo->refcnt, 1);
- pthread_mutex_lock(&dev->active_bos_lock);
- _mesa_set_add(bo->dev->active_bos, bo);
- pthread_mutex_unlock(&dev->active_bos_lock);
-
if (dev->debug & (PAN_DBG_TRACE | PAN_DBG_SYNC)) {
if (flags & PAN_BO_INVISIBLE)
pandecode_inject_mmap(bo->gpu, NULL, bo->size, NULL);
struct panfrost_device *dev = bo->dev;
- pthread_mutex_lock(&dev->active_bos_lock);
+ pthread_mutex_lock(&dev->bo_map_lock);
+
/* Someone might have imported this BO while we were waiting for the
* lock, let's make sure it's still not referenced before freeing it.
*/
if (p_atomic_read(&bo->refcnt) == 0) {
- _mesa_set_remove_key(bo->dev->active_bos, bo);
-
/* When the reference count goes to zero, we need to cleanup */
panfrost_bo_munmap(bo);
*/
if (!panfrost_bo_cache_put(bo))
panfrost_bo_free(bo);
}
- pthread_mutex_unlock(&dev->active_bos_lock);
+ pthread_mutex_unlock(&dev->bo_map_lock);
}
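
The refcnt re-check after taking bo_map_lock covers the same unreference/import race the old active_bos path had to handle; an illustrative interleaving (not code from this patch):

/*
 *   thread A: panfrost_bo_unreference()   refcnt 1 -> 0, then blocks on the lock
 *   thread B: panfrost_bo_import()        takes bo_map_lock first, finds the
 *                                         same slot, sets refcnt back to 1
 *   thread A: acquires bo_map_lock        sees refcnt != 0, skips the free
 */
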
struct panfrost_bo *
panfrost_bo_import(struct panfrost_device *dev, int fd)
{
- struct panfrost_bo *bo, *newbo = rzalloc(dev->memctx, struct panfrost_bo);
+ struct panfrost_bo *bo;
struct drm_panfrost_get_bo_offset get_bo_offset = {0,};
- struct set_entry *entry;
ASSERTED int ret;
unsigned gem_handle;
- newbo->dev = dev;
-
ret = drmPrimeFDToHandle(dev->fd, fd, &gem_handle);
assert(!ret);
- newbo->gem_handle = gem_handle;
+ pthread_mutex_lock(&dev->bo_map_lock);
+ bo = pan_lookup_bo(dev, gem_handle);
- pthread_mutex_lock(&dev->active_bos_lock);
- entry = _mesa_set_search_or_add(dev->active_bos, newbo);
- assert(entry);
- bo = (struct panfrost_bo *)entry->key;
- if (newbo == bo) {
+ if (!bo->dev) {
get_bo_offset.handle = gem_handle;
ret = drmIoctl(dev->fd, DRM_IOCTL_PANFROST_GET_BO_OFFSET, &get_bo_offset);
assert(!ret);
- newbo->gpu = (mali_ptr) get_bo_offset.offset;
- newbo->size = lseek(fd, 0, SEEK_END);
- newbo->flags |= PAN_BO_DONT_REUSE | PAN_BO_IMPORTED;
- assert(newbo->size > 0);
- p_atomic_set(&newbo->refcnt, 1);
+ bo->dev = dev;
+ bo->gpu = (mali_ptr) get_bo_offset.offset;
+ bo->size = lseek(fd, 0, SEEK_END);
+ bo->flags = PAN_BO_DONT_REUSE | PAN_BO_IMPORTED;
+ bo->gem_handle = gem_handle;
+ assert(bo->size > 0);
+ p_atomic_set(&bo->refcnt, 1);
// TODO map and unmap on demand?
- panfrost_bo_mmap(newbo);
+ panfrost_bo_mmap(bo);
} else {
- ralloc_free(newbo);
/* bo->refcnt == 0 can happen if the BO
* was being released but panfrost_bo_import() acquired the
* lock before panfrost_bo_unreference(). In that case, refcnt
panfrost_bo_reference(bo);
assert(bo->cpu);
}
- pthread_mutex_unlock(&dev->active_bos_lock);
+ pthread_mutex_unlock(&dev->bo_map_lock);
return bo;
}
}
}
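
Because the sparse array keys BOs by GEM handle, importing a dma-buf that resolves to an already-known handle now lands on the existing slot instead of allocating a duplicate. A rough self-test sketch (hypothetical helper, using libdrm directly and assuming the export stays on the same device fd):

/* Sketch: exporting and re-importing on the same device fd must hand back
 * the canonical BO for that GEM handle. */
static void
check_reimport(struct panfrost_device *dev, struct panfrost_bo *bo)
{
        int prime_fd = -1;
        ASSERTED int ret =
                drmPrimeHandleToFD(dev->fd, bo->gem_handle, DRM_CLOEXEC, &prime_fd);
        assert(!ret);

        struct panfrost_bo *imported = panfrost_bo_import(dev, prime_fd);
        assert(imported == bo);      /* same dev->bo_map slot, refcnt bumped */

        close(prime_fd);
        panfrost_bo_unreference(imported);
}
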
-static uint32_t
-panfrost_active_bos_hash(const void *key)
-{
- const struct panfrost_bo *bo = key;
-
- return _mesa_hash_data(&bo->gem_handle, sizeof(bo->gem_handle));
-}
-
-static bool
-panfrost_active_bos_cmp(const void *keya, const void *keyb)
-{
- const struct panfrost_bo *a = keya, *b = keyb;
-
- return a->gem_handle == b->gem_handle;
-}
-
void
panfrost_open_device(void *memctx, int fd, struct panfrost_device *dev)
{
dev->kernel_version = drmGetVersion(fd);
dev->quirks = panfrost_get_quirks(dev->gpu_id);
- pthread_mutex_init(&dev->active_bos_lock, NULL);
- dev->active_bos = _mesa_set_create(memctx,
- panfrost_active_bos_hash, panfrost_active_bos_cmp);
-
util_sparse_array_init(&dev->bo_map, sizeof(struct panfrost_bo), 512);
pthread_mutex_init(&dev->bo_cache.lock, NULL);
{
panfrost_bo_cache_evict_all(dev);
pthread_mutex_destroy(&dev->bo_cache.lock);
- pthread_mutex_destroy(&dev->active_bos_lock);
drmFreeVersion(dev->kernel_version);
util_sparse_array_finish(&dev->bo_map);
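
For reference, util_sparse_array_init() takes the element size and the node size (elements per node, a power of two), and util_sparse_array_get() hands out lazily allocated, zero-filled slots; a standalone sketch of the same API, independent of this patch:

struct util_sparse_array map;
util_sparse_array_init(&map, sizeof(struct panfrost_bo), 512);

/* Slots are allocated on first access and come back zero-filled, which is
 * why an all-zero struct panfrost_bo doubles as "free slot" above. */
struct panfrost_bo *slot = util_sparse_array_get(&map, 42);
assert(slot->dev == NULL);

util_sparse_array_finish(&map);   /* frees every node in one pass */
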