struct gen_device_info;
-typedef struct _drm_bacon_bufmgr drm_bacon_bufmgr;
+struct brw_bufmgr;
typedef struct _drm_bacon_bo drm_bacon_bo;
struct _drm_bacon_bo {
#endif
/** Buffer manager context associated with this buffer object */
- drm_bacon_bufmgr *bufmgr;
+ struct brw_bufmgr *bufmgr;
/** The GEM handle for this buffer object. */
uint32_t gem_handle;
* address space or graphics device aperture. They must be mapped
* using bo_map() or drm_bacon_gem_bo_map_gtt() to be used by the CPU.
*/
-drm_bacon_bo *drm_bacon_bo_alloc(drm_bacon_bufmgr *bufmgr, const char *name,
+drm_bacon_bo *drm_bacon_bo_alloc(struct brw_bufmgr *bufmgr, const char *name,
unsigned long size, unsigned int alignment);
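/*
 * Illustrative note (not part of this patch): a minimal allocate/map/
 * write/release round trip.  Assumes a `bufmgr` from brw_bufmgr_init()
 * and the drm_bacon_bo_unreference() declared elsewhere in this header;
 * bo->virtual is only valid while the buffer is mapped.
 *
 *    drm_bacon_bo *bo = drm_bacon_bo_alloc(bufmgr, "scratch", 4096, 4096);
 *    if (bo != NULL && drm_bacon_bo_map(bo, 1) == 0) {
 *       memset(bo->virtual, 0, bo->size);
 *       drm_bacon_bo_unmap(bo);
 *    }
 *    drm_bacon_bo_unreference(bo);
 */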
/**
* Allocate a buffer object, hinting that it will be used as a
* render target.
*
* This is otherwise the same as bo_alloc.
*/
-drm_bacon_bo *drm_bacon_bo_alloc_for_render(drm_bacon_bufmgr *bufmgr,
+drm_bacon_bo *drm_bacon_bo_alloc_for_render(struct brw_bufmgr *bufmgr,
const char *name,
unsigned long size,
unsigned int alignment);
* 'tiling_mode' field on return, as well as the pitch value, which
* may have been rounded up to accommodate tiling restrictions.
*/
-drm_bacon_bo *drm_bacon_bo_alloc_tiled(drm_bacon_bufmgr *bufmgr,
+drm_bacon_bo *drm_bacon_bo_alloc_tiled(struct brw_bufmgr *bufmgr,
const char *name,
int x, int y, int cpp,
uint32_t *tiling_mode,
/**
* Tears down the buffer manager instance.
*/
-void drm_bacon_bufmgr_destroy(drm_bacon_bufmgr *bufmgr);
+void brw_bufmgr_destroy(struct brw_bufmgr *bufmgr);
/**
* Ask that the buffer be placed in tiling mode
int drm_bacon_bo_is_reusable(drm_bacon_bo *bo);
/* drm_bacon_bufmgr_gem.c */
-drm_bacon_bufmgr *drm_bacon_bufmgr_gem_init(struct gen_device_info *devinfo,
- int fd, int batch_size);
-drm_bacon_bo *drm_bacon_bo_gem_create_from_name(drm_bacon_bufmgr *bufmgr,
+struct brw_bufmgr *brw_bufmgr_init(struct gen_device_info *devinfo,
+ int fd, int batch_size);
+drm_bacon_bo *drm_bacon_bo_gem_create_from_name(struct brw_bufmgr *bufmgr,
const char *name,
unsigned int handle);
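/*
 * Illustrative note (not part of this patch): this import pairs with
 * drm_bacon_bo_flink(), which publishes a global name.  Sketch, error
 * handling omitted:
 *
 *    uint32_t name;
 *    drm_bacon_bo_flink(bo, &name);                     (exporting process)
 *    drm_bacon_bo *shared =
 *       drm_bacon_bo_gem_create_from_name(bufmgr, "shared", name);
 */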
-void drm_bacon_bufmgr_gem_enable_reuse(drm_bacon_bufmgr *bufmgr);
-void drm_bacon_bufmgr_gem_set_vma_cache_size(drm_bacon_bufmgr *bufmgr,
+void brw_bufmgr_enable_reuse(struct brw_bufmgr *bufmgr);
+void brw_bufmgr_gem_set_vma_cache_size(struct brw_bufmgr *bufmgr,
int limit);
int drm_bacon_gem_bo_map_unsynchronized(drm_bacon_bo *bo);
int drm_bacon_gem_bo_map_gtt(drm_bacon_bo *bo);
int drm_bacon_gem_bo_wait(drm_bacon_bo *bo, int64_t timeout_ns);
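/*
 * Illustrative note (not part of this patch): unlike polling
 * drm_bacon_bo_busy(), this wait is bounded.  Sketch, assuming the
 * kernel's -ETIME convention for a timed-out i915 GEM_WAIT:
 *
 *    int64_t one_ms = 1000000;                          (timeout in ns)
 *    bool still_busy = (drm_bacon_gem_bo_wait(bo, one_ms) == -ETIME);
 */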
-uint32_t brw_create_hw_context(drm_bacon_bufmgr *bufmgr);
-void brw_destroy_hw_context(drm_bacon_bufmgr *bufmgr, uint32_t ctx_id);
+uint32_t brw_create_hw_context(struct brw_bufmgr *bufmgr);
+void brw_destroy_hw_context(struct brw_bufmgr *bufmgr, uint32_t ctx_id);
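/*
 * Illustrative note (not part of this patch): one hardware context per
 * logical GL context, destroyed at teardown.  That a zero ctx_id means
 * failure is an assumption based on the create ioctl returning nonzero
 * ids:
 *
 *    uint32_t ctx_id = brw_create_hw_context(bufmgr);
 *    if (ctx_id == 0)
 *       abort();
 *    brw_destroy_hw_context(bufmgr, ctx_id);
 */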
int drm_bacon_bo_gem_export_to_prime(drm_bacon_bo *bo, int *prime_fd);
-drm_bacon_bo *drm_bacon_bo_gem_create_from_prime(drm_bacon_bufmgr *bufmgr,
+drm_bacon_bo *drm_bacon_bo_gem_create_from_prime(struct brw_bufmgr *bufmgr,
int prime_fd, int size);
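/*
 * Illustrative note (not part of this patch): the prime pair moves BOs
 * across processes via dma-buf file descriptors, which honor fd-passing
 * semantics rather than relying on globally guessable flink names.
 * Sketch (`other_bufmgr` is hypothetical):
 *
 *    int fd;
 *    if (drm_bacon_bo_gem_export_to_prime(bo, &fd) == 0) {
 *       drm_bacon_bo *imported =
 *          drm_bacon_bo_gem_create_from_prime(other_bufmgr, fd, bo->size);
 *       close(fd);                  (the import dups the underlying handle)
 *    }
 */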
-int drm_bacon_reg_read(drm_bacon_bufmgr *bufmgr,
+int drm_bacon_reg_read(struct brw_bufmgr *bufmgr,
uint32_t offset,
uint64_t *result);
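/*
 * Illustrative note (not part of this patch): i965 uses this to sample
 * the render ring TIMESTAMP register.  The 0x2358 offset is an
 * assumption for illustration; check the PRM for the target generation:
 *
 *    uint64_t ts;
 *    if (drm_bacon_reg_read(bufmgr, 0x2358, &ts) == 0)
 *       printf("timestamp: %" PRIu64 "\n", ts);
 */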
unsigned long size;
};
-typedef struct _drm_bacon_bufmgr {
+struct brw_bufmgr {
int fd;
pthread_mutex_t lock;
unsigned int has_llc : 1;
unsigned int bo_reuse : 1;
-} drm_bacon_bufmgr;
+};
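/*
 * Illustrative note (not part of this patch): with the typedef gone,
 * this definition is private to the .c file and callers only ever hold
 * an opaque `struct brw_bufmgr *`.  Sketch of the lifecycle (BATCH_SZ
 * stands in for the driver's batch size constant):
 *
 *    struct brw_bufmgr *bufmgr = brw_bufmgr_init(devinfo, fd, BATCH_SZ);
 *    if (bufmgr == NULL)
 *       return false;
 *    brw_bufmgr_destroy(bufmgr);
 */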
static int
bo_set_tiling_internal(drm_bacon_bo *bo, uint32_t tiling_mode, uint32_t stride);
}
static unsigned long
-bo_tile_size(drm_bacon_bufmgr *bufmgr, unsigned long size,
+bo_tile_size(struct brw_bufmgr *bufmgr, unsigned long size,
uint32_t *tiling_mode)
{
if (*tiling_mode == I915_TILING_NONE)
* change.
*/
static unsigned long
-bo_tile_pitch(drm_bacon_bufmgr *bufmgr,
+bo_tile_pitch(struct brw_bufmgr *bufmgr,
unsigned long pitch, uint32_t *tiling_mode)
{
unsigned long tile_width;
}
static struct bo_cache_bucket *
-bucket_for_size(drm_bacon_bufmgr *bufmgr, unsigned long size)
+bucket_for_size(struct brw_bufmgr *bufmgr, unsigned long size)
{
int i;
int
drm_bacon_bo_busy(drm_bacon_bo *bo)
{
- drm_bacon_bufmgr *bufmgr = bo->bufmgr;
+ struct brw_bufmgr *bufmgr = bo->bufmgr;
struct drm_i915_gem_busy busy;
int ret;
/* drop the oldest entries that have been purged by the kernel */
static void
-drm_bacon_gem_bo_cache_purge_bucket(drm_bacon_bufmgr *bufmgr,
+drm_bacon_gem_bo_cache_purge_bucket(struct brw_bufmgr *bufmgr,
struct bo_cache_bucket *bucket)
{
while (!list_empty(&bucket->head)) {
}
static drm_bacon_bo *
-bo_alloc_internal(drm_bacon_bufmgr *bufmgr,
+bo_alloc_internal(struct brw_bufmgr *bufmgr,
const char *name,
unsigned long size,
unsigned long flags,
}
drm_bacon_bo *
-drm_bacon_bo_alloc_for_render(drm_bacon_bufmgr *bufmgr,
+drm_bacon_bo_alloc_for_render(struct brw_bufmgr *bufmgr,
const char *name,
unsigned long size,
unsigned int alignment)
}
drm_bacon_bo *
-drm_bacon_bo_alloc(drm_bacon_bufmgr *bufmgr,
+drm_bacon_bo_alloc(struct brw_bufmgr *bufmgr,
const char *name,
unsigned long size,
unsigned int alignment)
}
drm_bacon_bo *
-drm_bacon_bo_alloc_tiled(drm_bacon_bufmgr *bufmgr, const char *name,
+drm_bacon_bo_alloc_tiled(struct brw_bufmgr *bufmgr, const char *name,
int x, int y, int cpp, uint32_t *tiling_mode,
unsigned long *pitch, unsigned long flags)
{
* to another.
*/
drm_bacon_bo *
-drm_bacon_bo_gem_create_from_name(drm_bacon_bufmgr *bufmgr,
+drm_bacon_bo_gem_create_from_name(struct brw_bufmgr *bufmgr,
const char *name,
unsigned int handle)
{
static void
bo_free(drm_bacon_bo *bo)
{
- drm_bacon_bufmgr *bufmgr = bo->bufmgr;
+ struct brw_bufmgr *bufmgr = bo->bufmgr;
struct drm_gem_close close;
struct hash_entry *entry;
int ret;
/** Frees all cached buffers significantly older than @time. */
static void
-cleanup_bo_cache(drm_bacon_bufmgr *bufmgr, time_t time)
+cleanup_bo_cache(struct brw_bufmgr *bufmgr, time_t time)
{
int i;
}
static void
-bo_purge_vma_cache(drm_bacon_bufmgr *bufmgr)
+bo_purge_vma_cache(struct brw_bufmgr *bufmgr)
{
int limit;
}
static void
-bo_close_vma(drm_bacon_bufmgr *bufmgr, drm_bacon_bo *bo)
+bo_close_vma(struct brw_bufmgr *bufmgr, drm_bacon_bo *bo)
{
bufmgr->vma_open--;
list_addtail(&bo->vma_list, &bufmgr->vma_cache);
}
static void
-bo_open_vma(drm_bacon_bufmgr *bufmgr, drm_bacon_bo *bo)
+bo_open_vma(struct brw_bufmgr *bufmgr, drm_bacon_bo *bo)
{
bufmgr->vma_open++;
list_del(&bo->vma_list);
static void
bo_unreference_final(drm_bacon_bo *bo, time_t time)
{
- drm_bacon_bufmgr *bufmgr = bo->bufmgr;
+ struct brw_bufmgr *bufmgr = bo->bufmgr;
struct bo_cache_bucket *bucket;
DBG("bo_unreference final: %d (%s)\n",
assert(p_atomic_read(&bo->refcount) > 0);
if (atomic_add_unless(&bo->refcount, -1, 1)) {
- drm_bacon_bufmgr *bufmgr = bo->bufmgr;
+ struct brw_bufmgr *bufmgr = bo->bufmgr;
struct timespec time;
clock_gettime(CLOCK_MONOTONIC, &time);
int
drm_bacon_bo_map(drm_bacon_bo *bo, int write_enable)
{
- drm_bacon_bufmgr *bufmgr = bo->bufmgr;
+ struct brw_bufmgr *bufmgr = bo->bufmgr;
struct drm_i915_gem_set_domain set_domain;
int ret;
static int
map_gtt(drm_bacon_bo *bo)
{
- drm_bacon_bufmgr *bufmgr = bo->bufmgr;
+ struct brw_bufmgr *bufmgr = bo->bufmgr;
int ret;
if (bo->map_count++ == 0)
int
drm_bacon_gem_bo_map_gtt(drm_bacon_bo *bo)
{
- drm_bacon_bufmgr *bufmgr = bo->bufmgr;
+ struct brw_bufmgr *bufmgr = bo->bufmgr;
struct drm_i915_gem_set_domain set_domain;
int ret;
int
drm_bacon_gem_bo_map_unsynchronized(drm_bacon_bo *bo)
{
- drm_bacon_bufmgr *bufmgr = bo->bufmgr;
+ struct brw_bufmgr *bufmgr = bo->bufmgr;
int ret;
/* If the CPU cache isn't coherent with the GTT, then use a
int
drm_bacon_bo_unmap(drm_bacon_bo *bo)
{
- drm_bacon_bufmgr *bufmgr = bo->bufmgr;
+ struct brw_bufmgr *bufmgr = bo->bufmgr;
int ret = 0;
if (bo == NULL)
drm_bacon_bo_subdata(drm_bacon_bo *bo, unsigned long offset,
unsigned long size, const void *data)
{
- drm_bacon_bufmgr *bufmgr = bo->bufmgr;
+ struct brw_bufmgr *bufmgr = bo->bufmgr;
struct drm_i915_gem_pwrite pwrite;
int ret;
drm_bacon_bo_get_subdata(drm_bacon_bo *bo, unsigned long offset,
unsigned long size, void *data)
{
- drm_bacon_bufmgr *bufmgr = bo->bufmgr;
+ struct brw_bufmgr *bufmgr = bo->bufmgr;
struct drm_i915_gem_pread pread;
int ret;
int
drm_bacon_gem_bo_wait(drm_bacon_bo *bo, int64_t timeout_ns)
{
- drm_bacon_bufmgr *bufmgr = bo->bufmgr;
+ struct brw_bufmgr *bufmgr = bo->bufmgr;
struct drm_i915_gem_wait wait;
int ret;
void
drm_bacon_gem_bo_start_gtt_access(drm_bacon_bo *bo, int write_enable)
{
- drm_bacon_bufmgr *bufmgr = bo->bufmgr;
+ struct brw_bufmgr *bufmgr = bo->bufmgr;
struct drm_i915_gem_set_domain set_domain;
int ret;
}
void
-drm_bacon_bufmgr_destroy(drm_bacon_bufmgr *bufmgr)
+brw_bufmgr_destroy(struct brw_bufmgr *bufmgr)
{
pthread_mutex_destroy(&bufmgr->lock);
static int
bo_set_tiling_internal(drm_bacon_bo *bo, uint32_t tiling_mode, uint32_t stride)
{
- drm_bacon_bufmgr *bufmgr = bo->bufmgr;
+ struct brw_bufmgr *bufmgr = bo->bufmgr;
struct drm_i915_gem_set_tiling set_tiling;
int ret;
}
drm_bacon_bo *
-drm_bacon_bo_gem_create_from_prime(drm_bacon_bufmgr *bufmgr, int prime_fd, int size)
+drm_bacon_bo_gem_create_from_prime(struct brw_bufmgr *bufmgr, int prime_fd, int size)
{
int ret;
uint32_t handle;
int
drm_bacon_bo_gem_export_to_prime(drm_bacon_bo *bo, int *prime_fd)
{
- drm_bacon_bufmgr *bufmgr = bo->bufmgr;
+ struct brw_bufmgr *bufmgr = bo->bufmgr;
if (drmPrimeHandleToFD(bufmgr->fd, bo->gem_handle,
DRM_CLOEXEC, prime_fd) != 0)
int
drm_bacon_bo_flink(drm_bacon_bo *bo, uint32_t *name)
{
- drm_bacon_bufmgr *bufmgr = bo->bufmgr;
+ struct brw_bufmgr *bufmgr = bo->bufmgr;
if (!bo->global_name) {
struct drm_gem_flink flink;
* in flight at once.
*/
void
-drm_bacon_bufmgr_gem_enable_reuse(drm_bacon_bufmgr *bufmgr)
+brw_bufmgr_enable_reuse(struct brw_bufmgr *bufmgr)
{
bufmgr->bo_reuse = true;
}
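/*
 * Illustrative note (not part of this patch): reuse is opt-in.  Once
 * enabled, freed buffers land in the size buckets instead of being
 * closed immediately, so drivers flip it on right after init (sketch):
 *
 *    struct brw_bufmgr *bufmgr = brw_bufmgr_init(devinfo, fd, BATCH_SZ);
 *    brw_bufmgr_enable_reuse(bufmgr);
 */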
}
static void
-add_bucket(drm_bacon_bufmgr *bufmgr, int size)
+add_bucket(struct brw_bufmgr *bufmgr, int size)
{
unsigned int i = bufmgr->num_buckets;
}
static void
-init_cache_buckets(drm_bacon_bufmgr *bufmgr)
+init_cache_buckets(struct brw_bufmgr *bufmgr)
{
unsigned long size, cache_max_size = 64 * 1024 * 1024;
}
void
-drm_bacon_bufmgr_gem_set_vma_cache_size(drm_bacon_bufmgr *bufmgr, int limit)
+brw_bufmgr_gem_set_vma_cache_size(struct brw_bufmgr *bufmgr, int limit)
{
bufmgr->vma_max = limit;
}
uint32_t
-brw_create_hw_context(drm_bacon_bufmgr *bufmgr)
+brw_create_hw_context(struct brw_bufmgr *bufmgr)
{
struct drm_i915_gem_context_create create;
int ret;
}
void
-brw_destroy_hw_context(drm_bacon_bufmgr *bufmgr, uint32_t ctx_id)
+brw_destroy_hw_context(struct brw_bufmgr *bufmgr, uint32_t ctx_id)
{
struct drm_i915_gem_context_destroy d = { .ctx_id = ctx_id };
}
int
-drm_bacon_reg_read(drm_bacon_bufmgr *bufmgr,
+drm_bacon_reg_read(struct brw_bufmgr *bufmgr,
uint32_t offset,
uint64_t *result)
{
void *drm_bacon_gem_bo_map__gtt(drm_bacon_bo *bo)
{
- drm_bacon_bufmgr *bufmgr = bo->bufmgr;
+ struct brw_bufmgr *bufmgr = bo->bufmgr;
if (bo->gtt_virtual)
return bo->gtt_virtual;
void *drm_bacon_gem_bo_map__cpu(drm_bacon_bo *bo)
{
- drm_bacon_bufmgr *bufmgr = bo->bufmgr;
+ struct brw_bufmgr *bufmgr = bo->bufmgr;
if (bo->mem_virtual)
return bo->mem_virtual;
void *drm_bacon_gem_bo_map__wc(drm_bacon_bo *bo)
{
- drm_bacon_bufmgr *bufmgr = bo->bufmgr;
+ struct brw_bufmgr *bufmgr = bo->bufmgr;
if (bo->wc_virtual)
return bo->wc_virtual;
*
* \param fd File descriptor of the opened DRM device.
*/
-drm_bacon_bufmgr *
-drm_bacon_bufmgr_gem_init(struct gen_device_info *devinfo,
- int fd, int batch_size)
+struct brw_bufmgr *
+brw_bufmgr_init(struct gen_device_info *devinfo, int fd, int batch_size)
{
- drm_bacon_bufmgr *bufmgr;
+ struct brw_bufmgr *bufmgr;
bufmgr = calloc(1, sizeof(*bufmgr));
if (bufmgr == NULL)