#define ETIME ETIMEDOUT
#endif
#include "common/gen_clflush.h"
-#include "common/gen_debug.h"
+#include "dev/gen_debug.h"
#include "common/gen_gem.h"
#include "dev/gen_device_info.h"
#include "main/macros.h"
return c == unless;
}
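+/**
+ * Return a human-readable name for an iris_memory_zone, for debug output.
+ */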
+static const char *
+memzone_name(enum iris_memory_zone memzone)
+{
+ const char *names[] = {
+ [IRIS_MEMZONE_SHADER] = "shader",
+ [IRIS_MEMZONE_BINDER] = "binder",
+ [IRIS_MEMZONE_SURFACE] = "surface",
+ [IRIS_MEMZONE_DYNAMIC] = "dynamic",
+ [IRIS_MEMZONE_OTHER] = "other",
+ [IRIS_MEMZONE_BORDER_COLOR_POOL] = "bordercolor",
+ };
+ assert(memzone < ARRAY_SIZE(names));
+ return names[memzone];
+}
+
/**
* Iris fixed-size bucketing VMA allocator.
*
&bufmgr->cache_bucket[index] : NULL;
}
-static enum iris_memory_zone
-memzone_for_address(uint64_t address)
+enum iris_memory_zone
+iris_memzone_for_address(uint64_t address)
{
STATIC_ASSERT(IRIS_MEMZONE_OTHER_START > IRIS_MEMZONE_DYNAMIC_START);
STATIC_ASSERT(IRIS_MEMZONE_DYNAMIC_START > IRIS_MEMZONE_SURFACE_START);
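+   /* The checks below rely on the binder zone sitting below the surface
+    * zone in the address space.
+    */
+   STATIC_ASSERT(IRIS_MEMZONE_SURFACE_START > IRIS_MEMZONE_BINDER_START);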
if (address > IRIS_MEMZONE_DYNAMIC_START)
return IRIS_MEMZONE_DYNAMIC;
- if (address > IRIS_MEMZONE_BINDER_START)
- return IRIS_MEMZONE_BINDER;
-
- if (address > IRIS_MEMZONE_SURFACE_START)
+ if (address >= IRIS_MEMZONE_SURFACE_START)
return IRIS_MEMZONE_SURFACE;
+ if (address >= IRIS_MEMZONE_BINDER_START)
+ return IRIS_MEMZONE_BINDER;
+
return IRIS_MEMZONE_SHADER;
}
static void
bucket_vma_free(struct bo_cache_bucket *bucket, uint64_t address)
{
- enum iris_memory_zone memzone = memzone_for_address(address);
+ enum iris_memory_zone memzone = iris_memzone_for_address(address);
struct util_dynarray *vma_list = &bucket->vma_list[memzone];
const uint64_t node_bytes = 64ull * bucket->size;
struct vma_bucket_node *node = NULL;
enum iris_memory_zone memzone,
uint64_t size)
{
- /* Bucketing is not worth using for binders...we'll never have 64... */
- if (memzone == IRIS_MEMZONE_BINDER)
- return NULL;
-
/* Skip using the bucket allocator for very large sizes: it allocates 64
 * blocks of the requested size at once, which can balloon rather quickly.
 */
if (memzone == IRIS_MEMZONE_BORDER_COLOR_POOL)
return IRIS_BORDER_COLOR_POOL_ADDRESS;
+ /* The binder handles its own allocations.  Return a non-zero address
+  * so the caller doesn't mistake this for an allocation failure.
+  */
+ if (memzone == IRIS_MEMZONE_BINDER)
+ return IRIS_MEMZONE_BINDER_START;
+
struct bo_cache_bucket *bucket =
get_bucket_allocator(bufmgr, memzone, size);
uint64_t addr;
if (address == 0ull)
return;
- enum iris_memory_zone memzone = memzone_for_address(address);
+ enum iris_memory_zone memzone = iris_memzone_for_address(address);
+
+ /* The binder handles its own allocations. */
+ if (memzone == IRIS_MEMZONE_BINDER)
+ return;
+
struct bo_cache_bucket *bucket =
get_bucket_allocator(bufmgr, memzone, size);
if (flags & BO_ALLOC_ZEROED)
zeroed = true;
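+ /* Coherent BOs on non-LLC platforms must be snooped, which is done via
+  * the SET_CACHING ioctl below.  BOs in the cache were never snooped
+  * (snooped BOs are marked non-reusable), so skip the cache entirely.
+  */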
+ if ((flags & BO_ALLOC_COHERENT) && !bufmgr->has_llc) {
+ bo_size = MAX2(ALIGN(size, page_size), page_size);
+ bucket = NULL;
+ goto skip_cache;
+ }
+
/* Round the allocated size up to a power of two number of pages. */
bucket = bucket_for_size(bufmgr, size);
/* If the cached BO isn't in the right memory zone, free the old
* memory and assign it a new address.
*/
- if (memzone != memzone_for_address(bo->gtt_offset)) {
- vma_free(bufmgr, bo->gtt_offset, bo_size);
+ if (memzone != iris_memzone_for_address(bo->gtt_offset)) {
+ vma_free(bufmgr, bo->gtt_offset, bo->size);
bo->gtt_offset = 0ull;
}
} else {
+skip_cache:
bo = bo_calloc();
if (!bo)
goto err;
mtx_unlock(&bufmgr->lock);
- DBG("bo_create: buf %d (%s) %llub\n", bo->gem_handle, bo->name,
- (unsigned long long) size);
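+ /* The caller asked for a coherent mapping, but this BO isn't naturally
+  * coherent (i.e. we're on non-LLC hardware), so ask the kernel to make
+  * the buffer snooped (CPU cached).
+  */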
+ if ((flags & BO_ALLOC_COHERENT) && !bo->cache_coherent) {
+ struct drm_i915_gem_caching arg = {
+ .handle = bo->gem_handle,
+ .caching = I915_CACHING_CACHED,
+ };
+ if (drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg) == 0) {
+ bo->cache_coherent = true;
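+ /* Snooped BOs must stay out of the (unsnooped) BO cache, so mark
+  * them non-reusable.
+  */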
+ bo->reusable = false;
+ }
+ }
+
+ DBG("bo_create: buf %d (%s) (%s memzone) %llub\n", bo->gem_handle,
+ bo->name, memzone_name(memzone), (unsigned long long) size);
return bo;
* most drawing while non-persistent mappings are active, we may still use
* the GPU for blits or other operations, causing batches to happen at
* inconvenient times.
+ *
+ * If RAW is set, we expect the caller to be able to handle a WC buffer
+ * more efficiently than the involuntary clflushes.
*/
- if (flags & (MAP_PERSISTENT | MAP_COHERENT | MAP_ASYNC))
+ if (flags & (MAP_PERSISTENT | MAP_COHERENT | MAP_ASYNC | MAP_RAW))
return false;
return !(flags & MAP_WRITE);
_mesa_hash_table_destroy(bufmgr->handle_table, NULL);
for (int z = 0; z < IRIS_MEMZONE_COUNT; z++) {
- util_vma_heap_finish(&bufmgr->vma_allocator[z]);
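+ /* The binder manages its own VMA, so its heap was never initialized
+  * (see iris_bufmgr_init) and must not be finished here.
+  */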
+ if (z != IRIS_MEMZONE_BINDER)
+ util_vma_heap_finish(&bufmgr->vma_allocator[z]);
}
free(bufmgr);
return NULL;
}
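+/**
+ * Add a BO to the handle table and mark it external.
+ *
+ * The caller must already hold bufmgr->lock.
+ */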
+static void
+iris_bo_make_external_locked(struct iris_bo *bo)
+{
+ if (!bo->external) {
+ _mesa_hash_table_insert(bo->bufmgr->handle_table, &bo->gem_handle, bo);
+ bo->external = true;
+ }
+}
+
static void
iris_bo_make_external(struct iris_bo *bo)
{
struct iris_bufmgr *bufmgr = bo->bufmgr;
- if (!bo->external) {
- mtx_lock(&bufmgr->lock);
- if (!bo->external) {
- _mesa_hash_table_insert(bufmgr->handle_table, &bo->gem_handle, bo);
- bo->external = true;
- }
- mtx_unlock(&bufmgr->lock);
- }
+ if (bo->external)
+ return;
+
+ mtx_lock(&bufmgr->lock);
+ iris_bo_make_external_locked(bo);
+ mtx_unlock(&bufmgr->lock);
}
int
if (drm_ioctl(bufmgr->fd, DRM_IOCTL_GEM_FLINK, &flink))
return -errno;
- iris_bo_make_external(bo);
mtx_lock(&bufmgr->lock);
if (!bo->global_name) {
+ iris_bo_make_external_locked(bo);
bo->global_name = flink.name;
_mesa_hash_table_insert(bufmgr->name_table, &bo->global_name, bo);
}
return ret;
}
+static uint64_t
+iris_gtt_size(int fd)
+{
+ /* We use the default (already allocated) context to determine
+ * the default configuration of the virtual address space.
+ */
+ struct drm_i915_gem_context_param p = {
+ .param = I915_CONTEXT_PARAM_GTT_SIZE,
+ };
+ if (!drm_ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p))
+ return p.value;
+
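+ /* The GETPARAM ioctl failed; return 0 so iris_bufmgr_init bails out. */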
+ return 0;
+}
+
/**
* Initializes the GEM buffer manager, which uses the kernel to allocate, map,
* and manage buffer objects.
struct iris_bufmgr *
iris_bufmgr_init(struct gen_device_info *devinfo, int fd)
{
+ uint64_t gtt_size = iris_gtt_size(fd);
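+ /* If the GTT is too small to hold our static memory zones, the fixed
+  * address layout below won't fit; bail out early.
+  */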
+ if (gtt_size <= IRIS_MEMZONE_OTHER_START)
+ return NULL;
+
struct iris_bufmgr *bufmgr = calloc(1, sizeof(*bufmgr));
if (bufmgr == NULL)
return NULL;
STATIC_ASSERT(IRIS_MEMZONE_SHADER_START == 0ull);
const uint64_t _4GB = 1ull << 32;
+ /* The STATE_BASE_ADDRESS size field can only hold 1 page shy of 4GB */
+ const uint64_t _4GB_minus_1 = _4GB - PAGE_SIZE;
+
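+ /* Start the shader zone at PAGE_SIZE rather than 0, as this file uses
+  * a zero address to mean "unset" (see vma_free and bo->gtt_offset).
+  */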
util_vma_heap_init(&bufmgr->vma_allocator[IRIS_MEMZONE_SHADER],
- PAGE_SIZE, _4GB);
- util_vma_heap_init(&bufmgr->vma_allocator[IRIS_MEMZONE_BINDER],
- IRIS_MEMZONE_BINDER_START,
- IRIS_MAX_BINDERS * IRIS_BINDER_SIZE);
+ PAGE_SIZE, _4GB_minus_1 - PAGE_SIZE);
util_vma_heap_init(&bufmgr->vma_allocator[IRIS_MEMZONE_SURFACE],
IRIS_MEMZONE_SURFACE_START,
- _4GB - IRIS_MAX_BINDERS * IRIS_BINDER_SIZE);
+ _4GB_minus_1 - IRIS_MAX_BINDERS * IRIS_BINDER_SIZE);
util_vma_heap_init(&bufmgr->vma_allocator[IRIS_MEMZONE_DYNAMIC],
IRIS_MEMZONE_DYNAMIC_START + IRIS_BORDER_COLOR_POOL_SIZE,
- _4GB - IRIS_BORDER_COLOR_POOL_SIZE);
+ _4GB_minus_1 - IRIS_BORDER_COLOR_POOL_SIZE);
util_vma_heap_init(&bufmgr->vma_allocator[IRIS_MEMZONE_OTHER],
IRIS_MEMZONE_OTHER_START,
- (1ull << 48) - IRIS_MEMZONE_OTHER_START);
+ gtt_size - IRIS_MEMZONE_OTHER_START);
// XXX: driconf
bufmgr->bo_reuse = env_var_as_boolean("bo_reuse", true);