#ifndef ETIME
#define ETIME ETIMEDOUT
#endif
+#include "common/gen_clflush.h"
#include "common/gen_debug.h"
#include "common/gen_device_info.h"
#include "libdrm_macros.h"
#define VG(x)
#endif
+/* VALGRIND_FREELIKE_BLOCK unfortunately does not actually undo the earlier
+ * VALGRIND_MALLOCLIKE_BLOCK but instead leaves vg convinced the memory is
+ * leaked. All because it does not call VG(cli_free) from its
+ * VG_USERREQ__FREELIKE_BLOCK handler. Instead of treating the memory like
+ * an allocation, we mark it as available for use upon mmapping and remove
+ * it upon unmapping.
+ */
+#define VG_DEFINED(ptr, size) VG(VALGRIND_MAKE_MEM_DEFINED(ptr, size))
+#define VG_NOACCESS(ptr, size) VG(VALGRIND_MAKE_MEM_NOACCESS(ptr, size))
+
#define memclear(s) memset(&s, 0, sizeof(s))
#define FILE_DEBUG_FLAG DEBUG_BUFMGR
struct hash_table *name_table;
struct hash_table *handle_table;
- unsigned int has_llc:1;
- unsigned int bo_reuse:1;
+ bool has_llc:1;
+ bool has_mmap_wc:1;
+ bool bo_reuse:1;
};
static int bo_set_tiling_internal(struct brw_bo *bo, uint32_t tiling_mode,
if (ret == 0) {
bo->idle = !busy.busy;
return busy.busy;
- } else {
- return false;
}
- return (ret == 0 && busy.busy);
+ return false;
}
int
bo_alloc_internal(struct brw_bufmgr *bufmgr,
const char *name,
uint64_t size,
- unsigned long flags,
+ unsigned flags,
uint32_t tiling_mode,
uint32_t stride, uint64_t alignment)
{
bool alloc_from_cache;
uint64_t bo_size;
bool for_render = false;
+ bool zeroed = false;
if (flags & BO_ALLOC_FOR_RENDER)
for_render = true;
+ if (flags & BO_ALLOC_ZEROED)
+ zeroed = true;
+
+ /* FOR_RENDER really means "I'm ok with a busy BO". This doesn't really
+ * jibe with ZEROED, as we have to wait for the BO to be idle before we can
+ * memset. Just disallow that combination.
+ */
+ assert(!(for_render && zeroed));
+
/* Round the allocated size up to a power of two number of pages. */
bucket = bucket_for_size(bufmgr, size);
retry:
alloc_from_cache = false;
if (bucket != NULL && !list_empty(&bucket->head)) {
- if (for_render) {
+ if (for_render && !zeroed) {
/* Allocate new render-target BOs from the tail (MRU)
* of the list, as it will likely be hot in the GPU
- * cache and in the aperture for us.
+ * cache and in the aperture for us. If the caller
+ * asked us to zero the buffer, we don't want this
+ * because we are going to mmap it.
*/
bo = LIST_ENTRY(struct brw_bo, bucket->head.prev, head);
list_del(&bo->head);
bo_free(bo);
goto retry;
}
+
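+ /* BOs taken from the cache may still hold whatever their previous owner
+ * wrote, so clear them before handing them back out. MAP_RAW gives us a
+ * direct (non-detiling) mapping of the backing pages to memset.
+ */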
+ if (zeroed) {
+ void *map = brw_bo_map(NULL, bo, MAP_WRITE | MAP_RAW);
+ if (!map) {
+ bo_free(bo);
+ goto retry;
+ }
+ memset(map, 0, bo_size);
+ }
}
}
goto err;
bo->size = bo_size;
+ bo->idle = true;
memclear(create);
create.size = bo_size;
+ /* All new BOs we get from the kernel are zeroed, so we don't need to
+ * worry about that here.
+ */
ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CREATE, &create);
if (ret != 0) {
free(bo);
if (bo_set_tiling_internal(bo, tiling_mode, stride))
goto err_free;
+
+ /* Calling set_domain() will allocate pages for the BO outside of the
+ * struct mutex lock in the kernel, which is more efficient than waiting
+ * to create them during the first execbuf that uses the BO.
+ */
+ struct drm_i915_gem_set_domain sd = {
+ .handle = bo->gem_handle,
+ .read_domains = I915_GEM_DOMAIN_CPU,
+ .write_domain = 0,
+ };
+
+ if (drmIoctl(bo->bufmgr->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd) != 0)
+ goto err_free;
}
bo->name = name;
p_atomic_set(&bo->refcount, 1);
bo->reusable = true;
+ bo->cache_coherent = bufmgr->has_llc;
pthread_mutex_unlock(&bufmgr->lock);
struct brw_bo *
brw_bo_alloc_tiled(struct brw_bufmgr *bufmgr, const char *name,
- int x, int y, int cpp, uint32_t tiling,
- uint32_t *pitch, unsigned long flags)
+ uint64_t size, uint32_t tiling_mode, uint32_t pitch,
+ unsigned flags)
+{
+ return bo_alloc_internal(bufmgr, name, size, flags, tiling_mode, pitch, 0);
+}
+
+struct brw_bo *
+brw_bo_alloc_tiled_2d(struct brw_bufmgr *bufmgr, const char *name,
+ int x, int y, int cpp, uint32_t tiling,
+ uint32_t *pitch, unsigned flags)
{
uint64_t size;
uint32_t stride;
bo->size = open_arg.size;
bo->offset64 = 0;
- bo->virtual = NULL;
bo->bufmgr = bufmgr;
bo->gem_handle = open_arg.handle;
bo->name = name;
bo->global_name = handle;
bo->reusable = false;
+ bo->external = true;
_mesa_hash_table_insert(bufmgr->handle_table, &bo->gem_handle, bo);
_mesa_hash_table_insert(bufmgr->name_table, &bo->global_name, bo);
struct hash_entry *entry;
int ret;
- if (bo->mem_virtual) {
- VG(VALGRIND_FREELIKE_BLOCK(bo->mem_virtual, 0));
- drm_munmap(bo->mem_virtual, bo->size);
+ if (bo->map_cpu) {
+ VG_NOACCESS(bo->map_cpu, bo->size);
+ drm_munmap(bo->map_cpu, bo->size);
}
- if (bo->wc_virtual) {
- VG(VALGRIND_FREELIKE_BLOCK(bo->wc_virtual, 0));
- drm_munmap(bo->wc_virtual, bo->size);
+ if (bo->map_wc) {
+ VG_NOACCESS(bo->map_wc, bo->size);
+ drm_munmap(bo->map_wc, bo->size);
}
- if (bo->gtt_virtual) {
- drm_munmap(bo->gtt_virtual, bo->size);
+ if (bo->map_gtt) {
+ VG_NOACCESS(bo->map_gtt, bo->size);
+ drm_munmap(bo->map_gtt, bo->size);
}
if (bo->global_name) {
free(bo);
}
-static void
-bo_mark_mmaps_incoherent(struct brw_bo *bo)
-{
-#if HAVE_VALGRIND
- if (bo->mem_virtual)
- VALGRIND_MAKE_MEM_NOACCESS(bo->mem_virtual, bo->size);
-
- if (bo->wc_virtual)
- VALGRIND_MAKE_MEM_NOACCESS(bo->wc_virtual, bo->size);
-
- if (bo->gtt_virtual)
- VALGRIND_MAKE_MEM_NOACCESS(bo->gtt_virtual, bo->size);
-#endif
-}
-
/** Frees all cached buffers significantly older than @time. */
static void
cleanup_bo_cache(struct brw_bufmgr *bufmgr, time_t time)
DBG("bo_unreference final: %d (%s)\n", bo->gem_handle, bo->name);
- /* Clear any left-over mappings */
- if (bo->map_count) {
- DBG("bo freed with non-zero map-count %d\n", bo->map_count);
- bo->map_count = 0;
- bo_mark_mmaps_incoherent(bo);
- }
-
bucket = bucket_for_size(bufmgr, bo->size);
/* Put the buffer into our internal cache for reuse if we can. */
if (bufmgr->bo_reuse && bo->reusable && bucket != NULL &&
bo->free_time = time;
bo->name = NULL;
+ bo->kflags = 0;
list_addtail(&bo->head, &bucket->head);
} else {
}
static void
-set_domain(struct brw_context *brw, const char *action,
- struct brw_bo *bo, uint32_t read_domains, uint32_t write_domain)
+bo_wait_with_stall_warning(struct brw_context *brw,
+ struct brw_bo *bo,
+ const char *action)
{
- struct drm_i915_gem_set_domain sd = {
- .handle = bo->gem_handle,
- .read_domains = read_domains,
- .write_domain = write_domain,
- };
-
double elapsed = unlikely(brw && brw->perf_debug) ? -get_time() : 0.0;
- if (drmIoctl(bo->bufmgr->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd) != 0) {
- DBG("%s:%d: Error setting memory domains %d (%08x %08x): %s.\n",
- __FILE__, __LINE__, bo->gem_handle, read_domains, write_domain,
- strerror(errno));
- }
+ brw_bo_wait_rendering(bo);
if (unlikely(brw && brw->perf_debug)) {
elapsed += get_time();
}
}
-int
-brw_bo_map(struct brw_context *brw, struct brw_bo *bo, int write_enable)
+static void
+print_flags(unsigned flags)
+{
+ if (flags & MAP_READ)
+ DBG("READ ");
+ if (flags & MAP_WRITE)
+ DBG("WRITE ");
+ if (flags & MAP_ASYNC)
+ DBG("ASYNC ");
+ if (flags & MAP_PERSISTENT)
+ DBG("PERSISTENT ");
+ if (flags & MAP_COHERENT)
+ DBG("COHERENT ");
+ if (flags & MAP_RAW)
+ DBG("RAW ");
+ DBG("\n");
+}
+
+static void *
+brw_bo_map_cpu(struct brw_context *brw, struct brw_bo *bo, unsigned flags)
{
struct brw_bufmgr *bufmgr = bo->bufmgr;
- int ret;
- pthread_mutex_lock(&bufmgr->lock);
+ /* We disallow CPU maps for writing to non-coherent buffers, as the
+ * CPU map can become invalidated when a batch is flushed out, which
+ * can happen at unpredictable times. You should use WC maps instead.
+ */
+ assert(bo->cache_coherent || !(flags & MAP_WRITE));
- if (!bo->mem_virtual) {
+ if (!bo->map_cpu) {
struct drm_i915_gem_mmap mmap_arg;
+ void *map;
- DBG("bo_map: %d (%s), map_count=%d\n",
- bo->gem_handle, bo->name, bo->map_count);
+ DBG("brw_bo_map_cpu: %d (%s)\n", bo->gem_handle, bo->name);
memclear(mmap_arg);
mmap_arg.handle = bo->gem_handle;
mmap_arg.size = bo->size;
- ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
+ int ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
if (ret != 0) {
ret = -errno;
DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
__FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
- pthread_mutex_unlock(&bufmgr->lock);
- return ret;
+ return NULL;
+ }
+ map = (void *) (uintptr_t) mmap_arg.addr_ptr;
+ VG_DEFINED(map, bo->size);
+
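+ /* We no longer hold the bufmgr lock while mmapping, so another thread
+ * may have installed a CPU mapping first; if so, unmap ours and use the
+ * one already stored in bo->map_cpu.
+ */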
+ if (p_atomic_cmpxchg(&bo->map_cpu, NULL, map)) {
+ VG_NOACCESS(map, bo->size);
+ drm_munmap(map, bo->size);
}
- bo->map_count++;
- VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
- bo->mem_virtual = (void *) (uintptr_t) mmap_arg.addr_ptr;
}
- DBG("bo_map: %d (%s) -> %p\n", bo->gem_handle, bo->name, bo->mem_virtual);
- bo->virtual = bo->mem_virtual;
+ assert(bo->map_cpu);
- set_domain(brw, "CPU mapping", bo, I915_GEM_DOMAIN_CPU,
- write_enable ? I915_GEM_DOMAIN_CPU : 0);
+ DBG("brw_bo_map_cpu: %d (%s) -> %p, ", bo->gem_handle, bo->name,
+ bo->map_cpu);
+ print_flags(flags);
- bo_mark_mmaps_incoherent(bo);
- VG(VALGRIND_MAKE_MEM_DEFINED(bo->mem_virtual, bo->size));
- pthread_mutex_unlock(&bufmgr->lock);
+ if (!(flags & MAP_ASYNC)) {
+ bo_wait_with_stall_warning(brw, bo, "CPU mapping");
+ }
- return 0;
+ if (!bo->cache_coherent) {
+ /* If we're reusing an existing CPU mapping, the CPU caches may
+ * contain stale data from the last time we read from that mapping.
+ * (With the BO cache, it might even be data from a previous buffer!)
+ * Even if it's a brand new mapping, the kernel may have zeroed the
+ * buffer via CPU writes.
+ *
+ * We need to invalidate those cachelines so that we see the latest
+ * contents, and so long as we only read from the CPU mmap we do not
+ * need to write those cachelines back afterwards.
+ */
+ gen_invalidate_range(bo->map_cpu, bo->size);
+ }
+
+ return bo->map_cpu;
}
-static int
-map_gtt(struct brw_bo *bo)
+static void *
+brw_bo_map_wc(struct brw_context *brw, struct brw_bo *bo, unsigned flags)
{
struct brw_bufmgr *bufmgr = bo->bufmgr;
- int ret;
- /* Get a mapping of the buffer if we haven't before. */
- if (bo->gtt_virtual == NULL) {
- struct drm_i915_gem_mmap_gtt mmap_arg;
+ if (!bufmgr->has_mmap_wc)
+ return NULL;
+
+ if (!bo->map_wc) {
+ struct drm_i915_gem_mmap mmap_arg;
+ void *map;
- DBG("bo_map_gtt: mmap %d (%s), map_count=%d\n",
- bo->gem_handle, bo->name, bo->map_count);
+ DBG("brw_bo_map_wc: %d (%s)\n", bo->gem_handle, bo->name);
memclear(mmap_arg);
mmap_arg.handle = bo->gem_handle;
-
- /* Get the fake offset back... */
- ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg);
+ mmap_arg.size = bo->size;
+ mmap_arg.flags = I915_MMAP_WC;
+ int ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
if (ret != 0) {
ret = -errno;
- DBG("%s:%d: Error preparing buffer map %d (%s): %s .\n",
+ DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
__FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
- return ret;
+ return NULL;
}
- /* and mmap it */
- bo->gtt_virtual = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE,
- MAP_SHARED, bufmgr->fd, mmap_arg.offset);
- if (bo->gtt_virtual == MAP_FAILED) {
- bo->gtt_virtual = NULL;
- ret = -errno;
- DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
- __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
- return ret;
+ map = (void *) (uintptr_t) mmap_arg.addr_ptr;
+ VG_DEFINED(map, bo->size);
+
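+ /* If another thread installed a WC mapping before our cmpxchg, discard
+ * ours and keep the existing bo->map_wc.
+ */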
+ if (p_atomic_cmpxchg(&bo->map_wc, NULL, map)) {
+ VG_NOACCESS(map, bo->size);
+ drm_munmap(map, bo->size);
}
}
+ assert(bo->map_wc);
- bo->map_count++;
- bo->virtual = bo->gtt_virtual;
+ DBG("brw_bo_map_wc: %d (%s) -> %p\n", bo->gem_handle, bo->name, bo->map_wc);
+ print_flags(flags);
- DBG("bo_map_gtt: %d (%s) -> %p\n", bo->gem_handle, bo->name,
- bo->gtt_virtual);
+ if (!(flags & MAP_ASYNC)) {
+ bo_wait_with_stall_warning(brw, bo, "WC mapping");
+ }
- return 0;
+ return bo->map_wc;
}
-int
-brw_bo_map_gtt(struct brw_context *brw, struct brw_bo *bo)
+static void *
+brw_bo_map_gtt(struct brw_context *brw, struct brw_bo *bo, unsigned flags)
{
struct brw_bufmgr *bufmgr = bo->bufmgr;
- int ret;
-
- pthread_mutex_lock(&bufmgr->lock);
- ret = map_gtt(bo);
- if (ret) {
- pthread_mutex_unlock(&bufmgr->lock);
- return ret;
- }
+ /* Get a mapping of the buffer if we haven't before. */
+ if (bo->map_gtt == NULL) {
+ struct drm_i915_gem_mmap_gtt mmap_arg;
+ void *map;
- /* Now move it to the GTT domain so that the GPU and CPU
- * caches are flushed and the GPU isn't actively using the
- * buffer.
- *
- * The pagefault handler does this domain change for us when
- * it has unbound the BO from the GTT, but it's up to us to
- * tell it when we're about to use things if we had done
- * rendering and it still happens to be bound to the GTT.
- */
- set_domain(brw, "GTT mapping", bo,
- I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
+ DBG("bo_map_gtt: mmap %d (%s)\n", bo->gem_handle, bo->name);
- bo_mark_mmaps_incoherent(bo);
- VG(VALGRIND_MAKE_MEM_DEFINED(bo->gtt_virtual, bo->size));
- pthread_mutex_unlock(&bufmgr->lock);
+ memclear(mmap_arg);
+ mmap_arg.handle = bo->gem_handle;
- return 0;
-}
+ /* Get the fake offset back... */
+ int ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg);
+ if (ret != 0) {
+ DBG("%s:%d: Error preparing buffer map %d (%s): %s .\n",
+ __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
+ return NULL;
+ }
-/**
- * Performs a mapping of the buffer object like the normal GTT
- * mapping, but avoids waiting for the GPU to be done reading from or
- * rendering to the buffer.
- *
- * This is used in the implementation of GL_ARB_map_buffer_range: The
- * user asks to create a buffer, then does a mapping, fills some
- * space, runs a drawing command, then asks to map it again without
- * synchronizing because it guarantees that it won't write over the
- * data that the GPU is busy using (or, more specifically, that if it
- * does write over the data, it acknowledges that rendering is
- * undefined).
- */
+ /* and mmap it. */
+ map = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE,
+ MAP_SHARED, bufmgr->fd, mmap_arg.offset);
+ if (map == MAP_FAILED) {
+ DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
+ __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
+ return NULL;
+ }
-int
-brw_bo_map_unsynchronized(struct brw_context *brw, struct brw_bo *bo)
-{
- struct brw_bufmgr *bufmgr = bo->bufmgr;
- int ret;
+ /* We don't need to use VALGRIND_MALLOCLIKE_BLOCK because Valgrind will
+ * already intercept this mmap call. However, for consistency between
+ * all the mmap paths, we mark the pointer as defined now and mark it
+ * as inaccessible afterwards.
+ */
+ VG_DEFINED(map, bo->size);
- /* If the CPU cache isn't coherent with the GTT, then use a
- * regular synchronized mapping. The problem is that we don't
- * track where the buffer was last used on the CPU side in
- * terms of brw_bo_map vs brw_bo_map_gtt, so
- * we would potentially corrupt the buffer even when the user
- * does reasonable things.
- */
- if (!bufmgr->has_llc)
- return brw_bo_map_gtt(brw, bo);
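+ /* As in the CPU and WC paths, another thread may have beaten us to the
+ * GTT mapping; unmap our copy and use the one already installed.
+ */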
+ if (p_atomic_cmpxchg(&bo->map_gtt, NULL, map)) {
+ VG_NOACCESS(map, bo->size);
+ drm_munmap(map, bo->size);
+ }
+ }
+ assert(bo->map_gtt);
- pthread_mutex_lock(&bufmgr->lock);
+ DBG("bo_map_gtt: %d (%s) -> %p, ", bo->gem_handle, bo->name, bo->map_gtt);
+ print_flags(flags);
- ret = map_gtt(bo);
- if (ret == 0) {
- bo_mark_mmaps_incoherent(bo);
- VG(VALGRIND_MAKE_MEM_DEFINED(bo->gtt_virtual, bo->size));
+ if (!(flags & MAP_ASYNC)) {
+ bo_wait_with_stall_warning(brw, bo, "GTT mapping");
}
- pthread_mutex_unlock(&bufmgr->lock);
-
- return ret;
+ return bo->map_gtt;
}
-int
-brw_bo_unmap(struct brw_bo *bo)
+static bool
+can_map_cpu(struct brw_bo *bo, unsigned flags)
{
- struct brw_bufmgr *bufmgr = bo->bufmgr;
- int ret = 0;
+ if (bo->cache_coherent)
+ return true;
- if (bo == NULL)
- return 0;
+ /* If PERSISTENT or COHERENT are set, the mmapping needs to remain valid
+ * across batch flushes where the kernel will change cache domains of the
+ * bo, invalidating continued access to the CPU mmap on non-LLC devices.
+ *
+ * Similarly, ASYNC typically means that the buffer will be accessed via
+ * both the CPU and the GPU simultaneously. Batches may be executed that
+ * use the BO even while it is mapped. While OpenGL technically disallows
+ * most drawing while non-persistent mappings are active, we may still use
+ * the GPU for blits or other operations, causing batches to happen at
+ * inconvenient times.
+ */
+ if (flags & (MAP_PERSISTENT | MAP_COHERENT | MAP_ASYNC))
+ return false;
- pthread_mutex_lock(&bufmgr->lock);
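+ /* Writing through a CPU map of a non-coherent BO would require explicit
+ * clflushes, so only allow read-only CPU maps here (this matches the
+ * assert in brw_bo_map_cpu).
+ */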
+ return !(flags & MAP_WRITE);
+}
- if (bo->map_count <= 0) {
- DBG("attempted to unmap an unmapped bo\n");
- pthread_mutex_unlock(&bufmgr->lock);
- /* Preserve the old behaviour of just treating this as a
- * no-op rather than reporting the error.
- */
- return 0;
- }
+void *
+brw_bo_map(struct brw_context *brw, struct brw_bo *bo, unsigned flags)
+{
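+ /* Tiled buffers need the GTT's fence-based detiling to present a linear
+ * view, unless the caller explicitly asked for the raw tiled layout
+ * with MAP_RAW.
+ */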
+ if (bo->tiling_mode != I915_TILING_NONE && !(flags & MAP_RAW))
+ return brw_bo_map_gtt(brw, bo, flags);
+
+ void *map;
+
+ if (can_map_cpu(bo, flags))
+ map = brw_bo_map_cpu(brw, bo, flags);
+ else
+ map = brw_bo_map_wc(brw, bo, flags);
- if (--bo->map_count == 0) {
- bo_mark_mmaps_incoherent(bo);
- bo->virtual = NULL;
+ /* Allow the attempt to fail by falling back to the GTT where necessary.
+ *
+ * Not every buffer can be mmapped directly using the CPU (or WC), for
+ * example buffers that wrap stolen memory or are imported from other
+ * devices. For those, we have little choice but to use a GTT mmapping.
+ * However, if we use a slow GTT mmapping for reads where we expected fast
+ * access, that order of magnitude difference in throughput will be clearly
+ * expressed by angry users.
+ *
+ * We skip MAP_RAW because we want to avoid map_gtt's fence detiling.
+ */
+ if (!map && !(flags & MAP_RAW)) {
+ perf_debug("Fallback GTT mapping for %s with access flags %x\n",
+ bo->name, flags);
+ map = brw_bo_map_gtt(brw, bo, flags);
}
- pthread_mutex_unlock(&bufmgr->lock);
- return ret;
+ return map;
}
int
return ret;
}
-int
-brw_bo_get_subdata(struct brw_bo *bo, uint64_t offset,
- uint64_t size, void *data)
-{
- struct brw_bufmgr *bufmgr = bo->bufmgr;
- struct drm_i915_gem_pread pread;
- int ret;
-
- memclear(pread);
- pread.handle = bo->gem_handle;
- pread.offset = offset;
- pread.size = size;
- pread.data_ptr = (uint64_t) (uintptr_t) data;
- ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_PREAD, &pread);
- if (ret != 0) {
- ret = -errno;
- DBG("%s:%d: Error reading data from buffer %d: "
- "(%"PRIu64" %"PRIu64") %s .\n",
- __FILE__, __LINE__, bo->gem_handle, offset, size, strerror(errno));
- }
-
- return ret;
-}
-
/** Waits for all GPU rendering with the object to have completed. */
void
-brw_bo_wait_rendering(struct brw_context *brw, struct brw_bo *bo)
+brw_bo_wait_rendering(struct brw_bo *bo)
{
- set_domain(brw, "waiting for",
- bo, I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
+ /* We require a kernel recent enough for WAIT_IOCTL support.
+ * See intel_init_bufmgr()
+ */
+ brw_bo_wait(bo, -1);
}
/**
struct drm_i915_gem_wait wait;
int ret;
+ /* If we know it's idle, don't bother with the kernel round trip */
+ if (bo->idle && !bo->external)
+ return 0;
+
memclear(wait);
wait.bo_handle = bo->gem_handle;
wait.timeout_ns = timeout_ns;
}
struct brw_bo *
-brw_bo_gem_create_from_prime(struct brw_bufmgr *bufmgr, int prime_fd,
- int size)
+brw_bo_gem_create_from_prime(struct brw_bufmgr *bufmgr, int prime_fd)
{
int ret;
uint32_t handle;
ret = lseek(prime_fd, 0, SEEK_END);
if (ret != -1)
bo->size = ret;
- else
- bo->size = size;
bo->bufmgr = bufmgr;
bo->name = "prime";
bo->reusable = false;
+ bo->external = true;
memclear(get_tiling);
get_tiling.handle = bo->gem_handle;
return -errno;
bo->reusable = false;
+ bo->external = true;
return 0;
}
if (!bo->global_name) {
bo->global_name = flink.name;
bo->reusable = false;
+ bo->external = true;
_mesa_hash_table_insert(bufmgr->name_table, &bo->global_name, bo);
}
return ret;
}
-void *
-brw_bo_map__gtt(struct brw_bo *bo)
-{
- struct brw_bufmgr *bufmgr = bo->bufmgr;
-
- if (bo->gtt_virtual)
- return bo->gtt_virtual;
-
- pthread_mutex_lock(&bufmgr->lock);
- if (bo->gtt_virtual == NULL) {
- struct drm_i915_gem_mmap_gtt mmap_arg;
- void *ptr;
-
- DBG("bo_map_gtt: mmap %d (%s), map_count=%d\n",
- bo->gem_handle, bo->name, bo->map_count);
-
- memclear(mmap_arg);
- mmap_arg.handle = bo->gem_handle;
-
- /* Get the fake offset back... */
- ptr = MAP_FAILED;
- if (drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg) == 0) {
- /* and mmap it */
- ptr = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE,
- MAP_SHARED, bufmgr->fd, mmap_arg.offset);
- }
- if (ptr == MAP_FAILED) {
- --bo->map_count;
- ptr = NULL;
- }
-
- bo->gtt_virtual = ptr;
- }
- pthread_mutex_unlock(&bufmgr->lock);
-
- return bo->gtt_virtual;
-}
-
-void *
-brw_bo_map__cpu(struct brw_bo *bo)
-{
- struct brw_bufmgr *bufmgr = bo->bufmgr;
-
- if (bo->mem_virtual)
- return bo->mem_virtual;
-
- pthread_mutex_lock(&bufmgr->lock);
- if (!bo->mem_virtual) {
- struct drm_i915_gem_mmap mmap_arg;
-
- DBG("bo_map: %d (%s), map_count=%d\n",
- bo->gem_handle, bo->name, bo->map_count);
-
- memclear(mmap_arg);
- mmap_arg.handle = bo->gem_handle;
- mmap_arg.size = bo->size;
- if (drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg)) {
- DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
- __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
- } else {
- bo->map_count++;
- VG(VALGRIND_MALLOCLIKE_BLOCK
- (mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
- bo->mem_virtual = (void *) (uintptr_t) mmap_arg.addr_ptr;
- }
- }
- pthread_mutex_unlock(&bufmgr->lock);
-
- return bo->mem_virtual;
-}
-
-void *
-brw_bo_map__wc(struct brw_bo *bo)
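+/* Query a DRM_IOCTL_I915_GETPARAM value from the kernel; returns -1 on error. */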
+static int
+gem_param(int fd, int name)
{
- struct brw_bufmgr *bufmgr = bo->bufmgr;
+ drm_i915_getparam_t gp;
+ int v = -1; /* No param uses (yet) the sign bit, reserve it for errors */
- if (bo->wc_virtual)
- return bo->wc_virtual;
-
- pthread_mutex_lock(&bufmgr->lock);
- if (!bo->wc_virtual) {
- struct drm_i915_gem_mmap mmap_arg;
-
- DBG("bo_map: %d (%s), map_count=%d\n",
- bo->gem_handle, bo->name, bo->map_count);
-
- memclear(mmap_arg);
- mmap_arg.handle = bo->gem_handle;
- mmap_arg.size = bo->size;
- mmap_arg.flags = I915_MMAP_WC;
- if (drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg)) {
- DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
- __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
- } else {
- bo->map_count++;
- VG(VALGRIND_MALLOCLIKE_BLOCK
- (mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
- bo->wc_virtual = (void *) (uintptr_t) mmap_arg.addr_ptr;
- }
- }
- pthread_mutex_unlock(&bufmgr->lock);
+ memset(&gp, 0, sizeof(gp));
+ gp.param = name;
+ gp.value = &v;
+ if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
+ return -1;
- return bo->wc_virtual;
+ return v;
}
/**
}
bufmgr->has_llc = devinfo->has_llc;
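+ /* MMAP_VERSION >= 1 means the kernel understands the flags field of the
+ * mmap ioctl and therefore supports I915_MMAP_WC.
+ */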
+ bufmgr->has_mmap_wc = gem_param(fd, I915_PARAM_MMAP_VERSION) > 0;
init_cache_buckets(bufmgr);