#ifndef ETIME
#define ETIME ETIMEDOUT
#endif
+#include "common/gen_clflush.h"
#include "common/gen_debug.h"
#include "common/gen_device_info.h"
#include "libdrm_macros.h"
#define VG(x)
#endif
+/* VALGRIND_FREELIKE_BLOCK unfortunately does not actually undo the earlier
+ * VALGRIND_MALLOCLIKE_BLOCK, but instead leaves Valgrind convinced the
+ * memory is leaked. All because it does not call VG(cli_free) from its
+ * VG_USERREQ__FREELIKE_BLOCK handler. Instead of treating the memory like
+ * an allocation, we mark it as available for use upon mmapping and remove
+ * that availability upon unmapping.
+ */
+#define VG_DEFINED(ptr, size) VG(VALGRIND_MAKE_MEM_DEFINED(ptr, size))
+#define VG_NOACCESS(ptr, size) VG(VALGRIND_MAKE_MEM_NOACCESS(ptr, size))
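+
+/* For illustration, the mmap and munmap paths below pair these macros
+ * like so:
+ *
+ *    map = (void *) (uintptr_t) mmap_arg.addr_ptr;
+ *    VG_DEFINED(map, bo->size);
+ *    ...
+ *    VG_NOACCESS(map, bo->size);
+ *    drm_munmap(map, bo->size);
+ */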
+
#define memclear(s) memset(&s, 0, sizeof(s))
#define FILE_DEBUG_FLAG DEBUG_BUFMGR
struct hash_table *handle_table;
bool has_llc:1;
+ bool has_mmap_wc:1;
bool bo_reuse:1;
};
bool alloc_from_cache;
uint64_t bo_size;
bool for_render = false;
+ bool zeroed = false;
if (flags & BO_ALLOC_FOR_RENDER)
for_render = true;
+ if (flags & BO_ALLOC_ZEROED)
+ zeroed = true;
+
+ /* FOR_RENDER really means "I'm ok with a busy BO". That doesn't jibe
+ * with ZEROED, as we have to wait for the BO to be idle before we can
+ * memset it. Just disallow that combination.
+ */
+ assert(!(for_render && zeroed));
+
/* Round the allocated size up to a power of two number of pages. */
bucket = bucket_for_size(bufmgr, size);
retry:
alloc_from_cache = false;
if (bucket != NULL && !list_empty(&bucket->head)) {
- if (for_render) {
+ if (for_render && !zeroed) {
/* Allocate new render-target BOs from the tail (MRU)
* of the list, as it will likely be hot in the GPU
- * cache and in the aperture for us.
+ * cache and in the aperture for us. If the caller
+ * asked us to zero the buffer, we don't want this
+ * because we are going to mmap it.
*/
bo = LIST_ENTRY(struct brw_bo, bucket->head.prev, head);
list_del(&bo->head);
bo_free(bo);
goto retry;
}
+
+ if (zeroed) {
+ void *map = brw_bo_map(NULL, bo, MAP_WRITE | MAP_RAW);
+ if (!map) {
+ bo_free(bo);
+ goto retry;
+ }
+ memset(map, 0, bo_size);
+ }
}
}
memclear(create);
create.size = bo_size;
+ /* All new BOs we get from the kernel are zeroed, so we don't need to
+ * worry about that here.
+ */
ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CREATE, &create);
if (ret != 0) {
free(bo);
if (bo_set_tiling_internal(bo, tiling_mode, stride))
goto err_free;
+
+ /* Calling set_domain() will allocate pages for the BO outside of the
+ * kernel's struct_mutex lock, which is more efficient than waiting
+ * to create them during the first execbuf that uses the BO.
+ */
+ struct drm_i915_gem_set_domain sd = {
+ .handle = bo->gem_handle,
+ .read_domains = I915_GEM_DOMAIN_CPU,
+ .write_domain = 0,
+ };
+
+ if (drmIoctl(bo->bufmgr->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd) != 0)
+ goto err_free;
}
bo->name = name;
int ret;
if (bo->map_cpu) {
- VG(VALGRIND_FREELIKE_BLOCK(bo->map_cpu, 0));
+ VG_NOACCESS(bo->map_cpu, bo->size);
drm_munmap(bo->map_cpu, bo->size);
}
if (bo->map_wc) {
- VG(VALGRIND_FREELIKE_BLOCK(bo->map_wc, 0));
+ VG_NOACCESS(bo->map_wc, bo->size);
drm_munmap(bo->map_wc, bo->size);
}
if (bo->map_gtt) {
+ VG_NOACCESS(bo->map_gtt, bo->size);
drm_munmap(bo->map_gtt, bo->size);
}
}
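+/* Block until any GPU work using @bo completes, and if perf_debug is
+ * enabled, warn about how long we stalled doing so.
+ */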
static void
-set_domain(struct brw_context *brw, const char *action,
- struct brw_bo *bo, uint32_t read_domains, uint32_t write_domain)
+bo_wait_with_stall_warning(struct brw_context *brw,
+ struct brw_bo *bo,
+ const char *action)
{
- struct drm_i915_gem_set_domain sd = {
- .handle = bo->gem_handle,
- .read_domains = read_domains,
- .write_domain = write_domain,
- };
-
double elapsed = unlikely(brw && brw->perf_debug) ? -get_time() : 0.0;
- if (drmIoctl(bo->bufmgr->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd) != 0) {
- DBG("%s:%d: Error setting memory domains %d (%08x %08x): %s.\n",
- __FILE__, __LINE__, bo->gem_handle, read_domains, write_domain,
- strerror(errno));
- }
+ brw_bo_wait_rendering(bo);
if (unlikely(brw && brw->perf_debug)) {
elapsed += get_time();
__FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
return NULL;
}
- VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
map = (void *) (uintptr_t) mmap_arg.addr_ptr;
+ VG_DEFINED(map, bo->size);
if (p_atomic_cmpxchg(&bo->map_cpu, NULL, map)) {
- VG(VALGRIND_FREELIKE_BLOCK(map, 0));
+ VG_NOACCESS(map, bo->size);
drm_munmap(map, bo->size);
}
}
+ assert(bo->map_cpu);
+
DBG("brw_bo_map_cpu: %d (%s) -> %p, ", bo->gem_handle, bo->name,
bo->map_cpu);
print_flags(flags);
- if (!(flags & MAP_ASYNC) || !bufmgr->has_llc) {
- set_domain(brw, "CPU mapping", bo, I915_GEM_DOMAIN_CPU,
- flags & MAP_WRITE ? I915_GEM_DOMAIN_CPU : 0);
+ if (!(flags & MAP_ASYNC)) {
+ bo_wait_with_stall_warning(brw, bo, "CPU mapping");
+ }
+
+ if (!bo->cache_coherent) {
+ /* If we're reusing an existing CPU mapping, the CPU caches may
+ * contain stale data from the last time we read from that mapping.
+ * (With the BO cache, it might even be data from a previous buffer!)
+ * Even if it's a brand new mapping, the kernel may have zeroed the
+ * buffer via CPU writes.
+ *
+ * We need to invalidate those cachelines so that we see the latest
+ * contents, and so long as we only read from the CPU mmap we do not
+ * need to write those cachelines back afterwards.
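+ *
+ * (gen_invalidate_range() from gen_clflush.h is, roughly, a clflush of
+ * each cacheline in the range followed by a memory fence.)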
+ */
+ gen_invalidate_range(bo->map_cpu, bo->size);
}
return bo->map_cpu;
}
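+
+/* Map @bo with a write-combining (WC) CPU mapping. Returns NULL when the
+ * kernel lacks WC mmap support (see the I915_PARAM_MMAP_VERSION check at
+ * init), so the caller can fall back to another mapping path.
+ */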
+static void *
+brw_bo_map_wc(struct brw_context *brw, struct brw_bo *bo, unsigned flags)
+{
+ struct brw_bufmgr *bufmgr = bo->bufmgr;
+
+ if (!bufmgr->has_mmap_wc)
+ return NULL;
+
+ if (!bo->map_wc) {
+ struct drm_i915_gem_mmap mmap_arg;
+ void *map;
+
+ DBG("brw_bo_map_wc: %d (%s)\n", bo->gem_handle, bo->name);
+
+ memclear(mmap_arg);
+ mmap_arg.handle = bo->gem_handle;
+ mmap_arg.size = bo->size;
+ mmap_arg.flags = I915_MMAP_WC;
+ int ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
+ if (ret != 0) {
+ ret = -errno;
+ DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
+ __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
+ return NULL;
+ }
+
+ map = (void *) (uintptr_t) mmap_arg.addr_ptr;
+ VG_DEFINED(map, bo->size);
+
+ if (p_atomic_cmpxchg(&bo->map_wc, NULL, map)) {
+ VG_NOACCESS(map, bo->size);
+ drm_munmap(map, bo->size);
+ }
+ }
+ assert(bo->map_wc);
+
+ DBG("brw_bo_map_wc: %d (%s) -> %p\n", bo->gem_handle, bo->name, bo->map_wc);
+ print_flags(flags);
+
+ if (!(flags & MAP_ASYNC)) {
+ bo_wait_with_stall_warning(brw, bo, "WC mapping");
+ }
+
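+ /* Unlike the cached CPU mapping path, no clflush is needed before
+ * returning: WC mappings are uncached, so CPU reads always observe the
+ * buffer's current contents.
+ */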
+ return bo->map_wc;
+}
+
static void *
brw_bo_map_gtt(struct brw_context *brw, struct brw_bo *bo, unsigned flags)
{
if (ret != 0) {
DBG("%s:%d: Error preparing buffer map %d (%s): %s .\n",
__FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
- pthread_mutex_unlock(&bufmgr->lock);
return NULL;
}
- /* and mmap it. We don't need to use VALGRIND_MALLOCLIKE_BLOCK
- * because Valgrind will already intercept this mmap call.
- */
+ /* and mmap it. */
map = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE,
MAP_SHARED, bufmgr->fd, mmap_arg.offset);
if (map == MAP_FAILED) {
return NULL;
}
+ /* We don't need to use VALGRIND_MALLOCLIKE_BLOCK because Valgrind will
+ * already intercept this mmap call. However, for consistency between
+ * all the mmap paths, we mark the pointer as defined now and mark it
+ * as inaccessible afterwards.
+ */
+ VG_DEFINED(map, bo->size);
+
if (p_atomic_cmpxchg(&bo->map_gtt, NULL, map)) {
+ VG_NOACCESS(map, bo->size);
drm_munmap(map, bo->size);
}
}
+ assert(bo->map_gtt);
DBG("bo_map_gtt: %d (%s) -> %p, ", bo->gem_handle, bo->name, bo->map_gtt);
print_flags(flags);
- if (!(flags & MAP_ASYNC) || !bufmgr->has_llc) {
- set_domain(brw, "GTT mapping", bo,
- I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
+ if (!(flags & MAP_ASYNC)) {
+ bo_wait_with_stall_warning(brw, bo, "GTT mapping");
}
return bo->map_gtt;
{
if (bo->tiling_mode != I915_TILING_NONE && !(flags & MAP_RAW))
return brw_bo_map_gtt(brw, bo, flags);
- else if (can_map_cpu(bo, flags))
- return brw_bo_map_cpu(brw, bo, flags);
+
+ void *map;
+
+ if (can_map_cpu(bo, flags))
+ map = brw_bo_map_cpu(brw, bo, flags);
else
- return brw_bo_map_gtt(brw, bo, flags);
+ map = brw_bo_map_wc(brw, bo, flags);
+
+ /* Allow the attempt to fail by falling back to the GTT where necessary.
+ *
+ * Not every buffer can be mmapped directly using the CPU (or WC), for
+ * example buffers that wrap stolen memory or are imported from other
+ * devices. For those, we have little choice but to use a GTT mmapping.
+ * However, if we use a slow GTT mmapping for reads where we expected fast
+ * access, that order of magnitude difference in throughput will be clearly
+ * expressed by angry users.
+ *
+ * We skip MAP_RAW because we want to avoid map_gtt's fence detiling.
+ */
+ if (!map && !(flags & MAP_RAW)) {
+ perf_debug("Fallback GTT mapping for %s with access flags %x\n",
+ bo->name, flags);
+ map = brw_bo_map_gtt(brw, bo, flags);
+ }
+
+ return map;
}
int
return ret;
}
-int
-brw_bo_get_subdata(struct brw_bo *bo, uint64_t offset,
- uint64_t size, void *data)
-{
- struct brw_bufmgr *bufmgr = bo->bufmgr;
- struct drm_i915_gem_pread pread;
- int ret;
-
- memclear(pread);
- pread.handle = bo->gem_handle;
- pread.offset = offset;
- pread.size = size;
- pread.data_ptr = (uint64_t) (uintptr_t) data;
- ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_PREAD, &pread);
- if (ret != 0) {
- ret = -errno;
- DBG("%s:%d: Error reading data from buffer %d: "
- "(%"PRIu64" %"PRIu64") %s .\n",
- __FILE__, __LINE__, bo->gem_handle, offset, size, strerror(errno));
- }
-
- return ret;
-}
-
/** Waits for all GPU rendering against the object to have completed. */
void
brw_bo_wait_rendering(struct brw_bo *bo)
struct drm_i915_gem_wait wait;
int ret;
+ /* If we know it's idle, don't bother with the kernel round trip */
+ if (bo->idle && !bo->external)
+ return 0;
+
memclear(wait);
wait.bo_handle = bo->gem_handle;
wait.timeout_ns = timeout_ns;
return ret;
}
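+
+/* Read an i915 GETPARAM value (e.g. I915_PARAM_MMAP_VERSION below);
+ * returns -1 on error, as no current param uses the sign bit.
+ */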
+static int
+gem_param(int fd, int name)
+{
+ drm_i915_getparam_t gp;
+ int v = -1; /* No param uses (yet) the sign bit, reserve it for errors */
+
+ memset(&gp, 0, sizeof(gp));
+ gp.param = name;
+ gp.value = &v;
+ if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
+ return -1;
+
+ return v;
+}
+
/**
* Initializes the GEM buffer manager, which uses the kernel to allocate, map,
* and manage buffer objects.
}
bufmgr->has_llc = devinfo->has_llc;
+ bufmgr->has_mmap_wc = gem_param(fd, I915_PARAM_MMAP_VERSION) > 0;
init_cache_buckets(bufmgr);