static int verbose = 0;
static bool device_override;
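+/* Buffer objects are now tracked per DRM file descriptor; MAX_FD_COUNT bounds how many descriptors fit in the flat bos[] table. */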
+#define MAX_FD_COUNT 64
#define MAX_BO_COUNT 64 * 1024
struct bo {
};
static struct bo *
-get_bo(uint32_t handle)
+get_bo(unsigned fd, uint32_t handle)
{
struct bo *bo;
fail_if(handle >= MAX_BO_COUNT, "bo handle too large\n");
- bo = &bos[handle];
+ fail_if(fd >= MAX_FD_COUNT, "bo fd too large\n");
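+ /* Each fd owns a MAX_BO_COUNT-sized slice of the flat bos[] array. */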
+ bo = &bos[handle + fd * MAX_BO_COUNT];
return bo;
}
static struct aub_file aub_file;
static void *
-relocate_bo(struct bo *bo, const struct drm_i915_gem_execbuffer2 *execbuffer2,
+relocate_bo(int fd, struct bo *bo, const struct drm_i915_gem_execbuffer2 *execbuffer2,
const struct drm_i915_gem_exec_object2 *obj)
{
const struct drm_i915_gem_exec_object2 *exec_objects =
handle = relocs[i].target_handle;
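+ /* Relocation targets are resolved against the BO table of the fd that issued the execbuffer. */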
aub_write_reloc(&devinfo, ((char *)relocated) + relocs[i].offset,
- get_bo(handle)->offset + relocs[i].delta);
+ get_bo(fd, handle)->offset + relocs[i].delta);
}
return relocated;
for (uint32_t i = 0; i < execbuffer2->buffer_count; i++) {
obj = &exec_objects[i];
- bo = get_bo(obj->handle);
+ bo = get_bo(fd, obj->handle);
/* If bo->size == 0, this means they passed us an invalid
* buffer. The kernel will reject it and so should we.
batch_index = (execbuffer2->flags & I915_EXEC_BATCH_FIRST) ? 0 :
execbuffer2->buffer_count - 1;
- batch_bo = get_bo(exec_objects[batch_index].handle);
+ batch_bo = get_bo(fd, exec_objects[batch_index].handle);
for (uint32_t i = 0; i < execbuffer2->buffer_count; i++) {
obj = &exec_objects[i];
- bo = get_bo(obj->handle);
+ bo = get_bo(fd, obj->handle);
if (obj->relocation_count > 0)
- data = relocate_bo(bo, execbuffer2, obj);
+ data = relocate_bo(fd, bo, execbuffer2, obj);
else
data = bo->map;
}
static void
-add_new_bo(int handle, uint64_t size, void *map)
+add_new_bo(unsigned fd, int handle, uint64_t size, void *map)
{
- struct bo *bo = &bos[handle];
fail_if(handle >= MAX_BO_COUNT, "bo handle out of range\n");
+ fail_if(fd >= MAX_FD_COUNT, "bo fd out of range\n");
fail_if(size == 0, "bo size is invalid\n");
+ struct bo *bo = &bos[handle + fd * MAX_BO_COUNT];
bo->size = size;
}
static void
-remove_bo(int handle)
+remove_bo(int fd, int handle)
{
- struct bo *bo = get_bo(handle);
+ struct bo *bo = get_bo(fd, handle);
if (bo->map && !IS_USERPTR(bo->map))
munmap(bo->map, bo->size);
}
fclose(config);
- bos = calloc(MAX_BO_COUNT, sizeof(bos[0]));
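+ /* Reserve a MAX_BO_COUNT slice of the table for every possible fd. */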
+ bos = calloc(MAX_FD_COUNT * MAX_BO_COUNT, sizeof(bos[0]));
fail_if(bos == NULL, "out of memory\n");
}
ret = libc_ioctl(fd, request, argp);
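+ /* On success, start tracking the newly created BO under this fd. */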
if (ret == 0)
- add_new_bo(create->handle, create->size, NULL);
+ add_new_bo(fd, create->handle, create->size, NULL);
return ret;
}
ret = libc_ioctl(fd, request, argp);
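+ /* Store the user pointer tagged with USERPTR_FLAG so remove_bo() knows not to munmap() it. */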
if (ret == 0)
- add_new_bo(userptr->handle, userptr->user_size,
+ add_new_bo(fd, userptr->handle, userptr->user_size,
(void *) (uintptr_t) (userptr->user_ptr | USERPTR_FLAG));
return ret;
}
case DRM_IOCTL_GEM_CLOSE: {
struct drm_gem_close *close = argp;
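+ /* Forget the BO on this fd before forwarding the close to the kernel. */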
- remove_bo(close->handle);
+ remove_bo(fd, close->handle);
return libc_ioctl(fd, request, argp);
}
ret = libc_ioctl(fd, request, argp);
if (ret == 0)
- add_new_bo(open->handle, open->size, NULL);
+ add_new_bo(fd, open->handle, open->size, NULL);
return ret;
}
size = lseek(prime->fd, 0, SEEK_END);
fail_if(size == -1, "failed to get prime bo size\n");
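+ /* Imported prime BOs are tracked with their size only; no CPU mapping yet. */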
- add_new_bo(prime->handle, size, NULL);
+ add_new_bo(fd, prime->handle, size, NULL);
}
return ret;