#include <i915_drm.h>
#include <intel_bufmgr.h>
+#include "os/os_thread.h"
#include "state_tracker/drm_driver.h"
#include "pipe/p_state.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_debug.h"
-#include "../intel_winsys.h"
-
-#define BATCH_SZ (8192 * sizeof(uint32_t))
+#include "intel_drm_public.h"
struct intel_winsys {
int fd;
drm_intel_bufmgr *bufmgr;
struct intel_winsys_info info;
+ /* these are protected by the mutex */
+ pipe_mutex mutex;
+ drm_intel_context *first_gem_ctx;
struct drm_intel_decode *decode;
};
+static drm_intel_context *
+gem_ctx(const struct intel_context *ctx)
+{
+ return (drm_intel_context *) ctx;
+}
+
+static drm_intel_bo *
+gem_bo(const struct intel_bo *bo)
+{
+ return (drm_intel_bo *) bo;
+}
+
static bool
get_param(struct intel_winsys *winsys, int param, int *value)
{
}
static bool
-init_info(struct intel_winsys *winsys)
+test_reg_read(struct intel_winsys *winsys, uint32_t reg)
+{
+ uint64_t dummy;
+
+ return !drm_intel_reg_read(winsys->bufmgr, reg, &dummy);
+}
+
+static bool
+probe_winsys(struct intel_winsys *winsys)
{
struct intel_winsys_info *info = &winsys->info;
int val;
- /* follow the classic driver here */
+ /*
+ * When we need the Nth vertex from a user vertex buffer, and the vertex is
+ * uploaded to, say, the beginning of a bo, we want the first vertex in the
+ * bo to be fetched. One way to do this is to set the base address of the
+ * vertex buffer to
+ *
+ * bo->offset64 + (vb->buffer_offset - vb->stride * N).
+ *
+ * The second term may be negative, and we need kernel support to do that.
+ *
+ * This check is taken from the classic driver. u_vbuf_upload_buffers()
+ * guarantees the term is never negative, but it is good to require a
+ * recent kernel.
+ */
get_param(winsys, I915_PARAM_HAS_RELAXED_DELTA, &val);
if (!val) {
debug_error("kernel 2.6.39 required");
info->devid = drm_intel_bufmgr_gem_get_devid(winsys->bufmgr);
+ if (drm_intel_get_aperture_sizes(winsys->fd,
+ &info->aperture_mappable, &info->aperture_total)) {
+ debug_error("failed to query aperture sizes");
+ return false;
+ }
+
get_param(winsys, I915_PARAM_HAS_LLC, &val);
info->has_llc = val;
+ info->has_address_swizzling = test_address_swizzling(winsys);
+
+ winsys->first_gem_ctx = drm_intel_gem_context_create(winsys->bufmgr);
+ info->has_logical_context = (winsys->first_gem_ctx != NULL);
+
+ get_param(winsys, I915_PARAM_HAS_ALIASING_PPGTT, &val);
+ info->has_ppgtt = val;
+
+ /* test TIMESTAMP read */
+ info->has_timestamp = test_reg_read(winsys, 0x2358);
get_param(winsys, I915_PARAM_HAS_GEN7_SOL_RESET, &val);
info->has_gen7_sol_reset = val;
- info->has_address_swizzling = test_address_swizzling(winsys);
-
return true;
}
/**
 * Create an intel_winsys for the given DRM fd.
 *
 * The fd is borrowed, not owned; the caller must keep it open for the
 * lifetime of the winsys.  Returns NULL on failure.
 */
struct intel_winsys *
intel_winsys_create_for_fd(int fd)
{
   /* so that we can have enough (up to 4094) relocs per bo */
   const int batch_size = sizeof(uint32_t) * 8192;
   struct intel_winsys *winsys;

   winsys = CALLOC_STRUCT(intel_winsys);
   /* fix: the allocation result was dereferenced without a NULL check */
   if (!winsys)
      return NULL;

   winsys->fd = fd;

   winsys->bufmgr = drm_intel_bufmgr_gem_init(winsys->fd, batch_size);
   if (!winsys->bufmgr) {
      debug_error("failed to create GEM buffer manager");
      FREE(winsys);
      return NULL;
   }

   pipe_mutex_init(winsys->mutex);

   if (!probe_winsys(winsys)) {
      pipe_mutex_destroy(winsys->mutex);
      drm_intel_bufmgr_destroy(winsys->bufmgr);
      FREE(winsys);
      return NULL;
   }

   /*
    * No need to implicitly set up a fence register for each non-linear reloc
    * entry.  INTEL_RELOC_FENCE will be set on reloc entries that need them.
    */
   drm_intel_bufmgr_gem_enable_fenced_relocs(winsys->bufmgr);

   drm_intel_bufmgr_gem_enable_reuse(winsys->bufmgr);

   return winsys;
}
if (winsys->decode)
drm_intel_decode_context_free(winsys->decode);
+ if (winsys->first_gem_ctx)
+ drm_intel_gem_context_destroy(winsys->first_gem_ctx);
+
+ pipe_mutex_destroy(winsys->mutex);
drm_intel_bufmgr_destroy(winsys->bufmgr);
FREE(winsys);
}
struct intel_context *
intel_winsys_create_context(struct intel_winsys *winsys)
{
- return (struct intel_context *)
- drm_intel_gem_context_create(winsys->bufmgr);
+ drm_intel_context *gem_ctx;
+
+ /* try the preallocated context first */
+ pipe_mutex_lock(winsys->mutex);
+ gem_ctx = winsys->first_gem_ctx;
+ winsys->first_gem_ctx = NULL;
+ pipe_mutex_unlock(winsys->mutex);
+
+ if (!gem_ctx)
+ gem_ctx = drm_intel_gem_context_create(winsys->bufmgr);
+
+ return (struct intel_context *) gem_ctx;
}
/* Destroy a context returned by intel_winsys_create_context(). */
void
intel_winsys_destroy_context(struct intel_winsys *winsys,
                             struct intel_context *ctx)
{
   drm_intel_gem_context_destroy(gem_ctx(ctx));
}
int
return drm_intel_reg_read(winsys->bufmgr, reg, val);
}
/*
 * Query how many batches of \p ctx were lost to GPU resets, split into
 * those that were actively executing and those that were merely pending.
 *
 * The global reset count reported by the kernel is not part of this
 * interface and is discarded.  Returns 0 on success.
 */
int
intel_winsys_get_reset_stats(struct intel_winsys *winsys,
                             struct intel_context *ctx,
                             uint32_t *active_lost,
                             uint32_t *pending_lost)
{
   uint32_t ignored_reset_count;

   return drm_intel_get_reset_stats(gem_ctx(ctx), &ignored_reset_count,
         active_lost, pending_lost);
}
+
struct intel_bo *
-intel_winsys_alloc_buffer(struct intel_winsys *winsys,
- const char *name,
- unsigned long size,
- unsigned long flags)
+intel_winsys_alloc_bo(struct intel_winsys *winsys,
+ const char *name,
+ unsigned long size,
+ bool cpu_init)
{
- const int alignment = 4096; /* always page-aligned */
+ const unsigned int alignment = 4096; /* always page-aligned */
drm_intel_bo *bo;
- if (flags == INTEL_ALLOC_FOR_RENDER) {
+ if (cpu_init) {
+ bo = drm_intel_bo_alloc(winsys->bufmgr, name, size, alignment);
+ } else {
bo = drm_intel_bo_alloc_for_render(winsys->bufmgr,
name, size, alignment);
}
- else {
- assert(!flags);
- bo = drm_intel_bo_alloc(winsys->bufmgr, name, size, alignment);
- }
return (struct intel_bo *) bo;
}
/*
 * Wrap a user memory region in a bo.
 *
 * Userptr import is not implemented by this winsys; always fails with NULL.
 */
struct intel_bo *
intel_winsys_import_userptr(struct intel_winsys *winsys,
                            const char *name,
                            void *userptr,
                            unsigned long size,
                            unsigned long flags)
{
   return NULL;
}
struct intel_bo *
intel_winsys_import_handle(struct intel_winsys *winsys,
const char *name,
const struct winsys_handle *handle,
- int width, int height, int cpp,
+ unsigned long height,
enum intel_tiling_mode *tiling,
unsigned long *pitch)
{
drm_intel_bo *bo;
int err;
+ if (handle->offset != 0) {
+ debug_error("attempt to import unsupported winsys offset");
+ return NULL;
+ }
+
switch (handle->type) {
case DRM_API_HANDLE_TYPE_SHARED:
{
struct intel_bo *bo,
enum intel_tiling_mode tiling,
unsigned long pitch,
+ unsigned long height,
struct winsys_handle *handle)
{
int err = 0;
{
uint32_t name;
- err = drm_intel_bo_flink((drm_intel_bo *) bo, &name);
+ err = drm_intel_bo_flink(gem_bo(bo), &name);
if (!err)
handle->handle = name;
}
break;
case DRM_API_HANDLE_TYPE_KMS:
- handle->handle = ((drm_intel_bo *) bo)->handle;
+ handle->handle = gem_bo(bo)->handle;
break;
-#if 0
case DRM_API_HANDLE_TYPE_FD:
{
int fd;
- err = drm_intel_bo_gem_export_to_prime((drm_intel_bo *) bo, &fd);
+ err = drm_intel_bo_gem_export_to_prime(gem_bo(bo), &fd);
if (!err)
handle->handle = fd;
}
break;
-#endif
default:
err = -EINVAL;
break;
return 0;
}
+bool
+intel_winsys_can_submit_bo(struct intel_winsys *winsys,
+ struct intel_bo **bo_array,
+ int count)
+{
+ return !drm_intel_bufmgr_check_aperture_space((drm_intel_bo **) bo_array,
+ count);
+}
+
int
-intel_winsys_check_aperture_space(struct intel_winsys *winsys,
- struct intel_bo **bo_array,
- int count)
+intel_winsys_submit_bo(struct intel_winsys *winsys,
+ enum intel_ring_type ring,
+ struct intel_bo *bo, int used,
+ struct intel_context *ctx,
+ unsigned long flags)
{
- return drm_intel_bufmgr_check_aperture_space((drm_intel_bo **) bo_array,
- count);
+ const unsigned long exec_flags = (unsigned long) ring | flags;
+
+ /* logical contexts are only available for the render ring */
+ if (ring != INTEL_RING_RENDER)
+ ctx = NULL;
+
+ if (ctx) {
+ return drm_intel_gem_bo_context_exec(gem_bo(bo),
+ (drm_intel_context *) ctx, used, exec_flags);
+ }
+ else {
+ return drm_intel_bo_mrb_exec(gem_bo(bo),
+ used, NULL, 0, 0, exec_flags);
+ }
}
void
-intel_winsys_decode_commands(struct intel_winsys *winsys,
- struct intel_bo *bo, int used)
+intel_winsys_decode_bo(struct intel_winsys *winsys,
+ struct intel_bo *bo, int used)
{
- int err;
+ void *ptr;
+
+ ptr = intel_bo_map(bo, false);
+ if (!ptr) {
+ debug_printf("failed to map buffer for decoding\n");
+ return;
+ }
+
+ pipe_mutex_lock(winsys->mutex);
if (!winsys->decode) {
winsys->decode = drm_intel_decode_context_alloc(winsys->info.devid);
- if (!winsys->decode)
+ if (!winsys->decode) {
+ pipe_mutex_unlock(winsys->mutex);
+ intel_bo_unmap(bo);
return;
+ }
/* debug_printf()/debug_error() uses stderr by default */
drm_intel_decode_set_output_file(winsys->decode, stderr);
}
- err = intel_bo_map(bo, false);
- if (err) {
- debug_printf("failed to map buffer for decoding\n");
- return;
- }
-
/* in dwords */
used /= 4;
drm_intel_decode_set_batch_pointer(winsys->decode,
- intel_bo_get_virtual(bo), intel_bo_get_offset(bo), used);
+ ptr, gem_bo(bo)->offset64, used);
drm_intel_decode(winsys->decode);
+ pipe_mutex_unlock(winsys->mutex);
+
intel_bo_unmap(bo);
}
/* Add a reference to \p bo and return it.  NULL is passed through. */
struct intel_bo *
intel_bo_ref(struct intel_bo *bo)
{
   if (!bo)
      return NULL;

   drm_intel_bo_reference(gem_bo(bo));

   return bo;
}
/* Drop a reference to \p bo.  NULL is a no-op. */
void
intel_bo_unref(struct intel_bo *bo)
{
   if (bo)
      drm_intel_bo_unreference(gem_bo(bo));
}
-unsigned long
-intel_bo_get_size(const struct intel_bo *bo)
+int
+intel_bo_set_tiling(struct intel_bo *bo,
+ enum intel_tiling_mode tiling,
+ unsigned long pitch)
{
- return ((drm_intel_bo *) bo)->size;
-}
+ uint32_t real_tiling = tiling;
+ int err;
-unsigned long
-intel_bo_get_offset(const struct intel_bo *bo)
-{
- return ((drm_intel_bo *) bo)->offset;
+ switch (tiling) {
+ case INTEL_TILING_X:
+ if (pitch % 512)
+ return -1;
+ break;
+ case INTEL_TILING_Y:
+ if (pitch % 128)
+ return -1;
+ break;
+ default:
+ break;
+ }
+
+ err = drm_intel_bo_set_tiling(gem_bo(bo), &real_tiling, pitch);
+ if (err || real_tiling != tiling) {
+ assert(!"tiling mismatch");
+ return -1;
+ }
+
+ return 0;
}
void *
-intel_bo_get_virtual(const struct intel_bo *bo)
+intel_bo_map(struct intel_bo *bo, bool write_enable)
{
- return ((drm_intel_bo *) bo)->virtual;
+ int err;
+
+ err = drm_intel_bo_map(gem_bo(bo), write_enable);
+ if (err) {
+ debug_error("failed to map bo");
+ return NULL;
+ }
+
+ return gem_bo(bo)->virtual;
}
/*
 * Map \p bo for CPU access without waiting for the GPU.
 *
 * Not supported by this winsys; always returns NULL so callers fall back
 * to a synchronous map.
 */
void *
intel_bo_map_async(struct intel_bo *bo)
{
   return NULL;
}
-int
+void *
intel_bo_map_gtt(struct intel_bo *bo)
{
- return drm_intel_gem_bo_map_gtt((drm_intel_bo *) bo);
+ int err;
+
+ err = drm_intel_gem_bo_map_gtt(gem_bo(bo));
+ if (err) {
+ debug_error("failed to map bo");
+ return NULL;
+ }
+
+ return gem_bo(bo)->virtual;
}
-int
-intel_bo_map_unsynchronized(struct intel_bo *bo)
+void *
+intel_bo_map_gtt_async(struct intel_bo *bo)
{
- return drm_intel_gem_bo_map_unsynchronized((drm_intel_bo *) bo);
+ int err;
+
+ err = drm_intel_gem_bo_map_unsynchronized(gem_bo(bo));
+ if (err) {
+ debug_error("failed to map bo");
+ return NULL;
+ }
+
+ return gem_bo(bo)->virtual;
}
void
{
int err;
- err = drm_intel_bo_unmap((drm_intel_bo *) bo);
+ err = drm_intel_bo_unmap(gem_bo(bo));
assert(!err);
}
intel_bo_pwrite(struct intel_bo *bo, unsigned long offset,
unsigned long size, const void *data)
{
- return drm_intel_bo_subdata((drm_intel_bo *) bo, offset, size, data);
+ return drm_intel_bo_subdata(gem_bo(bo), offset, size, data);
}
/* Read \p size bytes at \p offset from \p bo into \p data.  Returns 0 on
 * success. */
int
intel_bo_pread(struct intel_bo *bo, unsigned long offset,
               unsigned long size, void *data)
{
   return drm_intel_bo_get_subdata(gem_bo(bo), offset, size, data);
}
int
-intel_bo_emit_reloc(struct intel_bo *bo, uint32_t offset,
- struct intel_bo *target_bo, uint32_t target_offset,
- uint32_t read_domains, uint32_t write_domain)
+intel_bo_add_reloc(struct intel_bo *bo, uint32_t offset,
+ struct intel_bo *target_bo, uint32_t target_offset,
+ uint32_t flags, uint64_t *presumed_offset)
{
- return drm_intel_bo_emit_reloc((drm_intel_bo *) bo, offset,
- (drm_intel_bo *) target_bo, target_offset,
- read_domains, write_domain);
+ uint32_t read_domains, write_domain;
+ int err;
+
+ if (flags & INTEL_RELOC_WRITE) {
+ /*
+ * Because of the translation to domains, INTEL_RELOC_GGTT should only
+ * be set on GEN6 when the bo is written by MI_* or PIPE_CONTROL. The
+ * kernel will translate it back to INTEL_RELOC_GGTT.
+ */
+ write_domain = (flags & INTEL_RELOC_GGTT) ?
+ I915_GEM_DOMAIN_INSTRUCTION : I915_GEM_DOMAIN_RENDER;
+ read_domains = write_domain;
+ } else {
+ write_domain = 0;
+ read_domains = I915_GEM_DOMAIN_RENDER |
+ I915_GEM_DOMAIN_SAMPLER |
+ I915_GEM_DOMAIN_INSTRUCTION |
+ I915_GEM_DOMAIN_VERTEX;
+ }
+
+ if (flags & INTEL_RELOC_FENCE) {
+ err = drm_intel_bo_emit_reloc_fence(gem_bo(bo), offset,
+ gem_bo(target_bo), target_offset,
+ read_domains, write_domain);
+ } else {
+ err = drm_intel_bo_emit_reloc(gem_bo(bo), offset,
+ gem_bo(target_bo), target_offset,
+ read_domains, write_domain);
+ }
+
+ *presumed_offset = gem_bo(target_bo)->offset64 + target_offset;
+
+ return err;
}
/* Return the number of relocation entries currently attached to \p bo. */
int
intel_bo_get_reloc_count(struct intel_bo *bo)
{
   return drm_intel_gem_bo_get_reloc_count(gem_bo(bo));
}
/* Discard all relocation entries of \p bo from index \p start onward. */
void
intel_bo_truncate_relocs(struct intel_bo *bo, int start)
{
   drm_intel_gem_bo_clear_relocs(gem_bo(bo), start);
}
/*
 * Return true when \p bo references \p target_bo, directly or through a
 * chain of relocation entries.
 */
bool
intel_bo_has_reloc(struct intel_bo *bo, struct intel_bo *target_bo)
{
   return drm_intel_bo_references(gem_bo(bo), gem_bo(target_bo));
}
int
{
int err;
- err = drm_intel_gem_bo_wait((drm_intel_bo *) bo, timeout);
+ if (timeout >= 0) {
+ err = drm_intel_gem_bo_wait(gem_bo(bo), timeout);
+ } else {
+ drm_intel_bo_wait_rendering(gem_bo(bo));
+ err = 0;
+ }
+
/* consider the bo idle on errors */
if (err && err != -ETIME)
err = 0;