#define RADEON_CS_RING_COMPUTE 1
#endif
+#ifndef RADEON_CS_RING_DMA
+#define RADEON_CS_RING_DMA 2
+#endif
+
#ifndef RADEON_CS_END_OF_FRAME
#define RADEON_CS_END_OF_FRAME 0x04
#endif
FREE(csc->relocs);
}
-DEBUG_GET_ONCE_BOOL_OPTION(thread, "RADEON_THREAD", TRUE)
-static PIPE_THREAD_ROUTINE(radeon_drm_cs_emit_ioctl, param);
-static struct radeon_winsys_cs *radeon_drm_cs_create(struct radeon_winsys *rws)
+static struct radeon_winsys_cs *radeon_drm_cs_create(struct radeon_winsys *rws, enum ring_type ring_type)
{
struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
struct radeon_drm_cs *cs;
if (!cs) {
return NULL;
}
- pipe_semaphore_init(&cs->flush_queued, 0);
pipe_semaphore_init(&cs->flush_completed, 0);
cs->ws = ws;
cs->csc = &cs->csc1;
cs->cst = &cs->csc2;
cs->base.buf = cs->csc->buf;
+ cs->base.ring_type = ring_type;
p_atomic_inc(&ws->num_cs);
- if (cs->ws->num_cpus > 1 && debug_get_option_thread())
- cs->thread = pipe_thread_create(radeon_drm_cs_emit_ioctl, cs);
return &cs->base;
}
return -1;
}
-static unsigned radeon_add_reloc(struct radeon_cs_context *csc,
+static unsigned radeon_add_reloc(struct radeon_drm_cs *cs,
struct radeon_bo *bo,
enum radeon_bo_usage usage,
enum radeon_bo_domain domains,
enum radeon_bo_domain *added_domains)
{
+ struct radeon_cs_context *csc = cs->csc;
struct drm_radeon_cs_reloc *reloc;
- unsigned i;
unsigned hash = bo->handle & (sizeof(csc->is_handle_added)-1);
enum radeon_bo_domain rd = usage & RADEON_USAGE_READ ? domains : 0;
enum radeon_bo_domain wd = usage & RADEON_USAGE_WRITE ? domains : 0;
+ bool update_hash = TRUE;
+ int i;
+ *added_domains = 0;
if (csc->is_handle_added[hash]) {
i = csc->reloc_indices_hashlist[hash];
reloc = &csc->relocs[i];
- if (reloc->handle == bo->handle) {
- update_reloc_domains(reloc, rd, wd, added_domains);
- return i;
+ if (reloc->handle != bo->handle) {
+ /* Hash collision, look for the BO in the list of relocs linearly. */
+ for (i = csc->crelocs - 1; i >= 0; i--) {
+ reloc = &csc->relocs[i];
+ if (reloc->handle == bo->handle) {
+ /*printf("write_reloc collision, hash: %i, handle: %i\n", hash, bo->handle);*/
+ break;
+ }
+ }
}
- /* Hash collision, look for the BO in the list of relocs linearly. */
- for (i = csc->crelocs; i != 0;) {
- --i;
- reloc = &csc->relocs[i];
- if (reloc->handle == bo->handle) {
- update_reloc_domains(reloc, rd, wd, added_domains);
-
+ if (i >= 0) {
+ /* On the DMA ring we need to emit one relocation for every use of the BO,
+ * so each time this function is called we add the BO to the relocation
+ * buffer again.
+ *
+ * Do not update the hash table for the DMA ring, so that the hash always
+ * points to the first relocation of the BO, which is the one the kernel
+ * uses for memory placement. The following relocations are ignored for
+ * placement, but the kernel still uses them to patch the command stream
+ * with the proper buffer offsets.
+ */
+ update_hash = FALSE;
+ update_reloc_domains(reloc, rd, wd, added_domains);
+ if (cs->base.ring_type != RING_DMA) {
csc->reloc_indices_hashlist[hash] = i;
- /*printf("write_reloc collision, hash: %i, handle: %i\n", hash, bo->handle);*/
return i;
}
}
reloc->flags = 0;
csc->is_handle_added[hash] = TRUE;
- csc->reloc_indices_hashlist[hash] = csc->crelocs;
+ if (update_hash) {
+ csc->reloc_indices_hashlist[hash] = csc->crelocs;
+ }
csc->chunks[1].length_dw += RELOC_DWORDS;
struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
struct radeon_bo *bo = (struct radeon_bo*)buf;
enum radeon_bo_domain added_domains;
-
- unsigned index = radeon_add_reloc(cs->csc, bo, usage, domains, &added_domains);
+ unsigned index = radeon_add_reloc(cs, bo, usage, domains, &added_domains);
if (added_domains & RADEON_DOMAIN_GTT)
cs->csc->used_gart += bo->base.size;
{
struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
struct radeon_bo *bo = (struct radeon_bo*)buf;
-
unsigned index = radeon_get_reloc(cs->csc, bo);
if (index == -1) {
OUT_CS(&cs->base, index * RELOC_DWORDS);
}
-static void radeon_drm_cs_emit_ioctl_oneshot(struct radeon_cs_context *csc)
+void radeon_drm_cs_emit_ioctl_oneshot(struct radeon_cs_context *csc)
{
unsigned i;
radeon_cs_context_cleanup(csc);
}
-static PIPE_THREAD_ROUTINE(radeon_drm_cs_emit_ioctl, param)
+/*
+ * Make sure any previous submission of this CS has completed.
+ */
+void radeon_drm_cs_sync_flush(struct radeon_winsys_cs *rcs)
{
- struct radeon_drm_cs *cs = (struct radeon_drm_cs*)param;
-
- while (1) {
- pipe_semaphore_wait(&cs->flush_queued);
- if (cs->kill_thread)
- break;
- radeon_drm_cs_emit_ioctl_oneshot(cs->cst);
- pipe_semaphore_signal(&cs->flush_completed);
- }
- pipe_semaphore_signal(&cs->flush_completed);
- return NULL;
-}
+ struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
-void radeon_drm_cs_sync_flush(struct radeon_drm_cs *cs)
-{
/* Wait for any pending ioctl to complete. */
- if (cs->thread && cs->flush_started) {
+ if (cs->ws->thread && cs->flush_started) {
pipe_semaphore_wait(&cs->flush_completed);
cs->flush_started = 0;
}
fprintf(stderr, "radeon: command stream overflowed\n");
}
- radeon_drm_cs_sync_flush(cs);
+ radeon_drm_cs_sync_flush(rcs);
/* Flip command streams. */
tmp = cs->csc;
cs->cst = tmp;
/* If the CS is not empty or overflowed, emit it in a separate thread. */
- if (cs->base.cdw && cs->base.cdw <= RADEON_MAX_CMDBUF_DWORDS &&
- !debug_get_option_noop()) {
+ if (cs->base.cdw && cs->base.cdw <= RADEON_MAX_CMDBUF_DWORDS && !debug_get_option_noop()) {
unsigned i, crelocs = cs->cst->crelocs;
cs->cst->chunks[0].length_dw = cs->base.cdw;
p_atomic_inc(&cs->cst->relocs_bo[i]->num_active_ioctls);
}
- cs->cst->flags[0] = 0;
- cs->cst->flags[1] = RADEON_CS_RING_GFX;
- cs->cst->cs.num_chunks = 2;
- if (flags & RADEON_FLUSH_KEEP_TILING_FLAGS) {
- cs->cst->flags[0] |= RADEON_CS_KEEP_TILING_FLAGS;
- cs->cst->cs.num_chunks = 3;
- }
- if (cs->ws->info.r600_virtual_address) {
- cs->cst->flags[0] |= RADEON_CS_USE_VM;
- cs->cst->cs.num_chunks = 3;
- }
- if (flags & RADEON_FLUSH_END_OF_FRAME) {
- cs->cst->flags[0] |= RADEON_CS_END_OF_FRAME;
- cs->cst->cs.num_chunks = 3;
- }
- if (flags & RADEON_FLUSH_COMPUTE) {
- cs->cst->flags[1] = RADEON_CS_RING_COMPUTE;
+ switch (cs->base.ring_type) {
+ case RING_DMA:
+ cs->cst->flags[0] = 0;
+ cs->cst->flags[1] = RADEON_CS_RING_DMA;
cs->cst->cs.num_chunks = 3;
+ if (cs->ws->info.r600_virtual_address) {
+ cs->cst->flags[0] |= RADEON_CS_USE_VM;
+ }
+ break;
+ default:
+ case RING_GFX:
+ cs->cst->flags[0] = 0;
+ cs->cst->flags[1] = RADEON_CS_RING_GFX;
+ cs->cst->cs.num_chunks = 2;
+ if (flags & RADEON_FLUSH_KEEP_TILING_FLAGS) {
+ cs->cst->flags[0] |= RADEON_CS_KEEP_TILING_FLAGS;
+ cs->cst->cs.num_chunks = 3;
+ }
+ if (cs->ws->info.r600_virtual_address) {
+ cs->cst->flags[0] |= RADEON_CS_USE_VM;
+ cs->cst->cs.num_chunks = 3;
+ }
+ if (flags & RADEON_FLUSH_END_OF_FRAME) {
+ cs->cst->flags[0] |= RADEON_CS_END_OF_FRAME;
+ cs->cst->cs.num_chunks = 3;
+ }
+ if (flags & RADEON_FLUSH_COMPUTE) {
+ cs->cst->flags[1] = RADEON_CS_RING_COMPUTE;
+ cs->cst->cs.num_chunks = 3;
+ }
+ break;
}
- if (cs->thread &&
- (flags & RADEON_FLUSH_ASYNC)) {
+ if (cs->ws->thread && (flags & RADEON_FLUSH_ASYNC)) {
cs->flush_started = 1;
- pipe_semaphore_signal(&cs->flush_queued);
+ radeon_drm_ws_queue_cs(cs->ws, cs);
} else {
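+ /* Synchronous flush: wait until the submission thread has drained every
+ * queued CS before emitting this one directly, so submissions stay ordered. */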
+ pipe_mutex_lock(cs->ws->cs_stack_lock);
+ if (cs->ws->thread) {
+ while (p_atomic_read(&cs->ws->ncs)) {
+ pipe_condvar_wait(cs->ws->cs_queue_empty, cs->ws->cs_stack_lock);
+ }
+ }
+ pipe_mutex_unlock(cs->ws->cs_stack_lock);
radeon_drm_cs_emit_ioctl_oneshot(cs->cst);
}
} else {
static void radeon_drm_cs_destroy(struct radeon_winsys_cs *rcs)
{
struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
- radeon_drm_cs_sync_flush(cs);
- if (cs->thread) {
- cs->kill_thread = 1;
- pipe_semaphore_signal(&cs->flush_queued);
- pipe_semaphore_wait(&cs->flush_completed);
- pipe_thread_wait(cs->thread);
- }
- pipe_semaphore_destroy(&cs->flush_queued);
+
+ radeon_drm_cs_sync_flush(rcs);
pipe_semaphore_destroy(&cs->flush_completed);
radeon_cs_context_cleanup(&cs->csc1);
radeon_cs_context_cleanup(&cs->csc2);
void *user)
{
struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
+
cs->flush_cs = flush;
cs->flush_data = user;
}
ws->base.cs_flush = radeon_drm_cs_flush;
ws->base.cs_set_flush_callback = radeon_drm_cs_set_flush;
ws->base.cs_is_buffer_referenced = radeon_bo_is_referenced;
+ ws->base.cs_sync_flush = radeon_drm_cs_sync_flush;
}
break;
}
+ /* Check for DMA ring support (R700 or later and DRM 2.27+). */
+ ws->info.r600_has_dma = FALSE;
+ if (ws->info.chip_class >= R700 && ws->info.drm_minor >= 27) {
+ ws->info.r600_has_dma = TRUE;
+ }
+
/* Get GEM info. */
retval = drmCommandWriteRead(ws->fd, DRM_RADEON_GEM_INFO,
&gem_info, sizeof(gem_info));
{
struct radeon_drm_winsys *ws = (struct radeon_drm_winsys*)rws;
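+ /* Stop the CS submission thread before tearing the winsys down. */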
+ if (ws->thread) {
+ ws->kill_thread = 1;
+ pipe_semaphore_signal(&ws->cs_queued);
+ pipe_thread_wait(ws->thread);
+ }
+ pipe_semaphore_destroy(&ws->cs_queued);
+ pipe_condvar_destroy(ws->cs_queue_empty);
+
if (!pipe_reference(&ws->base.reference, NULL)) {
return;
}
pipe_mutex_destroy(ws->hyperz_owner_mutex);
pipe_mutex_destroy(ws->cmask_owner_mutex);
+ pipe_mutex_destroy(ws->cs_stack_lock);
ws->cman->destroy(ws->cman);
ws->kman->destroy(ws->kman);
return pointer_to_intptr(key1) != pointer_to_intptr(key2);
}
+void radeon_drm_ws_queue_cs(struct radeon_drm_winsys *ws, struct radeon_drm_cs *cs)
+{
+retry:
+ pipe_mutex_lock(ws->cs_stack_lock);
+ if (p_atomic_read(&ws->ncs) >= RING_LAST) {
+ /* no room left for a flush */
+ pipe_mutex_unlock(ws->cs_stack_lock);
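+ /* Spin until the submission thread drains an entry and frees a slot. */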
+ goto retry;
+ }
+ ws->cs_stack[p_atomic_read(&ws->ncs)] = cs;
+ p_atomic_inc(&ws->ncs);
+ pipe_mutex_unlock(ws->cs_stack_lock);
+ pipe_semaphore_signal(&ws->cs_queued);
+}
+
+static PIPE_THREAD_ROUTINE(radeon_drm_cs_emit_ioctl, param)
+{
+ struct radeon_drm_winsys *ws = (struct radeon_drm_winsys *)param;
+ struct radeon_drm_cs *cs;
+ unsigned i, empty_stack;
+
+ while (1) {
+ pipe_semaphore_wait(&ws->cs_queued);
+ if (ws->kill_thread)
+ break;
+next:
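+ /* Always submit the oldest queued CS first (FIFO order). */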
+ pipe_mutex_lock(ws->cs_stack_lock);
+ cs = ws->cs_stack[0];
+ pipe_mutex_unlock(ws->cs_stack_lock);
+
+ if (cs) {
+ radeon_drm_cs_emit_ioctl_oneshot(cs->cst);
+
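+ /* Remove the submitted CS from the stack and shift the remaining entries down. */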
+ pipe_mutex_lock(ws->cs_stack_lock);
+ for (i = 1; i < p_atomic_read(&ws->ncs); i++) {
+ ws->cs_stack[i - 1] = ws->cs_stack[i];
+ }
+ ws->cs_stack[p_atomic_read(&ws->ncs) - 1] = NULL;
+ empty_stack = p_atomic_dec_zero(&ws->ncs);
+ if (empty_stack) {
+ pipe_condvar_signal(ws->cs_queue_empty);
+ }
+ pipe_mutex_unlock(ws->cs_stack_lock);
+
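+ /* Unblock any caller waiting in radeon_drm_cs_sync_flush() on this CS. */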
+ pipe_semaphore_signal(&cs->flush_completed);
+
+ if (!empty_stack) {
+ goto next;
+ }
+ }
+ }
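+ /* The thread is exiting: release every CS still queued so that pending
+ * sync flushes do not block forever. */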
+ pipe_mutex_lock(ws->cs_stack_lock);
+ for (i = 0; i < p_atomic_read(&ws->ncs); i++) {
+ pipe_semaphore_signal(&ws->cs_stack[i]->flush_completed);
+ ws->cs_stack[i] = NULL;
+ }
+ p_atomic_set(&ws->ncs, 0);
+ pipe_condvar_signal(ws->cs_queue_empty);
+ pipe_mutex_unlock(ws->cs_stack_lock);
+ return NULL;
+}
+
+DEBUG_GET_ONCE_BOOL_OPTION(thread, "RADEON_THREAD", TRUE)
+
struct radeon_winsys *radeon_drm_winsys_create(int fd)
{
struct radeon_drm_winsys *ws;
pipe_mutex_init(ws->hyperz_owner_mutex);
pipe_mutex_init(ws->cmask_owner_mutex);
+ pipe_mutex_init(ws->cs_stack_lock);
+
+ p_atomic_set(&ws->ncs, 0);
+ pipe_semaphore_init(&ws->cs_queued, 0);
+ pipe_condvar_init(ws->cs_queue_empty);
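+ /* Create a single submission thread per winsys; setting RADEON_THREAD=false
+ * (or running on a single CPU) keeps flushes synchronous. */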
+ if (ws->num_cpus > 1 && debug_get_option_thread())
+ ws->thread = pipe_thread_create(radeon_drm_cs_emit_ioctl, ws);
return &ws->base;