radeon/winsys: increase the IB size for VM
[mesa.git] / src / gallium / winsys / radeon / drm / radeon_drm_cs.c
index 2239059cc531d0ee8d5d769cfe0ef2cb8ac05700..45eef294369d67d553a9bda56be70bbce971ef35 100644 (file)
 #include "radeon_drm_cs.h"
 
 #include "util/u_memory.h"
+#include "os/os_time.h"
 
 #include <stdio.h>
 #include <stdlib.h>
 #include <stdint.h>
 #include <xf86drm.h>
 
-#ifndef RADEON_CHUNK_ID_FLAGS
-#define RADEON_CHUNK_ID_FLAGS  0x03
-
-/* The first dword of RADEON_CHUNK_ID_FLAGS is a uint32 of these flags: */
-#define RADEON_CS_KEEP_TILING_FLAGS 0x01
-#endif
 
 #define RELOC_DWORDS (sizeof(struct drm_radeon_cs_reloc) / sizeof(uint32_t))
 
-static boolean radeon_init_cs_context(struct radeon_cs_context *csc, int fd)
+static struct pipe_fence_handle *
+radeon_cs_create_fence(struct radeon_winsys_cs *rcs);
+static void radeon_fence_reference(struct pipe_fence_handle **dst,
+                                   struct pipe_fence_handle *src);
+
+static boolean radeon_init_cs_context(struct radeon_cs_context *csc,
+                                      struct radeon_drm_winsys *ws)
 {
-    csc->fd = fd;
+    int i;
+
+    csc->buf = MALLOC(ws->ib_max_size);
+    if (!csc->buf)
+        return FALSE;
+    csc->fd = ws->fd;
     csc->nrelocs = 512;
     csc->relocs_bo = (struct radeon_bo**)
                      CALLOC(1, csc->nrelocs * sizeof(struct radeon_bo*));
     if (!csc->relocs_bo) {
+        FREE(csc->buf);
         return FALSE;
     }
 
     csc->relocs = (struct drm_radeon_cs_reloc*)
                   CALLOC(1, csc->nrelocs * sizeof(struct drm_radeon_cs_reloc));
     if (!csc->relocs) {
+        FREE(csc->buf);
         FREE(csc->relocs_bo);
         return FALSE;
     }
@@ -104,7 +112,7 @@ static boolean radeon_init_cs_context(struct radeon_cs_context *csc, int fd)
     csc->chunks[1].length_dw = 0;
     csc->chunks[1].chunk_data = (uint64_t)(uintptr_t)csc->relocs;
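+    /* The flags chunk carries two dwords: flags[0] holds the CS flags,
+     * flags[1] selects the target ring. */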
     csc->chunks[2].chunk_id = RADEON_CHUNK_ID_FLAGS;
-    csc->chunks[2].length_dw = 1;
+    csc->chunks[2].length_dw = 2;
     csc->chunks[2].chunk_data = (uint64_t)(uintptr_t)&csc->flags;
 
     csc->chunk_array[0] = (uint64_t)(uintptr_t)&csc->chunks[0];
@@ -112,6 +120,10 @@ static boolean radeon_init_cs_context(struct radeon_cs_context *csc, int fd)
     csc->chunk_array[2] = (uint64_t)(uintptr_t)&csc->chunks[2];
 
     csc->cs.chunks = (uint64_t)(uintptr_t)csc->chunk_array;
+
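+    /* An index of -1 marks an empty hash slot. */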
+    for (i = 0; i < Elements(csc->reloc_indices_hashlist); i++) {
+        csc->reloc_indices_hashlist[i] = -1;
+    }
     return TRUE;
 }
 
@@ -130,7 +142,10 @@ static void radeon_cs_context_cleanup(struct radeon_cs_context *csc)
     csc->chunks[1].length_dw = 0;
     csc->used_gart = 0;
     csc->used_vram = 0;
-    memset(csc->is_handle_added, 0, sizeof(csc->is_handle_added));
+
+    for (i = 0; i < Elements(csc->reloc_indices_hashlist); i++) {
+        csc->reloc_indices_hashlist[i] = -1;
+    }
 }
 
 static void radeon_destroy_cs_context(struct radeon_cs_context *csc)
@@ -138,12 +153,17 @@ static void radeon_destroy_cs_context(struct radeon_cs_context *csc)
     radeon_cs_context_cleanup(csc);
     FREE(csc->relocs_bo);
     FREE(csc->relocs);
+    FREE(csc->buf);
 }
 
-DEBUG_GET_ONCE_BOOL_OPTION(thread, "RADEON_THREAD", TRUE)
-static PIPE_THREAD_ROUTINE(radeon_drm_cs_emit_ioctl, param);
 
-static struct radeon_winsys_cs *radeon_drm_cs_create(struct radeon_winsys *rws)
+static struct radeon_winsys_cs *
+radeon_drm_cs_create(struct radeon_winsys *rws,
+                     enum ring_type ring_type,
+                     void (*flush)(void *ctx, unsigned flags,
+                                   struct pipe_fence_handle **fence),
+                     void *flush_ctx,
+                     struct radeon_winsys_cs_handle *trace_buf)
 {
     struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
     struct radeon_drm_cs *cs;
@@ -152,16 +172,18 @@ static struct radeon_winsys_cs *radeon_drm_cs_create(struct radeon_winsys *rws)
     if (!cs) {
         return NULL;
     }
-    pipe_semaphore_init(&cs->flush_queued, 0);
-    pipe_semaphore_init(&cs->flush_completed, 0);
+    pipe_semaphore_init(&cs->flush_completed, 1);
 
     cs->ws = ws;
+    cs->flush_cs = flush;
+    cs->flush_data = flush_ctx;
+    cs->trace_buf = (struct radeon_bo*)trace_buf;
 
-    if (!radeon_init_cs_context(&cs->csc1, cs->ws->fd)) {
+    if (!radeon_init_cs_context(&cs->csc1, cs->ws)) {
         FREE(cs);
         return NULL;
     }
-    if (!radeon_init_cs_context(&cs->csc2, cs->ws->fd)) {
+    if (!radeon_init_cs_context(&cs->csc2, cs->ws)) {
         radeon_destroy_cs_context(&cs->csc1);
         FREE(cs);
         return NULL;
@@ -171,90 +193,91 @@ static struct radeon_winsys_cs *radeon_drm_cs_create(struct radeon_winsys *rws)
     cs->csc = &cs->csc1;
     cs->cst = &cs->csc2;
     cs->base.buf = cs->csc->buf;
+    cs->base.ring_type = ring_type;
+    cs->base.max_dw = ws->ib_max_size / 4;
 
     p_atomic_inc(&ws->num_cs);
-    if (cs->ws->num_cpus > 1 && debug_get_option_thread())
-        cs->thread = pipe_thread_create(radeon_drm_cs_emit_ioctl, cs);
     return &cs->base;
 }
 
 #define OUT_CS(cs, value) (cs)->buf[(cs)->cdw++] = (value)
 
-static INLINE void update_reloc_domains(struct drm_radeon_cs_reloc *reloc,
-                                        enum radeon_bo_usage usage,
-                                        unsigned domains)
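+/* Merge new read/write domains and priority into an existing reloc entry.
+ * *added_domains reports the domains not previously in the entry, so the
+ * caller can update its GTT/VRAM accounting only for what is new. */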
+static inline void update_reloc(struct drm_radeon_cs_reloc *reloc,
+                                enum radeon_bo_domain rd,
+                                enum radeon_bo_domain wd,
+                                unsigned priority,
+                                enum radeon_bo_domain *added_domains)
 {
-    if (usage & RADEON_USAGE_READ)
-        reloc->read_domains |= domains;
-    if (usage & RADEON_USAGE_WRITE)
-        reloc->write_domain |= domains;
+    *added_domains = (rd | wd) & ~(reloc->read_domains | reloc->write_domain);
+
+    reloc->read_domains |= rd;
+    reloc->write_domain |= wd;
+    reloc->flags = MAX2(reloc->flags, priority);
 }
 
 int radeon_get_reloc(struct radeon_cs_context *csc, struct radeon_bo *bo)
 {
-    struct drm_radeon_cs_reloc *reloc;
-    unsigned i;
-    unsigned hash = bo->handle & (sizeof(csc->is_handle_added)-1);
-
-    if (csc->is_handle_added[hash]) {
-        reloc = csc->relocs_hashlist[hash];
-        if (reloc->handle == bo->handle) {
-            return csc->reloc_indices_hashlist[hash];
-        }
-
-        /* Hash collision, look for the BO in the list of relocs linearly. */
-        for (i = csc->crelocs; i != 0;) {
-            --i;
-            reloc = &csc->relocs[i];
-            if (reloc->handle == bo->handle) {
-                /* Put this reloc in the hash list.
-                 * This will prevent additional hash collisions if there are
-                 * several subsequent get_reloc calls of the same buffer.
-                 *
-                 * Example: Assuming buffers A,B,C collide in the hash list,
-                 * the following sequence of relocs:
-                 *         AAAAAAAAAAABBBBBBBBBBBBBBCCCCCCCC
-                 * will collide here: ^ and here:   ^,
-                 * meaning that we should get very few collisions in the end. */
-                csc->relocs_hashlist[hash] = reloc;
-                csc->reloc_indices_hashlist[hash] = i;
-                /*printf("write_reloc collision, hash: %i, handle: %i\n", hash, bo->handle);*/
-                return i;
-            }
+    unsigned hash = bo->handle & (Elements(csc->reloc_indices_hashlist)-1);
+    int i = csc->reloc_indices_hashlist[hash];
+
+    /* Return immediately on a hash miss (-1) or when the cached entry is this BO. */
+    if (i == -1 || csc->relocs_bo[i] == bo)
+        return i;
+
+    /* Hash collision, look for the BO in the list of relocs linearly. */
+    for (i = csc->crelocs - 1; i >= 0; i--) {
+        if (csc->relocs_bo[i] == bo) {
+            /* Put this reloc in the hash list.
+             * This will prevent additional hash collisions if there are
+             * several consecutive get_reloc calls for the same buffer.
+             *
+             * Example: Assuming buffers A,B,C collide in the hash list,
+             * the following sequence of relocs:
+             *         AAAAAAAAAAABBBBBBBBBBBBBBCCCCCCCC
+             * will collide here: ^ and here:   ^,
+             * meaning that we should get very few collisions in the end. */
+            csc->reloc_indices_hashlist[hash] = i;
+            return i;
         }
     }
-
     return -1;
 }
 
-static unsigned radeon_add_reloc(struct radeon_cs_context *csc,
+static unsigned radeon_add_reloc(struct radeon_drm_cs *cs,
                                  struct radeon_bo *bo,
                                  enum radeon_bo_usage usage,
-                                 unsigned *added_domains)
+                                 enum radeon_bo_domain domains,
+                                 unsigned priority,
+                                 enum radeon_bo_domain *added_domains)
 {
+    struct radeon_cs_context *csc = cs->csc;
     struct drm_radeon_cs_reloc *reloc;
-    unsigned i;
-    unsigned hash = bo->handle & (sizeof(csc->is_handle_added)-1);
-
-    if (csc->is_handle_added[hash]) {
-        reloc = csc->relocs_hashlist[hash];
-        if (reloc->handle == bo->handle) {
-            update_reloc_domains(reloc, usage, bo->reloc_domains);
-            return csc->reloc_indices_hashlist[hash];
-        }
-
-        /* Hash collision, look for the BO in the list of relocs linearly. */
-        for (i = csc->crelocs; i != 0;) {
-            --i;
-            reloc = &csc->relocs[i];
-            if (reloc->handle == bo->handle) {
-                update_reloc_domains(reloc, usage, bo->reloc_domains);
-
-                csc->relocs_hashlist[hash] = reloc;
-                csc->reloc_indices_hashlist[hash] = i;
-                /*printf("write_reloc collision, hash: %i, handle: %i\n", hash, bo->handle);*/
-                return i;
-            }
+    unsigned hash = bo->handle & (Elements(csc->reloc_indices_hashlist)-1);
+    enum radeon_bo_domain rd = usage & RADEON_USAGE_READ ? domains : 0;
+    enum radeon_bo_domain wd = usage & RADEON_USAGE_WRITE ? domains : 0;
+    int i = -1;
+
+    priority = MIN2(priority, 15);
+    *added_domains = 0;
+
+    i = radeon_get_reloc(csc, bo);
+
+    if (i >= 0) {
+        reloc = &csc->relocs[i];
+        update_reloc(reloc, rd, wd, priority, added_domains);
+
+        /* For async DMA, every add_reloc call must add a buffer to the list
+         * no matter how many duplicates there are. This is because the
+         * DMA CS checker doesn't use NOP packets for offset patching,
+         * but always uses the i-th buffer from the list to patch the i-th
+         * offset. If there are N offsets in a DMA CS, there must also be N
+         * buffers in the relocation list.
+         *
+         * This doesn't have to be done if virtual memory is enabled,
+         * because there is no offset patching with virtual memory.
+         */
+        if (cs->base.ring_type != RING_DMA || cs->ws->info.r600_virtual_address) {
+            return i;
         }
     }
 
@@ -264,10 +287,10 @@ static unsigned radeon_add_reloc(struct radeon_cs_context *csc,
         csc->nrelocs += 10;
 
         size = csc->nrelocs * sizeof(struct radeon_bo*);
-        csc->relocs_bo = (struct radeon_bo**)realloc(csc->relocs_bo, size);
+        csc->relocs_bo = realloc(csc->relocs_bo, size);
 
         size = csc->nrelocs * sizeof(struct drm_radeon_cs_reloc);
-        csc->relocs = (struct drm_radeon_cs_reloc*)realloc(csc->relocs, size);
+        csc->relocs = realloc(csc->relocs, size);
 
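+        /* realloc may have moved the reloc array, so refresh the pointer
+         * the kernel reads the relocs through. */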
         csc->chunks[1].chunk_data = (uint64_t)(uintptr_t)csc->relocs;
     }
@@ -278,40 +301,45 @@ static unsigned radeon_add_reloc(struct radeon_cs_context *csc,
     p_atomic_inc(&bo->num_cs_references);
     reloc = &csc->relocs[csc->crelocs];
     reloc->handle = bo->handle;
-    if (usage & RADEON_USAGE_READ)
-        reloc->read_domains = bo->reloc_domains;
-    if (usage & RADEON_USAGE_WRITE)
-        reloc->write_domain = bo->reloc_domains;
-    reloc->flags = 0;
-
-    csc->is_handle_added[hash] = TRUE;
-    csc->relocs_hashlist[hash] = reloc;
+    reloc->read_domains = rd;
+    reloc->write_domain = wd;
+    reloc->flags = priority;
+
     csc->reloc_indices_hashlist[hash] = csc->crelocs;
 
     csc->chunks[1].length_dw += RELOC_DWORDS;
 
-    *added_domains = bo->reloc_domains;
+    *added_domains = rd | wd;
     return csc->crelocs++;
 }
 
 static unsigned radeon_drm_cs_add_reloc(struct radeon_winsys_cs *rcs,
                                         struct radeon_winsys_cs_handle *buf,
-                                        enum radeon_bo_usage usage)
+                                        enum radeon_bo_usage usage,
+                                        enum radeon_bo_domain domains,
+                                        enum radeon_bo_priority priority)
 {
     struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
     struct radeon_bo *bo = (struct radeon_bo*)buf;
-    unsigned added_domains = 0;
-
-    unsigned index = radeon_add_reloc(cs->csc, bo, usage, &added_domains);
+    enum radeon_bo_domain added_domains;
+    unsigned index = radeon_add_reloc(cs, bo, usage, domains, priority, &added_domains);
 
-    if (added_domains & RADEON_GEM_DOMAIN_GTT)
+    if (added_domains & RADEON_DOMAIN_GTT)
         cs->csc->used_gart += bo->base.size;
-    if (added_domains & RADEON_GEM_DOMAIN_VRAM)
+    if (added_domains & RADEON_DOMAIN_VRAM)
         cs->csc->used_vram += bo->base.size;
 
     return index;
 }
 
+static int radeon_drm_cs_get_reloc(struct radeon_winsys_cs *rcs,
+                                   struct radeon_winsys_cs_handle *buf)
+{
+    struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
+
+    return radeon_get_reloc(cs->csc, (struct radeon_bo*)buf);
+}
+
 static boolean radeon_drm_cs_validate(struct radeon_winsys_cs *rcs)
 {
     struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
@@ -335,7 +363,7 @@ static boolean radeon_drm_cs_validate(struct radeon_winsys_cs *rcs)
 
         /* Flush if there are any relocs. Clean up otherwise. */
         if (cs->csc->crelocs) {
-            cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC);
+            cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
         } else {
             radeon_cs_context_cleanup(cs->csc);
 
@@ -348,24 +376,17 @@ static boolean radeon_drm_cs_validate(struct radeon_winsys_cs *rcs)
     return status;
 }
 
-static void radeon_drm_cs_write_reloc(struct radeon_winsys_cs *rcs,
-                                      struct radeon_winsys_cs_handle *buf)
+static boolean radeon_drm_cs_memory_below_limit(struct radeon_winsys_cs *rcs, uint64_t vram, uint64_t gtt)
 {
     struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
-    struct radeon_bo *bo = (struct radeon_bo*)buf;
-
-    unsigned index = radeon_get_reloc(cs->csc, bo);
-
-    if (index == -1) {
-        fprintf(stderr, "radeon: Cannot get a relocation in %s.\n", __func__);
-        return;
-    }
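+    /* Heuristic: only report success while projected usage stays below
+     * 70% of each memory heap. */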
+    boolean status =
+        (cs->csc->used_gart + gtt) < cs->ws->info.gart_size * 0.7 &&
+        (cs->csc->used_vram + vram) < cs->ws->info.vram_size * 0.7;
 
-    OUT_CS(&cs->base, 0xc0001000);
-    OUT_CS(&cs->base, index * RELOC_DWORDS);
+    return status;
 }
 
-static void radeon_drm_cs_emit_ioctl_oneshot(struct radeon_cs_context *csc)
+void radeon_drm_cs_emit_ioctl_oneshot(struct radeon_drm_cs *cs, struct radeon_cs_context *csc)
 {
     unsigned i;
 
@@ -384,51 +405,98 @@ static void radeon_drm_cs_emit_ioctl_oneshot(struct radeon_cs_context *csc)
         }
     }
 
+    if (cs->trace_buf) {
+        radeon_dump_cs_on_lockup(cs, csc);
+    }
+
     for (i = 0; i < csc->crelocs; i++)
         p_atomic_dec(&csc->relocs_bo[i]->num_active_ioctls);
 
     radeon_cs_context_cleanup(csc);
 }
 
-static PIPE_THREAD_ROUTINE(radeon_drm_cs_emit_ioctl, param)
+/*
+ * Make sure any previous submission of this CS has completed.
+ */
+void radeon_drm_cs_sync_flush(struct radeon_winsys_cs *rcs)
 {
-    struct radeon_drm_cs *cs = (struct radeon_drm_cs*)param;
-
-    while (1) {
-        pipe_semaphore_wait(&cs->flush_queued);
-        if (cs->kill_thread)
-            break;
-        radeon_drm_cs_emit_ioctl_oneshot(cs->cst);
-        pipe_semaphore_signal(&cs->flush_completed);
-    }
-    pipe_semaphore_signal(&cs->flush_completed);
-    return NULL;
-}
+    struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
 
-void radeon_drm_cs_sync_flush(struct radeon_drm_cs *cs)
-{
     /* Wait for any pending ioctl to complete. */
-    if (cs->thread && cs->flush_started) {
+    if (cs->ws->thread) {
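+        /* Acquire and immediately release the semaphore: this blocks until
+         * the submission thread signals completion, while leaving the
+         * semaphore available for the next waiter. */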
         pipe_semaphore_wait(&cs->flush_completed);
-        cs->flush_started = 0;
+        pipe_semaphore_signal(&cs->flush_completed);
     }
 }
 
-static void radeon_drm_cs_flush(struct radeon_winsys_cs *rcs, unsigned flags)
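+/* RADEON_NOOP=1 makes flush discard command streams instead of submitting
+ * them (debugging aid). */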
+DEBUG_GET_ONCE_BOOL_OPTION(noop, "RADEON_NOOP", FALSE)
+
+static void radeon_drm_cs_flush(struct radeon_winsys_cs *rcs,
+                                unsigned flags,
+                                struct pipe_fence_handle **fence,
+                                uint32_t cs_trace_id)
 {
     struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
     struct radeon_cs_context *tmp;
 
-    radeon_drm_cs_sync_flush(cs);
+    switch (cs->base.ring_type) {
+    case RING_DMA:
+        /* pad DMA ring to 8 DWs */
+        if (cs->ws->info.chip_class <= SI) {
+            while (rcs->cdw & 7)
+                OUT_CS(&cs->base, 0xf0000000); /* NOP packet */
+        } else {
+            while (rcs->cdw & 7)
+                OUT_CS(&cs->base, 0x00000000); /* NOP packet */
+        }
+        break;
+    case RING_GFX:
+        /* pad GFX ring to 8 DWs to meet CP fetch alignment requirements.
+         * r6xx requires at least 4 DW alignment to avoid a hw bug.
+         * hawaii with old firmware needs a type2 nop packet;
+         * accel_working2 with value 3 indicates the new firmware.
+         */
+        if (cs->ws->info.chip_class <= SI ||
+            (cs->ws->info.family == CHIP_HAWAII &&
+             cs->ws->accel_working2 < 3)) {
+            while (rcs->cdw & 7)
+                OUT_CS(&cs->base, 0x80000000); /* type2 nop packet */
+        } else {
+            while (rcs->cdw & 7)
+                OUT_CS(&cs->base, 0xffff1000); /* type3 nop packet */
+        }
+        break;
+    case RING_UVD:
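+        /* pad UVD ring to 16 DWs */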
+        while (rcs->cdw & 15)
+            OUT_CS(&cs->base, 0x80000000); /* type2 nop packet */
+        break;
+    default:
+        break;
+    }
+
+    if (rcs->cdw > rcs->max_dw) {
+        fprintf(stderr, "radeon: command stream overflowed\n");
+    }
 
-    /* Flip command streams. */
+    if (fence) {
+        radeon_fence_reference(fence, NULL);
+        *fence = radeon_cs_create_fence(rcs);
+    }
+
+    radeon_drm_cs_sync_flush(rcs);
+
+    /* Swap command streams. */
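+    /* cst is handed to the submission path while the driver keeps filling
+     * csc; this double buffering is what makes flushes asynchronous. */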
     tmp = cs->csc;
     cs->csc = cs->cst;
     cs->cst = tmp;
 
-    /* If the CS is not empty, emit it in a separate thread. */
-    if (cs->base.cdw) {
-        unsigned i, crelocs = cs->cst->crelocs;
+    cs->cst->cs_trace_id = cs_trace_id;
+
+    /* If the CS is not empty and did not overflow, submit it. */
+    if (cs->base.cdw && cs->base.cdw <= cs->base.max_dw && !debug_get_option_noop()) {
+        unsigned i, crelocs;
+
+        crelocs = cs->cst->crelocs;
 
         cs->cst->chunks[0].length_dw = cs->base.cdw;
 
@@ -437,19 +505,59 @@ static void radeon_drm_cs_flush(struct radeon_winsys_cs *rcs, unsigned flags)
             p_atomic_inc(&cs->cst->relocs_bo[i]->num_active_ioctls);
         }
 
-        if (flags & RADEON_FLUSH_KEEP_TILING_FLAGS) {
+        switch (cs->base.ring_type) {
+        case RING_DMA:
+            cs->cst->flags[0] = 0;
+            cs->cst->flags[1] = RADEON_CS_RING_DMA;
             cs->cst->cs.num_chunks = 3;
-            cs->cst->flags = RADEON_CS_KEEP_TILING_FLAGS;
-        } else {
+            if (cs->ws->info.r600_virtual_address) {
+                cs->cst->flags[0] |= RADEON_CS_USE_VM;
+            }
+            break;
+
+        case RING_UVD:
+            cs->cst->flags[0] = 0;
+            cs->cst->flags[1] = RADEON_CS_RING_UVD;
+            cs->cst->cs.num_chunks = 3;
+            break;
+
+        case RING_VCE:
+            cs->cst->flags[0] = 0;
+            cs->cst->flags[1] = RADEON_CS_RING_VCE;
+            cs->cst->cs.num_chunks = 3;
+            break;
+
+        default:
+        case RING_GFX:
+            cs->cst->flags[0] = 0;
+            cs->cst->flags[1] = RADEON_CS_RING_GFX;
             cs->cst->cs.num_chunks = 2;
+            if (flags & RADEON_FLUSH_KEEP_TILING_FLAGS) {
+                cs->cst->flags[0] |= RADEON_CS_KEEP_TILING_FLAGS;
+                cs->cst->cs.num_chunks = 3;
+            }
+            if (cs->ws->info.r600_virtual_address) {
+                cs->cst->flags[0] |= RADEON_CS_USE_VM;
+                cs->cst->cs.num_chunks = 3;
+            }
+            if (flags & RADEON_FLUSH_END_OF_FRAME) {
+                cs->cst->flags[0] |= RADEON_CS_END_OF_FRAME;
+                cs->cst->cs.num_chunks = 3;
+            }
+            if (flags & RADEON_FLUSH_COMPUTE) {
+                cs->cst->flags[1] = RADEON_CS_RING_COMPUTE;
+                cs->cst->cs.num_chunks = 3;
+            }
+            break;
         }
 
-        if (cs->thread &&
-            (flags & RADEON_FLUSH_ASYNC)) {
-            cs->flush_started = 1;
-            pipe_semaphore_signal(&cs->flush_queued);
+        if (cs->ws->thread) {
+            pipe_semaphore_wait(&cs->flush_completed);
+            radeon_drm_ws_queue_cs(cs->ws, cs);
+            if (!(flags & RADEON_FLUSH_ASYNC))
+                radeon_drm_cs_sync_flush(rcs);
         } else {
-            radeon_drm_cs_emit_ioctl_oneshot(cs->cst);
+            radeon_drm_cs_emit_ioctl_oneshot(cs, cs->cst);
         }
     } else {
         radeon_cs_context_cleanup(cs->cst);
@@ -458,20 +566,15 @@ static void radeon_drm_cs_flush(struct radeon_winsys_cs *rcs, unsigned flags)
     /* Prepare a new CS. */
     cs->base.buf = cs->csc->buf;
     cs->base.cdw = 0;
+
+    cs->ws->num_cs_flushes++;
 }
 
 static void radeon_drm_cs_destroy(struct radeon_winsys_cs *rcs)
 {
     struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
-    radeon_drm_cs_sync_flush(cs);
-    if (cs->thread) {
-        cs->kill_thread = 1;
-        pipe_semaphore_signal(&cs->flush_queued);
-        pipe_semaphore_wait(&cs->flush_completed);
-        pipe_thread_wait(cs->thread);
-        pipe_thread_destroy(cs->thread);
-    }
-    pipe_semaphore_destroy(&cs->flush_queued);
+
+    radeon_drm_cs_sync_flush(rcs);
     pipe_semaphore_destroy(&cs->flush_completed);
     radeon_cs_context_cleanup(&cs->csc1);
     radeon_cs_context_cleanup(&cs->csc2);
@@ -481,22 +584,80 @@ static void radeon_drm_cs_destroy(struct radeon_winsys_cs *rcs)
     FREE(cs);
 }
 
-static void radeon_drm_cs_set_flush(struct radeon_winsys_cs *rcs,
-                                    void (*flush)(void *ctx, unsigned flags),
-                                    void *user)
+static boolean radeon_bo_is_referenced(struct radeon_winsys_cs *rcs,
+                                       struct radeon_winsys_cs_handle *_buf,
+                                       enum radeon_bo_usage usage)
 {
     struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
-    cs->flush_cs = flush;
-    cs->flush_data = user;
+    struct radeon_bo *bo = (struct radeon_bo*)_buf;
+    int index;
+
+    if (!bo->num_cs_references)
+        return FALSE;
+
+    index = radeon_get_reloc(cs->csc, bo);
+    if (index == -1)
+        return FALSE;
+
+    if ((usage & RADEON_USAGE_WRITE) && cs->csc->relocs[index].write_domain)
+        return TRUE;
+    if ((usage & RADEON_USAGE_READ) && cs->csc->relocs[index].read_domains)
+        return TRUE;
+
+    return FALSE;
 }
 
-static boolean radeon_bo_is_referenced(struct radeon_winsys_cs *rcs,
-                                       struct radeon_winsys_cs_handle *_buf)
+/* FENCES */
+
+static struct pipe_fence_handle *
+radeon_cs_create_fence(struct radeon_winsys_cs *rcs)
 {
     struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
-    struct radeon_bo *bo = (struct radeon_bo*)_buf;
+    struct pb_buffer *fence;
+
+    /* Create a fence, which is a dummy BO. */
+    fence = cs->ws->base.buffer_create(&cs->ws->base, 1, 1, TRUE,
+                                       RADEON_DOMAIN_GTT, 0);
+    /* Add the fence as a dummy relocation. */
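+    /* The kernel will consider the fence BO busy until this CS retires,
+     * so waiting on the BO is equivalent to waiting on the CS. */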
+    cs->ws->base.cs_add_reloc(rcs, cs->ws->base.buffer_get_cs_handle(fence),
+                              RADEON_USAGE_READWRITE, RADEON_DOMAIN_GTT,
+                              RADEON_PRIO_MIN);
+    return (struct pipe_fence_handle*)fence;
+}
 
-    return radeon_bo_is_referenced_by_cs(cs, bo);
+static bool radeon_fence_wait(struct radeon_winsys *ws,
+                              struct pipe_fence_handle *fence,
+                              uint64_t timeout)
+{
+    struct pb_buffer *rfence = (struct pb_buffer*)fence;
+
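+    /* A zero timeout means: just query the current busy status. */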
+    if (timeout == 0)
+        return !ws->buffer_is_busy(rfence, RADEON_USAGE_READWRITE);
+
+    if (timeout != PIPE_TIMEOUT_INFINITE) {
+        int64_t start_time = os_time_get();
+
+        /* Convert to microseconds. */
+        timeout /= 1000;
+
+        /* Wait in a loop. */
+        while (ws->buffer_is_busy(rfence, RADEON_USAGE_READWRITE)) {
+            if (os_time_get() - start_time >= timeout) {
+                return FALSE;
+            }
+            os_time_sleep(10);
+        }
+        return TRUE;
+    }
+
+    ws->buffer_wait(rfence, RADEON_USAGE_READWRITE);
+    return TRUE;
+}
+
+static void radeon_fence_reference(struct pipe_fence_handle **dst,
+                                   struct pipe_fence_handle *src)
+{
+    pb_reference((struct pb_buffer**)dst, (struct pb_buffer*)src);
 }
 
 void radeon_drm_cs_init_functions(struct radeon_drm_winsys *ws)
@@ -504,9 +665,12 @@ void radeon_drm_cs_init_functions(struct radeon_drm_winsys *ws)
     ws->base.cs_create = radeon_drm_cs_create;
     ws->base.cs_destroy = radeon_drm_cs_destroy;
     ws->base.cs_add_reloc = radeon_drm_cs_add_reloc;
+    ws->base.cs_get_reloc = radeon_drm_cs_get_reloc;
     ws->base.cs_validate = radeon_drm_cs_validate;
-    ws->base.cs_write_reloc = radeon_drm_cs_write_reloc;
+    ws->base.cs_memory_below_limit = radeon_drm_cs_memory_below_limit;
     ws->base.cs_flush = radeon_drm_cs_flush;
-    ws->base.cs_set_flush_callback = radeon_drm_cs_set_flush;
     ws->base.cs_is_buffer_referenced = radeon_bo_is_referenced;
+    ws->base.cs_sync_flush = radeon_drm_cs_sync_flush;
+    ws->base.fence_wait = radeon_fence_wait;
+    ws->base.fence_reference = radeon_fence_reference;
 }