gallium: add support for programmable sample locations
[mesa.git] / src / gallium / drivers / r600 / compute_memory_pool.c
index 9fd253f406eab9ed7a1291d608ce28cbbd3c0789..981d944b8d0a1fcb8227d25bc763ae328290639b 100644 (file)
@@ -26,7 +26,7 @@
 #include "pipe/p_state.h"
 #include "pipe/p_context.h"
 #include "util/u_blitter.h"
-#include "util/u_double_list.h"
+#include "util/list.h"
 #include "util/u_transfer.h"
 #include "util/u_surface.h"
 #include "util/u_pack_color.h"
 #include <inttypes.h>
 
 #define ITEM_ALIGNMENT 1024
+
+/* A few forward declarations of static functions */
+static void compute_memory_shadow(struct compute_memory_pool* pool,
+       struct pipe_context *pipe, int device_to_host);
+
+static void compute_memory_defrag(struct compute_memory_pool *pool,
+       struct pipe_resource *src, struct pipe_resource *dst,
+       struct pipe_context *pipe);
+
+static int compute_memory_promote_item(struct compute_memory_pool *pool,
+       struct compute_memory_item *item, struct pipe_context *pipe,
+       int64_t allocated);
+
+static void compute_memory_move_item(struct compute_memory_pool *pool,
+       struct pipe_resource *src, struct pipe_resource *dst,
+       struct compute_memory_item *item, uint64_t new_start_in_dw,
+       struct pipe_context *pipe);
+
+static void compute_memory_transfer(struct compute_memory_pool* pool,
+       struct pipe_context * pipe, int device_to_host,
+       struct compute_memory_item* chunk, void* data,
+       int offset_in_chunk, int size);
+
 /**
- * Creates a new pool
+ * Creates a new pool.
  */
 struct compute_memory_pool* compute_memory_pool_new(
        struct r600_screen * rscreen)
 {
        struct compute_memory_pool* pool = (struct compute_memory_pool*)
                                CALLOC(sizeof(struct compute_memory_pool), 1);
-       if (pool == NULL)
+       if (!pool)
                return NULL;
 
        COMPUTE_DBG(rscreen, "* compute_memory_pool_new()\n");
@@ -66,6 +89,12 @@ struct compute_memory_pool* compute_memory_pool_new(
        return pool;
 }
 
+/**
+ * Initializes the pool with a size of \a initial_size_in_dw.
+ * \param pool                 The pool to be initialized.
+ * \param initial_size_in_dw   The initial size.
+ * \see compute_memory_grow_defrag_pool
+ */
 static void compute_memory_pool_init(struct compute_memory_pool * pool,
        unsigned initial_size_in_dw)
 {
@@ -73,138 +102,85 @@ static void compute_memory_pool_init(struct compute_memory_pool * pool,
        COMPUTE_DBG(pool->screen, "* compute_memory_pool_init() initial_size_in_dw = %u\n",
                initial_size_in_dw);
 
-       pool->shadow = (uint32_t*)CALLOC(initial_size_in_dw, 4);
-       if (pool->shadow == NULL)
-               return;
-
        pool->size_in_dw = initial_size_in_dw;
-       pool->bo = (struct r600_resource*)r600_compute_buffer_alloc_vram(pool->screen,
-                                                       pool->size_in_dw * 4);
+       pool->bo = r600_compute_buffer_alloc_vram(pool->screen,
+                                                 pool->size_in_dw * 4);
 }
 
 /**
- * Frees all stuff in the pool and the pool struct itself too
+ * Frees all stuff in the pool and the pool struct itself too.
  */
 void compute_memory_pool_delete(struct compute_memory_pool* pool)
 {
        COMPUTE_DBG(pool->screen, "* compute_memory_pool_delete()\n");
        free(pool->shadow);
-       if (pool->bo) {
-               pool->screen->b.b.resource_destroy((struct pipe_screen *)
-                       pool->screen, (struct pipe_resource *)pool->bo);
-       }
+       pipe_resource_reference(&pool->bo, NULL);
+       /* In theory, all of the items were freed in compute_memory_free.
+        * Just delete the list heads.
+        */
+       free(pool->item_list);
+       free(pool->unallocated_list);
+       /* And then the pool itself */
        free(pool);
 }
 
 /**
- * Searches for an empty space in the pool, return with the pointer to the
- * allocatable space in the pool, returns -1 on failure.
+ * Reallocates and defragments the pool, conserves data.
+ * \returns -1 if it fails, 0 otherwise
+ * \see compute_memory_finalize_pending
  */
-int64_t compute_memory_prealloc_chunk(
-       struct compute_memory_pool* pool,
-       int64_t size_in_dw)
+static int compute_memory_grow_defrag_pool(struct compute_memory_pool *pool,
+       struct pipe_context *pipe, int new_size_in_dw)
 {
-       struct compute_memory_item *item;
-
-       int last_end = 0;
-
-       assert(size_in_dw <= pool->size_in_dw);
-
-       COMPUTE_DBG(pool->screen, "* compute_memory_prealloc_chunk() size_in_dw = %" PRId64 "\n",
-               size_in_dw);
+       new_size_in_dw = align(new_size_in_dw, ITEM_ALIGNMENT);
 
-       LIST_FOR_EACH_ENTRY(item, pool->item_list, link) {
-               if (last_end + size_in_dw <= item->start_in_dw) {
-                       return last_end;
-               }
-
-               last_end = item->start_in_dw + align(item->size_in_dw, ITEM_ALIGNMENT);
-       }
+       COMPUTE_DBG(pool->screen, "* compute_memory_grow_defrag_pool() "
+               "new_size_in_dw = %d (%d bytes)\n",
+               new_size_in_dw, new_size_in_dw * 4);
 
-       if (pool->size_in_dw - last_end < size_in_dw) {
-               return -1;
-       }
+       assert(new_size_in_dw >= pool->size_in_dw);
 
-       return last_end;
-}
+       if (!pool->bo) {
+               compute_memory_pool_init(pool, MAX2(new_size_in_dw, 1024 * 16));
+       } else {
+               struct r600_resource *temp = NULL;
 
-/**
- *  Search for the chunk where we can link our new chunk after it.
- */
-struct list_head *compute_memory_postalloc_chunk(
-       struct compute_memory_pool* pool,
-       int64_t start_in_dw)
-{
-       struct compute_memory_item *item;
-       struct compute_memory_item *next;
-       struct list_head *next_link;
+               temp = r600_compute_buffer_alloc_vram(pool->screen, new_size_in_dw * 4);
 
-       COMPUTE_DBG(pool->screen, "* compute_memory_postalloc_chunck() start_in_dw = %" PRId64 "\n",
-               start_in_dw);
+               if (temp != NULL) {
+                       struct pipe_resource *src = (struct pipe_resource *)pool->bo;
+                       struct pipe_resource *dst = (struct pipe_resource *)temp;
 
-       /* Check if we can insert it in the front of the list */
-       item = LIST_ENTRY(struct compute_memory_item, pool->item_list->next, link);
-       if (LIST_IS_EMPTY(pool->item_list) || item->start_in_dw > start_in_dw) {
-               return pool->item_list;
-       }
+                       COMPUTE_DBG(pool->screen, "  Growing and defragmenting the pool "
+                                       "using a temporary resource\n");
 
-       LIST_FOR_EACH_ENTRY(item, pool->item_list, link) {
-               next_link = item->link.next;
+                       compute_memory_defrag(pool, src, dst, pipe);
 
-               if (next_link != pool->item_list) {
-                       next = container_of(next_link, item, link);
-                       if (item->start_in_dw < start_in_dw
-                               && next->start_in_dw > start_in_dw) {
-                               return &item->link;
-                       }
+                       /* Release the old buffer */
+                       pipe_resource_reference(&pool->bo, NULL);
+                       pool->bo = temp;
+                       pool->size_in_dw = new_size_in_dw;
                }
                else {
-                       /* end of chain */
-                       assert(item->start_in_dw < start_in_dw);
-                       return &item->link;
-               }
-       }
-
-       assert(0 && "unreachable");
-       return NULL;
-}
+                       COMPUTE_DBG(pool->screen, "  The creation of the temporary resource failed\n"
+                               "  Falling back to using 'shadow'\n");
 
-/**
- * Reallocates pool, conserves data.
- * @returns -1 if it fails, 0 otherwise
- */
-int compute_memory_grow_pool(struct compute_memory_pool* pool,
-       struct pipe_context * pipe, int new_size_in_dw)
-{
-       COMPUTE_DBG(pool->screen, "* compute_memory_grow_pool() "
-               "new_size_in_dw = %d (%d bytes)\n",
-               new_size_in_dw, new_size_in_dw * 4);
-
-       assert(new_size_in_dw >= pool->size_in_dw);
-
-       if (!pool->bo) {
-               compute_memory_pool_init(pool, MAX2(new_size_in_dw, 1024 * 16));
-               if (pool->shadow == NULL)
-                       return -1;
-       } else {
-               new_size_in_dw = align(new_size_in_dw, ITEM_ALIGNMENT);
-
-               COMPUTE_DBG(pool->screen, "  Aligned size = %d (%d bytes)\n",
-                       new_size_in_dw, new_size_in_dw * 4);
+                       compute_memory_shadow(pool, pipe, 1);
+                       pool->shadow = realloc(pool->shadow, new_size_in_dw * 4);
+                       if (pool->shadow == NULL)
+                               return -1;
 
-               compute_memory_shadow(pool, pipe, 1);
-               pool->shadow = realloc(pool->shadow, new_size_in_dw*4);
-               if (pool->shadow == NULL)
-                       return -1;
+                       pool->size_in_dw = new_size_in_dw;
+                       /* Release the old buffer */
+                       pipe_resource_reference(&pool->bo, NULL);
+                       pool->bo = r600_compute_buffer_alloc_vram(pool->screen, pool->size_in_dw * 4);
+                       compute_memory_shadow(pool, pipe, 0);
 
-               pool->size_in_dw = new_size_in_dw;
-               pool->screen->b.b.resource_destroy(
-                       (struct pipe_screen *)pool->screen,
-                       (struct pipe_resource *)pool->bo);
-               pool->bo = (struct r600_resource*)r600_compute_buffer_alloc_vram(
-                                                       pool->screen,
-                                                       pool->size_in_dw * 4);
-               compute_memory_shadow(pool, pipe, 0);
+                       if (pool->status & POOL_FRAGMENTED) {
+                               struct pipe_resource *src = (struct pipe_resource *)pool->bo;
+                               compute_memory_defrag(pool, src, src, pipe);
+                       }
+               }
        }
 
        return 0;
@@ -212,8 +188,10 @@ int compute_memory_grow_pool(struct compute_memory_pool* pool,
 
 /**
  * Copy pool from device to host, or host to device.
+ * \param device_to_host 1 for device->host, 0 for host->device
+ * \see compute_memory_grow_defrag_pool
  */
-void compute_memory_shadow(struct compute_memory_pool* pool,
+static void compute_memory_shadow(struct compute_memory_pool* pool,
        struct pipe_context * pipe, int device_to_host)
 {
        struct compute_memory_item chunk;
@@ -229,8 +207,10 @@ void compute_memory_shadow(struct compute_memory_pool* pool,
 }
 
 /**
- * Allocates pending allocations in the pool
- * @returns -1 if it fails, 0 otherwise
+ * Moves all the items marked for promotion from the \a unallocated_list
+ * to the \a item_list.
+ * \return -1 if it fails, 0 otherwise
+ * \see evergreen_set_global_binding
  */
 int compute_memory_finalize_pending(struct compute_memory_pool* pool,
        struct pipe_context * pipe)
@@ -246,8 +226,8 @@ int compute_memory_finalize_pending(struct compute_memory_pool* pool,
        COMPUTE_DBG(pool->screen, "* compute_memory_finalize_pending()\n");
 
        LIST_FOR_EACH_ENTRY(item, pool->item_list, link) {
-               COMPUTE_DBG(pool->screen, "  + list: offset = %" PRId64 " id = %" PRId64 " size = %" PRId64
-                       " (%" PRId64 " bytes)\n",item->start_in_dw, item->id,
+               COMPUTE_DBG(pool->screen, "  + list: offset = %"PRIi64" id = %"PRIi64" size = %"PRIi64" "
+                       "(%"PRIi64" bytes)\n", item->start_in_dw, item->id,
                        item->size_in_dw, item->size_in_dw * 4);
        }
 
@@ -267,15 +247,15 @@ int compute_memory_finalize_pending(struct compute_memory_pool* pool,
                return 0;
        }
 
-       if (pool->status & POOL_FRAGMENTED) {
-               compute_memory_defrag(pool, pipe);
-       }
-
        if (pool->size_in_dw < allocated + unallocated) {
-               err = compute_memory_grow_pool(pool, pipe, allocated + unallocated);
+               err = compute_memory_grow_defrag_pool(pool, pipe, allocated + unallocated);
                if (err == -1)
                        return -1;
        }
+       else if (pool->status & POOL_FRAGMENTED) {
+               struct pipe_resource *src = (struct pipe_resource *)pool->bo;
+               compute_memory_defrag(pool, src, src, pipe);
+       }
 
        /* After defragmenting the pool, allocated is equal to the first available
         * position for new items in the pool */
@@ -301,8 +281,12 @@ int compute_memory_finalize_pending(struct compute_memory_pool* pool,
 /**
  * Defragments the pool, so that there's no gap between items.
  * \param pool The pool to be defragmented
+ * \param src  The origin resource
+ * \param dst  The destination resource
+ * \see compute_memory_grow_defrag_pool and compute_memory_finalize_pending
  */
-void compute_memory_defrag(struct compute_memory_pool *pool,
+static void compute_memory_defrag(struct compute_memory_pool *pool,
+       struct pipe_resource *src, struct pipe_resource *dst,
        struct pipe_context *pipe)
 {
        struct compute_memory_item *item;
@@ -312,10 +296,11 @@ void compute_memory_defrag(struct compute_memory_pool *pool,
 
        last_pos = 0;
        LIST_FOR_EACH_ENTRY(item, pool->item_list, link) {
-               if (item->start_in_dw != last_pos) {
-                       assert(last_pos < item->start_in_dw);
+               if (src != dst || item->start_in_dw != last_pos) {
+                       assert(last_pos <= item->start_in_dw);
 
-                       compute_memory_move_item(pool, item, last_pos, pipe);
+                       compute_memory_move_item(pool, src, dst,
+                                       item, last_pos, pipe);
                }
 
                last_pos += align(item->size_in_dw, ITEM_ALIGNMENT);
@@ -324,7 +309,13 @@ void compute_memory_defrag(struct compute_memory_pool *pool,
        pool->status &= ~POOL_FRAGMENTED;
 }
 
-int compute_memory_promote_item(struct compute_memory_pool *pool,
+/**
+ * Moves an item from the \a unallocated_list to the \a item_list.
+ * \param item The item that will be promoted.
+ * \return -1 if it fails, 0 otherwise
+ * \see compute_memory_finalize_pending
+ */
+static int compute_memory_promote_item(struct compute_memory_pool *pool,
                struct compute_memory_item *item, struct pipe_context *pipe,
                int64_t start_in_dw)
 {
@@ -334,11 +325,12 @@ int compute_memory_promote_item(struct compute_memory_pool *pool,
        struct pipe_resource *dst = (struct pipe_resource *)pool->bo;
        struct pipe_box box;
 
-       COMPUTE_DBG(pool->screen, "  + Found space for Item %p id = %" PRId64
-                       " start_in_dw = %" PRId64 " (%" PRId64 " bytes) "
-                       "size_in_dw = %" PRId64 " (%" PRId64 " bytes)\n",
-                       item, item->id, start_in_dw, start_in_dw * 4,
-                       item->size_in_dw, item->size_in_dw * 4);
+       COMPUTE_DBG(pool->screen, "* compute_memory_promote_item()\n"
+                       "  + Promoting Item: %"PRIi64" , starting at: %"PRIi64" (%"PRIi64" bytes) "
+                       "size: %"PRIi64" (%"PRIi64" bytes)\n\t\t\tnew start: %"PRIi64" (%"PRIi64" bytes)\n",
+                       item->id, item->start_in_dw, item->start_in_dw * 4,
+                       item->size_in_dw, item->size_in_dw * 4,
+                       start_in_dw, start_in_dw * 4);
 
        /* Remove the item from the unallocated list */
        list_del(&item->link);
@@ -347,7 +339,7 @@ int compute_memory_promote_item(struct compute_memory_pool *pool,
        list_addtail(&item->link, pool->item_list);
        item->start_in_dw = start_in_dw;
 
-       if (src != NULL) {
+       if (src) {
                u_box_1d(0, item->size_in_dw * 4, &box);
 
                rctx->b.b.resource_copy_region(pipe,
@@ -367,6 +359,11 @@ int compute_memory_promote_item(struct compute_memory_pool *pool,
        return 0;
 }
 
+/**
+ * Moves an item from the \a item_list to the \a unallocated_list.
+ * \param item The item that will be demoted
+ * \see r600_compute_global_transfer_map
+ */
 void compute_memory_demote_item(struct compute_memory_pool *pool,
        struct compute_memory_item *item, struct pipe_context *pipe)
 {
@@ -375,6 +372,11 @@ void compute_memory_demote_item(struct compute_memory_pool *pool,
        struct pipe_resource *dst;
        struct pipe_box box;
 
+       COMPUTE_DBG(pool->screen, "* compute_memory_demote_item()\n"
+                       "  + Demoting Item: %"PRIi64", starting at: %"PRIi64" (%"PRIi64" bytes) "
+                       "size: %"PRIi64" (%"PRIi64" bytes)\n", item->id, item->start_in_dw,
+                       item->start_in_dw * 4, item->size_in_dw, item->size_in_dw * 4);
+
        /* First, we remove the item from the item_list */
        list_del(&item->link);
 
@@ -384,7 +386,7 @@ void compute_memory_demote_item(struct compute_memory_pool *pool,
        /* We check if the intermediate buffer exists, and if it
         * doesn't, we create it again */
        if (item->real_buffer == NULL) {
-               item->real_buffer = (struct r600_resource*)r600_compute_buffer_alloc_vram(
+               item->real_buffer = r600_compute_buffer_alloc_vram(
                                pool->screen, item->size_in_dw * 4);
        }
 
@@ -407,31 +409,30 @@ void compute_memory_demote_item(struct compute_memory_pool *pool,
 }
 
 /**
- * Moves the item \a item forward in the pool to \a new_start_in_dw
+ * Moves the item \a item forward from the resource \a src to the
+ * resource \a dst at \a new_start_in_dw
  *
  * This function assumes two things:
- * 1) The item is \b only moved forward
+ * 1) The item is \b only moved forward, unless src is different from dst
  * 2) The item \b won't change it's position inside the \a item_list
  *
  * \param item                 The item that will be moved
  * \param new_start_in_dw      The new position of the item in \a item_list
  * \see compute_memory_defrag
  */
-void compute_memory_move_item(struct compute_memory_pool *pool,
+static void compute_memory_move_item(struct compute_memory_pool *pool,
+       struct pipe_resource *src, struct pipe_resource *dst,
        struct compute_memory_item *item, uint64_t new_start_in_dw,
        struct pipe_context *pipe)
 {
        struct pipe_screen *screen = (struct pipe_screen *)pool->screen;
        struct r600_context *rctx = (struct r600_context *)pipe;
-       struct pipe_resource *src = (struct pipe_resource *)pool->bo;
-       struct pipe_resource *dst;
        struct pipe_box box;
 
-       struct compute_memory_item *prev;
+       MAYBE_UNUSED struct compute_memory_item *prev;
 
        COMPUTE_DBG(pool->screen, "* compute_memory_move_item()\n"
-                       "  + Moving item %" PRId64 " from %" PRId64
-                       " (%" PRId64 " bytes) to %" PRId64 " (%" PRId64 " bytes)\n",
+                       "  + Moving item %"PRIi64" from %"PRIi64" (%"PRIi64" bytes) to %"PRIu64" (%"PRIu64" bytes)\n",
                        item->id, item->start_in_dw, item->start_in_dw * 4,
                        new_start_in_dw, new_start_in_dw * 4);
 
@@ -442,9 +443,9 @@ void compute_memory_move_item(struct compute_memory_pool *pool,
 
        u_box_1d(item->start_in_dw * 4, item->size_in_dw * 4, &box);
 
-       /* If the ranges don't overlap, we can just copy the item directly */
-       if (new_start_in_dw + item->size_in_dw <= item->start_in_dw) {
-               dst = (struct pipe_resource *)pool->bo;
+       /* If the ranges don't overlap, or we are copying from one resource
+        * to another, we can just copy the item directly */
+       if (src != dst || new_start_in_dw + item->size_in_dw <= item->start_in_dw) {
 
                rctx->b.b.resource_copy_region(pipe,
                        dst, 0, new_start_in_dw * 4, 0, 0,
@@ -452,24 +453,21 @@ void compute_memory_move_item(struct compute_memory_pool *pool,
        } else {
                /* The ranges overlap, we will try first to use an intermediate
                 * resource to move the item */
-               dst = (struct pipe_resource *)r600_compute_buffer_alloc_vram(
-                               pool->screen, item->size_in_dw * 4);
+               struct pipe_resource *tmp = (struct pipe_resource *)
+                       r600_compute_buffer_alloc_vram(pool->screen, item->size_in_dw * 4);
 
-               if (dst != NULL) {
+               if (tmp != NULL) {
                        rctx->b.b.resource_copy_region(pipe,
-                               dst, 0, 0, 0, 0,
+                               tmp, 0, 0, 0, 0,
                                src, 0, &box);
 
-                       src = dst;
-                       dst = (struct pipe_resource *)pool->bo;
-
                        box.x = 0;
 
                        rctx->b.b.resource_copy_region(pipe,
                                dst, 0, new_start_in_dw * 4, 0, 0,
-                               src, 0, &box);
+                               tmp, 0, &box);
 
-                       pool->screen->b.b.resource_destroy(screen, src);
+                       pool->screen->b.b.resource_destroy(screen, tmp);
 
                } else {
                        /* The allocation of the temporary resource failed,
@@ -497,13 +495,17 @@ void compute_memory_move_item(struct compute_memory_pool *pool,
        item->start_in_dw = new_start_in_dw;
 }
 
+/**
+ * Frees the memory associated to the item with id \a id from the pool.
+ * \param id   The id of the item to be freed.
+ */
 void compute_memory_free(struct compute_memory_pool* pool, int64_t id)
 {
        struct compute_memory_item *item, *next;
        struct pipe_screen *screen = (struct pipe_screen *)pool->screen;
        struct pipe_resource *res;
 
-       COMPUTE_DBG(pool->screen, "* compute_memory_free() id + %" PRId64 "\n", id);
+       COMPUTE_DBG(pool->screen, "* compute_memory_free() id + %"PRIi64" \n", id);
 
        LIST_FOR_EACH_ENTRY_SAFE(item, next, pool->item_list, link) {
 
@@ -551,7 +553,11 @@ void compute_memory_free(struct compute_memory_pool* pool, int64_t id)
 }
 
 /**
- * Creates pending allocations
+ * Creates pending allocations for new items; these items are
+ * placed in the unallocated_list.
+ * \param size_in_dw   The size, in double words, of the new item.
+ * \return The new item
+ * \see r600_compute_global_buffer_create
  */
 struct compute_memory_item* compute_memory_alloc(
        struct compute_memory_pool* pool,
@@ -559,13 +565,12 @@ struct compute_memory_item* compute_memory_alloc(
 {
        struct compute_memory_item *new_item = NULL;
 
-       COMPUTE_DBG(pool->screen, "* compute_memory_alloc() size_in_dw = %" PRId64
-                                 " (%" PRId64 " bytes)\n",
+       COMPUTE_DBG(pool->screen, "* compute_memory_alloc() size_in_dw = %"PRIi64" (%"PRIi64" bytes)\n",
                        size_in_dw, 4 * size_in_dw);
 
        new_item = (struct compute_memory_item *)
                                CALLOC(sizeof(struct compute_memory_item), 1);
-       if (new_item == NULL)
+       if (!new_item)
                return NULL;
 
        new_item->size_in_dw = size_in_dw;
@@ -576,17 +581,18 @@ struct compute_memory_item* compute_memory_alloc(
 
        list_addtail(&new_item->link, pool->unallocated_list);
 
-       COMPUTE_DBG(pool->screen, "  + Adding item %p id = %" PRId64 " size = %" PRId64
-                                 " (%" PRId64 " bytes)\n",
+       COMPUTE_DBG(pool->screen, "  + Adding item %p id = %"PRIi64" size = %"PRIi64" (%"PRIi64" bytes)\n",
                        new_item, new_item->id, new_item->size_in_dw,
                        new_item->size_in_dw * 4);
        return new_item;
 }
 
 /**
- * Transfer data host<->device, offset and size is in bytes
+ * Transfer data host<->device, offset and size is in bytes.
+ * \param device_to_host 1 for device->host, 0 for host->device.
+ * \see compute_memory_shadow
  */
-void compute_memory_transfer(
+static void compute_memory_transfer(
        struct compute_memory_pool* pool,
        struct pipe_context * pipe,
        int device_to_host,
@@ -626,18 +632,3 @@ void compute_memory_transfer(
                pipe->transfer_unmap(pipe, xfer);
        }
 }
-
-/**
- * Transfer data between chunk<->data, it is for VRAM<->GART transfers
- */
-void compute_memory_transfer_direct(
-       struct compute_memory_pool* pool,
-       int chunk_to_data,
-       struct compute_memory_item* chunk,
-       struct r600_resource* data,
-       int offset_in_chunk,
-       int offset_in_data,
-       int size)
-{
-       ///TODO: DMA
-}