r600g/compute: Add a function for moving items in the pool
Author:     Bruno Jiménez <brunojimen@gmail.com>
            Wed, 16 Jul 2014 21:12:43 +0000 (23:12 +0200)
Committer:  Tom Stellard <thomas.stellard@amd.com>
            Wed, 23 Jul 2014 14:29:17 +0000 (10:29 -0400)
This function will be used in the future by compute_memory_defrag
to move items forward in the pool.

It does so by first checking for overlapping ranges: if the ranges
don't overlap, it copies the contents directly. If they overlap, it
first tries to use a temporary buffer; if that buffer fails to
allocate, it finally falls back to a mapping.

Note that because items only ever need to be moved forward, overlapping
ranges are checked only for that case. If needed, support for moving
items backward can easily be added by changing the first if.
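
For illustration only, here is a rough sketch of how the future
compute_memory_defrag pass mentioned above might drive this function.
compute_memory_defrag is not part of this patch; the loop below is a
hypothetical caller that reuses the pool's existing item_list/link
fields and the LIST_FOR_EACH_ENTRY helper already used in this file,
and it glosses over details such as allocation alignment:

    /* Hypothetical defrag pass (not part of this patch): pack all items
     * towards the start of the pool, moving each one only forward, as
     * compute_memory_move_item() requires. */
    static void compute_memory_defrag_sketch(struct compute_memory_pool *pool,
            struct pipe_context *pipe)
    {
            struct compute_memory_item *item;
            int64_t last_pos = 0;

            LIST_FOR_EACH_ENTRY(item, pool->item_list, link) {
                    if (item->start_in_dw != last_pos) {
                            /* Items are visited in pool order, so inside this
                             * branch the item sits strictly after the packed
                             * position and the move is always forward. */
                            assert(last_pos < item->start_in_dw);
                            compute_memory_move_item(pool, item, last_pos, pipe);
                    }
                    /* A real implementation would also respect the pool's
                     * allocation alignment when advancing last_pos. */
                    last_pos += item->size_in_dw;
            }
    }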

Reviewed-by: Tom Stellard <thomas.stellard@amd.com>
src/gallium/drivers/r600/compute_memory_pool.c
src/gallium/drivers/r600/compute_memory_pool.h

index fe19d9ef81c578b696809955e67200cb150a27e6..0b4131808514f300ad2b09f0037f09d6837258e4 100644
@@ -408,6 +408,95 @@ void compute_memory_demote_item(struct compute_memory_pool *pool,
        item->start_in_dw = -1;
 }
 
+/**
+ * Moves the item \a item forward in the pool to \a new_start_in_dw
+ *
+ * This function assumes two things:
+ * 1) The item is \b only moved forward
+ * 2) The item \b won't change its position inside the \a item_list
+ *
+ * \param item                 The item that will be moved
+ * \param new_start_in_dw      The new start of the item in the pool, in dwords
+ */
+void compute_memory_move_item(struct compute_memory_pool *pool,
+       struct compute_memory_item *item, uint64_t new_start_in_dw,
+       struct pipe_context *pipe)
+{
+       struct pipe_screen *screen = (struct pipe_screen *)pool->screen;
+       struct r600_context *rctx = (struct r600_context *)pipe;
+       struct pipe_resource *src = (struct pipe_resource *)pool->bo;
+       struct pipe_resource *dst;
+       struct pipe_box box;
+
+       struct compute_memory_item *prev;
+
+       COMPUTE_DBG(pool->screen, "* compute_memory_move_item()\n"
+                       "  + Moving item %i from %u (%u bytes) to %u (%u bytes)\n",
+                       item->id, item->start_in_dw, item->start_in_dw * 4,
+                       new_start_in_dw, new_start_in_dw * 4);
+
+       if (pool->item_list != item->link.prev) {
+               prev = container_of(item->link.prev, item, link);
+               assert(prev->start_in_dw + prev->size_in_dw <= new_start_in_dw);
+       }
+
+       u_box_1d(item->start_in_dw * 4, item->size_in_dw * 4, &box);
+
+       /* If the ranges don't overlap, we can just copy the item directly */
+       if (new_start_in_dw + item->size_in_dw <= item->start_in_dw) {
+               dst = (struct pipe_resource *)pool->bo;
+
+               rctx->b.b.resource_copy_region(pipe,
+                       dst, 0, new_start_in_dw * 4, 0, 0,
+                       src, 0, &box);
+       } else {
+               /* The ranges overlap, so we first try to use an intermediate
+                * resource to move the item */
+               dst = (struct pipe_resource *)r600_compute_buffer_alloc_vram(
+                               pool->screen, item->size_in_dw * 4);
+
+               if (dst != NULL) {
+                       rctx->b.b.resource_copy_region(pipe,
+                               dst, 0, 0, 0, 0,
+                               src, 0, &box);
+
+                       src = dst;
+                       dst = (struct pipe_resource *)pool->bo;
+
+                       box.x = 0;
+
+                       rctx->b.b.resource_copy_region(pipe,
+                               dst, 0, new_start_in_dw * 4, 0, 0,
+                               src, 0, &box);
+
+                       pool->screen->b.b.resource_destroy(screen, src);
+
+               } else {
+                       /* The allocation of the temporary resource failed,
+                        * falling back to use mappings */
+                       uint32_t *map;
+                       int64_t offset;
+                       struct pipe_transfer *trans;
+
+                       offset = item->start_in_dw - new_start_in_dw;
+
+                       u_box_1d(new_start_in_dw * 4, (offset + item->size_in_dw) * 4, &box);
+
+                       map = pipe->transfer_map(pipe, src, 0, PIPE_TRANSFER_READ_WRITE,
+                               &box, &trans);
+
+                       assert(map);
+                       assert(trans);
+
+                       memmove(map, map + offset, item->size_in_dw * 4);
+
+                       pipe->transfer_unmap(pipe, trans);
+               }
+       }
+
+       item->start_in_dw = new_start_in_dw;
+}
+
 void compute_memory_free(struct compute_memory_pool* pool, int64_t id)
 {
        struct compute_memory_item *item, *next;
index 259474a493dfb2a9fb9e886e257cbadada1fa58b..73320100dbd1613d8c6bb810d4b1c5828ad70f21 100644
@@ -93,6 +93,10 @@ int compute_memory_promote_item(struct compute_memory_pool *pool,
 void compute_memory_demote_item(struct compute_memory_pool *pool,
        struct compute_memory_item *item, struct pipe_context *pipe);
 
+void compute_memory_move_item(struct compute_memory_pool *pool,
+       struct compute_memory_item *item, uint64_t new_start_in_dw,
+       struct pipe_context *pipe);
+
 void compute_memory_free(struct compute_memory_pool* pool, int64_t id);
 struct compute_memory_item* compute_memory_alloc(struct compute_memory_pool* pool, int64_t size_in_dw); ///Creates pending allocations