/*
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Adam Rak <adam.rak@streamnovation.com>
 */
#include "pipe/p_defines.h"
#include "pipe/p_state.h"
#include "pipe/p_context.h"
#include "util/u_blitter.h"
#include "util/u_double_list.h"
#include "util/u_transfer.h"
#include "util/u_surface.h"
#include "util/u_pack_color.h"
#include "util/u_math.h"
#include "util/u_memory.h"
#include "util/u_inlines.h"
#include "util/u_framebuffer.h"
#include "r600_shader.h"
#include "r600_pipe.h"
#include "r600_formats.h"
#include "compute_memory_pool.h"
#include "evergreen_compute.h"
#include "evergreen_compute_internal.h"

#define ITEM_ALIGNMENT 1024
/**
 * Creates a new pool.
 */
struct compute_memory_pool* compute_memory_pool_new(
        struct r600_screen * rscreen)
{
        struct compute_memory_pool* pool = (struct compute_memory_pool*)
                                CALLOC(sizeof(struct compute_memory_pool), 1);
        if (pool == NULL)
                return NULL;

        COMPUTE_DBG(rscreen, "* compute_memory_pool_new()\n");

        pool->screen = rscreen;
        pool->item_list = (struct list_head *)
                                CALLOC(sizeof(struct list_head), 1);
        pool->unallocated_list = (struct list_head *)
                                CALLOC(sizeof(struct list_head), 1);
        list_inithead(pool->item_list);
        list_inithead(pool->unallocated_list);

        return pool;
}
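
/* Illustrative usage sketch (not part of the driver; the real call sites live
 * in the evergreen compute code): the pieces in this file are typically
 * combined roughly as
 *
 *      pool = compute_memory_pool_new(rscreen);
 *      item = compute_memory_alloc(pool, size_in_dw);   // pending, start_in_dw == -1
 *      item->status |= ITEM_FOR_PROMOTING;              // marking is done by callers
 *      compute_memory_finalize_pending(pool, pipe);     // items get space in pool->bo
 *      compute_memory_free(pool, item->id);
 *      compute_memory_pool_delete(pool);
 */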
/**
 * Initializes the pool with a size of \a initial_size_in_dw.
 */
static void compute_memory_pool_init(struct compute_memory_pool * pool,
        unsigned initial_size_in_dw)
{
        COMPUTE_DBG(pool->screen, "* compute_memory_pool_init() initial_size_in_dw = %u\n",
                initial_size_in_dw);

        pool->shadow = (uint32_t*)CALLOC(initial_size_in_dw, 4);
        if (pool->shadow == NULL)
                return;

        pool->size_in_dw = initial_size_in_dw;
        pool->bo = (struct r600_resource*)r600_compute_buffer_alloc_vram(pool->screen,
                                                pool->size_in_dw * 4);
}
/**
 * Frees the pool's contents and the pool struct itself.
 */
void compute_memory_pool_delete(struct compute_memory_pool* pool)
{
        COMPUTE_DBG(pool->screen, "* compute_memory_pool_delete()\n");

        free(pool->shadow);

        if (pool->bo) {
                pool->screen->b.b.resource_destroy((struct pipe_screen *)
                        pool->screen, (struct pipe_resource *)pool->bo);
        }

        free(pool);
}
/**
 * Searches for an empty space in the pool; returns the start offset (in
 * dwords) of the allocatable space, or -1 on failure.
 */
int64_t compute_memory_prealloc_chunk(
        struct compute_memory_pool* pool,
        int64_t size_in_dw)
{
        struct compute_memory_item *item;

        int last_end = 0;

        assert(size_in_dw <= pool->size_in_dw);

        COMPUTE_DBG(pool->screen, "* compute_memory_prealloc_chunk() size_in_dw = %ld\n",
                size_in_dw);

        LIST_FOR_EACH_ENTRY(item, pool->item_list, link) {
                if (last_end + size_in_dw <= item->start_in_dw) {
                        return last_end;
                }
                else {
                        last_end = item->start_in_dw + align(item->size_in_dw, ITEM_ALIGNMENT);
                }
        }

        if (pool->size_in_dw - last_end < size_in_dw) {
                return -1;
        }

        return last_end;
}
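
/* Worked example (assuming ITEM_ALIGNMENT = 1024 dwords as defined above):
 * with a single existing item of 100 dw at offset 0, the scan advances
 * last_end to 0 + align(100, 1024) = 1024, so a new 512 dw request is placed
 * at offset 1024, provided the pool holds at least 1536 dw. */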
/**
 * Searches for the chunk after which the new chunk can be linked.
 */
struct list_head *compute_memory_postalloc_chunk(
        struct compute_memory_pool* pool,
        int64_t start_in_dw)
{
        struct compute_memory_item *item;
        struct compute_memory_item *next;
        struct list_head *next_link;

        COMPUTE_DBG(pool->screen, "* compute_memory_postalloc_chunk() start_in_dw = %ld\n",
                start_in_dw);

        /* Check if we can insert it in the front of the list */
        item = LIST_ENTRY(struct compute_memory_item, pool->item_list->next, link);
        if (LIST_IS_EMPTY(pool->item_list) || item->start_in_dw > start_in_dw) {
                return pool->item_list;
        }

        LIST_FOR_EACH_ENTRY(item, pool->item_list, link) {
                next_link = item->link.next;

                if (next_link != pool->item_list) {
                        next = container_of(next_link, item, link);
                        if (item->start_in_dw < start_in_dw
                                && next->start_in_dw > start_in_dw) {
                                return &item->link;
                        }
                }
                else {
                        /* end of chain */
                        assert(item->start_in_dw < start_in_dw);
                        return &item->link;
                }
        }

        assert(0 && "unreachable");
        return NULL;
}
/**
 * Reallocates the pool, preserving its data.
 * @returns -1 if it fails, 0 otherwise
 */
int compute_memory_grow_pool(struct compute_memory_pool* pool,
        struct pipe_context * pipe, int new_size_in_dw)
{
        COMPUTE_DBG(pool->screen, "* compute_memory_grow_pool() "
                "new_size_in_dw = %d (%d bytes)\n",
                new_size_in_dw, new_size_in_dw * 4);

        assert(new_size_in_dw >= pool->size_in_dw);

        if (!pool->bo) {
                compute_memory_pool_init(pool, MAX2(new_size_in_dw, 1024 * 16));
                if (pool->shadow == NULL)
                        return -1;
        } else {
                new_size_in_dw = align(new_size_in_dw, ITEM_ALIGNMENT);

                COMPUTE_DBG(pool->screen, "  Aligned size = %d (%d bytes)\n",
                        new_size_in_dw, new_size_in_dw * 4);

                compute_memory_shadow(pool, pipe, 1);
                pool->shadow = realloc(pool->shadow, new_size_in_dw*4);
                if (pool->shadow == NULL)
                        return -1;

                pool->size_in_dw = new_size_in_dw;
                pool->screen->b.b.resource_destroy(
                        (struct pipe_screen *)pool->screen,
                        (struct pipe_resource *)pool->bo);
                pool->bo = (struct r600_resource*)r600_compute_buffer_alloc_vram(
                                                pool->screen,
                                                pool->size_in_dw * 4);
                compute_memory_shadow(pool, pipe, 0);
        }

        return 0;
}
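
/* Example of the growth path above (illustrative numbers): growing an
 * existing pool to 5000 dw first aligns the request to align(5000, 1024) =
 * 5120 dw (20480 bytes), shadows the current contents to host memory,
 * replaces pool->bo with a new VRAM buffer of the aligned size, and then
 * uploads the shadow back into the new buffer. */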
/**
 * Copies the pool from device to host, or host to device.
 */
void compute_memory_shadow(struct compute_memory_pool* pool,
        struct pipe_context * pipe, int device_to_host)
{
        struct compute_memory_item chunk;

        COMPUTE_DBG(pool->screen, "* compute_memory_shadow() device_to_host = %d\n",
                device_to_host);

        chunk.id = 0;
        chunk.start_in_dw = 0;
        chunk.size_in_dw = pool->size_in_dw;
        compute_memory_transfer(pool, pipe, device_to_host, &chunk,
                                pool->shadow, 0, pool->size_in_dw*4);
}
/**
 * Allocates pending allocations in the pool.
 * @returns -1 if it fails, 0 otherwise
 */
int compute_memory_finalize_pending(struct compute_memory_pool* pool,
        struct pipe_context * pipe)
{
        struct compute_memory_item *item, *next;

        int64_t allocated = 0;
        int64_t unallocated = 0;
        int64_t last_pos;

        int err = 0;

        COMPUTE_DBG(pool->screen, "* compute_memory_finalize_pending()\n");

        LIST_FOR_EACH_ENTRY(item, pool->item_list, link) {
                COMPUTE_DBG(pool->screen, "  + list: offset = %i id = %i size = %i "
                        "(%i bytes)\n", item->start_in_dw, item->id,
                        item->size_in_dw, item->size_in_dw * 4);
        }

        /* Calculate the total allocated size */
        LIST_FOR_EACH_ENTRY(item, pool->item_list, link) {
                allocated += align(item->size_in_dw, ITEM_ALIGNMENT);
        }

        /* Calculate the total unallocated size of the items that
         * will be promoted to the pool */
        LIST_FOR_EACH_ENTRY(item, pool->unallocated_list, link) {
                if (item->status & ITEM_FOR_PROMOTING)
                        unallocated += align(item->size_in_dw, ITEM_ALIGNMENT);
        }

        if (unallocated == 0) {
                return 0;
        }

        if (pool->status & POOL_FRAGMENTED) {
                compute_memory_defrag(pool, pipe);
        }

        if (pool->size_in_dw < allocated + unallocated) {
                err = compute_memory_grow_pool(pool, pipe, allocated + unallocated);
                if (err == -1)
                        return -1;
        }

        /* After defragmenting the pool, allocated is equal to the first available
         * position for new items in the pool */
        last_pos = allocated;

        /* Loop through all the unallocated items, check if they are marked
         * for promoting, allocate space for them and add them to the item_list. */
        LIST_FOR_EACH_ENTRY_SAFE(item, next, pool->unallocated_list, link) {
                if (item->status & ITEM_FOR_PROMOTING) {
                        err = compute_memory_promote_item(pool, item, pipe, last_pos);
                        item->status &= ~ITEM_FOR_PROMOTING;

                        last_pos += align(item->size_in_dw, ITEM_ALIGNMENT);

                        if (err == -1)
                                return -1;
                }
        }

        return 0;
}
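
/* Accounting example for the sizes computed above: with items of 100 dw and
 * 2000 dw already in item_list, allocated = align(100, 1024) +
 * align(2000, 1024) = 1024 + 2048 = 3072 dw. A pending 512 dw item marked
 * ITEM_FOR_PROMOTING adds unallocated = 1024 dw, so the pool is grown
 * whenever pool->size_in_dw < 3072 + 1024. */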
/**
 * Defragments the pool, so that there's no gap between items.
 * \param pool  The pool to be defragmented
 */
void compute_memory_defrag(struct compute_memory_pool *pool,
        struct pipe_context *pipe)
{
        struct compute_memory_item *item;
        int64_t last_pos;

        COMPUTE_DBG(pool->screen, "* compute_memory_defrag()\n");

        last_pos = 0;
        LIST_FOR_EACH_ENTRY(item, pool->item_list, link) {
                if (item->start_in_dw != last_pos) {
                        assert(last_pos < item->start_in_dw);

                        compute_memory_move_item(pool, item, last_pos, pipe);
                }

                last_pos += align(item->size_in_dw, ITEM_ALIGNMENT);
        }

        pool->status &= ~POOL_FRAGMENTED;
}
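
/* Defrag example: items occupying [0, 1024) and [3072, 4096) (in dwords) are
 * compacted so that the second item starts at 1024, leaving all free space
 * at the end of the pool before POOL_FRAGMENTED is cleared. */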
/**
 * Moves an item from the unallocated list to the item list, copying its
 * contents from its temporary buffer into the pool.
 */
int compute_memory_promote_item(struct compute_memory_pool *pool,
        struct compute_memory_item *item, struct pipe_context *pipe,
        int64_t start_in_dw)
{
        struct pipe_screen *screen = (struct pipe_screen *)pool->screen;
        struct r600_context *rctx = (struct r600_context *)pipe;
        struct pipe_resource *src = (struct pipe_resource *)item->real_buffer;
        struct pipe_resource *dst = (struct pipe_resource *)pool->bo;
        struct pipe_box box;

        COMPUTE_DBG(pool->screen, "  + Found space for Item %p id = %u "
                "start_in_dw = %u (%u bytes) size_in_dw = %u (%u bytes)\n",
                item, item->id, start_in_dw, start_in_dw * 4,
                item->size_in_dw, item->size_in_dw * 4);

        /* Remove the item from the unallocated list */
        list_del(&item->link);

        /* Add it back to the item_list */
        list_addtail(&item->link, pool->item_list);
        item->start_in_dw = start_in_dw;

        if (src != NULL) {
                u_box_1d(0, item->size_in_dw * 4, &box);

                rctx->b.b.resource_copy_region(pipe,
                        dst, 0, item->start_in_dw * 4, 0, 0,
                        src, 0, &box);

                /* We check if the item is mapped for reading.
                 * In this case, we need to keep the temporary buffer 'alive'
                 * because it is possible to keep a map active for reading
                 * while a kernel (that reads from it) executes */
                if (!(item->status & ITEM_MAPPED_FOR_READING)) {
                        pool->screen->b.b.resource_destroy(screen, src);
                        item->real_buffer = NULL;
                }
        }

        return 0;
}
/**
 * Moves an item from the item list back to the unallocated list, copying its
 * contents from the pool into its own temporary buffer and marking it as
 * pending again.
 */
void compute_memory_demote_item(struct compute_memory_pool *pool,
        struct compute_memory_item *item, struct pipe_context *pipe)
{
        struct r600_context *rctx = (struct r600_context *)pipe;
        struct pipe_resource *src = (struct pipe_resource *)pool->bo;
        struct pipe_resource *dst;
        struct pipe_box box;

        /* First, we remove the item from the item_list */
        list_del(&item->link);

        /* Now we add it to the unallocated list */
        list_addtail(&item->link, pool->unallocated_list);

        /* We check if the intermediate buffer exists, and if it
         * doesn't, we create it again */
        if (item->real_buffer == NULL) {
                item->real_buffer = (struct r600_resource*)r600_compute_buffer_alloc_vram(
                        pool->screen, item->size_in_dw * 4);
        }

        dst = (struct pipe_resource *)item->real_buffer;

        /* We transfer the memory from the item in the pool to the
         * temporary buffer */
        u_box_1d(item->start_in_dw * 4, item->size_in_dw * 4, &box);

        rctx->b.b.resource_copy_region(pipe,
                dst, 0, 0, 0, 0,
                src, 0, &box);

        /* Remember to mark the buffer as 'pending' by setting start_in_dw to -1 */
        item->start_in_dw = -1;

        if (item->link.next != pool->item_list) {
                pool->status |= POOL_FRAGMENTED;
        }
}
/**
 * Moves the item \a item forward in the pool to \a new_start_in_dw.
 *
 * This function assumes two things:
 * 1) The item is \b only moved forward.
 * 2) The item \b won't change its position inside the \a item_list.
 *
 * \param item                  The item that will be moved
 * \param new_start_in_dw       The new start position of the item in the pool
 * \see compute_memory_defrag
 */
void compute_memory_move_item(struct compute_memory_pool *pool,
        struct compute_memory_item *item, uint64_t new_start_in_dw,
        struct pipe_context *pipe)
{
        struct pipe_screen *screen = (struct pipe_screen *)pool->screen;
        struct r600_context *rctx = (struct r600_context *)pipe;
        struct pipe_resource *src = (struct pipe_resource *)pool->bo;
        struct pipe_resource *dst;
        struct pipe_box box;

        struct compute_memory_item *prev;

        COMPUTE_DBG(pool->screen, "* compute_memory_move_item()\n"
                "  + Moving item %i from %u (%u bytes) to %u (%u bytes)\n",
                item->id, item->start_in_dw, item->start_in_dw * 4,
                new_start_in_dw, new_start_in_dw * 4);

        if (pool->item_list != item->link.prev) {
                prev = container_of(item->link.prev, item, link);
                assert(prev->start_in_dw + prev->size_in_dw <= new_start_in_dw);
        }

        u_box_1d(item->start_in_dw * 4, item->size_in_dw * 4, &box);

        /* If the ranges don't overlap, we can just copy the item directly */
        if (new_start_in_dw + item->size_in_dw <= item->start_in_dw) {
                dst = (struct pipe_resource *)pool->bo;

                rctx->b.b.resource_copy_region(pipe,
                        dst, 0, new_start_in_dw * 4, 0, 0,
                        src, 0, &box);
        } else {
                /* The ranges overlap, we will try first to use an intermediate
                 * resource to move the item */
                dst = (struct pipe_resource *)r600_compute_buffer_alloc_vram(
                        pool->screen, item->size_in_dw * 4);

                if (dst != NULL) {
                        rctx->b.b.resource_copy_region(pipe,
                                dst, 0, 0, 0, 0,
                                src, 0, &box);

                        src = dst;
                        dst = (struct pipe_resource *)pool->bo;

                        box.x = 0;

                        rctx->b.b.resource_copy_region(pipe,
                                dst, 0, new_start_in_dw * 4, 0, 0,
                                src, 0, &box);

                        pool->screen->b.b.resource_destroy(screen, src);
                } else {
                        /* The allocation of the temporary resource failed,
                         * falling back to use mappings */
                        uint32_t *map;
                        int64_t offset;
                        struct pipe_transfer *trans;

                        offset = item->start_in_dw - new_start_in_dw;

                        u_box_1d(new_start_in_dw * 4, (offset + item->size_in_dw) * 4, &box);

                        map = pipe->transfer_map(pipe, src, 0, PIPE_TRANSFER_READ_WRITE,
                                &box, &trans);

                        assert(map);
                        assert(trans);

                        memmove(map, map + offset, item->size_in_dw * 4);

                        pipe->transfer_unmap(pipe, trans);
                }
        }

        item->start_in_dw = new_start_in_dw;
}
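
/* Overlap check example for the move above: a 1024 dw item moving from
 * offset 2048 to 1024 satisfies new_start_in_dw + size_in_dw (2048) <=
 * start_in_dw (2048), so it is copied directly within pool->bo. Moving the
 * same item from 1536 to 1024 overlaps, so an intermediate buffer is used,
 * or a mapped memmove if that allocation fails. */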
void compute_memory_free(struct compute_memory_pool* pool, int64_t id)
{
        struct compute_memory_item *item, *next;
        struct pipe_screen *screen = (struct pipe_screen *)pool->screen;
        struct pipe_resource *res;

        COMPUTE_DBG(pool->screen, "* compute_memory_free() id = %ld \n", id);

        LIST_FOR_EACH_ENTRY_SAFE(item, next, pool->item_list, link) {

                if (item->id == id) {

                        if (item->link.next != pool->item_list) {
                                pool->status |= POOL_FRAGMENTED;
                        }

                        list_del(&item->link);

                        if (item->real_buffer) {
                                res = (struct pipe_resource *)item->real_buffer;
                                pool->screen->b.b.resource_destroy(
                                                screen, res);
                        }

                        free(item);

                        return;
                }
        }

        LIST_FOR_EACH_ENTRY_SAFE(item, next, pool->unallocated_list, link) {

                if (item->id == id) {
                        list_del(&item->link);

                        if (item->real_buffer) {
                                res = (struct pipe_resource *)item->real_buffer;
                                pool->screen->b.b.resource_destroy(
                                                screen, res);
                        }

                        free(item);

                        return;
                }
        }

        fprintf(stderr, "Internal error, invalid id %"PRIi64" "
                "for compute_memory_free\n", id);

        assert(0 && "error");
}
/**
 * Creates pending allocations.
 */
struct compute_memory_item* compute_memory_alloc(
        struct compute_memory_pool* pool,
        int64_t size_in_dw)
{
        struct compute_memory_item *new_item = NULL;

        COMPUTE_DBG(pool->screen, "* compute_memory_alloc() size_in_dw = %ld (%ld bytes)\n",
                size_in_dw, 4 * size_in_dw);

        new_item = (struct compute_memory_item *)
                                CALLOC(sizeof(struct compute_memory_item), 1);
        if (new_item == NULL)
                return NULL;

        new_item->size_in_dw = size_in_dw;
        new_item->start_in_dw = -1; /* mark pending */
        new_item->id = pool->next_id++;
        new_item->pool = pool;
        new_item->real_buffer = NULL;

        list_addtail(&new_item->link, pool->unallocated_list);

        COMPUTE_DBG(pool->screen, "  + Adding item %p id = %u size = %u (%u bytes)\n",
                new_item, new_item->id, new_item->size_in_dw,
                new_item->size_in_dw * 4);

        return new_item;
}
/**
 * Transfers data host<->device; offset and size are in bytes.
 */
void compute_memory_transfer(
        struct compute_memory_pool* pool,
        struct pipe_context * pipe,
        int device_to_host,
        struct compute_memory_item* chunk,
        void* data,
        int offset_in_chunk,
        int size)
{
        int64_t aligned_size = pool->size_in_dw;
        struct pipe_resource* gart = (struct pipe_resource*)pool->bo;
        int64_t internal_offset = chunk->start_in_dw*4 + offset_in_chunk;

        struct pipe_transfer *xfer;
        uint32_t *map;

        assert(gart);

        COMPUTE_DBG(pool->screen, "* compute_memory_transfer() device_to_host = %d, "
                "offset_in_chunk = %d, size = %d\n", device_to_host,
                offset_in_chunk, size);

        if (device_to_host) {
                map = pipe->transfer_map(pipe, gart, 0, PIPE_TRANSFER_READ,
                        &(struct pipe_box) { .width = aligned_size * 4,
                        .height = 1, .depth = 1 }, &xfer);
                assert(xfer);
                assert(map);
                memcpy(data, map + internal_offset, size);
                pipe->transfer_unmap(pipe, xfer);
        } else {
                map = pipe->transfer_map(pipe, gart, 0, PIPE_TRANSFER_WRITE,
                        &(struct pipe_box) { .width = aligned_size * 4,
                        .height = 1, .depth = 1 }, &xfer);
                assert(xfer);
                assert(map);
                memcpy(map + internal_offset, data, size);
                pipe->transfer_unmap(pipe, xfer);
        }
}
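
/* Illustrative call (not from the original file): writing user data into an
 * allocated chunk is a host-to-device transfer, e.g.
 *
 *      compute_memory_transfer(pool, pipe, 0, item, data, 0, n_bytes);
 *
 * with device_to_host = 0, offset_in_chunk = 0 and the size given in bytes,
 * matching the parameter conventions documented above. */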
/**
 * Transfers data between chunk<->data; intended for VRAM<->GART transfers.
 */
void compute_memory_transfer_direct(
        struct compute_memory_pool* pool,
        int chunk_to_data,
        struct compute_memory_item* chunk,
        struct r600_resource* data,