/*
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Adam Rak <adam.rak@streamnovation.com>
 */
#include "pipe/p_defines.h"
#include "pipe/p_state.h"
#include "pipe/p_context.h"
#include "util/u_blitter.h"
#include "util/u_double_list.h"
#include "util/u_transfer.h"
#include "util/u_surface.h"
#include "util/u_pack_color.h"
#include "util/u_memory.h"
#include "util/u_inlines.h"
#include "util/u_framebuffer.h"

#include "r600_resource.h"
#include "r600_shader.h"
#include "r600_pipe.h"
#include "r600_formats.h"
#include "compute_memory_pool.h"
#include "evergreen_compute_internal.h"
static struct r600_resource_texture * create_pool_texture(struct r600_screen * screen,
		unsigned size_in_dw)
{
	struct pipe_resource templ;
	struct r600_resource_texture * tex;

	if (size_in_dw == 0) {
		return NULL;
	}

	memset(&templ, 0, sizeof(templ));
	templ.target = PIPE_TEXTURE_1D;
	templ.format = PIPE_FORMAT_R32_UINT;
	templ.bind = PIPE_BIND_CUSTOM;
	templ.usage = PIPE_USAGE_IMMUTABLE;
	templ.width0 = size_in_dw;
	templ.height0 = 1;
	templ.depth0 = 1;

	tex = (struct r600_resource_texture *)r600_texture_create(
						&screen->screen, &templ);
	/* XXX: Propagate this error */
	assert(tex && "Out of memory");
	return tex;
}
/**
 * Creates a new pool.
 */
struct compute_memory_pool* compute_memory_pool_new(
	int64_t initial_size_in_dw,
	struct r600_screen * rscreen)
{
	struct compute_memory_pool* pool = (struct compute_memory_pool*)
				CALLOC(sizeof(struct compute_memory_pool), 1);

	COMPUTE_DBG("* compute_memory_pool_new() initial_size_in_dw = %ld\n",
		initial_size_in_dw);

	pool->size_in_dw = initial_size_in_dw;
	pool->screen = rscreen;
	pool->bo = (struct r600_resource*)create_pool_texture(pool->screen,
							pool->size_in_dw);
	pool->shadow = (uint32_t*)CALLOC(4, pool->size_in_dw);

	return pool;
}
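/*
 * Illustrative sketch (an assumption about intended usage, not code that is
 * called anywhere in this file); `rscreen` and `pipe` are assumed to come
 * from the caller's screen/context setup:
 *
 *	struct compute_memory_pool *pool =
 *		compute_memory_pool_new(1024 * 1024, rscreen);
 *	struct compute_memory_item *item =
 *		compute_memory_alloc(pool, 4096);  // pending: start_in_dw == -1
 *	compute_memory_finalize_pending(pool, pipe);  // item gets an offset
 *	compute_memory_free(pool, item->id);
 *	compute_memory_pool_delete(pool);
 */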
/**
 * Frees all items in the pool and the pool struct itself.
 */
void compute_memory_pool_delete(struct compute_memory_pool* pool)
{
	COMPUTE_DBG("* compute_memory_pool_delete()\n");

	free(pool->shadow);

	if (pool->bo) {
		pool->screen->screen.resource_destroy((struct pipe_screen *)
			pool->screen, (struct pipe_resource *)pool->bo);
	}
	free(pool);
}
/**
 * Searches for an empty gap in the pool; returns the offset (in dw) of the
 * allocatable space in the pool, or -1 on failure.
 */
int64_t compute_memory_prealloc_chunk(
	struct compute_memory_pool* pool,
	int64_t size_in_dw)
{
	assert(size_in_dw <= pool->size_in_dw);

	struct compute_memory_item *item;

	int64_t last_end = 0;

	COMPUTE_DBG("* compute_memory_prealloc_chunk() size_in_dw = %ld\n",
		size_in_dw);

	for (item = pool->item_list; item; item = item->next) {
		if (item->start_in_dw > -1) {
			if (item->start_in_dw - last_end > size_in_dw) {
				return last_end;
			}

			last_end = item->start_in_dw + item->size_in_dw;
			last_end += (1024 - last_end % 1024);
		}
	}

	if (pool->size_in_dw - last_end < size_in_dw) {
		return -1;
	}

	return last_end;
}
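/*
 * Worked example of the first-fit scan above (illustrative numbers): with a
 * single allocated item of 100 dw at offset 0, the cursor moves past it and
 * is rounded up to the next 1024-dw boundary:
 *
 *	last_end = 0 + 100;			// 100
 *	last_end += (1024 - last_end % 1024);	// 100 + 924 = 1024
 *
 * so a request that fits is placed at 1024. Note the rounding always
 * advances, even when last_end is already aligned (1024 would become 2048).
 */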
/**
 * Searches for the chunk after which we can link our new chunk.
 */
struct compute_memory_item* compute_memory_postalloc_chunk(
	struct compute_memory_pool* pool,
	int64_t start_in_dw)
{
	struct compute_memory_item* item;

	COMPUTE_DBG("* compute_memory_postalloc_chunk() start_in_dw = %ld\n",
		start_in_dw);

	for (item = pool->item_list; item; item = item->next) {
		if (item->next) {
			if (item->start_in_dw < start_in_dw
				&& item->next->start_in_dw > start_in_dw) {
				return item;
			}
		} else {
			/* end of chain */
			assert(item->start_in_dw < start_in_dw);
			return item;
		}
	}

	assert(0 && "unreachable");
	return NULL;
}
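/*
 * Example (illustrative): with items starting at 0 and 2048, a query for
 * start_in_dw == 1024 returns the first item (0 < 1024 and the next item's
 * 2048 > 1024), so the caller links the new chunk right after it.
 */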
/**
 * Reallocates the pool; preserves its contents.
 */
void compute_memory_grow_pool(struct compute_memory_pool* pool,
	struct pipe_context * pipe, int new_size_in_dw)
{
	COMPUTE_DBG("* compute_memory_grow_pool() new_size_in_dw = %d\n",
		new_size_in_dw);

	assert(new_size_in_dw >= pool->size_in_dw);

	new_size_in_dw += 1024 - (new_size_in_dw % 1024);

	COMPUTE_DBG("  Aligned size = %d\n", new_size_in_dw);

	if (pool->bo) {
		compute_memory_shadow(pool, pipe, 1);
	}

	pool->shadow = (uint32_t*)realloc(pool->shadow, new_size_in_dw*4);
	pool->size_in_dw = new_size_in_dw;

	if (pool->bo) {
		pool->screen->screen.resource_destroy(
			(struct pipe_screen *)pool->screen,
			(struct pipe_resource *)pool->bo);
		pool->bo = (struct r600_resource*)create_pool_texture(
							pool->screen,
							pool->size_in_dw);
		compute_memory_shadow(pool, pipe, 0);
	} else {
		pool->bo = (struct r600_resource*)create_pool_texture(
							pool->screen,
							pool->size_in_dw);
	}
}
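/*
 * Sketch of the grow sequence above, with assumed sizes; for a live BO the
 * contents survive via the host shadow copy:
 *
 *	// pool->size_in_dw == 4096, caller needs 5000 dw in total
 *	compute_memory_grow_pool(pool, pipe, 5000);
 *	// size is rounded up to 5120 (next multiple of 1024); the pool was
 *	// shadowed out to host memory, the BO destroyed and recreated at
 *	// 5120 dw, and the shadow copied back into the new BO.
 */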
/**
 * Copies the pool from device to host, or host to device.
 */
void compute_memory_shadow(struct compute_memory_pool* pool,
	struct pipe_context * pipe, int device_to_host)
{
	struct compute_memory_item chunk;

	COMPUTE_DBG("* compute_memory_shadow() device_to_host = %d\n",
		device_to_host);

	chunk.id = 0;
	chunk.start_in_dw = 0;
	chunk.size_in_dw = pool->size_in_dw;
	chunk.prev = chunk.next = NULL;
	compute_memory_transfer(pool, pipe, device_to_host, &chunk,
				pool->shadow, 0, pool->size_in_dw*4);
}
/**
 * Allocates pending allocations in the pool.
 */
void compute_memory_finalize_pending(struct compute_memory_pool* pool,
	struct pipe_context * pipe)
{
	struct compute_memory_item *pending_list = NULL, *end_p = NULL;
	struct compute_memory_item *item, *next;

	int64_t allocated = 0;
	int64_t unallocated = 0;

	COMPUTE_DBG("* compute_memory_finalize_pending()\n");

	for (item = pool->item_list; item; item = item->next) {
		COMPUTE_DBG("list: %ld %p\n", item->start_in_dw, item->next);
	}

	/* First pass: unlink pending items (start_in_dw == -1) from the
	 * item list and append them to pending_list, while summing up the
	 * space that is already allocated and the space the pending items
	 * will need. */
	for (item = pool->item_list; item; item = next) {
		next = item->next;

		if (item->start_in_dw == -1) {
			if (end_p) {
				end_p->next = item;
			} else {
				pending_list = item;
			}

			if (item->prev) {
				item->prev->next = next;
			} else {
				pool->item_list = next;
			}

			if (next) {
				next->prev = item->prev;
			}

			/* Make the item the tail of pending_list. */
			item->prev = end_p;
			item->next = NULL;
			end_p = item;

			unallocated += item->size_in_dw+1024;
		} else {
			allocated += item->size_in_dw;
		}
	}

	if (pool->size_in_dw < allocated+unallocated) {
		compute_memory_grow_pool(pool, pipe, allocated+unallocated);
	}

	/* Second pass: find a start offset for each pending item, growing
	 * the pool when no gap is large enough, then relink the item into
	 * the item list at its sorted position. */
	for (item = pending_list; item; item = next) {
		next = item->next;

		int64_t start_in_dw;

		while ((start_in_dw=compute_memory_prealloc_chunk(pool,
				item->size_in_dw)) == -1) {
			int64_t need = item->size_in_dw+2048 -
					(pool->size_in_dw - allocated);

			need += 1024 - (need % 1024);

			if (need > 0) {
				compute_memory_grow_pool(pool,
						pipe,
						pool->size_in_dw + need);
			} else {
				need = pool->size_in_dw / 10;
				need += 1024 - (need % 1024);
				compute_memory_grow_pool(pool,
						pipe,
						pool->size_in_dw + need);
			}
		}

		item->start_in_dw = start_in_dw;
		item->next = NULL;
		item->prev = NULL;

		if (pool->item_list) {
			struct compute_memory_item *pos;

			pos = compute_memory_postalloc_chunk(pool, start_in_dw);
			item->prev = pos;
			item->next = pos->next;
			pos->next = item;

			if (item->next) {
				item->next->prev = item;
			}
		} else {
			pool->item_list = item;
		}

		allocated += item->size_in_dw;
	}
}
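/*
 * Illustrative walk-through of the two passes above: if the list holds item
 * A (allocated, 1024 dw) and item B (pending, 2048 dw), pass one unlinks B
 * into pending_list with allocated = 1024 and unallocated = 2048 + 1024;
 * pass two first-fits B via compute_memory_prealloc_chunk(), growing the
 * pool whenever that returns -1, then relinks B after the item returned by
 * compute_memory_postalloc_chunk(pool, start_in_dw).
 */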
void compute_memory_free(struct compute_memory_pool* pool, int64_t id)
{
	struct compute_memory_item *item, *next;

	COMPUTE_DBG("* compute_memory_free() id = %ld \n", id);

	for (item = pool->item_list; item; item = next) {
		next = item->next;

		if (item->id == id) {
			if (item->prev) {
				item->prev->next = item->next;
			} else {
				pool->item_list = item->next;
			}

			if (item->next) {
				item->next->prev = item->prev;
			}

			free(item);

			return;
		}
	}

	fprintf(stderr, "Internal error, invalid id %ld "
		"for compute_memory_free\n", id);

	assert(0 && "error");
}
/**
 * Creates a pending allocation; no pool space is assigned until
 * compute_memory_finalize_pending() runs.
 */
struct compute_memory_item* compute_memory_alloc(
	struct compute_memory_pool* pool,
	int64_t size_in_dw)
{
	struct compute_memory_item *new_item;

	COMPUTE_DBG("* compute_memory_alloc() size_in_dw = %ld\n", size_in_dw);

	new_item = (struct compute_memory_item *)
				CALLOC(sizeof(struct compute_memory_item), 1);
	new_item->size_in_dw = size_in_dw;
	new_item->start_in_dw = -1; /* mark pending */
	new_item->id = pool->next_id++;
	new_item->pool = pool;

	struct compute_memory_item *last_item;

	if (pool->item_list) {
		for (last_item = pool->item_list; last_item->next;
						last_item = last_item->next);

		last_item->next = new_item;
		new_item->prev = last_item;
	} else {
		pool->item_list = new_item;
	}

	return new_item;
}
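/*
 * Usage sketch (illustrative): right after compute_memory_alloc() the item
 * is only a reservation and occupies no pool space until
 * compute_memory_finalize_pending() runs:
 *
 *	struct compute_memory_item *it = compute_memory_alloc(pool, 256);
 *	assert(it->start_in_dw == -1);		// still pending
 *	compute_memory_finalize_pending(pool, pipe);
 *	assert(it->start_in_dw >= 0);		// now placed in the pool
 */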
/**
 * Transfers data host<->device; offset and size are in bytes.
 */
void compute_memory_transfer(
	struct compute_memory_pool* pool,
	struct pipe_context * pipe,
	int device_to_host,
	struct compute_memory_item* chunk,
	void* data,
	int offset_in_chunk,
	int size)
{
	int64_t aligned_size = pool->size_in_dw;
	struct pipe_resource* gart = (struct pipe_resource*)pool->bo;
	/* start_in_dw is in dwords; internal_offset is in bytes. */
	int64_t internal_offset = chunk->start_in_dw*4 + offset_in_chunk;

	struct pipe_transfer *xfer;
	char *map;

	assert(gart);

	COMPUTE_DBG("* compute_memory_transfer() device_to_host = %d, "
		"offset_in_chunk = %d, size = %d\n", device_to_host,
		offset_in_chunk, size);

	if (device_to_host) {
		xfer = pipe->get_transfer(pipe, gart, 0, PIPE_TRANSFER_READ,
			&(struct pipe_box) { .width = aligned_size,
			.height = 1, .depth = 1 });
		assert(xfer);
		map = pipe->transfer_map(pipe, xfer);
		assert(map);
		memcpy(data, map + internal_offset, size);
		pipe->transfer_unmap(pipe, xfer);
		pipe->transfer_destroy(pipe, xfer);
	} else {
		xfer = pipe->get_transfer(pipe, gart, 0, PIPE_TRANSFER_WRITE,
			&(struct pipe_box) { .width = aligned_size,
			.height = 1, .depth = 1 });
		assert(xfer);
		map = pipe->transfer_map(pipe, xfer);
		assert(map);
		memcpy(map + internal_offset, data, size);
		pipe->transfer_unmap(pipe, xfer);
		pipe->transfer_destroy(pipe, xfer);
	}
}
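/*
 * Usage sketch (illustrative; `host_buf` is an assumed caller-side buffer,
 * not a name used elsewhere in this file): upload host data into a
 * finalized chunk, then read it back:
 *
 *	uint32_t host_buf[64] = {0};
 *	compute_memory_transfer(pool, pipe, 0, item, host_buf,
 *				0, sizeof(host_buf));	// host -> device
 *	compute_memory_transfer(pool, pipe, 1, item, host_buf,
 *				0, sizeof(host_buf));	// device -> host
 */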
/**
 * Transfers data between chunk<->data; used for VRAM<->GART transfers.
 */
void compute_memory_transfer_direct(
	struct compute_memory_pool* pool,
	int chunk_to_data,
	struct compute_memory_item* chunk,
	struct r600_resource* data,