r600g/compute: Allow compute_memory_move_item to move items between resources
[mesa.git] / src / gallium / drivers / r600 / compute_memory_pool.h
1 /*
2 * Permission is hereby granted, free of charge, to any person obtaining a
3 * copy of this software and associated documentation files (the "Software"),
4 * to deal in the Software without restriction, including without limitation
5 * on the rights to use, copy, modify, merge, publish, distribute, sub
6 * license, and/or sell copies of the Software, and to permit persons to whom
7 * the Software is furnished to do so, subject to the following conditions:
8 *
9 * The above copyright notice and this permission notice (including the next
10 * paragraph) shall be included in all copies or substantial portions of the
11 * Software.
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
16 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
17 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19 * USE OR OTHER DEALINGS IN THE SOFTWARE.
20 *
21 * Authors:
22 * Adam Rak <adam.rak@streamnovation.com>
23 */
24
25 #ifndef COMPUTE_MEMORY_POOL
26 #define COMPUTE_MEMORY_POOL
27
28 #include <stdlib.h>
29
/* Flags for compute_memory_item::status */
#define ITEM_MAPPED_FOR_READING (1<<0) /**< The item's buffer is currently mapped for reading */
#define ITEM_MAPPED_FOR_WRITING (1<<1) /**< The item's buffer is currently mapped for writing */
#define ITEM_FOR_PROMOTING      (1<<2) /**< Item is marked to be promoted into the pool (see compute_memory_promote_item) */
#define ITEM_FOR_DEMOTING       (1<<3) /**< Item is marked to be demoted out of the pool (see compute_memory_demote_item) */

/* Flags for compute_memory_pool::status */
#define POOL_FRAGMENTED (1<<0) /**< The pool has gaps between items and may need compute_memory_defrag */
36
37 struct compute_memory_pool;
38
/**
 * A chunk of device memory managed by a compute_memory_pool.
 *
 * An item either lives inside the pool's buffer (start_in_dw != -1,
 * see is_item_in_pool()) or outside of it, in which case real_buffer
 * presumably backs it with its own resource — confirm in the .c file.
 */
struct compute_memory_item
{
	int64_t id;		/**< ID of the memory chunk */

	uint32_t status;	/**< Status of the item (ITEM_* flags) */

	int64_t start_in_dw;	/**< Start pointer in dwords relative in the pool bo; -1 when the item is not in the pool */
	int64_t size_in_dw;	/**< Size of the chunk in dwords */

	struct r600_resource *real_buffer;	/**< Dedicated resource; NOTE(review): looks like it backs the item while outside the pool — confirm */

	struct compute_memory_pool* pool;	/**< The pool this item belongs to */

	struct list_head link;	/**< Link in the pool's item_list or unallocated_list */
};
54
/**
 * A memory pool that packs compute buffers into a single buffer object,
 * tracking both the items placed in it and the ones still waiting to be
 * allocated.
 */
struct compute_memory_pool
{
	int64_t next_id;	/**< For generating unique IDs for memory chunks */
	int64_t size_in_dw;	/**< Size of the pool in dwords */

	struct r600_resource *bo;	/**< The pool buffer object resource */
	struct r600_screen *screen;	/**< Screen the pool was created for */

	uint32_t *shadow;	/**< Host copy of the pool, used for defragmentation */

	uint32_t status;	/**< Status of the pool (POOL_* flags) */

	struct list_head *item_list;		/**< Allocated memory chunks in the buffer; they must be ordered by "start_in_dw" */
	struct list_head *unallocated_list;	/**< Unallocated (pending) memory chunks */
};
70
71
72 static inline int is_item_in_pool(struct compute_memory_item *item)
73 {
74 return item->start_in_dw != -1;
75 }
76
/** Creates a new pool. */
struct compute_memory_pool* compute_memory_pool_new(struct r600_screen *rscreen);

/** Frees all stuff in the pool and the pool struct itself too. */
void compute_memory_pool_delete(struct compute_memory_pool* pool);

/**
 * Searches for an empty space in the pool.
 * \return The start (in dwords) of the allocatable space in the pool,
 *         or -1 on failure.
 */
int64_t compute_memory_prealloc_chunk(struct compute_memory_pool* pool, int64_t size_in_dw);

/** Searches for the chunk where we can link our new chunk after it. */
struct list_head *compute_memory_postalloc_chunk(struct compute_memory_pool* pool, int64_t start_in_dw);
83
/**
 * Grows the pool's buffer to new_size_in_dw dwords.
 * NOTE(review): return-value convention (0 on success?) is not visible
 * from this header — confirm in compute_memory_pool.c.
 */
int compute_memory_grow_pool(struct compute_memory_pool* pool, struct pipe_context * pipe,
	int new_size_in_dw);

/**
 * Copies the pool between the device buffer and the host shadow copy.
 * \param device_to_host presumably nonzero for device->host, zero for
 *        host->device — confirm against the implementation.
 */
void compute_memory_shadow(struct compute_memory_pool* pool,
	struct pipe_context * pipe, int device_to_host);

/**
 * Places the pending allocations (created by compute_memory_alloc) into
 * the pool's buffer.
 */
int compute_memory_finalize_pending(struct compute_memory_pool* pool,
	struct pipe_context * pipe);

/** Compacts the pool's items to remove fragmentation (see POOL_FRAGMENTED). */
void compute_memory_defrag(struct compute_memory_pool *pool,
	struct pipe_context *pipe);
95
/**
 * Moves an item into the pool's buffer at start_in_dw (in dwords).
 */
int compute_memory_promote_item(struct compute_memory_pool *pool,
	struct compute_memory_item *item, struct pipe_context *pipe,
	int64_t start_in_dw);

/**
 * Moves an item out of the pool's buffer (the inverse of
 * compute_memory_promote_item).
 */
void compute_memory_demote_item(struct compute_memory_pool *pool,
	struct compute_memory_item *item, struct pipe_context *pipe);

/**
 * Moves an item to new_start_in_dw (in dwords), possibly between two
 * different resources (from src to dst).
 */
void compute_memory_move_item(struct compute_memory_pool *pool,
	struct pipe_resource *src, struct pipe_resource *dst,
	struct compute_memory_item *item, uint64_t new_start_in_dw,
	struct pipe_context *pipe);
107
/** Frees the memory chunk with the given id. */
void compute_memory_free(struct compute_memory_pool* pool, int64_t id);

/** Creates pending allocations (placed into the pool later, by compute_memory_finalize_pending). */
struct compute_memory_item* compute_memory_alloc(struct compute_memory_pool* pool, int64_t size_in_dw);
110
/**
 * Transfers data between a chunk and host memory.
 * \param device_to_host presumably nonzero for chunk->data, zero for
 *        data->chunk — confirm against the implementation.
 */
void compute_memory_transfer(struct compute_memory_pool* pool,
	struct pipe_context * pipe, int device_to_host,
	struct compute_memory_item* chunk, void* data,
	int offset_in_chunk, int size);

/** Transfers data between chunk<->data; it is for VRAM<->GART transfers. */
void compute_memory_transfer_direct(struct compute_memory_pool* pool, int chunk_to_data, struct compute_memory_item* chunk, struct r600_resource* data, int offset_in_chunk, int offset_in_data, int size);
117
118 #endif