intel: tools: split memory management out of aubinator
[mesa.git] / src / intel / tools / aub_mem.c
/*
 * Copyright © 2016-2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>

#include "aub_mem.h"

#ifndef HAVE_MEMFD_CREATE
#include <sys/syscall.h>

static inline int
memfd_create(const char *name, unsigned int flags)
{
   return syscall(SYS_memfd_create, name, flags);
}
#endif

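/* aub_mem emulates the GPU-visible memory captured in an AUB trace.
 * Physical pages are stored as 4KiB slices of an anonymous memfd
 * (mem->mem_fd), and two red-black trees reconstruct the address
 * translations seen in the trace: mem->ggtt keyed by GGTT virtual
 * address and mem->mem keyed by physical address.  The decoder asks
 * for buffers through aub_mem_get_ggtt_bo()/aub_mem_get_ppgtt_bo(),
 * which stitch the relevant pages back into one contiguous CPU
 * mapping.
 */
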
/* A buffer object handed back to the decoder.  Maps created by this
 * file (from the memfd) have unmap_after_use set so that
 * aub_mem_clear_bo_maps() knows to munmap() them.
 */
struct bo_map {
   struct list_head link;
   struct gen_batch_decode_bo bo;
   bool unmap_after_use;
};

/* One GGTT page-table entry, keyed by the 4KiB-aligned virtual address
 * it translates.
 */
struct ggtt_entry {
   struct rb_node node;
   uint64_t virt_addr;
   uint64_t phys_addr;
};

/* One 4KiB page of emulated physical memory, backed by the slice of
 * mem->mem_fd starting at fd_offset.
 */
struct phys_mem {
   struct rb_node node;
   uint64_t fd_offset;
   uint64_t phys_addr;
   uint8_t *data;
};

static void
add_gtt_bo_map(struct aub_mem *mem, struct gen_batch_decode_bo bo, bool unmap_after_use)
{
   struct bo_map *m = calloc(1, sizeof(*m));

   m->bo = bo;
   m->unmap_after_use = unmap_after_use;
   list_add(&m->link, &mem->maps);
}

void
aub_mem_clear_bo_maps(struct aub_mem *mem)
{
   list_for_each_entry_safe(struct bo_map, i, &mem->maps, link) {
      if (i->unmap_after_use)
         munmap((void *)i->bo.map, i->bo.size);
      list_del(&i->link);
      free(i);
   }
}

static inline struct ggtt_entry *
ggtt_entry_next(struct ggtt_entry *entry)
{
   if (!entry)
      return NULL;
   struct rb_node *node = rb_node_next(&entry->node);
   if (!node)
      return NULL;
   return rb_node_data(struct ggtt_entry, node, node);
}

static inline int
cmp_uint64(uint64_t a, uint64_t b)
{
   if (a < b)
      return -1;
   if (a > b)
      return 1;
   return 0;
}

static inline int
cmp_ggtt_entry(const struct rb_node *node, const void *addr)
{
   struct ggtt_entry *entry = rb_node_data(struct ggtt_entry, node, node);
   return cmp_uint64(entry->virt_addr, *(const uint64_t *)addr);
}

/* Return the GGTT entry for virt_addr, inserting a new one if the tree
 * doesn't have it yet.
 */
static struct ggtt_entry *
ensure_ggtt_entry(struct aub_mem *mem, uint64_t virt_addr)
{
   struct rb_node *node = rb_tree_search_sloppy(&mem->ggtt, &virt_addr,
                                                cmp_ggtt_entry);
   int cmp = 0;
   if (!node || (cmp = cmp_ggtt_entry(node, &virt_addr))) {
      struct ggtt_entry *new_entry = calloc(1, sizeof(*new_entry));
      new_entry->virt_addr = virt_addr;
      rb_tree_insert_at(&mem->ggtt, node, &new_entry->node, cmp > 0);
      node = &new_entry->node;
   }

   return rb_node_data(struct ggtt_entry, node, node);
}

static struct ggtt_entry *
search_ggtt_entry(struct aub_mem *mem, uint64_t virt_addr)
{
   virt_addr &= ~0xfff;

   struct rb_node *node = rb_tree_search(&mem->ggtt, &virt_addr, cmp_ggtt_entry);

   if (!node)
      return NULL;

   return rb_node_data(struct ggtt_entry, node, node);
}

static inline int
cmp_phys_mem(const struct rb_node *node, const void *addr)
{
   struct phys_mem *mem = rb_node_data(struct phys_mem, node, node);
   return cmp_uint64(mem->phys_addr, *(uint64_t *)addr);
}

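/* Return the page of emulated physical memory containing phys_addr,
 * allocating it on first use: the memfd grows by 4KiB and the new
 * slice is mapped read/write so later writes can fill it.
 */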
static struct phys_mem *
ensure_phys_mem(struct aub_mem *mem, uint64_t phys_addr)
{
   struct rb_node *node = rb_tree_search_sloppy(&mem->mem, &phys_addr, cmp_phys_mem);
   int cmp = 0;
   if (!node || (cmp = cmp_phys_mem(node, &phys_addr))) {
      struct phys_mem *new_mem = calloc(1, sizeof(*new_mem));
      new_mem->phys_addr = phys_addr;
      new_mem->fd_offset = mem->mem_fd_len;

      MAYBE_UNUSED int ftruncate_res = ftruncate(mem->mem_fd, mem->mem_fd_len += 4096);
      assert(ftruncate_res == 0);

      new_mem->data = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
                           mem->mem_fd, new_mem->fd_offset);
      assert(new_mem->data != MAP_FAILED);

      rb_tree_insert_at(&mem->mem, node, &new_mem->node, cmp > 0);
      node = &new_mem->node;
   }

   return rb_node_data(struct phys_mem, node, node);
}

static struct phys_mem *
search_phys_mem(struct aub_mem *mem, uint64_t phys_addr)
{
   phys_addr &= ~0xfff;

   struct rb_node *node = rb_tree_search(&mem->mem, &phys_addr, cmp_phys_mem);

   if (!node)
      return NULL;

   return rb_node_data(struct phys_mem, node, node);
}

/* Record the caller's buffer directly, without copying; unmap_after_use
 * is false because this file does not own the mapping.
 */
void
aub_mem_local_write(void *_mem, uint64_t address,
                    const void *data, uint32_t size)
{
   struct aub_mem *mem = _mem;
   struct gen_batch_decode_bo bo = {
      .map = data,
      .addr = address,
      .size = size,
   };
   add_gtt_bo_map(mem, bo, false);
}

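/* Handle a write into the GGTT itself.  Each 64-bit entry maps one
 * 4KiB page, so the virtual address covered by an entry follows from
 * its byte offset within the GGTT: (offset / 8) * 4096.
 */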
void
aub_mem_ggtt_entry_write(void *_mem, uint64_t address,
                         const void *_data, uint32_t _size)
{
   struct aub_mem *mem = _mem;
   uint64_t virt_addr = (address / sizeof(uint64_t)) << 12;
   const uint64_t *data = _data;
   size_t size = _size / sizeof(*data);
   for (const uint64_t *entry = data;
        entry < data + size;
        entry++, virt_addr += 4096) {
      struct ggtt_entry *pt = ensure_ggtt_entry(mem, virt_addr);
      pt->phys_addr = *entry;
   }
}

/* Copy data into emulated physical memory, one 4KiB page at a time. */
void
aub_mem_phys_write(void *_mem, uint64_t phys_address,
                   const void *data, uint32_t size)
{
   struct aub_mem *mem = _mem;
   uint32_t to_write = size;
   for (uint64_t page = phys_address & ~0xfff; page < phys_address + size; page += 4096) {
      struct phys_mem *pmem = ensure_phys_mem(mem, page);
      uint64_t offset = MAX2(page, phys_address) - page;
      uint32_t size_this_page = MIN2(to_write, 4096 - offset);
      to_write -= size_this_page;
      memcpy(pmem->data + offset, data, size_this_page);
      data = (const uint8_t *)data + size_this_page;
   }
}

/* Write through the GGTT: translate each page's virtual address to a
 * physical address and forward to aub_mem_phys_write().
 */
void
aub_mem_ggtt_write(void *_mem, uint64_t virt_address,
                   const void *data, uint32_t size)
{
   struct aub_mem *mem = _mem;
   uint32_t to_write = size;
   for (uint64_t page = virt_address & ~0xfff; page < virt_address + size; page += 4096) {
      struct ggtt_entry *entry = search_ggtt_entry(mem, page);
      assert(entry && entry->phys_addr & 0x1);

      uint64_t offset = MAX2(page, virt_address) - page;
      uint32_t size_this_page = MIN2(to_write, 4096 - offset);
      to_write -= size_this_page;

      uint64_t phys_page = entry->phys_addr & ~0xfff; /* Clear the validity bits. */
      aub_mem_phys_write(mem, phys_page + offset, data, size_this_page);
      data = (const uint8_t *)data + size_this_page;
   }
}

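/* Return a CPU mapping of the GGTT buffer containing "address".  A
 * previously recorded BO that covers the address is returned directly.
 * Otherwise, find the run of contiguous GGTT entries starting at the
 * page containing the address (or the first entry above it), reserve
 * an anonymous read-only mapping spanning the run, and overlay each
 * page that has backing physical memory with a MAP_FIXED mapping of
 * its memfd slice.  The result is recorded with unmap_after_use = true
 * so aub_mem_clear_bo_maps() releases it.
 */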
struct gen_batch_decode_bo
aub_mem_get_ggtt_bo(void *_mem, uint64_t address)
{
   struct aub_mem *mem = _mem;
   struct gen_batch_decode_bo bo = {0};

   list_for_each_entry(struct bo_map, i, &mem->maps, link)
      if (i->bo.addr <= address && i->bo.addr + i->bo.size > address)
         return i->bo;

   address &= ~0xfff;

   struct ggtt_entry *start =
      (struct ggtt_entry *)rb_tree_search_sloppy(&mem->ggtt, &address,
                                                 cmp_ggtt_entry);
   if (start && start->virt_addr < address)
      start = ggtt_entry_next(start);
   if (!start)
      return bo;

   /* Walk forward as long as the entries are contiguous. */
   struct ggtt_entry *last = start;
   for (struct ggtt_entry *i = ggtt_entry_next(last);
        i && last->virt_addr + 4096 == i->virt_addr;
        last = i, i = ggtt_entry_next(last))
      ;

   bo.addr = MIN2(address, start->virt_addr);
   bo.size = last->virt_addr - bo.addr + 4096;
   bo.map = mmap(NULL, bo.size, PROT_READ, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
   assert(bo.map != MAP_FAILED);

   for (struct ggtt_entry *i = start;
        i;
        i = i == last ? NULL : ggtt_entry_next(i)) {
      uint64_t phys_addr = i->phys_addr & ~0xfff;
      struct phys_mem *phys_mem = search_phys_mem(mem, phys_addr);

      if (!phys_mem)
         continue;

      uint32_t map_offset = i->virt_addr - address;
      void *res = mmap((uint8_t *)bo.map + map_offset, 4096, PROT_READ,
                       MAP_SHARED | MAP_FIXED, mem->mem_fd, phys_mem->fd_offset);
      assert(res != MAP_FAILED);
   }

   add_gtt_bo_map(mem, bo, true);

   return bo;
}

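/* Walk the 4-level PPGTT starting at the pml4 physical address: 9 bits
 * of the virtual address are consumed per level, starting at bit 39.
 * Returns the physical page backing "address", or NULL if any level is
 * missing or its entry is not marked present.
 */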
static struct phys_mem *
ppgtt_walk(struct aub_mem *mem, uint64_t pml4, uint64_t address)
{
   uint64_t shift = 39;
   uint64_t addr = pml4;
   for (int level = 4; level > 0; level--) {
      struct phys_mem *table = search_phys_mem(mem, addr);
      if (!table)
         return NULL;
      int index = (address >> shift) & 0x1ff;
      uint64_t entry = ((uint64_t *)table->data)[index];
      if (!(entry & 1))
         return NULL;
      addr = entry & ~0xfff;
      shift -= 9;
   }
   return search_phys_mem(mem, addr);
}

static bool
ppgtt_mapped(struct aub_mem *mem, uint64_t pml4, uint64_t address)
{
   return ppgtt_walk(mem, pml4, address) != NULL;
}

struct gen_batch_decode_bo
aub_mem_get_ppgtt_bo(void *_mem, uint64_t address)
{
   struct aub_mem *mem = _mem;
   struct gen_batch_decode_bo bo = {0};

   address &= ~0xfff;

   if (!ppgtt_mapped(mem, mem->pml4, address))
      return bo;

   /* Map everything until the first gap since we don't know how much the
    * decoder actually needs.
    */
   uint64_t end = address;
   while (ppgtt_mapped(mem, mem->pml4, end))
      end += 4096;

   bo.addr = address;
   bo.size = end - address;
   bo.map = mmap(NULL, bo.size, PROT_READ, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
   assert(bo.map != MAP_FAILED);

   for (uint64_t page = address; page < end; page += 4096) {
      struct phys_mem *phys_mem = ppgtt_walk(mem, mem->pml4, page);

      void *res = mmap((uint8_t *)bo.map + (page - bo.addr), 4096, PROT_READ,
                       MAP_SHARED | MAP_FIXED, mem->mem_fd, phys_mem->fd_offset);
      assert(res != MAP_FAILED);
   }

   add_gtt_bo_map(mem, bo, true);

   return bo;
}

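/* Expected lifecycle (a sketch; the exact call sequence is driven by
 * the AUB file parser, which is not part of this file):
 *
 *    struct aub_mem mem;
 *    aub_mem_init(&mem);
 *    ... aub_mem_phys_write()/aub_mem_ggtt_write()/... as the trace
 *        is replayed, aub_mem_get_ggtt_bo()/aub_mem_get_ppgtt_bo()
 *        from the batch decoder ...
 *    aub_mem_fini(&mem);
 */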
bool
aub_mem_init(struct aub_mem *mem)
{
   memset(mem, 0, sizeof(*mem));

   list_inithead(&mem->maps);

   mem->mem_fd = memfd_create("phys memory", 0);

   return mem->mem_fd != -1;
}

void
aub_mem_fini(struct aub_mem *mem)
{
   if (mem->mem_fd == -1)
      return;

   aub_mem_clear_bo_maps(mem);

   rb_tree_foreach_safe(struct ggtt_entry, entry, &mem->ggtt, node) {
      rb_tree_remove(&mem->ggtt, &entry->node);
      free(entry);
   }
   rb_tree_foreach_safe(struct phys_mem, entry, &mem->mem, node) {
      rb_tree_remove(&mem->mem, &entry->node);
      free(entry);
   }

   close(mem->mem_fd);
   mem->mem_fd = -1;
}