intel: tools: aubmem: map gtt data to aub file
[mesa.git] src/intel/tools/aub_mem.c
/*
 * Copyright © 2016-2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>

#include "aub_mem.h"

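/* Fallback for libc versions that do not expose memfd_create(): invoke the
 * system call directly (assumes SYS_memfd_create is defined by the kernel
 * headers).
 */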
#ifndef HAVE_MEMFD_CREATE
#include <sys/syscall.h>

static inline int
memfd_create(const char *name, unsigned int flags)
{
   return syscall(SYS_memfd_create, name, flags);
}
#endif

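/* The three structures below do the bookkeeping:
 *
 *  - struct bo_map     : a CPU mapping handed out to the decoder, optionally
 *                        munmap()ed again by aub_mem_clear_bo_maps().
 *  - struct ggtt_entry : one GGTT PTE (virtual page -> physical page), kept
 *                        in an rb-tree indexed by virtual address.
 *  - struct phys_mem   : one 4KiB page of "physical" memory, backed by a
 *                        slice of the memfd so it can be re-mmap()ed into
 *                        contiguous views later.
 *
 * Rough usage sketch (the exact callback wiring depends on the caller, e.g.
 * the aub_read based tools in this directory):
 *
 *    struct aub_mem mem;
 *    if (!aub_mem_init(&mem))
 *       return false;
 *    mem.pml4 = ...;                                   (from the AUB stream)
 *    aub_mem_phys_write(&mem, addr, data, size);       (aub_read callbacks)
 *    aub_mem_ggtt_entry_write(&mem, addr, data, size);
 *    aub_mem_ggtt_write(&mem, addr, data, size);
 *    struct gen_batch_decode_bo bo =
 *       aub_mem_get_ggtt_bo(&mem, gtt_addr);           (decoder lookups)
 *    aub_mem_clear_bo_maps(&mem);
 *    aub_mem_fini(&mem);
 */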
struct bo_map {
   struct list_head link;
   struct gen_batch_decode_bo bo;
   bool unmap_after_use;
};

struct ggtt_entry {
   struct rb_node node;
   uint64_t virt_addr;
   uint64_t phys_addr;
};

struct phys_mem {
   struct rb_node node;
   uint64_t fd_offset;
   uint64_t phys_addr;
   uint8_t *data;
   const uint8_t *aub_data;
};

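/* Remember a mapping handed out to the decoder. Mappings created with
 * unmap_after_use are munmap()ed again by aub_mem_clear_bo_maps().
 */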
static void
add_gtt_bo_map(struct aub_mem *mem, struct gen_batch_decode_bo bo, bool unmap_after_use)
{
   struct bo_map *m = calloc(1, sizeof(*m));

   m->bo = bo;
   m->unmap_after_use = unmap_after_use;
   list_add(&m->link, &mem->maps);
}

void
aub_mem_clear_bo_maps(struct aub_mem *mem)
{
   list_for_each_entry_safe(struct bo_map, i, &mem->maps, link) {
      if (i->unmap_after_use)
         munmap((void *)i->bo.map, i->bo.size);
      list_del(&i->link);
      free(i);
   }
}

static inline struct ggtt_entry *
ggtt_entry_next(struct ggtt_entry *entry)
{
   if (!entry)
      return NULL;
   struct rb_node *node = rb_node_next(&entry->node);
   if (!node)
      return NULL;
   return rb_node_data(struct ggtt_entry, node, node);
}

static inline int
cmp_uint64(uint64_t a, uint64_t b)
{
   if (a < b)
      return -1;
   if (a > b)
      return 1;
   return 0;
}

static inline int
cmp_ggtt_entry(const struct rb_node *node, const void *addr)
{
   struct ggtt_entry *entry = rb_node_data(struct ggtt_entry, node, node);
   return cmp_uint64(entry->virt_addr, *(const uint64_t *)addr);
}

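/* Look up the GGTT entry for virt_addr, creating it if it does not exist
 * yet. rb_tree_search_sloppy() returns the closest node, so the new entry is
 * inserted to its left or right depending on the comparison result.
 */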
static struct ggtt_entry *
ensure_ggtt_entry(struct aub_mem *mem, uint64_t virt_addr)
{
   struct rb_node *node = rb_tree_search_sloppy(&mem->ggtt, &virt_addr,
                                                cmp_ggtt_entry);
   int cmp = 0;
   if (!node || (cmp = cmp_ggtt_entry(node, &virt_addr))) {
      struct ggtt_entry *new_entry = calloc(1, sizeof(*new_entry));
      new_entry->virt_addr = virt_addr;
      rb_tree_insert_at(&mem->ggtt, node, &new_entry->node, cmp > 0);
      node = &new_entry->node;
   }

   return rb_node_data(struct ggtt_entry, node, node);
}

static struct ggtt_entry *
search_ggtt_entry(struct aub_mem *mem, uint64_t virt_addr)
{
   virt_addr &= ~0xfff;

   struct rb_node *node = rb_tree_search(&mem->ggtt, &virt_addr, cmp_ggtt_entry);

   if (!node)
      return NULL;

   return rb_node_data(struct ggtt_entry, node, node);
}

static inline int
cmp_phys_mem(const struct rb_node *node, const void *addr)
{
   struct phys_mem *mem = rb_node_data(struct phys_mem, node, node);
   return cmp_uint64(mem->phys_addr, *(uint64_t *)addr);
}

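/* Look up the 4KiB "physical" page at phys_addr, allocating it on first use.
 * Each new page grows the memfd by one page (ftruncate()) and is mapped
 * writable; keeping every page at a known fd offset lets the GTT lookups
 * below re-mmap() the same pages into contiguous views.
 */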
static struct phys_mem *
ensure_phys_mem(struct aub_mem *mem, uint64_t phys_addr)
{
   struct rb_node *node = rb_tree_search_sloppy(&mem->mem, &phys_addr, cmp_phys_mem);
   int cmp = 0;
   if (!node || (cmp = cmp_phys_mem(node, &phys_addr))) {
      struct phys_mem *new_mem = calloc(1, sizeof(*new_mem));
      new_mem->phys_addr = phys_addr;
      new_mem->fd_offset = mem->mem_fd_len;

      MAYBE_UNUSED int ftruncate_res = ftruncate(mem->mem_fd, mem->mem_fd_len += 4096);
      assert(ftruncate_res == 0);

      new_mem->data = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
                           mem->mem_fd, new_mem->fd_offset);
      assert(new_mem->data != MAP_FAILED);

      rb_tree_insert_at(&mem->mem, node, &new_mem->node, cmp > 0);
      node = &new_mem->node;
   }

   return rb_node_data(struct phys_mem, node, node);
}

static struct phys_mem *
search_phys_mem(struct aub_mem *mem, uint64_t phys_addr)
{
   phys_addr &= ~0xfff;

   struct rb_node *node = rb_tree_search(&mem->mem, &phys_addr, cmp_phys_mem);

   if (!node)
      return NULL;

   return rb_node_data(struct phys_mem, node, node);
}

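/* A write of memory that is already available to the decoder: the data
 * pointer (presumably into the mapped AUB file) is referenced directly, no
 * copy is made and nothing is unmapped when the bo maps are cleared.
 */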
void
aub_mem_local_write(void *_mem, uint64_t address,
                    const void *data, uint32_t size)
{
   struct aub_mem *mem = _mem;
   struct gen_batch_decode_bo bo = {
      .map = data,
      .addr = address,
      .size = size,
   };
   add_gtt_bo_map(mem, bo, false);
}

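/* A write to the GGTT itself. Each 64-bit entry maps one 4KiB page, so the
 * byte offset of an entry within the GGTT gives the page it maps:
 * virt_addr = (address / 8) << 12.
 */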
void
aub_mem_ggtt_entry_write(void *_mem, uint64_t address,
                         const void *_data, uint32_t _size)
{
   struct aub_mem *mem = _mem;
   uint64_t virt_addr = (address / sizeof(uint64_t)) << 12;
   const uint64_t *data = _data;
   size_t size = _size / sizeof(*data);
   for (const uint64_t *entry = data;
        entry < data + size;
        entry++, virt_addr += 4096) {
      struct ggtt_entry *pt = ensure_ggtt_entry(mem, virt_addr);
      pt->phys_addr = *entry;
   }
}

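/* Copy data written to "physical" memory into the memfd-backed pages,
 * splitting the write across 4KiB page boundaries. aub_data records where
 * the page's contents start in the caller's buffer, so the original AUB
 * data for a page can be retrieved later (aub_mem_get_ppgtt_addr_aub_data).
 */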
void
aub_mem_phys_write(void *_mem, uint64_t phys_address,
                   const void *data, uint32_t size)
{
   struct aub_mem *mem = _mem;
   uint32_t to_write = size;
   for (uint64_t page = phys_address & ~0xfff; page < phys_address + size; page += 4096) {
      struct phys_mem *pmem = ensure_phys_mem(mem, page);
      uint64_t offset = MAX2(page, phys_address) - page;
      uint32_t size_this_page = MIN2(to_write, 4096 - offset);
      to_write -= size_this_page;
      memcpy(pmem->data + offset, data, size_this_page);
      pmem->aub_data = data - offset;
      data = (const uint8_t *)data + size_this_page;
   }
}

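/* A write through the GGTT: translate each destination page to its physical
 * page (the PTE must be present, bit 0 set) and forward the data to
 * aub_mem_phys_write().
 */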
void
aub_mem_ggtt_write(void *_mem, uint64_t virt_address,
                   const void *data, uint32_t size)
{
   struct aub_mem *mem = _mem;
   uint32_t to_write = size;
   for (uint64_t page = virt_address & ~0xfff; page < virt_address + size; page += 4096) {
      struct ggtt_entry *entry = search_ggtt_entry(mem, page);
      assert(entry && entry->phys_addr & 0x1);

      uint64_t offset = MAX2(page, virt_address) - page;
      uint32_t size_this_page = MIN2(to_write, 4096 - offset);
      to_write -= size_this_page;

      uint64_t phys_page = entry->phys_addr & ~0xfff; /* Clear the validity bits. */
      aub_mem_phys_write(mem, phys_page + offset, data, size_this_page);
      data = (const uint8_t *)data + size_this_page;
   }
}

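/* Return a CPU mapping of the GGTT buffer containing 'address'. An already
 * handed-out map covering the address is reused if possible. Otherwise the
 * contiguous run of GGTT entries around the address is collected, a
 * placeholder anonymous mapping of that size is created, and every page with
 * backing storage is overlaid onto it with MAP_FIXED from its memfd offset.
 */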
struct gen_batch_decode_bo
aub_mem_get_ggtt_bo(void *_mem, uint64_t address)
{
   struct aub_mem *mem = _mem;
   struct gen_batch_decode_bo bo = {0};

   list_for_each_entry(struct bo_map, i, &mem->maps, link)
      if (i->bo.addr <= address && i->bo.addr + i->bo.size > address)
         return i->bo;

   address &= ~0xfff;

   struct ggtt_entry *start =
      (struct ggtt_entry *)rb_tree_search_sloppy(&mem->ggtt, &address,
                                                 cmp_ggtt_entry);
   if (start && start->virt_addr < address)
      start = ggtt_entry_next(start);
   if (!start)
      return bo;

   struct ggtt_entry *last = start;
   for (struct ggtt_entry *i = ggtt_entry_next(last);
        i && last->virt_addr + 4096 == i->virt_addr;
        last = i, i = ggtt_entry_next(last))
      ;

   bo.addr = MIN2(address, start->virt_addr);
   bo.size = last->virt_addr - bo.addr + 4096;
   bo.map = mmap(NULL, bo.size, PROT_READ, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
   assert(bo.map != MAP_FAILED);

   for (struct ggtt_entry *i = start;
        i;
        i = i == last ? NULL : ggtt_entry_next(i)) {
      uint64_t phys_addr = i->phys_addr & ~0xfff;
      struct phys_mem *phys_mem = search_phys_mem(mem, phys_addr);

      if (!phys_mem)
         continue;

      uint32_t map_offset = i->virt_addr - address;
      void *res = mmap((uint8_t *)bo.map + map_offset, 4096, PROT_READ,
                       MAP_SHARED | MAP_FIXED, mem->mem_fd, phys_mem->fd_offset);
      assert(res != MAP_FAILED);
   }

   add_gtt_bo_map(mem, bo, true);

   return bo;
}

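/* Walk the 4-level PPGTT starting at the PML4. Nine bits of the virtual
 * address are consumed per level (shifts 39/30/21/12), bit 0 of each entry
 * marks it present, and the walk returns the backing phys_mem page of the
 * final level (or NULL if any level is missing).
 */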
static struct phys_mem *
ppgtt_walk(struct aub_mem *mem, uint64_t pml4, uint64_t address)
{
   uint64_t shift = 39;
   uint64_t addr = pml4;
   for (int level = 4; level > 0; level--) {
      struct phys_mem *table = search_phys_mem(mem, addr);
      if (!table)
         return NULL;
      int index = (address >> shift) & 0x1ff;
      uint64_t entry = ((uint64_t *)table->data)[index];
      if (!(entry & 1))
         return NULL;
      addr = entry & ~0xfff;
      shift -= 9;
   }
   return search_phys_mem(mem, addr);
}

static bool
ppgtt_mapped(struct aub_mem *mem, uint64_t pml4, uint64_t address)
{
   return ppgtt_walk(mem, pml4, address) != NULL;
}

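/* Return a CPU mapping of the PPGTT buffer containing 'address'. As with the
 * GGTT variant, a placeholder anonymous mapping is created and each mapped
 * page is overlaid from the memfd with MAP_FIXED.
 */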
struct gen_batch_decode_bo
aub_mem_get_ppgtt_bo(void *_mem, uint64_t address)
{
   struct aub_mem *mem = _mem;
   struct gen_batch_decode_bo bo = {0};

   address &= ~0xfff;

   if (!ppgtt_mapped(mem, mem->pml4, address))
      return bo;

   /* Map everything until the first gap since we don't know how much the
    * decoder actually needs.
    */
   uint64_t end = address;
   while (ppgtt_mapped(mem, mem->pml4, end))
      end += 4096;

   bo.addr = address;
   bo.size = end - address;
   bo.map = mmap(NULL, bo.size, PROT_READ, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
   assert(bo.map != MAP_FAILED);

   for (uint64_t page = address; page < end; page += 4096) {
      struct phys_mem *phys_mem = ppgtt_walk(mem, mem->pml4, page);

      void *res = mmap((uint8_t *)bo.map + (page - bo.addr), 4096, PROT_READ,
                       MAP_SHARED | MAP_FIXED, mem->mem_fd, phys_mem->fd_offset);
      assert(res != MAP_FAILED);
   }

   add_gtt_bo_map(mem, bo, true);

   return bo;
}

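/* Set up the tracker. All "physical" pages live in a single memfd so they
 * can later be stitched into contiguous read-only views; returns false if
 * the memfd cannot be created. aub_mem_fini() tears down the bo maps, both
 * rb-trees and the memfd.
 */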
bool
aub_mem_init(struct aub_mem *mem)
{
   memset(mem, 0, sizeof(*mem));

   list_inithead(&mem->maps);

   mem->mem_fd = memfd_create("phys memory", 0);

   return mem->mem_fd != -1;
}

void
aub_mem_fini(struct aub_mem *mem)
{
   if (mem->mem_fd == -1)
      return;

   aub_mem_clear_bo_maps(mem);

   rb_tree_foreach_safe(struct ggtt_entry, entry, &mem->ggtt, node) {
      rb_tree_remove(&mem->ggtt, &entry->node);
      free(entry);
   }
   rb_tree_foreach_safe(struct phys_mem, entry, &mem->mem, node) {
      rb_tree_remove(&mem->mem, &entry->node);
      free(entry);
   }

   close(mem->mem_fd);
   mem->mem_fd = -1;
}

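/* Single-page accessors: return the 4KiB page containing the given physical
 * or PPGTT-virtual address, either with its current contents (data) or with
 * the contents as last provided by the AUB stream (aub_data).
 */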
struct gen_batch_decode_bo
aub_mem_get_phys_addr_data(struct aub_mem *mem, uint64_t phys_addr)
{
   struct phys_mem *page = search_phys_mem(mem, phys_addr);
   return page ?
      (struct gen_batch_decode_bo) { .map = page->data, .addr = page->phys_addr, .size = 4096 } :
      (struct gen_batch_decode_bo) {};
}

struct gen_batch_decode_bo
aub_mem_get_ppgtt_addr_data(struct aub_mem *mem, uint64_t virt_addr)
{
   struct phys_mem *page = ppgtt_walk(mem, mem->pml4, virt_addr);
   return page ?
      (struct gen_batch_decode_bo) { .map = page->data, .addr = virt_addr & ~((1ULL << 12) - 1), .size = 4096 } :
      (struct gen_batch_decode_bo) {};
}

struct gen_batch_decode_bo
aub_mem_get_ppgtt_addr_aub_data(struct aub_mem *mem, uint64_t virt_addr)
{
   struct phys_mem *page = ppgtt_walk(mem, mem->pml4, virt_addr);
   return page ?
      (struct gen_batch_decode_bo) { .map = page->aub_data, .addr = virt_addr & ~((1ULL << 12) - 1), .size = 4096 } :
      (struct gen_batch_decode_bo) {};
}