intel/aux-map: Add some #defines
[mesa.git] / src / intel / common / gen_aux_map.c
1 /*
2 * Copyright (c) 2018 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 /**
25 * The aux map provides a multi-level lookup of the main surface address which
26 * ends up providing information about the auxiliary surface data, including
27 * the address where the auxiliary data resides.
28 *
29 * The 48-bit VMA (GPU) address of the main surface is split to do the address
30 * lookup:
31 *
32 * 48 bit address of main surface
33 * +--------+--------+--------+------+
34 * | 47:36 | 35:24 | 23:16 | 15:0 |
35 * | L3-idx | L2-idx | L1-idx | ... |
36 * +--------+--------+--------+------+
37 *
38 * The GFX_AUX_TABLE_BASE_ADDR points to a buffer. The L3 Table Entry is
39 * located by indexing into this buffer as a uint64_t array using the L3-idx
40 * value. The 64-bit L3 entry is defined as:
41 *
42 * +-------+-------------+------+---+
43 * | 63:48 | 47:15 | 14:1 | 0 |
44 * | ... | L2-tbl-addr | ... | V |
45 * +-------+-------------+------+---+
46 *
47 * If the `V` (valid) bit is set, then the L2-tbl-addr gives the address for
48 * the level-2 table entries, with the lower address bits filled with zero.
49 * The L2 Table Entry is located by indexing into this buffer as a uint64_t
50 * array using the L2-idx value. The 64-bit L2 entry is similar to the L3
51 * entry, except with 2 additional address bits:
52 *
53 * +-------+-------------+------+---+
54 * | 63:48 | 47:13 | 12:1 | 0 |
55 * | ... | L1-tbl-addr | ... | V |
56 * +-------+-------------+------+---+
57 *
58 * If the `V` bit is set, then the L1-tbl-addr gives the address for the
59 * level-1 table entries, with the lower address bits filled with zero. The L1
60 * Table Entry is located by indexing into this buffer as a uint64_t array
61 * using the L1-idx value. The 64-bit L1 entry is defined as:
62 *
63 * +--------+------+-------+-------+-------+---------------+-----+---+
64 * | 63:58 | 57 | 56:54 | 53:52 | 51:48 | 47:8 | 7:1 | 0 |
65 * | Format | Y/Cr | Depth | TM | ... | aux-data-addr | ... | V |
66 * +--------+------+-------+-------+-------+---------------+-----+---+
67 *
68 * Where:
69 * - Format: See `get_format_encoding`
70 * - Y/Cr: 0=not-Y/Cr, 1=Y/Cr
71 * - (bit) Depth: See `get_bpp_encoding`
72 * - TM (Tile-mode): 0=Ys, 1=Y, 2=rsvd, 3=rsvd
73 * - aux-data-addr: VMA/GPU address for the aux-data
74 * - V: entry is valid
75 */
76
77 #include "gen_aux_map.h"
78 #include "gen_gem.h"
79
80 #include "dev/gen_device_info.h"
81
82 #include "drm-uapi/i915_drm.h"
83 #include "util/list.h"
84 #include "util/ralloc.h"
85 #include "util/u_atomic.h"
86 #include "main/macros.h"
87
88 #include <inttypes.h>
89 #include <stdlib.h>
90 #include <stdio.h>
91 #include <pthread.h>
92
/* Set to true to get verbose stderr logging of every table update. */
static const bool aux_map_debug = false;

/* One driver-allocated, GPU-pinned buffer holding page-table data, linked
 * into gen_aux_map_context::buffers. */
struct aux_map_buffer {
   struct list_head link;
   struct gen_buffer *buffer;
};

/* State for one aux-map translation-table hierarchy. All table mutations
 * happen under `mutex`; `num_buffers` and `state_num` are also read
 * atomically without the lock (see the p_atomic_* uses below). */
struct gen_aux_map_context {
   void *driver_ctx;                /* opaque handle passed to buffer_alloc */
   pthread_mutex_t mutex;           /* guards table and sub-table updates */
   struct gen_mapped_pinned_buffer_alloc *buffer_alloc; /* driver callbacks */
   uint32_t num_buffers;            /* count of entries in `buffers` */
   struct list_head buffers;        /* list of aux_map_buffer */
   uint64_t level3_base_addr;       /* GPU address of the L3 table */
   uint64_t *level3_map;            /* CPU mapping of the L3 table */
   uint32_t tail_offset, tail_remaining; /* sub-allocation cursor in the
                                          * last buffer of `buffers` */
   uint32_t state_num;              /* bumped whenever a change requires the
                                     * HW aux-table to be invalidated */
};
111
112 static bool
113 add_buffer(struct gen_aux_map_context *ctx)
114 {
115 struct aux_map_buffer *buf = ralloc(ctx, struct aux_map_buffer);
116 if (!buf)
117 return false;
118
119 const uint32_t size = 0x100000;
120 buf->buffer = ctx->buffer_alloc->alloc(ctx->driver_ctx, size);
121 if (!buf->buffer) {
122 ralloc_free(buf);
123 return false;
124 }
125
126 assert(buf->buffer->map != NULL);
127
128 list_addtail(&buf->link, &ctx->buffers);
129 ctx->tail_offset = 0;
130 ctx->tail_remaining = size;
131 p_atomic_inc(&ctx->num_buffers);
132
133 return true;
134 }
135
136 static void
137 advance_current_pos(struct gen_aux_map_context *ctx, uint32_t size)
138 {
139 assert(ctx->tail_remaining >= size);
140 ctx->tail_remaining -= size;
141 ctx->tail_offset += size;
142 }
143
144 static bool
145 align_and_verify_space(struct gen_aux_map_context *ctx, uint32_t size,
146 uint32_t align)
147 {
148 if (ctx->tail_remaining < size)
149 return false;
150
151 struct aux_map_buffer *tail =
152 list_last_entry(&ctx->buffers, struct aux_map_buffer, link);
153 uint64_t gpu = tail->buffer->gpu + ctx->tail_offset;
154 uint64_t aligned = align64(gpu, align);
155
156 if ((aligned - gpu) + size > ctx->tail_remaining) {
157 return false;
158 } else {
159 if (aligned - gpu > 0)
160 advance_current_pos(ctx, aligned - gpu);
161 return true;
162 }
163 }
164
165 static void
166 get_current_pos(struct gen_aux_map_context *ctx, uint64_t *gpu, uint64_t **map)
167 {
168 assert(!list_is_empty(&ctx->buffers));
169 struct aux_map_buffer *tail =
170 list_last_entry(&ctx->buffers, struct aux_map_buffer, link);
171 if (gpu)
172 *gpu = tail->buffer->gpu + ctx->tail_offset;
173 if (map)
174 *map = (uint64_t*)((uint8_t*)tail->buffer->map + ctx->tail_offset);
175 }
176
177 static bool
178 add_sub_table(struct gen_aux_map_context *ctx, uint32_t size,
179 uint32_t align, uint64_t *gpu, uint64_t **map)
180 {
181 if (!align_and_verify_space(ctx, size, align)) {
182 if (!add_buffer(ctx))
183 return false;
184 UNUSED bool aligned = align_and_verify_space(ctx, size, align);
185 assert(aligned);
186 }
187 get_current_pos(ctx, gpu, map);
188 memset(*map, 0, size);
189 advance_current_pos(ctx, size);
190 return true;
191 }
192
/* Return the current invalidation counter.  Callers compare this against a
 * previously observed value to decide whether the HW aux-table needs to be
 * invalidated.  Read atomically, so no lock is required. */
uint32_t
gen_aux_map_get_state_num(struct gen_aux_map_context *ctx)
{
   return p_atomic_read(&ctx->state_num);
}
198
199 struct gen_aux_map_context *
200 gen_aux_map_init(void *driver_ctx,
201 struct gen_mapped_pinned_buffer_alloc *buffer_alloc,
202 const struct gen_device_info *devinfo)
203 {
204 struct gen_aux_map_context *ctx;
205 if (devinfo->gen < 12)
206 return NULL;
207
208 ctx = ralloc(NULL, struct gen_aux_map_context);
209 if (!ctx)
210 return NULL;
211
212 if (pthread_mutex_init(&ctx->mutex, NULL))
213 return NULL;
214
215 ctx->driver_ctx = driver_ctx;
216 ctx->buffer_alloc = buffer_alloc;
217 ctx->num_buffers = 0;
218 list_inithead(&ctx->buffers);
219 ctx->tail_offset = 0;
220 ctx->tail_remaining = 0;
221 ctx->state_num = 0;
222
223 if (add_sub_table(ctx, 32 * 1024, 32 * 1024, &ctx->level3_base_addr,
224 &ctx->level3_map)) {
225 if (aux_map_debug)
226 fprintf(stderr, "AUX-MAP L3: 0x%"PRIx64", map=%p\n",
227 ctx->level3_base_addr, ctx->level3_map);
228 p_atomic_inc(&ctx->state_num);
229 return ctx;
230 } else {
231 ralloc_free(ctx);
232 return NULL;
233 }
234 }
235
236 void
237 gen_aux_map_finish(struct gen_aux_map_context *ctx)
238 {
239 if (!ctx)
240 return;
241
242 pthread_mutex_destroy(&ctx->mutex);
243 list_for_each_entry_safe(struct aux_map_buffer, buf, &ctx->buffers, link) {
244 ctx->buffer_alloc->free(ctx->driver_ctx, buf->buffer);
245 list_del(&buf->link);
246 p_atomic_dec(&ctx->num_buffers);
247 ralloc_free(buf);
248 }
249
250 ralloc_free(ctx);
251 }
252
/* Return the GPU address of the L3 root table, suitable for programming
 * into GFX_AUX_TABLE_BASE_ADDR. */
uint64_t
gen_aux_map_get_base(struct gen_aux_map_context *ctx)
{
   /**
    * This gets initialized in gen_aux_map_init, and never changes, so there
    * is no need to lock the mutex.
    */
   return ctx->level3_base_addr;
}
262
263 static struct aux_map_buffer *
264 find_buffer(struct gen_aux_map_context *ctx, uint64_t addr)
265 {
266 list_for_each_entry(struct aux_map_buffer, buf, &ctx->buffers, link) {
267 if (buf->buffer->gpu <= addr && buf->buffer->gpu_end > addr) {
268 return buf;
269 }
270 }
271 return NULL;
272 }
273
274 static uint64_t *
275 get_u64_entry_ptr(struct gen_aux_map_context *ctx, uint64_t addr)
276 {
277 struct aux_map_buffer *buf = find_buffer(ctx, addr);
278 assert(buf);
279 uintptr_t map_offset = addr - buf->buffer->gpu;
280 return (uint64_t*)((uint8_t*)buf->buffer->map + map_offset);
281 }
282
283 static uint8_t
284 get_format_encoding(const struct isl_surf *isl_surf)
285 {
286 switch(isl_surf->format) {
287 case ISL_FORMAT_R32G32B32A32_FLOAT: return 0x11;
288 case ISL_FORMAT_R32G32B32X32_FLOAT: return 0x11;
289 case ISL_FORMAT_R32G32B32A32_SINT: return 0x12;
290 case ISL_FORMAT_R32G32B32A32_UINT: return 0x13;
291 case ISL_FORMAT_R16G16B16A16_UNORM: return 0x14;
292 case ISL_FORMAT_R16G16B16A16_SNORM: return 0x15;
293 case ISL_FORMAT_R16G16B16A16_SINT: return 0x16;
294 case ISL_FORMAT_R16G16B16A16_UINT: return 0x17;
295 case ISL_FORMAT_R16G16B16A16_FLOAT: return 0x10;
296 case ISL_FORMAT_R16G16B16X16_FLOAT: return 0x10;
297 case ISL_FORMAT_R32G32_FLOAT: return 0x11;
298 case ISL_FORMAT_R32G32_SINT: return 0x12;
299 case ISL_FORMAT_R32G32_UINT: return 0x13;
300 case ISL_FORMAT_B8G8R8A8_UNORM: return 0xA;
301 case ISL_FORMAT_B8G8R8X8_UNORM: return 0xA;
302 case ISL_FORMAT_B8G8R8A8_UNORM_SRGB: return 0xA;
303 case ISL_FORMAT_B8G8R8X8_UNORM_SRGB: return 0xA;
304 case ISL_FORMAT_R10G10B10A2_UNORM: return 0x18;
305 case ISL_FORMAT_R10G10B10A2_UNORM_SRGB: return 0x18;
306 case ISL_FORMAT_R10G10B10_FLOAT_A2_UNORM: return 0x19;
307 case ISL_FORMAT_R10G10B10A2_UINT: return 0x1A;
308 case ISL_FORMAT_R8G8B8A8_UNORM: return 0xA;
309 case ISL_FORMAT_R8G8B8A8_UNORM_SRGB: return 0xA;
310 case ISL_FORMAT_R8G8B8A8_SNORM: return 0x1B;
311 case ISL_FORMAT_R8G8B8A8_SINT: return 0x1C;
312 case ISL_FORMAT_R8G8B8A8_UINT: return 0x1D;
313 case ISL_FORMAT_R16G16_UNORM: return 0x14;
314 case ISL_FORMAT_R16G16_SNORM: return 0x15;
315 case ISL_FORMAT_R16G16_SINT: return 0x16;
316 case ISL_FORMAT_R16G16_UINT: return 0x17;
317 case ISL_FORMAT_R16G16_FLOAT: return 0x10;
318 case ISL_FORMAT_B10G10R10A2_UNORM: return 0x18;
319 case ISL_FORMAT_B10G10R10A2_UNORM_SRGB: return 0x18;
320 case ISL_FORMAT_R11G11B10_FLOAT: return 0x1E;
321 case ISL_FORMAT_R32_SINT: return 0x12;
322 case ISL_FORMAT_R32_UINT: return 0x13;
323 case ISL_FORMAT_R32_FLOAT: return 0x11;
324 case ISL_FORMAT_R24_UNORM_X8_TYPELESS: return 0x11;
325 case ISL_FORMAT_B5G6R5_UNORM: return 0xA;
326 case ISL_FORMAT_B5G6R5_UNORM_SRGB: return 0xA;
327 case ISL_FORMAT_B5G5R5A1_UNORM: return 0xA;
328 case ISL_FORMAT_B5G5R5A1_UNORM_SRGB: return 0xA;
329 case ISL_FORMAT_B4G4R4A4_UNORM: return 0xA;
330 case ISL_FORMAT_B4G4R4A4_UNORM_SRGB: return 0xA;
331 case ISL_FORMAT_R8G8_UNORM: return 0xA;
332 case ISL_FORMAT_R8G8_SNORM: return 0x1B;
333 case ISL_FORMAT_R8G8_SINT: return 0x1C;
334 case ISL_FORMAT_R8G8_UINT: return 0x1D;
335 case ISL_FORMAT_R16_UNORM: return 0x14;
336 case ISL_FORMAT_R16_SNORM: return 0x15;
337 case ISL_FORMAT_R16_SINT: return 0x16;
338 case ISL_FORMAT_R16_UINT: return 0x17;
339 case ISL_FORMAT_R16_FLOAT: return 0x10;
340 case ISL_FORMAT_B5G5R5X1_UNORM: return 0xA;
341 case ISL_FORMAT_B5G5R5X1_UNORM_SRGB: return 0xA;
342 case ISL_FORMAT_A1B5G5R5_UNORM: return 0xA;
343 case ISL_FORMAT_A4B4G4R4_UNORM: return 0xA;
344 case ISL_FORMAT_R8_UNORM: return 0xA;
345 case ISL_FORMAT_R8_SNORM: return 0x1B;
346 case ISL_FORMAT_R8_SINT: return 0x1C;
347 case ISL_FORMAT_R8_UINT: return 0x1D;
348 case ISL_FORMAT_A8_UNORM: return 0xA;
349 default:
350 unreachable("Unsupported aux-map format!");
351 return 0;
352 }
353 }
354
/* Translate a bits-per-pixel value into the 3-bit "(bit) Depth" field of
 * an aux-map L1 entry (bits 56:54 — see the file header).  Note the
 * hardware encoding is not monotonic in bpp. */
static uint8_t
get_bpp_encoding(uint16_t bpp)
{
   switch (bpp) {
   case 8:   return 4;
   case 10:  return 1;
   case 12:  return 2;
   case 16:  return 0;
   case 32:  return 5;
   case 64:  return 6;
   case 128: return 7;
   default:
      unreachable("Unsupported bpp!");
      return 0;
   }
}
371
#define GEN_AUX_MAP_ENTRY_Y_TILED_BIT (0x1ull << 52)
#define GEN_AUX_MAP_ENTRY_VALID_BIT 0x1ull

/* Map one 64 KiB main-surface page at `address` to its aux data at
 * `aux_address`, walking (and lazily building) the L3/L2/L1 tables
 * described in the file header.  Sets *state_changed when an existing L1
 * entry is overwritten with different contents, which requires the HW
 * aux-table to be invalidated.  Caller must hold ctx->mutex.
 */
static void
add_mapping(struct gen_aux_map_context *ctx, uint64_t address,
            uint64_t aux_address, const struct isl_surf *isl_surf,
            bool *state_changed)
{
   if (aux_map_debug)
      fprintf(stderr, "AUX-MAP 0x%"PRIx64" => 0x%"PRIx64"\n", address,
              aux_address);

   /* Address bits 47:36 index the L3 table (see file header). */
   uint32_t l3_index = (address >> 36) & 0xfff;
   uint64_t *l3_entry = &ctx->level3_map[l3_index];

   uint64_t *l2_map;
   if ((*l3_entry & GEN_AUX_MAP_ENTRY_VALID_BIT) == 0) {
      /* No L2 table yet for this range; allocate a 32 KiB, 32 KiB-aligned
       * one (4096 64-bit entries). */
      uint64_t l2_gpu;
      if (add_sub_table(ctx, 32 * 1024, 32 * 1024, &l2_gpu, &l2_map)) {
         if (aux_map_debug)
            fprintf(stderr, "AUX-MAP L3[0x%x]: 0x%"PRIx64", map=%p\n",
                    l3_index, l2_gpu, l2_map);
      } else {
         unreachable("Failed to add L2 Aux-Map Page Table!");
      }
      /* L3 entry: address bits 47:15 of the L2 table, plus the valid bit. */
      *l3_entry = (l2_gpu & 0xffffffff8000ULL) | 1;
   } else {
      /* Existing L2 table; strip the low/valid bits and canonicalize the
       * 48-bit address before looking up its CPU mapping. */
      uint64_t l2_addr = gen_canonical_address(*l3_entry & ~0x7fffULL);
      l2_map = get_u64_entry_ptr(ctx, l2_addr);
   }
   /* Address bits 35:24 index the L2 table. */
   uint32_t l2_index = (address >> 24) & 0xfff;
   uint64_t *l2_entry = &l2_map[l2_index];

   uint64_t *l1_map;
   if ((*l2_entry & GEN_AUX_MAP_ENTRY_VALID_BIT) == 0) {
      /* No L1 table yet; allocate an 8 KiB, 8 KiB-aligned one (1024
       * 64-bit entries, though only 256 are indexed — see l1_index). */
      uint64_t l1_gpu;
      if (add_sub_table(ctx, 8 * 1024, 8 * 1024, &l1_gpu, &l1_map)) {
         if (aux_map_debug)
            fprintf(stderr, "AUX-MAP L2[0x%x]: 0x%"PRIx64", map=%p\n",
                    l2_index, l1_gpu, l1_map);
      } else {
         unreachable("Failed to add L1 Aux-Map Page Table!");
      }
      /* L2 entry: address bits 47:13 of the L1 table, plus the valid bit. */
      *l2_entry = (l1_gpu & 0xffffffffe000ULL) | 1;
   } else {
      uint64_t l1_addr = gen_canonical_address(*l2_entry & ~0x1fffULL);
      l1_map = get_u64_entry_ptr(ctx, l1_addr);
   }
   /* Address bits 23:16 index the L1 table. */
   uint32_t l1_index = (address >> 16) & 0xff;
   uint64_t *l1_entry = &l1_map[l1_index];

   const struct isl_format_layout *fmtl =
      isl_format_get_layout(isl_surf->format);
   uint16_t bpp = fmtl->bpb;
   /* Only formats with 1x1x1 blocks are supported here. */
   assert(fmtl->bw == 1 && fmtl->bh == 1 && fmtl->bd == 1);
   if (aux_map_debug)
      fprintf(stderr, "AUX-MAP entry %s, bpp=%d\n",
              isl_format_get_name(isl_surf->format), bpp);

   /* Assemble the L1 entry per the layout in the file header: aux address
    * bits 47:8, format in 63:58, bit depth in 56:54, Y-tiled, valid. */
   const uint64_t l1_data =
      (aux_address & 0xffffffffff00ULL) |
      ((uint64_t)get_format_encoding(isl_surf) << 58) |
      ((uint64_t)get_bpp_encoding(bpp) << 54) |
      GEN_AUX_MAP_ENTRY_Y_TILED_BIT |
      GEN_AUX_MAP_ENTRY_VALID_BIT;

   const uint64_t current_l1_data = *l1_entry;
   if ((current_l1_data & GEN_AUX_MAP_ENTRY_VALID_BIT) == 0) {
      /* aux_address must be 256-byte aligned to fit in bits 47:8. */
      assert((aux_address & 0xffULL) == 0);
      if (aux_map_debug)
         fprintf(stderr, "AUX-MAP L1[0x%x] 0x%"PRIx64" -> 0x%"PRIx64"\n",
                 l1_index, current_l1_data, l1_data);
      /**
       * We use non-zero bits in 63:1 to indicate the entry had been filled
       * previously. If these bits are non-zero and they don't exactly match
       * what we want to program into the entry, then we must force the
       * aux-map tables to be flushed.
       */
      if (current_l1_data != 0 &&
          (current_l1_data | GEN_AUX_MAP_ENTRY_VALID_BIT) != l1_data)
         *state_changed = true;
      *l1_entry = l1_data;
   } else {
      if (aux_map_debug)
         fprintf(stderr, "AUX-MAP L1[0x%x] is already marked valid!\n",
                 l1_index);
      /* Re-mapping a valid entry is only legal if nothing changes. */
      assert(*l1_entry == l1_data);
   }
}
461
462 void
463 gen_aux_map_add_image(struct gen_aux_map_context *ctx,
464 const struct isl_surf *isl_surf, uint64_t address,
465 uint64_t aux_address)
466 {
467 bool state_changed = false;
468 pthread_mutex_lock(&ctx->mutex);
469 uint64_t map_addr = address;
470 uint64_t dest_aux_addr = aux_address;
471 assert(align64(address, GEN_AUX_MAP_MAIN_PAGE_SIZE) == address);
472 assert(align64(aux_address, GEN_AUX_MAP_AUX_PAGE_SIZE) == aux_address);
473 while (map_addr - address < isl_surf->size_B) {
474 add_mapping(ctx, map_addr, dest_aux_addr, isl_surf, &state_changed);
475 map_addr += GEN_AUX_MAP_MAIN_PAGE_SIZE;
476 dest_aux_addr += GEN_AUX_MAP_AUX_PAGE_SIZE;
477 }
478 pthread_mutex_unlock(&ctx->mutex);
479 if (state_changed)
480 p_atomic_inc(&ctx->state_num);
481 }
482
/**
 * We mark the leaf entry as invalid, but we don't attempt to cleanup the
 * other levels of translation mappings. Since we attempt to re-use VMA
 * ranges, hopefully this will not lead to unbounded growth of the translation
 * tables.
 *
 * Clears the valid bit of the L1 entry covering `address` (one 64 KiB main
 * page).  Sets *state_changed only in the rare case where the resulting
 * entry is all-zero (see the comment below).  Caller must hold ctx->mutex.
 */
static void
remove_mapping(struct gen_aux_map_context *ctx, uint64_t address,
               bool *state_changed)
{
   /* Address bits 47:36 index the L3 table (see file header). */
   uint32_t l3_index = (address >> 36) & 0xfff;
   uint64_t *l3_entry = &ctx->level3_map[l3_index];

   uint64_t *l2_map;
   if ((*l3_entry & GEN_AUX_MAP_ENTRY_VALID_BIT) == 0) {
      /* No L2 table was ever built for this range; nothing to remove. */
      return;
   } else {
      uint64_t l2_addr = gen_canonical_address(*l3_entry & ~0x7fffULL);
      l2_map = get_u64_entry_ptr(ctx, l2_addr);
   }
   /* Address bits 35:24 index the L2 table. */
   uint32_t l2_index = (address >> 24) & 0xfff;
   uint64_t *l2_entry = &l2_map[l2_index];

   uint64_t *l1_map;
   if ((*l2_entry & GEN_AUX_MAP_ENTRY_VALID_BIT) == 0) {
      /* No L1 table for this range; nothing to remove. */
      return;
   } else {
      uint64_t l1_addr = gen_canonical_address(*l2_entry & ~0x1fffULL);
      l1_map = get_u64_entry_ptr(ctx, l1_addr);
   }
   /* Address bits 23:16 index the L1 table. */
   uint32_t l1_index = (address >> 16) & 0xff;
   uint64_t *l1_entry = &l1_map[l1_index];

   const uint64_t current_l1_data = *l1_entry;
   /* Keep bits 63:1 intact so a future add_mapping can tell this entry was
    * previously in use; only the valid bit is cleared. */
   const uint64_t l1_data = current_l1_data & ~1ull;

   if ((current_l1_data & GEN_AUX_MAP_ENTRY_VALID_BIT) == 0) {
      /* Entry was already invalid; nothing to do. */
      return;
   } else {
      if (aux_map_debug)
         fprintf(stderr, "AUX-MAP [0x%x][0x%x][0x%x] L1 entry removed!\n",
                 l3_index, l2_index, l1_index);
      /**
       * We use non-zero bits in 63:1 to indicate the entry had been filled
       * previously. In the unlikely event that these are all zero, we force a
       * flush of the aux-map tables.
       */
      if (unlikely(l1_data == 0))
         *state_changed = true;
      *l1_entry = l1_data;
   }
}
535
536 void
537 gen_aux_map_unmap_range(struct gen_aux_map_context *ctx, uint64_t address,
538 uint64_t size)
539 {
540 bool state_changed = false;
541 pthread_mutex_lock(&ctx->mutex);
542 if (aux_map_debug)
543 fprintf(stderr, "AUX-MAP remove 0x%"PRIx64"-0x%"PRIx64"\n", address,
544 address + size);
545
546 uint64_t map_addr = address;
547 assert(align64(address, GEN_AUX_MAP_MAIN_PAGE_SIZE) == address);
548 while (map_addr - address < size) {
549 remove_mapping(ctx, map_addr, &state_changed);
550 map_addr += 64 * 1024;
551 }
552 pthread_mutex_unlock(&ctx->mutex);
553 if (state_changed)
554 p_atomic_inc(&ctx->state_num);
555 }
556
/* Return how many page-table buffers the context currently owns.  Read
 * atomically, so no lock is required. */
uint32_t
gen_aux_map_get_num_buffers(struct gen_aux_map_context *ctx)
{
   return p_atomic_read(&ctx->num_buffers);
}
562
563 void
564 gen_aux_map_fill_bos(struct gen_aux_map_context *ctx, void **driver_bos,
565 uint32_t max_bos)
566 {
567 assert(p_atomic_read(&ctx->num_buffers) >= max_bos);
568 uint32_t i = 0;
569 list_for_each_entry(struct aux_map_buffer, buf, &ctx->buffers, link) {
570 if (i >= max_bos)
571 return;
572 driver_bos[i++] = buf->buffer->driver_bo;
573 }
574 }