/*
 * Copyright (c) 2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
/**
 * The aux map provides a multi-level lookup of the main surface address which
 * ends up providing information about the auxiliary surface data, including
 * the address where the auxiliary data resides.
 *
 * The 48-bit VMA (GPU) address of the main surface is split to do the address
 * lookup:
 *
 *  48 bit address of main surface
 * +--------+--------+--------+------+
 * | 47:36  | 35:24  | 23:16  | 15:0 |
 * | L3-idx | L2-idx | L1-idx | ...  |
 * +--------+--------+--------+------+
 *
 * The GFX_AUX_TABLE_BASE_ADDR points to a buffer. The L3 Table Entry is
 * located by indexing into this buffer as a uint64_t array using the L3-idx
 * value. The 64-bit L3 entry is defined as:
 *
 * +-------+-------------+------+---+
 * | 63:48 | 47:15       | 14:1 | 0 |
 * | ...   | L2-tbl-addr | ...  | V |
 * +-------+-------------+------+---+
 *
 * If the `V` (valid) bit is set, then the L2-tbl-addr gives the address for
 * the level-2 table entries, with the lower address bits filled with zero.
 * The L2 Table Entry is located by indexing into this buffer as a uint64_t
 * array using the L2-idx value. The 64-bit L2 entry is similar to the L3
 * entry, except with 2 additional address bits:
 *
 * +-------+-------------+------+---+
 * | 63:48 | 47:13       | 12:1 | 0 |
 * | ...   | L1-tbl-addr | ...  | V |
 * +-------+-------------+------+---+
 *
 * If the `V` bit is set, then the L1-tbl-addr gives the address for the
 * level-1 table entries, with the lower address bits filled with zero. The L1
 * Table Entry is located by indexing into this buffer as a uint64_t array
 * using the L1-idx value. The 64-bit L1 entry is defined as:
 *
 * +--------+------+-------+-------+-------+---------------+-----+---+
 * | 63:58  | 57   | 56:54 | 53:52 | 51:48 | 47:8          | 7:1 | 0 |
 * | Format | Y/Cr | Depth | TM    | ...   | aux-data-addr | ... | V |
 * +--------+------+-------+-------+-------+---------------+-----+---+
 *
 * Where:
 *  - Format: See `get_format_encoding`
 *  - Y/Cr: 0=not-Y/Cr, 1=Y/Cr
 *  - (bit) Depth: See `get_bpp_encoding`
 *  - TM (Tile-mode): 0=Ys, 1=Y, 2=rsvd, 3=rsvd
 *  - aux-data-addr: VMA/GPU address for the aux-data
 *  - V: entry is valid
 */
77 #include "gen_aux_map.h"
80 #include "dev/gen_device_info.h"
82 #include "drm-uapi/i915_drm.h"
83 #include "util/list.h"
84 #include "util/ralloc.h"
85 #include "util/u_atomic.h"
86 #include "main/macros.h"
/* Set to true to get verbose stderr logging of every aux-map table update. */
static const bool aux_map_debug = false;
95 struct aux_map_buffer
{
96 struct list_head link
;
97 struct gen_buffer
*buffer
;
100 struct gen_aux_map_context
{
102 pthread_mutex_t mutex
;
103 struct gen_mapped_pinned_buffer_alloc
*buffer_alloc
;
104 uint32_t num_buffers
;
105 struct list_head buffers
;
106 uint64_t level3_base_addr
;
107 uint64_t *level3_map
;
108 uint32_t tail_offset
, tail_remaining
;
113 add_buffer(struct gen_aux_map_context
*ctx
)
115 struct aux_map_buffer
*buf
= ralloc(ctx
, struct aux_map_buffer
);
119 const uint32_t size
= 0x100000;
120 buf
->buffer
= ctx
->buffer_alloc
->alloc(ctx
->driver_ctx
, size
);
126 assert(buf
->buffer
->map
!= NULL
);
128 list_addtail(&buf
->link
, &ctx
->buffers
);
129 ctx
->tail_offset
= 0;
130 ctx
->tail_remaining
= size
;
131 p_atomic_inc(&ctx
->num_buffers
);
137 advance_current_pos(struct gen_aux_map_context
*ctx
, uint32_t size
)
139 assert(ctx
->tail_remaining
>= size
);
140 ctx
->tail_remaining
-= size
;
141 ctx
->tail_offset
+= size
;
145 align_and_verify_space(struct gen_aux_map_context
*ctx
, uint32_t size
,
148 if (ctx
->tail_remaining
< size
)
151 struct aux_map_buffer
*tail
=
152 list_last_entry(&ctx
->buffers
, struct aux_map_buffer
, link
);
153 uint64_t gpu
= tail
->buffer
->gpu
+ ctx
->tail_offset
;
154 uint64_t aligned
= align64(gpu
, align
);
156 if ((aligned
- gpu
) + size
> ctx
->tail_remaining
) {
159 if (aligned
- gpu
> 0)
160 advance_current_pos(ctx
, aligned
- gpu
);
166 get_current_pos(struct gen_aux_map_context
*ctx
, uint64_t *gpu
, uint64_t **map
)
168 assert(!list_is_empty(&ctx
->buffers
));
169 struct aux_map_buffer
*tail
=
170 list_last_entry(&ctx
->buffers
, struct aux_map_buffer
, link
);
172 *gpu
= tail
->buffer
->gpu
+ ctx
->tail_offset
;
174 *map
= (uint64_t*)((uint8_t*)tail
->buffer
->map
+ ctx
->tail_offset
);
178 add_sub_table(struct gen_aux_map_context
*ctx
, uint32_t size
,
179 uint32_t align
, uint64_t *gpu
, uint64_t **map
)
181 if (!align_and_verify_space(ctx
, size
, align
)) {
182 if (!add_buffer(ctx
))
184 UNUSED
bool aligned
= align_and_verify_space(ctx
, size
, align
);
187 get_current_pos(ctx
, gpu
, map
);
188 memset(*map
, 0, size
);
189 advance_current_pos(ctx
, size
);
194 gen_aux_map_get_state_num(struct gen_aux_map_context
*ctx
)
196 return p_atomic_read(&ctx
->state_num
);
199 struct gen_aux_map_context
*
200 gen_aux_map_init(void *driver_ctx
,
201 struct gen_mapped_pinned_buffer_alloc
*buffer_alloc
,
202 const struct gen_device_info
*devinfo
)
204 struct gen_aux_map_context
*ctx
;
205 if (devinfo
->gen
< 12)
208 ctx
= ralloc(NULL
, struct gen_aux_map_context
);
212 if (pthread_mutex_init(&ctx
->mutex
, NULL
))
215 ctx
->driver_ctx
= driver_ctx
;
216 ctx
->buffer_alloc
= buffer_alloc
;
217 ctx
->num_buffers
= 0;
218 list_inithead(&ctx
->buffers
);
219 ctx
->tail_offset
= 0;
220 ctx
->tail_remaining
= 0;
223 if (add_sub_table(ctx
, 32 * 1024, 32 * 1024, &ctx
->level3_base_addr
,
226 fprintf(stderr
, "AUX-MAP L3: 0x%"PRIx64
", map=%p\n",
227 ctx
->level3_base_addr
, ctx
->level3_map
);
228 p_atomic_inc(&ctx
->state_num
);
237 gen_aux_map_finish(struct gen_aux_map_context
*ctx
)
242 pthread_mutex_destroy(&ctx
->mutex
);
243 list_for_each_entry_safe(struct aux_map_buffer
, buf
, &ctx
->buffers
, link
) {
244 ctx
->buffer_alloc
->free(ctx
->driver_ctx
, buf
->buffer
);
245 list_del(&buf
->link
);
246 p_atomic_dec(&ctx
->num_buffers
);
254 gen_aux_map_get_base(struct gen_aux_map_context
*ctx
)
257 * This get initialized in gen_aux_map_init, and never changes, so there is
258 * no need to lock the mutex.
260 return ctx
->level3_base_addr
;
263 static struct aux_map_buffer
*
264 find_buffer(struct gen_aux_map_context
*ctx
, uint64_t addr
)
266 list_for_each_entry(struct aux_map_buffer
, buf
, &ctx
->buffers
, link
) {
267 if (buf
->buffer
->gpu
<= addr
&& buf
->buffer
->gpu_end
> addr
) {
275 get_u64_entry_ptr(struct gen_aux_map_context
*ctx
, uint64_t addr
)
277 struct aux_map_buffer
*buf
= find_buffer(ctx
, addr
);
279 uintptr_t map_offset
= addr
- buf
->buffer
->gpu
;
280 return (uint64_t*)((uint8_t*)buf
->buffer
->map
+ map_offset
);
284 get_format_encoding(const struct isl_surf
*isl_surf
)
286 switch(isl_surf
->format
) {
287 case ISL_FORMAT_R32G32B32A32_FLOAT
: return 0x11;
288 case ISL_FORMAT_R32G32B32X32_FLOAT
: return 0x11;
289 case ISL_FORMAT_R32G32B32A32_SINT
: return 0x12;
290 case ISL_FORMAT_R32G32B32A32_UINT
: return 0x13;
291 case ISL_FORMAT_R16G16B16A16_UNORM
: return 0x14;
292 case ISL_FORMAT_R16G16B16A16_SNORM
: return 0x15;
293 case ISL_FORMAT_R16G16B16A16_SINT
: return 0x16;
294 case ISL_FORMAT_R16G16B16A16_UINT
: return 0x17;
295 case ISL_FORMAT_R16G16B16A16_FLOAT
: return 0x10;
296 case ISL_FORMAT_R16G16B16X16_FLOAT
: return 0x10;
297 case ISL_FORMAT_R32G32_FLOAT
: return 0x11;
298 case ISL_FORMAT_R32G32_SINT
: return 0x12;
299 case ISL_FORMAT_R32G32_UINT
: return 0x13;
300 case ISL_FORMAT_B8G8R8A8_UNORM
: return 0xA;
301 case ISL_FORMAT_B8G8R8X8_UNORM
: return 0xA;
302 case ISL_FORMAT_B8G8R8A8_UNORM_SRGB
: return 0xA;
303 case ISL_FORMAT_B8G8R8X8_UNORM_SRGB
: return 0xA;
304 case ISL_FORMAT_R10G10B10A2_UNORM
: return 0x18;
305 case ISL_FORMAT_R10G10B10A2_UNORM_SRGB
: return 0x18;
306 case ISL_FORMAT_R10G10B10_FLOAT_A2_UNORM
: return 0x19;
307 case ISL_FORMAT_R10G10B10A2_UINT
: return 0x1A;
308 case ISL_FORMAT_R8G8B8A8_UNORM
: return 0xA;
309 case ISL_FORMAT_R8G8B8A8_UNORM_SRGB
: return 0xA;
310 case ISL_FORMAT_R8G8B8A8_SNORM
: return 0x1B;
311 case ISL_FORMAT_R8G8B8A8_SINT
: return 0x1C;
312 case ISL_FORMAT_R8G8B8A8_UINT
: return 0x1D;
313 case ISL_FORMAT_R16G16_UNORM
: return 0x14;
314 case ISL_FORMAT_R16G16_SNORM
: return 0x15;
315 case ISL_FORMAT_R16G16_SINT
: return 0x16;
316 case ISL_FORMAT_R16G16_UINT
: return 0x17;
317 case ISL_FORMAT_R16G16_FLOAT
: return 0x10;
318 case ISL_FORMAT_B10G10R10A2_UNORM
: return 0x18;
319 case ISL_FORMAT_B10G10R10A2_UNORM_SRGB
: return 0x18;
320 case ISL_FORMAT_R11G11B10_FLOAT
: return 0x1E;
321 case ISL_FORMAT_R32_SINT
: return 0x12;
322 case ISL_FORMAT_R32_UINT
: return 0x13;
323 case ISL_FORMAT_R32_FLOAT
: return 0x11;
324 case ISL_FORMAT_R24_UNORM_X8_TYPELESS
: return 0x11;
325 case ISL_FORMAT_B5G6R5_UNORM
: return 0xA;
326 case ISL_FORMAT_B5G6R5_UNORM_SRGB
: return 0xA;
327 case ISL_FORMAT_B5G5R5A1_UNORM
: return 0xA;
328 case ISL_FORMAT_B5G5R5A1_UNORM_SRGB
: return 0xA;
329 case ISL_FORMAT_B4G4R4A4_UNORM
: return 0xA;
330 case ISL_FORMAT_B4G4R4A4_UNORM_SRGB
: return 0xA;
331 case ISL_FORMAT_R8G8_UNORM
: return 0xA;
332 case ISL_FORMAT_R8G8_SNORM
: return 0x1B;
333 case ISL_FORMAT_R8G8_SINT
: return 0x1C;
334 case ISL_FORMAT_R8G8_UINT
: return 0x1D;
335 case ISL_FORMAT_R16_UNORM
: return 0x14;
336 case ISL_FORMAT_R16_SNORM
: return 0x15;
337 case ISL_FORMAT_R16_SINT
: return 0x16;
338 case ISL_FORMAT_R16_UINT
: return 0x17;
339 case ISL_FORMAT_R16_FLOAT
: return 0x10;
340 case ISL_FORMAT_B5G5R5X1_UNORM
: return 0xA;
341 case ISL_FORMAT_B5G5R5X1_UNORM_SRGB
: return 0xA;
342 case ISL_FORMAT_A1B5G5R5_UNORM
: return 0xA;
343 case ISL_FORMAT_A4B4G4R4_UNORM
: return 0xA;
344 case ISL_FORMAT_R8_UNORM
: return 0xA;
345 case ISL_FORMAT_R8_SNORM
: return 0x1B;
346 case ISL_FORMAT_R8_SINT
: return 0x1C;
347 case ISL_FORMAT_R8_UINT
: return 0x1D;
348 case ISL_FORMAT_A8_UNORM
: return 0xA;
350 unreachable("Unsupported aux-map format!");
/* Return the 3-bit bit-depth encoding (L1-entry bits 56:54) for a
 * bits-per-pixel value.
 * NOTE(review): the switch body was lost in extraction; these case values
 * are reconstructed from the upstream implementation — confirm against the
 * hardware documentation before relying on them.
 */
static uint8_t
get_bpp_encoding(uint16_t bpp)
{
   switch (bpp) {
   case 16:  return 0;
   case 10:  return 1;
   case 12:  return 2;
   case 8:   return 4;
   case 32:  return 5;
   case 64:  return 6;
   case 128: return 7;
   default:
      unreachable("Unsupported bpp!");
      return 0;
   }
}
/* L1-entry TM (tile-mode) field, bits 53:52: value 1 selects Y tiling. */
#define GEN_AUX_MAP_ENTRY_Y_TILED_BIT  (0x1ull << 52)
/* Bit 0 of every level's entry: the entry is valid. */
#define GEN_AUX_MAP_ENTRY_VALID_BIT    0x1ull
376 gen_aux_map_format_bits_for_isl_surf(const struct isl_surf
*isl_surf
)
378 const struct isl_format_layout
*fmtl
=
379 isl_format_get_layout(isl_surf
->format
);
381 uint16_t bpp
= fmtl
->bpb
;
382 assert(fmtl
->bw
== 1 && fmtl
->bh
== 1 && fmtl
->bd
== 1);
384 fprintf(stderr
, "AUX-MAP entry %s, bpp=%d\n",
385 isl_format_get_name(isl_surf
->format
), bpp
);
387 assert(isl_tiling_is_any_y(isl_surf
->tiling
));
389 uint64_t format_bits
=
390 ((uint64_t)get_format_encoding(isl_surf
) << 58) |
391 ((uint64_t)get_bpp_encoding(bpp
) << 54) |
392 GEN_AUX_MAP_ENTRY_Y_TILED_BIT
;
394 assert((format_bits
& GEN_AUX_MAP_FORMAT_BITS_MASK
) == format_bits
);
400 get_aux_entry(struct gen_aux_map_context
*ctx
, uint64_t address
,
401 uint32_t *l1_index_out
, uint64_t *l1_entry_addr_out
,
402 uint64_t **l1_entry_map_out
)
404 uint32_t l3_index
= (address
>> 36) & 0xfff;
405 uint64_t *l3_entry
= &ctx
->level3_map
[l3_index
];
408 if ((*l3_entry
& GEN_AUX_MAP_ENTRY_VALID_BIT
) == 0) {
410 if (add_sub_table(ctx
, 32 * 1024, 32 * 1024, &l2_gpu
, &l2_map
)) {
412 fprintf(stderr
, "AUX-MAP L3[0x%x]: 0x%"PRIx64
", map=%p\n",
413 l3_index
, l2_gpu
, l2_map
);
415 unreachable("Failed to add L2 Aux-Map Page Table!");
417 *l3_entry
= (l2_gpu
& 0xffffffff8000ULL
) | 1;
419 uint64_t l2_addr
= gen_canonical_address(*l3_entry
& ~0x7fffULL
);
420 l2_map
= get_u64_entry_ptr(ctx
, l2_addr
);
422 uint32_t l2_index
= (address
>> 24) & 0xfff;
423 uint64_t *l2_entry
= &l2_map
[l2_index
];
425 uint64_t l1_addr
, *l1_map
;
426 if ((*l2_entry
& GEN_AUX_MAP_ENTRY_VALID_BIT
) == 0) {
427 if (add_sub_table(ctx
, 8 * 1024, 8 * 1024, &l1_addr
, &l1_map
)) {
429 fprintf(stderr
, "AUX-MAP L2[0x%x]: 0x%"PRIx64
", map=%p\n",
430 l2_index
, l1_addr
, l1_map
);
432 unreachable("Failed to add L1 Aux-Map Page Table!");
434 *l2_entry
= (l1_addr
& 0xffffffffe000ULL
) | 1;
436 l1_addr
= gen_canonical_address(*l2_entry
& ~0x1fffULL
);
437 l1_map
= get_u64_entry_ptr(ctx
, l1_addr
);
439 uint32_t l1_index
= (address
>> 16) & 0xff;
441 *l1_index_out
= l1_index
;
442 if (l1_entry_addr_out
)
443 *l1_entry_addr_out
= l1_addr
+ l1_index
* sizeof(*l1_map
);
444 if (l1_entry_map_out
)
445 *l1_entry_map_out
= &l1_map
[l1_index
];
449 add_mapping(struct gen_aux_map_context
*ctx
, uint64_t address
,
450 uint64_t aux_address
, uint64_t format_bits
,
454 fprintf(stderr
, "AUX-MAP 0x%"PRIx64
" => 0x%"PRIx64
"\n", address
,
459 get_aux_entry(ctx
, address
, &l1_index
, NULL
, &l1_entry
);
461 const uint64_t l1_data
=
462 (aux_address
& GEN_AUX_MAP_ADDRESS_MASK
) |
464 GEN_AUX_MAP_ENTRY_VALID_BIT
;
466 const uint64_t current_l1_data
= *l1_entry
;
467 if ((current_l1_data
& GEN_AUX_MAP_ENTRY_VALID_BIT
) == 0) {
468 assert((aux_address
& 0xffULL
) == 0);
470 fprintf(stderr
, "AUX-MAP L1[0x%x] 0x%"PRIx64
" -> 0x%"PRIx64
"\n",
471 l1_index
, current_l1_data
, l1_data
);
473 * We use non-zero bits in 63:1 to indicate the entry had been filled
474 * previously. If these bits are non-zero and they don't exactly match
475 * what we want to program into the entry, then we must force the
476 * aux-map tables to be flushed.
478 if (current_l1_data
!= 0 && \
479 (current_l1_data
| GEN_AUX_MAP_ENTRY_VALID_BIT
) != l1_data
)
480 *state_changed
= true;
484 fprintf(stderr
, "AUX-MAP L1[0x%x] is already marked valid!\n",
486 assert(*l1_entry
== l1_data
);
491 gen_aux_map_get_entry(struct gen_aux_map_context
*ctx
,
493 uint64_t *entry_address
)
495 pthread_mutex_lock(&ctx
->mutex
);
496 uint64_t *l1_entry_map
;
497 get_aux_entry(ctx
, address
, NULL
, entry_address
, &l1_entry_map
);
498 pthread_mutex_unlock(&ctx
->mutex
);
504 gen_aux_map_add_mapping(struct gen_aux_map_context
*ctx
, uint64_t address
,
505 uint64_t aux_address
, uint64_t main_size_B
,
506 uint64_t format_bits
)
508 bool state_changed
= false;
509 pthread_mutex_lock(&ctx
->mutex
);
510 uint64_t map_addr
= address
;
511 uint64_t dest_aux_addr
= aux_address
;
512 assert(align64(address
, GEN_AUX_MAP_MAIN_PAGE_SIZE
) == address
);
513 assert(align64(aux_address
, GEN_AUX_MAP_AUX_PAGE_SIZE
) == aux_address
);
514 while (map_addr
- address
< main_size_B
) {
515 add_mapping(ctx
, map_addr
, dest_aux_addr
, format_bits
, &state_changed
);
516 map_addr
+= GEN_AUX_MAP_MAIN_PAGE_SIZE
;
517 dest_aux_addr
+= GEN_AUX_MAP_AUX_PAGE_SIZE
;
519 pthread_mutex_unlock(&ctx
->mutex
);
521 p_atomic_inc(&ctx
->state_num
);
525 gen_aux_map_add_image(struct gen_aux_map_context
*ctx
,
526 const struct isl_surf
*isl_surf
, uint64_t address
,
527 uint64_t aux_address
)
529 gen_aux_map_add_mapping(ctx
, address
, aux_address
, isl_surf
->size_B
,
530 gen_aux_map_format_bits_for_isl_surf(isl_surf
));
/**
 * We mark the leaf entry as invalid, but we don't attempt to cleanup the
 * other levels of translation mappings. Since we attempt to re-use VMA
 * ranges, hopefully this will not lead to unbounded growth of the
 * translation tables.
 */
540 remove_mapping(struct gen_aux_map_context
*ctx
, uint64_t address
,
543 uint32_t l3_index
= (address
>> 36) & 0xfff;
544 uint64_t *l3_entry
= &ctx
->level3_map
[l3_index
];
547 if ((*l3_entry
& GEN_AUX_MAP_ENTRY_VALID_BIT
) == 0) {
550 uint64_t l2_addr
= gen_canonical_address(*l3_entry
& ~0x7fffULL
);
551 l2_map
= get_u64_entry_ptr(ctx
, l2_addr
);
553 uint32_t l2_index
= (address
>> 24) & 0xfff;
554 uint64_t *l2_entry
= &l2_map
[l2_index
];
557 if ((*l2_entry
& GEN_AUX_MAP_ENTRY_VALID_BIT
) == 0) {
560 uint64_t l1_addr
= gen_canonical_address(*l2_entry
& ~0x1fffULL
);
561 l1_map
= get_u64_entry_ptr(ctx
, l1_addr
);
563 uint32_t l1_index
= (address
>> 16) & 0xff;
564 uint64_t *l1_entry
= &l1_map
[l1_index
];
566 const uint64_t current_l1_data
= *l1_entry
;
567 const uint64_t l1_data
= current_l1_data
& ~1ull;
569 if ((current_l1_data
& GEN_AUX_MAP_ENTRY_VALID_BIT
) == 0) {
573 fprintf(stderr
, "AUX-MAP [0x%x][0x%x][0x%x] L1 entry removed!\n",
574 l3_index
, l2_index
, l1_index
);
576 * We use non-zero bits in 63:1 to indicate the entry had been filled
577 * previously. In the unlikely event that these are all zero, we force a
578 * flush of the aux-map tables.
580 if (unlikely(l1_data
== 0))
581 *state_changed
= true;
587 gen_aux_map_unmap_range(struct gen_aux_map_context
*ctx
, uint64_t address
,
590 bool state_changed
= false;
591 pthread_mutex_lock(&ctx
->mutex
);
593 fprintf(stderr
, "AUX-MAP remove 0x%"PRIx64
"-0x%"PRIx64
"\n", address
,
596 uint64_t map_addr
= address
;
597 assert(align64(address
, GEN_AUX_MAP_MAIN_PAGE_SIZE
) == address
);
598 while (map_addr
- address
< size
) {
599 remove_mapping(ctx
, map_addr
, &state_changed
);
600 map_addr
+= 64 * 1024;
602 pthread_mutex_unlock(&ctx
->mutex
);
604 p_atomic_inc(&ctx
->state_num
);
608 gen_aux_map_get_num_buffers(struct gen_aux_map_context
*ctx
)
610 return p_atomic_read(&ctx
->num_buffers
);
614 gen_aux_map_fill_bos(struct gen_aux_map_context
*ctx
, void **driver_bos
,
617 assert(p_atomic_read(&ctx
->num_buffers
) >= max_bos
);
619 list_for_each_entry(struct aux_map_buffer
, buf
, &ctx
->buffers
, link
) {
622 driver_bos
[i
++] = buf
->buffer
->driver_bo
;