/*
 * Copyright © 2014-2017 Broadcom
 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
25 #include "util/u_blit.h"
26 #include "util/u_memory.h"
27 #include "util/u_format.h"
28 #include "util/u_inlines.h"
29 #include "util/u_surface.h"
30 #include "util/u_transfer_helper.h"
31 #include "util/u_upload_mgr.h"
32 #include "util/u_format_zs.h"
34 #include "drm_fourcc.h"
35 #include "vc5_screen.h"
36 #include "vc5_context.h"
37 #include "vc5_resource.h"
38 #include "vc5_tiling.h"
39 #include "broadcom/cle/v3d_packet_v33_pack.h"
/* Fallback definition for older drm_fourcc.h that lacks the sentinel
 * "invalid modifier" value.  The conditional was missing its #endif in the
 * corrupted source, which breaks everything that follows.
 */
#ifndef DRM_FORMAT_MOD_INVALID
#define DRM_FORMAT_MOD_INVALID ((1ULL << 56) - 1)
#endif
46 vc5_debug_resource_layout(struct vc5_resource
*rsc
, const char *caller
)
48 if (!(V3D_DEBUG
& V3D_DEBUG_SURFACE
))
51 struct pipe_resource
*prsc
= &rsc
->base
;
53 if (prsc
->target
== PIPE_BUFFER
) {
55 "rsc %s %p (format %s), %dx%d buffer @0x%08x-0x%08x\n",
57 util_format_short_name(prsc
->format
),
58 prsc
->width0
, prsc
->height0
,
60 rsc
->bo
->offset
+ rsc
->bo
->size
- 1);
64 static const char *const tiling_descriptions
[] = {
65 [VC5_TILING_RASTER
] = "R",
66 [VC5_TILING_LINEARTILE
] = "LT",
67 [VC5_TILING_UBLINEAR_1_COLUMN
] = "UB1",
68 [VC5_TILING_UBLINEAR_2_COLUMN
] = "UB2",
69 [VC5_TILING_UIF_NO_XOR
] = "UIF",
70 [VC5_TILING_UIF_XOR
] = "UIF^",
73 for (int i
= 0; i
<= prsc
->last_level
; i
++) {
74 struct vc5_resource_slice
*slice
= &rsc
->slices
[i
];
76 int level_width
= slice
->stride
/ rsc
->cpp
;
77 int level_height
= slice
->padded_height
;
79 u_minify(util_next_power_of_two(prsc
->depth0
), i
);
82 "rsc %s %p (format %s), %dx%d: "
83 "level %d (%s) %dx%dx%d -> %dx%dx%d, stride %d@0x%08x\n",
85 util_format_short_name(prsc
->format
),
86 prsc
->width0
, prsc
->height0
,
87 i
, tiling_descriptions
[slice
->tiling
],
88 u_minify(prsc
->width0
, i
),
89 u_minify(prsc
->height0
, i
),
90 u_minify(prsc
->depth0
, i
),
95 rsc
->bo
->offset
+ slice
->offset
);
100 vc5_resource_bo_alloc(struct vc5_resource
*rsc
)
102 struct pipe_resource
*prsc
= &rsc
->base
;
103 struct pipe_screen
*pscreen
= prsc
->screen
;
106 bo
= vc5_bo_alloc(vc5_screen(pscreen
), rsc
->size
, "resource");
108 vc5_bo_unreference(&rsc
->bo
);
110 vc5_debug_resource_layout(rsc
, "alloc");
118 vc5_resource_transfer_unmap(struct pipe_context
*pctx
,
119 struct pipe_transfer
*ptrans
)
121 struct vc5_context
*vc5
= vc5_context(pctx
);
122 struct vc5_transfer
*trans
= vc5_transfer(ptrans
);
125 struct vc5_resource
*rsc
= vc5_resource(ptrans
->resource
);
126 struct vc5_resource_slice
*slice
= &rsc
->slices
[ptrans
->level
];
128 if (ptrans
->usage
& PIPE_TRANSFER_WRITE
) {
129 for (int z
= 0; z
< ptrans
->box
.depth
; z
++) {
130 void *dst
= rsc
->bo
->map
+
131 vc5_layer_offset(&rsc
->base
,
134 vc5_store_tiled_image(dst
,
138 ptrans
->box
.height
* z
),
140 slice
->tiling
, rsc
->cpp
,
141 slice
->padded_height
,
148 pipe_resource_reference(&ptrans
->resource
, NULL
);
149 slab_free(&vc5
->transfer_pool
, ptrans
);
153 vc5_resource_transfer_map(struct pipe_context
*pctx
,
154 struct pipe_resource
*prsc
,
155 unsigned level
, unsigned usage
,
156 const struct pipe_box
*box
,
157 struct pipe_transfer
**pptrans
)
159 struct vc5_context
*vc5
= vc5_context(pctx
);
160 struct vc5_resource
*rsc
= vc5_resource(prsc
);
161 struct vc5_transfer
*trans
;
162 struct pipe_transfer
*ptrans
;
163 enum pipe_format format
= prsc
->format
;
166 /* MSAA maps should have been handled by u_transfer_helper. */
167 assert(prsc
->nr_samples
<= 1);
169 /* Upgrade DISCARD_RANGE to WHOLE_RESOURCE if the whole resource is
172 if ((usage
& PIPE_TRANSFER_DISCARD_RANGE
) &&
173 !(usage
& PIPE_TRANSFER_UNSYNCHRONIZED
) &&
174 !(prsc
->flags
& PIPE_RESOURCE_FLAG_MAP_COHERENT
) &&
175 prsc
->last_level
== 0 &&
176 prsc
->width0
== box
->width
&&
177 prsc
->height0
== box
->height
&&
178 prsc
->depth0
== box
->depth
&&
179 prsc
->array_size
== 1 &&
181 usage
|= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE
;
184 if (usage
& PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE
) {
185 if (vc5_resource_bo_alloc(rsc
)) {
186 /* If it might be bound as one of our vertex buffers
187 * or UBOs, make sure we re-emit vertex buffer state
190 if (prsc
->bind
& PIPE_BIND_VERTEX_BUFFER
)
191 vc5
->dirty
|= VC5_DIRTY_VTXBUF
;
192 if (prsc
->bind
& PIPE_BIND_CONSTANT_BUFFER
)
193 vc5
->dirty
|= VC5_DIRTY_CONSTBUF
;
195 /* If we failed to reallocate, flush users so that we
196 * don't violate any syncing requirements.
198 vc5_flush_jobs_reading_resource(vc5
, prsc
);
200 } else if (!(usage
& PIPE_TRANSFER_UNSYNCHRONIZED
)) {
201 /* If we're writing and the buffer is being used by the CL, we
202 * have to flush the CL first. If we're only reading, we need
203 * to flush if the CL has written our buffer.
205 if (usage
& PIPE_TRANSFER_WRITE
)
206 vc5_flush_jobs_reading_resource(vc5
, prsc
);
208 vc5_flush_jobs_writing_resource(vc5
, prsc
);
211 if (usage
& PIPE_TRANSFER_WRITE
) {
213 rsc
->initialized_buffers
= ~0;
216 trans
= slab_alloc(&vc5
->transfer_pool
);
220 /* XXX: Handle DONTBLOCK, DISCARD_RANGE, PERSISTENT, COHERENT. */
222 /* slab_alloc_st() doesn't zero: */
223 memset(trans
, 0, sizeof(*trans
));
224 ptrans
= &trans
->base
;
226 pipe_resource_reference(&ptrans
->resource
, prsc
);
227 ptrans
->level
= level
;
228 ptrans
->usage
= usage
;
231 /* Note that the current kernel implementation is synchronous, so no
232 * need to do syncing stuff here yet.
235 if (usage
& PIPE_TRANSFER_UNSYNCHRONIZED
)
236 buf
= vc5_bo_map_unsynchronized(rsc
->bo
);
238 buf
= vc5_bo_map(rsc
->bo
);
240 fprintf(stderr
, "Failed to map bo\n");
246 /* Our load/store routines work on entire compressed blocks. */
247 ptrans
->box
.x
/= util_format_get_blockwidth(format
);
248 ptrans
->box
.y
/= util_format_get_blockheight(format
);
249 ptrans
->box
.width
= DIV_ROUND_UP(ptrans
->box
.width
,
250 util_format_get_blockwidth(format
));
251 ptrans
->box
.height
= DIV_ROUND_UP(ptrans
->box
.height
,
252 util_format_get_blockheight(format
));
254 struct vc5_resource_slice
*slice
= &rsc
->slices
[level
];
256 /* No direct mappings of tiled, since we need to manually
259 if (usage
& PIPE_TRANSFER_MAP_DIRECTLY
)
262 ptrans
->stride
= ptrans
->box
.width
* rsc
->cpp
;
263 ptrans
->layer_stride
= ptrans
->stride
* ptrans
->box
.height
;
265 trans
->map
= malloc(ptrans
->layer_stride
* ptrans
->box
.depth
);
267 if (usage
& PIPE_TRANSFER_READ
) {
268 for (int z
= 0; z
< ptrans
->box
.depth
; z
++) {
269 void *src
= rsc
->bo
->map
+
270 vc5_layer_offset(&rsc
->base
,
273 vc5_load_tiled_image((trans
->map
+
275 ptrans
->box
.height
* z
),
279 slice
->tiling
, rsc
->cpp
,
280 slice
->padded_height
,
286 ptrans
->stride
= slice
->stride
;
287 ptrans
->layer_stride
= ptrans
->stride
;
289 return buf
+ slice
->offset
+
290 ptrans
->box
.y
* ptrans
->stride
+
291 ptrans
->box
.x
* rsc
->cpp
+
292 ptrans
->box
.z
* rsc
->cube_map_stride
;
297 vc5_resource_transfer_unmap(pctx
, ptrans
);
302 vc5_resource_destroy(struct pipe_screen
*pscreen
,
303 struct pipe_resource
*prsc
)
305 struct vc5_resource
*rsc
= vc5_resource(prsc
);
307 vc5_bo_unreference(&rsc
->bo
);
312 vc5_resource_get_handle(struct pipe_screen
*pscreen
,
313 struct pipe_context
*pctx
,
314 struct pipe_resource
*prsc
,
315 struct winsys_handle
*whandle
,
318 struct vc5_resource
*rsc
= vc5_resource(prsc
);
319 struct vc5_bo
*bo
= rsc
->bo
;
321 whandle
->stride
= rsc
->slices
[0].stride
;
323 /* If we're passing some reference to our BO out to some other part of
324 * the system, then we can't do any optimizations about only us being
325 * the ones seeing it (like BO caching).
329 switch (whandle
->type
) {
330 case DRM_API_HANDLE_TYPE_SHARED
:
331 return vc5_bo_flink(bo
, &whandle
->handle
);
332 case DRM_API_HANDLE_TYPE_KMS
:
333 whandle
->handle
= bo
->handle
;
335 case DRM_API_HANDLE_TYPE_FD
:
336 whandle
->handle
= vc5_bo_get_dmabuf(bo
);
337 return whandle
->handle
!= -1;
/* Helpers for the UIF padding computation below, all in units of UIF-block
 * rows: how many rows fit in a memory page, 1.5x that, how many fit in the
 * page cache, and the page cache size minus that 1.5-page margin.
 */
#define PAGE_UB_ROWS (VC5_UIFCFG_PAGE_SIZE / VC5_UIFBLOCK_ROW_SIZE)
#define PAGE_UB_ROWS_TIMES_1_5 ((PAGE_UB_ROWS * 3) >> 1)
#define PAGE_CACHE_UB_ROWS (VC5_PAGE_CACHE_SIZE / VC5_UIFBLOCK_ROW_SIZE)
#define PAGE_CACHE_MINUS_1_5_UB_ROWS (PAGE_CACHE_UB_ROWS - PAGE_UB_ROWS_TIMES_1_5)
349 * Computes the HW's UIFblock padding for a given height/cpp.
351 * The goal of the padding is to keep pages of the same color (bank number) at
352 * least half a page away from each other vertically when crossing between
353 * between columns of UIF blocks.
356 vc5_get_ub_pad(struct vc5_resource
*rsc
, uint32_t height
)
358 uint32_t utile_h
= vc5_utile_height(rsc
->cpp
);
359 uint32_t uif_block_h
= utile_h
* 2;
360 uint32_t height_ub
= height
/ uif_block_h
;
362 uint32_t height_offset_in_pc
= height_ub
% PAGE_CACHE_UB_ROWS
;
364 /* For the perfectly-aligned-for-UIF-XOR case, don't add any pad. */
365 if (height_offset_in_pc
== 0)
368 /* Try padding up to where we're offset by at least half a page. */
369 if (height_offset_in_pc
< PAGE_UB_ROWS_TIMES_1_5
) {
370 /* If we fit entirely in the page cache, don't pad. */
371 if (height_ub
< PAGE_CACHE_UB_ROWS
)
374 return PAGE_UB_ROWS_TIMES_1_5
- height_offset_in_pc
;
377 /* If we're close to being aligned to page cache size, then round up
380 if (height_offset_in_pc
> PAGE_CACHE_MINUS_1_5_UB_ROWS
)
381 return PAGE_CACHE_UB_ROWS
- height_offset_in_pc
;
383 /* Otherwise, we're far enough away (top and bottom) to not need any
390 vc5_setup_slices(struct vc5_resource
*rsc
)
392 struct pipe_resource
*prsc
= &rsc
->base
;
393 uint32_t width
= prsc
->width0
;
394 uint32_t height
= prsc
->height0
;
395 uint32_t depth
= prsc
->depth0
;
396 uint32_t pot_width
= util_next_power_of_two(width
);
397 uint32_t pot_height
= util_next_power_of_two(height
);
398 uint32_t pot_depth
= util_next_power_of_two(depth
);
400 uint32_t utile_w
= vc5_utile_width(rsc
->cpp
);
401 uint32_t utile_h
= vc5_utile_height(rsc
->cpp
);
402 uint32_t uif_block_w
= utile_w
* 2;
403 uint32_t uif_block_h
= utile_h
* 2;
404 uint32_t block_width
= util_format_get_blockwidth(prsc
->format
);
405 uint32_t block_height
= util_format_get_blockheight(prsc
->format
);
406 bool msaa
= prsc
->nr_samples
> 1;
407 /* MSAA textures/renderbuffers are always laid out as single-level
412 for (int i
= prsc
->last_level
; i
>= 0; i
--) {
413 struct vc5_resource_slice
*slice
= &rsc
->slices
[i
];
415 uint32_t level_width
, level_height
, level_depth
;
417 level_width
= u_minify(width
, i
);
418 level_height
= u_minify(height
, i
);
420 level_width
= u_minify(pot_width
, i
);
421 level_height
= u_minify(pot_height
, i
);
424 level_depth
= u_minify(depth
, i
);
426 level_depth
= u_minify(pot_depth
, i
);
433 level_width
= DIV_ROUND_UP(level_width
, block_width
);
434 level_height
= DIV_ROUND_UP(level_height
, block_height
);
437 slice
->tiling
= VC5_TILING_RASTER
;
438 if (prsc
->target
== PIPE_TEXTURE_1D
)
439 level_width
= align(level_width
, 64 / rsc
->cpp
);
441 if ((i
!= 0 || !uif_top
) &&
442 (level_width
<= utile_w
||
443 level_height
<= utile_h
)) {
444 slice
->tiling
= VC5_TILING_LINEARTILE
;
445 level_width
= align(level_width
, utile_w
);
446 level_height
= align(level_height
, utile_h
);
447 } else if ((i
!= 0 || !uif_top
) &&
448 level_width
<= uif_block_w
) {
449 slice
->tiling
= VC5_TILING_UBLINEAR_1_COLUMN
;
450 level_width
= align(level_width
, uif_block_w
);
451 level_height
= align(level_height
, uif_block_h
);
452 } else if ((i
!= 0 || !uif_top
) &&
453 level_width
<= 2 * uif_block_w
) {
454 slice
->tiling
= VC5_TILING_UBLINEAR_2_COLUMN
;
455 level_width
= align(level_width
, 2 * uif_block_w
);
456 level_height
= align(level_height
, uif_block_h
);
458 /* We align the width to a 4-block column of
459 * UIF blocks, but we only align height to UIF
462 level_width
= align(level_width
,
464 level_height
= align(level_height
,
467 slice
->ub_pad
= vc5_get_ub_pad(rsc
,
469 level_height
+= slice
->ub_pad
* uif_block_h
;
471 /* If the padding set us to to be aligned to
472 * the page cache size, then the HW will use
473 * the XOR bit on odd columns to get us
474 * perfectly misaligned
476 if ((level_height
/ uif_block_h
) %
477 (VC5_PAGE_CACHE_SIZE
/
478 VC5_UIFBLOCK_ROW_SIZE
) == 0) {
479 slice
->tiling
= VC5_TILING_UIF_XOR
;
481 slice
->tiling
= VC5_TILING_UIF_NO_XOR
;
486 slice
->offset
= offset
;
487 slice
->stride
= level_width
* rsc
->cpp
;
488 slice
->padded_height
= level_height
;
489 slice
->size
= level_height
* slice
->stride
;
491 offset
+= slice
->size
* level_depth
;
494 /* The HW aligns level 1's base to a page if any of level 1 or
495 * below could be UIF XOR. The lower levels then inherit the
496 * alignment for as long as necesary, thanks to being power of
500 level_width
> 4 * uif_block_w
&&
501 level_height
> PAGE_CACHE_MINUS_1_5_UB_ROWS
* uif_block_h
) {
502 offset
= align(offset
, VC5_UIFCFG_PAGE_SIZE
);
507 /* UIF/UBLINEAR levels need to be aligned to UIF-blocks, and LT only
508 * needs to be aligned to utile boundaries. Since tiles are laid out
509 * from small to big in memory, we need to align the later UIF slices
510 * to UIF blocks, if they were preceded by non-UIF-block-aligned LT
513 * We additionally align to 4k, which improves UIF XOR performance.
515 uint32_t page_align_offset
= (align(rsc
->slices
[0].offset
, 4096) -
516 rsc
->slices
[0].offset
);
517 if (page_align_offset
) {
518 rsc
->size
+= page_align_offset
;
519 for (int i
= 0; i
<= prsc
->last_level
; i
++)
520 rsc
->slices
[i
].offset
+= page_align_offset
;
523 /* Arrays and cube textures have a stride which is the distance from
524 * one full mipmap tree to the next (64b aligned). For 3D textures,
525 * we need to program the stride between slices of miplevel 0.
527 if (prsc
->target
!= PIPE_TEXTURE_3D
) {
528 rsc
->cube_map_stride
= align(rsc
->slices
[0].offset
+
529 rsc
->slices
[0].size
, 64);
530 rsc
->size
+= rsc
->cube_map_stride
* (prsc
->array_size
- 1);
532 rsc
->cube_map_stride
= rsc
->slices
[0].size
;
537 vc5_layer_offset(struct pipe_resource
*prsc
, uint32_t level
, uint32_t layer
)
539 struct vc5_resource
*rsc
= vc5_resource(prsc
);
540 struct vc5_resource_slice
*slice
= &rsc
->slices
[level
];
542 if (prsc
->target
== PIPE_TEXTURE_3D
)
543 return slice
->offset
+ layer
* slice
->size
;
545 return slice
->offset
+ layer
* rsc
->cube_map_stride
;
548 static struct vc5_resource
*
549 vc5_resource_setup(struct pipe_screen
*pscreen
,
550 const struct pipe_resource
*tmpl
)
552 struct vc5_screen
*screen
= vc5_screen(pscreen
);
553 struct vc5_resource
*rsc
= CALLOC_STRUCT(vc5_resource
);
556 struct pipe_resource
*prsc
= &rsc
->base
;
560 pipe_reference_init(&prsc
->reference
, 1);
561 prsc
->screen
= pscreen
;
563 if (prsc
->nr_samples
<= 1) {
564 rsc
->cpp
= util_format_get_blocksize(prsc
->format
);
566 assert(vc5_rt_format_supported(&screen
->devinfo
, prsc
->format
));
567 uint32_t output_image_format
=
568 vc5_get_rt_format(&screen
->devinfo
, prsc
->format
);
569 uint32_t internal_type
;
570 uint32_t internal_bpp
;
571 vc5_get_internal_type_bpp_for_output_format(&screen
->devinfo
,
575 switch (internal_bpp
) {
576 case V3D_INTERNAL_BPP_32
:
579 case V3D_INTERNAL_BPP_64
:
582 case V3D_INTERNAL_BPP_128
:
/**
 * Returns whether \p needle appears among the first \p count entries of
 * \p haystack (a caller-supplied list of DRM format modifiers).
 */
static bool
find_modifier(uint64_t needle, const uint64_t *haystack, int count)
{
        int i;

        for (i = 0; i < count; i++) {
                if (haystack[i] == needle)
                        return true;
        }

        return false;
}
606 static struct pipe_resource
*
607 vc5_resource_create_with_modifiers(struct pipe_screen
*pscreen
,
608 const struct pipe_resource
*tmpl
,
609 const uint64_t *modifiers
,
612 bool linear_ok
= find_modifier(DRM_FORMAT_MOD_LINEAR
, modifiers
, count
);
613 struct vc5_resource
*rsc
= vc5_resource_setup(pscreen
, tmpl
);
614 struct pipe_resource
*prsc
= &rsc
->base
;
615 /* Use a tiled layout if we can, for better 3D performance. */
616 bool should_tile
= true;
618 /* VBOs/PBOs are untiled (and 1 height). */
619 if (tmpl
->target
== PIPE_BUFFER
)
622 /* Cursors are always linear, and the user can request linear as well.
624 if (tmpl
->bind
& (PIPE_BIND_LINEAR
| PIPE_BIND_CURSOR
))
627 /* 1D and 1D_ARRAY textures are always raster-order. */
628 if (tmpl
->target
== PIPE_TEXTURE_1D
||
629 tmpl
->target
== PIPE_TEXTURE_1D_ARRAY
)
632 /* Scanout BOs for simulator need to be linear for interaction with
635 if (using_vc5_simulator
&&
636 tmpl
->bind
& (PIPE_BIND_SHARED
| PIPE_BIND_SCANOUT
))
639 /* No user-specified modifier; determine our own. */
640 if (count
== 1 && modifiers
[0] == DRM_FORMAT_MOD_INVALID
) {
642 rsc
->tiled
= should_tile
;
643 } else if (should_tile
&&
644 find_modifier(DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED
,
647 } else if (linear_ok
) {
650 fprintf(stderr
, "Unsupported modifier requested\n");
654 rsc
->internal_format
= prsc
->format
;
656 vc5_setup_slices(rsc
);
657 if (!vc5_resource_bo_alloc(rsc
))
662 vc5_resource_destroy(pscreen
, prsc
);
666 struct pipe_resource
*
667 vc5_resource_create(struct pipe_screen
*pscreen
,
668 const struct pipe_resource
*tmpl
)
670 const uint64_t mod
= DRM_FORMAT_MOD_INVALID
;
671 return vc5_resource_create_with_modifiers(pscreen
, tmpl
, &mod
, 1);
674 static struct pipe_resource
*
675 vc5_resource_from_handle(struct pipe_screen
*pscreen
,
676 const struct pipe_resource
*tmpl
,
677 struct winsys_handle
*whandle
,
680 struct vc5_screen
*screen
= vc5_screen(pscreen
);
681 struct vc5_resource
*rsc
= vc5_resource_setup(pscreen
, tmpl
);
682 struct pipe_resource
*prsc
= &rsc
->base
;
683 struct vc5_resource_slice
*slice
= &rsc
->slices
[0];
688 switch (whandle
->modifier
) {
689 case DRM_FORMAT_MOD_LINEAR
:
695 "Attempt to import unsupported modifier 0x%llx\n",
696 (long long)whandle
->modifier
);
700 if (whandle
->offset
!= 0) {
702 "Attempt to import unsupported winsys offset %u\n",
707 switch (whandle
->type
) {
708 case DRM_API_HANDLE_TYPE_SHARED
:
709 rsc
->bo
= vc5_bo_open_name(screen
,
710 whandle
->handle
, whandle
->stride
);
712 case DRM_API_HANDLE_TYPE_FD
:
713 rsc
->bo
= vc5_bo_open_dmabuf(screen
,
714 whandle
->handle
, whandle
->stride
);
718 "Attempt to import unsupported handle type %d\n",
726 vc5_setup_slices(rsc
);
727 vc5_debug_resource_layout(rsc
, "import");
729 if (whandle
->stride
!= slice
->stride
) {
730 static bool warned
= false;
734 "Attempting to import %dx%d %s with "
735 "unsupported stride %d instead of %d\n",
736 prsc
->width0
, prsc
->height0
,
737 util_format_short_name(prsc
->format
),
747 vc5_resource_destroy(pscreen
, prsc
);
751 static struct pipe_surface
*
752 vc5_create_surface(struct pipe_context
*pctx
,
753 struct pipe_resource
*ptex
,
754 const struct pipe_surface
*surf_tmpl
)
756 struct vc5_context
*vc5
= vc5_context(pctx
);
757 struct vc5_screen
*screen
= vc5
->screen
;
758 struct vc5_surface
*surface
= CALLOC_STRUCT(vc5_surface
);
759 struct vc5_resource
*rsc
= vc5_resource(ptex
);
764 assert(surf_tmpl
->u
.tex
.first_layer
== surf_tmpl
->u
.tex
.last_layer
);
766 struct pipe_surface
*psurf
= &surface
->base
;
767 unsigned level
= surf_tmpl
->u
.tex
.level
;
768 struct vc5_resource_slice
*slice
= &rsc
->slices
[level
];
770 pipe_reference_init(&psurf
->reference
, 1);
771 pipe_resource_reference(&psurf
->texture
, ptex
);
773 psurf
->context
= pctx
;
774 psurf
->format
= surf_tmpl
->format
;
775 psurf
->width
= u_minify(ptex
->width0
, level
);
776 psurf
->height
= u_minify(ptex
->height0
, level
);
777 psurf
->u
.tex
.level
= level
;
778 psurf
->u
.tex
.first_layer
= surf_tmpl
->u
.tex
.first_layer
;
779 psurf
->u
.tex
.last_layer
= surf_tmpl
->u
.tex
.last_layer
;
781 surface
->offset
= vc5_layer_offset(ptex
, level
,
782 psurf
->u
.tex
.first_layer
);
783 surface
->tiling
= slice
->tiling
;
785 surface
->format
= vc5_get_rt_format(&screen
->devinfo
, psurf
->format
);
787 if (util_format_is_depth_or_stencil(psurf
->format
)) {
788 switch (psurf
->format
) {
789 case PIPE_FORMAT_Z16_UNORM
:
790 surface
->internal_type
= V3D_INTERNAL_TYPE_DEPTH_16
;
792 case PIPE_FORMAT_Z32_FLOAT
:
793 case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT
:
794 surface
->internal_type
= V3D_INTERNAL_TYPE_DEPTH_32F
;
797 surface
->internal_type
= V3D_INTERNAL_TYPE_DEPTH_24
;
801 vc5_get_internal_type_bpp_for_output_format(&screen
->devinfo
,
804 surface
->internal_type
= type
;
805 surface
->internal_bpp
= bpp
;
808 if (surface
->tiling
== VC5_TILING_UIF_NO_XOR
||
809 surface
->tiling
== VC5_TILING_UIF_XOR
) {
810 surface
->padded_height_of_output_image_in_uif_blocks
=
811 (slice
->padded_height
/
812 (2 * vc5_utile_height(rsc
->cpp
)));
815 if (rsc
->separate_stencil
) {
816 surface
->separate_stencil
=
817 vc5_create_surface(pctx
, &rsc
->separate_stencil
->base
,
821 return &surface
->base
;
825 vc5_surface_destroy(struct pipe_context
*pctx
, struct pipe_surface
*psurf
)
827 struct vc5_surface
*surf
= vc5_surface(psurf
);
829 if (surf
->separate_stencil
)
830 pipe_surface_reference(&surf
->separate_stencil
, NULL
);
832 pipe_resource_reference(&psurf
->texture
, NULL
);
/**
 * pipe_context::flush_resource hook.
 *
 * All calls to flush_resource are followed by a flush of the context, so
 * there's nothing to do.
 */
static void
vc5_flush_resource(struct pipe_context *pctx, struct pipe_resource *resource)
{
}
844 static enum pipe_format
845 vc5_resource_get_internal_format(struct pipe_resource
*prsc
)
847 return vc5_resource(prsc
)->internal_format
;
851 vc5_resource_set_stencil(struct pipe_resource
*prsc
,
852 struct pipe_resource
*stencil
)
854 vc5_resource(prsc
)->separate_stencil
= vc5_resource(stencil
);
857 static struct pipe_resource
*
858 vc5_resource_get_stencil(struct pipe_resource
*prsc
)
860 struct vc5_resource
*rsc
= vc5_resource(prsc
);
862 return &rsc
->separate_stencil
->base
;
865 static const struct u_transfer_vtbl transfer_vtbl
= {
866 .resource_create
= vc5_resource_create
,
867 .resource_destroy
= vc5_resource_destroy
,
868 .transfer_map
= vc5_resource_transfer_map
,
869 .transfer_unmap
= vc5_resource_transfer_unmap
,
870 .transfer_flush_region
= u_default_transfer_flush_region
,
871 .get_internal_format
= vc5_resource_get_internal_format
,
872 .set_stencil
= vc5_resource_set_stencil
,
873 .get_stencil
= vc5_resource_get_stencil
,
877 vc5_resource_screen_init(struct pipe_screen
*pscreen
)
879 pscreen
->resource_create_with_modifiers
=
880 vc5_resource_create_with_modifiers
;
881 pscreen
->resource_create
= u_transfer_helper_resource_create
;
882 pscreen
->resource_from_handle
= vc5_resource_from_handle
;
883 pscreen
->resource_get_handle
= vc5_resource_get_handle
;
884 pscreen
->resource_destroy
= u_transfer_helper_resource_destroy
;
885 pscreen
->transfer_helper
= u_transfer_helper_create(&transfer_vtbl
,
890 vc5_resource_context_init(struct pipe_context
*pctx
)
892 pctx
->transfer_map
= u_transfer_helper_transfer_map
;
893 pctx
->transfer_flush_region
= u_transfer_helper_transfer_flush_region
;
894 pctx
->transfer_unmap
= u_transfer_helper_transfer_unmap
;
895 pctx
->buffer_subdata
= u_default_buffer_subdata
;
896 pctx
->texture_subdata
= u_default_texture_subdata
;
897 pctx
->create_surface
= vc5_create_surface
;
898 pctx
->surface_destroy
= vc5_surface_destroy
;
899 pctx
->resource_copy_region
= util_resource_copy_region
;
900 pctx
->blit
= vc5_blit
;
901 pctx
->flush_resource
= vc5_flush_resource
;