/*
 * Copyright © 2014 Broadcom
 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
25 #include "util/u_blit.h"
26 #include "util/u_memory.h"
27 #include "util/u_format.h"
28 #include "util/u_inlines.h"
29 #include "util/u_surface.h"
30 #include "util/u_upload_mgr.h"
32 #include "vc4_screen.h"
33 #include "vc4_context.h"
34 #include "vc4_resource.h"
35 #include "vc4_tiling.h"
/* Set to true to get miptree layout and allocation decisions dumped to
 * stderr (see vc4_setup_slices() and vc4_resource_bo_alloc()).
 */
static bool miptree_debug = false;
40 vc4_resource_bo_alloc(struct vc4_resource
*rsc
)
42 struct pipe_resource
*prsc
= &rsc
->base
;
43 struct pipe_screen
*pscreen
= prsc
->screen
;
47 fprintf(stderr
, "alloc %p: size %d + offset %d -> %d\n",
50 rsc
->slices
[0].offset
,
51 rsc
->slices
[0].offset
+
53 rsc
->cube_map_stride
* (prsc
->array_size
- 1));
56 bo
= vc4_bo_alloc(vc4_screen(pscreen
),
57 rsc
->slices
[0].offset
+
59 rsc
->cube_map_stride
* (prsc
->array_size
- 1),
62 vc4_bo_unreference(&rsc
->bo
);
71 vc4_resource_transfer_unmap(struct pipe_context
*pctx
,
72 struct pipe_transfer
*ptrans
)
74 struct vc4_context
*vc4
= vc4_context(pctx
);
75 struct vc4_transfer
*trans
= vc4_transfer(ptrans
);
78 struct vc4_resource
*rsc
;
79 struct vc4_resource_slice
*slice
;
80 if (trans
->ss_resource
) {
81 rsc
= vc4_resource(trans
->ss_resource
);
82 slice
= &rsc
->slices
[0];
84 rsc
= vc4_resource(ptrans
->resource
);
85 slice
= &rsc
->slices
[ptrans
->level
];
88 if (ptrans
->usage
& PIPE_TRANSFER_WRITE
) {
89 vc4_store_tiled_image(rsc
->bo
->map
+ slice
->offset
+
90 ptrans
->box
.z
* rsc
->cube_map_stride
,
92 trans
->map
, ptrans
->stride
,
93 slice
->tiling
, rsc
->cpp
,
99 if (trans
->ss_resource
&& (ptrans
->usage
& PIPE_TRANSFER_WRITE
)) {
100 struct pipe_blit_info blit
;
101 memset(&blit
, 0, sizeof(blit
));
103 blit
.src
.resource
= trans
->ss_resource
;
104 blit
.src
.format
= trans
->ss_resource
->format
;
105 blit
.src
.box
.width
= trans
->ss_box
.width
;
106 blit
.src
.box
.height
= trans
->ss_box
.height
;
107 blit
.src
.box
.depth
= 1;
109 blit
.dst
.resource
= ptrans
->resource
;
110 blit
.dst
.format
= ptrans
->resource
->format
;
111 blit
.dst
.level
= ptrans
->level
;
112 blit
.dst
.box
= trans
->ss_box
;
114 blit
.mask
= util_format_get_mask(ptrans
->resource
->format
);
115 blit
.filter
= PIPE_TEX_FILTER_NEAREST
;
117 pctx
->blit(pctx
, &blit
);
119 pipe_resource_reference(&trans
->ss_resource
, NULL
);
122 pipe_resource_reference(&ptrans
->resource
, NULL
);
123 slab_free(&vc4
->transfer_pool
, ptrans
);
126 static struct pipe_resource
*
127 vc4_get_temp_resource(struct pipe_context
*pctx
,
128 struct pipe_resource
*prsc
,
129 const struct pipe_box
*box
)
131 struct pipe_resource temp_setup
;
133 memset(&temp_setup
, 0, sizeof(temp_setup
));
134 temp_setup
.target
= prsc
->target
;
135 temp_setup
.format
= prsc
->format
;
136 temp_setup
.width0
= box
->width
;
137 temp_setup
.height0
= box
->height
;
138 temp_setup
.depth0
= 1;
139 temp_setup
.array_size
= 1;
141 return pctx
->screen
->resource_create(pctx
->screen
, &temp_setup
);
145 vc4_resource_transfer_map(struct pipe_context
*pctx
,
146 struct pipe_resource
*prsc
,
147 unsigned level
, unsigned usage
,
148 const struct pipe_box
*box
,
149 struct pipe_transfer
**pptrans
)
151 struct vc4_context
*vc4
= vc4_context(pctx
);
152 struct vc4_resource
*rsc
= vc4_resource(prsc
);
153 struct vc4_transfer
*trans
;
154 struct pipe_transfer
*ptrans
;
155 enum pipe_format format
= prsc
->format
;
158 /* Upgrade DISCARD_RANGE to WHOLE_RESOURCE if the whole resource is
161 if ((usage
& PIPE_TRANSFER_DISCARD_RANGE
) &&
162 !(usage
& PIPE_TRANSFER_UNSYNCHRONIZED
) &&
163 !(prsc
->flags
& PIPE_RESOURCE_FLAG_MAP_COHERENT
) &&
164 prsc
->last_level
== 0 &&
165 prsc
->width0
== box
->width
&&
166 prsc
->height0
== box
->height
&&
167 prsc
->depth0
== box
->depth
&&
168 prsc
->array_size
== 1 &&
170 usage
|= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE
;
173 if (usage
& PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE
) {
174 if (vc4_resource_bo_alloc(rsc
)) {
175 /* If it might be bound as one of our vertex buffers,
176 * make sure we re-emit vertex buffer state.
178 if (prsc
->bind
& PIPE_BIND_VERTEX_BUFFER
)
179 vc4
->dirty
|= VC4_DIRTY_VTXBUF
;
181 /* If we failed to reallocate, flush users so that we
182 * don't violate any syncing requirements.
184 vc4_flush_jobs_reading_resource(vc4
, prsc
);
186 } else if (!(usage
& PIPE_TRANSFER_UNSYNCHRONIZED
)) {
187 /* If we're writing and the buffer is being used by the CL, we
188 * have to flush the CL first. If we're only reading, we need
189 * to flush if the CL has written our buffer.
191 if (usage
& PIPE_TRANSFER_WRITE
)
192 vc4_flush_jobs_reading_resource(vc4
, prsc
);
194 vc4_flush_jobs_writing_resource(vc4
, prsc
);
197 if (usage
& PIPE_TRANSFER_WRITE
) {
199 rsc
->initialized_buffers
= ~0;
202 trans
= slab_alloc(&vc4
->transfer_pool
);
206 /* XXX: Handle DONTBLOCK, DISCARD_RANGE, PERSISTENT, COHERENT. */
208 /* slab_alloc_st() doesn't zero: */
209 memset(trans
, 0, sizeof(*trans
));
210 ptrans
= &trans
->base
;
212 pipe_resource_reference(&ptrans
->resource
, prsc
);
213 ptrans
->level
= level
;
214 ptrans
->usage
= usage
;
217 /* If the resource is multisampled, we need to resolve to single
218 * sample. This seems like it should be handled at a higher layer.
220 if (prsc
->nr_samples
> 1) {
221 trans
->ss_resource
= vc4_get_temp_resource(pctx
, prsc
, box
);
222 if (!trans
->ss_resource
)
224 assert(!trans
->ss_resource
->nr_samples
);
226 /* The ptrans->box gets modified for tile alignment, so save
227 * the original box for unmap time.
229 trans
->ss_box
= *box
;
231 if (usage
& PIPE_TRANSFER_READ
) {
232 struct pipe_blit_info blit
;
233 memset(&blit
, 0, sizeof(blit
));
235 blit
.src
.resource
= ptrans
->resource
;
236 blit
.src
.format
= ptrans
->resource
->format
;
237 blit
.src
.level
= ptrans
->level
;
238 blit
.src
.box
= trans
->ss_box
;
240 blit
.dst
.resource
= trans
->ss_resource
;
241 blit
.dst
.format
= trans
->ss_resource
->format
;
242 blit
.dst
.box
.width
= trans
->ss_box
.width
;
243 blit
.dst
.box
.height
= trans
->ss_box
.height
;
244 blit
.dst
.box
.depth
= 1;
246 blit
.mask
= util_format_get_mask(prsc
->format
);
247 blit
.filter
= PIPE_TEX_FILTER_NEAREST
;
249 pctx
->blit(pctx
, &blit
);
250 vc4_flush_jobs_writing_resource(vc4
, blit
.dst
.resource
);
253 /* The rest of the mapping process should use our temporary. */
254 prsc
= trans
->ss_resource
;
255 rsc
= vc4_resource(prsc
);
261 if (usage
& PIPE_TRANSFER_UNSYNCHRONIZED
)
262 buf
= vc4_bo_map_unsynchronized(rsc
->bo
);
264 buf
= vc4_bo_map(rsc
->bo
);
266 fprintf(stderr
, "Failed to map bo\n");
272 struct vc4_resource_slice
*slice
= &rsc
->slices
[level
];
274 uint32_t utile_w
= vc4_utile_width(rsc
->cpp
);
275 uint32_t utile_h
= vc4_utile_height(rsc
->cpp
);
277 /* No direct mappings of tiled, since we need to manually
280 if (usage
& PIPE_TRANSFER_MAP_DIRECTLY
)
283 if (format
== PIPE_FORMAT_ETC1_RGB8
) {
284 /* ETC1 is arranged as 64-bit blocks, where each block
285 * is 4x4 pixels. Texture tiling operates on the
286 * 64-bit block the way it would an uncompressed
289 assert(!(ptrans
->box
.x
& 3));
290 assert(!(ptrans
->box
.y
& 3));
293 ptrans
->box
.width
= (ptrans
->box
.width
+ 3) >> 2;
294 ptrans
->box
.height
= (ptrans
->box
.height
+ 3) >> 2;
297 /* We need to align the box to utile boundaries, since that's
298 * what load/store operates on. This may cause us to need to
299 * read out the original contents in that border area. Right
300 * now we just read out the entire contents, including the
301 * middle area that will just get overwritten.
303 uint32_t box_start_x
= ptrans
->box
.x
& (utile_w
- 1);
304 uint32_t box_start_y
= ptrans
->box
.y
& (utile_h
- 1);
305 bool needs_load
= (usage
& PIPE_TRANSFER_READ
) != 0;
308 ptrans
->box
.width
+= box_start_x
;
309 ptrans
->box
.x
-= box_start_x
;
313 ptrans
->box
.height
+= box_start_y
;
314 ptrans
->box
.y
-= box_start_y
;
317 if (ptrans
->box
.width
& (utile_w
- 1)) {
318 /* We only need to force a load if our border region
319 * we're extending into is actually part of the
322 uint32_t slice_width
= u_minify(prsc
->width0
, level
);
323 if (ptrans
->box
.x
+ ptrans
->box
.width
!= slice_width
)
325 ptrans
->box
.width
= align(ptrans
->box
.width
, utile_w
);
327 if (ptrans
->box
.height
& (utile_h
- 1)) {
328 uint32_t slice_height
= u_minify(prsc
->height0
, level
);
329 if (ptrans
->box
.y
+ ptrans
->box
.height
!= slice_height
)
331 ptrans
->box
.height
= align(ptrans
->box
.height
, utile_h
);
334 ptrans
->stride
= ptrans
->box
.width
* rsc
->cpp
;
335 ptrans
->layer_stride
= ptrans
->stride
* ptrans
->box
.height
;
337 trans
->map
= malloc(ptrans
->layer_stride
* ptrans
->box
.depth
);
340 vc4_load_tiled_image(trans
->map
, ptrans
->stride
,
341 buf
+ slice
->offset
+
342 ptrans
->box
.z
* rsc
->cube_map_stride
,
344 slice
->tiling
, rsc
->cpp
,
348 box_start_x
* rsc
->cpp
+
349 box_start_y
* ptrans
->stride
);
351 ptrans
->stride
= slice
->stride
;
352 ptrans
->layer_stride
= ptrans
->stride
;
354 return buf
+ slice
->offset
+
355 ptrans
->box
.y
/ util_format_get_blockheight(format
) * ptrans
->stride
+
356 ptrans
->box
.x
/ util_format_get_blockwidth(format
) * rsc
->cpp
+
357 ptrans
->box
.z
* rsc
->cube_map_stride
;
362 vc4_resource_transfer_unmap(pctx
, ptrans
);
367 vc4_resource_destroy(struct pipe_screen
*pscreen
,
368 struct pipe_resource
*prsc
)
370 struct vc4_screen
*screen
= vc4_screen(pscreen
);
371 struct vc4_resource
*rsc
= vc4_resource(prsc
);
372 pipe_resource_reference(&rsc
->shadow_parent
, NULL
);
373 vc4_bo_unreference(&rsc
->bo
);
376 renderonly_scanout_destroy(rsc
->scanout
, screen
->ro
);
382 vc4_resource_get_handle(struct pipe_screen
*pscreen
,
383 struct pipe_context
*pctx
,
384 struct pipe_resource
*prsc
,
385 struct winsys_handle
*whandle
,
388 struct vc4_screen
*screen
= vc4_screen(pscreen
);
389 struct vc4_resource
*rsc
= vc4_resource(prsc
);
391 whandle
->stride
= rsc
->slices
[0].stride
;
393 /* If we're passing some reference to our BO out to some other part of
394 * the system, then we can't do any optimizations about only us being
395 * the ones seeing it (like BO caching or shadow update avoidance).
397 rsc
->bo
->private = false;
399 switch (whandle
->type
) {
400 case DRM_API_HANDLE_TYPE_SHARED
:
402 /* This could probably be supported, assuming that a
403 * control node was used for pl111.
405 fprintf(stderr
, "flink unsupported with pl111\n");
409 return vc4_bo_flink(rsc
->bo
, &whandle
->handle
);
410 case DRM_API_HANDLE_TYPE_KMS
:
411 if (screen
->ro
&& renderonly_get_handle(rsc
->scanout
, whandle
))
413 whandle
->handle
= rsc
->bo
->handle
;
415 case DRM_API_HANDLE_TYPE_FD
:
416 /* FDs are cross-device, so we can export directly from vc4.
418 whandle
->handle
= vc4_bo_get_dmabuf(rsc
->bo
);
419 return whandle
->handle
!= -1;
426 vc4_setup_slices(struct vc4_resource
*rsc
)
428 struct pipe_resource
*prsc
= &rsc
->base
;
429 uint32_t width
= prsc
->width0
;
430 uint32_t height
= prsc
->height0
;
431 if (prsc
->format
== PIPE_FORMAT_ETC1_RGB8
) {
432 width
= (width
+ 3) >> 2;
433 height
= (height
+ 3) >> 2;
436 uint32_t pot_width
= util_next_power_of_two(width
);
437 uint32_t pot_height
= util_next_power_of_two(height
);
439 uint32_t utile_w
= vc4_utile_width(rsc
->cpp
);
440 uint32_t utile_h
= vc4_utile_height(rsc
->cpp
);
442 for (int i
= prsc
->last_level
; i
>= 0; i
--) {
443 struct vc4_resource_slice
*slice
= &rsc
->slices
[i
];
445 uint32_t level_width
, level_height
;
448 level_height
= height
;
450 level_width
= u_minify(pot_width
, i
);
451 level_height
= u_minify(pot_height
, i
);
455 slice
->tiling
= VC4_TILING_FORMAT_LINEAR
;
456 if (prsc
->nr_samples
> 1) {
457 /* MSAA (4x) surfaces are stored as raw tile buffer contents. */
458 level_width
= align(level_width
, 32);
459 level_height
= align(level_height
, 32);
461 level_width
= align(level_width
, utile_w
);
464 if (vc4_size_is_lt(level_width
, level_height
,
466 slice
->tiling
= VC4_TILING_FORMAT_LT
;
467 level_width
= align(level_width
, utile_w
);
468 level_height
= align(level_height
, utile_h
);
470 slice
->tiling
= VC4_TILING_FORMAT_T
;
471 level_width
= align(level_width
,
473 level_height
= align(level_height
,
478 slice
->offset
= offset
;
479 slice
->stride
= (level_width
* rsc
->cpp
*
480 MAX2(prsc
->nr_samples
, 1));
481 slice
->size
= level_height
* slice
->stride
;
483 offset
+= slice
->size
;
486 static const char tiling_chars
[] = {
487 [VC4_TILING_FORMAT_LINEAR
] = 'R',
488 [VC4_TILING_FORMAT_LT
] = 'L',
489 [VC4_TILING_FORMAT_T
] = 'T'
492 "rsc setup %p (format %s: vc4 %d), %dx%d: "
493 "level %d (%c) -> %dx%d, stride %d@0x%08x\n",
495 util_format_short_name(prsc
->format
),
497 prsc
->width0
, prsc
->height0
,
498 i
, tiling_chars
[slice
->tiling
],
499 level_width
, level_height
,
500 slice
->stride
, slice
->offset
);
504 /* The texture base pointer that has to point to level 0 doesn't have
505 * intra-page bits, so we have to align it, and thus shift up all the
508 uint32_t page_align_offset
= (align(rsc
->slices
[0].offset
, 4096) -
509 rsc
->slices
[0].offset
);
510 if (page_align_offset
) {
511 for (int i
= 0; i
<= prsc
->last_level
; i
++)
512 rsc
->slices
[i
].offset
+= page_align_offset
;
515 /* Cube map faces appear as whole miptrees at a page-aligned offset
516 * from the first face's miptree.
518 if (prsc
->target
== PIPE_TEXTURE_CUBE
) {
519 rsc
->cube_map_stride
= align(rsc
->slices
[0].offset
+
520 rsc
->slices
[0].size
, 4096);
524 static struct vc4_resource
*
525 vc4_resource_setup(struct pipe_screen
*pscreen
,
526 const struct pipe_resource
*tmpl
)
528 struct vc4_resource
*rsc
= CALLOC_STRUCT(vc4_resource
);
531 struct pipe_resource
*prsc
= &rsc
->base
;
535 pipe_reference_init(&prsc
->reference
, 1);
536 prsc
->screen
= pscreen
;
538 if (prsc
->nr_samples
<= 1)
539 rsc
->cpp
= util_format_get_blocksize(tmpl
->format
);
541 rsc
->cpp
= sizeof(uint32_t);
548 static enum vc4_texture_data_type
549 get_resource_texture_format(struct pipe_resource
*prsc
)
551 struct vc4_resource
*rsc
= vc4_resource(prsc
);
552 uint8_t format
= vc4_get_tex_format(prsc
->format
);
555 if (prsc
->nr_samples
> 1) {
558 assert(format
== VC4_TEXTURE_TYPE_RGBA8888
);
559 return VC4_TEXTURE_TYPE_RGBA32R
;
566 struct pipe_resource
*
567 vc4_resource_create(struct pipe_screen
*pscreen
,
568 const struct pipe_resource
*tmpl
)
570 struct vc4_screen
*screen
= vc4_screen(pscreen
);
571 struct vc4_resource
*rsc
= vc4_resource_setup(pscreen
, tmpl
);
572 struct pipe_resource
*prsc
= &rsc
->base
;
574 /* We have to make shared be untiled, since we don't have any way to
575 * communicate metadata about tiling currently.
577 if (tmpl
->target
== PIPE_BUFFER
||
578 tmpl
->nr_samples
> 1 ||
579 (tmpl
->bind
& (PIPE_BIND_SCANOUT
|
582 PIPE_BIND_CURSOR
))) {
588 if (tmpl
->target
!= PIPE_BUFFER
)
589 rsc
->vc4_format
= get_resource_texture_format(prsc
);
591 vc4_setup_slices(rsc
);
592 if (!vc4_resource_bo_alloc(rsc
))
595 if (screen
->ro
&& tmpl
->bind
& PIPE_BIND_SCANOUT
) {
597 renderonly_scanout_for_resource(prsc
, screen
->ro
);
604 vc4_resource_destroy(pscreen
, prsc
);
608 static struct pipe_resource
*
609 vc4_resource_from_handle(struct pipe_screen
*pscreen
,
610 const struct pipe_resource
*tmpl
,
611 struct winsys_handle
*whandle
,
614 struct vc4_screen
*screen
= vc4_screen(pscreen
);
615 struct vc4_resource
*rsc
= vc4_resource_setup(pscreen
, tmpl
);
616 struct pipe_resource
*prsc
= &rsc
->base
;
617 struct vc4_resource_slice
*slice
= &rsc
->slices
[0];
618 uint32_t expected_stride
=
619 align(prsc
->width0
, vc4_utile_width(rsc
->cpp
)) * rsc
->cpp
;
624 if (whandle
->stride
!= expected_stride
) {
625 static bool warned
= false;
629 "Attempting to import %dx%d %s with "
630 "unsupported stride %d instead of %d\n",
631 prsc
->width0
, prsc
->height0
,
632 util_format_short_name(prsc
->format
),
641 if (whandle
->offset
!= 0) {
643 "Attempt to import unsupported winsys offset %u\n",
648 switch (whandle
->type
) {
649 case DRM_API_HANDLE_TYPE_SHARED
:
650 rsc
->bo
= vc4_bo_open_name(screen
,
651 whandle
->handle
, whandle
->stride
);
653 case DRM_API_HANDLE_TYPE_FD
:
654 rsc
->bo
= vc4_bo_open_dmabuf(screen
,
655 whandle
->handle
, whandle
->stride
);
659 "Attempt to import unsupported handle type %d\n",
666 slice
->stride
= whandle
->stride
;
667 slice
->tiling
= VC4_TILING_FORMAT_LINEAR
;
669 rsc
->vc4_format
= get_resource_texture_format(prsc
);
672 /* Make sure that renderonly has a handle to our buffer in the
673 * display's fd, so that a later renderonly_get_handle()
674 * returns correct handles or GEM names.
677 renderonly_create_gpu_import_for_resource(prsc
,
685 "rsc import %p (format %d), %dx%d: "
686 "level 0 (R) -> stride %d@0x%08x\n",
687 rsc
, rsc
->vc4_format
,
688 prsc
->width0
, prsc
->height0
,
689 slice
->stride
, slice
->offset
);
695 vc4_resource_destroy(pscreen
, prsc
);
699 static struct pipe_surface
*
700 vc4_create_surface(struct pipe_context
*pctx
,
701 struct pipe_resource
*ptex
,
702 const struct pipe_surface
*surf_tmpl
)
704 struct vc4_surface
*surface
= CALLOC_STRUCT(vc4_surface
);
705 struct vc4_resource
*rsc
= vc4_resource(ptex
);
710 assert(surf_tmpl
->u
.tex
.first_layer
== surf_tmpl
->u
.tex
.last_layer
);
712 struct pipe_surface
*psurf
= &surface
->base
;
713 unsigned level
= surf_tmpl
->u
.tex
.level
;
715 pipe_reference_init(&psurf
->reference
, 1);
716 pipe_resource_reference(&psurf
->texture
, ptex
);
718 psurf
->context
= pctx
;
719 psurf
->format
= surf_tmpl
->format
;
720 psurf
->width
= u_minify(ptex
->width0
, level
);
721 psurf
->height
= u_minify(ptex
->height0
, level
);
722 psurf
->u
.tex
.level
= level
;
723 psurf
->u
.tex
.first_layer
= surf_tmpl
->u
.tex
.first_layer
;
724 psurf
->u
.tex
.last_layer
= surf_tmpl
->u
.tex
.last_layer
;
725 surface
->offset
= (rsc
->slices
[level
].offset
+
726 psurf
->u
.tex
.first_layer
* rsc
->cube_map_stride
);
727 surface
->tiling
= rsc
->slices
[level
].tiling
;
729 return &surface
->base
;
733 vc4_surface_destroy(struct pipe_context
*pctx
, struct pipe_surface
*psurf
)
735 pipe_resource_reference(&psurf
->texture
, NULL
);
740 vc4_dump_surface_non_msaa(struct pipe_surface
*psurf
)
742 struct pipe_resource
*prsc
= psurf
->texture
;
743 struct vc4_resource
*rsc
= vc4_resource(prsc
);
744 uint32_t *map
= vc4_bo_map(rsc
->bo
);
745 uint32_t stride
= rsc
->slices
[0].stride
/ 4;
746 uint32_t width
= psurf
->width
;
747 uint32_t height
= psurf
->height
;
748 uint32_t chunk_w
= width
/ 79;
749 uint32_t chunk_h
= height
/ 40;
750 uint32_t found_colors
[10];
751 uint32_t num_found_colors
= 0;
753 if (rsc
->vc4_format
!= VC4_TEXTURE_TYPE_RGBA32R
) {
754 fprintf(stderr
, "%s: Unsupported format %s\n",
755 __func__
, util_format_short_name(psurf
->format
));
759 for (int by
= 0; by
< height
; by
+= chunk_h
) {
760 for (int bx
= 0; bx
< width
; bx
+= chunk_w
) {
761 int all_found_color
= -1; /* nothing found */
763 for (int y
= by
; y
< MIN2(height
, by
+ chunk_h
); y
++) {
764 for (int x
= bx
; x
< MIN2(width
, bx
+ chunk_w
); x
++) {
765 uint32_t pix
= map
[y
* stride
+ x
];
768 for (i
= 0; i
< num_found_colors
; i
++) {
769 if (pix
== found_colors
[i
])
772 if (i
== num_found_colors
&&
774 ARRAY_SIZE(found_colors
)) {
775 found_colors
[num_found_colors
++] = pix
;
778 if (i
< num_found_colors
) {
779 if (all_found_color
== -1)
781 else if (i
!= all_found_color
)
782 all_found_color
= ARRAY_SIZE(found_colors
);
786 /* If all pixels for this chunk have a consistent
787 * value, then print a character for it. Either a
788 * fixed name (particularly common for piglit tests),
789 * or a runtime-generated number.
791 if (all_found_color
>= 0 &&
792 all_found_color
< ARRAY_SIZE(found_colors
)) {
793 static const struct {
805 for (i
= 0; i
< ARRAY_SIZE(named_colors
); i
++) {
806 if (named_colors
[i
].val
==
807 found_colors
[all_found_color
]) {
808 fprintf(stderr
, "%s",
813 /* For unnamed colors, print a number and the
814 * numbers will have values printed at the
817 if (i
== ARRAY_SIZE(named_colors
)) {
818 fprintf(stderr
, "%c",
819 '0' + all_found_color
);
822 /* If there's no consistent color, print this.
824 fprintf(stderr
, ".");
827 fprintf(stderr
, "\n");
830 for (int i
= 0; i
< num_found_colors
; i
++) {
831 fprintf(stderr
, "color %d: 0x%08x\n", i
, found_colors
[i
]);
836 vc4_surface_msaa_get_sample(struct pipe_surface
*psurf
,
837 uint32_t x
, uint32_t y
, uint32_t sample
)
839 struct pipe_resource
*prsc
= psurf
->texture
;
840 struct vc4_resource
*rsc
= vc4_resource(prsc
);
841 uint32_t tile_w
= 32, tile_h
= 32;
842 uint32_t tiles_w
= DIV_ROUND_UP(psurf
->width
, 32);
844 uint32_t tile_x
= x
/ tile_w
;
845 uint32_t tile_y
= y
/ tile_h
;
846 uint32_t *tile
= (vc4_bo_map(rsc
->bo
) +
847 VC4_TILE_BUFFER_SIZE
* (tile_y
* tiles_w
+ tile_x
));
848 uint32_t subtile_x
= x
% tile_w
;
849 uint32_t subtile_y
= y
% tile_h
;
851 uint32_t quad_samples
= VC4_MAX_SAMPLES
* 4;
852 uint32_t tile_stride
= quad_samples
* tile_w
/ 2;
854 return *((uint32_t *)tile
+
855 (subtile_y
>> 1) * tile_stride
+
856 (subtile_x
>> 1) * quad_samples
+
857 ((subtile_y
& 1) << 1) +
863 vc4_dump_surface_msaa_char(struct pipe_surface
*psurf
,
864 uint32_t start_x
, uint32_t start_y
,
865 uint32_t w
, uint32_t h
)
867 bool all_same_color
= true;
868 uint32_t all_pix
= 0;
870 for (int y
= start_y
; y
< start_y
+ h
; y
++) {
871 for (int x
= start_x
; x
< start_x
+ w
; x
++) {
872 for (int s
= 0; s
< VC4_MAX_SAMPLES
; s
++) {
873 uint32_t pix
= vc4_surface_msaa_get_sample(psurf
,
876 if (x
== start_x
&& y
== start_y
)
878 else if (all_pix
!= pix
)
879 all_same_color
= false;
883 if (all_same_color
) {
884 static const struct {
896 for (i
= 0; i
< ARRAY_SIZE(named_colors
); i
++) {
897 if (named_colors
[i
].val
== all_pix
) {
898 fprintf(stderr
, "%s",
903 fprintf(stderr
, "x");
905 fprintf(stderr
, ".");
910 vc4_dump_surface_msaa(struct pipe_surface
*psurf
)
912 uint32_t tile_w
= 32, tile_h
= 32;
913 uint32_t tiles_w
= DIV_ROUND_UP(psurf
->width
, tile_w
);
914 uint32_t tiles_h
= DIV_ROUND_UP(psurf
->height
, tile_h
);
915 uint32_t char_w
= 140, char_h
= 60;
916 uint32_t char_w_per_tile
= char_w
/ tiles_w
- 1;
917 uint32_t char_h_per_tile
= char_h
/ tiles_h
- 1;
919 fprintf(stderr
, "Surface: %dx%d (%dx MSAA)\n",
920 psurf
->width
, psurf
->height
, psurf
->texture
->nr_samples
);
922 for (int x
= 0; x
< (char_w_per_tile
+ 1) * tiles_w
; x
++)
923 fprintf(stderr
, "-");
924 fprintf(stderr
, "\n");
926 for (int ty
= 0; ty
< psurf
->height
; ty
+= tile_h
) {
927 for (int y
= 0; y
< char_h_per_tile
; y
++) {
929 for (int tx
= 0; tx
< psurf
->width
; tx
+= tile_w
) {
930 for (int x
= 0; x
< char_w_per_tile
; x
++) {
931 uint32_t bx1
= (x
* tile_w
/
933 uint32_t bx2
= ((x
+ 1) * tile_w
/
935 uint32_t by1
= (y
* tile_h
/
937 uint32_t by2
= ((y
+ 1) * tile_h
/
940 vc4_dump_surface_msaa_char(psurf
,
946 fprintf(stderr
, "|");
948 fprintf(stderr
, "\n");
951 for (int x
= 0; x
< (char_w_per_tile
+ 1) * tiles_w
; x
++)
952 fprintf(stderr
, "-");
953 fprintf(stderr
, "\n");
957 /** Debug routine to dump the contents of an 8888 surface to the console */
959 vc4_dump_surface(struct pipe_surface
*psurf
)
964 if (psurf
->texture
->nr_samples
> 1)
965 vc4_dump_surface_msaa(psurf
);
967 vc4_dump_surface_non_msaa(psurf
);
/** No-op flush_resource hook (intentionally empty — see comment). */
static void
vc4_flush_resource(struct pipe_context *pctx, struct pipe_resource *resource)
{
        /* All calls to flush_resource are followed by a flush of the context,
         * so there's nothing to do.
         */
}
979 vc4_update_shadow_baselevel_texture(struct pipe_context
*pctx
,
980 struct pipe_sampler_view
*view
)
982 struct vc4_resource
*shadow
= vc4_resource(view
->texture
);
983 struct vc4_resource
*orig
= vc4_resource(shadow
->shadow_parent
);
986 if (shadow
->writes
== orig
->writes
&& orig
->bo
->private)
989 perf_debug("Updating %dx%d@%d shadow texture due to %s\n",
990 orig
->base
.width0
, orig
->base
.height0
,
991 view
->u
.tex
.first_level
,
992 view
->u
.tex
.first_level
? "base level" : "raster layout");
994 for (int i
= 0; i
<= shadow
->base
.last_level
; i
++) {
995 unsigned width
= u_minify(shadow
->base
.width0
, i
);
996 unsigned height
= u_minify(shadow
->base
.height0
, i
);
997 struct pipe_blit_info info
= {
999 .resource
= &shadow
->base
,
1009 .format
= shadow
->base
.format
,
1012 .resource
= &orig
->base
,
1013 .level
= view
->u
.tex
.first_level
+ i
,
1022 .format
= orig
->base
.format
,
1026 pctx
->blit(pctx
, &info
);
1029 shadow
->writes
= orig
->writes
;
1033 * Converts a 4-byte index buffer to 2 bytes.
1035 * Since GLES2 only has support for 1 and 2-byte indices, the hardware doesn't
1036 * include 4-byte index support, and we have to shrink it down.
1038 * There's no fallback support for when indices end up being larger than 2^16,
1039 * though it will at least assertion fail. Also, if the original index data
1040 * was in user memory, it would be nice to not have uploaded it to a VBO
1041 * before translating.
1043 struct pipe_resource
*
1044 vc4_get_shadow_index_buffer(struct pipe_context
*pctx
,
1045 const struct pipe_draw_info
*info
,
1048 uint32_t *shadow_offset
)
1050 struct vc4_context
*vc4
= vc4_context(pctx
);
1051 struct vc4_resource
*orig
= vc4_resource(info
->index
.resource
);
1052 perf_debug("Fallback conversion for %d uint indices\n", count
);
1055 struct pipe_resource
*shadow_rsc
= NULL
;
1056 u_upload_alloc(vc4
->uploader
, 0, count
* 2, 4,
1057 shadow_offset
, &shadow_rsc
, &data
);
1058 uint16_t *dst
= data
;
1060 struct pipe_transfer
*src_transfer
= NULL
;
1061 const uint32_t *src
;
1062 if (info
->has_user_indices
) {
1063 src
= info
->index
.user
;
1065 src
= pipe_buffer_map_range(pctx
, &orig
->base
,
1068 PIPE_TRANSFER_READ
, &src_transfer
);
1071 for (int i
= 0; i
< count
; i
++) {
1072 uint32_t src_index
= src
[i
];
1073 assert(src_index
<= 0xffff);
1078 pctx
->transfer_unmap(pctx
, src_transfer
);
1084 vc4_resource_screen_init(struct pipe_screen
*pscreen
)
1086 pscreen
->resource_create
= vc4_resource_create
;
1087 pscreen
->resource_from_handle
= vc4_resource_from_handle
;
1088 pscreen
->resource_destroy
= u_resource_destroy_vtbl
;
1089 pscreen
->resource_get_handle
= vc4_resource_get_handle
;
1090 pscreen
->resource_destroy
= vc4_resource_destroy
;
1094 vc4_resource_context_init(struct pipe_context
*pctx
)
1096 pctx
->transfer_map
= vc4_resource_transfer_map
;
1097 pctx
->transfer_flush_region
= u_default_transfer_flush_region
;
1098 pctx
->transfer_unmap
= vc4_resource_transfer_unmap
;
1099 pctx
->buffer_subdata
= u_default_buffer_subdata
;
1100 pctx
->texture_subdata
= u_default_texture_subdata
;
1101 pctx
->create_surface
= vc4_create_surface
;
1102 pctx
->surface_destroy
= vc4_surface_destroy
;
1103 pctx
->resource_copy_region
= util_resource_copy_region
;
1104 pctx
->blit
= vc4_blit
;
1105 pctx
->flush_resource
= vc4_flush_resource
;