1 /**************************************************************************
3 * Copyright 2009 VMware, Inc. All Rights Reserved.
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sub license, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
13 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
18 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
20 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
21 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
22 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
23 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 **************************************************************************/
29 * Surface utility functions.
35 #include "pipe/p_defines.h"
36 #include "pipe/p_screen.h"
37 #include "pipe/p_state.h"
39 #include "util/u_format.h"
40 #include "util/u_inlines.h"
41 #include "util/u_rect.h"
42 #include "util/u_surface.h"
43 #include "util/u_pack_color.h"
47 * Initialize a pipe_surface object. 'view' is considered to have
48 * uninitialized contents.
51 u_surface_default_template(struct pipe_surface
*surf
,
52 const struct pipe_resource
*texture
)
54 memset(surf
, 0, sizeof(*surf
));
56 surf
->format
= texture
->format
;
61 * Copy 2D rect from one place to another.
62 * Position and sizes are in pixels.
63 * src_stride may be negative to do vertical flip of pixels from source.
66 util_copy_rect(ubyte
* dst
,
67 enum pipe_format format
,
79 int src_stride_pos
= src_stride
< 0 ? -src_stride
: src_stride
;
80 int blocksize
= util_format_get_blocksize(format
);
81 int blockwidth
= util_format_get_blockwidth(format
);
82 int blockheight
= util_format_get_blockheight(format
);
84 assert(blocksize
> 0);
85 assert(blockwidth
> 0);
86 assert(blockheight
> 0);
90 width
= (width
+ blockwidth
- 1)/blockwidth
;
91 height
= (height
+ blockheight
- 1)/blockheight
;
95 dst
+= dst_x
* blocksize
;
96 src
+= src_x
* blocksize
;
97 dst
+= dst_y
* dst_stride
;
98 src
+= src_y
* src_stride_pos
;
101 if (width
== dst_stride
&& width
== src_stride
)
102 memcpy(dst
, src
, height
* width
);
104 for (i
= 0; i
< height
; i
++) {
105 memcpy(dst
, src
, width
);
114 * Copy 3D box from one place to another.
115 * Position and sizes are in pixels.
118 util_copy_box(ubyte
* dst
,
119 enum pipe_format format
,
120 unsigned dst_stride
, unsigned dst_slice_stride
,
121 unsigned dst_x
, unsigned dst_y
, unsigned dst_z
,
122 unsigned width
, unsigned height
, unsigned depth
,
124 int src_stride
, unsigned src_slice_stride
,
125 unsigned src_x
, unsigned src_y
, unsigned src_z
)
128 dst
+= dst_z
* dst_slice_stride
;
129 src
+= src_z
* src_slice_stride
;
130 for (z
= 0; z
< depth
; ++z
) {
140 dst
+= dst_slice_stride
;
141 src
+= src_slice_stride
;
147 util_fill_rect(ubyte
* dst
,
148 enum pipe_format format
,
154 union util_color
*uc
)
156 const struct util_format_description
*desc
= util_format_description(format
);
159 int blocksize
= desc
->block
.bits
/ 8;
160 int blockwidth
= desc
->block
.width
;
161 int blockheight
= desc
->block
.height
;
163 assert(blocksize
> 0);
164 assert(blockwidth
> 0);
165 assert(blockheight
> 0);
168 dst_y
/= blockheight
;
169 width
= (width
+ blockwidth
- 1)/blockwidth
;
170 height
= (height
+ blockheight
- 1)/blockheight
;
172 dst
+= dst_x
* blocksize
;
173 dst
+= dst_y
* dst_stride
;
174 width_size
= width
* blocksize
;
178 if(dst_stride
== width_size
)
179 memset(dst
, uc
->ub
, height
* width_size
);
181 for (i
= 0; i
< height
; i
++) {
182 memset(dst
, uc
->ub
, width_size
);
188 for (i
= 0; i
< height
; i
++) {
189 uint16_t *row
= (uint16_t *)dst
;
190 for (j
= 0; j
< width
; j
++)
196 for (i
= 0; i
< height
; i
++) {
197 uint32_t *row
= (uint32_t *)dst
;
198 for (j
= 0; j
< width
; j
++)
204 for (i
= 0; i
< height
; i
++) {
206 for (j
= 0; j
< width
; j
++) {
207 memcpy(row
, uc
, blocksize
);
218 util_fill_box(ubyte
* dst
,
219 enum pipe_format format
,
221 unsigned layer_stride
,
228 union util_color
*uc
)
231 dst
+= z
* layer_stride
;
232 for (layer
= z
; layer
< depth
; layer
++) {
233 util_fill_rect(dst
, format
,
235 x
, y
, width
, height
, uc
);
/** Mipmap level size computation, with minimum block size */
static inline unsigned
minify(unsigned value, unsigned levels, unsigned blocksize)
{
   /* Shift down to the requested mip level, but never report a
    * dimension smaller than one block.
    */
   unsigned minified = value >> levels;
   return (minified > blocksize) ? minified : blocksize;
}
250 * Fallback function for pipe->resource_copy_region().
251 * We support copying between different formats (including compressed/
252 * uncompressed) if the bytes per block or pixel matches. If copying
253 * compressed -> uncompressed, the dst region is reduced by the block
254 * width, height. If copying uncompressed -> compressed, the dest region
255 * is expanded by the block width, height. See GL_ARB_copy_image.
256 * Note: (X,Y)=(0,0) is always the upper-left corner.
259 util_resource_copy_region(struct pipe_context
*pipe
,
260 struct pipe_resource
*dst
,
262 unsigned dst_x
, unsigned dst_y
, unsigned dst_z
,
263 struct pipe_resource
*src
,
265 const struct pipe_box
*src_box_in
)
267 struct pipe_transfer
*src_trans
, *dst_trans
;
269 const uint8_t *src_map
;
270 MAYBE_UNUSED
enum pipe_format src_format
;
271 enum pipe_format dst_format
;
272 struct pipe_box src_box
, dst_box
;
273 unsigned src_bs
, dst_bs
, src_bw
, dst_bw
, src_bh
, dst_bh
;
279 assert((src
->target
== PIPE_BUFFER
&& dst
->target
== PIPE_BUFFER
) ||
280 (src
->target
!= PIPE_BUFFER
&& dst
->target
!= PIPE_BUFFER
));
282 src_format
= src
->format
;
283 dst_format
= dst
->format
;
286 src_box
= *src_box_in
;
292 dst_box
.width
= src_box
.width
;
293 dst_box
.height
= src_box
.height
;
294 dst_box
.depth
= src_box
.depth
;
296 src_bs
= util_format_get_blocksize(src_format
);
297 src_bw
= util_format_get_blockwidth(src_format
);
298 src_bh
= util_format_get_blockheight(src_format
);
299 dst_bs
= util_format_get_blocksize(dst_format
);
300 dst_bw
= util_format_get_blockwidth(dst_format
);
301 dst_bh
= util_format_get_blockheight(dst_format
);
303 /* Note: all box positions and sizes are in pixels */
304 if (src_bw
> 1 && dst_bw
== 1) {
305 /* Copy from compressed to uncompressed.
306 * Shrink dest box by the src block size.
308 dst_box
.width
/= src_bw
;
309 dst_box
.height
/= src_bh
;
311 else if (src_bw
== 1 && dst_bw
> 1) {
312 /* Copy from uncompressed to compressed.
313 * Expand dest box by the dest block size.
315 dst_box
.width
*= dst_bw
;
316 dst_box
.height
*= dst_bh
;
319 /* compressed -> compressed or uncompressed -> uncompressed copy */
320 assert(src_bw
== dst_bw
);
321 assert(src_bh
== dst_bh
);
324 assert(src_bs
== dst_bs
);
325 if (src_bs
!= dst_bs
) {
326 /* This can happen if we fail to do format checking before hand.
332 /* check that region boxes are block aligned */
333 assert(src_box
.x
% src_bw
== 0);
334 assert(src_box
.y
% src_bh
== 0);
335 assert(src_box
.width
% src_bw
== 0 || src_box
.width
< src_bw
);
336 assert(src_box
.height
% src_bh
== 0 || src_box
.height
< src_bh
);
337 assert(dst_box
.x
% dst_bw
== 0);
338 assert(dst_box
.y
% dst_bh
== 0);
339 assert(dst_box
.width
% dst_bw
== 0 || dst_box
.width
< dst_bw
);
340 assert(dst_box
.height
% dst_bh
== 0 || dst_box
.height
< src_bh
);
342 /* check that region boxes are not out of bounds */
343 assert(src_box
.x
+ src_box
.width
<=
344 minify(src
->width0
, src_level
, src_bw
));
345 assert(src_box
.y
+ src_box
.height
<=
346 minify(src
->height0
, src_level
, src_bh
));
347 assert(dst_box
.x
+ dst_box
.width
<=
348 minify(dst
->width0
, dst_level
, dst_bw
));
349 assert(dst_box
.y
+ dst_box
.height
<=
350 minify(dst
->height0
, dst_level
, dst_bh
));
352 /* check that total number of src, dest bytes match */
353 assert((src_box
.width
/ src_bw
) * (src_box
.height
/ src_bh
) * src_bs
==
354 (dst_box
.width
/ dst_bw
) * (dst_box
.height
/ dst_bh
) * dst_bs
);
356 src_map
= pipe
->transfer_map(pipe
,
360 &src_box
, &src_trans
);
366 dst_map
= pipe
->transfer_map(pipe
,
369 PIPE_TRANSFER_WRITE
|
370 PIPE_TRANSFER_DISCARD_RANGE
, &dst_box
,
377 if (dst
->target
== PIPE_BUFFER
&& src
->target
== PIPE_BUFFER
) {
378 assert(src_box
.height
== 1);
379 assert(src_box
.depth
== 1);
380 memcpy(dst_map
, src_map
, src_box
.width
);
382 util_copy_box(dst_map
,
384 dst_trans
->stride
, dst_trans
->layer_stride
,
386 src_box
.width
, src_box
.height
, src_box
.depth
,
388 src_trans
->stride
, src_trans
->layer_stride
,
392 pipe
->transfer_unmap(pipe
, dst_trans
);
394 pipe
->transfer_unmap(pipe
, src_trans
);
/* Replicate a ubyte value into both bytes of a ushort, e.g. 0xAB -> 0xABAB. */
#define UBYTE_TO_USHORT(B) ((B) | ((B) << 8))
405 * Fallback for pipe->clear_render_target() function.
406 * XXX this looks too hackish to be really useful.
407 * cpp > 4 looks like a gross hack at best...
408 * Plus can't use these transfer fallbacks when clearing
409 * multisampled surfaces for instance.
410 * Clears all bound layers.
413 util_clear_render_target(struct pipe_context
*pipe
,
414 struct pipe_surface
*dst
,
415 const union pipe_color_union
*color
,
416 unsigned dstx
, unsigned dsty
,
417 unsigned width
, unsigned height
)
419 struct pipe_transfer
*dst_trans
;
424 assert(dst
->texture
);
428 if (dst
->texture
->target
== PIPE_BUFFER
) {
430 * The fill naturally works on the surface format, however
431 * the transfer uses resource format which is just bytes for buffers.
434 unsigned pixstride
= util_format_get_blocksize(dst
->format
);
435 dx
= (dst
->u
.buf
.first_element
+ dstx
) * pixstride
;
436 w
= width
* pixstride
;
438 dst_map
= pipe_transfer_map(pipe
,
446 max_layer
= dst
->u
.tex
.last_layer
- dst
->u
.tex
.first_layer
;
447 dst_map
= pipe_transfer_map_3d(pipe
,
451 dstx
, dsty
, dst
->u
.tex
.first_layer
,
452 width
, height
, max_layer
+ 1, &dst_trans
);
458 enum pipe_format format
= dst
->format
;
459 assert(dst_trans
->stride
> 0);
461 if (util_format_is_pure_integer(format
)) {
463 * We expect int/uint clear values here, though some APIs
464 * might disagree (but in any case util_pack_color()
465 * couldn't handle it)...
467 if (util_format_is_pure_sint(format
)) {
468 util_format_write_4i(format
, color
->i
, 0, &uc
, 0, 0, 0, 1, 1);
471 assert(util_format_is_pure_uint(format
));
472 util_format_write_4ui(format
, color
->ui
, 0, &uc
, 0, 0, 0, 1, 1);
476 util_pack_color(color
->f
, format
, &uc
);
479 util_fill_box(dst_map
, dst
->format
,
480 dst_trans
->stride
, dst_trans
->layer_stride
,
481 0, 0, 0, width
, height
, max_layer
+ 1, &uc
);
483 pipe
->transfer_unmap(pipe
, dst_trans
);
488 * Fallback for pipe->clear_stencil() function.
489 * sw fallback doesn't look terribly useful here.
490 * Plus can't use these transfer fallbacks when clearing
491 * multisampled surfaces for instance.
492 * Clears all bound layers.
495 util_clear_depth_stencil(struct pipe_context
*pipe
,
496 struct pipe_surface
*dst
,
497 unsigned clear_flags
,
500 unsigned dstx
, unsigned dsty
,
501 unsigned width
, unsigned height
)
503 enum pipe_format format
= dst
->format
;
504 struct pipe_transfer
*dst_trans
;
506 boolean need_rmw
= FALSE
;
507 unsigned max_layer
, layer
;
509 if ((clear_flags
& PIPE_CLEAR_DEPTHSTENCIL
) &&
510 ((clear_flags
& PIPE_CLEAR_DEPTHSTENCIL
) != PIPE_CLEAR_DEPTHSTENCIL
) &&
511 util_format_is_depth_and_stencil(format
))
514 assert(dst
->texture
);
518 max_layer
= dst
->u
.tex
.last_layer
- dst
->u
.tex
.first_layer
;
519 dst_map
= pipe_transfer_map_3d(pipe
,
522 (need_rmw
? PIPE_TRANSFER_READ_WRITE
:
523 PIPE_TRANSFER_WRITE
),
524 dstx
, dsty
, dst
->u
.tex
.first_layer
,
525 width
, height
, max_layer
+ 1, &dst_trans
);
529 unsigned dst_stride
= dst_trans
->stride
;
530 uint64_t zstencil
= util_pack64_z_stencil(format
, depth
, stencil
);
531 ubyte
*dst_layer
= dst_map
;
533 assert(dst_trans
->stride
> 0);
535 for (layer
= 0; layer
<= max_layer
; layer
++) {
538 switch (util_format_get_blocksize(format
)) {
540 assert(format
== PIPE_FORMAT_S8_UINT
);
541 if(dst_stride
== width
)
542 memset(dst_map
, (uint8_t) zstencil
, height
* width
);
544 for (i
= 0; i
< height
; i
++) {
545 memset(dst_map
, (uint8_t) zstencil
, width
);
546 dst_map
+= dst_stride
;
551 assert(format
== PIPE_FORMAT_Z16_UNORM
);
552 for (i
= 0; i
< height
; i
++) {
553 uint16_t *row
= (uint16_t *)dst_map
;
554 for (j
= 0; j
< width
; j
++)
555 *row
++ = (uint16_t) zstencil
;
556 dst_map
+= dst_stride
;
561 for (i
= 0; i
< height
; i
++) {
562 uint32_t *row
= (uint32_t *)dst_map
;
563 for (j
= 0; j
< width
; j
++)
564 *row
++ = (uint32_t) zstencil
;
565 dst_map
+= dst_stride
;
570 if (format
== PIPE_FORMAT_Z24_UNORM_S8_UINT
)
571 dst_mask
= 0x00ffffff;
573 assert(format
== PIPE_FORMAT_S8_UINT_Z24_UNORM
);
574 dst_mask
= 0xffffff00;
576 if (clear_flags
& PIPE_CLEAR_DEPTH
)
577 dst_mask
= ~dst_mask
;
578 for (i
= 0; i
< height
; i
++) {
579 uint32_t *row
= (uint32_t *)dst_map
;
580 for (j
= 0; j
< width
; j
++) {
581 uint32_t tmp
= *row
& dst_mask
;
582 *row
++ = tmp
| ((uint32_t) zstencil
& ~dst_mask
);
584 dst_map
+= dst_stride
;
590 for (i
= 0; i
< height
; i
++) {
591 uint64_t *row
= (uint64_t *)dst_map
;
592 for (j
= 0; j
< width
; j
++)
594 dst_map
+= dst_stride
;
600 if (clear_flags
& PIPE_CLEAR_DEPTH
)
601 src_mask
= 0x00000000ffffffffull
;
603 src_mask
= 0x000000ff00000000ull
;
605 for (i
= 0; i
< height
; i
++) {
606 uint64_t *row
= (uint64_t *)dst_map
;
607 for (j
= 0; j
< width
; j
++) {
608 uint64_t tmp
= *row
& ~src_mask
;
609 *row
++ = tmp
| (zstencil
& src_mask
);
611 dst_map
+= dst_stride
;
619 dst_layer
+= dst_trans
->layer_stride
;
622 pipe
->transfer_unmap(pipe
, dst_trans
);
627 /* Return if the box is totally inside the resource.
630 is_box_inside_resource(const struct pipe_resource
*res
,
631 const struct pipe_box
*box
,
634 unsigned width
= 1, height
= 1, depth
= 1;
636 switch (res
->target
) {
642 case PIPE_TEXTURE_1D
:
643 width
= u_minify(res
->width0
, level
);
647 case PIPE_TEXTURE_2D
:
648 case PIPE_TEXTURE_RECT
:
649 width
= u_minify(res
->width0
, level
);
650 height
= u_minify(res
->height0
, level
);
653 case PIPE_TEXTURE_3D
:
654 width
= u_minify(res
->width0
, level
);
655 height
= u_minify(res
->height0
, level
);
656 depth
= u_minify(res
->depth0
, level
);
658 case PIPE_TEXTURE_CUBE
:
659 width
= u_minify(res
->width0
, level
);
660 height
= u_minify(res
->height0
, level
);
663 case PIPE_TEXTURE_1D_ARRAY
:
664 width
= u_minify(res
->width0
, level
);
666 depth
= res
->array_size
;
668 case PIPE_TEXTURE_2D_ARRAY
:
669 width
= u_minify(res
->width0
, level
);
670 height
= u_minify(res
->height0
, level
);
671 depth
= res
->array_size
;
673 case PIPE_TEXTURE_CUBE_ARRAY
:
674 width
= u_minify(res
->width0
, level
);
675 height
= u_minify(res
->height0
, level
);
676 depth
= res
->array_size
;
677 assert(res
->array_size
% 6 == 0);
679 case PIPE_MAX_TEXTURE_TYPES
:
683 return box
->x
>= 0 &&
684 box
->x
+ box
->width
<= (int) width
&&
686 box
->y
+ box
->height
<= (int) height
&&
688 box
->z
+ box
->depth
<= (int) depth
;
692 get_sample_count(const struct pipe_resource
*res
)
694 return res
->nr_samples
? res
->nr_samples
: 1;
698 * Try to do a blit using resource_copy_region. The function calls
699 * resource_copy_region if the blit description is compatible with it.
701 * It returns TRUE if the blit was done using resource_copy_region.
703 * It returns FALSE otherwise and the caller must fall back to a more generic
704 * codepath for the blit operation. (e.g. by using u_blitter)
707 util_try_blit_via_copy_region(struct pipe_context
*ctx
,
708 const struct pipe_blit_info
*blit
)
710 unsigned mask
= util_format_get_mask(blit
->dst
.format
);
712 /* No format conversions. */
713 if (blit
->src
.resource
->format
!= blit
->src
.format
||
714 blit
->dst
.resource
->format
!= blit
->dst
.format
||
715 !util_is_format_compatible(
716 util_format_description(blit
->src
.resource
->format
),
717 util_format_description(blit
->dst
.resource
->format
))) {
721 /* No masks, no filtering, no scissor. */
722 if ((blit
->mask
& mask
) != mask
||
723 blit
->filter
!= PIPE_TEX_FILTER_NEAREST
||
724 blit
->scissor_enable
) {
729 if (blit
->src
.box
.width
< 0 ||
730 blit
->src
.box
.height
< 0 ||
731 blit
->src
.box
.depth
< 0) {
736 if (blit
->src
.box
.width
!= blit
->dst
.box
.width
||
737 blit
->src
.box
.height
!= blit
->dst
.box
.height
||
738 blit
->src
.box
.depth
!= blit
->dst
.box
.depth
) {
742 /* No out-of-bounds access. */
743 if (!is_box_inside_resource(blit
->src
.resource
, &blit
->src
.box
,
745 !is_box_inside_resource(blit
->dst
.resource
, &blit
->dst
.box
,
750 /* Sample counts must match. */
751 if (get_sample_count(blit
->src
.resource
) !=
752 get_sample_count(blit
->dst
.resource
)) {
756 if (blit
->alpha_blend
)
759 ctx
->resource_copy_region(ctx
, blit
->dst
.resource
, blit
->dst
.level
,
760 blit
->dst
.box
.x
, blit
->dst
.box
.y
, blit
->dst
.box
.z
,
761 blit
->src
.resource
, blit
->src
.level
,