util: Move gallium's PIPE_FORMAT utils to /util/format/
mesa.git: src/gallium/auxiliary/util/u_surface.c
/**************************************************************************
 *
 * Copyright 2009 VMware, Inc. All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/**
 * @file
 * Surface utility functions.
 *
 * @author Brian Paul
 */


#include "pipe/p_defines.h"
#include "pipe/p_screen.h"
#include "pipe/p_state.h"

#include "util/format/u_format.h"
#include "util/u_inlines.h"
#include "util/u_rect.h"
#include "util/u_surface.h"
#include "util/u_pack_color.h"

/**
 * Initialize a pipe_surface object.  'surf' is considered to have
 * uninitialized contents.
 */
void
u_surface_default_template(struct pipe_surface *surf,
                           const struct pipe_resource *texture)
{
   memset(surf, 0, sizeof(*surf));

   surf->format = texture->format;
}

/**
 * Copy 3D box from one place to another.
 * Position and sizes are in pixels.
 */
void
util_copy_box(ubyte * dst,
              enum pipe_format format,
              unsigned dst_stride, unsigned dst_slice_stride,
              unsigned dst_x, unsigned dst_y, unsigned dst_z,
              unsigned width, unsigned height, unsigned depth,
              const ubyte * src,
              int src_stride, unsigned src_slice_stride,
              unsigned src_x, unsigned src_y, unsigned src_z)
{
   unsigned z;
   dst += dst_z * dst_slice_stride;
   src += src_z * src_slice_stride;
   for (z = 0; z < depth; ++z) {
      util_copy_rect(dst,
                     format,
                     dst_stride,
                     dst_x, dst_y,
                     width, height,
                     src,
                     src_stride,
                     src_x, src_y);

      dst += dst_slice_stride;
      src += src_slice_stride;
   }
}

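/**
 * Fill a 2D rect of an image with a packed color value.
 * Position and sizes are in pixels; for block-based (e.g. compressed)
 * formats they are converted to block coordinates internally.
 */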
void
util_fill_rect(ubyte * dst,
               enum pipe_format format,
               unsigned dst_stride,
               unsigned dst_x,
               unsigned dst_y,
               unsigned width,
               unsigned height,
               union util_color *uc)
{
   const struct util_format_description *desc = util_format_description(format);
   unsigned i, j;
   unsigned width_size;
   int blocksize = desc->block.bits / 8;
   int blockwidth = desc->block.width;
   int blockheight = desc->block.height;

   assert(blocksize > 0);
   assert(blockwidth > 0);
   assert(blockheight > 0);

   dst_x /= blockwidth;
   dst_y /= blockheight;
   width = (width + blockwidth - 1) / blockwidth;
   height = (height + blockheight - 1) / blockheight;

   dst += dst_x * blocksize;
   dst += dst_y * dst_stride;
   width_size = width * blocksize;

   switch (blocksize) {
   case 1:
      if (dst_stride == width_size)
         memset(dst, uc->ub, height * width_size);
      else {
         for (i = 0; i < height; i++) {
            memset(dst, uc->ub, width_size);
            dst += dst_stride;
         }
      }
      break;
   case 2:
      for (i = 0; i < height; i++) {
         uint16_t *row = (uint16_t *)dst;
         for (j = 0; j < width; j++)
            *row++ = uc->us;
         dst += dst_stride;
      }
      break;
   case 4:
      for (i = 0; i < height; i++) {
         uint32_t *row = (uint32_t *)dst;
         for (j = 0; j < width; j++)
            *row++ = uc->ui[0];
         dst += dst_stride;
      }
      break;
   default:
      for (i = 0; i < height; i++) {
         ubyte *row = dst;
         for (j = 0; j < width; j++) {
            memcpy(row, uc, blocksize);
            row += blocksize;
         }
         dst += dst_stride;
      }
      break;
   }
}

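/**
 * Fill a 3D box of an image with a packed color value: the 2D fill of
 * util_fill_rect() is applied to each layer/slice in turn.
 */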
void
util_fill_box(ubyte * dst,
              enum pipe_format format,
              unsigned stride,
              unsigned layer_stride,
              unsigned x,
              unsigned y,
              unsigned z,
              unsigned width,
              unsigned height,
              unsigned depth,
              union util_color *uc)
{
   unsigned layer;
   dst += z * layer_stride;
   for (layer = z; layer < depth; layer++) {
      util_fill_rect(dst, format,
                     stride,
                     x, y, width, height, uc);
      dst += layer_stride;
   }
}

/**
 * Fallback function for pipe->resource_copy_region().
 * We support copying between different formats (including compressed/
 * uncompressed) if the bytes per block or pixel matches.  If copying
 * compressed -> uncompressed, the dst region is reduced by the block
 * width, height.  If copying uncompressed -> compressed, the dst region
 * is expanded by the block width, height.  See GL_ARB_copy_image.
 * Note: (X,Y)=(0,0) is always the upper-left corner.
 */
void
util_resource_copy_region(struct pipe_context *pipe,
                          struct pipe_resource *dst,
                          unsigned dst_level,
                          unsigned dst_x, unsigned dst_y, unsigned dst_z,
                          struct pipe_resource *src,
                          unsigned src_level,
                          const struct pipe_box *src_box_in)
{
   struct pipe_transfer *src_trans, *dst_trans;
   uint8_t *dst_map;
   const uint8_t *src_map;
   enum pipe_format src_format;
   enum pipe_format dst_format;
   struct pipe_box src_box, dst_box;
   unsigned src_bs, dst_bs, src_bw, dst_bw, src_bh, dst_bh;

   assert(src && dst);
   if (!src || !dst)
      return;

   assert((src->target == PIPE_BUFFER && dst->target == PIPE_BUFFER) ||
          (src->target != PIPE_BUFFER && dst->target != PIPE_BUFFER));

   src_format = src->format;
   dst_format = dst->format;

   /* init src box */
   src_box = *src_box_in;

   /* init dst box */
   dst_box.x = dst_x;
   dst_box.y = dst_y;
   dst_box.z = dst_z;
   dst_box.width = src_box.width;
   dst_box.height = src_box.height;
   dst_box.depth = src_box.depth;

   src_bs = util_format_get_blocksize(src_format);
   src_bw = util_format_get_blockwidth(src_format);
   src_bh = util_format_get_blockheight(src_format);
   dst_bs = util_format_get_blocksize(dst_format);
   dst_bw = util_format_get_blockwidth(dst_format);
   dst_bh = util_format_get_blockheight(dst_format);

   /* Note: all box positions and sizes are in pixels */
   if (src_bw > 1 && dst_bw == 1) {
      /* Copy from compressed to uncompressed.
       * Shrink dest box by the src block size.
       */
      dst_box.width /= src_bw;
      dst_box.height /= src_bh;
   }
   else if (src_bw == 1 && dst_bw > 1) {
      /* Copy from uncompressed to compressed.
       * Expand dest box by the dest block size.
       */
      dst_box.width *= dst_bw;
      dst_box.height *= dst_bh;
   }
   else {
      /* compressed -> compressed or uncompressed -> uncompressed copy */
      assert(src_bw == dst_bw);
      assert(src_bh == dst_bh);
   }

   assert(src_bs == dst_bs);
   if (src_bs != dst_bs) {
      /* This can happen if we fail to do format checking beforehand.
       * Don't crash below.
       */
      return;
   }

   /* check that region boxes are block aligned */
   assert(src_box.x % src_bw == 0);
   assert(src_box.y % src_bh == 0);
   assert(dst_box.x % dst_bw == 0);
   assert(dst_box.y % dst_bh == 0);

   /* check that region boxes are not out of bounds */
   assert(src_box.x + src_box.width <= (int)u_minify(src->width0, src_level));
   assert(src_box.y + src_box.height <= (int)u_minify(src->height0, src_level));
   assert(dst_box.x + dst_box.width <= (int)u_minify(dst->width0, dst_level));
   assert(dst_box.y + dst_box.height <= (int)u_minify(dst->height0, dst_level));

   /* check that total number of src, dest bytes match */
   assert((src_box.width / src_bw) * (src_box.height / src_bh) * src_bs ==
          (dst_box.width / dst_bw) * (dst_box.height / dst_bh) * dst_bs);

   src_map = pipe->transfer_map(pipe,
                                src,
                                src_level,
                                PIPE_TRANSFER_READ,
                                &src_box, &src_trans);
   assert(src_map);
   if (!src_map) {
      goto no_src_map;
   }

   dst_map = pipe->transfer_map(pipe,
                                dst,
                                dst_level,
                                PIPE_TRANSFER_WRITE |
                                PIPE_TRANSFER_DISCARD_RANGE, &dst_box,
                                &dst_trans);
   assert(dst_map);
   if (!dst_map) {
      goto no_dst_map;
   }

   if (dst->target == PIPE_BUFFER && src->target == PIPE_BUFFER) {
      assert(src_box.height == 1);
      assert(src_box.depth == 1);
      memcpy(dst_map, src_map, src_box.width);
   } else {
      util_copy_box(dst_map,
                    src_format,
                    dst_trans->stride, dst_trans->layer_stride,
                    0, 0, 0,
                    src_box.width, src_box.height, src_box.depth,
                    src_map,
                    src_trans->stride, src_trans->layer_stride,
                    0, 0, 0);
   }

   pipe->transfer_unmap(pipe, dst_trans);
no_dst_map:
   pipe->transfer_unmap(pipe, src_trans);
no_src_map:
   ;
}

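/**
 * Pack 'color' according to 'format' and fill the mapped region with it.
 * Pure integer formats are packed with the format's own write functions,
 * since util_pack_color() does not handle them.
 */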
static void
util_clear_color_texture_helper(struct pipe_transfer *dst_trans,
                                ubyte *dst_map,
                                enum pipe_format format,
                                const union pipe_color_union *color,
                                unsigned width, unsigned height, unsigned depth)
{
   union util_color uc;

   assert(dst_trans->stride > 0);

   if (util_format_is_pure_integer(format)) {
      /*
       * We expect int/uint clear values here, though some APIs
       * might disagree (but in any case util_pack_color()
       * couldn't handle it)...
       */
      if (util_format_is_pure_sint(format)) {
         util_format_write_4i(format, color->i, 0, &uc, 0, 0, 0, 1, 1);
      } else {
         assert(util_format_is_pure_uint(format));
         util_format_write_4ui(format, color->ui, 0, &uc, 0, 0, 0, 1, 1);
      }
   } else {
      util_pack_color(color->f, format, &uc);
   }

   util_fill_box(dst_map, format,
                 dst_trans->stride, dst_trans->layer_stride,
                 0, 0, 0, width, height, depth, &uc);
}

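/**
 * Map a 3D region of a color texture and fill it with the given clear
 * color.  Used by util_clear_render_target() and util_clear_texture().
 */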
static void
util_clear_color_texture(struct pipe_context *pipe,
                         struct pipe_resource *texture,
                         enum pipe_format format,
                         const union pipe_color_union *color,
                         unsigned level,
                         unsigned dstx, unsigned dsty, unsigned dstz,
                         unsigned width, unsigned height, unsigned depth)
{
   struct pipe_transfer *dst_trans;
   ubyte *dst_map;

   dst_map = pipe_transfer_map_3d(pipe,
                                  texture,
                                  level,
                                  PIPE_TRANSFER_WRITE,
                                  dstx, dsty, dstz,
                                  width, height, depth,
                                  &dst_trans);
   if (!dst_map)
      return;

   if (dst_trans->stride > 0) {
      util_clear_color_texture_helper(dst_trans, dst_map, format, color,
                                      width, height, depth);
   }
   pipe->transfer_unmap(pipe, dst_trans);
}

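/* Replicate a ubyte value into both bytes of a ushort, e.g. 0xab -> 0xabab. */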
#define UBYTE_TO_USHORT(B) ((B) | ((B) << 8))

/**
 * Fallback for the pipe->clear_render_target() function.
 * XXX this looks too hackish to be really useful.
 * cpp > 4 looks like a gross hack at best...
 * Also, these transfer fallbacks can't be used when clearing
 * multisampled surfaces, for instance.
 * Clears all bound layers.
 */
void
util_clear_render_target(struct pipe_context *pipe,
                         struct pipe_surface *dst,
                         const union pipe_color_union *color,
                         unsigned dstx, unsigned dsty,
                         unsigned width, unsigned height)
{
   struct pipe_transfer *dst_trans;
   ubyte *dst_map;

   assert(dst->texture);
   if (!dst->texture)
      return;

   if (dst->texture->target == PIPE_BUFFER) {
      /*
       * The fill naturally works on the surface format; the transfer,
       * however, uses the resource format, which is just bytes for buffers.
       */
      unsigned dx, w;
      unsigned pixstride = util_format_get_blocksize(dst->format);
      dx = (dst->u.buf.first_element + dstx) * pixstride;
      w = width * pixstride;
      dst_map = pipe_transfer_map(pipe,
                                  dst->texture,
                                  0, 0,
                                  PIPE_TRANSFER_WRITE,
                                  dx, 0, w, 1,
                                  &dst_trans);
      if (dst_map) {
         util_clear_color_texture_helper(dst_trans, dst_map, dst->format,
                                         color, width, height, 1);
         pipe->transfer_unmap(pipe, dst_trans);
      }
   }
   else {
      unsigned depth = dst->u.tex.last_layer - dst->u.tex.first_layer + 1;
      util_clear_color_texture(pipe, dst->texture, dst->format, color,
                               dst->u.tex.level, dstx, dsty,
                               dst->u.tex.first_layer, width, height, depth);
   }
}

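/**
 * Clear the depth and/or stencil components (per clear_flags) of a
 * region of a Z/S texture.  'zstencil' holds the clear value packed
 * for the given texture format.
 */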
static void
util_clear_depth_stencil_texture(struct pipe_context *pipe,
                                 struct pipe_resource *texture,
                                 enum pipe_format format,
                                 unsigned clear_flags,
                                 uint64_t zstencil, unsigned level,
                                 unsigned dstx, unsigned dsty, unsigned dstz,
                                 unsigned width, unsigned height, unsigned depth)
{
   struct pipe_transfer *dst_trans;
   ubyte *dst_map;
   boolean need_rmw = FALSE;
   unsigned dst_stride;
   ubyte *dst_layer;
   unsigned i, j, layer;

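   /* If only one of depth or stencil is being cleared on a combined
    * depth+stencil format, we have to read-modify-write so that the
    * other component is preserved.
    */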
   if ((clear_flags & PIPE_CLEAR_DEPTHSTENCIL) &&
       ((clear_flags & PIPE_CLEAR_DEPTHSTENCIL) != PIPE_CLEAR_DEPTHSTENCIL) &&
       util_format_is_depth_and_stencil(format))
      need_rmw = TRUE;

   dst_map = pipe_transfer_map_3d(pipe,
                                  texture,
                                  level,
                                  (need_rmw ? PIPE_TRANSFER_READ_WRITE :
                                              PIPE_TRANSFER_WRITE),
                                  dstx, dsty, dstz,
                                  width, height, depth, &dst_trans);
   assert(dst_map);
   if (!dst_map)
      return;

   dst_stride = dst_trans->stride;
   dst_layer = dst_map;
   assert(dst_trans->stride > 0);

   for (layer = 0; layer < depth; layer++) {
      dst_map = dst_layer;

      switch (util_format_get_blocksize(format)) {
      case 1:
         assert(format == PIPE_FORMAT_S8_UINT);
         if (dst_stride == width)
            memset(dst_map, (uint8_t) zstencil, height * width);
         else {
            for (i = 0; i < height; i++) {
               memset(dst_map, (uint8_t) zstencil, width);
               dst_map += dst_stride;
            }
         }
         break;
      case 2:
         assert(format == PIPE_FORMAT_Z16_UNORM);
         for (i = 0; i < height; i++) {
            uint16_t *row = (uint16_t *)dst_map;
            for (j = 0; j < width; j++)
               *row++ = (uint16_t) zstencil;
            dst_map += dst_stride;
         }
         break;
      case 4:
         if (!need_rmw) {
            for (i = 0; i < height; i++) {
               uint32_t *row = (uint32_t *)dst_map;
               for (j = 0; j < width; j++)
                  *row++ = (uint32_t) zstencil;
               dst_map += dst_stride;
            }
         }
         else {
            uint32_t dst_mask;
            if (format == PIPE_FORMAT_Z24_UNORM_S8_UINT)
               dst_mask = 0x00ffffff;
            else {
               assert(format == PIPE_FORMAT_S8_UINT_Z24_UNORM);
               dst_mask = 0xffffff00;
            }
            if (clear_flags & PIPE_CLEAR_DEPTH)
               dst_mask = ~dst_mask;
            for (i = 0; i < height; i++) {
               uint32_t *row = (uint32_t *)dst_map;
               for (j = 0; j < width; j++) {
                  uint32_t tmp = *row & dst_mask;
                  *row++ = tmp | ((uint32_t) zstencil & ~dst_mask);
               }
               dst_map += dst_stride;
            }
         }
         break;
      case 8:
         if (!need_rmw) {
            for (i = 0; i < height; i++) {
               uint64_t *row = (uint64_t *)dst_map;
               for (j = 0; j < width; j++)
                  *row++ = zstencil;
               dst_map += dst_stride;
            }
         }
         else {
            uint64_t src_mask;

            if (clear_flags & PIPE_CLEAR_DEPTH)
               src_mask = 0x00000000ffffffffull;
            else
               src_mask = 0x000000ff00000000ull;

            for (i = 0; i < height; i++) {
               uint64_t *row = (uint64_t *)dst_map;
               for (j = 0; j < width; j++) {
                  uint64_t tmp = *row & ~src_mask;
                  *row++ = tmp | (zstencil & src_mask);
               }
               dst_map += dst_stride;
            }
         }
         break;
      default:
         assert(0);
         break;
      }
      dst_layer += dst_trans->layer_stride;
   }

   pipe->transfer_unmap(pipe, dst_trans);
}

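/**
 * Clear a texture region with an arbitrary clear value: unpack 'data'
 * according to the resource format and dispatch to the color or
 * depth/stencil clear path.
 */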
void
util_clear_texture(struct pipe_context *pipe,
                   struct pipe_resource *tex,
                   unsigned level,
                   const struct pipe_box *box,
                   const void *data)
{
   const struct util_format_description *desc =
          util_format_description(tex->format);

   if (level > tex->last_level)
      return;

   if (util_format_is_depth_or_stencil(tex->format)) {
      unsigned clear = 0;
      float depth = 0.0f;
      uint8_t stencil = 0;
      uint64_t zstencil;

      if (util_format_has_depth(desc)) {
         clear |= PIPE_CLEAR_DEPTH;
         desc->unpack_z_float(&depth, 0, data, 0, 1, 1);
      }

      if (util_format_has_stencil(desc)) {
         clear |= PIPE_CLEAR_STENCIL;
         desc->unpack_s_8uint(&stencil, 0, data, 0, 1, 1);
      }

      zstencil = util_pack64_z_stencil(tex->format, depth, stencil);

      util_clear_depth_stencil_texture(pipe, tex, tex->format, clear, zstencil,
                                       level, box->x, box->y, box->z,
                                       box->width, box->height, box->depth);
   } else {
      union pipe_color_union color;
      if (util_format_is_pure_uint(tex->format))
         desc->unpack_rgba_uint(color.ui, 0, data, 0, 1, 1);
      else if (util_format_is_pure_sint(tex->format))
         desc->unpack_rgba_sint(color.i, 0, data, 0, 1, 1);
      else
         desc->unpack_rgba_float(color.f, 0, data, 0, 1, 1);

      util_clear_color_texture(pipe, tex, tex->format, &color, level,
                               box->x, box->y, box->z,
                               box->width, box->height, box->depth);
   }
}

/**
 * Fallback for the pipe->clear_depth_stencil() function.
 * sw fallback doesn't look terribly useful here.
 * Also, these transfer fallbacks can't be used when clearing
 * multisampled surfaces, for instance.
 * Clears all bound layers.
 */
void
util_clear_depth_stencil(struct pipe_context *pipe,
                         struct pipe_surface *dst,
                         unsigned clear_flags,
                         double depth,
                         unsigned stencil,
                         unsigned dstx, unsigned dsty,
                         unsigned width, unsigned height)
{
   uint64_t zstencil;
   unsigned max_layer;

   assert(dst->texture);
   if (!dst->texture)
      return;

   zstencil = util_pack64_z_stencil(dst->format, depth, stencil);
   max_layer = dst->u.tex.last_layer - dst->u.tex.first_layer;
   util_clear_depth_stencil_texture(pipe, dst->texture, dst->format,
                                    clear_flags, zstencil, dst->u.tex.level,
                                    dstx, dsty, dst->u.tex.first_layer,
                                    width, height, max_layer + 1);
}

/* Return whether the box is totally inside the resource.
 */
static boolean
is_box_inside_resource(const struct pipe_resource *res,
                       const struct pipe_box *box,
                       unsigned level)
{
   unsigned width = 1, height = 1, depth = 1;

   switch (res->target) {
   case PIPE_BUFFER:
      width = res->width0;
      height = 1;
      depth = 1;
      break;
   case PIPE_TEXTURE_1D:
      width = u_minify(res->width0, level);
      height = 1;
      depth = 1;
      break;
   case PIPE_TEXTURE_2D:
   case PIPE_TEXTURE_RECT:
      width = u_minify(res->width0, level);
      height = u_minify(res->height0, level);
      depth = 1;
      break;
   case PIPE_TEXTURE_3D:
      width = u_minify(res->width0, level);
      height = u_minify(res->height0, level);
      depth = u_minify(res->depth0, level);
      break;
   case PIPE_TEXTURE_CUBE:
      width = u_minify(res->width0, level);
      height = u_minify(res->height0, level);
      depth = 6;
      break;
   case PIPE_TEXTURE_1D_ARRAY:
      width = u_minify(res->width0, level);
      height = 1;
      depth = res->array_size;
      break;
   case PIPE_TEXTURE_2D_ARRAY:
      width = u_minify(res->width0, level);
      height = u_minify(res->height0, level);
      depth = res->array_size;
      break;
   case PIPE_TEXTURE_CUBE_ARRAY:
      width = u_minify(res->width0, level);
      height = u_minify(res->height0, level);
      depth = res->array_size;
      assert(res->array_size % 6 == 0);
      break;
   case PIPE_MAX_TEXTURE_TYPES:
      break;
   }

   return box->x >= 0 &&
          box->x + box->width <= (int) width &&
          box->y >= 0 &&
          box->y + box->height <= (int) height &&
          box->z >= 0 &&
          box->z + box->depth <= (int) depth;
}

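/* Return the resource's sample count, treating 0 samples as 1. */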
static unsigned
get_sample_count(const struct pipe_resource *res)
{
   return res->nr_samples ? res->nr_samples : 1;
}

/**
 * Check if a blit() command can be implemented with a resource_copy_region().
 * If tight_format_check is true, only allow the resource_copy_region() if
 * the blit src/dst formats are identical, ignoring the resource formats.
 * Otherwise, check for format casting and compatibility.
 */
boolean
util_can_blit_via_copy_region(const struct pipe_blit_info *blit,
                              boolean tight_format_check)
{
   const struct util_format_description *src_desc, *dst_desc;

   src_desc = util_format_description(blit->src.resource->format);
   dst_desc = util_format_description(blit->dst.resource->format);

   if (tight_format_check) {
      /* no format conversions allowed */
      if (blit->src.format != blit->dst.format) {
         return FALSE;
      }
   }
   else {
      /* do loose format compatibility checking */
      if (blit->src.resource->format != blit->src.format ||
          blit->dst.resource->format != blit->dst.format ||
          !util_is_format_compatible(src_desc, dst_desc)) {
         return FALSE;
      }
   }

   unsigned mask = util_format_get_mask(blit->dst.format);

   /* No masks, no filtering, no scissor, no blending */
   if ((blit->mask & mask) != mask ||
       blit->filter != PIPE_TEX_FILTER_NEAREST ||
       blit->scissor_enable ||
       blit->num_window_rectangles > 0 ||
       blit->alpha_blend) {
      return FALSE;
   }

   /* Only the src box can have negative dims for flipping */
   assert(blit->dst.box.width >= 1);
   assert(blit->dst.box.height >= 1);
   assert(blit->dst.box.depth >= 1);

   /* No scaling or flipping */
   if (blit->src.box.width != blit->dst.box.width ||
       blit->src.box.height != blit->dst.box.height ||
       blit->src.box.depth != blit->dst.box.depth) {
      return FALSE;
   }

   /* No out-of-bounds access. */
   if (!is_box_inside_resource(blit->src.resource, &blit->src.box,
                               blit->src.level) ||
       !is_box_inside_resource(blit->dst.resource, &blit->dst.box,
                               blit->dst.level)) {
      return FALSE;
   }

   /* Sample counts must match. */
   if (get_sample_count(blit->src.resource) !=
       get_sample_count(blit->dst.resource)) {
      return FALSE;
   }

   return TRUE;
}

/**
 * Try to do a blit using resource_copy_region.  The function calls
 * resource_copy_region if the blit description is compatible with it.
 *
 * It returns TRUE if the blit was done using resource_copy_region.
 *
 * It returns FALSE otherwise and the caller must fall back to a more generic
 * codepath for the blit operation (e.g. by using u_blitter).
 */
boolean
util_try_blit_via_copy_region(struct pipe_context *ctx,
                              const struct pipe_blit_info *blit)
{
   if (util_can_blit_via_copy_region(blit, FALSE)) {
      ctx->resource_copy_region(ctx, blit->dst.resource, blit->dst.level,
                                blit->dst.box.x, blit->dst.box.y,
                                blit->dst.box.z,
                                blit->src.resource, blit->src.level,
                                &blit->src.box);
      return TRUE;
   }
   else {
      return FALSE;
   }
}