/**************************************************************************
 *
 * Copyright 2009 VMware, Inc. All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/**
 * @file
 * Surface utility functions.
 *
 * @author Brian Paul
 */


#include "pipe/p_defines.h"
#include "pipe/p_screen.h"
#include "pipe/p_state.h"

#include "util/format/u_format.h"
#include "util/u_inlines.h"
#include "util/u_rect.h"
#include "util/u_surface.h"
#include "util/u_pack_color.h"
#include "util/u_memset.h"

/**
 * Initialize a pipe_surface object. 'surf' is considered to have
 * uninitialized contents.
 */
void
u_surface_default_template(struct pipe_surface *surf,
                           const struct pipe_resource *texture)
{
   memset(surf, 0, sizeof(*surf));

   surf->format = texture->format;
}
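
/*
 * Usage sketch (illustrative, not part of the original file): fill in the
 * remaining pipe_surface fields and create the surface through the context.
 * 'pipe' and 'texture' are assumed to be an existing pipe_context and
 * pipe_resource supplied by the caller.
 *
 *    struct pipe_surface tmpl;
 *    struct pipe_surface *surf;
 *
 *    u_surface_default_template(&tmpl, texture);
 *    tmpl.u.tex.level = 0;
 *    tmpl.u.tex.first_layer = 0;
 *    tmpl.u.tex.last_layer = 0;
 *    surf = pipe->create_surface(pipe, texture, &tmpl);
 */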


/**
 * Copy 3D box from one place to another.
 * Position and sizes are in pixels.
 */
void
util_copy_box(ubyte * dst,
              enum pipe_format format,
              unsigned dst_stride, unsigned dst_slice_stride,
              unsigned dst_x, unsigned dst_y, unsigned dst_z,
              unsigned width, unsigned height, unsigned depth,
              const ubyte * src,
              int src_stride, unsigned src_slice_stride,
              unsigned src_x, unsigned src_y, unsigned src_z)
{
   unsigned z;
   dst += dst_z * dst_slice_stride;
   src += src_z * src_slice_stride;
   for (z = 0; z < depth; ++z) {
      util_copy_rect(dst,
                     format,
                     dst_stride,
                     dst_x, dst_y,
                     width, height,
                     src,
                     src_stride,
                     src_x, src_y);

      dst += dst_slice_stride;
      src += src_slice_stride;
   }
}
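
/*
 * Illustrative sketch (not part of the original file): copy a w x h x d
 * sub-box between two CPU mappings obtained from transfer_map, honoring
 * each transfer's row and layer strides.  'res', 'dst_map'/'src_map' and
 * 'dst_trans'/'src_trans' are assumed to come from earlier transfer_map
 * calls, as in util_resource_copy_region() below.
 *
 *    util_copy_box(dst_map, res->format,
 *                  dst_trans->stride, dst_trans->layer_stride,
 *                  0, 0, 0,
 *                  w, h, d,
 *                  src_map,
 *                  src_trans->stride, src_trans->layer_stride,
 *                  0, 0, 0);
 */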


void
util_fill_rect(ubyte * dst,
               enum pipe_format format,
               unsigned dst_stride,
               unsigned dst_x,
               unsigned dst_y,
               unsigned width,
               unsigned height,
               union util_color *uc)
{
   const struct util_format_description *desc = util_format_description(format);
   unsigned i, j;
   unsigned width_size;
   int blocksize = desc->block.bits / 8;
   int blockwidth = desc->block.width;
   int blockheight = desc->block.height;

   assert(blocksize > 0);
   assert(blockwidth > 0);
   assert(blockheight > 0);

   dst_x /= blockwidth;
   dst_y /= blockheight;
   width = (width + blockwidth - 1)/blockwidth;
   height = (height + blockheight - 1)/blockheight;

   dst += dst_x * blocksize;
   dst += dst_y * dst_stride;
   width_size = width * blocksize;

   switch (blocksize) {
   case 1:
      if (dst_stride == width_size)
         memset(dst, uc->ub, height * width_size);
      else {
         for (i = 0; i < height; i++) {
            memset(dst, uc->ub, width_size);
            dst += dst_stride;
         }
      }
      break;
   case 2:
      for (i = 0; i < height; i++) {
         uint16_t *row = (uint16_t *)dst;
         for (j = 0; j < width; j++)
            *row++ = uc->us;
         dst += dst_stride;
      }
      break;
   case 4:
      for (i = 0; i < height; i++) {
         util_memset32(dst, uc->ui[0], width);
         dst += dst_stride;
      }
      break;
   default:
      for (i = 0; i < height; i++) {
         ubyte *row = dst;
         for (j = 0; j < width; j++) {
            memcpy(row, uc, blocksize);
            row += blocksize;
         }
         dst += dst_stride;
      }
      break;
   }
}


void
util_fill_box(ubyte * dst,
              enum pipe_format format,
              unsigned stride,
              unsigned layer_stride,
              unsigned x,
              unsigned y,
              unsigned z,
              unsigned width,
              unsigned height,
              unsigned depth,
              union util_color *uc)
{
   unsigned layer;
   dst += z * layer_stride;
   for (layer = z; layer < depth; layer++) {
      util_fill_rect(dst, format,
                     stride,
                     x, y, width, height, uc);
      dst += layer_stride;
   }
}
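
/*
 * Illustrative sketch (not part of the original file): pack a
 * pipe_color_union into the destination format and fill a mapped box with
 * it, the same pattern util_clear_color_texture_helper() uses below.
 * 'format', 'color', 'dst_map', 'dst_trans' and the box dimensions are
 * assumed to come from the caller.
 *
 *    union util_color uc;
 *
 *    util_pack_color_union(format, &uc, color);
 *    util_fill_box(dst_map, format,
 *                  dst_trans->stride, dst_trans->layer_stride,
 *                  0, 0, 0, width, height, depth, &uc);
 */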


/**
 * Fallback function for pipe->resource_copy_region().
 * We support copying between different formats (including compressed/
 * uncompressed) if the bytes per block or pixel matches. If copying
 * compressed -> uncompressed, the dst region is reduced by the block
 * width, height. If copying uncompressed -> compressed, the dest region
 * is expanded by the block width, height. See GL_ARB_copy_image.
 * Note: (X,Y)=(0,0) is always the upper-left corner.
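 *
 * Worked example (illustrative, not from the original comment): copying a
 * 64x64 pixel region from a DXT1 texture (4x4 blocks, 8 bytes per block) to
 * a PIPE_FORMAT_R16G16B16A16_UINT texture (8 bytes per pixel) shrinks the
 * destination region to 16x16 pixels; copying the other way expands a
 * 16x16 pixel source region to a 64x64 pixel destination region.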
 */
void
util_resource_copy_region(struct pipe_context *pipe,
                          struct pipe_resource *dst,
                          unsigned dst_level,
                          unsigned dst_x, unsigned dst_y, unsigned dst_z,
                          struct pipe_resource *src,
                          unsigned src_level,
                          const struct pipe_box *src_box_in)
{
   struct pipe_transfer *src_trans, *dst_trans;
   uint8_t *dst_map;
   const uint8_t *src_map;
   enum pipe_format src_format;
   enum pipe_format dst_format;
   struct pipe_box src_box, dst_box;
   unsigned src_bs, dst_bs, src_bw, dst_bw, src_bh, dst_bh;

   assert(src && dst);
   if (!src || !dst)
      return;

   assert((src->target == PIPE_BUFFER && dst->target == PIPE_BUFFER) ||
          (src->target != PIPE_BUFFER && dst->target != PIPE_BUFFER));

   src_format = src->format;
   dst_format = dst->format;

   /* init src box */
   src_box = *src_box_in;

   /* init dst box */
   dst_box.x = dst_x;
   dst_box.y = dst_y;
   dst_box.z = dst_z;
   dst_box.width = src_box.width;
   dst_box.height = src_box.height;
   dst_box.depth = src_box.depth;

   src_bs = util_format_get_blocksize(src_format);
   src_bw = util_format_get_blockwidth(src_format);
   src_bh = util_format_get_blockheight(src_format);
   dst_bs = util_format_get_blocksize(dst_format);
   dst_bw = util_format_get_blockwidth(dst_format);
   dst_bh = util_format_get_blockheight(dst_format);

   /* Note: all box positions and sizes are in pixels */
   if (src_bw > 1 && dst_bw == 1) {
      /* Copy from compressed to uncompressed.
       * Shrink dest box by the src block size.
       */
      dst_box.width /= src_bw;
      dst_box.height /= src_bh;
   }
   else if (src_bw == 1 && dst_bw > 1) {
      /* Copy from uncompressed to compressed.
       * Expand dest box by the dest block size.
       */
      dst_box.width *= dst_bw;
      dst_box.height *= dst_bh;
   }
   else {
      /* compressed -> compressed or uncompressed -> uncompressed copy */
      assert(src_bw == dst_bw);
      assert(src_bh == dst_bh);
   }

   assert(src_bs == dst_bs);
   if (src_bs != dst_bs) {
      /* This can happen if we fail to do format checking beforehand.
       * Don't crash below.
       */
      return;
   }

   /* check that region boxes are block aligned */
   assert(src_box.x % src_bw == 0);
   assert(src_box.y % src_bh == 0);
   assert(dst_box.x % dst_bw == 0);
   assert(dst_box.y % dst_bh == 0);

   /* check that region boxes are not out of bounds */
   assert(src_box.x + src_box.width <= (int)u_minify(src->width0, src_level));
   assert(src_box.y + src_box.height <= (int)u_minify(src->height0, src_level));
   assert(dst_box.x + dst_box.width <= (int)u_minify(dst->width0, dst_level));
   assert(dst_box.y + dst_box.height <= (int)u_minify(dst->height0, dst_level));

   /* check that total number of src, dest bytes match */
   assert((src_box.width / src_bw) * (src_box.height / src_bh) * src_bs ==
          (dst_box.width / dst_bw) * (dst_box.height / dst_bh) * dst_bs);

   src_map = pipe->transfer_map(pipe,
                                src,
                                src_level,
                                PIPE_TRANSFER_READ,
                                &src_box, &src_trans);
   assert(src_map);
   if (!src_map) {
      goto no_src_map;
   }

   dst_map = pipe->transfer_map(pipe,
                                dst,
                                dst_level,
                                PIPE_TRANSFER_WRITE |
                                PIPE_TRANSFER_DISCARD_RANGE, &dst_box,
                                &dst_trans);
   assert(dst_map);
   if (!dst_map) {
      goto no_dst_map;
   }

   if (dst->target == PIPE_BUFFER && src->target == PIPE_BUFFER) {
      assert(src_box.height == 1);
      assert(src_box.depth == 1);
      memcpy(dst_map, src_map, src_box.width);
   } else {
      util_copy_box(dst_map,
                    src_format,
                    dst_trans->stride, dst_trans->layer_stride,
                    0, 0, 0,
                    src_box.width, src_box.height, src_box.depth,
                    src_map,
                    src_trans->stride, src_trans->layer_stride,
                    0, 0, 0);
   }

   pipe->transfer_unmap(pipe, dst_trans);
no_dst_map:
   pipe->transfer_unmap(pipe, src_trans);
no_src_map:
   ;
}

static void
util_clear_color_texture_helper(struct pipe_transfer *dst_trans,
                                ubyte *dst_map,
                                enum pipe_format format,
                                const union pipe_color_union *color,
                                unsigned width, unsigned height, unsigned depth)
{
   union util_color uc;

   assert(dst_trans->stride > 0);

   util_pack_color_union(format, &uc, color);

   util_fill_box(dst_map, format,
                 dst_trans->stride, dst_trans->layer_stride,
                 0, 0, 0, width, height, depth, &uc);
}

static void
util_clear_color_texture(struct pipe_context *pipe,
                         struct pipe_resource *texture,
                         enum pipe_format format,
                         const union pipe_color_union *color,
                         unsigned level,
                         unsigned dstx, unsigned dsty, unsigned dstz,
                         unsigned width, unsigned height, unsigned depth)
{
   struct pipe_transfer *dst_trans;
   ubyte *dst_map;

   dst_map = pipe_transfer_map_3d(pipe,
                                  texture,
                                  level,
                                  PIPE_TRANSFER_WRITE,
                                  dstx, dsty, dstz,
                                  width, height, depth,
                                  &dst_trans);
   if (!dst_map)
      return;

   if (dst_trans->stride > 0) {
      util_clear_color_texture_helper(dst_trans, dst_map, format, color,
                                      width, height, depth);
   }
   pipe->transfer_unmap(pipe, dst_trans);
}


#define UBYTE_TO_USHORT(B) ((B) | ((B) << 8))


/**
 * Fallback for pipe->clear_render_target() function.
 * XXX this looks too hackish to be really useful.
 * cpp > 4 looks like a gross hack at best...
 * Plus can't use these transfer fallbacks when clearing
 * multisampled surfaces for instance.
 * Clears all bound layers.
 */
void
util_clear_render_target(struct pipe_context *pipe,
                         struct pipe_surface *dst,
                         const union pipe_color_union *color,
                         unsigned dstx, unsigned dsty,
                         unsigned width, unsigned height)
{
   struct pipe_transfer *dst_trans;
   ubyte *dst_map;

   assert(dst->texture);
   if (!dst->texture)
      return;

   if (dst->texture->target == PIPE_BUFFER) {
      /*
       * The fill naturally works on the surface format, however
       * the transfer uses the resource format, which is just bytes for buffers.
       */
      unsigned dx, w;
      unsigned pixstride = util_format_get_blocksize(dst->format);
      dx = (dst->u.buf.first_element + dstx) * pixstride;
      w = width * pixstride;
      dst_map = pipe_transfer_map(pipe,
                                  dst->texture,
                                  0, 0,
                                  PIPE_TRANSFER_WRITE,
                                  dx, 0, w, 1,
                                  &dst_trans);
      if (dst_map) {
         util_clear_color_texture_helper(dst_trans, dst_map, dst->format,
                                         color, width, height, 1);
         pipe->transfer_unmap(pipe, dst_trans);
      }
   }
   else {
      unsigned depth = dst->u.tex.last_layer - dst->u.tex.first_layer + 1;
      util_clear_color_texture(pipe, dst->texture, dst->format, color,
                               dst->u.tex.level, dstx, dsty,
                               dst->u.tex.first_layer, width, height, depth);
   }
}

static void
util_fill_zs_rect(ubyte *dst_map,
                  enum pipe_format format,
                  bool need_rmw,
                  unsigned clear_flags,
                  unsigned dst_stride,
                  unsigned width,
                  unsigned height,
                  uint64_t zstencil)
{
   unsigned i, j;
   switch (util_format_get_blocksize(format)) {
   case 1:
      assert(format == PIPE_FORMAT_S8_UINT);
      if (dst_stride == width)
         memset(dst_map, (uint8_t) zstencil, height * width);
      else {
         for (i = 0; i < height; i++) {
            memset(dst_map, (uint8_t) zstencil, width);
            dst_map += dst_stride;
         }
      }
      break;
   case 2:
      assert(format == PIPE_FORMAT_Z16_UNORM);
      for (i = 0; i < height; i++) {
         uint16_t *row = (uint16_t *)dst_map;
         for (j = 0; j < width; j++)
            *row++ = (uint16_t) zstencil;
         dst_map += dst_stride;
      }
      break;
   case 4:
      if (!need_rmw) {
         for (i = 0; i < height; i++) {
            util_memset32(dst_map, (uint32_t)zstencil, width);
            dst_map += dst_stride;
         }
      }
      else {
         uint32_t dst_mask;
         if (format == PIPE_FORMAT_Z24_UNORM_S8_UINT)
            dst_mask = 0x00ffffff;
         else {
            assert(format == PIPE_FORMAT_S8_UINT_Z24_UNORM);
            dst_mask = 0xffffff00;
         }
         if (clear_flags & PIPE_CLEAR_DEPTH)
            dst_mask = ~dst_mask;
         for (i = 0; i < height; i++) {
            uint32_t *row = (uint32_t *)dst_map;
            for (j = 0; j < width; j++) {
               uint32_t tmp = *row & dst_mask;
               *row++ = tmp | ((uint32_t) zstencil & ~dst_mask);
            }
            dst_map += dst_stride;
         }
      }
      break;
   case 8:
      if (!need_rmw) {
         for (i = 0; i < height; i++) {
            uint64_t *row = (uint64_t *)dst_map;
            for (j = 0; j < width; j++)
               *row++ = zstencil;
            dst_map += dst_stride;
         }
      }
      else {
         uint64_t src_mask;

         if (clear_flags & PIPE_CLEAR_DEPTH)
            src_mask = 0x00000000ffffffffull;
         else
            src_mask = 0x000000ff00000000ull;

         for (i = 0; i < height; i++) {
            uint64_t *row = (uint64_t *)dst_map;
            for (j = 0; j < width; j++) {
               uint64_t tmp = *row & ~src_mask;
               *row++ = tmp | (zstencil & src_mask);
            }
            dst_map += dst_stride;
         }
      }
      break;
   default:
      assert(0);
      break;
   }
}

void
util_fill_zs_box(ubyte *dst,
                 enum pipe_format format,
                 bool need_rmw,
                 unsigned clear_flags,
                 unsigned stride,
                 unsigned layer_stride,
                 unsigned width,
                 unsigned height,
                 unsigned depth,
                 uint64_t zstencil)
{
   unsigned layer;

   for (layer = 0; layer < depth; layer++) {
      util_fill_zs_rect(dst, format, need_rmw, clear_flags, stride,
                        width, height, zstencil);
      dst += layer_stride;
   }
}
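
/*
 * Illustrative sketch (not part of the original file): pack a depth/stencil
 * clear value and fill a mapped box with it, the same pattern
 * util_clear_depth_stencil_texture() uses below.  A read-modify-write pass
 * (need_rmw) is only required when clearing just one component of a
 * combined depth+stencil format.  'format', 'dst_map', 'dst_trans' and the
 * box dimensions are assumed to come from the caller.
 *
 *    uint64_t zstencil = util_pack64_z_stencil(format, depth_val, stencil_val);
 *
 *    util_fill_zs_box(dst_map, format, false, PIPE_CLEAR_DEPTHSTENCIL,
 *                     dst_trans->stride, dst_trans->layer_stride,
 *                     width, height, depth, zstencil);
 */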

static void
util_clear_depth_stencil_texture(struct pipe_context *pipe,
                                 struct pipe_resource *texture,
                                 enum pipe_format format,
                                 unsigned clear_flags,
                                 uint64_t zstencil, unsigned level,
                                 unsigned dstx, unsigned dsty, unsigned dstz,
                                 unsigned width, unsigned height, unsigned depth)
{
   struct pipe_transfer *dst_trans;
   ubyte *dst_map;
   boolean need_rmw = FALSE;

   if ((clear_flags & PIPE_CLEAR_DEPTHSTENCIL) &&
       ((clear_flags & PIPE_CLEAR_DEPTHSTENCIL) != PIPE_CLEAR_DEPTHSTENCIL) &&
       util_format_is_depth_and_stencil(format))
      need_rmw = TRUE;

   dst_map = pipe_transfer_map_3d(pipe,
                                  texture,
                                  level,
                                  (need_rmw ? PIPE_TRANSFER_READ_WRITE :
                                              PIPE_TRANSFER_WRITE),
                                  dstx, dsty, dstz,
                                  width, height, depth, &dst_trans);
   assert(dst_map);
   if (!dst_map)
      return;

   assert(dst_trans->stride > 0);

   util_fill_zs_box(dst_map, format, need_rmw, clear_flags,
                    dst_trans->stride,
                    dst_trans->layer_stride, width, height,
                    depth, zstencil);

   pipe->transfer_unmap(pipe, dst_trans);
}


void
util_clear_texture(struct pipe_context *pipe,
                   struct pipe_resource *tex,
                   unsigned level,
                   const struct pipe_box *box,
                   const void *data)
{
   const struct util_format_description *desc =
          util_format_description(tex->format);

   if (level > tex->last_level)
      return;

   if (util_format_is_depth_or_stencil(tex->format)) {
      unsigned clear = 0;
      float depth = 0.0f;
      uint8_t stencil = 0;
      uint64_t zstencil;

      if (util_format_has_depth(desc)) {
         clear |= PIPE_CLEAR_DEPTH;
         util_format_unpack_z_float(tex->format, &depth, data, 1);
      }

      if (util_format_has_stencil(desc)) {
         clear |= PIPE_CLEAR_STENCIL;
         util_format_unpack_s_8uint(tex->format, &stencil, data, 1);
      }

      zstencil = util_pack64_z_stencil(tex->format, depth, stencil);

      util_clear_depth_stencil_texture(pipe, tex, tex->format, clear, zstencil,
                                       level, box->x, box->y, box->z,
                                       box->width, box->height, box->depth);
   } else {
      union pipe_color_union color;
      util_format_unpack_rgba(tex->format, color.ui, data, 1);

      util_clear_color_texture(pipe, tex, tex->format, &color, level,
                               box->x, box->y, box->z,
                               box->width, box->height, box->depth);
   }
}
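
/*
 * Illustrative sketch (not part of the original file): clear a sub-box of a
 * color texture to opaque red.  'data' must be a single texel packed in the
 * resource's format, so a packed util_color works for color formats.
 * 'pipe', 'tex' and the box coordinates are assumed to come from the caller;
 * u_box_3d() is declared in util/u_box.h.
 *
 *    union pipe_color_union red;
 *    union util_color uc;
 *    struct pipe_box box;
 *
 *    red.f[0] = 1.0f; red.f[1] = 0.0f; red.f[2] = 0.0f; red.f[3] = 1.0f;
 *    util_pack_color_union(tex->format, &uc, &red);
 *    u_box_3d(x, y, z, w, h, d, &box);
 *    util_clear_texture(pipe, tex, 0, &box, &uc);
 */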


/**
 * Fallback for pipe->clear_depth_stencil() function.
 * sw fallback doesn't look terribly useful here.
 * Plus can't use these transfer fallbacks when clearing
 * multisampled surfaces for instance.
 * Clears all bound layers.
 */
void
util_clear_depth_stencil(struct pipe_context *pipe,
                         struct pipe_surface *dst,
                         unsigned clear_flags,
                         double depth,
                         unsigned stencil,
                         unsigned dstx, unsigned dsty,
                         unsigned width, unsigned height)
{
   uint64_t zstencil;
   unsigned max_layer;

   assert(dst->texture);
   if (!dst->texture)
      return;

   zstencil = util_pack64_z_stencil(dst->format, depth, stencil);
   max_layer = dst->u.tex.last_layer - dst->u.tex.first_layer;
   util_clear_depth_stencil_texture(pipe, dst->texture, dst->format,
                                    clear_flags, zstencil, dst->u.tex.level,
                                    dstx, dsty, dst->u.tex.first_layer,
                                    width, height, max_layer + 1);
}


/* Return whether the box is totally inside the resource.
 */
static boolean
is_box_inside_resource(const struct pipe_resource *res,
                       const struct pipe_box *box,
                       unsigned level)
{
   unsigned width = 1, height = 1, depth = 1;

   switch (res->target) {
   case PIPE_BUFFER:
      width = res->width0;
      height = 1;
      depth = 1;
      break;
   case PIPE_TEXTURE_1D:
      width = u_minify(res->width0, level);
      height = 1;
      depth = 1;
      break;
   case PIPE_TEXTURE_2D:
   case PIPE_TEXTURE_RECT:
      width = u_minify(res->width0, level);
      height = u_minify(res->height0, level);
      depth = 1;
      break;
   case PIPE_TEXTURE_3D:
      width = u_minify(res->width0, level);
      height = u_minify(res->height0, level);
      depth = u_minify(res->depth0, level);
      break;
   case PIPE_TEXTURE_CUBE:
      width = u_minify(res->width0, level);
      height = u_minify(res->height0, level);
      depth = 6;
      break;
   case PIPE_TEXTURE_1D_ARRAY:
      width = u_minify(res->width0, level);
      height = 1;
      depth = res->array_size;
      break;
   case PIPE_TEXTURE_2D_ARRAY:
      width = u_minify(res->width0, level);
      height = u_minify(res->height0, level);
      depth = res->array_size;
      break;
   case PIPE_TEXTURE_CUBE_ARRAY:
      width = u_minify(res->width0, level);
      height = u_minify(res->height0, level);
      depth = res->array_size;
      assert(res->array_size % 6 == 0);
      break;
   case PIPE_MAX_TEXTURE_TYPES:
      break;
   }

   return box->x >= 0 &&
          box->x + box->width <= (int) width &&
          box->y >= 0 &&
          box->y + box->height <= (int) height &&
          box->z >= 0 &&
          box->z + box->depth <= (int) depth;
}

static unsigned
get_sample_count(const struct pipe_resource *res)
{
   return res->nr_samples ? res->nr_samples : 1;
}


/**
 * Check if a blit() command can be implemented with a resource_copy_region().
 * If tight_format_check is true, only allow the resource_copy_region() if
 * the blit src/dst formats are identical, ignoring the resource formats.
 * Otherwise, check for format casting and compatibility.
 */
boolean
util_can_blit_via_copy_region(const struct pipe_blit_info *blit,
                              boolean tight_format_check)
{
   const struct util_format_description *src_desc, *dst_desc;

   src_desc = util_format_description(blit->src.resource->format);
   dst_desc = util_format_description(blit->dst.resource->format);

   if (tight_format_check) {
      /* no format conversions allowed */
      if (blit->src.format != blit->dst.format) {
         return FALSE;
      }
   }
   else {
      /* do loose format compatibility checking */
      if (blit->src.resource->format != blit->src.format ||
          blit->dst.resource->format != blit->dst.format ||
          !util_is_format_compatible(src_desc, dst_desc)) {
         return FALSE;
      }
   }

   unsigned mask = util_format_get_mask(blit->dst.format);

   /* No masks, no filtering, no scissor, no blending */
   if ((blit->mask & mask) != mask ||
       blit->filter != PIPE_TEX_FILTER_NEAREST ||
       blit->scissor_enable ||
       blit->num_window_rectangles > 0 ||
       blit->alpha_blend) {
      return FALSE;
   }

   /* Only the src box can have negative dims for flipping */
   assert(blit->dst.box.width >= 1);
   assert(blit->dst.box.height >= 1);
   assert(blit->dst.box.depth >= 1);

   /* No scaling or flipping */
   if (blit->src.box.width != blit->dst.box.width ||
       blit->src.box.height != blit->dst.box.height ||
       blit->src.box.depth != blit->dst.box.depth) {
      return FALSE;
   }

   /* No out-of-bounds access. */
   if (!is_box_inside_resource(blit->src.resource, &blit->src.box,
                               blit->src.level) ||
       !is_box_inside_resource(blit->dst.resource, &blit->dst.box,
                               blit->dst.level)) {
      return FALSE;
   }

   /* Sample counts must match. */
   if (get_sample_count(blit->src.resource) !=
       get_sample_count(blit->dst.resource)) {
      return FALSE;
   }

   return TRUE;
}


/**
 * Try to do a blit using resource_copy_region. The function calls
 * resource_copy_region if the blit description is compatible with it.
 *
 * It returns TRUE if the blit was done using resource_copy_region.
 *
 * It returns FALSE otherwise and the caller must fall back to a more generic
 * codepath for the blit operation (e.g. by using u_blitter).
 */
boolean
util_try_blit_via_copy_region(struct pipe_context *ctx,
                              const struct pipe_blit_info *blit)
{
   if (util_can_blit_via_copy_region(blit, FALSE)) {
      ctx->resource_copy_region(ctx, blit->dst.resource, blit->dst.level,
                                blit->dst.box.x, blit->dst.box.y,
                                blit->dst.box.z,
                                blit->src.resource, blit->src.level,
                                &blit->src.box);
      return TRUE;
   }
   else {
      return FALSE;
   }
}
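
/*
 * Typical driver usage (illustrative, not part of the original file): in a
 * driver's blit() implementation, try the copy_region path first and only
 * fall back to a generic path (e.g. u_blitter) when it cannot be used.
 * 'my_blit' is a hypothetical driver callback, not a function in this file.
 *
 *    static void
 *    my_blit(struct pipe_context *pipe, const struct pipe_blit_info *info)
 *    {
 *       if (util_try_blit_via_copy_region(pipe, info))
 *          return;
 *
 *       ... fall back to util_blitter_blit() or a shader-based path ...
 *    }
 */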