util: update some assertions in util_resource_copy_region()
src/gallium/auxiliary/util/u_surface.c (mesa.git)
/**************************************************************************
 *
 * Copyright 2009 VMware, Inc. All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/**
 * @file
 * Surface utility functions.
 *
 * @author Brian Paul
 */


#include "pipe/p_defines.h"
#include "pipe/p_screen.h"
#include "pipe/p_state.h"

#include "util/u_format.h"
#include "util/u_inlines.h"
#include "util/u_rect.h"
#include "util/u_surface.h"
#include "util/u_pack_color.h"


/**
 * Initialize a pipe_surface object. 'surf' is considered to have
 * uninitialized contents.
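 *
 * A minimal usage sketch (hypothetical; 'pipe', 'texture' and 'surf' are
 * illustrative names, not part of this file):
 *
 *    struct pipe_surface tmpl;
 *    u_surface_default_template(&tmpl, texture);
 *    surf = pipe->create_surface(pipe, texture, &tmpl);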
 */
void
u_surface_default_template(struct pipe_surface *surf,
                           const struct pipe_resource *texture)
{
   memset(surf, 0, sizeof(*surf));

   surf->format = texture->format;
}


/**
 * Copy 2D rect from one place to another.
 * Position and sizes are in pixels.
 * src_stride may be negative to do vertical flip of pixels from source.
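 *
 * For example, a vertically flipped copy of an uncompressed w x h rect
 * could be done by pointing 'src' at the start of its last row and passing
 * a negated stride (hypothetical sketch):
 *
 *    util_copy_rect(dst, format, dst_stride, 0, 0, w, h,
 *                   src + (h - 1) * src_stride, -(int)src_stride, 0, 0);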
 */
void
util_copy_rect(ubyte * dst,
               enum pipe_format format,
               unsigned dst_stride,
               unsigned dst_x,
               unsigned dst_y,
               unsigned width,
               unsigned height,
               const ubyte * src,
               int src_stride,
               unsigned src_x,
               unsigned src_y)
{
   unsigned i;
   int src_stride_pos = src_stride < 0 ? -src_stride : src_stride;
   int blocksize = util_format_get_blocksize(format);
   int blockwidth = util_format_get_blockwidth(format);
   int blockheight = util_format_get_blockheight(format);

   assert(blocksize > 0);
   assert(blockwidth > 0);
   assert(blockheight > 0);

   dst_x /= blockwidth;
   dst_y /= blockheight;
   width = (width + blockwidth - 1)/blockwidth;
   height = (height + blockheight - 1)/blockheight;
   src_x /= blockwidth;
   src_y /= blockheight;

   dst += dst_x * blocksize;
   src += src_x * blocksize;
   dst += dst_y * dst_stride;
   src += src_y * src_stride_pos;
   width *= blocksize;

   if (width == dst_stride && width == src_stride)
      memcpy(dst, src, height * width);
   else {
      for (i = 0; i < height; i++) {
         memcpy(dst, src, width);
         dst += dst_stride;
         src += src_stride;
      }
   }
}


/**
 * Copy 3D box from one place to another.
 * Position and sizes are in pixels.
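 * Strides and slice strides are in bytes.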
 */
void
util_copy_box(ubyte * dst,
              enum pipe_format format,
              unsigned dst_stride, unsigned dst_slice_stride,
              unsigned dst_x, unsigned dst_y, unsigned dst_z,
              unsigned width, unsigned height, unsigned depth,
              const ubyte * src,
              int src_stride, unsigned src_slice_stride,
              unsigned src_x, unsigned src_y, unsigned src_z)
{
   unsigned z;
   dst += dst_z * dst_slice_stride;
   src += src_z * src_slice_stride;
   for (z = 0; z < depth; ++z) {
      util_copy_rect(dst,
                     format,
                     dst_stride,
                     dst_x, dst_y,
                     width, height,
                     src,
                     src_stride,
                     src_x, src_y);

      dst += dst_slice_stride;
      src += src_slice_stride;
   }
}

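/**
 * Fill a 2D rect with a constant value.
 * Position and sizes are in pixels; 'uc' holds the value already packed in
 * 'format' (e.g. by util_pack_color()).
 */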
void
util_fill_rect(ubyte * dst,
               enum pipe_format format,
               unsigned dst_stride,
               unsigned dst_x,
               unsigned dst_y,
               unsigned width,
               unsigned height,
               union util_color *uc)
{
   const struct util_format_description *desc = util_format_description(format);
   unsigned i, j;
   unsigned width_size;
   int blocksize = desc->block.bits / 8;
   int blockwidth = desc->block.width;
   int blockheight = desc->block.height;

   assert(blocksize > 0);
   assert(blockwidth > 0);
   assert(blockheight > 0);

   dst_x /= blockwidth;
   dst_y /= blockheight;
   width = (width + blockwidth - 1)/blockwidth;
   height = (height + blockheight - 1)/blockheight;

   dst += dst_x * blocksize;
   dst += dst_y * dst_stride;
   width_size = width * blocksize;

   switch (blocksize) {
   case 1:
      if(dst_stride == width_size)
         memset(dst, uc->ub, height * width_size);
      else {
         for (i = 0; i < height; i++) {
            memset(dst, uc->ub, width_size);
            dst += dst_stride;
         }
      }
      break;
   case 2:
      for (i = 0; i < height; i++) {
         uint16_t *row = (uint16_t *)dst;
         for (j = 0; j < width; j++)
            *row++ = uc->us;
         dst += dst_stride;
      }
      break;
   case 4:
      for (i = 0; i < height; i++) {
         uint32_t *row = (uint32_t *)dst;
         for (j = 0; j < width; j++)
            *row++ = uc->ui[0];
         dst += dst_stride;
      }
      break;
   default:
      for (i = 0; i < height; i++) {
         ubyte *row = dst;
         for (j = 0; j < width; j++) {
            memcpy(row, uc, blocksize);
            row += blocksize;
         }
         dst += dst_stride;
      }
      break;
   }
}

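/**
 * Fill a 3D box with a constant value.
 * Note that layers z through depth-1 are filled; 'z' is used both as the
 * starting layer and as the starting offset into 'dst'.
 */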
void
util_fill_box(ubyte * dst,
              enum pipe_format format,
              unsigned stride,
              unsigned layer_stride,
              unsigned x,
              unsigned y,
              unsigned z,
              unsigned width,
              unsigned height,
              unsigned depth,
              union util_color *uc)
{
   unsigned layer;
   dst += z * layer_stride;
   for (layer = z; layer < depth; layer++) {
      util_fill_rect(dst, format,
                     stride,
                     x, y, width, height, uc);
      dst += layer_stride;
   }
}


/** Mipmap level size computation, with minimum block size */
static inline unsigned
minify(unsigned value, unsigned levels, unsigned blocksize)
{
   return MAX2(blocksize, value >> levels);
}

/**
 * Fallback function for pipe->resource_copy_region().
 * We support copying between different formats (including compressed <->
 * uncompressed) as long as the bytes per block or pixel match. If copying
 * compressed -> uncompressed, the dest region is reduced by the block
 * width/height; if copying uncompressed -> compressed, the dest region
 * is expanded by the block width/height. See GL_ARB_copy_image.
 * Note: (X,Y)=(0,0) is always the upper-left corner.
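 *
 * For example (hypothetical formats and sizes), copying a 16x16 pixel
 * region of a DXT1 texture (8-byte 4x4 blocks) into a
 * PIPE_FORMAT_R32G32_UINT texture (8 bytes per pixel) writes a 4x4 pixel
 * destination region:
 *
 *    struct pipe_box box;
 *    u_box_2d(0, 0, 16, 16, &box);
 *    util_resource_copy_region(pipe, dst_r32g32, 0, dst_x, dst_y, 0,
 *                              src_dxt1, 0, &box);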
 */
void
util_resource_copy_region(struct pipe_context *pipe,
                          struct pipe_resource *dst,
                          unsigned dst_level,
                          unsigned dst_x, unsigned dst_y, unsigned dst_z,
                          struct pipe_resource *src,
                          unsigned src_level,
                          const struct pipe_box *src_box_in)
{
   struct pipe_transfer *src_trans, *dst_trans;
   uint8_t *dst_map;
   const uint8_t *src_map;
   MAYBE_UNUSED enum pipe_format src_format;
   enum pipe_format dst_format;
   struct pipe_box src_box, dst_box;
   unsigned src_bs, dst_bs, src_bw, dst_bw, src_bh, dst_bh;

   assert(src && dst);
   if (!src || !dst)
      return;

   assert((src->target == PIPE_BUFFER && dst->target == PIPE_BUFFER) ||
          (src->target != PIPE_BUFFER && dst->target != PIPE_BUFFER));

   src_format = src->format;
   dst_format = dst->format;

   /* init src box */
   src_box = *src_box_in;

   /* init dst box */
   dst_box.x = dst_x;
   dst_box.y = dst_y;
   dst_box.z = dst_z;
   dst_box.width = src_box.width;
   dst_box.height = src_box.height;
   dst_box.depth = src_box.depth;

   src_bs = util_format_get_blocksize(src_format);
   src_bw = util_format_get_blockwidth(src_format);
   src_bh = util_format_get_blockheight(src_format);
   dst_bs = util_format_get_blocksize(dst_format);
   dst_bw = util_format_get_blockwidth(dst_format);
   dst_bh = util_format_get_blockheight(dst_format);

   /* Note: all box positions and sizes are in pixels */
   if (src_bw > 1 && dst_bw == 1) {
      /* Copy from compressed to uncompressed.
       * Shrink dest box by the src block size.
       */
      dst_box.width /= src_bw;
      dst_box.height /= src_bh;
   }
   else if (src_bw == 1 && dst_bw > 1) {
      /* Copy from uncompressed to compressed.
       * Expand dest box by the dest block size.
       */
      dst_box.width *= dst_bw;
      dst_box.height *= dst_bh;
   }
   else {
      /* compressed -> compressed or uncompressed -> uncompressed copy */
      assert(src_bw == dst_bw);
      assert(src_bh == dst_bh);
   }

   assert(src_bs == dst_bs);
   if (src_bs != dst_bs) {
      /* This can happen if we fail to do format checking beforehand.
       * Don't crash below.
       */
      return;
   }

   /* check that region boxes are block aligned */
   assert(src_box.x % src_bw == 0);
   assert(src_box.y % src_bh == 0);
   assert(src_box.width % src_bw == 0 ||
          src_box.x + src_box.width == minify(src->width0, src_level, src_bw));
   assert(src_box.height % src_bh == 0 ||
          src_box.y + src_box.height == minify(src->height0, src_level, src_bh));
   assert(dst_box.x % dst_bw == 0);
   assert(dst_box.y % dst_bh == 0);
   assert(dst_box.width % dst_bw == 0 ||
          dst_box.x + dst_box.width == minify(dst->width0, dst_level, dst_bw));
   assert(dst_box.height % dst_bh == 0 ||
          dst_box.y + dst_box.height == minify(dst->height0, dst_level, dst_bh));

   /* check that region boxes are not out of bounds */
   assert(src_box.x + src_box.width <=
          minify(src->width0, src_level, src_bw));
   assert(src_box.y + src_box.height <=
          minify(src->height0, src_level, src_bh));
   assert(dst_box.x + dst_box.width <=
          minify(dst->width0, dst_level, dst_bw));
   assert(dst_box.y + dst_box.height <=
          minify(dst->height0, dst_level, dst_bh));

   /* check that total number of src, dest bytes match */
   assert((src_box.width / src_bw) * (src_box.height / src_bh) * src_bs ==
          (dst_box.width / dst_bw) * (dst_box.height / dst_bh) * dst_bs);

   src_map = pipe->transfer_map(pipe,
                                src,
                                src_level,
                                PIPE_TRANSFER_READ,
                                &src_box, &src_trans);
   assert(src_map);
   if (!src_map) {
      goto no_src_map;
   }

   dst_map = pipe->transfer_map(pipe,
                                dst,
                                dst_level,
                                PIPE_TRANSFER_WRITE |
                                PIPE_TRANSFER_DISCARD_RANGE, &dst_box,
                                &dst_trans);
   assert(dst_map);
   if (!dst_map) {
      goto no_dst_map;
   }

   if (dst->target == PIPE_BUFFER && src->target == PIPE_BUFFER) {
      assert(src_box.height == 1);
      assert(src_box.depth == 1);
      memcpy(dst_map, src_map, src_box.width);
   } else {
      util_copy_box(dst_map,
                    src_format,
                    dst_trans->stride, dst_trans->layer_stride,
                    0, 0, 0,
                    src_box.width, src_box.height, src_box.depth,
                    src_map,
                    src_trans->stride, src_trans->layer_stride,
                    0, 0, 0);
   }

   pipe->transfer_unmap(pipe, dst_trans);
no_dst_map:
   pipe->transfer_unmap(pipe, src_trans);
no_src_map:
   ;
}



#define UBYTE_TO_USHORT(B) ((B) | ((B) << 8))

/**
 * Fallback for pipe->clear_render_target() function.
 * XXX this looks too hackish to be really useful.
 * cpp > 4 looks like a gross hack at best...
 * Plus, these transfer fallbacks can't be used when clearing
 * multisampled surfaces, for instance.
 * Clears all bound layers.
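 *
 * For example (hypothetical values), clearing a 64x64 region of a surface
 * to opaque red:
 *
 *    union pipe_color_union red = { .f = {1.0f, 0.0f, 0.0f, 1.0f} };
 *    util_clear_render_target(pipe, surf, &red, 0, 0, 64, 64);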
 */
void
util_clear_render_target(struct pipe_context *pipe,
                         struct pipe_surface *dst,
                         const union pipe_color_union *color,
                         unsigned dstx, unsigned dsty,
                         unsigned width, unsigned height)
{
   struct pipe_transfer *dst_trans;
   ubyte *dst_map;
   union util_color uc;
   unsigned max_layer;

   assert(dst->texture);
   if (!dst->texture)
      return;

   if (dst->texture->target == PIPE_BUFFER) {
      /*
       * The fill naturally works on the surface format, however
       * the transfer uses resource format which is just bytes for buffers.
       */
      unsigned dx, w;
      unsigned pixstride = util_format_get_blocksize(dst->format);
      dx = (dst->u.buf.first_element + dstx) * pixstride;
      w = width * pixstride;
      max_layer = 0;
      dst_map = pipe_transfer_map(pipe,
                                  dst->texture,
                                  0, 0,
                                  PIPE_TRANSFER_WRITE,
                                  dx, 0, w, 1,
                                  &dst_trans);
   }
   else {
      max_layer = dst->u.tex.last_layer - dst->u.tex.first_layer;
      dst_map = pipe_transfer_map_3d(pipe,
                                     dst->texture,
                                     dst->u.tex.level,
                                     PIPE_TRANSFER_WRITE,
                                     dstx, dsty, dst->u.tex.first_layer,
                                     width, height, max_layer + 1, &dst_trans);
   }

   assert(dst_map);

   if (dst_map) {
      enum pipe_format format = dst->format;
      assert(dst_trans->stride > 0);

      if (util_format_is_pure_integer(format)) {
         /*
          * We expect int/uint clear values here, though some APIs
          * might disagree (but in any case util_pack_color()
          * couldn't handle it)...
          */
         if (util_format_is_pure_sint(format)) {
            util_format_write_4i(format, color->i, 0, &uc, 0, 0, 0, 1, 1);
         }
         else {
            assert(util_format_is_pure_uint(format));
            util_format_write_4ui(format, color->ui, 0, &uc, 0, 0, 0, 1, 1);
         }
      }
      else {
         util_pack_color(color->f, format, &uc);
      }

      util_fill_box(dst_map, dst->format,
                    dst_trans->stride, dst_trans->layer_stride,
                    0, 0, 0, width, height, max_layer + 1, &uc);

      pipe->transfer_unmap(pipe, dst_trans);
   }
}

/**
 * Fallback for pipe->clear_depth_stencil() function.
 * sw fallback doesn't look terribly useful here.
 * Plus, these transfer fallbacks can't be used when clearing
 * multisampled surfaces, for instance.
 * Clears all bound layers.
 */
void
util_clear_depth_stencil(struct pipe_context *pipe,
                         struct pipe_surface *dst,
                         unsigned clear_flags,
                         double depth,
                         unsigned stencil,
                         unsigned dstx, unsigned dsty,
                         unsigned width, unsigned height)
{
   enum pipe_format format = dst->format;
   struct pipe_transfer *dst_trans;
   ubyte *dst_map;
   boolean need_rmw = FALSE;
   unsigned max_layer, layer;

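   /* Clearing only depth or only stencil of a combined depth/stencil
    * surface requires a read-modify-write so the other component is
    * preserved.
    */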
   if ((clear_flags & PIPE_CLEAR_DEPTHSTENCIL) &&
       ((clear_flags & PIPE_CLEAR_DEPTHSTENCIL) != PIPE_CLEAR_DEPTHSTENCIL) &&
       util_format_is_depth_and_stencil(format))
      need_rmw = TRUE;

   assert(dst->texture);
   if (!dst->texture)
      return;

   max_layer = dst->u.tex.last_layer - dst->u.tex.first_layer;
   dst_map = pipe_transfer_map_3d(pipe,
                                  dst->texture,
                                  dst->u.tex.level,
                                  (need_rmw ? PIPE_TRANSFER_READ_WRITE :
                                   PIPE_TRANSFER_WRITE),
                                  dstx, dsty, dst->u.tex.first_layer,
                                  width, height, max_layer + 1, &dst_trans);
   assert(dst_map);

   if (dst_map) {
      unsigned dst_stride = dst_trans->stride;
      uint64_t zstencil = util_pack64_z_stencil(format, depth, stencil);
      ubyte *dst_layer = dst_map;
      unsigned i, j;
      assert(dst_trans->stride > 0);

      for (layer = 0; layer <= max_layer; layer++) {
         dst_map = dst_layer;

         switch (util_format_get_blocksize(format)) {
         case 1:
            assert(format == PIPE_FORMAT_S8_UINT);
            if(dst_stride == width)
               memset(dst_map, (uint8_t) zstencil, height * width);
            else {
               for (i = 0; i < height; i++) {
                  memset(dst_map, (uint8_t) zstencil, width);
                  dst_map += dst_stride;
               }
            }
            break;
         case 2:
            assert(format == PIPE_FORMAT_Z16_UNORM);
            for (i = 0; i < height; i++) {
               uint16_t *row = (uint16_t *)dst_map;
               for (j = 0; j < width; j++)
                  *row++ = (uint16_t) zstencil;
               dst_map += dst_stride;
            }
            break;
         case 4:
            if (!need_rmw) {
               for (i = 0; i < height; i++) {
                  uint32_t *row = (uint32_t *)dst_map;
                  for (j = 0; j < width; j++)
                     *row++ = (uint32_t) zstencil;
                  dst_map += dst_stride;
               }
            }
            else {
               uint32_t dst_mask;
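               /* dst_mask selects the bits of the existing value to
                * preserve; the complement is overwritten with the packed
                * clear value below.
                */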
               if (format == PIPE_FORMAT_Z24_UNORM_S8_UINT)
                  dst_mask = 0x00ffffff;
               else {
                  assert(format == PIPE_FORMAT_S8_UINT_Z24_UNORM);
                  dst_mask = 0xffffff00;
               }
               if (clear_flags & PIPE_CLEAR_DEPTH)
                  dst_mask = ~dst_mask;
               for (i = 0; i < height; i++) {
                  uint32_t *row = (uint32_t *)dst_map;
                  for (j = 0; j < width; j++) {
                     uint32_t tmp = *row & dst_mask;
                     *row++ = tmp | ((uint32_t) zstencil & ~dst_mask);
                  }
                  dst_map += dst_stride;
               }
            }
            break;
         case 8:
            if (!need_rmw) {
               for (i = 0; i < height; i++) {
                  uint64_t *row = (uint64_t *)dst_map;
                  for (j = 0; j < width; j++)
                     *row++ = zstencil;
                  dst_map += dst_stride;
               }
            }
            else {
               uint64_t src_mask;

               if (clear_flags & PIPE_CLEAR_DEPTH)
                  src_mask = 0x00000000ffffffffull;
               else
                  src_mask = 0x000000ff00000000ull;

               for (i = 0; i < height; i++) {
                  uint64_t *row = (uint64_t *)dst_map;
                  for (j = 0; j < width; j++) {
                     uint64_t tmp = *row & ~src_mask;
                     *row++ = tmp | (zstencil & src_mask);
                  }
                  dst_map += dst_stride;
               }
            }
            break;
         default:
            assert(0);
            break;
         }
         dst_layer += dst_trans->layer_stride;
      }

      pipe->transfer_unmap(pipe, dst_trans);
   }
}

/* Return TRUE if the box is totally inside the resource.
 */
static boolean
is_box_inside_resource(const struct pipe_resource *res,
                       const struct pipe_box *box,
                       unsigned level)
{
   unsigned width = 1, height = 1, depth = 1;

   switch (res->target) {
   case PIPE_BUFFER:
      width = res->width0;
      height = 1;
      depth = 1;
      break;
   case PIPE_TEXTURE_1D:
      width = u_minify(res->width0, level);
      height = 1;
      depth = 1;
      break;
   case PIPE_TEXTURE_2D:
   case PIPE_TEXTURE_RECT:
      width = u_minify(res->width0, level);
      height = u_minify(res->height0, level);
      depth = 1;
      break;
   case PIPE_TEXTURE_3D:
      width = u_minify(res->width0, level);
      height = u_minify(res->height0, level);
      depth = u_minify(res->depth0, level);
      break;
   case PIPE_TEXTURE_CUBE:
      width = u_minify(res->width0, level);
      height = u_minify(res->height0, level);
      depth = 6;
      break;
   case PIPE_TEXTURE_1D_ARRAY:
      width = u_minify(res->width0, level);
      height = 1;
      depth = res->array_size;
      break;
   case PIPE_TEXTURE_2D_ARRAY:
      width = u_minify(res->width0, level);
      height = u_minify(res->height0, level);
      depth = res->array_size;
      break;
   case PIPE_TEXTURE_CUBE_ARRAY:
      width = u_minify(res->width0, level);
      height = u_minify(res->height0, level);
      depth = res->array_size;
      assert(res->array_size % 6 == 0);
      break;
   case PIPE_MAX_TEXTURE_TYPES:
      break;
   }

   return box->x >= 0 &&
          box->x + box->width <= (int) width &&
          box->y >= 0 &&
          box->y + box->height <= (int) height &&
          box->z >= 0 &&
          box->z + box->depth <= (int) depth;
}

static unsigned
get_sample_count(const struct pipe_resource *res)
{
   return res->nr_samples ? res->nr_samples : 1;
}

/**
 * Try to do a blit using resource_copy_region. The function calls
 * resource_copy_region if the blit description is compatible with it.
 *
 * It returns TRUE if the blit was done using resource_copy_region.
 *
 * It returns FALSE otherwise and the caller must fall back to a more generic
 * codepath for the blit operation (e.g. by using u_blitter).
 */
boolean
util_try_blit_via_copy_region(struct pipe_context *ctx,
                              const struct pipe_blit_info *blit)
{
   unsigned mask = util_format_get_mask(blit->dst.format);

   /* No format conversions. */
   if (blit->src.resource->format != blit->src.format ||
       blit->dst.resource->format != blit->dst.format ||
       !util_is_format_compatible(
          util_format_description(blit->src.resource->format),
          util_format_description(blit->dst.resource->format))) {
      return FALSE;
   }

   /* No masks, no filtering, no scissor. */
   if ((blit->mask & mask) != mask ||
       blit->filter != PIPE_TEX_FILTER_NEAREST ||
       blit->scissor_enable) {
      return FALSE;
   }

   /* No flipping. */
   if (blit->src.box.width < 0 ||
       blit->src.box.height < 0 ||
       blit->src.box.depth < 0) {
      return FALSE;
   }

   /* No scaling. */
   if (blit->src.box.width != blit->dst.box.width ||
       blit->src.box.height != blit->dst.box.height ||
       blit->src.box.depth != blit->dst.box.depth) {
      return FALSE;
   }

   /* No out-of-bounds access. */
   if (!is_box_inside_resource(blit->src.resource, &blit->src.box,
                               blit->src.level) ||
       !is_box_inside_resource(blit->dst.resource, &blit->dst.box,
                               blit->dst.level)) {
      return FALSE;
   }

   /* Sample counts must match. */
   if (get_sample_count(blit->src.resource) !=
       get_sample_count(blit->dst.resource)) {
      return FALSE;
   }

   if (blit->alpha_blend)
      return FALSE;

   ctx->resource_copy_region(ctx, blit->dst.resource, blit->dst.level,
                             blit->dst.box.x, blit->dst.box.y, blit->dst.box.z,
                             blit->src.resource, blit->src.level,
                             &blit->src.box);
   return TRUE;
}
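

/*
 * Hypothetical usage sketch from a driver's blit() hook ('xyz_blit',
 * 'xyz_context' and 'blitter' are illustrative names, not part of this
 * file): try the copy_region path first and only fall back to u_blitter
 * when it cannot be used.
 *
 *    static void xyz_blit(struct pipe_context *ctx,
 *                         const struct pipe_blit_info *info)
 *    {
 *       if (util_try_blit_via_copy_region(ctx, info))
 *          return;
 *       util_blitter_blit(xyz_context(ctx)->blitter, info);
 *    }
 */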