e48e47c2ec29df3fde2c84857492b8495e1bec3f
[mesa.git] / src / gallium / auxiliary / util / u_surface.c
1 /**************************************************************************
2 *
3 * Copyright 2009 VMware, Inc. All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sub license, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial portions
15 * of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
18 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
20 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
21 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
22 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
23 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 **************************************************************************/
26
27 /**
28 * @file
29 * Surface utility functions.
30 *
31 * @author Brian Paul
32 */
33
34
35 #include "pipe/p_defines.h"
36 #include "pipe/p_screen.h"
37 #include "pipe/p_state.h"
38
39 #include "util/u_format.h"
40 #include "util/u_inlines.h"
41 #include "util/u_rect.h"
42 #include "util/u_surface.h"
43 #include "util/u_pack_color.h"
44
45
46 /**
47 * Initialize a pipe_surface object. 'view' is considered to have
48 * uninitialized contents.
49 */
50 void
51 u_surface_default_template(struct pipe_surface *surf,
52 const struct pipe_resource *texture)
53 {
54 memset(surf, 0, sizeof(*surf));
55
56 surf->format = texture->format;
57 }
58
59
60 /**
61 * Copy 2D rect from one place to another.
62 * Position and sizes are in pixels.
63 * src_stride may be negative to do vertical flip of pixels from source.
64 */
65 void
66 util_copy_rect(ubyte * dst,
67 enum pipe_format format,
68 unsigned dst_stride,
69 unsigned dst_x,
70 unsigned dst_y,
71 unsigned width,
72 unsigned height,
73 const ubyte * src,
74 int src_stride,
75 unsigned src_x,
76 unsigned src_y)
77 {
78 unsigned i;
79 int src_stride_pos = src_stride < 0 ? -src_stride : src_stride;
80 int blocksize = util_format_get_blocksize(format);
81 int blockwidth = util_format_get_blockwidth(format);
82 int blockheight = util_format_get_blockheight(format);
83
84 assert(blocksize > 0);
85 assert(blockwidth > 0);
86 assert(blockheight > 0);
87
88 dst_x /= blockwidth;
89 dst_y /= blockheight;
90 width = (width + blockwidth - 1)/blockwidth;
91 height = (height + blockheight - 1)/blockheight;
92 src_x /= blockwidth;
93 src_y /= blockheight;
94
95 dst += dst_x * blocksize;
96 src += src_x * blocksize;
97 dst += dst_y * dst_stride;
98 src += src_y * src_stride_pos;
99 width *= blocksize;
100
101 if (width == dst_stride && width == src_stride)
102 memcpy(dst, src, height * width);
103 else {
104 for (i = 0; i < height; i++) {
105 memcpy(dst, src, width);
106 dst += dst_stride;
107 src += src_stride;
108 }
109 }
110 }
111
112
113 /**
114 * Copy 3D box from one place to another.
115 * Position and sizes are in pixels.
116 */
117 void
118 util_copy_box(ubyte * dst,
119 enum pipe_format format,
120 unsigned dst_stride, unsigned dst_slice_stride,
121 unsigned dst_x, unsigned dst_y, unsigned dst_z,
122 unsigned width, unsigned height, unsigned depth,
123 const ubyte * src,
124 int src_stride, unsigned src_slice_stride,
125 unsigned src_x, unsigned src_y, unsigned src_z)
126 {
127 unsigned z;
128 dst += dst_z * dst_slice_stride;
129 src += src_z * src_slice_stride;
130 for (z = 0; z < depth; ++z) {
131 util_copy_rect(dst,
132 format,
133 dst_stride,
134 dst_x, dst_y,
135 width, height,
136 src,
137 src_stride,
138 src_x, src_y);
139
140 dst += dst_slice_stride;
141 src += src_slice_stride;
142 }
143 }
144
145
146 void
147 util_fill_rect(ubyte * dst,
148 enum pipe_format format,
149 unsigned dst_stride,
150 unsigned dst_x,
151 unsigned dst_y,
152 unsigned width,
153 unsigned height,
154 union util_color *uc)
155 {
156 const struct util_format_description *desc = util_format_description(format);
157 unsigned i, j;
158 unsigned width_size;
159 int blocksize = desc->block.bits / 8;
160 int blockwidth = desc->block.width;
161 int blockheight = desc->block.height;
162
163 assert(blocksize > 0);
164 assert(blockwidth > 0);
165 assert(blockheight > 0);
166
167 dst_x /= blockwidth;
168 dst_y /= blockheight;
169 width = (width + blockwidth - 1)/blockwidth;
170 height = (height + blockheight - 1)/blockheight;
171
172 dst += dst_x * blocksize;
173 dst += dst_y * dst_stride;
174 width_size = width * blocksize;
175
176 switch (blocksize) {
177 case 1:
178 if(dst_stride == width_size)
179 memset(dst, uc->ub, height * width_size);
180 else {
181 for (i = 0; i < height; i++) {
182 memset(dst, uc->ub, width_size);
183 dst += dst_stride;
184 }
185 }
186 break;
187 case 2:
188 for (i = 0; i < height; i++) {
189 uint16_t *row = (uint16_t *)dst;
190 for (j = 0; j < width; j++)
191 *row++ = uc->us;
192 dst += dst_stride;
193 }
194 break;
195 case 4:
196 for (i = 0; i < height; i++) {
197 uint32_t *row = (uint32_t *)dst;
198 for (j = 0; j < width; j++)
199 *row++ = uc->ui[0];
200 dst += dst_stride;
201 }
202 break;
203 default:
204 for (i = 0; i < height; i++) {
205 ubyte *row = dst;
206 for (j = 0; j < width; j++) {
207 memcpy(row, uc, blocksize);
208 row += blocksize;
209 }
210 dst += dst_stride;
211 }
212 break;
213 }
214 }
215
216
/**
 * Fill a 3D box with a solid color, one layer at a time.
 * 'uc' holds the color already packed in 'format'.
 *
 * NOTE(review): 'dst' is advanced by z * layer_stride AND the loop also
 * starts at layer == z, so only (depth - z) layers are filled.  The
 * callers in this file pass z == 0 with 'depth' as a layer count, where
 * this is fine; for z != 0 the intent (count vs. end coordinate) should
 * be confirmed before relying on it.
 */
void
util_fill_box(ubyte * dst,
              enum pipe_format format,
              unsigned stride,
              unsigned layer_stride,
              unsigned x,
              unsigned y,
              unsigned z,
              unsigned width,
              unsigned height,
              unsigned depth,
              union util_color *uc)
{
   unsigned layer;
   dst += z * layer_stride;
   for (layer = z; layer < depth; layer++) {
      util_fill_rect(dst, format,
                     stride,
                     x, y, width, height, uc);
      dst += layer_stride;
   }
}
239
240
/**
 * Compute the size of a mipmap level, clamped so it never drops below
 * the format's minimum block size.
 */
static inline unsigned
minify(unsigned value, unsigned levels, unsigned blocksize)
{
   const unsigned minified = value >> levels;
   return minified > blocksize ? minified : blocksize;
}
247
248
249 /**
250 * Fallback function for pipe->resource_copy_region().
251 * We support copying between different formats (including compressed/
252 * uncompressed) if the bytes per block or pixel matches. If copying
253 * compressed -> uncompressed, the dst region is reduced by the block
254 * width, height. If copying uncompressed -> compressed, the dest region
255 * is expanded by the block width, height. See GL_ARB_copy_image.
256 * Note: (X,Y)=(0,0) is always the upper-left corner.
257 */
258 void
259 util_resource_copy_region(struct pipe_context *pipe,
260 struct pipe_resource *dst,
261 unsigned dst_level,
262 unsigned dst_x, unsigned dst_y, unsigned dst_z,
263 struct pipe_resource *src,
264 unsigned src_level,
265 const struct pipe_box *src_box_in)
266 {
267 struct pipe_transfer *src_trans, *dst_trans;
268 uint8_t *dst_map;
269 const uint8_t *src_map;
270 MAYBE_UNUSED enum pipe_format src_format;
271 enum pipe_format dst_format;
272 struct pipe_box src_box, dst_box;
273 unsigned src_bs, dst_bs, src_bw, dst_bw, src_bh, dst_bh;
274
275 assert(src && dst);
276 if (!src || !dst)
277 return;
278
279 assert((src->target == PIPE_BUFFER && dst->target == PIPE_BUFFER) ||
280 (src->target != PIPE_BUFFER && dst->target != PIPE_BUFFER));
281
282 src_format = src->format;
283 dst_format = dst->format;
284
285 /* init src box */
286 src_box = *src_box_in;
287
288 /* init dst box */
289 dst_box.x = dst_x;
290 dst_box.y = dst_y;
291 dst_box.z = dst_z;
292 dst_box.width = src_box.width;
293 dst_box.height = src_box.height;
294 dst_box.depth = src_box.depth;
295
296 src_bs = util_format_get_blocksize(src_format);
297 src_bw = util_format_get_blockwidth(src_format);
298 src_bh = util_format_get_blockheight(src_format);
299 dst_bs = util_format_get_blocksize(dst_format);
300 dst_bw = util_format_get_blockwidth(dst_format);
301 dst_bh = util_format_get_blockheight(dst_format);
302
303 /* Note: all box positions and sizes are in pixels */
304 if (src_bw > 1 && dst_bw == 1) {
305 /* Copy from compressed to uncompressed.
306 * Shrink dest box by the src block size.
307 */
308 dst_box.width /= src_bw;
309 dst_box.height /= src_bh;
310 }
311 else if (src_bw == 1 && dst_bw > 1) {
312 /* Copy from uncompressed to compressed.
313 * Expand dest box by the dest block size.
314 */
315 dst_box.width *= dst_bw;
316 dst_box.height *= dst_bh;
317 }
318 else {
319 /* compressed -> compressed or uncompressed -> uncompressed copy */
320 assert(src_bw == dst_bw);
321 assert(src_bh == dst_bh);
322 }
323
324 assert(src_bs == dst_bs);
325 if (src_bs != dst_bs) {
326 /* This can happen if we fail to do format checking before hand.
327 * Don't crash below.
328 */
329 return;
330 }
331
332 /* check that region boxes are block aligned */
333 assert(src_box.x % src_bw == 0);
334 assert(src_box.y % src_bh == 0);
335 assert(src_box.width % src_bw == 0 || src_box.width < src_bw);
336 assert(src_box.height % src_bh == 0 || src_box.height < src_bh);
337 assert(dst_box.x % dst_bw == 0);
338 assert(dst_box.y % dst_bh == 0);
339 assert(dst_box.width % dst_bw == 0 || dst_box.width < dst_bw);
340 assert(dst_box.height % dst_bh == 0 || dst_box.height < src_bh);
341
342 /* check that region boxes are not out of bounds */
343 assert(src_box.x + src_box.width <=
344 minify(src->width0, src_level, src_bw));
345 assert(src_box.y + src_box.height <=
346 minify(src->height0, src_level, src_bh));
347 assert(dst_box.x + dst_box.width <=
348 minify(dst->width0, dst_level, dst_bw));
349 assert(dst_box.y + dst_box.height <=
350 minify(dst->height0, dst_level, dst_bh));
351
352 /* check that total number of src, dest bytes match */
353 assert((src_box.width / src_bw) * (src_box.height / src_bh) * src_bs ==
354 (dst_box.width / dst_bw) * (dst_box.height / dst_bh) * dst_bs);
355
356 src_map = pipe->transfer_map(pipe,
357 src,
358 src_level,
359 PIPE_TRANSFER_READ,
360 &src_box, &src_trans);
361 assert(src_map);
362 if (!src_map) {
363 goto no_src_map;
364 }
365
366 dst_map = pipe->transfer_map(pipe,
367 dst,
368 dst_level,
369 PIPE_TRANSFER_WRITE |
370 PIPE_TRANSFER_DISCARD_RANGE, &dst_box,
371 &dst_trans);
372 assert(dst_map);
373 if (!dst_map) {
374 goto no_dst_map;
375 }
376
377 if (dst->target == PIPE_BUFFER && src->target == PIPE_BUFFER) {
378 assert(src_box.height == 1);
379 assert(src_box.depth == 1);
380 memcpy(dst_map, src_map, src_box.width);
381 } else {
382 util_copy_box(dst_map,
383 src_format,
384 dst_trans->stride, dst_trans->layer_stride,
385 0, 0, 0,
386 src_box.width, src_box.height, src_box.depth,
387 src_map,
388 src_trans->stride, src_trans->layer_stride,
389 0, 0, 0);
390 }
391
392 pipe->transfer_unmap(pipe, dst_trans);
393 no_dst_map:
394 pipe->transfer_unmap(pipe, src_trans);
395 no_src_map:
396 ;
397 }
398
399
400
401 #define UBYTE_TO_USHORT(B) ((B) | ((B) << 8))
402
403
/**
 * Fallback for pipe->clear_render_target() function.
 * XXX this looks too hackish to be really useful.
 * cpp > 4 looks like a gross hack at best...
 * Plus can't use these transfer fallbacks when clearing
 * multisampled surfaces for instance.
 * Clears all bound layers.
 *
 * The clear color is packed into the surface format and the mapped
 * region is filled with util_fill_box().
 */
void
util_clear_render_target(struct pipe_context *pipe,
                         struct pipe_surface *dst,
                         const union pipe_color_union *color,
                         unsigned dstx, unsigned dsty,
                         unsigned width, unsigned height)
{
   struct pipe_transfer *dst_trans;
   ubyte *dst_map;
   union util_color uc;
   unsigned max_layer;

   assert(dst->texture);
   if (!dst->texture)
      return;

   if (dst->texture->target == PIPE_BUFFER) {
      /*
       * The fill naturally works on the surface format, however
       * the transfer uses resource format which is just bytes for buffers.
       */
      unsigned dx, w;
      unsigned pixstride = util_format_get_blocksize(dst->format);
      /* byte offset / byte width of the cleared range within the buffer */
      dx = (dst->u.buf.first_element + dstx) * pixstride;
      w = width * pixstride;
      max_layer = 0;
      dst_map = pipe_transfer_map(pipe,
                                  dst->texture,
                                  0, 0,
                                  PIPE_TRANSFER_WRITE,
                                  dx, 0, w, 1,
                                  &dst_trans);
   }
   else {
      /* map the whole layer range bound to the surface */
      max_layer = dst->u.tex.last_layer - dst->u.tex.first_layer;
      dst_map = pipe_transfer_map_3d(pipe,
                                     dst->texture,
                                     dst->u.tex.level,
                                     PIPE_TRANSFER_WRITE,
                                     dstx, dsty, dst->u.tex.first_layer,
                                     width, height, max_layer + 1, &dst_trans);
   }

   assert(dst_map);

   if (dst_map) {
      enum pipe_format format = dst->format;
      assert(dst_trans->stride > 0);

      if (util_format_is_pure_integer(format)) {
         /*
          * We expect int/uint clear values here, though some APIs
          * might disagree (but in any case util_pack_color()
          * couldn't handle it)...
          */
         if (util_format_is_pure_sint(format)) {
            util_format_write_4i(format, color->i, 0, &uc, 0, 0, 0, 1, 1);
         }
         else {
            assert(util_format_is_pure_uint(format));
            util_format_write_4ui(format, color->ui, 0, &uc, 0, 0, 0, 1, 1);
         }
      }
      else {
         /* pack the float clear color into the surface format */
         util_pack_color(color->f, format, &uc);
      }

      /* fill every mapped layer with the packed color */
      util_fill_box(dst_map, dst->format,
                    dst_trans->stride, dst_trans->layer_stride,
                    0, 0, 0, width, height, max_layer + 1, &uc);

      pipe->transfer_unmap(pipe, dst_trans);
   }
}
486
/**
 * Fallback for pipe->clear_stencil() function.
 * sw fallback doesn't look terribly useful here.
 * Plus can't use these transfer fallbacks when clearing
 * multisampled surfaces for instance.
 * Clears all bound layers.
 *
 * When only one of depth/stencil is cleared on a combined
 * depth-stencil format, the untouched component must be preserved,
 * so the surface is mapped read-write and updated with a masked
 * read-modify-write per pixel.
 */
void
util_clear_depth_stencil(struct pipe_context *pipe,
                         struct pipe_surface *dst,
                         unsigned clear_flags,
                         double depth,
                         unsigned stencil,
                         unsigned dstx, unsigned dsty,
                         unsigned width, unsigned height)
{
   enum pipe_format format = dst->format;
   struct pipe_transfer *dst_trans;
   ubyte *dst_map;
   boolean need_rmw = FALSE;
   unsigned max_layer, layer;

   /* RMW needed iff only one of depth/stencil is being cleared on a
    * format that packs both components together */
   if ((clear_flags & PIPE_CLEAR_DEPTHSTENCIL) &&
       ((clear_flags & PIPE_CLEAR_DEPTHSTENCIL) != PIPE_CLEAR_DEPTHSTENCIL) &&
       util_format_is_depth_and_stencil(format))
      need_rmw = TRUE;

   assert(dst->texture);
   if (!dst->texture)
      return;

   max_layer = dst->u.tex.last_layer - dst->u.tex.first_layer;
   dst_map = pipe_transfer_map_3d(pipe,
                                  dst->texture,
                                  dst->u.tex.level,
                                  (need_rmw ? PIPE_TRANSFER_READ_WRITE :
                                              PIPE_TRANSFER_WRITE),
                                  dstx, dsty, dst->u.tex.first_layer,
                                  width, height, max_layer + 1, &dst_trans);
   assert(dst_map);

   if (dst_map) {
      unsigned dst_stride = dst_trans->stride;
      /* depth and stencil packed together in up to 64 bits */
      uint64_t zstencil = util_pack64_z_stencil(format, depth, stencil);
      ubyte *dst_layer = dst_map;
      unsigned i, j;
      assert(dst_trans->stride > 0);

      for (layer = 0; layer <= max_layer; layer++) {
         dst_map = dst_layer;

         /* dispatch on bytes per pixel */
         switch (util_format_get_blocksize(format)) {
         case 1:
            assert(format == PIPE_FORMAT_S8_UINT);
            if(dst_stride == width)
               memset(dst_map, (uint8_t) zstencil, height * width);
            else {
               for (i = 0; i < height; i++) {
                  memset(dst_map, (uint8_t) zstencil, width);
                  dst_map += dst_stride;
               }
            }
            break;
         case 2:
            assert(format == PIPE_FORMAT_Z16_UNORM);
            for (i = 0; i < height; i++) {
               uint16_t *row = (uint16_t *)dst_map;
               for (j = 0; j < width; j++)
                  *row++ = (uint16_t) zstencil;
               dst_map += dst_stride;
            }
            break;
         case 4:
            if (!need_rmw) {
               /* both components cleared: plain overwrite */
               for (i = 0; i < height; i++) {
                  uint32_t *row = (uint32_t *)dst_map;
                  for (j = 0; j < width; j++)
                     *row++ = (uint32_t) zstencil;
                  dst_map += dst_stride;
               }
            }
            else {
               /* dst_mask selects the bits to PRESERVE from the
                * existing pixel */
               uint32_t dst_mask;
               if (format == PIPE_FORMAT_Z24_UNORM_S8_UINT)
                  dst_mask = 0x00ffffff;
               else {
                  assert(format == PIPE_FORMAT_S8_UINT_Z24_UNORM);
                  dst_mask = 0xffffff00;
               }
               /* clearing depth -> preserve stencil, and vice versa */
               if (clear_flags & PIPE_CLEAR_DEPTH)
                  dst_mask = ~dst_mask;
               for (i = 0; i < height; i++) {
                  uint32_t *row = (uint32_t *)dst_map;
                  for (j = 0; j < width; j++) {
                     uint32_t tmp = *row & dst_mask;
                     *row++ = tmp | ((uint32_t) zstencil & ~dst_mask);
                  }
                  dst_map += dst_stride;
               }
            }
            break;
         case 8:
            if (!need_rmw) {
               for (i = 0; i < height; i++) {
                  uint64_t *row = (uint64_t *)dst_map;
                  for (j = 0; j < width; j++)
                     *row++ = zstencil;
                  dst_map += dst_stride;
               }
            }
            else {
               /* src_mask selects the bits to WRITE from zstencil
                * (Z32_FLOAT_S8X24_UINT layout: depth low, stencil high) */
               uint64_t src_mask;

               if (clear_flags & PIPE_CLEAR_DEPTH)
                  src_mask = 0x00000000ffffffffull;
               else
                  src_mask = 0x000000ff00000000ull;

               for (i = 0; i < height; i++) {
                  uint64_t *row = (uint64_t *)dst_map;
                  for (j = 0; j < width; j++) {
                     uint64_t tmp = *row & ~src_mask;
                     *row++ = tmp | (zstencil & src_mask);
                  }
                  dst_map += dst_stride;
               }
            }
            break;
         default:
            assert(0);
            break;
         }
         /* next array layer / cube face */
         dst_layer += dst_trans->layer_stride;
      }

      pipe->transfer_unmap(pipe, dst_trans);
   }
}
625
626
627 /* Return if the box is totally inside the resource.
628 */
629 static boolean
630 is_box_inside_resource(const struct pipe_resource *res,
631 const struct pipe_box *box,
632 unsigned level)
633 {
634 unsigned width = 1, height = 1, depth = 1;
635
636 switch (res->target) {
637 case PIPE_BUFFER:
638 width = res->width0;
639 height = 1;
640 depth = 1;
641 break;
642 case PIPE_TEXTURE_1D:
643 width = u_minify(res->width0, level);
644 height = 1;
645 depth = 1;
646 break;
647 case PIPE_TEXTURE_2D:
648 case PIPE_TEXTURE_RECT:
649 width = u_minify(res->width0, level);
650 height = u_minify(res->height0, level);
651 depth = 1;
652 break;
653 case PIPE_TEXTURE_3D:
654 width = u_minify(res->width0, level);
655 height = u_minify(res->height0, level);
656 depth = u_minify(res->depth0, level);
657 break;
658 case PIPE_TEXTURE_CUBE:
659 width = u_minify(res->width0, level);
660 height = u_minify(res->height0, level);
661 depth = 6;
662 break;
663 case PIPE_TEXTURE_1D_ARRAY:
664 width = u_minify(res->width0, level);
665 height = 1;
666 depth = res->array_size;
667 break;
668 case PIPE_TEXTURE_2D_ARRAY:
669 width = u_minify(res->width0, level);
670 height = u_minify(res->height0, level);
671 depth = res->array_size;
672 break;
673 case PIPE_TEXTURE_CUBE_ARRAY:
674 width = u_minify(res->width0, level);
675 height = u_minify(res->height0, level);
676 depth = res->array_size;
677 assert(res->array_size % 6 == 0);
678 break;
679 case PIPE_MAX_TEXTURE_TYPES:
680 break;
681 }
682
683 return box->x >= 0 &&
684 box->x + box->width <= (int) width &&
685 box->y >= 0 &&
686 box->y + box->height <= (int) height &&
687 box->z >= 0 &&
688 box->z + box->depth <= (int) depth;
689 }
690
691 static unsigned
692 get_sample_count(const struct pipe_resource *res)
693 {
694 return res->nr_samples ? res->nr_samples : 1;
695 }
696
697 /**
698 * Try to do a blit using resource_copy_region. The function calls
699 * resource_copy_region if the blit description is compatible with it.
700 *
701 * It returns TRUE if the blit was done using resource_copy_region.
702 *
703 * It returns FALSE otherwise and the caller must fall back to a more generic
704 * codepath for the blit operation. (e.g. by using u_blitter)
705 */
706 boolean
707 util_try_blit_via_copy_region(struct pipe_context *ctx,
708 const struct pipe_blit_info *blit)
709 {
710 unsigned mask = util_format_get_mask(blit->dst.format);
711
712 /* No format conversions. */
713 if (blit->src.resource->format != blit->src.format ||
714 blit->dst.resource->format != blit->dst.format ||
715 !util_is_format_compatible(
716 util_format_description(blit->src.resource->format),
717 util_format_description(blit->dst.resource->format))) {
718 return FALSE;
719 }
720
721 /* No masks, no filtering, no scissor. */
722 if ((blit->mask & mask) != mask ||
723 blit->filter != PIPE_TEX_FILTER_NEAREST ||
724 blit->scissor_enable) {
725 return FALSE;
726 }
727
728 /* No flipping. */
729 if (blit->src.box.width < 0 ||
730 blit->src.box.height < 0 ||
731 blit->src.box.depth < 0) {
732 return FALSE;
733 }
734
735 /* No scaling. */
736 if (blit->src.box.width != blit->dst.box.width ||
737 blit->src.box.height != blit->dst.box.height ||
738 blit->src.box.depth != blit->dst.box.depth) {
739 return FALSE;
740 }
741
742 /* No out-of-bounds access. */
743 if (!is_box_inside_resource(blit->src.resource, &blit->src.box,
744 blit->src.level) ||
745 !is_box_inside_resource(blit->dst.resource, &blit->dst.box,
746 blit->dst.level)) {
747 return FALSE;
748 }
749
750 /* Sample counts must match. */
751 if (get_sample_count(blit->src.resource) !=
752 get_sample_count(blit->dst.resource)) {
753 return FALSE;
754 }
755
756 if (blit->alpha_blend)
757 return FALSE;
758
759 ctx->resource_copy_region(ctx, blit->dst.resource, blit->dst.level,
760 blit->dst.box.x, blit->dst.box.y, blit->dst.box.z,
761 blit->src.resource, blit->src.level,
762 &blit->src.box);
763 return TRUE;
764 }