/*
 * Mesa 3-D graphics library
 *
 * Copyright 2012 Intel Corporation
 * Copyright 2013 Google
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Chad Versace <chad.versace@linux.intel.com>
 *    Frank Henigman <fjhenigman@google.com>
 */
#include <string.h>

#include "util/macros.h"

#include "brw_context.h"
#include "intel_tiled_memcpy.h"

#include <tmmintrin.h>
#define FILE_DEBUG_FLAG DEBUG_TEXTURE

#define ALIGN_DOWN(a, b) ROUND_DOWN_TO(a, b)
#define ALIGN_UP(a, b) ALIGN(a, b)
/* Tile dimensions.  Width and span are in bytes, height is in pixels (i.e.
 * unitless).  A "span" is the greatest number of bytes we can copy from
 * linear to tiled without needing to calculate a new destination address.
 */
static const uint32_t xtile_width = 512;
static const uint32_t xtile_height = 8;
static const uint32_t xtile_span = 64;
static const uint32_t ytile_width = 128;
static const uint32_t ytile_height = 32;
static const uint32_t ytile_span = 16;
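
/* Both tile layouts cover 4096 bytes: an X tile is 512 bytes wide by 8 rows
 * (8 spans of 64 bytes per row), and a Y tile is 128 bytes wide by 32 rows
 * (8 columns of 16-byte spans per row).
 */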
static const uint8_t rgba8_permutation[16] =
   { 2,1,0,3, 6,5,4,7, 10,9,8,11, 14,13,12,15 };
/* NOTE: dst must be 16-byte aligned.  src may be unaligned. */
#define rgba8_copy_16_aligned_dst(dst, src)                            \
   _mm_store_si128((__m128i *)(dst),                                   \
                   _mm_shuffle_epi8(_mm_loadu_si128((__m128i *)(src)), \
                                    *(__m128i *) rgba8_permutation))

/* NOTE: src must be 16-byte aligned.  dst may be unaligned. */
#define rgba8_copy_16_aligned_src(dst, src)                            \
   _mm_storeu_si128((__m128i *)(dst),                                  \
                    _mm_shuffle_epi8(_mm_load_si128((__m128i *)(src)), \
                                     *(__m128i *) rgba8_permutation))
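
/* The shuffle above is equivalent to this scalar loop over one 16-byte
 * group (four RGBA pixels):
 *
 *    for (int i = 0; i < 16; i++)
 *       dst[i] = src[rgba8_permutation[i]];
 *
 * so within each pixel the R and B bytes swap places while G and A stay put.
 */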
/**
 * Copy RGBA to BGRA - swap R and B, with the destination 16-byte aligned.
 */
static inline void *
rgba8_copy_aligned_dst(void *dst, const void *src, size_t bytes)
{
   uint8_t *d = dst;
   uint8_t const *s = src;

   /* Fast paths for whole spans: one Y-tile span (16 bytes) or one
    * X-tile span (64 bytes).
    */
   if (bytes == 16) {
      assert(!(((uintptr_t)dst) & 0xf));
      rgba8_copy_16_aligned_dst(d+ 0, s+ 0);
      return dst;
   }

   if (bytes == 64) {
      assert(!(((uintptr_t)dst) & 0xf));
      rgba8_copy_16_aligned_dst(d+ 0, s+ 0);
      rgba8_copy_16_aligned_dst(d+16, s+16);
      rgba8_copy_16_aligned_dst(d+32, s+32);
      rgba8_copy_16_aligned_dst(d+48, s+48);
      return dst;
   }

   /* Scalar fallback for partial spans: swap R and B one pixel at a time. */
   while (bytes >= 4) {
      d[0] = s[2];
      d[1] = s[1];
      d[2] = s[0];
      d[3] = s[3];
      d += 4;
      s += 4;
      bytes -= 4;
   }

   return dst;
}
/**
 * Copy RGBA to BGRA - swap R and B, with the source 16-byte aligned.
 */
static inline void *
rgba8_copy_aligned_src(void *dst, const void *src, size_t bytes)
{
   uint8_t *d = dst;
   uint8_t const *s = src;

   if (bytes == 16) {
      assert(!(((uintptr_t)src) & 0xf));
      rgba8_copy_16_aligned_src(d+ 0, s+ 0);
      return dst;
   }

   if (bytes == 64) {
      assert(!(((uintptr_t)src) & 0xf));
      rgba8_copy_16_aligned_src(d+ 0, s+ 0);
      rgba8_copy_16_aligned_src(d+16, s+16);
      rgba8_copy_16_aligned_src(d+32, s+32);
      rgba8_copy_16_aligned_src(d+48, s+48);
      return dst;
   }

   /* Scalar fallback for partial spans: swap R and B one pixel at a time. */
   while (bytes >= 4) {
      d[0] = s[2];
      d[1] = s[1];
      d[2] = s[0];
      d[3] = s[3];
      d += 4;
      s += 4;
      bytes -= 4;
   }

   return dst;
}
/**
 * Each row from y0 to y1 is copied in three parts: [x0,x1), [x1,x2), [x2,x3).
 * These ranges are in bytes, i.e. pixels * bytes-per-pixel.
 * The first and last ranges must be shorter than a "span" (the longest linear
 * stretch within a tile) and the middle must equal a whole number of spans.
 * Ranges may be empty.  The region copied must land entirely within one tile.
 * 'dst' is the start of the tile and 'src' is the corresponding
 * address to copy from, though copying begins at (x0, y0).
 * To enable swizzling 'swizzle_bit' must be 1<<6, otherwise zero.
 * Swizzling flips bit 6 in the copy destination offset, when certain other
 * bits are set in it.
 */
typedef void (*tile_copy_fn)(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3,
                             uint32_t y0, uint32_t y1,
                             char *dst, const char *src,
                             int32_t linear_pitch,
                             uint32_t swizzle_bit,
                             mem_copy_fn mem_copy);
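
/* For example, with a Y tile (16-byte spans), a request to copy bytes
 * [5, 100) of a row splits as x1 = ALIGN_UP(5, 16) = 16 and
 * x2 = ALIGN_DOWN(100, 16) = 96: a short head [5,16), five whole spans
 * [16,96), and a short tail [96,100).
 */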
/**
 * Copy texture data from linear to X tile layout.
 *
 * \copydoc tile_copy_fn
 */
static inline void
linear_to_xtiled(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3,
                 uint32_t y0, uint32_t y1,
                 char *dst, const char *src,
                 int32_t src_pitch,
                 uint32_t swizzle_bit,
                 mem_copy_fn mem_copy)
{
   /* The copy destination offset for each range copied is the sum of
    * an X offset 'x0' or 'xo' and a Y offset 'yo.'
    */
   uint32_t xo, yo;

   src += (ptrdiff_t)y0 * src_pitch;

   for (yo = y0 * xtile_width; yo < y1 * xtile_width; yo += xtile_width) {
      /* Bits 9 and 10 of the copy destination offset control swizzling.
       * Only 'yo' contributes to those bits in the total offset,
       * so calculate 'swizzle' just once per row.
       * Move bits 9 and 10 three and four places respectively down
       * to bit 6 and xor them.
       */
      uint32_t swizzle = ((yo >> 3) ^ (yo >> 4)) & swizzle_bit;

      mem_copy(dst + ((x0 + yo) ^ swizzle), src + x0, x1 - x0);

      for (xo = x1; xo < x2; xo += xtile_span) {
         mem_copy(dst + ((xo + yo) ^ swizzle), src + xo, xtile_span);
      }

      mem_copy(dst + ((xo + yo) ^ swizzle), src + x2, x3 - x2);

      src += src_pitch;
   }
}
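
/* Worked example of the swizzle above, with swizzle_bit = 1<<6: for row 1 of
 * an X tile, yo = 512, so bit 9 is set and ((512>>3) ^ (512>>4)) & 64 = 64,
 * flipping bit 6 of every destination offset in that row.  For row 3,
 * yo = 1536 has bits 9 and 10 both set; they cancel in the xor and bit 6 is
 * left alone.
 */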
/**
 * Copy texture data from linear to Y tile layout.
 *
 * \copydoc tile_copy_fn
 */
static inline void
linear_to_ytiled(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3,
                 uint32_t y0, uint32_t y1,
                 char *dst, const char *src,
                 int32_t src_pitch,
                 uint32_t swizzle_bit,
                 mem_copy_fn mem_copy)
{
   /* Y tiles consist of columns that are 'ytile_span' wide (and the same
    * height as the tile).  Thus the destination offset for (x,y) is the
    * sum of:
    *   (x % column_width)                    // position within column
    *   (x / column_width) * bytes_per_column // column number * bytes per column
    *   y * column_width                      // row within column
    *
    * The copy destination offset for each range copied is the sum of
    * an X offset 'xo0' or 'xo' and a Y offset 'yo.'
    */
   const uint32_t column_width = ytile_span;
   const uint32_t bytes_per_column = column_width * ytile_height;

   uint32_t xo0 = (x0 % ytile_span) + (x0 / ytile_span) * bytes_per_column;
   uint32_t xo1 = (x1 % ytile_span) + (x1 / ytile_span) * bytes_per_column;

   /* Bit 9 of the destination offset controls swizzling.
    * Only the X offset contributes to bit 9 of the total offset,
    * so swizzle can be calculated in advance for these X positions.
    * Move bit 9 three places down to bit 6.
    */
   uint32_t swizzle0 = (xo0 >> 3) & swizzle_bit;
   uint32_t swizzle1 = (xo1 >> 3) & swizzle_bit;

   uint32_t x, yo;

   src += (ptrdiff_t)y0 * src_pitch;

   for (yo = y0 * column_width; yo < y1 * column_width; yo += column_width) {
      uint32_t xo = xo1;
      uint32_t swizzle = swizzle1;

      mem_copy(dst + ((xo0 + yo) ^ swizzle0), src + x0, x1 - x0);

      /* Step by spans/columns.  As it happens, the swizzle bit flips
       * at each step so we don't need to calculate it explicitly.
       */
      for (x = x1; x < x2; x += ytile_span) {
         mem_copy(dst + ((xo + yo) ^ swizzle), src + x, ytile_span);
         xo += bytes_per_column;
         swizzle ^= swizzle_bit;
      }

      mem_copy(dst + ((xo + yo) ^ swizzle), src + x2, x3 - x2);

      src += src_pitch;
   }
}
/**
 * Copy texture data from X tile layout to linear.
 *
 * \copydoc tile_copy_fn
 */
static inline void
xtiled_to_linear(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3,
                 uint32_t y0, uint32_t y1,
                 char *dst, const char *src,
                 int32_t dst_pitch,
                 uint32_t swizzle_bit,
                 mem_copy_fn mem_copy)
{
   /* The copy source offset for each range copied is the sum of
    * an X offset 'x0' or 'xo' and a Y offset 'yo.'
    */
   uint32_t xo, yo;

   dst += (ptrdiff_t)y0 * dst_pitch;

   for (yo = y0 * xtile_width; yo < y1 * xtile_width; yo += xtile_width) {
      /* Bits 9 and 10 of the copy source offset control swizzling.
       * Only 'yo' contributes to those bits in the total offset,
       * so calculate 'swizzle' just once per row.
       * Move bits 9 and 10 three and four places respectively down
       * to bit 6 and xor them.
       */
      uint32_t swizzle = ((yo >> 3) ^ (yo >> 4)) & swizzle_bit;

      mem_copy(dst + x0, src + ((x0 + yo) ^ swizzle), x1 - x0);

      for (xo = x1; xo < x2; xo += xtile_span) {
         mem_copy(dst + xo, src + ((xo + yo) ^ swizzle), xtile_span);
      }

      mem_copy(dst + x2, src + ((xo + yo) ^ swizzle), x3 - x2);

      dst += dst_pitch;
   }
}
/**
 * Copy texture data from Y tile layout to linear.
 *
 * \copydoc tile_copy_fn
 */
static inline void
ytiled_to_linear(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3,
                 uint32_t y0, uint32_t y1,
                 char *dst, const char *src,
                 int32_t dst_pitch,
                 uint32_t swizzle_bit,
                 mem_copy_fn mem_copy)
{
   /* Y tiles consist of columns that are 'ytile_span' wide (and the same
    * height as the tile).  Thus the source offset for (x,y) is the sum of:
    *   (x % column_width)                    // position within column
    *   (x / column_width) * bytes_per_column // column number * bytes per column
    *   y * column_width                      // row within column
    *
    * The copy source offset for each range copied is the sum of
    * an X offset 'xo0' or 'xo' and a Y offset 'yo.'
    */
   const uint32_t column_width = ytile_span;
   const uint32_t bytes_per_column = column_width * ytile_height;

   uint32_t xo0 = (x0 % ytile_span) + (x0 / ytile_span) * bytes_per_column;
   uint32_t xo1 = (x1 % ytile_span) + (x1 / ytile_span) * bytes_per_column;

   /* Bit 9 of the source offset controls swizzling.
    * Only the X offset contributes to bit 9 of the total offset,
    * so swizzle can be calculated in advance for these X positions.
    * Move bit 9 three places down to bit 6.
    */
   uint32_t swizzle0 = (xo0 >> 3) & swizzle_bit;
   uint32_t swizzle1 = (xo1 >> 3) & swizzle_bit;

   uint32_t x, yo;

   dst += (ptrdiff_t)y0 * dst_pitch;

   for (yo = y0 * column_width; yo < y1 * column_width; yo += column_width) {
      uint32_t xo = xo1;
      uint32_t swizzle = swizzle1;

      mem_copy(dst + x0, src + ((xo0 + yo) ^ swizzle0), x1 - x0);

      /* Step by spans/columns.  As it happens, the swizzle bit flips
       * at each step so we don't need to calculate it explicitly.
       */
      for (x = x1; x < x2; x += ytile_span) {
         mem_copy(dst + x, src + ((xo + yo) ^ swizzle), ytile_span);
         xo += bytes_per_column;
         swizzle ^= swizzle_bit;
      }

      mem_copy(dst + x2, src + ((xo + yo) ^ swizzle), x3 - x2);

      dst += dst_pitch;
   }
}
/**
 * Copy texture data from linear to X tile layout, faster.
 *
 * Same as \ref linear_to_xtiled but faster, because it passes constant
 * parameters for common cases, allowing the compiler to inline code
 * optimized for those cases.
 *
 * \copydoc tile_copy_fn
 */
static void
linear_to_xtiled_faster(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3,
                        uint32_t y0, uint32_t y1,
                        char *dst, const char *src,
                        int32_t src_pitch,
                        uint32_t swizzle_bit,
                        mem_copy_fn mem_copy)
{
   if (x0 == 0 && x3 == xtile_width && y0 == 0 && y1 == xtile_height) {
      if (mem_copy == memcpy)
         return linear_to_xtiled(0, 0, xtile_width, xtile_width, 0, xtile_height,
                                 dst, src, src_pitch, swizzle_bit, memcpy);
      else if (mem_copy == rgba8_copy_aligned_dst)
         return linear_to_xtiled(0, 0, xtile_width, xtile_width, 0, xtile_height,
                                 dst, src, src_pitch, swizzle_bit,
                                 rgba8_copy_aligned_dst);
   } else {
      if (mem_copy == memcpy)
         return linear_to_xtiled(x0, x1, x2, x3, y0, y1,
                                 dst, src, src_pitch, swizzle_bit, memcpy);
      else if (mem_copy == rgba8_copy_aligned_dst)
         return linear_to_xtiled(x0, x1, x2, x3, y0, y1,
                                 dst, src, src_pitch, swizzle_bit,
                                 rgba8_copy_aligned_dst);
   }
   linear_to_xtiled(x0, x1, x2, x3, y0, y1,
                    dst, src, src_pitch, swizzle_bit, mem_copy);
}
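
/* Comparing 'mem_copy' against known functions and re-passing the matching
 * function as a literal lets the compiler see a compile-time-constant
 * mem_copy (and, in the full-tile case, constant bounds) at each call to the
 * static inline copier, so it can specialize the copy loops.  The same
 * pattern is used by the three _faster variants below.
 */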
/**
 * Copy texture data from linear to Y tile layout, faster.
 *
 * Same as \ref linear_to_ytiled but faster, because it passes constant
 * parameters for common cases, allowing the compiler to inline code
 * optimized for those cases.
 *
 * \copydoc tile_copy_fn
 */
static void
linear_to_ytiled_faster(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3,
                        uint32_t y0, uint32_t y1,
                        char *dst, const char *src,
                        int32_t src_pitch,
                        uint32_t swizzle_bit,
                        mem_copy_fn mem_copy)
{
   if (x0 == 0 && x3 == ytile_width && y0 == 0 && y1 == ytile_height) {
      if (mem_copy == memcpy)
         return linear_to_ytiled(0, 0, ytile_width, ytile_width, 0, ytile_height,
                                 dst, src, src_pitch, swizzle_bit, memcpy);
      else if (mem_copy == rgba8_copy_aligned_dst)
         return linear_to_ytiled(0, 0, ytile_width, ytile_width, 0, ytile_height,
                                 dst, src, src_pitch, swizzle_bit,
                                 rgba8_copy_aligned_dst);
   } else {
      if (mem_copy == memcpy)
         return linear_to_ytiled(x0, x1, x2, x3, y0, y1,
                                 dst, src, src_pitch, swizzle_bit, memcpy);
      else if (mem_copy == rgba8_copy_aligned_dst)
         return linear_to_ytiled(x0, x1, x2, x3, y0, y1,
                                 dst, src, src_pitch, swizzle_bit,
                                 rgba8_copy_aligned_dst);
   }
   linear_to_ytiled(x0, x1, x2, x3, y0, y1,
                    dst, src, src_pitch, swizzle_bit, mem_copy);
}
/**
 * Copy texture data from X tile layout to linear, faster.
 *
 * Same as \ref xtiled_to_linear but faster, because it passes constant
 * parameters for common cases, allowing the compiler to inline code
 * optimized for those cases.
 *
 * \copydoc tile_copy_fn
 */
static void
xtiled_to_linear_faster(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3,
                        uint32_t y0, uint32_t y1,
                        char *dst, const char *src,
                        int32_t dst_pitch,
                        uint32_t swizzle_bit,
                        mem_copy_fn mem_copy)
{
   if (x0 == 0 && x3 == xtile_width && y0 == 0 && y1 == xtile_height) {
      if (mem_copy == memcpy)
         return xtiled_to_linear(0, 0, xtile_width, xtile_width, 0, xtile_height,
                                 dst, src, dst_pitch, swizzle_bit, memcpy);
      else if (mem_copy == rgba8_copy_aligned_src)
         return xtiled_to_linear(0, 0, xtile_width, xtile_width, 0, xtile_height,
                                 dst, src, dst_pitch, swizzle_bit,
                                 rgba8_copy_aligned_src);
   } else {
      if (mem_copy == memcpy)
         return xtiled_to_linear(x0, x1, x2, x3, y0, y1,
                                 dst, src, dst_pitch, swizzle_bit, memcpy);
      else if (mem_copy == rgba8_copy_aligned_src)
         return xtiled_to_linear(x0, x1, x2, x3, y0, y1,
                                 dst, src, dst_pitch, swizzle_bit,
                                 rgba8_copy_aligned_src);
   }
   xtiled_to_linear(x0, x1, x2, x3, y0, y1,
                    dst, src, dst_pitch, swizzle_bit, mem_copy);
}
/**
 * Copy texture data from Y tile layout to linear, faster.
 *
 * Same as \ref ytiled_to_linear but faster, because it passes constant
 * parameters for common cases, allowing the compiler to inline code
 * optimized for those cases.
 *
 * \copydoc tile_copy_fn
 */
static void
ytiled_to_linear_faster(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3,
                        uint32_t y0, uint32_t y1,
                        char *dst, const char *src,
                        int32_t dst_pitch,
                        uint32_t swizzle_bit,
                        mem_copy_fn mem_copy)
{
   if (x0 == 0 && x3 == ytile_width && y0 == 0 && y1 == ytile_height) {
      if (mem_copy == memcpy)
         return ytiled_to_linear(0, 0, ytile_width, ytile_width, 0, ytile_height,
                                 dst, src, dst_pitch, swizzle_bit, memcpy);
      else if (mem_copy == rgba8_copy_aligned_src)
         return ytiled_to_linear(0, 0, ytile_width, ytile_width, 0, ytile_height,
                                 dst, src, dst_pitch, swizzle_bit,
                                 rgba8_copy_aligned_src);
   } else {
      if (mem_copy == memcpy)
         return ytiled_to_linear(x0, x1, x2, x3, y0, y1,
                                 dst, src, dst_pitch, swizzle_bit, memcpy);
      else if (mem_copy == rgba8_copy_aligned_src)
         return ytiled_to_linear(x0, x1, x2, x3, y0, y1,
                                 dst, src, dst_pitch, swizzle_bit,
                                 rgba8_copy_aligned_src);
   }
   ytiled_to_linear(x0, x1, x2, x3, y0, y1,
                    dst, src, dst_pitch, swizzle_bit, mem_copy);
}
/**
 * Copy from linear to tiled texture.
 *
 * Divide the region given by X range [xt1, xt2) and Y range [yt1, yt2) into
 * pieces that do not cross tile boundaries and copy each piece with a tile
 * copy function (\ref tile_copy_fn).
 * The X range is in bytes, i.e. pixels * bytes-per-pixel.
 * The Y range is in pixels (i.e. unitless).
 * 'dst' is the start of the texture and 'src' is the corresponding
 * address to copy from, though copying begins at (xt1, yt1).
 */
void
linear_to_tiled(uint32_t xt1, uint32_t xt2,
                uint32_t yt1, uint32_t yt2,
                char *dst, const char *src,
                uint32_t dst_pitch, int32_t src_pitch,
                bool has_swizzling,
                uint32_t tiling,
                mem_copy_fn mem_copy)
{
   tile_copy_fn tile_copy;
   uint32_t xt0, xt3;
   uint32_t yt0, yt3;
   uint32_t xt, yt;
   uint32_t tw, th, span;
   uint32_t swizzle_bit = has_swizzling ? 1<<6 : 0;

   if (tiling == I915_TILING_X) {
      tw = xtile_width;
      th = xtile_height;
      span = xtile_span;
      tile_copy = linear_to_xtiled_faster;
   } else if (tiling == I915_TILING_Y) {
      tw = ytile_width;
      th = ytile_height;
      span = ytile_span;
      tile_copy = linear_to_ytiled_faster;
   } else {
      unreachable("unsupported tiling");
   }

   /* Round out to tile boundaries. */
   xt0 = ALIGN_DOWN(xt1, tw);
   xt3 = ALIGN_UP  (xt2, tw);
   yt0 = ALIGN_DOWN(yt1, th);
   yt3 = ALIGN_UP  (yt2, th);

   /* Loop over all tiles to which we have something to copy.
    * 'xt' and 'yt' are the origin of the destination tile, whether copying
    * a full or partial tile.
    * tile_copy() copies one tile or partial tile.
    * Looping x inside y is the faster memory access pattern.
    */
   for (yt = yt0; yt < yt3; yt += th) {
      for (xt = xt0; xt < xt3; xt += tw) {
         /* The area to update is [x0,x3) x [y0,y1).
          * May not want the whole tile, hence the min and max.
          */
         uint32_t x0 = MAX2(xt1, xt);
         uint32_t y0 = MAX2(yt1, yt);
         uint32_t x3 = MIN2(xt2, xt + tw);
         uint32_t y1 = MIN2(yt2, yt + th);

         /* [x0,x3) is split into [x0,x1), [x1,x2), [x2,x3) such that
          * the middle interval is the longest span-aligned part.
          * The sub-ranges could be empty.
          */
         uint32_t x1, x2;
         x1 = ALIGN_UP(x0, span);
         if (x1 > x3)
            x1 = x2 = x3;
         else
            x2 = ALIGN_DOWN(x3, span);

         assert(x0 <= x1 && x1 <= x2 && x2 <= x3);
         assert(x1 - x0 < span && x3 - x2 < span);
         assert(x3 - x0 <= tw);
         assert((x2 - x1) % span == 0);

         /* Translate by (xt,yt) for single-tile copier. */
         tile_copy(x0-xt, x1-xt, x2-xt, x3-xt,
                   y0-yt, y1-yt,
                   dst + (ptrdiff_t)xt * th + (ptrdiff_t)yt * dst_pitch,
                   src + (ptrdiff_t)xt      + (ptrdiff_t)yt * src_pitch,
                   src_pitch,
                   swizzle_bit,
                   mem_copy);
      }
   }
}
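
/* For example, copying X range [100, 1000) into an X-tiled buffer (tw = 512)
 * rounds out to xt0 = 0 and xt3 = 1024, giving two tile columns: the tile at
 * xt = 0 receives bytes [100, 512) and the tile at xt = 512 receives
 * [512, 1000), i.e. [0, 488) in tile-local coordinates.
 */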
/**
 * Copy from tiled to linear texture.
 *
 * Divide the region given by X range [xt1, xt2) and Y range [yt1, yt2) into
 * pieces that do not cross tile boundaries and copy each piece with a tile
 * copy function (\ref tile_copy_fn).
 * The X range is in bytes, i.e. pixels * bytes-per-pixel.
 * The Y range is in pixels (i.e. unitless).
 * 'dst' is the start of the texture and 'src' is the corresponding
 * address to copy from, though copying begins at (xt1, yt1).
 */
void
tiled_to_linear(uint32_t xt1, uint32_t xt2,
                uint32_t yt1, uint32_t yt2,
                char *dst, const char *src,
                int32_t dst_pitch, uint32_t src_pitch,
                bool has_swizzling,
                uint32_t tiling,
                mem_copy_fn mem_copy)
{
   tile_copy_fn tile_copy;
   uint32_t xt0, xt3;
   uint32_t yt0, yt3;
   uint32_t xt, yt;
   uint32_t tw, th, span;
   uint32_t swizzle_bit = has_swizzling ? 1<<6 : 0;

   if (tiling == I915_TILING_X) {
      tw = xtile_width;
      th = xtile_height;
      span = xtile_span;
      tile_copy = xtiled_to_linear_faster;
   } else if (tiling == I915_TILING_Y) {
      tw = ytile_width;
      th = ytile_height;
      span = ytile_span;
      tile_copy = ytiled_to_linear_faster;
   } else {
      unreachable("unsupported tiling");
   }

   /* Round out to tile boundaries. */
   xt0 = ALIGN_DOWN(xt1, tw);
   xt3 = ALIGN_UP  (xt2, tw);
   yt0 = ALIGN_DOWN(yt1, th);
   yt3 = ALIGN_UP  (yt2, th);

   /* Loop over all tiles from which we have something to copy.
    * 'xt' and 'yt' are the origin of the source tile, whether copying
    * a full or partial tile.
    * tile_copy() copies one tile or partial tile.
    * Looping x inside y is the faster memory access pattern.
    */
   for (yt = yt0; yt < yt3; yt += th) {
      for (xt = xt0; xt < xt3; xt += tw) {
         /* The area to update is [x0,x3) x [y0,y1).
          * May not want the whole tile, hence the min and max.
          */
         uint32_t x0 = MAX2(xt1, xt);
         uint32_t y0 = MAX2(yt1, yt);
         uint32_t x3 = MIN2(xt2, xt + tw);
         uint32_t y1 = MIN2(yt2, yt + th);

         /* [x0,x3) is split into [x0,x1), [x1,x2), [x2,x3) such that
          * the middle interval is the longest span-aligned part.
          * The sub-ranges could be empty.
          */
         uint32_t x1, x2;
         x1 = ALIGN_UP(x0, span);
         if (x1 > x3)
            x1 = x2 = x3;
         else
            x2 = ALIGN_DOWN(x3, span);

         assert(x0 <= x1 && x1 <= x2 && x2 <= x3);
         assert(x1 - x0 < span && x3 - x2 < span);
         assert(x3 - x0 <= tw);
         assert((x2 - x1) % span == 0);

         /* Translate by (xt,yt) for single-tile copier. */
         tile_copy(x0-xt, x1-xt, x2-xt, x3-xt,
                   y0-yt, y1-yt,
                   dst + (ptrdiff_t)xt      + (ptrdiff_t)yt * dst_pitch,
                   src + (ptrdiff_t)xt * th + (ptrdiff_t)yt * src_pitch,
                   dst_pitch,
                   swizzle_bit,
                   mem_copy);
      }
   }
}
/**
 * Determine which copy function to use for the given format combination.
 *
 * The only two possible copy functions which are ever returned are a
 * direct memcpy and a RGBA <-> BGRA copy function.  Since RGBA -> BGRA and
 * BGRA -> RGBA are exactly the same operation (and memcpy is obviously
 * symmetric), it doesn't matter whether the copy is from the tiled image
 * to the untiled or vice versa.  The copy function required is the same in
 * either case so this function can be used.
 *
 * \param[in]  tiledFormat The format of the tiled image
 * \param[in]  format The GL format of the client data
 * \param[in]  type The GL type of the client data
 * \param[out] mem_copy Will be set to either the standard library's memcpy
 *                      or a different copy function that performs an
 *                      RGBA to BGRA conversion
 * \param[out] cpp Number of bytes per pixel
 *
 * \return true if the format and type combination are valid
 */
bool intel_get_memcpy(mesa_format tiledFormat, GLenum format,
                      GLenum type, mem_copy_fn *mem_copy, uint32_t *cpp,
                      enum intel_memcpy_direction direction)
{
   if (type == GL_UNSIGNED_INT_8_8_8_8_REV &&
       !(format == GL_RGBA || format == GL_BGRA))
      return false; /* Invalid type/format combination */

   if ((tiledFormat == MESA_FORMAT_L_UNORM8 && format == GL_LUMINANCE) ||
       (tiledFormat == MESA_FORMAT_A_UNORM8 && format == GL_ALPHA)) {
      *cpp = 1;
      *mem_copy = memcpy;
   } else if ((tiledFormat == MESA_FORMAT_B8G8R8A8_UNORM) ||
              (tiledFormat == MESA_FORMAT_B8G8R8X8_UNORM)) {
      *cpp = 4;
      if (format == GL_BGRA) {
         *mem_copy = memcpy;
      } else if (format == GL_RGBA) {
         *mem_copy = direction == INTEL_UPLOAD ? rgba8_copy_aligned_dst
                                               : rgba8_copy_aligned_src;
      }
   } else if ((tiledFormat == MESA_FORMAT_R8G8B8A8_UNORM) ||
              (tiledFormat == MESA_FORMAT_R8G8B8X8_UNORM)) {
      *cpp = 4;
      if (format == GL_BGRA) {
         /* Copying from RGBA to BGRA is the same as BGRA to RGBA so we can
          * use the same function.
          */
         *mem_copy = direction == INTEL_UPLOAD ? rgba8_copy_aligned_dst
                                               : rgba8_copy_aligned_src;
      } else if (format == GL_RGBA) {
) {