/*
 * Mesa 3-D graphics library
 *
 * Copyright 2012 Intel Corporation
 * Copyright 2013 Google
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Chad Versace <chad.versace@linux.intel.com>
 *    Frank Henigman <fjhenigman@google.com>
 */
#include "util/macros.h"
#include "main/macros.h"

#if defined(__SSSE3__)
#include <tmmintrin.h>
#elif defined(__SSE2__)
#include <emmintrin.h>
#endif

#define FILE_DEBUG_FLAG DEBUG_TEXTURE

#define ALIGN_DOWN(a, b) ROUND_DOWN_TO(a, b)
#define ALIGN_UP(a, b) ALIGN(a, b)
/* Tile dimensions.  Width and span are in bytes, height is in pixels (i.e.
 * unitless).  A "span" is the largest number of bytes we can copy from linear
 * to tiled without needing to calculate a new destination address.
 */
static const uint32_t xtile_width = 512;
static const uint32_t xtile_height = 8;
static const uint32_t xtile_span = 64;
static const uint32_t ytile_width = 128;
static const uint32_t ytile_height = 32;
static const uint32_t ytile_span = 16;
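
/* Worked example (illustrative, added by the editor): both tile footprints
 * are 4 KB -- an X tile is 512 bytes x 8 rows and a Y tile is 128 bytes x
 * 32 rows.  A Y tile is built from 16-byte-wide columns, so at most
 * ytile_span = 16 bytes of a linear row are contiguous in the tile.  An X
 * tile keeps whole 512-byte rows contiguous, but xtile_span = 64 matches the
 * 64-byte (bit 6) granularity of address swizzling, so a span never
 * straddles a swizzled boundary.
 */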
static inline uint32_t
ror(uint32_t n, uint32_t d)
{
   return (n >> d) | (n << (32 - d));
}
// bswap32 already exists as a macro on some platforms (FreeBSD)
#ifndef bswap32
static inline uint32_t
bswap32(uint32_t n)
{
#if defined(HAVE___BUILTIN_BSWAP32)
   return __builtin_bswap32(n);
#else
   return (n >> 24) |
          ((n >> 8) & 0x0000ff00) |
          ((n << 8) & 0x00ff0000) |
          (n << 24);
#endif
}
#endif
/**
 * Copy RGBA to BGRA - swap R and B.
 */
static inline void *
rgba8_copy(void *dst, const void *src, size_t bytes)
{
   uint32_t *d = dst;
   uint32_t const *s = src;

   assert(bytes % 4 == 0);

   while (bytes >= 4) {
      *d = ror(bswap32(*s), 8);
      d += 1;
      s += 1;
      bytes -= 4;
   }
   return dst;
}
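
/* Worked example (illustrative, added by the editor): for little-endian RGBA
 * bytes R,G,B,A in memory, *s reads as 0xAABBGGRR.  bswap32 gives 0xRRGGBBAA
 * and ror(..., 8) gives 0xAARRGGBB, which stores back as the bytes B,G,R,A,
 * i.e. BGRA with R and B swapped.
 */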
#ifdef __SSSE3__
static const uint8_t rgba8_permutation[16] =
   { 2,1,0,3, 6,5,4,7, 10,9,8,11, 14,13,12,15 };

static inline void
rgba8_copy_16_aligned_dst(void *dst, const void *src)
{
   _mm_store_si128(dst,
                   _mm_shuffle_epi8(_mm_loadu_si128(src),
                                    *(__m128i *)rgba8_permutation));
}

static inline void
rgba8_copy_16_aligned_src(void *dst, const void *src)
{
   _mm_storeu_si128(dst,
                    _mm_shuffle_epi8(_mm_load_si128(src),
                                     *(__m128i *)rgba8_permutation));
}
#elif defined(__SSE2__)
static inline void
rgba8_copy_16_aligned_dst(void *dst, const void *src)
{
   __m128i srcreg, dstreg, agmask, ag, rb, br;

   agmask = _mm_set1_epi32(0xFF00FF00);
   srcreg = _mm_loadu_si128((__m128i *)src);

   rb = _mm_andnot_si128(agmask, srcreg);
   ag = _mm_and_si128(agmask, srcreg);
   br = _mm_shufflehi_epi16(_mm_shufflelo_epi16(rb, _MM_SHUFFLE(2, 3, 0, 1)),
                            _MM_SHUFFLE(2, 3, 0, 1));
   dstreg = _mm_or_si128(ag, br);

   _mm_store_si128((__m128i *)dst, dstreg);
}

static inline void
rgba8_copy_16_aligned_src(void *dst, const void *src)
{
   __m128i srcreg, dstreg, agmask, ag, rb, br;

   agmask = _mm_set1_epi32(0xFF00FF00);
   srcreg = _mm_load_si128((__m128i *)src);

   rb = _mm_andnot_si128(agmask, srcreg);
   ag = _mm_and_si128(agmask, srcreg);
   br = _mm_shufflehi_epi16(_mm_shufflelo_epi16(rb, _MM_SHUFFLE(2, 3, 0, 1)),
                            _MM_SHUFFLE(2, 3, 0, 1));
   dstreg = _mm_or_si128(ag, br);

   _mm_storeu_si128((__m128i *)dst, dstreg);
}
#endif
/**
 * Copy RGBA to BGRA - swap R and B, with the destination 16-byte aligned.
 */
static inline void *
rgba8_copy_aligned_dst(void *dst, const void *src, size_t bytes)
{
   assert(bytes == 0 || !(((uintptr_t)dst) & 0xf));

#if defined(__SSSE3__) || defined(__SSE2__)
   if (bytes == 64) {
      rgba8_copy_16_aligned_dst(dst +  0, src +  0);
      rgba8_copy_16_aligned_dst(dst + 16, src + 16);
      rgba8_copy_16_aligned_dst(dst + 32, src + 32);
      rgba8_copy_16_aligned_dst(dst + 48, src + 48);
      return dst;
   }

   while (bytes >= 16) {
      rgba8_copy_16_aligned_dst(dst, src);
      src += 16;
      dst += 16;
      bytes -= 16;
   }
#endif

   rgba8_copy(dst, src, bytes);

   return dst;
}
/**
 * Copy RGBA to BGRA - swap R and B, with the source 16-byte aligned.
 */
static inline void *
rgba8_copy_aligned_src(void *dst, const void *src, size_t bytes)
{
   assert(bytes == 0 || !(((uintptr_t)src) & 0xf));

#if defined(__SSSE3__) || defined(__SSE2__)
   if (bytes == 64) {
      rgba8_copy_16_aligned_src(dst +  0, src +  0);
      rgba8_copy_16_aligned_src(dst + 16, src + 16);
      rgba8_copy_16_aligned_src(dst + 32, src + 32);
      rgba8_copy_16_aligned_src(dst + 48, src + 48);
      return dst;
   }

   while (bytes >= 16) {
      rgba8_copy_16_aligned_src(dst, src);
      src += 16;
      dst += 16;
      bytes -= 16;
   }
#endif

   rgba8_copy(dst, src, bytes);

   return dst;
}
/**
 * Each row from y0 to y1 is copied in three parts: [x0,x1), [x1,x2), [x2,x3).
 * These ranges are in bytes, i.e. pixels * bytes-per-pixel.
 * The first and last ranges must be shorter than a "span" (the longest linear
 * stretch within a tile) and the middle must equal a whole number of spans.
 * Ranges may be empty.  The region copied must land entirely within one tile.
 * 'dst' is the start of the tile and 'src' is the corresponding
 * address to copy from, though copying begins at (x0, y0).
 * To enable swizzling 'swizzle_bit' must be 1<<6, otherwise zero.
 * Swizzling flips bit 6 in the copy destination offset, when certain other
 * bits are set in it.
 */
typedef void (*tile_copy_fn)(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3,
                             uint32_t y0, uint32_t y1,
                             char *dst, const char *src,
                             int32_t linear_pitch,
                             uint32_t swizzle_bit,
                             isl_memcpy_type copy_type);
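
/* Worked example (illustrative, added by the editor): with swizzle_bit = 1<<6
 * and the X-tile scheme below (bit 9 XOR bit 10), a destination offset of
 * 0x200 (bit 9 set, bit 10 clear) is swizzled to 0x200 ^ 0x40 = 0x240, while
 * 0x600 (bits 9 and 10 both set) is left unchanged because the two bits
 * cancel.
 */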
/**
 * Copy texture data from linear to X tile layout.
 *
 * \copydoc tile_copy_fn
 *
 * The mem_copy parameters allow the user to specify an alternative mem_copy
 * function that, for instance, may do RGBA -> BGRA swizzling.  The first
 * function must handle any memory alignment while the second function must
 * only handle 16-byte alignment in whichever side (source or destination) is
 * tiled.
 */
static inline void
linear_to_xtiled(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3,
                 uint32_t y0, uint32_t y1,
                 char *dst, const char *src,
                 int32_t src_pitch,
                 uint32_t swizzle_bit,
                 isl_mem_copy_fn mem_copy,
                 isl_mem_copy_fn mem_copy_align16)
{
   /* The copy destination offset for each range copied is the sum of
    * an X offset 'x0' or 'xo' and a Y offset 'yo.'
    */
   uint32_t xo, yo;

   src += (ptrdiff_t)y0 * src_pitch;

   for (yo = y0 * xtile_width; yo < y1 * xtile_width; yo += xtile_width) {
      /* Bits 9 and 10 of the copy destination offset control swizzling.
       * Only 'yo' contributes to those bits in the total offset,
       * so calculate 'swizzle' just once per row.
       * Move bits 9 and 10 three and four places respectively down
       * to bit 6 and xor them.
       */
      uint32_t swizzle = ((yo >> 3) ^ (yo >> 4)) & swizzle_bit;

      mem_copy(dst + ((x0 + yo) ^ swizzle), src + x0, x1 - x0);

      for (xo = x1; xo < x2; xo += xtile_span) {
         mem_copy_align16(dst + ((xo + yo) ^ swizzle), src + xo, xtile_span);
      }

      mem_copy_align16(dst + ((xo + yo) ^ swizzle), src + x2, x3 - x2);

      src += src_pitch;
   }
}
/**
 * Copy texture data from linear to Y tile layout.
 *
 * \copydoc tile_copy_fn
 */
static inline void
linear_to_ytiled(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3,
                 uint32_t y0, uint32_t y3,
                 char *dst, const char *src,
                 int32_t src_pitch,
                 uint32_t swizzle_bit,
                 isl_mem_copy_fn mem_copy,
                 isl_mem_copy_fn mem_copy_align16)
{
   /* Y tiles consist of columns that are 'ytile_span' wide (and the same height
    * as the tile).  Thus the destination offset for (x,y) is the sum of:
    *   (x % column_width)                    // position within column
    *   (x / column_width) * bytes_per_column // column number * bytes per column
    *   y * column_width
    *
    * The copy destination offset for each range copied is the sum of
    * an X offset 'xo0' or 'xo' and a Y offset 'yo.'
    */
   const uint32_t column_width = ytile_span;
   const uint32_t bytes_per_column = column_width * ytile_height;
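
   /* Worked example (illustrative, added by the editor): with ytile_span = 16
    * and ytile_height = 32, bytes_per_column = 512.  A byte at (x = 40, y = 3)
    * within the tile therefore lands at (40 % 16) + (40 / 16) * 512 + 3 * 16 =
    * 8 + 1024 + 48 = 1080 before any swizzling.
    */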
   uint32_t y1 = MIN2(y3, ALIGN_UP(y0, 4));
   uint32_t y2 = MAX2(y1, ALIGN_DOWN(y3, 4));

   uint32_t xo0 = (x0 % ytile_span) + (x0 / ytile_span) * bytes_per_column;
   uint32_t xo1 = (x1 % ytile_span) + (x1 / ytile_span) * bytes_per_column;
   /* Bit 9 of the destination offset controls swizzling.
    * Only the X offset contributes to bit 9 of the total offset,
    * so swizzle can be calculated in advance for these X positions.
    * Move bit 9 three places down to bit 6.
    */
   uint32_t swizzle0 = (xo0 >> 3) & swizzle_bit;
   uint32_t swizzle1 = (xo1 >> 3) & swizzle_bit;
   uint32_t x, yo;

   src += (ptrdiff_t)y0 * src_pitch;

   for (yo = y0 * column_width; yo < y1 * column_width; yo += column_width) {
      uint32_t xo = xo1;
      uint32_t swizzle = swizzle1;

      mem_copy(dst + ((xo0 + yo) ^ swizzle0), src + x0, x1 - x0);

      /* Step by spans/columns.  As it happens, the swizzle bit flips
       * at each step so we don't need to calculate it explicitly.
       */
      for (x = x1; x < x2; x += ytile_span) {
         mem_copy_align16(dst + ((xo + yo) ^ swizzle), src + x, ytile_span);
         xo += bytes_per_column;
         swizzle ^= swizzle_bit;
      }

      mem_copy_align16(dst + ((xo + yo) ^ swizzle), src + x2, x3 - x2);

      src += src_pitch;
   }
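
   /* Note (added for clarity, not in the original comments): stepping 'xo' by
    * bytes_per_column = 512 toggles bit 9 of the tiled offset on every step,
    * and bit 9 is exactly the bit that feeds the swizzle, so these loops just
    * XOR 'swizzle' with 'swizzle_bit' per column instead of recomputing
    * (xo >> 3) & swizzle_bit.
    */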
   for (yo = y1 * column_width; yo < y2 * column_width; yo += 4 * column_width) {
      uint32_t xo = xo1;
      uint32_t swizzle = swizzle1;

      mem_copy(dst + ((xo0 + yo + 0 * column_width) ^ swizzle0), src + x0 + 0 * src_pitch, x1 - x0);
      mem_copy(dst + ((xo0 + yo + 1 * column_width) ^ swizzle0), src + x0 + 1 * src_pitch, x1 - x0);
      mem_copy(dst + ((xo0 + yo + 2 * column_width) ^ swizzle0), src + x0 + 2 * src_pitch, x1 - x0);
      mem_copy(dst + ((xo0 + yo + 3 * column_width) ^ swizzle0), src + x0 + 3 * src_pitch, x1 - x0);

      /* Step by spans/columns.  As it happens, the swizzle bit flips
       * at each step so we don't need to calculate it explicitly.
       */
      for (x = x1; x < x2; x += ytile_span) {
         mem_copy_align16(dst + ((xo + yo + 0 * column_width) ^ swizzle), src + x + 0 * src_pitch, ytile_span);
         mem_copy_align16(dst + ((xo + yo + 1 * column_width) ^ swizzle), src + x + 1 * src_pitch, ytile_span);
         mem_copy_align16(dst + ((xo + yo + 2 * column_width) ^ swizzle), src + x + 2 * src_pitch, ytile_span);
         mem_copy_align16(dst + ((xo + yo + 3 * column_width) ^ swizzle), src + x + 3 * src_pitch, ytile_span);
         xo += bytes_per_column;
         swizzle ^= swizzle_bit;
      }

      mem_copy_align16(dst + ((xo + yo + 0 * column_width) ^ swizzle), src + x2 + 0 * src_pitch, x3 - x2);
      mem_copy_align16(dst + ((xo + yo + 1 * column_width) ^ swizzle), src + x2 + 1 * src_pitch, x3 - x2);
      mem_copy_align16(dst + ((xo + yo + 2 * column_width) ^ swizzle), src + x2 + 2 * src_pitch, x3 - x2);
      mem_copy_align16(dst + ((xo + yo + 3 * column_width) ^ swizzle), src + x2 + 3 * src_pitch, x3 - x2);

      src += 4 * src_pitch;
   }
   for (yo = y2 * column_width; yo < y3 * column_width; yo += column_width) {
      uint32_t xo = xo1;
      uint32_t swizzle = swizzle1;

      mem_copy(dst + ((xo0 + yo) ^ swizzle0), src + x0, x1 - x0);

      /* Step by spans/columns.  As it happens, the swizzle bit flips
       * at each step so we don't need to calculate it explicitly.
       */
      for (x = x1; x < x2; x += ytile_span) {
         mem_copy_align16(dst + ((xo + yo) ^ swizzle), src + x, ytile_span);
         xo += bytes_per_column;
         swizzle ^= swizzle_bit;
      }

      mem_copy_align16(dst + ((xo + yo) ^ swizzle), src + x2, x3 - x2);

      src += src_pitch;
   }
}
/**
 * Copy texture data from X tile layout to linear.
 *
 * \copydoc tile_copy_fn
 */
static inline void
xtiled_to_linear(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3,
                 uint32_t y0, uint32_t y1,
                 char *dst, const char *src,
                 int32_t dst_pitch,
                 uint32_t swizzle_bit,
                 isl_mem_copy_fn mem_copy,
                 isl_mem_copy_fn mem_copy_align16)
{
   /* The copy source offset for each range copied is the sum of
    * an X offset 'x0' or 'xo' and a Y offset 'yo.'
    */
   uint32_t xo, yo;

   dst += (ptrdiff_t)y0 * dst_pitch;

   for (yo = y0 * xtile_width; yo < y1 * xtile_width; yo += xtile_width) {
      /* Bits 9 and 10 of the tiled source offset control swizzling.
       * Only 'yo' contributes to those bits in the total offset,
       * so calculate 'swizzle' just once per row.
       * Move bits 9 and 10 three and four places respectively down
       * to bit 6 and xor them.
       */
      uint32_t swizzle = ((yo >> 3) ^ (yo >> 4)) & swizzle_bit;

      mem_copy(dst + x0, src + ((x0 + yo) ^ swizzle), x1 - x0);

      for (xo = x1; xo < x2; xo += xtile_span) {
         mem_copy_align16(dst + xo, src + ((xo + yo) ^ swizzle), xtile_span);
      }

      mem_copy_align16(dst + x2, src + ((xo + yo) ^ swizzle), x3 - x2);

      dst += dst_pitch;
   }
}
/**
 * Copy texture data from Y tile layout to linear.
 *
 * \copydoc tile_copy_fn
 */
static inline void
ytiled_to_linear(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3,
                 uint32_t y0, uint32_t y3,
                 char *dst, const char *src,
                 int32_t dst_pitch,
                 uint32_t swizzle_bit,
                 isl_mem_copy_fn mem_copy,
                 isl_mem_copy_fn mem_copy_align16)
{
   /* Y tiles consist of columns that are 'ytile_span' wide (and the same height
    * as the tile).  Thus the source offset for (x,y) is the sum of:
    *   (x % column_width)                    // position within column
    *   (x / column_width) * bytes_per_column // column number * bytes per column
    *   y * column_width
    *
    * The copy source offset for each range copied is the sum of
    * an X offset 'xo0' or 'xo' and a Y offset 'yo.'
    */
   const uint32_t column_width = ytile_span;
   const uint32_t bytes_per_column = column_width * ytile_height;
   uint32_t y1 = MIN2(y3, ALIGN_UP(y0, 4));
   uint32_t y2 = MAX2(y1, ALIGN_DOWN(y3, 4));

   uint32_t xo0 = (x0 % ytile_span) + (x0 / ytile_span) * bytes_per_column;
   uint32_t xo1 = (x1 % ytile_span) + (x1 / ytile_span) * bytes_per_column;
   /* Bit 9 of the tiled source offset controls swizzling.
    * Only the X offset contributes to bit 9 of the total offset,
    * so swizzle can be calculated in advance for these X positions.
    * Move bit 9 three places down to bit 6.
    */
   uint32_t swizzle0 = (xo0 >> 3) & swizzle_bit;
   uint32_t swizzle1 = (xo1 >> 3) & swizzle_bit;
   uint32_t x, yo;

   dst += (ptrdiff_t)y0 * dst_pitch;

   for (yo = y0 * column_width; yo < y1 * column_width; yo += column_width) {
      uint32_t xo = xo1;
      uint32_t swizzle = swizzle1;

      mem_copy(dst + x0, src + ((xo0 + yo) ^ swizzle0), x1 - x0);

      /* Step by spans/columns.  As it happens, the swizzle bit flips
       * at each step so we don't need to calculate it explicitly.
       */
      for (x = x1; x < x2; x += ytile_span) {
         mem_copy_align16(dst + x, src + ((xo + yo) ^ swizzle), ytile_span);
         xo += bytes_per_column;
         swizzle ^= swizzle_bit;
      }

      mem_copy_align16(dst + x2, src + ((xo + yo) ^ swizzle), x3 - x2);

      dst += dst_pitch;
   }
   for (yo = y1 * column_width; yo < y2 * column_width; yo += 4 * column_width) {
      uint32_t xo = xo1;
      uint32_t swizzle = swizzle1;

      mem_copy(dst + x0 + 0 * dst_pitch, src + ((xo0 + yo + 0 * column_width) ^ swizzle0), x1 - x0);
      mem_copy(dst + x0 + 1 * dst_pitch, src + ((xo0 + yo + 1 * column_width) ^ swizzle0), x1 - x0);
      mem_copy(dst + x0 + 2 * dst_pitch, src + ((xo0 + yo + 2 * column_width) ^ swizzle0), x1 - x0);
      mem_copy(dst + x0 + 3 * dst_pitch, src + ((xo0 + yo + 3 * column_width) ^ swizzle0), x1 - x0);

      /* Step by spans/columns.  As it happens, the swizzle bit flips
       * at each step so we don't need to calculate it explicitly.
       */
      for (x = x1; x < x2; x += ytile_span) {
         mem_copy_align16(dst + x + 0 * dst_pitch, src + ((xo + yo + 0 * column_width) ^ swizzle), ytile_span);
         mem_copy_align16(dst + x + 1 * dst_pitch, src + ((xo + yo + 1 * column_width) ^ swizzle), ytile_span);
         mem_copy_align16(dst + x + 2 * dst_pitch, src + ((xo + yo + 2 * column_width) ^ swizzle), ytile_span);
         mem_copy_align16(dst + x + 3 * dst_pitch, src + ((xo + yo + 3 * column_width) ^ swizzle), ytile_span);
         xo += bytes_per_column;
         swizzle ^= swizzle_bit;
      }

      mem_copy_align16(dst + x2 + 0 * dst_pitch, src + ((xo + yo + 0 * column_width) ^ swizzle), x3 - x2);
      mem_copy_align16(dst + x2 + 1 * dst_pitch, src + ((xo + yo + 1 * column_width) ^ swizzle), x3 - x2);
      mem_copy_align16(dst + x2 + 2 * dst_pitch, src + ((xo + yo + 2 * column_width) ^ swizzle), x3 - x2);
      mem_copy_align16(dst + x2 + 3 * dst_pitch, src + ((xo + yo + 3 * column_width) ^ swizzle), x3 - x2);

      dst += 4 * dst_pitch;
   }
   for (yo = y2 * column_width; yo < y3 * column_width; yo += column_width) {
      uint32_t xo = xo1;
      uint32_t swizzle = swizzle1;

      mem_copy(dst + x0, src + ((xo0 + yo) ^ swizzle0), x1 - x0);

      /* Step by spans/columns.  As it happens, the swizzle bit flips
       * at each step so we don't need to calculate it explicitly.
       */
      for (x = x1; x < x2; x += ytile_span) {
         mem_copy_align16(dst + x, src + ((xo + yo) ^ swizzle), ytile_span);
         xo += bytes_per_column;
         swizzle ^= swizzle_bit;
      }

      mem_copy_align16(dst + x2, src + ((xo + yo) ^ swizzle), x3 - x2);

      dst += dst_pitch;
   }
}
#if defined(INLINE_SSE41)
static ALWAYS_INLINE void *
_memcpy_streaming_load(void *dest, const void *src, size_t count)
{
   if (count == 16) {
      __m128i val = _mm_stream_load_si128((__m128i *)src);
      _mm_storeu_si128((__m128i *)dest, val);
      return dest;
   } else if (count == 64) {
      __m128i val0 = _mm_stream_load_si128(((__m128i *)src) + 0);
      __m128i val1 = _mm_stream_load_si128(((__m128i *)src) + 1);
      __m128i val2 = _mm_stream_load_si128(((__m128i *)src) + 2);
      __m128i val3 = _mm_stream_load_si128(((__m128i *)src) + 3);
      _mm_storeu_si128(((__m128i *)dest) + 0, val0);
      _mm_storeu_si128(((__m128i *)dest) + 1, val1);
      _mm_storeu_si128(((__m128i *)dest) + 2, val2);
      _mm_storeu_si128(((__m128i *)dest) + 3, val3);
      return dest;
   } else {
      assert(count < 64); /* and (count < 16) for ytiled */
      return memcpy(dest, src, count);
   }
}
#endif
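
/* Note (added for clarity, not part of the original source): the streaming
 * loads above require a 16-byte aligned source, so this helper is used as the
 * "mem_copy_align16" argument of the tiled-to-linear paths, where the tiled
 * source addresses are span aligned; the linear destination may be
 * unaligned, hence _mm_storeu_si128.
 */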
static isl_mem_copy_fn
choose_copy_function(isl_memcpy_type copy_type)
{
   switch (copy_type) {
   case ISL_MEMCPY:
      return memcpy;
   case ISL_MEMCPY_BGRA8:
      return rgba8_copy;
   case ISL_MEMCPY_STREAMING_LOAD:
#if defined(INLINE_SSE41)
      return _memcpy_streaming_load;
#else
      unreachable("ISL_MEMCPY_STREAMING_LOAD requires sse4.1");
#endif
   case ISL_MEMCPY_INVALID:
      unreachable("invalid copy_type");
   }
   unreachable("unhandled copy_type");
}
/**
 * Copy texture data from linear to X tile layout, faster.
 *
 * Same as \ref linear_to_xtiled but faster, because it passes constant
 * parameters for common cases, allowing the compiler to inline code
 * optimized for those cases.
 *
 * \copydoc tile_copy_fn
 */
static FLATTEN void
linear_to_xtiled_faster(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3,
                        uint32_t y0, uint32_t y1,
                        char *dst, const char *src,
                        int32_t src_pitch,
                        uint32_t swizzle_bit,
                        isl_memcpy_type copy_type)
{
   isl_mem_copy_fn mem_copy = choose_copy_function(copy_type);
   if (x0 == 0 && x3 == xtile_width && y0 == 0 && y1 == xtile_height) {
      if (mem_copy == memcpy)
         return linear_to_xtiled(0, 0, xtile_width, xtile_width, 0, xtile_height,
                                 dst, src, src_pitch, swizzle_bit, memcpy, memcpy);
      else if (mem_copy == rgba8_copy)
         return linear_to_xtiled(0, 0, xtile_width, xtile_width, 0, xtile_height,
                                 dst, src, src_pitch, swizzle_bit,
                                 rgba8_copy, rgba8_copy_aligned_dst);
      else
         unreachable("not reached");
   } else {
      if (mem_copy == memcpy)
         return linear_to_xtiled(x0, x1, x2, x3, y0, y1,
                                 dst, src, src_pitch, swizzle_bit,
                                 memcpy, memcpy);
      else if (mem_copy == rgba8_copy)
         return linear_to_xtiled(x0, x1, x2, x3, y0, y1,
                                 dst, src, src_pitch, swizzle_bit,
                                 rgba8_copy, rgba8_copy_aligned_dst);
      else
         unreachable("not reached");
   }
   linear_to_xtiled(x0, x1, x2, x3, y0, y1,
                    dst, src, src_pitch, swizzle_bit, mem_copy, mem_copy);
}
/**
 * Copy texture data from linear to Y tile layout, faster.
 *
 * Same as \ref linear_to_ytiled but faster, because it passes constant
 * parameters for common cases, allowing the compiler to inline code
 * optimized for those cases.
 *
 * \copydoc tile_copy_fn
 */
static FLATTEN void
linear_to_ytiled_faster(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3,
                        uint32_t y0, uint32_t y1,
                        char *dst, const char *src,
                        int32_t src_pitch,
                        uint32_t swizzle_bit,
                        isl_memcpy_type copy_type)
{
   isl_mem_copy_fn mem_copy = choose_copy_function(copy_type);
   if (x0 == 0 && x3 == ytile_width && y0 == 0 && y1 == ytile_height) {
      if (mem_copy == memcpy)
         return linear_to_ytiled(0, 0, ytile_width, ytile_width, 0, ytile_height,
                                 dst, src, src_pitch, swizzle_bit, memcpy, memcpy);
      else if (mem_copy == rgba8_copy)
         return linear_to_ytiled(0, 0, ytile_width, ytile_width, 0, ytile_height,
                                 dst, src, src_pitch, swizzle_bit,
                                 rgba8_copy, rgba8_copy_aligned_dst);
      else
         unreachable("not reached");
   } else {
      if (mem_copy == memcpy)
         return linear_to_ytiled(x0, x1, x2, x3, y0, y1,
                                 dst, src, src_pitch, swizzle_bit, memcpy, memcpy);
      else if (mem_copy == rgba8_copy)
         return linear_to_ytiled(x0, x1, x2, x3, y0, y1,
                                 dst, src, src_pitch, swizzle_bit,
                                 rgba8_copy, rgba8_copy_aligned_dst);
      else
         unreachable("not reached");
   }
   linear_to_ytiled(x0, x1, x2, x3, y0, y1,
                    dst, src, src_pitch, swizzle_bit, mem_copy, mem_copy);
}
/**
 * Copy texture data from X tile layout to linear, faster.
 *
 * Same as \ref xtiled_to_linear but faster, because it passes constant
 * parameters for common cases, allowing the compiler to inline code
 * optimized for those cases.
 *
 * \copydoc tile_copy_fn
 */
static FLATTEN void
xtiled_to_linear_faster(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3,
                        uint32_t y0, uint32_t y1,
                        char *dst, const char *src,
                        int32_t dst_pitch,
                        uint32_t swizzle_bit,
                        isl_memcpy_type copy_type)
{
   isl_mem_copy_fn mem_copy = choose_copy_function(copy_type);
   if (x0 == 0 && x3 == xtile_width && y0 == 0 && y1 == xtile_height) {
      if (mem_copy == memcpy)
         return xtiled_to_linear(0, 0, xtile_width, xtile_width, 0, xtile_height,
                                 dst, src, dst_pitch, swizzle_bit, memcpy, memcpy);
      else if (mem_copy == rgba8_copy)
         return xtiled_to_linear(0, 0, xtile_width, xtile_width, 0, xtile_height,
                                 dst, src, dst_pitch, swizzle_bit,
                                 rgba8_copy, rgba8_copy_aligned_src);
#if defined(INLINE_SSE41)
      else if (mem_copy == _memcpy_streaming_load)
         return xtiled_to_linear(0, 0, xtile_width, xtile_width, 0, xtile_height,
                                 dst, src, dst_pitch, swizzle_bit,
                                 memcpy, _memcpy_streaming_load);
#endif
      else
         unreachable("not reached");
   } else {
      if (mem_copy == memcpy)
         return xtiled_to_linear(x0, x1, x2, x3, y0, y1,
                                 dst, src, dst_pitch, swizzle_bit, memcpy, memcpy);
      else if (mem_copy == rgba8_copy)
         return xtiled_to_linear(x0, x1, x2, x3, y0, y1,
                                 dst, src, dst_pitch, swizzle_bit,
                                 rgba8_copy, rgba8_copy_aligned_src);
#if defined(INLINE_SSE41)
      else if (mem_copy == _memcpy_streaming_load)
         return xtiled_to_linear(x0, x1, x2, x3, y0, y1,
                                 dst, src, dst_pitch, swizzle_bit,
                                 memcpy, _memcpy_streaming_load);
#endif
      else
         unreachable("not reached");
   }
   xtiled_to_linear(x0, x1, x2, x3, y0, y1,
                    dst, src, dst_pitch, swizzle_bit, mem_copy, mem_copy);
}
/**
 * Copy texture data from Y tile layout to linear, faster.
 *
 * Same as \ref ytiled_to_linear but faster, because it passes constant
 * parameters for common cases, allowing the compiler to inline code
 * optimized for those cases.
 *
 * \copydoc tile_copy_fn
 */
static FLATTEN void
ytiled_to_linear_faster(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3,
                        uint32_t y0, uint32_t y1,
                        char *dst, const char *src,
                        int32_t dst_pitch,
                        uint32_t swizzle_bit,
                        isl_memcpy_type copy_type)
{
   isl_mem_copy_fn mem_copy = choose_copy_function(copy_type);
   if (x0 == 0 && x3 == ytile_width && y0 == 0 && y1 == ytile_height) {
      if (mem_copy == memcpy)
         return ytiled_to_linear(0, 0, ytile_width, ytile_width, 0, ytile_height,
                                 dst, src, dst_pitch, swizzle_bit, memcpy, memcpy);
      else if (mem_copy == rgba8_copy)
         return ytiled_to_linear(0, 0, ytile_width, ytile_width, 0, ytile_height,
                                 dst, src, dst_pitch, swizzle_bit,
                                 rgba8_copy, rgba8_copy_aligned_src);
#if defined(INLINE_SSE41)
      else if (copy_type == ISL_MEMCPY_STREAMING_LOAD)
         return ytiled_to_linear(0, 0, ytile_width, ytile_width, 0, ytile_height,
                                 dst, src, dst_pitch, swizzle_bit,
                                 memcpy, _memcpy_streaming_load);
#endif
      else
         unreachable("not reached");
   } else {
      if (mem_copy == memcpy)
         return ytiled_to_linear(x0, x1, x2, x3, y0, y1,
                                 dst, src, dst_pitch, swizzle_bit, memcpy, memcpy);
      else if (mem_copy == rgba8_copy)
         return ytiled_to_linear(x0, x1, x2, x3, y0, y1,
                                 dst, src, dst_pitch, swizzle_bit,
                                 rgba8_copy, rgba8_copy_aligned_src);
#if defined(INLINE_SSE41)
      else if (copy_type == ISL_MEMCPY_STREAMING_LOAD)
         return ytiled_to_linear(x0, x1, x2, x3, y0, y1,
                                 dst, src, dst_pitch, swizzle_bit,
                                 memcpy, _memcpy_streaming_load);
#endif
      else
         unreachable("not reached");
   }
   ytiled_to_linear(x0, x1, x2, x3, y0, y1,
                    dst, src, dst_pitch, swizzle_bit, mem_copy, mem_copy);
}
/**
 * Copy from linear to tiled texture.
 *
 * Divide the region given by X range [xt1, xt2) and Y range [yt1, yt2) into
 * pieces that do not cross tile boundaries and copy each piece with a tile
 * copy function (\ref tile_copy_fn).
 * The X range is in bytes, i.e. pixels * bytes-per-pixel.
 * The Y range is in pixels (i.e. unitless).
 * 'dst' is the address of (0, 0) in the destination tiled texture.
 * 'src' is the address of (xt1, yt1) in the source linear texture.
 */
static void
intel_linear_to_tiled(uint32_t xt1, uint32_t xt2,
                      uint32_t yt1, uint32_t yt2,
                      char *dst, const char *src,
                      uint32_t dst_pitch, int32_t src_pitch,
                      bool has_swizzling,
                      enum isl_tiling tiling,
                      isl_memcpy_type copy_type)
{
   tile_copy_fn tile_copy;
   uint32_t xt0, xt3;
   uint32_t yt0, yt3;
   uint32_t xt, yt;
   uint32_t tw, th, span;
   uint32_t swizzle_bit = has_swizzling ? 1<<6 : 0;

   if (tiling == ISL_TILING_X) {
      tw = xtile_width;
      th = xtile_height;
      span = xtile_span;
      tile_copy = linear_to_xtiled_faster;
   } else if (tiling == ISL_TILING_Y0) {
      tw = ytile_width;
      th = ytile_height;
      span = ytile_span;
      tile_copy = linear_to_ytiled_faster;
   } else {
      unreachable("unsupported tiling");
   }
   /* Round out to tile boundaries. */
   xt0 = ALIGN_DOWN(xt1, tw);
   xt3 = ALIGN_UP  (xt2, tw);
   yt0 = ALIGN_DOWN(yt1, th);
   yt3 = ALIGN_UP  (yt2, th);

   /* Loop over all tiles to which we have something to copy.
    * 'xt' and 'yt' are the origin of the destination tile, whether copying
    * a full or partial tile.
    * tile_copy() copies one tile or partial tile.
    * Looping x inside y is the faster memory access pattern.
    */
   for (yt = yt0; yt < yt3; yt += th) {
      for (xt = xt0; xt < xt3; xt += tw) {
         /* The area to update is [x0,x3) x [y0,y1).
          * May not want the whole tile, hence the min and max.
          */
         uint32_t x0 = MAX2(xt1, xt);
         uint32_t y0 = MAX2(yt1, yt);
         uint32_t x3 = MIN2(xt2, xt + tw);
         uint32_t y1 = MIN2(yt2, yt + th);

         /* [x0,x3) is split into [x0,x1), [x1,x2), [x2,x3) such that
          * the middle interval is the longest span-aligned part.
          * The sub-ranges could be empty.
          */
         uint32_t x1, x2;
         x1 = ALIGN_UP(x0, span);
         if (x1 > x3)
            x1 = x2 = x3;
         else
            x2 = ALIGN_DOWN(x3, span);

         assert(x0 <= x1 && x1 <= x2 && x2 <= x3);
         assert(x1 - x0 < span && x3 - x2 < span);
         assert(x3 - x0 <= tw);
         assert((x2 - x1) % span == 0);
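
         /* Worked example (illustrative, added by the editor): for a Y tile
          * (tw = 128, span = 16) with x0 = 10 and x3 = 100, x1 = ALIGN_UP(10, 16)
          * = 16 and x2 = ALIGN_DOWN(100, 16) = 96, giving the pieces [10,16),
          * [16,96) and [96,100): the outer two are shorter than a span and the
          * middle is five whole spans.
          */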
         /* Translate by (xt,yt) for single-tile copier. */
         tile_copy(x0-xt, x1-xt, x2-xt, x3-xt,
                   y0-yt, y1-yt,
                   dst + (ptrdiff_t)xt * th + (ptrdiff_t)yt * dst_pitch,
                   src + (ptrdiff_t)xt - xt1 + ((ptrdiff_t)yt - yt1) * src_pitch,
                   src_pitch,
                   swizzle_bit,
                   copy_type);
      }
   }
}
/**
 * Copy from tiled to linear texture.
 *
 * Divide the region given by X range [xt1, xt2) and Y range [yt1, yt2) into
 * pieces that do not cross tile boundaries and copy each piece with a tile
 * copy function (\ref tile_copy_fn).
 * The X range is in bytes, i.e. pixels * bytes-per-pixel.
 * The Y range is in pixels (i.e. unitless).
 * 'dst' is the address of (xt1, yt1) in the destination linear texture.
 * 'src' is the address of (0, 0) in the source tiled texture.
 */
static void
intel_tiled_to_linear(uint32_t xt1, uint32_t xt2,
                      uint32_t yt1, uint32_t yt2,
                      char *dst, const char *src,
                      int32_t dst_pitch, uint32_t src_pitch,
                      bool has_swizzling,
                      enum isl_tiling tiling,
                      isl_memcpy_type copy_type)
{
   tile_copy_fn tile_copy;
   uint32_t xt0, xt3;
   uint32_t yt0, yt3;
   uint32_t xt, yt;
   uint32_t tw, th, span;
   uint32_t swizzle_bit = has_swizzling ? 1<<6 : 0;

   if (tiling == ISL_TILING_X) {
      tw = xtile_width;
      th = xtile_height;
      span = xtile_span;
      tile_copy = xtiled_to_linear_faster;
   } else if (tiling == ISL_TILING_Y0) {
      tw = ytile_width;
      th = ytile_height;
      span = ytile_span;
      tile_copy = ytiled_to_linear_faster;
   } else {
      unreachable("unsupported tiling");
   }
#if defined(INLINE_SSE41)
   if (copy_type == ISL_MEMCPY_STREAMING_LOAD) {
      /* The hidden cacheline sized register used by movntdqa can apparently
       * give you stale data, so do an mfence to invalidate it.
       */
      _mm_mfence();
   }
#endif
961 xt0
= ALIGN_DOWN(xt1
, tw
);
962 xt3
= ALIGN_UP (xt2
, tw
);
963 yt0
= ALIGN_DOWN(yt1
, th
);
964 yt3
= ALIGN_UP (yt2
, th
);
966 /* Loop over all tiles to which we have something to copy.
967 * 'xt' and 'yt' are the origin of the destination tile, whether copying
968 * copying a full or partial tile.
969 * tile_copy() copies one tile or partial tile.
970 * Looping x inside y is the faster memory access pattern.
   for (yt = yt0; yt < yt3; yt += th) {
      for (xt = xt0; xt < xt3; xt += tw) {
         /* The area to update is [x0,x3) x [y0,y1).
          * May not want the whole tile, hence the min and max.
          */
         uint32_t x0 = MAX2(xt1, xt);
         uint32_t y0 = MAX2(yt1, yt);
         uint32_t x3 = MIN2(xt2, xt + tw);
         uint32_t y1 = MIN2(yt2, yt + th);

         /* [x0,x3) is split into [x0,x1), [x1,x2), [x2,x3) such that
          * the middle interval is the longest span-aligned part.
          * The sub-ranges could be empty.
          */
         uint32_t x1, x2;
         x1 = ALIGN_UP(x0, span);
         if (x1 > x3)
            x1 = x2 = x3;
         else
            x2 = ALIGN_DOWN(x3, span);

         assert(x0 <= x1 && x1 <= x2 && x2 <= x3);
         assert(x1 - x0 < span && x3 - x2 < span);
         assert(x3 - x0 <= tw);
         assert((x2 - x1) % span == 0);
         /* Translate by (xt,yt) for single-tile copier. */
         tile_copy(x0-xt, x1-xt, x2-xt, x3-xt,
                   y0-yt, y1-yt,
                   dst + (ptrdiff_t)xt - xt1 + ((ptrdiff_t)yt - yt1) * dst_pitch,
                   src + (ptrdiff_t)xt * th + (ptrdiff_t)yt * src_pitch,
                   dst_pitch,
                   swizzle_bit,
                   copy_type);
      }
   }
}