/*
 * Copyright (C) 2020 Collabora, Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors (Collabora):
 *      Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
 */
/* Implements framebuffer format conversions in software for Midgard/Bifrost
 * blend shaders. This pass is designed for a single render target; Midgard
 * duplicates blend shaders for MRT to simplify everything. A particular
 * framebuffer format may be categorized as 1) typed load available, 2) typed
 * unpack available, or 3) software unpack only, and likewise for stores. The
 * first two types are handled in the compiler backend directly, so this module
 * is responsible for identifying type 3 formats (hardware dependent) and
 * inserting appropriate ALU code to perform the conversion from the packed
 * type to a designated unpacked type, and vice versa.
 *
 * The unpacked type depends on the format:
 *
 *      - For 32-bit float formats, 32-bit floats.
 *      - For other floats, 16-bit floats.
 *      - For 32-bit ints, 32-bit ints.
 *      - For 8-bit ints, 8-bit ints.
 *      - For other ints, 16-bit ints.
 *
 * The rationale is to optimize blending and logic op instructions by using the
 * smallest precision necessary to store the pixel losslessly; see the examples
 * below. */
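/* For illustration (derived from the rules above): RGBA8_UNORM and RGBA16F
 * both unpack to fp16, R32F to fp32, RGBA8UI to uint8, RGB10_A2_UINT to
 * uint16 (10-bit components do not fit in 8 bits), and RGBA32I to int32. */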
#include "compiler/nir/nir.h"
#include "compiler/nir/nir_builder.h"
#include "compiler/nir/nir_format_convert.h"
#include "util/format/u_format.h"
#include "pan_lower_framebuffer.h"
#include "panfrost-quirks.h"
/* Determines the unpacked type best suiting a given format, so the rest of the
 * pipeline may be adjusted accordingly */

nir_alu_type
pan_unpacked_type_for_format(const struct util_format_description *desc)
{
        int c = util_format_get_first_non_void_channel(desc->format);

        if (c == -1)
                unreachable("Void format not renderable");

        bool large = (desc->channel[c].size > 16);
        bool bit8 = (desc->channel[c].size == 8);
        assert(desc->channel[c].size <= 32);

        if (desc->channel[c].normalized)
                return large ? nir_type_float32 : nir_type_float16;

        switch (desc->channel[c].type) {
        case UTIL_FORMAT_TYPE_UNSIGNED:
                return bit8 ? nir_type_uint8 :
                        large ? nir_type_uint32 : nir_type_uint16;
        case UTIL_FORMAT_TYPE_SIGNED:
                return bit8 ? nir_type_int8 :
                        large ? nir_type_int32 : nir_type_int16;
        case UTIL_FORMAT_TYPE_FLOAT:
                return large ? nir_type_float32 : nir_type_float16;
        default:
                unreachable("Format not renderable");
        }
}
enum pan_format_class
pan_format_class_load(const struct util_format_description *desc, unsigned quirks)
{
        /* Check if we can do anything better than software architecturally */
        if (quirks & MIDGARD_NO_TYPED_BLEND_LOADS) {
                return (quirks & NO_BLEND_PACKS)
                        ? PAN_FORMAT_SOFTWARE : PAN_FORMAT_PACK;
        }

        /* Some formats are missing as typed on some GPUs but have unpacks */
        if (quirks & MIDGARD_MISSING_LOADS) {
                switch (desc->format) {
                case PIPE_FORMAT_R11G11B10_FLOAT:
                case PIPE_FORMAT_R10G10B10A2_UNORM:
                case PIPE_FORMAT_B10G10R10A2_UNORM:
                case PIPE_FORMAT_R10G10B10X2_UNORM:
                case PIPE_FORMAT_B10G10R10X2_UNORM:
                case PIPE_FORMAT_R10G10B10A2_UINT:
                        return PAN_FORMAT_PACK;
                default:
                        return PAN_FORMAT_NATIVE;
                }
        }

        /* Otherwise, we can do native */
        return PAN_FORMAT_NATIVE;
}
enum pan_format_class
pan_format_class_store(const struct util_format_description *desc, unsigned quirks)
{
        /* Check if we can do anything better than software architecturally */
        if (quirks & MIDGARD_NO_TYPED_BLEND_STORES) {
                return (quirks & NO_BLEND_PACKS)
                        ? PAN_FORMAT_SOFTWARE : PAN_FORMAT_PACK;
        }

        return PAN_FORMAT_NATIVE;
}
/* Convenience method */

static enum pan_format_class
pan_format_class(const struct util_format_description *desc, unsigned quirks, bool is_store)
{
        if (is_store)
                return pan_format_class_store(desc, quirks);
        else
                return pan_format_class_load(desc, quirks);
}
/* Software packs/unpacks, by format class. Packs take in the pixel value typed
 * as `pan_unpacked_type_for_format` of the format and return an i32vec4
 * suitable for storing (with components replicated to fill). Unpacks do the
 * reverse but cannot rely on replication.
 *
 * Pure 32 formats (R32F ... RGBA32F) are already 32-bit unpacked, so they just
 * need to be replicated to fill */
static nir_ssa_def *
pan_pack_pure_32(nir_builder *b, nir_ssa_def *v)
{
        nir_ssa_def *replicated[4];

        for (unsigned i = 0; i < 4; ++i)
                replicated[i] = nir_channel(b, v, i % v->num_components);

        return nir_vec(b, replicated, 4);
}
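/* So a one-component R32F value v stores as (v, v, v, v), and a two-component
 * RG32UI value (r, g) as (r, g, r, g): the modulo wraps the source components
 * to fill the vec4. */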
static nir_ssa_def *
pan_unpack_pure_32(nir_builder *b, nir_ssa_def *pack, unsigned num_components)
{
        return nir_channels(b, pack, (1 << num_components) - 1);
}
/* Pure x16 formats are x16 unpacked, so it's similar, but we need to pack
 * upper/lower halves of course */

static nir_ssa_def *
pan_pack_pure_16(nir_builder *b, nir_ssa_def *v)
{
        nir_ssa_def *replicated[4];

        for (unsigned i = 0; i < 4; ++i) {
                unsigned c = 2 * i;

                nir_ssa_def *parts[2] = {
                        nir_channel(b, v, (c + 0) % v->num_components),
                        nir_channel(b, v, (c + 1) % v->num_components)
                };

                replicated[i] = nir_pack_32_2x16(b, nir_vec(b, parts, 2));
        }

        return nir_vec(b, replicated, 4);
}
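/* e.g. an RGBA16UI pixel (r, g, b, a) becomes the words ((g << 16) | r,
 * (a << 16) | b), replicated across the remaining two words. */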
static nir_ssa_def *
pan_unpack_pure_16(nir_builder *b, nir_ssa_def *pack, unsigned num_components)
{
        nir_ssa_def *unpacked[4];

        assert(num_components <= 4);

        for (unsigned i = 0; i < num_components; i += 2) {
                nir_ssa_def *halves =
                        nir_unpack_32_2x16(b, nir_channel(b, pack, i >> 1));

                unpacked[i + 0] = nir_channel(b, halves, 0);
                unpacked[i + 1] = nir_channel(b, halves, 1);
        }

        for (unsigned i = num_components; i < 4; ++i)
                unpacked[i] = nir_imm_intN_t(b, 0, 16);

        return nir_vec(b, unpacked, 4);
}
/* And likewise for x8. pan_fill_4 fills a 4-channel vector with an n-channel
 * vector (n <= 4), replicating as needed. pan_replicate_4 constructs a
 * 4-channel vector from a scalar via replication */

static nir_ssa_def *
pan_fill_4(nir_builder *b, nir_ssa_def *v)
{
        nir_ssa_def *q[4];
        assert(v->num_components <= 4);

        for (unsigned j = 0; j < 4; ++j)
                q[j] = nir_channel(b, v, j % v->num_components);

        return nir_vec(b, q, 4);
}
static nir_ssa_def *
pan_replicate_4(nir_builder *b, nir_ssa_def *v)
{
        nir_ssa_def *replicated[4] = { v, v, v, v };
        return nir_vec(b, replicated, 4);
}
static nir_ssa_def *
pan_pack_pure_8(nir_builder *b, nir_ssa_def *v)
{
        return pan_replicate_4(b, nir_pack_32_4x8(b, pan_fill_4(b, v)));
}
static nir_ssa_def *
pan_unpack_pure_8(nir_builder *b, nir_ssa_def *pack, unsigned num_components)
{
        assert(num_components <= 4);
        nir_ssa_def *unpacked = nir_unpack_32_4x8(b, nir_channel(b, pack, 0));
        return nir_channels(b, unpacked, (1 << num_components) - 1);
}
/* UNORM 8 is unpacked to f16 vec4. We could directly use the un/pack_unorm_4x8
 * ops provided we replicate appropriately, but for packing we'd rather stay in
 * 8/16-bit whereas the NIR op forces 32-bit, so we do it manually */

static nir_ssa_def *
pan_pack_unorm_8(nir_builder *b, nir_ssa_def *v)
{
        return pan_replicate_4(b, nir_pack_32_4x8(b,
                                nir_f2u8(b, nir_fround_even(b, nir_fmul(b, nir_fsat(b,
                                        pan_fill_4(b, v)), nir_imm_float16(b, 255.0))))));
}
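/* Worked example: the fp16 pixel (1.0, 0.0, 0.5, 1.0) saturates to itself,
 * scales to (255.0, 0.0, 127.5, 255.0), rounds-to-even to (255, 0, 128, 255),
 * and packs to 0xFF8000FF in every word. */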
static nir_ssa_def *
pan_unpack_unorm_8(nir_builder *b, nir_ssa_def *pack, unsigned num_components)
{
        assert(num_components <= 4);
        nir_ssa_def *unpacked = nir_unpack_unorm_4x8(b, nir_channel(b, pack, 0));
        return nir_f2f16(b, unpacked);
}
/* UNORM 4 is also unpacked to f16, which prevents us from using the shared
 * unpack which strongly assumes fp32. However, on the tilebuffer it is
 * actually packed as:
 *
 *      [AAAA] [0000] [BBBB] [0000] [GGGG] [0000] [RRRR] [0000]
 *
 * In other words, spacing it out so we're aligned to bytes and on top. So we
 * pack as:
 *
 *      pack_32_4x8(f2u8_rte(v * 15.0) << 4)
 */
static nir_ssa_def *
pan_pack_unorm_small(nir_builder *b, nir_ssa_def *v,
                nir_ssa_def *scales, nir_ssa_def *shifts)
{
        nir_ssa_def *f = nir_fmul(b, nir_fsat(b, pan_fill_4(b, v)), scales);
        nir_ssa_def *u8 = nir_f2u8(b, nir_fround_even(b, f));
        nir_ssa_def *s = nir_ishl(b, u8, shifts);
        nir_ssa_def *repl = nir_pack_32_4x8(b, s);

        return pan_replicate_4(b, repl);
}
static nir_ssa_def *
pan_unpack_unorm_small(nir_builder *b, nir_ssa_def *pack,
                nir_ssa_def *scales, nir_ssa_def *shifts)
{
        nir_ssa_def *channels = nir_unpack_32_4x8(b, nir_channel(b, pack, 0));
        nir_ssa_def *raw = nir_ushr(b, nir_u2u16(b, channels), shifts);
        return nir_fmul(b, nir_u2f16(b, raw), scales);
}
static nir_ssa_def *
pan_pack_unorm_4(nir_builder *b, nir_ssa_def *v)
{
        return pan_pack_unorm_small(b, v,
                        nir_imm_vec4_16(b, 15.0, 15.0, 15.0, 15.0),
                        nir_imm_ivec4(b, 4, 4, 4, 4));
}

static nir_ssa_def *
pan_unpack_unorm_4(nir_builder *b, nir_ssa_def *v)
{
        return pan_unpack_unorm_small(b, v,
                        nir_imm_vec4_16(b, 1.0 / 15.0, 1.0 / 15.0, 1.0 / 15.0, 1.0 / 15.0),
                        nir_imm_ivec4(b, 4, 4, 4, 4));
}
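/* For instance, a component of 1.0 scales to 15 and shifts into the top
 * nibble of its byte (0xF0); unpacking computes (0xF0 >> 4) * (1.0 / 15.0),
 * recovering exactly 1.0. */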
/* UNORM RGB5_A1 and RGB565 follow the same scheme: each channel sits
 * byte-aligned and top-justified on the tilebuffer, only the widths (and
 * hence scales and shifts) differ */

static nir_ssa_def *
pan_pack_unorm_5551(nir_builder *b, nir_ssa_def *v)
{
        return pan_pack_unorm_small(b, v,
                        nir_imm_vec4_16(b, 31.0, 31.0, 31.0, 1.0),
                        nir_imm_ivec4(b, 3, 3, 3, 7));
}

static nir_ssa_def *
pan_unpack_unorm_5551(nir_builder *b, nir_ssa_def *v)
{
        return pan_unpack_unorm_small(b, v,
                        nir_imm_vec4_16(b, 1.0 / 31.0, 1.0 / 31.0, 1.0 / 31.0, 1.0),
                        nir_imm_ivec4(b, 3, 3, 3, 7));
}

static nir_ssa_def *
pan_pack_unorm_565(nir_builder *b, nir_ssa_def *v)
{
        return pan_pack_unorm_small(b, v,
                        nir_imm_vec4_16(b, 31.0, 63.0, 31.0, 0.0),
                        nir_imm_ivec4(b, 3, 2, 3, 0));
}

static nir_ssa_def *
pan_unpack_unorm_565(nir_builder *b, nir_ssa_def *v)
{
        return pan_unpack_unorm_small(b, v,
                        nir_imm_vec4_16(b, 1.0 / 31.0, 1.0 / 63.0, 1.0 / 31.0, 0.0),
                        nir_imm_ivec4(b, 3, 2, 3, 0));
}
/* RGB10_A2 is packed in the tilebuffer as the bottom 3 bytes being the top
 * 8-bits of RGB and the top byte being RGBA as 2-bits packed. As imirkin
 * pointed out, this means free conversion to RGBX8 */
static nir_ssa_def *
pan_pack_unorm_1010102(nir_builder *b, nir_ssa_def *v)
{
        nir_ssa_def *scale = nir_imm_vec4_16(b, 1023.0, 1023.0, 1023.0, 3.0);
        nir_ssa_def *s = nir_f2u32(b, nir_fround_even(b, nir_f2f32(b, nir_fmul(b, nir_fsat(b, v), scale))));

        nir_ssa_def *top8 = nir_ushr(b, s, nir_imm_ivec4(b, 0x2, 0x2, 0x2, 0x2));
        nir_ssa_def *top8_rgb = nir_pack_32_4x8(b, nir_u2u8(b, top8));

        nir_ssa_def *bottom2 = nir_iand(b, s, nir_imm_ivec4(b, 0x3, 0x3, 0x3, 0x3));

        nir_ssa_def *top =
                nir_ior(b,
                        nir_ior(b,
                                nir_ishl(b, nir_channel(b, bottom2, 0), nir_imm_int(b, 24 + 0)),
                                nir_ishl(b, nir_channel(b, bottom2, 1), nir_imm_int(b, 24 + 2))),
                        nir_ior(b,
                                nir_ishl(b, nir_channel(b, bottom2, 2), nir_imm_int(b, 24 + 4)),
                                nir_ishl(b, nir_channel(b, bottom2, 3), nir_imm_int(b, 24 + 6))));

        nir_ssa_def *p = nir_ior(b, top, top8_rgb);
        return pan_replicate_4(b, p);
}
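/* Worked example: opaque red (1.0, 0.0, 0.0, 1.0) scales to s = (1023, 0, 0, 3).
 * The upper 8 bits of each color channel form the low three bytes (0x000000FF
 * here), while the low 2 bits of all four channels form the top byte
 * ((3 << 24) | (3 << 30) = 0xC3000000), giving p = 0xC30000FF. */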
static nir_ssa_def *
pan_unpack_unorm_1010102(nir_builder *b, nir_ssa_def *packed)
{
        nir_ssa_def *p = nir_channel(b, packed, 0);
        nir_ssa_def *bytes = nir_unpack_32_4x8(b, p);
        nir_ssa_def *ubytes = nir_u2u16(b, bytes);

        nir_ssa_def *shifts = nir_ushr(b, pan_replicate_4(b, nir_channel(b, ubytes, 3)),
                        nir_imm_ivec4(b, 0, 2, 4, 6));
        nir_ssa_def *precision = nir_iand(b, shifts,
                        nir_i2i16(b, nir_imm_ivec4(b, 0x3, 0x3, 0x3, 0x3)));

        nir_ssa_def *top_rgb = nir_ishl(b, nir_channels(b, ubytes, 0x7), nir_imm_int(b, 2));
        top_rgb = nir_ior(b, nir_channels(b, precision, 0x7), top_rgb);

        nir_ssa_def *chans[4] = {
                nir_channel(b, top_rgb, 0),
                nir_channel(b, top_rgb, 1),
                nir_channel(b, top_rgb, 2),
                nir_channel(b, precision, 3)
        };

        nir_ssa_def *scale = nir_imm_vec4(b, 1.0 / 1023.0, 1.0 / 1023.0, 1.0 / 1023.0, 1.0 / 3.0);
        return nir_f2f16(b, nir_fmul(b, nir_u2f32(b, nir_vec(b, chans, 4)), scale));
}
/* On the other hand, the pure int RGB10_A2 is identical to the spec */

static nir_ssa_def *
pan_pack_uint_1010102(nir_builder *b, nir_ssa_def *v)
{
        nir_ssa_def *shift = nir_ishl(b, nir_u2u32(b, v),
                        nir_imm_ivec4(b, 0, 10, 20, 30));

        nir_ssa_def *p = nir_ior(b,
                        nir_ior(b, nir_channel(b, shift, 0), nir_channel(b, shift, 1)),
                        nir_ior(b, nir_channel(b, shift, 2), nir_channel(b, shift, 3)));

        return pan_replicate_4(b, p);
}
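/* e.g. the u16vec4 (1023, 512, 1, 3) packs to
 * 1023 | (512 << 10) | (1 << 20) | (3 << 30) = 0xC01803FF. */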
static nir_ssa_def *
pan_unpack_uint_1010102(nir_builder *b, nir_ssa_def *packed)
{
        nir_ssa_def *chan = nir_channel(b, packed, 0);

        nir_ssa_def *shift = nir_ushr(b, pan_replicate_4(b, chan),
                        nir_imm_ivec4(b, 0, 10, 20, 30));

        nir_ssa_def *mask = nir_iand(b, shift,
                        nir_imm_ivec4(b, 0x3ff, 0x3ff, 0x3ff, 0x3));

        return nir_u2u16(b, mask);
}
/* NIR means we can *finally* catch a break */

static nir_ssa_def *
pan_pack_r11g11b10(nir_builder *b, nir_ssa_def *v)
{
        return pan_replicate_4(b, nir_format_pack_11f11f10f(b,
                                nir_f2f32(b, v)));
}
static nir_ssa_def *
pan_unpack_r11g11b10(nir_builder *b, nir_ssa_def *v)
{
        nir_ssa_def *f32 = nir_format_unpack_11f11f10f(b, nir_channel(b, v, 0));
        nir_ssa_def *f16 = nir_f2f16(b, f32);

        /* Extend to vec4 with alpha */
        nir_ssa_def *components[4] = {
                nir_channel(b, f16, 0),
                nir_channel(b, f16, 1),
                nir_channel(b, f16, 2),
                nir_imm_float16(b, 1.0)
        };

        return nir_vec(b, components, 4);
}
/* Wrapper around sRGB conversion */

static nir_ssa_def *
pan_linear_to_srgb(nir_builder *b, nir_ssa_def *linear)
{
        nir_ssa_def *rgb = nir_channels(b, linear, 0x7);

        /* TODO: fp16 native conversion */
        nir_ssa_def *srgb = nir_f2f16(b,
                        nir_format_linear_to_srgb(b, nir_f2f32(b, rgb)));

        nir_ssa_def *comp[4] = {
                nir_channel(b, srgb, 0),
                nir_channel(b, srgb, 1),
                nir_channel(b, srgb, 2),
                nir_channel(b, linear, 3),
        };

        return nir_vec(b, comp, 4);
}
static nir_ssa_def *
pan_srgb_to_linear(nir_builder *b, nir_ssa_def *srgb)
{
        nir_ssa_def *rgb = nir_channels(b, srgb, 0x7);

        /* TODO: fp16 native conversion */
        nir_ssa_def *linear = nir_f2f16(b,
                        nir_format_srgb_to_linear(b, nir_f2f32(b, rgb)));

        nir_ssa_def *comp[4] = {
                nir_channel(b, linear, 0),
                nir_channel(b, linear, 1),
                nir_channel(b, linear, 2),
                nir_channel(b, srgb, 3),
        };

        return nir_vec(b, comp, 4);
}
/* Generic dispatches for un/pack regardless of format */

static bool
pan_is_unorm4(const struct util_format_description *desc)
{
        switch (desc->format) {
        case PIPE_FORMAT_B4G4R4A4_UNORM:
        case PIPE_FORMAT_B4G4R4X4_UNORM:
        case PIPE_FORMAT_A4R4_UNORM:
        case PIPE_FORMAT_R4A4_UNORM:
        case PIPE_FORMAT_A4B4G4R4_UNORM:
        case PIPE_FORMAT_R4G4B4A4_UNORM:
                return true;
        default:
                return false;
        }
}
static nir_ssa_def *
pan_unpack(nir_builder *b,
                const struct util_format_description *desc,
                nir_ssa_def *packed)
{
        if (util_format_is_unorm8(desc))
                return pan_unpack_unorm_8(b, packed, desc->nr_channels);

        if (pan_is_unorm4(desc))
                return pan_unpack_unorm_4(b, packed);

        if (desc->is_array) {
                int c = util_format_get_first_non_void_channel(desc->format);

                struct util_format_channel_description d = desc->channel[c];

                if (d.size == 32 || d.size == 16) {
                        assert(!d.normalized);
                        assert(d.type == UTIL_FORMAT_TYPE_FLOAT || d.pure_integer);

                        return d.size == 32 ? pan_unpack_pure_32(b, packed, desc->nr_channels) :
                                pan_unpack_pure_16(b, packed, desc->nr_channels);
                } else if (d.size == 8) {
                        assert(d.pure_integer);
                        return pan_unpack_pure_8(b, packed, desc->nr_channels);
                }

                unreachable("Unrenderable size");
        }

        switch (desc->format) {
        case PIPE_FORMAT_B5G5R5A1_UNORM:
        case PIPE_FORMAT_R5G5B5A1_UNORM:
                return pan_unpack_unorm_5551(b, packed);
        case PIPE_FORMAT_B5G6R5_UNORM:
                return pan_unpack_unorm_565(b, packed);
        case PIPE_FORMAT_R10G10B10A2_UNORM:
                return pan_unpack_unorm_1010102(b, packed);
        case PIPE_FORMAT_R10G10B10A2_UINT:
                return pan_unpack_uint_1010102(b, packed);
        case PIPE_FORMAT_R11G11B10_FLOAT:
                return pan_unpack_r11g11b10(b, packed);
        default:
                break;
        }

        fprintf(stderr, "%s\n", desc->name);
        unreachable("Unknown format");
}
static nir_ssa_def *
pan_pack(nir_builder *b,
                const struct util_format_description *desc,
                nir_ssa_def *unpacked)
{
        if (desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB)
                unpacked = pan_linear_to_srgb(b, unpacked);

        if (util_format_is_unorm8(desc))
                return pan_pack_unorm_8(b, unpacked);

        if (pan_is_unorm4(desc))
                return pan_pack_unorm_4(b, unpacked);

        if (desc->is_array) {
                int c = util_format_get_first_non_void_channel(desc->format);

                struct util_format_channel_description d = desc->channel[c];

                if (d.size == 32 || d.size == 16) {
                        assert(!d.normalized);
                        assert(d.type == UTIL_FORMAT_TYPE_FLOAT || d.pure_integer);

                        return d.size == 32 ? pan_pack_pure_32(b, unpacked) :
                                pan_pack_pure_16(b, unpacked);
                } else if (d.size == 8) {
                        assert(d.pure_integer);
                        return pan_pack_pure_8(b, unpacked);
                }

                unreachable("Unrenderable size");
        }

        switch (desc->format) {
        case PIPE_FORMAT_B5G5R5A1_UNORM:
        case PIPE_FORMAT_R5G5B5A1_UNORM:
                return pan_pack_unorm_5551(b, unpacked);
        case PIPE_FORMAT_B5G6R5_UNORM:
                return pan_pack_unorm_565(b, unpacked);
        case PIPE_FORMAT_R10G10B10A2_UNORM:
                return pan_pack_unorm_1010102(b, unpacked);
        case PIPE_FORMAT_R10G10B10A2_UINT:
                return pan_pack_uint_1010102(b, unpacked);
        case PIPE_FORMAT_R11G11B10_FLOAT:
                return pan_pack_r11g11b10(b, unpacked);
        default:
                break;
        }

        fprintf(stderr, "%s\n", desc->name);
        unreachable("Unknown format");
}
static void
pan_lower_fb_store(nir_shader *shader,
                nir_builder *b,
                nir_intrinsic_instr *intr,
                const struct util_format_description *desc,
                unsigned quirks)
{
        /* For stores, add conversion before */
        nir_ssa_def *unpacked = nir_ssa_for_src(b, intr->src[1], 4);
        nir_ssa_def *packed = pan_pack(b, desc, unpacked);

        nir_intrinsic_instr *new =
                nir_intrinsic_instr_create(shader, nir_intrinsic_store_raw_output_pan);
        new->src[0] = nir_src_for_ssa(packed);
        new->num_components = 4;
        nir_builder_instr_insert(b, &new->instr);
}
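/* Conceptually, a store_deref of a vec4 color to an RGB565 render target
 * becomes store_raw_output_pan(pan_pack_unorm_565(color)), with the packed
 * i32vec4 written straight to the tilebuffer. */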
static void
pan_lower_fb_load(nir_shader *shader,
                nir_builder *b,
                nir_intrinsic_instr *intr,
                const struct util_format_description *desc,
                unsigned base, unsigned quirks)
{
        nir_intrinsic_instr *new = nir_intrinsic_instr_create(shader,
                       nir_intrinsic_load_raw_output_pan);
        new->num_components = 4;

        nir_intrinsic_set_base(new, base);

        nir_ssa_dest_init(&new->instr, &new->dest, 4, 32, NULL);
        nir_builder_instr_insert(b, &new->instr);

        /* Convert the raw value */
        nir_ssa_def *packed = &new->dest.ssa;
        nir_ssa_def *unpacked = pan_unpack(b, desc, packed);

        if (desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB)
                unpacked = pan_srgb_to_linear(b, unpacked);

        /* Convert to the size of the load intrinsic.
         *
         * We can assume that the type will match with the framebuffer format:
         *
         * Page 170 of the PDF of the OpenGL ES 3.0.6 spec says:
         *
         * If [UNORM or SNORM, convert to fixed-point]; otherwise no type
         * conversion is applied. If the values written by the fragment shader
         * do not match the format(s) of the corresponding color buffer(s),
         * the result is undefined.
         */

        unsigned bits = nir_dest_bit_size(intr->dest);

        nir_alu_type src_type;
        if (desc->channel[0].pure_integer) {
                if (desc->channel[0].type == UTIL_FORMAT_TYPE_SIGNED)
                        src_type = nir_type_int;
                else
                        src_type = nir_type_uint;
        } else {
                src_type = nir_type_float;
        }

        unpacked = nir_convert_to_bit_size(b, unpacked, src_type, bits);

        nir_src rewritten = nir_src_for_ssa(unpacked);
        nir_ssa_def_rewrite_uses_after(&intr->dest.ssa, rewritten, &intr->instr);
}
bool
pan_lower_framebuffer(nir_shader *shader, enum pipe_format *rt_fmts,
                      bool lower_store, unsigned quirks)
{
        if (shader->info.stage != MESA_SHADER_FRAGMENT)
                return false;

        bool progress = false;

        nir_foreach_function(func, shader) {
                nir_foreach_block(block, func->impl) {
                        nir_foreach_instr_safe(instr, block) {
                                if (instr->type != nir_instr_type_intrinsic)
                                        continue;

                                nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

                                bool is_load = intr->intrinsic == nir_intrinsic_load_deref;
                                bool is_store = intr->intrinsic == nir_intrinsic_store_deref;

                                if (!(is_load || (is_store && lower_store)))
                                        continue;

                                nir_variable *var = nir_intrinsic_get_var(intr, 0);

                                if (var->data.mode != nir_var_shader_out)
                                        continue;

                                unsigned base = var->data.driver_location;

                                unsigned rt;
                                if (var->data.location == FRAG_RESULT_COLOR)
                                        rt = 0;
                                else if (var->data.location >= FRAG_RESULT_DATA0)
                                        rt = var->data.location - FRAG_RESULT_DATA0;
                                else
                                        continue;

                                const struct util_format_description *desc =
                                        util_format_description(rt_fmts[rt]);

                                enum pan_format_class fmt_class =
                                        pan_format_class(desc, quirks, is_store);

                                /* Don't lower */
                                if (fmt_class == PAN_FORMAT_NATIVE)
                                        continue;

                                nir_builder b;
                                nir_builder_init(&b, func->impl);

                                if (is_store) {
                                        b.cursor = nir_before_instr(instr);
                                        pan_lower_fb_store(shader, &b, intr, desc, quirks);
                                } else {
                                        b.cursor = nir_after_instr(instr);
                                        pan_lower_fb_load(shader, &b, intr, desc, base, quirks);
                                }

                                nir_instr_remove(instr);
                                progress = true;
                        }
                }

                nir_metadata_preserve(func->impl, nir_metadata_block_index |
                                nir_metadata_dominance);
        }

        return progress;
}
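/* A hypothetical call site, for illustration (names assumed, not part of this
 * file): the pass runs on fragment shaders once the render-target formats and
 * GPU quirks are known, e.g.
 *
 *      NIR_PASS(progress, nir, pan_lower_framebuffer, rt_formats,
 *               lower_store, dev->quirks);
 */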