/*
 * Copyright (C) 2020 Collabora, Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors (Collabora):
 *      Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
 */

/*
 * Implements framebuffer format conversions in software for Midgard/Bifrost
 * blend shaders. This pass is designed for a single render target; Midgard
 * duplicates blend shaders for MRT to simplify everything. A particular
 * framebuffer format may be categorized as 1) typed load available, 2) typed
 * unpack available, or 3) software unpack only, and likewise for stores. The
 * first two types are handled in the compiler backend directly, so this module
 * is responsible for identifying type 3 formats (hardware dependent) and
 * inserting appropriate ALU code to perform the conversion from the packed
 * type to a designated unpacked type, and vice versa.
 *
 * The unpacked type depends on the format:
 *
 *      - For 32-bit float formats, 32-bit floats.
 *      - For other floats, 16-bit floats.
 *      - For 32-bit ints, 32-bit ints.
 *      - For 8-bit ints, 8-bit ints.
 *      - For other ints, 16-bit ints.
 *
 * The rationale is to optimize blending and logic op instructions by using the
 * smallest precision necessary to store the pixel losslessly.
 */

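/* As a worked example of the rules above (illustrative only): RGBA8_UNORM is
 * normalized with 8-bit channels, so it unpacks to an f16 vec4; RGBA32F stays
 * f32; RGBA8UI unpacks to u8; and RGB10_A2UI, whose widest channel is 10 bits,
 * unpacks to u16. */
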
#include "compiler/nir/nir.h"
#include "compiler/nir/nir_builder.h"
#include "compiler/nir/nir_format_convert.h"
#include "util/format/u_format.h"
#include "pan_lower_framebuffer.h"
#include "panfrost-quirks.h"

/* Determines the unpacked type best suiting a given format, so the rest of the
 * pipeline may be adjusted accordingly */

nir_alu_type
pan_unpacked_type_for_format(const struct util_format_description *desc)
{
        int c = util_format_get_first_non_void_channel(desc->format);

        if (c == -1)
                unreachable("Void format not renderable");

        bool large = (desc->channel[c].size > 16);
        bool bit8 = (desc->channel[c].size == 8);
        assert(desc->channel[c].size <= 32);

        if (desc->channel[c].normalized)
                return large ? nir_type_float32 : nir_type_float16;

        switch (desc->channel[c].type) {
        case UTIL_FORMAT_TYPE_UNSIGNED:
                return bit8 ? nir_type_uint8 :
                        large ? nir_type_uint32 : nir_type_uint16;
        case UTIL_FORMAT_TYPE_SIGNED:
                return bit8 ? nir_type_int8 :
                        large ? nir_type_int32 : nir_type_int16;
        case UTIL_FORMAT_TYPE_FLOAT:
                return large ? nir_type_float32 : nir_type_float16;
        default:
                unreachable("Format not renderable");
        }
}

enum pan_format_class
pan_format_class_load(const struct util_format_description *desc, unsigned quirks)
{
        /* Pure integers can be loaded via EXT_framebuffer_fetch and should be
         * handled as a raw load with a size conversion (it's cheap). Likewise,
         * since float framebuffers are internally implemented as raw (i.e.
         * integer) framebuffers with blend shaders to go back and forth, they
         * should be s/w as well */

        if (util_format_is_pure_integer(desc->format) || util_format_is_float(desc->format))
                return PAN_FORMAT_SOFTWARE;

        /* Check if we can do anything better than software architecturally */
        if (quirks & MIDGARD_NO_TYPED_BLEND_LOADS) {
                return (quirks & NO_BLEND_PACKS)
                        ? PAN_FORMAT_SOFTWARE : PAN_FORMAT_PACK;
        }

        /* Some formats are missing as typed on some GPUs but have unpacks */
        if (quirks & MIDGARD_MISSING_LOADS) {
                switch (desc->format) {
                case PIPE_FORMAT_R11G11B10_FLOAT:
                case PIPE_FORMAT_R10G10B10A2_UNORM:
                case PIPE_FORMAT_B10G10R10A2_UNORM:
                case PIPE_FORMAT_R10G10B10X2_UNORM:
                case PIPE_FORMAT_B10G10R10X2_UNORM:
                case PIPE_FORMAT_R10G10B10A2_UINT:
                        return PAN_FORMAT_PACK;
                default:
                        return PAN_FORMAT_NATIVE;
                }
        }

        /* Otherwise, we can do native */
        return PAN_FORMAT_NATIVE;
}

enum pan_format_class
pan_format_class_store(const struct util_format_description *desc, unsigned quirks)
{
        /* Check if we can do anything better than software architecturally */
        if (quirks & MIDGARD_NO_TYPED_BLEND_STORES) {
                return (quirks & NO_BLEND_PACKS)
                        ? PAN_FORMAT_SOFTWARE : PAN_FORMAT_PACK;
        }

        return PAN_FORMAT_NATIVE;
}

/* Convenience method */

static enum pan_format_class
pan_format_class(const struct util_format_description *desc, unsigned quirks, bool is_store)
{
        if (is_store)
                return pan_format_class_store(desc, quirks);
        else
                return pan_format_class_load(desc, quirks);
}

/* Software packs/unpacks, by format class. Packs take in the pixel value typed
 * as `pan_unpacked_type_for_format` of the format and return an i32vec4
 * suitable for storing (with components replicated to fill). Unpacks do the
 * reverse but cannot rely on replication.
 *
 * Pure 32 formats (R32F ... RGBA32F) are 32 unpacked, so just need to
 * replicate to fill */

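/* To make the convention concrete (illustrative): packing a 2-component
 * R32G32F value (x, y) produces the i32vec4 (x, y, x, y), with the trailing
 * channels filled by replication, while unpacking R32G32F reads back only the
 * first two channels of the packed vector. */
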
static nir_ssa_def *
pan_pack_pure_32(nir_builder *b, nir_ssa_def *v)
{
        nir_ssa_def *replicated[4];

        for (unsigned i = 0; i < 4; ++i)
                replicated[i] = nir_channel(b, v, i % v->num_components);

        return nir_vec(b, replicated, 4);
}

static nir_ssa_def *
pan_unpack_pure_32(nir_builder *b, nir_ssa_def *pack, unsigned num_components)
{
        return nir_channels(b, pack, (1 << num_components) - 1);
}

/* Pure x16 formats are x16 unpacked, so it's similar, but we need to pack
 * upper/lower halves of course */

static nir_ssa_def *
pan_pack_pure_16(nir_builder *b, nir_ssa_def *v)
{
        nir_ssa_def *replicated[4];

        for (unsigned i = 0; i < 4; ++i) {
                unsigned c = 2 * i;

                nir_ssa_def *parts[2] = {
                        nir_channel(b, v, (c + 0) % v->num_components),
                        nir_channel(b, v, (c + 1) % v->num_components)
                };

                replicated[i] = nir_pack_32_2x16(b, nir_vec(b, parts, 2));
        }

        return nir_vec(b, replicated, 4);
}

static nir_ssa_def *
pan_unpack_pure_16(nir_builder *b, nir_ssa_def *pack, unsigned num_components)
{
        nir_ssa_def *unpacked[4];

        assert(num_components <= 4);

        for (unsigned i = 0; i < num_components; i += 2) {
                nir_ssa_def *halves =
                        nir_unpack_32_2x16(b, nir_channel(b, pack, i >> 1));

                unpacked[i + 0] = nir_channel(b, halves, 0);
                unpacked[i + 1] = nir_channel(b, halves, 1);
        }

        for (unsigned i = num_components; i < 4; ++i)
                unpacked[i] = nir_imm_intN_t(b, 0, 16);

        return nir_vec(b, unpacked, 4);
}

/* And likewise for x8. pan_fill_4 fills a 4-channel vector with an n-channel
 * vector (n <= 4), replicating as needed. pan_replicate_4 constructs a
 * 4-channel vector from a scalar via replication */

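/* For instance (illustrative): pan_fill_4 of a vec2 (r, g) yields
 * (r, g, r, g), while pan_replicate_4 of a scalar s yields (s, s, s, s). */
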
static nir_ssa_def *
pan_fill_4(nir_builder *b, nir_ssa_def *v)
{
        nir_ssa_def *q[4];
        assert(v->num_components <= 4);

        for (unsigned j = 0; j < 4; ++j)
                q[j] = nir_channel(b, v, j % v->num_components);

        return nir_vec(b, q, 4);
}

static nir_ssa_def *
pan_extend(nir_builder *b, nir_ssa_def *v, unsigned N)
{
        nir_ssa_def *q[4];
        assert(v->num_components <= 4);
        assert(N <= 4);

        for (unsigned j = 0; j < v->num_components; ++j)
                q[j] = nir_channel(b, v, j);

        for (unsigned j = v->num_components; j < N; ++j)
                q[j] = nir_imm_int(b, 0);

        return nir_vec(b, q, N);
}

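/* E.g. (illustrative) pan_extend of a vec2 (x, y) to N = 4 yields
 * (x, y, 0, 0): existing channels are kept and the rest are zero-filled. */
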
static nir_ssa_def *
pan_replicate_4(nir_builder *b, nir_ssa_def *v)
{
        nir_ssa_def *replicated[4] = { v, v, v, v };
        return nir_vec(b, replicated, 4);
}

static nir_ssa_def *
pan_pack_pure_8(nir_builder *b, nir_ssa_def *v)
{
        return pan_replicate_4(b, nir_pack_32_4x8(b, pan_fill_4(b, v)));
}

static nir_ssa_def *
pan_unpack_pure_8(nir_builder *b, nir_ssa_def *pack, unsigned num_components)
{
        assert(num_components <= 4);
        nir_ssa_def *unpacked = nir_unpack_32_4x8(b, nir_channel(b, pack, 0));
        return nir_channels(b, unpacked, (1 << num_components) - 1);
}

/* UNORM 8 is unpacked to f16 vec4. We could directly use the un/pack_unorm_4x8
 * ops provided we replicate appropriately, but for packing we'd rather stay in
 * 8/16-bit whereas the NIR op forces 32-bit, so we do it manually */

static nir_ssa_def *
pan_pack_unorm_8(nir_builder *b, nir_ssa_def *v)
{
        return pan_replicate_4(b, nir_pack_32_4x8(b,
                nir_f2u8(b, nir_fround_even(b, nir_fmul(b, nir_fsat(b,
                        pan_fill_4(b, v)), nir_imm_float16(b, 255.0))))));
}

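/* Worked example (illustrative): a 50% grey f16 input computes
 * round_even(0.5 * 255) = 128 = 0x80 per channel, so the pixel packs to
 * 0x80808080 in each replicated word. */
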
static nir_ssa_def *
pan_unpack_unorm_8(nir_builder *b, nir_ssa_def *pack, unsigned num_components)
{
        assert(num_components <= 4);
        nir_ssa_def *unpacked = nir_unpack_unorm_4x8(b, nir_channel(b, pack, 0));
        return nir_f2fmp(b, unpacked);
}

/* UNORM 4 is also unpacked to f16, which prevents us from using the shared
 * unpack which strongly assumes fp32. However, on the tilebuffer it is
 * actually packed as:
 *
 *      [AAAA] [0000] [BBBB] [0000] [GGGG] [0000] [RRRR] [0000]
 *
 * In other words, spacing it out so we're aligned to bytes and on top. So
 * pack as:
 *
 *      pack_32_4x8(f2u8_rte(v * 15.0) << 4)
 */

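/* E.g. (illustrative) v = (1.0, 0.0, 1.0, 1.0) maps to u8 (15, 0, 15, 15),
 * shifts up to (0xF0, 0x00, 0xF0, 0xF0), and packs to the word 0xF0F000F0
 * (channel 0 in the low byte). */
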
static nir_ssa_def *
pan_pack_unorm_small(nir_builder *b, nir_ssa_def *v,
                nir_ssa_def *scales, nir_ssa_def *shifts)
{
        nir_ssa_def *f = nir_fmul(b, nir_fsat(b, pan_fill_4(b, v)), scales);
        nir_ssa_def *u8 = nir_f2u8(b, nir_fround_even(b, f));
        nir_ssa_def *s = nir_ishl(b, u8, shifts);
        nir_ssa_def *repl = nir_pack_32_4x8(b, s);

        return pan_replicate_4(b, repl);
}

static nir_ssa_def *
pan_unpack_unorm_small(nir_builder *b, nir_ssa_def *pack,
                nir_ssa_def *scales, nir_ssa_def *shifts)
{
        nir_ssa_def *channels = nir_unpack_32_4x8(b, nir_channel(b, pack, 0));
        nir_ssa_def *raw = nir_ushr(b, nir_u2ump(b, channels), shifts);
        return nir_fmul(b, nir_u2f16(b, raw), scales);
}

static nir_ssa_def *
pan_pack_unorm_4(nir_builder *b, nir_ssa_def *v)
{
        return pan_pack_unorm_small(b, v,
                        nir_imm_vec4_16(b, 15.0, 15.0, 15.0, 15.0),
                        nir_imm_ivec4(b, 4, 4, 4, 4));
}

static nir_ssa_def *
pan_unpack_unorm_4(nir_builder *b, nir_ssa_def *v)
{
        return pan_unpack_unorm_small(b, v,
                        nir_imm_vec4_16(b, 1.0 / 15.0, 1.0 / 15.0, 1.0 / 15.0, 1.0 / 15.0),
                        nir_imm_ivec4(b, 4, 4, 4, 4));
}

/* UNORM RGB5_A1 and RGB565 are similar */

static nir_ssa_def *
pan_pack_unorm_5551(nir_builder *b, nir_ssa_def *v)
{
        return pan_pack_unorm_small(b, v,
                        nir_imm_vec4_16(b, 31.0, 31.0, 31.0, 1.0),
                        nir_imm_ivec4(b, 3, 3, 3, 7));
}

static nir_ssa_def *
pan_unpack_unorm_5551(nir_builder *b, nir_ssa_def *v)
{
        return pan_unpack_unorm_small(b, v,
                        nir_imm_vec4_16(b, 1.0 / 31.0, 1.0 / 31.0, 1.0 / 31.0, 1.0),
                        nir_imm_ivec4(b, 3, 3, 3, 7));
}

static nir_ssa_def *
pan_pack_unorm_565(nir_builder *b, nir_ssa_def *v)
{
        return pan_pack_unorm_small(b, v,
                        nir_imm_vec4_16(b, 31.0, 63.0, 31.0, 0.0),
                        nir_imm_ivec4(b, 3, 2, 3, 0));
}

static nir_ssa_def *
pan_unpack_unorm_565(nir_builder *b, nir_ssa_def *v)
{
        return pan_unpack_unorm_small(b, v,
                        nir_imm_vec4_16(b, 1.0 / 31.0, 1.0 / 63.0, 1.0 / 31.0, 0.0),
                        nir_imm_ivec4(b, 3, 2, 3, 0));
}

/* RGB10_A2 is packed in the tilebuffer as the bottom 3 bytes being the top
 * 8-bits of RGB and the top byte being RGBA as 2-bits packed. As imirkin
 * pointed out, this means free conversion to RGBX8 */

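/* Illustrative sanity check of that layout: opaque white (1, 1, 1, 1) scales
 * to (1023, 1023, 1023, 3); the top 8 bits of each colour channel give the
 * bottom bytes 0xFF 0xFF 0xFF, and the four 2-bit remainders (3, 3, 3, 3)
 * pack into the top byte as 0xFF, so the packed word is 0xFFFFFFFF. */
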
static nir_ssa_def *
pan_pack_unorm_1010102(nir_builder *b, nir_ssa_def *v)
{
        nir_ssa_def *scale = nir_imm_vec4_16(b, 1023.0, 1023.0, 1023.0, 3.0);
        nir_ssa_def *s = nir_f2u32(b, nir_fround_even(b, nir_f2f32(b, nir_fmul(b, nir_fsat(b, v), scale))));

        nir_ssa_def *top8 = nir_ushr(b, s, nir_imm_ivec4(b, 0x2, 0x2, 0x2, 0x2));
        nir_ssa_def *top8_rgb = nir_pack_32_4x8(b, nir_u2u8(b, top8));

        nir_ssa_def *bottom2 = nir_iand(b, s, nir_imm_ivec4(b, 0x3, 0x3, 0x3, 0x3));

        nir_ssa_def *top =
                nir_ior(b,
                        nir_ior(b,
                                nir_ishl(b, nir_channel(b, bottom2, 0), nir_imm_int(b, 24 + 0)),
                                nir_ishl(b, nir_channel(b, bottom2, 1), nir_imm_int(b, 24 + 2))),
                        nir_ior(b,
                                nir_ishl(b, nir_channel(b, bottom2, 2), nir_imm_int(b, 24 + 4)),
                                nir_ishl(b, nir_channel(b, bottom2, 3), nir_imm_int(b, 24 + 6))));

        nir_ssa_def *p = nir_ior(b, top, top8_rgb);
        return pan_replicate_4(b, p);
}

static nir_ssa_def *
pan_unpack_unorm_1010102(nir_builder *b, nir_ssa_def *packed)
{
        nir_ssa_def *p = nir_channel(b, packed, 0);
        nir_ssa_def *bytes = nir_unpack_32_4x8(b, p);
        nir_ssa_def *ubytes = nir_u2ump(b, bytes);

        nir_ssa_def *shifts = nir_ushr(b, pan_replicate_4(b, nir_channel(b, ubytes, 3)),
                        nir_imm_ivec4(b, 0, 2, 4, 6));
        nir_ssa_def *precision = nir_iand(b, shifts,
                        nir_i2imp(b, nir_imm_ivec4(b, 0x3, 0x3, 0x3, 0x3)));

        nir_ssa_def *top_rgb = nir_ishl(b, nir_channels(b, ubytes, 0x7), nir_imm_int(b, 2));
        top_rgb = nir_ior(b, nir_channels(b, precision, 0x7), top_rgb);

        nir_ssa_def *chans[4] = {
                nir_channel(b, top_rgb, 0),
                nir_channel(b, top_rgb, 1),
                nir_channel(b, top_rgb, 2),
                nir_channel(b, precision, 3)
        };

        nir_ssa_def *scale = nir_imm_vec4(b, 1.0 / 1023.0, 1.0 / 1023.0, 1.0 / 1023.0, 1.0 / 3.0);
        return nir_f2fmp(b, nir_fmul(b, nir_u2f32(b, nir_vec(b, chans, 4)), scale));
}

429 pan_pack_uint_1010102(nir_builder
*b
, nir_ssa_def
*v
)
431 nir_ssa_def
*shift
= nir_ishl(b
, nir_u2u32(b
, v
),
432 nir_imm_ivec4(b
, 0, 10, 20, 30));
434 nir_ssa_def
*p
= nir_ior(b
,
435 nir_ior(b
, nir_channel(b
, shift
, 0), nir_channel(b
, shift
, 1)),
436 nir_ior(b
, nir_channel(b
, shift
, 2), nir_channel(b
, shift
, 3)));
438 return pan_replicate_4(b
, p
);
442 pan_unpack_uint_1010102(nir_builder
*b
, nir_ssa_def
*packed
)
444 nir_ssa_def
*chan
= nir_channel(b
, packed
, 0);
446 nir_ssa_def
*shift
= nir_ushr(b
, pan_replicate_4(b
, chan
),
447 nir_imm_ivec4(b
, 0, 10, 20, 30));
449 nir_ssa_def
*mask
= nir_iand(b
, shift
,
450 nir_imm_ivec4(b
, 0x3ff, 0x3ff, 0x3ff, 0x3));
452 return nir_u2ump(b
, mask
);
/* NIR means we can *finally* catch a break */

static nir_ssa_def *
pan_pack_r11g11b10(nir_builder *b, nir_ssa_def *v)
{
        return pan_replicate_4(b, nir_format_pack_11f11f10f(b,
                                nir_f2f32(b, v)));
}

static nir_ssa_def *
pan_unpack_r11g11b10(nir_builder *b, nir_ssa_def *v)
{
        nir_ssa_def *f32 = nir_format_unpack_11f11f10f(b, nir_channel(b, v, 0));
        nir_ssa_def *f16 = nir_f2fmp(b, f32);

        /* Extend to vec4 with alpha */
        nir_ssa_def *components[4] = {
                nir_channel(b, f16, 0),
                nir_channel(b, f16, 1),
                nir_channel(b, f16, 2),
                nir_imm_float16(b, 1.0)
        };

        return nir_vec(b, components, 4);
}

/* Wrapper around sRGB conversion */

static nir_ssa_def *
pan_linear_to_srgb(nir_builder *b, nir_ssa_def *linear)
{
        nir_ssa_def *rgb = nir_channels(b, linear, 0x7);

        /* TODO: fp16 native conversion */
        nir_ssa_def *srgb = nir_f2fmp(b,
                        nir_format_linear_to_srgb(b, nir_f2f32(b, rgb)));

        nir_ssa_def *comp[4] = {
                nir_channel(b, srgb, 0),
                nir_channel(b, srgb, 1),
                nir_channel(b, srgb, 2),
                nir_channel(b, linear, 3),
        };

        return nir_vec(b, comp, 4);
}

static nir_ssa_def *
pan_srgb_to_linear(nir_builder *b, nir_ssa_def *srgb)
{
        nir_ssa_def *rgb = nir_channels(b, srgb, 0x7);

        /* TODO: fp16 native conversion */
        nir_ssa_def *linear = nir_f2fmp(b,
                        nir_format_srgb_to_linear(b, nir_f2f32(b, rgb)));

        nir_ssa_def *comp[4] = {
                nir_channel(b, linear, 0),
                nir_channel(b, linear, 1),
                nir_channel(b, linear, 2),
                nir_channel(b, srgb, 3),
        };

        return nir_vec(b, comp, 4);
}

/* Generic dispatches for un/pack regardless of format */

static bool
pan_is_unorm4(const struct util_format_description *desc)
{
        switch (desc->format) {
        case PIPE_FORMAT_B4G4R4A4_UNORM:
        case PIPE_FORMAT_B4G4R4X4_UNORM:
        case PIPE_FORMAT_A4R4_UNORM:
        case PIPE_FORMAT_R4A4_UNORM:
        case PIPE_FORMAT_A4B4G4R4_UNORM:
        case PIPE_FORMAT_R4G4B4A4_UNORM:
                return true;
        default:
                return false;
        }
}

static nir_ssa_def *
pan_unpack(nir_builder *b,
                const struct util_format_description *desc,
                nir_ssa_def *packed)
{
        if (util_format_is_unorm8(desc))
                return pan_unpack_unorm_8(b, packed, desc->nr_channels);

        if (pan_is_unorm4(desc))
                return pan_unpack_unorm_4(b, packed);

        if (desc->is_array) {
                int c = util_format_get_first_non_void_channel(desc->format);

                struct util_format_channel_description d = desc->channel[c];

                if (d.size == 32 || d.size == 16) {
                        assert(!d.normalized);
                        assert(d.type == UTIL_FORMAT_TYPE_FLOAT || d.pure_integer);

                        return d.size == 32 ? pan_unpack_pure_32(b, packed, desc->nr_channels) :
                                pan_unpack_pure_16(b, packed, desc->nr_channels);
                } else if (d.size == 8) {
                        assert(d.pure_integer);
                        return pan_unpack_pure_8(b, packed, desc->nr_channels);
                } else {
                        unreachable("Unrenderable size");
                }
        }

        switch (desc->format) {
        case PIPE_FORMAT_B5G5R5A1_UNORM:
        case PIPE_FORMAT_R5G5B5A1_UNORM:
                return pan_unpack_unorm_5551(b, packed);
        case PIPE_FORMAT_B5G6R5_UNORM:
                return pan_unpack_unorm_565(b, packed);
        case PIPE_FORMAT_R10G10B10A2_UNORM:
                return pan_unpack_unorm_1010102(b, packed);
        case PIPE_FORMAT_R10G10B10A2_UINT:
                return pan_unpack_uint_1010102(b, packed);
        case PIPE_FORMAT_R11G11B10_FLOAT:
                return pan_unpack_r11g11b10(b, packed);
        default:
                break;
        }

        fprintf(stderr, "%s\n", desc->name);
        unreachable("Unknown format");
}

static nir_ssa_def *
pan_pack(nir_builder *b,
                const struct util_format_description *desc,
                nir_ssa_def *unpacked)
{
        if (desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB)
                unpacked = pan_linear_to_srgb(b, unpacked);

        if (util_format_is_unorm8(desc))
                return pan_pack_unorm_8(b, unpacked);

        if (pan_is_unorm4(desc))
                return pan_pack_unorm_4(b, unpacked);

        if (desc->is_array) {
                int c = util_format_get_first_non_void_channel(desc->format);

                struct util_format_channel_description d = desc->channel[c];

                if (d.size == 32 || d.size == 16) {
                        assert(!d.normalized);
                        assert(d.type == UTIL_FORMAT_TYPE_FLOAT || d.pure_integer);

                        return d.size == 32 ? pan_pack_pure_32(b, unpacked) :
                                pan_pack_pure_16(b, unpacked);
                } else if (d.size == 8) {
                        assert(d.pure_integer);
                        return pan_pack_pure_8(b, unpacked);
                } else {
                        unreachable("Unrenderable size");
                }
        }

        switch (desc->format) {
        case PIPE_FORMAT_B5G5R5A1_UNORM:
        case PIPE_FORMAT_R5G5B5A1_UNORM:
                return pan_pack_unorm_5551(b, unpacked);
        case PIPE_FORMAT_B5G6R5_UNORM:
                return pan_pack_unorm_565(b, unpacked);
        case PIPE_FORMAT_R10G10B10A2_UNORM:
                return pan_pack_unorm_1010102(b, unpacked);
        case PIPE_FORMAT_R10G10B10A2_UINT:
                return pan_pack_uint_1010102(b, unpacked);
        case PIPE_FORMAT_R11G11B10_FLOAT:
                return pan_pack_r11g11b10(b, unpacked);
        default:
                break;
        }

        fprintf(stderr, "%s\n", desc->name);
        unreachable("Unknown format");
}

static void
pan_lower_fb_store(nir_shader *shader,
                nir_builder *b,
                nir_intrinsic_instr *intr,
                const struct util_format_description *desc,
                unsigned quirks)
{
        /* For stores, add conversion before */
        nir_ssa_def *unpacked = nir_ssa_for_src(b, intr->src[1], 4);
        nir_ssa_def *packed = pan_pack(b, desc, unpacked);

        nir_intrinsic_instr *new =
                nir_intrinsic_instr_create(shader, nir_intrinsic_store_raw_output_pan);
        new->src[0] = nir_src_for_ssa(packed);
        new->num_components = 4;
        nir_builder_instr_insert(b, &new->instr);
}

static nir_ssa_def *
pan_sample_id(nir_builder *b, int sample)
{
        return (sample >= 0) ? nir_imm_int(b, sample) : nir_load_sample_id(b);
}

static void
pan_lower_fb_load(nir_shader *shader,
                nir_builder *b,
                nir_intrinsic_instr *intr,
                const struct util_format_description *desc,
                unsigned base, int sample, unsigned quirks)
{
        nir_intrinsic_instr *new = nir_intrinsic_instr_create(shader,
                       nir_intrinsic_load_raw_output_pan);
        new->num_components = 4;
        new->src[0] = nir_src_for_ssa(pan_sample_id(b, sample));

        nir_intrinsic_set_base(new, base);

        nir_ssa_dest_init(&new->instr, &new->dest, 4, 32, NULL);
        nir_builder_instr_insert(b, &new->instr);

        /* Convert the raw value */
        nir_ssa_def *packed = &new->dest.ssa;
        nir_ssa_def *unpacked = pan_unpack(b, desc, packed);

        if (desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB)
                unpacked = pan_srgb_to_linear(b, unpacked);

        /* Convert to the size of the load intrinsic.
         *
         * We can assume that the type will match with the framebuffer format:
         *
         * Page 170 of the PDF of the OpenGL ES 3.0.6 spec says:
         *
         * If [UNORM or SNORM, convert to fixed-point]; otherwise no type
         * conversion is applied. If the values written by the fragment shader
         * do not match the format(s) of the corresponding color buffer(s),
         * the result is undefined.
         */

        unsigned bits = nir_dest_bit_size(intr->dest);

        nir_alu_type src_type;
        if (desc->channel[0].pure_integer) {
                if (desc->channel[0].type == UTIL_FORMAT_TYPE_SIGNED)
                        src_type = nir_type_int;
                else
                        src_type = nir_type_uint;
        } else {
                src_type = nir_type_float;
        }

        unpacked = nir_convert_to_bit_size(b, unpacked, src_type, bits);
        unpacked = pan_extend(b, unpacked, nir_dest_num_components(intr->dest));

        nir_src rewritten = nir_src_for_ssa(unpacked);
        nir_ssa_def_rewrite_uses_after(&intr->dest.ssa, rewritten, &intr->instr);
}

void
pan_lower_framebuffer(nir_shader *shader, enum pipe_format *rt_fmts,
                      bool is_blend, unsigned quirks)
{
        if (shader->info.stage != MESA_SHADER_FRAGMENT)
                return;

        bool progress = false;

        nir_foreach_function(func, shader) {
                nir_foreach_block(block, func->impl) {
                        nir_foreach_instr_safe(instr, block) {
                                if (instr->type != nir_instr_type_intrinsic)
                                        continue;

                                nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

                                bool is_load = intr->intrinsic == nir_intrinsic_load_deref;
                                bool is_store = intr->intrinsic == nir_intrinsic_store_deref;

                                if (!(is_load || (is_store && is_blend)))
                                        continue;

                                nir_variable *var = nir_intrinsic_get_var(intr, 0);

                                if (var->data.mode != nir_var_shader_out)
                                        continue;

                                unsigned base = var->data.driver_location;

                                unsigned rt;
                                if (var->data.location == FRAG_RESULT_COLOR)
                                        rt = 0;
                                else if (var->data.location >= FRAG_RESULT_DATA0)
                                        rt = var->data.location - FRAG_RESULT_DATA0;
                                else
                                        continue;

                                if (rt_fmts[rt] == PIPE_FORMAT_NONE)
                                        continue;

                                const struct util_format_description *desc =
                                        util_format_description(rt_fmts[rt]);

                                enum pan_format_class fmt_class =
                                        pan_format_class(desc, quirks, is_store);

                                if (fmt_class == PAN_FORMAT_NATIVE)
                                        continue;

                                /* EXT_shader_framebuffer_fetch requires
                                 * per-sample loads.
                                 * MSAA blend shaders are not yet handled, so
                                 * for now always load sample 0. */
                                int sample = is_blend ? 0 : -1;

                                nir_builder b;
                                nir_builder_init(&b, func->impl);

                                if (is_store) {
                                        b.cursor = nir_before_instr(instr);
                                        pan_lower_fb_store(shader, &b, intr, desc, quirks);
                                } else {
                                        b.cursor = nir_after_instr(instr);
                                        pan_lower_fb_load(shader, &b, intr, desc, base, sample, quirks);
                                }

                                nir_instr_remove(instr);
                                progress = true;
                        }
                }

                nir_metadata_preserve(func->impl, nir_metadata_block_index |
                                nir_metadata_dominance);
        }
}

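/* Illustrative driver-side usage; the surrounding names (nir, dev, the array
 * sizing) are assumptions, not part of this file:
 *
 *    enum pipe_format rt_fmts[8] = { PIPE_FORMAT_B5G6R5_UNORM };
 *    pan_lower_framebuffer(nir, rt_fmts, is_blend, dev->quirks);
 *
 * The pass is a no-op for non-fragment shaders and for render targets whose
 * format class is PAN_FORMAT_NATIVE. */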