/*
 * Copyright (C) 2020 Collabora, Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors (Collabora):
 *      Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
 */
/*
 * Implements framebuffer format conversions in software for Midgard/Bifrost
 * blend shaders. This pass is designed for a single render target; Midgard
 * duplicates blend shaders for MRT to simplify everything. A particular
 * framebuffer format may be categorized as 1) typed load available, 2) typed
 * unpack available, or 3) software unpack only, and likewise for stores. The
 * first two types are handled in the compiler backend directly, so this
 * module is responsible for identifying type 3 formats (hardware dependent)
 * and inserting appropriate ALU code to perform the conversion from the
 * packed type to a designated unpacked type, and vice versa.
 *
 * The unpacked type depends on the format:
 *
 *      - For 32-bit float formats, 32-bit floats.
 *      - For other floats, 16-bit floats.
 *      - For 32-bit ints, 32-bit ints.
 *      - For 8-bit ints, 8-bit ints.
 *      - For other ints, 16-bit ints.
 *
 * The rationale is to optimize blending and logic op instructions by using
 * the smallest precision necessary to store the pixel losslessly.
 */
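
/* As a concrete illustration of those rules: R32G32B32A32_FLOAT stays f32;
 * R16G16B16A16_FLOAT and R8G8B8A8_UNORM unpack to f16; R32G32B32A32_UINT
 * stays u32; R8G8B8A8_UINT stays u8; and R16G16B16A16_UINT unpacks to u16. */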
#include "compiler/nir/nir.h"
#include "compiler/nir/nir_builder.h"
#include "compiler/nir/nir_format_convert.h"
#include "util/format/u_format.h"
#include "pan_lower_framebuffer.h"
#include "panfrost-quirks.h"
/* Determines the unpacked type best suiting a given format, so the rest of
 * the pipeline may be adjusted accordingly */

static nir_alu_type
pan_unpacked_type_for_format(const struct util_format_description *desc)
{
        int c = util_format_get_first_non_void_channel(desc->format);

        if (c == -1)
                unreachable("Void format not renderable");

        bool large = (desc->channel[c].size > 16);
        bool bit8 = (desc->channel[c].size == 8);
        assert(desc->channel[c].size <= 32);

        if (desc->channel[c].normalized)
                return large ? nir_type_float32 : nir_type_float16;

        switch (desc->channel[c].type) {
        case UTIL_FORMAT_TYPE_UNSIGNED:
                return bit8 ? nir_type_uint8 :
                       large ? nir_type_uint32 : nir_type_uint16;
        case UTIL_FORMAT_TYPE_SIGNED:
                return bit8 ? nir_type_int8 :
                       large ? nir_type_int32 : nir_type_int16;
        case UTIL_FORMAT_TYPE_FLOAT:
                return large ? nir_type_float32 : nir_type_float16;
        default:
                unreachable("Format not renderable");
        }
}
enum pan_format_class
pan_format_class_load(const struct util_format_description *desc, unsigned quirks)
{
        /* Check if we can do anything better than software architecturally */
        if (quirks & MIDGARD_NO_TYPED_BLEND_LOADS) {
                return (quirks & NO_BLEND_PACKS)
                        ? PAN_FORMAT_SOFTWARE : PAN_FORMAT_PACK;
        }

        /* Some formats are missing as typed on some GPUs but have unpacks */
        if (quirks & MIDGARD_MISSING_LOADS) {
                switch (desc->format) {
                case PIPE_FORMAT_R11G11B10_FLOAT:
                case PIPE_FORMAT_R10G10B10A2_UNORM:
                case PIPE_FORMAT_B10G10R10A2_UNORM:
                case PIPE_FORMAT_R10G10B10X2_UNORM:
                case PIPE_FORMAT_B10G10R10X2_UNORM:
                case PIPE_FORMAT_R10G10B10A2_UINT:
                        return PAN_FORMAT_PACK;
                default:
                        return PAN_FORMAT_NATIVE;
                }
        }

        /* Otherwise, we can do native */
        return PAN_FORMAT_NATIVE;
}
enum pan_format_class
pan_format_class_store(const struct util_format_description *desc, unsigned quirks)
{
        /* Check if we can do anything better than software architecturally */
        if (quirks & MIDGARD_NO_TYPED_BLEND_STORES) {
                return (quirks & NO_BLEND_PACKS)
                        ? PAN_FORMAT_SOFTWARE : PAN_FORMAT_PACK;
        }

        return PAN_FORMAT_NATIVE;
}
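
/* Reading the two classifiers together: on hardware with
 * MIDGARD_NO_TYPED_BLEND_STORES but without NO_BLEND_PACKS, every store
 * classifies as PAN_FORMAT_PACK; once NO_BLEND_PACKS is also set, stores
 * fall all the way back to PAN_FORMAT_SOFTWARE and must be lowered here. */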
/* Software packs/unpacks, by format class. Packs take in the pixel value
 * typed as `pan_unpacked_type_for_format` of the format and return an
 * i32vec4 suitable for storing (with components replicated to fill).
 * Unpacks do the reverse but cannot rely on replication.
 *
 * Pure 32 formats (R32F ... RGBA32F) are 32 unpacked, so we just need to
 * replicate to fill */

static nir_ssa_def *
pan_pack_pure_32(nir_builder *b, nir_ssa_def *v)
{
        nir_ssa_def *replicated[4];

        for (unsigned i = 0; i < 4; ++i)
                replicated[i] = nir_channel(b, v, i % v->num_components);

        return nir_vec(b, replicated, 4);
}

static nir_ssa_def *
pan_unpack_pure_32(nir_builder *b, nir_ssa_def *pack, unsigned num_components)
{
        return nir_channels(b, pack, (1 << num_components) - 1);
}
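
/* nir_channels selects the first num_components channels with a bitmask:
 * num_components = 3 gives the mask (1 << 3) - 1 = 0b111, keeping .xyz.
 * Likewise, the modulo in pan_pack_pure_32 wraps a vec2 (x, y) out to
 * (x, y, x, y) so the store always sees a full vec4. */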
/* Pure x16 formats are x16 unpacked, so it's similar, but we need to pack
 * upper/lower halves of course */

static nir_ssa_def *
pan_pack_pure_16(nir_builder *b, nir_ssa_def *v)
{
        nir_ssa_def *replicated[4];

        for (unsigned i = 0; i < 4; ++i) {
                unsigned c = 2 * i;

                nir_ssa_def *parts[2] = {
                        nir_channel(b, v, (c + 0) % v->num_components),
                        nir_channel(b, v, (c + 1) % v->num_components)
                };

                replicated[i] = nir_pack_32_2x16(b, nir_vec(b, parts, 2));
        }

        return nir_vec(b, replicated, 4);
}
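
/* For an RGBA16 pixel (r, g, b, a), each 32-bit lane thus holds two adjacent
 * 16-bit channels, with nir_pack_32_2x16 placing the first component in the
 * low half: lane 0 holds g:r, lane 1 holds a:b, and lanes 2-3 repeat them
 * thanks to the modulo. */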
static nir_ssa_def *
pan_unpack_pure_16(nir_builder *b, nir_ssa_def *pack, unsigned num_components)
{
        nir_ssa_def *unpacked[4];

        assert(num_components <= 4);

        for (unsigned i = 0; i < num_components; i += 2) {
                nir_ssa_def *halves =
                        nir_unpack_32_2x16(b, nir_channel(b, pack, i >> 1));

                unpacked[i + 0] = nir_channel(b, halves, 0);
                unpacked[i + 1] = nir_channel(b, halves, 1);
        }

        for (unsigned i = num_components; i < 4; ++i)
                unpacked[i] = nir_imm_intN_t(b, 0, 16);

        return nir_vec(b, unpacked, 4);
}
/* And likewise for x8. pan_fill_4 fills a 4-channel vector with an n-channel
 * vector (n <= 4), replicating as needed. pan_replicate_4 constructs a
 * 4-channel vector from a scalar via replication */

static nir_ssa_def *
pan_fill_4(nir_builder *b, nir_ssa_def *v)
{
        nir_ssa_def *q[4];
        assert(v->num_components <= 4);

        for (unsigned j = 0; j < 4; ++j)
                q[j] = nir_channel(b, v, j % v->num_components);

        return nir_vec(b, q, 4);
}

static nir_ssa_def *
pan_replicate_4(nir_builder *b, nir_ssa_def *v)
{
        nir_ssa_def *replicated[4] = { v, v, v, v };
        return nir_vec(b, replicated, 4);
}
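
/* The distinction matters: pan_fill_4 of a vec2 (x, y) yields (x, y, x, y),
 * while pan_replicate_4 of a scalar s yields (s, s, s, s). The x8 pack below
 * uses both, filling out four 8-bit channels, packing them into a single
 * 32-bit word, then replicating that word across the vec4. */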
static nir_ssa_def *
pan_pack_pure_8(nir_builder *b, nir_ssa_def *v)
{
        return pan_replicate_4(b, nir_pack_32_4x8(b, pan_fill_4(b, v)));
}

static nir_ssa_def *
pan_unpack_pure_8(nir_builder *b, nir_ssa_def *pack, unsigned num_components)
{
        assert(num_components <= 4);
        nir_ssa_def *unpacked = nir_unpack_32_4x8(b, nir_channel(b, pack, 0));
        return nir_channels(b, unpacked, (1 << num_components) - 1);
}
/* UNORM 8 is unpacked to f16 vec4. We could directly use the un/pack_unorm_4x8
 * ops provided we replicate appropriately, but for packing we'd rather stay in
 * 8/16-bit whereas the NIR op forces 32-bit, so we do it manually */
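
/* Per channel, in f16 arithmetic, the manual pack below computes
 *
 *      f2u8(fround_even(fsat(x) * 255.0))
 *
 * which matches the usual unorm8 definition round(clamp(x, 0, 1) * 255). */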
static nir_ssa_def *
pan_pack_unorm_8(nir_builder *b, nir_ssa_def *v)
{
        return pan_replicate_4(b, nir_pack_32_4x8(b,
                nir_f2u8(b, nir_fround_even(b, nir_fmul(b, nir_fsat(b,
                        pan_fill_4(b, v)), nir_imm_float16(b, 255.0))))));
}

static nir_ssa_def *
pan_unpack_unorm_8(nir_builder *b, nir_ssa_def *pack, unsigned num_components)
{
        assert(num_components <= 4);
        nir_ssa_def *unpacked = nir_unpack_unorm_4x8(b, nir_channel(b, pack, 0));
        return nir_f2f16(b, unpacked);
}
/* Generic dispatches for un/pack regardless of format */

static nir_ssa_def *
pan_unpack(nir_builder *b,
           const struct util_format_description *desc,
           nir_ssa_def *packed)
{
        if (util_format_is_unorm8(desc))
                return pan_unpack_unorm_8(b, packed, desc->nr_channels);

        if (desc->is_array) {
                int c = util_format_get_first_non_void_channel(desc->format);
                struct util_format_channel_description d = desc->channel[c];

                if (d.size == 32 || d.size == 16) {
                        assert(!d.normalized);
                        assert(d.type == UTIL_FORMAT_TYPE_FLOAT || d.pure_integer);

                        return d.size == 32 ?
                                pan_unpack_pure_32(b, packed, desc->nr_channels) :
                                pan_unpack_pure_16(b, packed, desc->nr_channels);
                } else if (d.size == 8) {
                        assert(d.pure_integer);
                        return pan_unpack_pure_8(b, packed, desc->nr_channels);
                } else {
                        unreachable("Unrenderable size");
                }
        }

        fprintf(stderr, "%s\n", desc->name);
        unreachable("Unknown format");
}
static nir_ssa_def *
pan_pack(nir_builder *b,
         const struct util_format_description *desc,
         nir_ssa_def *unpacked)
{
        if (util_format_is_unorm8(desc))
                return pan_pack_unorm_8(b, unpacked);

        if (desc->is_array) {
                int c = util_format_get_first_non_void_channel(desc->format);
                struct util_format_channel_description d = desc->channel[c];

                if (d.size == 32 || d.size == 16) {
                        assert(!d.normalized);
                        assert(d.type == UTIL_FORMAT_TYPE_FLOAT || d.pure_integer);

                        return d.size == 32 ? pan_pack_pure_32(b, unpacked) :
                                pan_pack_pure_16(b, unpacked);
                } else if (d.size == 8) {
                        assert(d.pure_integer);
                        return pan_pack_pure_8(b, unpacked);
                } else {
                        unreachable("Unrenderable size");
                }
        }

        fprintf(stderr, "%s\n", desc->name);
        unreachable("Unknown format");
}
static void
pan_lower_fb_store(nir_shader *shader,
                   nir_builder *b,
                   nir_intrinsic_instr *intr,
                   const struct util_format_description *desc,
                   unsigned quirks)
{
        /* For stores, add conversion before */
        nir_ssa_def *unpacked = nir_ssa_for_src(b, intr->src[1], 4);
        nir_ssa_def *packed = pan_pack(b, desc, unpacked);

        nir_intrinsic_instr *new =
                nir_intrinsic_instr_create(shader, nir_intrinsic_store_raw_output_pan);
        new->src[0] = nir_src_for_ssa(packed);
        new->num_components = 4;
        nir_builder_instr_insert(b, &new->instr);
}
static void
pan_lower_fb_load(nir_shader *shader,
                  nir_builder *b,
                  nir_intrinsic_instr *intr,
                  const struct util_format_description *desc,
                  unsigned quirks)
{
        nir_intrinsic_instr *new = nir_intrinsic_instr_create(shader,
                        nir_intrinsic_load_raw_output_pan);
        new->num_components = 4;

        nir_ssa_dest_init(&new->instr, &new->dest, 4, 32, NULL);
        nir_builder_instr_insert(b, &new->instr);

        /* Convert the raw value */
        nir_ssa_def *packed = &new->dest.ssa;
        nir_ssa_def *unpacked = pan_unpack(b, desc, packed);

        nir_src rewritten = nir_src_for_ssa(unpacked);
        nir_ssa_def_rewrite_uses_after(&intr->dest.ssa, rewritten, &intr->instr);
}
void
pan_lower_framebuffer(nir_shader *shader,
                      const struct util_format_description *desc,
                      unsigned quirks)
{
        /* Blend shaders are represented as special fragment shaders */
        assert(shader->info.stage == MESA_SHADER_FRAGMENT);

        nir_foreach_function(func, shader) {
                nir_foreach_block(block, func->impl) {
                        nir_foreach_instr_safe(instr, block) {
                                if (instr->type != nir_instr_type_intrinsic)
                                        continue;

                                nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

                                bool is_load = intr->intrinsic == nir_intrinsic_load_deref;
                                bool is_store = intr->intrinsic == nir_intrinsic_store_deref;

                                if (!(is_load || is_store))
                                        continue;

                                /* Don't worry about MRT */
                                nir_variable *var = nir_intrinsic_get_var(intr, 0);

                                if (var->data.location != FRAG_RESULT_COLOR)
                                        continue;

                                nir_builder b;
                                nir_builder_init(&b, func->impl);

                                if (is_store) {
                                        b.cursor = nir_before_instr(instr);
                                        pan_lower_fb_store(shader, &b, intr, desc, quirks);
                                } else {
                                        b.cursor = nir_after_instr(instr);
                                        pan_lower_fb_load(shader, &b, intr, desc, quirks);
                                }

                                nir_instr_remove(instr);
                        }
                }

                nir_metadata_preserve(func->impl, nir_metadata_block_index |
                                      nir_metadata_dominance);
        }
}
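
/* Usage sketch, for orientation only: a backend compiling a blend shader
 * might classify the render target format and run this pass when a software
 * conversion is required. The surrounding variables (nir, rt_format, quirks)
 * are hypothetical, not part of this file's API:
 *
 *      const struct util_format_description *desc =
 *              util_format_description(rt_format);
 *
 *      if (pan_format_class_load(desc, quirks) == PAN_FORMAT_SOFTWARE ||
 *          pan_format_class_store(desc, quirks) == PAN_FORMAT_SOFTWARE)
 *              pan_lower_framebuffer(nir, desc, quirks);
 */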