[mesa.git] src/panfrost/util/pan_lower_framebuffer.c @ 6e704f7763c65928e8fa1538df8f844d354bfa25
/*
 * Copyright (C) 2020 Collabora, Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors (Collabora):
 *    Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
 */

/**
 * Implements framebuffer format conversions in software for Midgard/Bifrost
 * blend shaders. This pass is designed for a single render target; Midgard
 * duplicates blend shaders for MRT to simplify everything. A particular
 * framebuffer format may be categorized as 1) typed load available, 2) typed
 * unpack available, or 3) software unpack only, and likewise for stores. The
 * first two types are handled in the compiler backend directly, so this module
 * is responsible for identifying type 3 formats (hardware dependent) and
 * inserting appropriate ALU code to perform the conversion from the packed
 * type to a designated unpacked type, and vice versa.
 *
 * The unpacked type depends on the format:
 *
 * - For 32-bit float formats, 32-bit floats.
 * - For other floats, 16-bit floats.
 * - For 32-bit ints, 32-bit ints.
 * - For 8-bit ints, 8-bit ints.
 * - For other ints, 16-bit ints.
 *
 * The rationale is to optimize blending and logic op instructions by using the
 * smallest precision necessary to store the pixel losslessly.
 */
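
/* For example, under this scheme an RGBA8 UNORM render target unpacks to a
 * vec4 of fp16, RGBA16UI to 16-bit uints, RGBA8UI stays 8-bit, and RGBA32F
 * stays as 32-bit floats (see pan_unpacked_type_for_format below). */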

#include "compiler/nir/nir.h"
#include "compiler/nir/nir_builder.h"
#include "compiler/nir/nir_format_convert.h"
#include "util/format/u_format.h"
#include "pan_lower_framebuffer.h"
#include "panfrost-quirks.h"

/* Determines the unpacked type best suiting a given format, so the rest of the
 * pipeline may be adjusted accordingly */

nir_alu_type
pan_unpacked_type_for_format(const struct util_format_description *desc)
{
        int c = util_format_get_first_non_void_channel(desc->format);

        if (c == -1)
                unreachable("Void format not renderable");

        bool large = (desc->channel[c].size > 16);
        bool bit8 = (desc->channel[c].size == 8);
        assert(desc->channel[c].size <= 32);

        if (desc->channel[c].normalized)
                return large ? nir_type_float32 : nir_type_float16;

        switch (desc->channel[c].type) {
        case UTIL_FORMAT_TYPE_UNSIGNED:
                return bit8 ? nir_type_uint8 :
                        large ? nir_type_uint32 : nir_type_uint16;
        case UTIL_FORMAT_TYPE_SIGNED:
                return bit8 ? nir_type_int8 :
                        large ? nir_type_int32 : nir_type_int16;
        case UTIL_FORMAT_TYPE_FLOAT:
                return large ? nir_type_float32 : nir_type_float16;
        default:
                unreachable("Format not renderable");
        }
}

enum pan_format_class
pan_format_class_load(const struct util_format_description *desc, unsigned quirks)
{
        /* Check if we can do anything better than software architecturally */
        if (quirks & MIDGARD_NO_TYPED_BLEND_LOADS) {
                return (quirks & NO_BLEND_PACKS)
                        ? PAN_FORMAT_SOFTWARE : PAN_FORMAT_PACK;
        }

        /* Some formats are missing as typed on some GPUs but have unpacks */
        if (quirks & MIDGARD_MISSING_LOADS) {
                switch (desc->format) {
                case PIPE_FORMAT_R11G11B10_FLOAT:
                case PIPE_FORMAT_R10G10B10A2_UNORM:
                case PIPE_FORMAT_B10G10R10A2_UNORM:
                case PIPE_FORMAT_R10G10B10X2_UNORM:
                case PIPE_FORMAT_B10G10R10X2_UNORM:
                case PIPE_FORMAT_R10G10B10A2_UINT:
                        return PAN_FORMAT_PACK;
                default:
                        return PAN_FORMAT_NATIVE;
                }
        }

        /* Otherwise, we can do native */
        return PAN_FORMAT_NATIVE;
}

enum pan_format_class
pan_format_class_store(const struct util_format_description *desc, unsigned quirks)
{
        /* Check if we can do anything better than software architecturally */
        if (quirks & MIDGARD_NO_TYPED_BLEND_STORES) {
                return (quirks & NO_BLEND_PACKS)
                        ? PAN_FORMAT_SOFTWARE : PAN_FORMAT_PACK;
        }

        return PAN_FORMAT_NATIVE;
}

/* Convenience method */

static enum pan_format_class
pan_format_class(const struct util_format_description *desc, unsigned quirks, bool is_store)
{
        if (is_store)
                return pan_format_class_store(desc, quirks);
        else
                return pan_format_class_load(desc, quirks);
}

/* Software packs/unpacks, by format class. Packs take in the pixel value typed
 * as `pan_unpacked_type_for_format` of the format and return an i32vec4
 * suitable for storing (with components replicated to fill). Unpacks do the
 * reverse but cannot rely on replication.
 *
 * Pure 32 formats (R32F ... RGBA32F) are 32 unpacked, so just need to
 * replicate to fill */
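
/* e.g. for R32F, a single unpacked component x becomes the raw i32vec4
 * (x, x, x, x); unpacking then simply selects the first num_components
 * channels again. */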

static nir_ssa_def *
pan_pack_pure_32(nir_builder *b, nir_ssa_def *v)
{
        nir_ssa_def *replicated[4];

        for (unsigned i = 0; i < 4; ++i)
                replicated[i] = nir_channel(b, v, i % v->num_components);

        return nir_vec(b, replicated, 4);
}

static nir_ssa_def *
pan_unpack_pure_32(nir_builder *b, nir_ssa_def *pack, unsigned num_components)
{
        return nir_channels(b, pack, (1 << num_components) - 1);
}

/* Pure x16 formats are x16 unpacked, so it's similar, but we need to pack
 * upper/lower halves of course */
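
/* e.g. an unpacked RGBA16F pixel (r, g, b, a) packs as the i32vec4
 * (r | g << 16, b | a << 16, r | g << 16, b | a << 16), treating each half as
 * raw bits, with the first component of each pair in the low half. */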

static nir_ssa_def *
pan_pack_pure_16(nir_builder *b, nir_ssa_def *v)
{
        nir_ssa_def *replicated[4];

        for (unsigned i = 0; i < 4; ++i) {
                unsigned c = 2 * i;

                nir_ssa_def *parts[2] = {
                        nir_channel(b, v, (c + 0) % v->num_components),
                        nir_channel(b, v, (c + 1) % v->num_components)
                };

                replicated[i] = nir_pack_32_2x16(b, nir_vec(b, parts, 2));
        }

        return nir_vec(b, replicated, 4);
}

static nir_ssa_def *
pan_unpack_pure_16(nir_builder *b, nir_ssa_def *pack, unsigned num_components)
{
        nir_ssa_def *unpacked[4];

        assert(num_components <= 4);

        for (unsigned i = 0; i < num_components; i += 2) {
                nir_ssa_def *halves =
                        nir_unpack_32_2x16(b, nir_channel(b, pack, i >> 1));

                unpacked[i + 0] = nir_channel(b, halves, 0);
                unpacked[i + 1] = nir_channel(b, halves, 1);
        }

        for (unsigned i = num_components; i < 4; ++i)
                unpacked[i] = nir_imm_intN_t(b, 0, 16);

        return nir_vec(b, unpacked, 4);
}

/* And likewise for x8. pan_fill_4 fills a 4-channel vector with an n-channel
 * vector (n <= 4), replicating as needed. pan_replicate_4 constructs a
 * 4-channel vector from a scalar via replication */
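
/* e.g. pan_fill_4 of an RG vector (r, g) yields (r, g, r, g), while
 * pan_replicate_4 of a scalar s yields (s, s, s, s). */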

static nir_ssa_def *
pan_fill_4(nir_builder *b, nir_ssa_def *v)
{
        nir_ssa_def *q[4];
        assert(v->num_components <= 4);

        for (unsigned j = 0; j < 4; ++j)
                q[j] = nir_channel(b, v, j % v->num_components);

        return nir_vec(b, q, 4);
}

static nir_ssa_def *
pan_replicate_4(nir_builder *b, nir_ssa_def *v)
{
        nir_ssa_def *replicated[4] = { v, v, v, v };
        return nir_vec(b, replicated, 4);
}

static nir_ssa_def *
pan_pack_pure_8(nir_builder *b, nir_ssa_def *v)
{
        return pan_replicate_4(b, nir_pack_32_4x8(b, pan_fill_4(b, v)));
}

static nir_ssa_def *
pan_unpack_pure_8(nir_builder *b, nir_ssa_def *pack, unsigned num_components)
{
        assert(num_components <= 4);
        nir_ssa_def *unpacked = nir_unpack_32_4x8(b, nir_channel(b, pack, 0));
        return nir_channels(b, unpacked, (1 << num_components) - 1);
}

/* UNORM 8 is unpacked to f16 vec4. We could directly use the un/pack_unorm_4x8
 * ops provided we replicate appropriately, but for packing we'd rather stay in
 * 8/16-bit whereas the NIR op forces 32-bit, so we do it manually */
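
/* i.e. each channel becomes f2u8(fround_even(fsat(x) * 255.0)); for example
 * x = 0.5 gives f2u8(round(127.5)) = 128 = 0x80, and the four bytes are then
 * packed into one 32-bit word and replicated. */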

static nir_ssa_def *
pan_pack_unorm_8(nir_builder *b, nir_ssa_def *v)
{
        return pan_replicate_4(b, nir_pack_32_4x8(b,
                nir_f2u8(b, nir_fround_even(b, nir_fmul(b, nir_fsat(b,
                        pan_fill_4(b, v)), nir_imm_float16(b, 255.0))))));
}

static nir_ssa_def *
pan_unpack_unorm_8(nir_builder *b, nir_ssa_def *pack, unsigned num_components)
{
        assert(num_components <= 4);
        nir_ssa_def *unpacked = nir_unpack_unorm_4x8(b, nir_channel(b, pack, 0));
        return nir_f2f16(b, unpacked);
}

/* UNORM 4 is also unpacked to f16, which prevents us from using the shared
 * unpack which strongly assumes fp32. However, on the tilebuffer it is
 * actually packed as:
 *
 *      [AAAA] [0000] [BBBB] [0000] [GGGG] [0000] [RRRR] [0000]
 *
 * In other words, spread out so each nibble is byte-aligned, sitting in the
 * top half of its byte. So pack as:
 *
 *      pack_32_4x8(f2u8_rte(v * 15.0) << 4)
 */
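
/* e.g. v = (1.0, 0.5, 0.25, 0.0) scales to f2u8_rte((15.0, 7.5, 3.75, 0.0)) =
 * (15, 8, 4, 0), shifts to (0xF0, 0x80, 0x40, 0x00), and (assuming
 * pack_32_4x8 places component 0 in the low byte) packs to 0x004080F0. */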

static nir_ssa_def *
pan_pack_unorm_small(nir_builder *b, nir_ssa_def *v,
                     nir_ssa_def *scales, nir_ssa_def *shifts)
{
        nir_ssa_def *f = nir_fmul(b, nir_fsat(b, pan_fill_4(b, v)), scales);
        nir_ssa_def *u8 = nir_f2u8(b, nir_fround_even(b, f));
        nir_ssa_def *s = nir_ishl(b, u8, shifts);
        nir_ssa_def *repl = nir_pack_32_4x8(b, s);

        return pan_replicate_4(b, repl);
}

static nir_ssa_def *
pan_unpack_unorm_small(nir_builder *b, nir_ssa_def *pack,
                       nir_ssa_def *scales, nir_ssa_def *shifts)
{
        nir_ssa_def *channels = nir_unpack_32_4x8(b, nir_channel(b, pack, 0));
        nir_ssa_def *raw = nir_ushr(b, nir_u2u16(b, channels), shifts);
        return nir_fmul(b, nir_u2f16(b, raw), scales);
}

static nir_ssa_def *
pan_pack_unorm_4(nir_builder *b, nir_ssa_def *v)
{
        return pan_pack_unorm_small(b, v,
                nir_imm_vec4_16(b, 15.0, 15.0, 15.0, 15.0),
                nir_imm_ivec4(b, 4, 4, 4, 4));
}

static nir_ssa_def *
pan_unpack_unorm_4(nir_builder *b, nir_ssa_def *v)
{
        return pan_unpack_unorm_small(b, v,
                nir_imm_vec4_16(b, 1.0 / 15.0, 1.0 / 15.0, 1.0 / 15.0, 1.0 / 15.0),
                nir_imm_ivec4(b, 4, 4, 4, 4));
}

/* UNORM RGB5_A1 and RGB565 are similar */
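
/* e.g. with scales (31, 31, 31, 1) and shifts (3, 3, 3, 7), RGB5_A1
 * presumably lands in the tilebuffer byte-per-channel with the significant
 * bits at the top of each byte, [A0000000] [BBBBB000] [GGGGG000] [RRRRR000],
 * mirroring the UNORM 4 layout above. */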

static nir_ssa_def *
pan_pack_unorm_5551(nir_builder *b, nir_ssa_def *v)
{
        return pan_pack_unorm_small(b, v,
                nir_imm_vec4_16(b, 31.0, 31.0, 31.0, 1.0),
                nir_imm_ivec4(b, 3, 3, 3, 7));
}

static nir_ssa_def *
pan_unpack_unorm_5551(nir_builder *b, nir_ssa_def *v)
{
        return pan_unpack_unorm_small(b, v,
                nir_imm_vec4_16(b, 1.0 / 31.0, 1.0 / 31.0, 1.0 / 31.0, 1.0),
                nir_imm_ivec4(b, 3, 3, 3, 7));
}

static nir_ssa_def *
pan_pack_unorm_565(nir_builder *b, nir_ssa_def *v)
{
        return pan_pack_unorm_small(b, v,
                nir_imm_vec4_16(b, 31.0, 63.0, 31.0, 0.0),
                nir_imm_ivec4(b, 3, 2, 3, 0));
}

static nir_ssa_def *
pan_unpack_unorm_565(nir_builder *b, nir_ssa_def *v)
{
        return pan_unpack_unorm_small(b, v,
                nir_imm_vec4_16(b, 1.0 / 31.0, 1.0 / 63.0, 1.0 / 31.0, 0.0),
                nir_imm_ivec4(b, 3, 2, 3, 0));
}

/* RGB10_A2 is packed in the tilebuffer with the bottom 3 bytes holding the
 * top 8 bits of each of R, G, B, and the top byte holding the low 2 bits of
 * each channel (including the 2-bit alpha) packed together. As imirkin
 * pointed out, this means free conversion to RGBX8 */
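
/* i.e. roughly:
 *
 *      bits  0.. 7 = R[9:2]     bits 24..25 = R[1:0]
 *      bits  8..15 = G[9:2]     bits 26..27 = G[1:0]
 *      bits 16..23 = B[9:2]     bits 28..29 = B[1:0]
 *                               bits 30..31 = A[1:0]
 */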

static nir_ssa_def *
pan_pack_unorm_1010102(nir_builder *b, nir_ssa_def *v)
{
        nir_ssa_def *scale = nir_imm_vec4_16(b, 1023.0, 1023.0, 1023.0, 3.0);
        nir_ssa_def *s = nir_f2u32(b, nir_fround_even(b, nir_f2f32(b, nir_fmul(b, nir_fsat(b, v), scale))));

        nir_ssa_def *top8 = nir_ushr(b, s, nir_imm_ivec4(b, 0x2, 0x2, 0x2, 0x2));
        nir_ssa_def *top8_rgb = nir_pack_32_4x8(b, nir_u2u8(b, top8));

        nir_ssa_def *bottom2 = nir_iand(b, s, nir_imm_ivec4(b, 0x3, 0x3, 0x3, 0x3));

        nir_ssa_def *top =
                nir_ior(b,
                        nir_ior(b,
                                nir_ishl(b, nir_channel(b, bottom2, 0), nir_imm_int(b, 24 + 0)),
                                nir_ishl(b, nir_channel(b, bottom2, 1), nir_imm_int(b, 24 + 2))),
                        nir_ior(b,
                                nir_ishl(b, nir_channel(b, bottom2, 2), nir_imm_int(b, 24 + 4)),
                                nir_ishl(b, nir_channel(b, bottom2, 3), nir_imm_int(b, 24 + 6))));

        nir_ssa_def *p = nir_ior(b, top, top8_rgb);
        return pan_replicate_4(b, p);
}

static nir_ssa_def *
pan_unpack_unorm_1010102(nir_builder *b, nir_ssa_def *packed)
{
        nir_ssa_def *p = nir_channel(b, packed, 0);
        nir_ssa_def *bytes = nir_unpack_32_4x8(b, p);
        nir_ssa_def *ubytes = nir_u2u16(b, bytes);

        nir_ssa_def *shifts = nir_ushr(b, pan_replicate_4(b, nir_channel(b, ubytes, 3)),
                nir_imm_ivec4(b, 0, 2, 4, 6));
        nir_ssa_def *precision = nir_iand(b, shifts,
                nir_i2i16(b, nir_imm_ivec4(b, 0x3, 0x3, 0x3, 0x3)));

        nir_ssa_def *top_rgb = nir_ishl(b, nir_channels(b, ubytes, 0x7), nir_imm_int(b, 2));
        top_rgb = nir_ior(b, nir_channels(b, precision, 0x7), top_rgb);

        nir_ssa_def *chans[4] = {
                nir_channel(b, top_rgb, 0),
                nir_channel(b, top_rgb, 1),
                nir_channel(b, top_rgb, 2),
                nir_channel(b, precision, 3)
        };

        nir_ssa_def *scale = nir_imm_vec4(b, 1.0 / 1023.0, 1.0 / 1023.0, 1.0 / 1023.0, 1.0 / 3.0);
        return nir_f2f16(b, nir_fmul(b, nir_u2f32(b, nir_vec(b, chans, 4)), scale));
}

/* On the other hand, the pure int RGB10_A2 is identical to the spec */
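
/* i.e. the packed word is just r | (g << 10) | (b << 20) | (a << 30),
 * matching the format's in-memory bit layout. */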

static nir_ssa_def *
pan_pack_uint_1010102(nir_builder *b, nir_ssa_def *v)
{
        nir_ssa_def *shift = nir_ishl(b, nir_u2u32(b, v),
                nir_imm_ivec4(b, 0, 10, 20, 30));

        nir_ssa_def *p = nir_ior(b,
                nir_ior(b, nir_channel(b, shift, 0), nir_channel(b, shift, 1)),
                nir_ior(b, nir_channel(b, shift, 2), nir_channel(b, shift, 3)));

        return pan_replicate_4(b, p);
}

static nir_ssa_def *
pan_unpack_uint_1010102(nir_builder *b, nir_ssa_def *packed)
{
        nir_ssa_def *chan = nir_channel(b, packed, 0);

        nir_ssa_def *shift = nir_ushr(b, pan_replicate_4(b, chan),
                nir_imm_ivec4(b, 0, 10, 20, 30));

        nir_ssa_def *mask = nir_iand(b, shift,
                nir_imm_ivec4(b, 0x3ff, 0x3ff, 0x3ff, 0x3));

        return nir_u2u16(b, mask);
}

/* NIR means we can *finally* catch a break */

static nir_ssa_def *
pan_pack_r11g11b10(nir_builder *b, nir_ssa_def *v)
{
        return pan_replicate_4(b, nir_format_pack_11f11f10f(b,
                nir_f2f32(b, v)));
}

static nir_ssa_def *
pan_unpack_r11g11b10(nir_builder *b, nir_ssa_def *v)
{
        nir_ssa_def *f32 = nir_format_unpack_11f11f10f(b, nir_channel(b, v, 0));
        nir_ssa_def *f16 = nir_f2f16(b, f32);

        /* Extend to vec4 with alpha */
        nir_ssa_def *components[4] = {
                nir_channel(b, f16, 0),
                nir_channel(b, f16, 1),
                nir_channel(b, f16, 2),
                nir_imm_float16(b, 1.0)
        };

        return nir_vec(b, components, 4);
}

/* Wrapper around sRGB conversion */

static nir_ssa_def *
pan_linear_to_srgb(nir_builder *b, nir_ssa_def *linear)
{
        nir_ssa_def *rgb = nir_channels(b, linear, 0x7);

        /* TODO: fp16 native conversion */
        nir_ssa_def *srgb = nir_f2f16(b,
                nir_format_linear_to_srgb(b, nir_f2f32(b, rgb)));

        nir_ssa_def *comp[4] = {
                nir_channel(b, srgb, 0),
                nir_channel(b, srgb, 1),
                nir_channel(b, srgb, 2),
                nir_channel(b, linear, 3),
        };

        return nir_vec(b, comp, 4);
}

static nir_ssa_def *
pan_srgb_to_linear(nir_builder *b, nir_ssa_def *srgb)
{
        nir_ssa_def *rgb = nir_channels(b, srgb, 0x7);

        /* TODO: fp16 native conversion */
        nir_ssa_def *linear = nir_f2f16(b,
                nir_format_srgb_to_linear(b, nir_f2f32(b, rgb)));

        nir_ssa_def *comp[4] = {
                nir_channel(b, linear, 0),
                nir_channel(b, linear, 1),
                nir_channel(b, linear, 2),
                nir_channel(b, srgb, 3),
        };

        return nir_vec(b, comp, 4);
}

/* Generic dispatches for un/pack regardless of format */

static bool
pan_is_unorm4(const struct util_format_description *desc)
{
        switch (desc->format) {
        case PIPE_FORMAT_B4G4R4A4_UNORM:
        case PIPE_FORMAT_B4G4R4X4_UNORM:
        case PIPE_FORMAT_A4R4_UNORM:
        case PIPE_FORMAT_R4A4_UNORM:
        case PIPE_FORMAT_A4B4G4R4_UNORM:
                return true;
        default:
                return false;
        }
}

static nir_ssa_def *
pan_unpack(nir_builder *b,
           const struct util_format_description *desc,
           nir_ssa_def *packed)
{
        if (util_format_is_unorm8(desc))
                return pan_unpack_unorm_8(b, packed, desc->nr_channels);

        if (pan_is_unorm4(desc))
                return pan_unpack_unorm_4(b, packed);

        if (desc->is_array) {
                int c = util_format_get_first_non_void_channel(desc->format);
                assert(c >= 0);
                struct util_format_channel_description d = desc->channel[c];

                if (d.size == 32 || d.size == 16) {
                        assert(!d.normalized);
                        assert(d.type == UTIL_FORMAT_TYPE_FLOAT || d.pure_integer);

                        return d.size == 32 ? pan_unpack_pure_32(b, packed, desc->nr_channels) :
                                pan_unpack_pure_16(b, packed, desc->nr_channels);
                } else if (d.size == 8) {
                        assert(d.pure_integer);
                        return pan_unpack_pure_8(b, packed, desc->nr_channels);
                } else {
                        unreachable("Unrenderable size");
                }
        }

        switch (desc->format) {
        case PIPE_FORMAT_B5G5R5A1_UNORM:
                return pan_unpack_unorm_5551(b, packed);
        case PIPE_FORMAT_B5G6R5_UNORM:
                return pan_unpack_unorm_565(b, packed);
        case PIPE_FORMAT_R10G10B10A2_UNORM:
                return pan_unpack_unorm_1010102(b, packed);
        case PIPE_FORMAT_R10G10B10A2_UINT:
                return pan_unpack_uint_1010102(b, packed);
        case PIPE_FORMAT_R11G11B10_FLOAT:
                return pan_unpack_r11g11b10(b, packed);
        default:
                break;
        }

        fprintf(stderr, "%s\n", desc->name);
        unreachable("Unknown format");
}

static nir_ssa_def *
pan_pack(nir_builder *b,
         const struct util_format_description *desc,
         nir_ssa_def *unpacked)
{
        if (desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB)
                unpacked = pan_linear_to_srgb(b, unpacked);

        if (util_format_is_unorm8(desc))
                return pan_pack_unorm_8(b, unpacked);

        if (pan_is_unorm4(desc))
                return pan_pack_unorm_4(b, unpacked);

        if (desc->is_array) {
                int c = util_format_get_first_non_void_channel(desc->format);
                assert(c >= 0);
                struct util_format_channel_description d = desc->channel[c];

                if (d.size == 32 || d.size == 16) {
                        assert(!d.normalized);
                        assert(d.type == UTIL_FORMAT_TYPE_FLOAT || d.pure_integer);

                        return d.size == 32 ? pan_pack_pure_32(b, unpacked) :
                                pan_pack_pure_16(b, unpacked);
                } else if (d.size == 8) {
                        assert(d.pure_integer);
                        return pan_pack_pure_8(b, unpacked);
                } else {
                        unreachable("Unrenderable size");
                }
        }

        switch (desc->format) {
        case PIPE_FORMAT_B5G5R5A1_UNORM:
                return pan_pack_unorm_5551(b, unpacked);
        case PIPE_FORMAT_B5G6R5_UNORM:
                return pan_pack_unorm_565(b, unpacked);
        case PIPE_FORMAT_R10G10B10A2_UNORM:
                return pan_pack_unorm_1010102(b, unpacked);
        case PIPE_FORMAT_R10G10B10A2_UINT:
                return pan_pack_uint_1010102(b, unpacked);
        case PIPE_FORMAT_R11G11B10_FLOAT:
                return pan_pack_r11g11b10(b, unpacked);
        default:
                break;
        }

        fprintf(stderr, "%s\n", desc->name);
        unreachable("Unknown format");
}

static void
pan_lower_fb_store(nir_shader *shader,
                   nir_builder *b,
                   nir_intrinsic_instr *intr,
                   const struct util_format_description *desc,
                   unsigned quirks)
{
        /* For stores, add conversion before */
        nir_ssa_def *unpacked = nir_ssa_for_src(b, intr->src[1], 4);
        nir_ssa_def *packed = pan_pack(b, desc, unpacked);

        nir_intrinsic_instr *new =
                nir_intrinsic_instr_create(shader, nir_intrinsic_store_raw_output_pan);
        new->src[0] = nir_src_for_ssa(packed);
        new->num_components = 4;
        nir_builder_instr_insert(b, &new->instr);
}

static void
pan_lower_fb_load(nir_shader *shader,
                  nir_builder *b,
                  nir_intrinsic_instr *intr,
                  const struct util_format_description *desc,
                  unsigned quirks)
{
        nir_intrinsic_instr *new = nir_intrinsic_instr_create(shader,
                        nir_intrinsic_load_raw_output_pan);
        new->num_components = 4;

        nir_ssa_dest_init(&new->instr, &new->dest, 4, 32, NULL);
        nir_builder_instr_insert(b, &new->instr);

        /* Convert the raw value */
        nir_ssa_def *packed = &new->dest.ssa;
        nir_ssa_def *unpacked = pan_unpack(b, desc, packed);

        if (desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB)
                unpacked = pan_srgb_to_linear(b, unpacked);

        nir_src rewritten = nir_src_for_ssa(unpacked);
        nir_ssa_def_rewrite_uses_after(&intr->dest.ssa, rewritten, &intr->instr);
}

void
pan_lower_framebuffer(nir_shader *shader,
                      const struct util_format_description *desc,
                      unsigned quirks)
{
        /* Blend shaders are represented as special fragment shaders */
        assert(shader->info.stage == MESA_SHADER_FRAGMENT);

        nir_foreach_function(func, shader) {
                nir_foreach_block(block, func->impl) {
                        nir_foreach_instr_safe(instr, block) {
                                if (instr->type != nir_instr_type_intrinsic)
                                        continue;

                                nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

                                bool is_load = intr->intrinsic == nir_intrinsic_load_deref;
                                bool is_store = intr->intrinsic == nir_intrinsic_store_deref;

                                if (!(is_load || is_store))
                                        continue;

                                enum pan_format_class fmt_class =
                                        pan_format_class(desc, quirks, is_store);

                                /* Don't lower */
                                if (fmt_class == PAN_FORMAT_NATIVE)
                                        continue;

                                /* Don't worry about MRT */
                                nir_variable *var = nir_intrinsic_get_var(intr, 0);

                                if (var->data.location != FRAG_RESULT_COLOR)
                                        continue;

                                nir_builder b;
                                nir_builder_init(&b, func->impl);

                                if (is_store) {
                                        b.cursor = nir_before_instr(instr);
                                        pan_lower_fb_store(shader, &b, intr, desc, quirks);
                                } else {
                                        b.cursor = nir_after_instr(instr);
                                        pan_lower_fb_load(shader, &b, intr, desc, quirks);
                                }

                                nir_instr_remove(instr);
                        }
                }

                nir_metadata_preserve(func->impl, nir_metadata_block_index |
                                      nir_metadata_dominance);
        }
}