panfrost: Un/pack RGB565 and RGB5A1
[mesa.git] src/panfrost/util/pan_lower_framebuffer.c
/*
 * Copyright (C) 2020 Collabora, Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors (Collabora):
 *   Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
 */

/**
 * Implements framebuffer format conversions in software for Midgard/Bifrost
 * blend shaders. This pass is designed for a single render target; Midgard
 * duplicates blend shaders for MRT to simplify everything. A particular
 * framebuffer format may be categorized as 1) typed load available, 2) typed
 * unpack available, or 3) software unpack only, and likewise for stores. The
 * first two types are handled in the compiler backend directly, so this module
 * is responsible for identifying type 3 formats (hardware dependent) and
 * inserting appropriate ALU code to perform the conversion from the packed
 * type to a designated unpacked type, and vice versa.
 *
 * The unpacked type depends on the format:
 *
 * - For 32-bit float formats, 32-bit floats.
 * - For other floats, 16-bit floats.
 * - For 32-bit ints, 32-bit ints.
 * - For 8-bit ints, 8-bit ints.
 * - For other ints, 16-bit ints.
 *
 * The rationale is to optimize blending and logic op instructions by using the
 * smallest precision necessary to store the pixel losslessly.
 */
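
/* For example, under these rules an RGBA8 UNORM render target unpacks to f16
 * components, RGBA8UI to u8, RG16UI to u16, and RGBA32F stays at f32. */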

#include "compiler/nir/nir.h"
#include "compiler/nir/nir_builder.h"
#include "compiler/nir/nir_format_convert.h"
#include "util/format/u_format.h"
#include "pan_lower_framebuffer.h"
#include "panfrost-quirks.h"

/* Determines the unpacked type best suiting a given format, so the rest of the
 * pipeline may be adjusted accordingly */

nir_alu_type
pan_unpacked_type_for_format(const struct util_format_description *desc)
{
        int c = util_format_get_first_non_void_channel(desc->format);

        if (c == -1)
                unreachable("Void format not renderable");

        bool large = (desc->channel[c].size > 16);
        bool bit8 = (desc->channel[c].size == 8);
        assert(desc->channel[c].size <= 32);

        if (desc->channel[c].normalized)
                return large ? nir_type_float32 : nir_type_float16;

        switch (desc->channel[c].type) {
        case UTIL_FORMAT_TYPE_UNSIGNED:
                return bit8 ? nir_type_uint8 :
                       large ? nir_type_uint32 : nir_type_uint16;
        case UTIL_FORMAT_TYPE_SIGNED:
                return bit8 ? nir_type_int8 :
                       large ? nir_type_int32 : nir_type_int16;
        case UTIL_FORMAT_TYPE_FLOAT:
                return large ? nir_type_float32 : nir_type_float16;
        default:
                unreachable("Format not renderable");
        }
}

enum pan_format_class
pan_format_class_load(const struct util_format_description *desc, unsigned quirks)
{
        /* Check if we can do anything better than software architecturally */
        if (quirks & MIDGARD_NO_TYPED_BLEND_LOADS) {
                return (quirks & NO_BLEND_PACKS)
                        ? PAN_FORMAT_SOFTWARE : PAN_FORMAT_PACK;
        }

        /* Some formats are missing as typed on some GPUs but have unpacks */
        if (quirks & MIDGARD_MISSING_LOADS) {
                switch (desc->format) {
                case PIPE_FORMAT_R11G11B10_FLOAT:
                case PIPE_FORMAT_R10G10B10A2_UNORM:
                case PIPE_FORMAT_B10G10R10A2_UNORM:
                case PIPE_FORMAT_R10G10B10X2_UNORM:
                case PIPE_FORMAT_B10G10R10X2_UNORM:
                case PIPE_FORMAT_R10G10B10A2_UINT:
                        return PAN_FORMAT_PACK;
                default:
                        return PAN_FORMAT_NATIVE;
                }
        }

        /* Otherwise, we can do native */
        return PAN_FORMAT_NATIVE;
}

enum pan_format_class
pan_format_class_store(const struct util_format_description *desc, unsigned quirks)
{
        /* Check if we can do anything better than software architecturally */
        if (quirks & MIDGARD_NO_TYPED_BLEND_STORES) {
                return (quirks & NO_BLEND_PACKS)
                        ? PAN_FORMAT_SOFTWARE : PAN_FORMAT_PACK;
        }

        return PAN_FORMAT_NATIVE;
}

/* Software packs/unpacks, by format class. Packs take in the pixel value typed
 * as `pan_unpacked_type_for_format` of the format and return an i32vec4
 * suitable for storing (with components replicated to fill). Unpacks do the
 * reverse but cannot rely on replication.
 *
 * Pure 32-bit formats (R32F ... RGBA32F) are already unpacked at 32-bit, so
 * packing just needs to replicate to fill */

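/* For instance, for an R32F target the pure-32 pack below replicates the
 * scalar r out to (r, r, r, r); the unpack simply selects the first
 * num_components channels back out. */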
static nir_ssa_def *
pan_pack_pure_32(nir_builder *b, nir_ssa_def *v)
{
        nir_ssa_def *replicated[4];

        for (unsigned i = 0; i < 4; ++i)
                replicated[i] = nir_channel(b, v, i % v->num_components);

        return nir_vec(b, replicated, 4);
}

static nir_ssa_def *
pan_unpack_pure_32(nir_builder *b, nir_ssa_def *pack, unsigned num_components)
{
        return nir_channels(b, pack, (1 << num_components) - 1);
}

/* Pure 16-bit formats are unpacked as 16-bit as well, so it's similar, except
 * we need to pack pairs of components into the upper/lower halves of each
 * 32-bit word */

static nir_ssa_def *
pan_pack_pure_16(nir_builder *b, nir_ssa_def *v)
{
        nir_ssa_def *replicated[4];

        for (unsigned i = 0; i < 4; ++i) {
                unsigned c = 2 * i;

                nir_ssa_def *parts[2] = {
                        nir_channel(b, v, (c + 0) % v->num_components),
                        nir_channel(b, v, (c + 1) % v->num_components)
                };

                replicated[i] = nir_pack_32_2x16(b, nir_vec(b, parts, 2));
        }

        return nir_vec(b, replicated, 4);
}

static nir_ssa_def *
pan_unpack_pure_16(nir_builder *b, nir_ssa_def *pack, unsigned num_components)
{
        nir_ssa_def *unpacked[4];

        assert(num_components <= 4);

        for (unsigned i = 0; i < num_components; i += 2) {
                nir_ssa_def *halves =
                        nir_unpack_32_2x16(b, nir_channel(b, pack, i >> 1));

                unpacked[i + 0] = nir_channel(b, halves, 0);
                unpacked[i + 1] = nir_channel(b, halves, 1);
        }

        for (unsigned i = num_components; i < 4; ++i)
                unpacked[i] = nir_imm_intN_t(b, 0, 16);

        return nir_vec(b, unpacked, 4);
}

/* And likewise for x8. pan_fill_4 fills a 4-channel vector with an n-channel
 * vector (n <= 4), replicating as needed. pan_replicate_4 constructs a
 * 4-channel vector from a scalar via replication */

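/* For example, pan_fill_4 turns the 2-component vector (x, y) into
 * (x, y, x, y), while pan_replicate_4 turns the scalar s into (s, s, s, s). */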
static nir_ssa_def *
pan_fill_4(nir_builder *b, nir_ssa_def *v)
{
        nir_ssa_def *q[4];
        assert(v->num_components <= 4);

        for (unsigned j = 0; j < 4; ++j)
                q[j] = nir_channel(b, v, j % v->num_components);

        return nir_vec(b, q, 4);
}

static nir_ssa_def *
pan_replicate_4(nir_builder *b, nir_ssa_def *v)
{
        nir_ssa_def *replicated[4] = { v, v, v, v };
        return nir_vec(b, replicated, 4);
}

static nir_ssa_def *
pan_pack_pure_8(nir_builder *b, nir_ssa_def *v)
{
        return pan_replicate_4(b, nir_pack_32_4x8(b, pan_fill_4(b, v)));
}

static nir_ssa_def *
pan_unpack_pure_8(nir_builder *b, nir_ssa_def *pack, unsigned num_components)
{
        assert(num_components <= 4);
        nir_ssa_def *unpacked = nir_unpack_32_4x8(b, nir_channel(b, pack, 0));
        return nir_channels(b, unpacked, (1 << num_components) - 1);
}

/* UNORM 8 is unpacked to f16 vec4. We could directly use the un/pack_unorm_4x8
 * ops provided we replicate appropriately, but for packing we'd rather stay in
 * 8/16-bit whereas the NIR op forces 32-bit, so we do it manually */

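/* That is, in the notation used for UNORM 4 below:
 *
 *      pack_32_4x8(f2u8_rte(fsat(v) * 255.0))
 */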
static nir_ssa_def *
pan_pack_unorm_8(nir_builder *b, nir_ssa_def *v)
{
        return pan_replicate_4(b, nir_pack_32_4x8(b,
                nir_f2u8(b, nir_fround_even(b, nir_fmul(b, nir_fsat(b,
                        pan_fill_4(b, v)), nir_imm_float16(b, 255.0))))));
}

static nir_ssa_def *
pan_unpack_unorm_8(nir_builder *b, nir_ssa_def *pack, unsigned num_components)
{
        assert(num_components <= 4);
        nir_ssa_def *unpacked = nir_unpack_unorm_4x8(b, nir_channel(b, pack, 0));
        return nir_f2f16(b, unpacked);
}

/* UNORM 4 is also unpacked to f16, which prevents us from using the shared
 * unpack, which strongly assumes fp32. However, on the tilebuffer it is
 * actually packed as:
 *
 *      [AAAA] [0000] [BBBB] [0000] [GGGG] [0000] [RRRR] [0000]
 *
 * In other words, each nibble is spread out to be byte-aligned and stored in
 * the top half of its byte. So pack as:
 *
 *      pack_32_4x8(f2u8_rte(v * 15.0) << 4)
 */

static nir_ssa_def *
pan_pack_unorm_small(nir_builder *b, nir_ssa_def *v,
                nir_ssa_def *scales, nir_ssa_def *shifts)
{
        nir_ssa_def *f = nir_fmul(b, nir_fsat(b, pan_fill_4(b, v)), scales);
        nir_ssa_def *u8 = nir_f2u8(b, nir_fround_even(b, f));
        nir_ssa_def *s = nir_ishl(b, u8, shifts);
        nir_ssa_def *repl = nir_pack_32_4x8(b, s);

        return pan_replicate_4(b, repl);
}

static nir_ssa_def *
pan_unpack_unorm_small(nir_builder *b, nir_ssa_def *pack,
                nir_ssa_def *scales, nir_ssa_def *shifts)
{
        nir_ssa_def *channels = nir_unpack_32_4x8(b, nir_channel(b, pack, 0));
        nir_ssa_def *raw = nir_ushr(b, nir_u2u16(b, channels), shifts);
        return nir_fmul(b, nir_u2f16(b, raw), scales);
}

static nir_ssa_def *
pan_pack_unorm_4(nir_builder *b, nir_ssa_def *v)
{
        return pan_pack_unorm_small(b, v,
                nir_imm_vec4_16(b, 15.0, 15.0, 15.0, 15.0),
                nir_imm_ivec4(b, 4, 4, 4, 4));
}

static nir_ssa_def *
pan_unpack_unorm_4(nir_builder *b, nir_ssa_def *v)
{
        return pan_unpack_unorm_small(b, v,
                nir_imm_vec4_16(b, 1.0 / 15.0, 1.0 / 15.0, 1.0 / 15.0, 1.0 / 15.0),
                nir_imm_ivec4(b, 4, 4, 4, 4));
}

/* UNORM RGB5_A1 and RGB565 are similar */

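/* With the scales and shifts below, and the same byte-aligned convention as
 * UNORM 4 above, the packed layouts work out to:
 *
 *      RGB5_A1: [A] [0000000] [BBBBB] [000] [GGGGG] [000] [RRRRR] [000]
 *      RGB565:  [00000000] [BBBBB] [000] [GGGGGG] [00] [RRRRR] [000]
 */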
static nir_ssa_def *
pan_pack_unorm_5551(nir_builder *b, nir_ssa_def *v)
{
        return pan_pack_unorm_small(b, v,
                nir_imm_vec4_16(b, 31.0, 31.0, 31.0, 1.0),
                nir_imm_ivec4(b, 3, 3, 3, 7));
}

static nir_ssa_def *
pan_unpack_unorm_5551(nir_builder *b, nir_ssa_def *v)
{
        return pan_unpack_unorm_small(b, v,
                nir_imm_vec4_16(b, 1.0 / 31.0, 1.0 / 31.0, 1.0 / 31.0, 1.0),
                nir_imm_ivec4(b, 3, 3, 3, 7));
}

static nir_ssa_def *
pan_pack_unorm_565(nir_builder *b, nir_ssa_def *v)
{
        return pan_pack_unorm_small(b, v,
                nir_imm_vec4_16(b, 31.0, 63.0, 31.0, 0.0),
                nir_imm_ivec4(b, 3, 2, 3, 0));
}

static nir_ssa_def *
pan_unpack_unorm_565(nir_builder *b, nir_ssa_def *v)
{
        return pan_unpack_unorm_small(b, v,
                nir_imm_vec4_16(b, 1.0 / 31.0, 1.0 / 63.0, 1.0 / 31.0, 0.0),
                nir_imm_ivec4(b, 3, 2, 3, 0));
}

/* Generic dispatches for un/pack regardless of format */

static bool
pan_is_unorm4(const struct util_format_description *desc)
{
        switch (desc->format) {
        case PIPE_FORMAT_B4G4R4A4_UNORM:
        case PIPE_FORMAT_B4G4R4X4_UNORM:
        case PIPE_FORMAT_A4R4_UNORM:
        case PIPE_FORMAT_R4A4_UNORM:
        case PIPE_FORMAT_A4B4G4R4_UNORM:
                return true;
        default:
                return false;
        }
}

static nir_ssa_def *
pan_unpack(nir_builder *b,
                const struct util_format_description *desc,
                nir_ssa_def *packed)
{
        if (util_format_is_unorm8(desc))
                return pan_unpack_unorm_8(b, packed, desc->nr_channels);

        if (pan_is_unorm4(desc))
                return pan_unpack_unorm_4(b, packed);

        if (desc->is_array) {
                int c = util_format_get_first_non_void_channel(desc->format);
                assert(c >= 0);
                struct util_format_channel_description d = desc->channel[c];

                if (d.size == 32 || d.size == 16) {
                        assert(!d.normalized);
                        assert(d.type == UTIL_FORMAT_TYPE_FLOAT || d.pure_integer);

                        return d.size == 32 ? pan_unpack_pure_32(b, packed, desc->nr_channels) :
                                pan_unpack_pure_16(b, packed, desc->nr_channels);
                } else if (d.size == 8) {
                        assert(d.pure_integer);
                        return pan_unpack_pure_8(b, packed, desc->nr_channels);
                } else {
                        unreachable("Unrenderable size");
                }
        }

        switch (desc->format) {
        case PIPE_FORMAT_B5G5R5A1_UNORM:
                return pan_unpack_unorm_5551(b, packed);
        case PIPE_FORMAT_B5G6R5_UNORM:
                return pan_unpack_unorm_565(b, packed);
        default:
                break;
        }

        fprintf(stderr, "%s\n", desc->name);
        unreachable("Unknown format");
}

static nir_ssa_def *
pan_pack(nir_builder *b,
                const struct util_format_description *desc,
                nir_ssa_def *unpacked)
{
        if (util_format_is_unorm8(desc))
                return pan_pack_unorm_8(b, unpacked);

        if (pan_is_unorm4(desc))
                return pan_pack_unorm_4(b, unpacked);

        if (desc->is_array) {
                int c = util_format_get_first_non_void_channel(desc->format);
                assert(c >= 0);
                struct util_format_channel_description d = desc->channel[c];

                if (d.size == 32 || d.size == 16) {
                        assert(!d.normalized);
                        assert(d.type == UTIL_FORMAT_TYPE_FLOAT || d.pure_integer);

                        return d.size == 32 ? pan_pack_pure_32(b, unpacked) :
                                pan_pack_pure_16(b, unpacked);
                } else if (d.size == 8) {
                        assert(d.pure_integer);
                        return pan_pack_pure_8(b, unpacked);
                } else {
                        unreachable("Unrenderable size");
                }
        }

        switch (desc->format) {
        case PIPE_FORMAT_B5G5R5A1_UNORM:
                return pan_pack_unorm_5551(b, unpacked);
        case PIPE_FORMAT_B5G6R5_UNORM:
                return pan_pack_unorm_565(b, unpacked);
        default:
                break;
        }

        fprintf(stderr, "%s\n", desc->name);
        unreachable("Unknown format");
}

static void
pan_lower_fb_store(nir_shader *shader,
                nir_builder *b,
                nir_intrinsic_instr *intr,
                const struct util_format_description *desc,
                unsigned quirks)
{
        /* For stores, add conversion before */
        nir_ssa_def *unpacked = nir_ssa_for_src(b, intr->src[1], 4);
        nir_ssa_def *packed = pan_pack(b, desc, unpacked);

        nir_intrinsic_instr *new =
                nir_intrinsic_instr_create(shader, nir_intrinsic_store_raw_output_pan);
        new->src[0] = nir_src_for_ssa(packed);
        new->num_components = 4;
        nir_builder_instr_insert(b, &new->instr);
}

static void
pan_lower_fb_load(nir_shader *shader,
                nir_builder *b,
                nir_intrinsic_instr *intr,
                const struct util_format_description *desc,
                unsigned quirks)
{
        nir_intrinsic_instr *new = nir_intrinsic_instr_create(shader,
                        nir_intrinsic_load_raw_output_pan);
        new->num_components = 4;

        nir_ssa_dest_init(&new->instr, &new->dest, 4, 32, NULL);
        nir_builder_instr_insert(b, &new->instr);

        /* Convert the raw value */
        nir_ssa_def *packed = &new->dest.ssa;
        nir_ssa_def *unpacked = pan_unpack(b, desc, packed);

        nir_src rewritten = nir_src_for_ssa(unpacked);
        nir_ssa_def_rewrite_uses_after(&intr->dest.ssa, rewritten, &intr->instr);
}

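/* Entry point: rewrites gl_FragColor loads/stores in a blend shader into raw
 * tilebuffer accesses plus the conversion code above, given the render
 * target's format description and the GPU's quirk bits. A caller would invoke
 * it along the lines of (illustrative only):
 *
 *      pan_lower_framebuffer(shader, util_format_description(fmt), quirks);
 */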
void
pan_lower_framebuffer(nir_shader *shader,
                const struct util_format_description *desc,
                unsigned quirks)
{
        /* Blend shaders are represented as special fragment shaders */
        assert(shader->info.stage == MESA_SHADER_FRAGMENT);

        nir_foreach_function(func, shader) {
                nir_foreach_block(block, func->impl) {
                        nir_foreach_instr_safe(instr, block) {
                                if (instr->type != nir_instr_type_intrinsic)
                                        continue;

                                nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

                                bool is_load = intr->intrinsic == nir_intrinsic_load_deref;
                                bool is_store = intr->intrinsic == nir_intrinsic_store_deref;

                                if (!(is_load || is_store))
                                        continue;

                                /* Don't worry about MRT */
                                nir_variable *var = nir_intrinsic_get_var(intr, 0);

                                if (var->data.location != FRAG_RESULT_COLOR)
                                        continue;

                                nir_builder b;
                                nir_builder_init(&b, func->impl);

                                if (is_store) {
                                        b.cursor = nir_before_instr(instr);
                                        pan_lower_fb_store(shader, &b, intr, desc, quirks);
                                } else {
                                        b.cursor = nir_after_instr(instr);
                                        pan_lower_fb_load(shader, &b, intr, desc, quirks);
                                }

                                nir_instr_remove(instr);
                        }
                }

                nir_metadata_preserve(func->impl, nir_metadata_block_index |
                                nir_metadata_dominance);
        }
}