nir/format_convert: Add pack/unpack for R11F_G11F_B10F
[mesa.git] / src / compiler / nir / nir_format_convert.h
/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir_builder.h"

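/* Shifts "value" by "left_shift" bits: a positive shift builds an ishl, a
 * negative shift builds a logical (unsigned) right shift, and a shift of
 * zero returns the value unmodified.  For example, nir_shift(b, v, -4)
 * yields v >> 4.
 */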
static inline nir_ssa_def *
nir_shift(nir_builder *b, nir_ssa_def *value, int left_shift)
{
   if (left_shift > 0)
      return nir_ishl(b, value, nir_imm_int(b, left_shift));
   else if (left_shift < 0)
      return nir_ushr(b, value, nir_imm_int(b, -left_shift));
   else
      return value;
}

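/* ANDs "src" with "mask" and then shifts the result by "left_shift"
 * (negative values shift right).  For example,
 * nir_mask_shift(b, v, 0xff00, -8) extracts bits 8..15 of v into bits 0..7.
 */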
static inline nir_ssa_def *
nir_mask_shift(struct nir_builder *b, nir_ssa_def *src,
               uint32_t mask, int left_shift)
{
   return nir_shift(b, nir_iand(b, src, nir_imm_int(b, mask)), left_shift);
}

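/* Masks and shifts "src" as above and ORs the result into "dst", so a packed
 * dword can be assembled one field at a time.
 */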
static inline nir_ssa_def *
nir_mask_shift_or(struct nir_builder *b, nir_ssa_def *dst, nir_ssa_def *src,
                  uint32_t src_mask, int src_left_shift)
{
   return nir_ior(b, nir_mask_shift(b, src, src_mask, src_left_shift), dst);
}

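/* Unpacks "num_components" unsigned integer fields from a packed scalar.
 * bits[i] gives the width of component i and the fields are packed starting
 * at bit 0.  For example, bits = {5, 6, 5} unpacks an RGB565 value into
 * (packed & 0x1f, (packed >> 5) & 0x3f, (packed >> 11) & 0x1f).
 */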
static inline nir_ssa_def *
nir_format_unpack_uint(nir_builder *b, nir_ssa_def *packed,
                       const unsigned *bits, unsigned num_components)
{
   assert(num_components >= 1 && num_components <= 4);
   nir_ssa_def *comps[4];

   if (bits[0] >= packed->bit_size) {
      assert(bits[0] == packed->bit_size);
      assert(num_components == 1);
      return packed;
   }

   unsigned offset = 0;
   for (unsigned i = 0; i < num_components; i++) {
      assert(bits[i] < 32);
      nir_ssa_def *mask = nir_imm_int(b, (1u << bits[i]) - 1);
      comps[i] = nir_iand(b, nir_shift(b, packed, -offset), mask);
      offset += bits[i];
   }
   assert(offset <= packed->bit_size);

   return nir_vec(b, comps, num_components);
}

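/* Packs the components of "color" into a single value, placing component i
 * at bit offset bits[0] + ... + bits[i-1].  The components are assumed to
 * already fit in their fields; no masking is applied.
 */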
static inline nir_ssa_def *
nir_format_pack_uint_unmasked(nir_builder *b, nir_ssa_def *color,
                              const unsigned *bits, unsigned num_components)
{
   assert(num_components >= 1 && num_components <= 4);
   nir_ssa_def *packed = nir_imm_int(b, 0);
   unsigned offset = 0;
   for (unsigned i = 0; i < num_components; i++) {
      packed = nir_ior(b, packed, nir_shift(b, nir_channel(b, color, i),
                                            offset));
      offset += bits[i];
   }
   assert(offset <= packed->bit_size);

   return packed;
}

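/* Masks each component of "color" to bits[i] bits and then packs them.  For
 * example, bits = {8, 8, 8, 8} packs a uvec4 into an RGBA8 dword with the
 * red channel in the low byte.
 */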
static inline nir_ssa_def *
nir_format_pack_uint(nir_builder *b, nir_ssa_def *color,
                     const unsigned *bits, unsigned num_components)
{
   nir_const_value mask;
   for (unsigned i = 0; i < num_components; i++) {
      assert(bits[i] < 32);
      mask.u32[i] = (1u << bits[i]) - 1;
   }
   nir_ssa_def *mask_imm = nir_build_imm(b, num_components, 32, mask);

   return nir_format_pack_uint_unmasked(b, nir_iand(b, color, mask_imm),
                                        bits, num_components);
}

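/* Converts a linear value to sRGB encoding: values below 0.0031308 use the
 * linear segment 12.92 * c, larger values use 1.055 * c^(1/2.4) - 0.055,
 * and the result is saturated to [0, 1].
 */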
static inline nir_ssa_def *
nir_format_linear_to_srgb(nir_builder *b, nir_ssa_def *c)
{
   nir_ssa_def *linear = nir_fmul(b, c, nir_imm_float(b, 12.92f));
   nir_ssa_def *curved =
      nir_fsub(b, nir_fmul(b, nir_imm_float(b, 1.055f),
                           nir_fpow(b, c, nir_imm_float(b, 1.0 / 2.4))),
               nir_imm_float(b, 0.055f));

   return nir_fsat(b, nir_bcsel(b, nir_flt(b, c, nir_imm_float(b, 0.0031308f)),
                                linear, curved));
}

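/* Inverse of the above: sRGB-encoded values at or below 0.04045 use the
 * linear segment c / 12.92, larger values use ((c + 0.055) / 1.055)^2.4,
 * and the result is saturated to [0, 1].
 */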
static inline nir_ssa_def *
nir_format_srgb_to_linear(nir_builder *b, nir_ssa_def *c)
{
   nir_ssa_def *linear = nir_fdiv(b, c, nir_imm_float(b, 12.92f));
   nir_ssa_def *curved =
      nir_fpow(b, nir_fdiv(b, nir_fadd(b, c, nir_imm_float(b, 0.055f)),
                           nir_imm_float(b, 1.055f)),
               nir_imm_float(b, 2.4f));

   return nir_fsat(b, nir_bcsel(b, nir_fge(b, nir_imm_float(b, 0.04045f), c),
                                linear, curved));
}

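/* Unpacks an R11F_G11F_B10F dword into a vec3 of 32-bit floats.  The packed
 * layout is red in bits 0-10, green in bits 11-21, and blue in bits 22-31;
 * each channel is an unsigned float with a 5-bit exponent, so shifting it
 * into the exponent/mantissa position of a 16-bit float (bits 4-14 for the
 * 11-bit channels, bits 5-14 for the 10-bit channel) and unpacking it as a
 * half-float yields the correct value.
 */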
static inline nir_ssa_def *
nir_format_unpack_11f11f10f(nir_builder *b, nir_ssa_def *packed)
{
   nir_ssa_def *chans[3];
   chans[0] = nir_mask_shift(b, packed, 0x000007ff, 4);
   chans[1] = nir_mask_shift(b, packed, 0x003ff800, -7);
   chans[2] = nir_mask_shift(b, packed, 0xffc00000, -17);

   for (unsigned i = 0; i < 3; i++)
      chans[i] = nir_unpack_half_2x16_split_x(b, chans[i]);

   return nir_vec(b, chans, 3);
}

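/* Packs a vec3 of 32-bit floats into an R11F_G11F_B10F dword.  The channels
 * are first converted to half-floats with nir_pack_half_2x16_split, then the
 * sign bit and excess mantissa bits of each half are masked off before the
 * fields are shifted into place.
 */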
static inline nir_ssa_def *
nir_format_pack_r11g11b10f(nir_builder *b, nir_ssa_def *color)
{
   /* 10 and 11-bit floats are unsigned. Clamp to non-negative */
   nir_ssa_def *clamped = nir_fmax(b, color, nir_imm_float(b, 0));

   nir_ssa_def *undef = nir_ssa_undef(b, 1, color->bit_size);
   nir_ssa_def *p1 = nir_pack_half_2x16_split(b, nir_channel(b, clamped, 0),
                                                 nir_channel(b, clamped, 1));
   nir_ssa_def *p2 = nir_pack_half_2x16_split(b, nir_channel(b, clamped, 2),
                                                 undef);

   /* A 10 or 11-bit float has the same exponent as a 16-bit float but with
    * fewer mantissa bits and no sign bit.  All we have to do is throw away
    * the sign bit and the bottom mantissa bits and shift it into place.
    */
   nir_ssa_def *packed = nir_imm_int(b, 0);
   packed = nir_mask_shift_or(b, packed, p1, 0x00007ff0, -4);
   packed = nir_mask_shift_or(b, packed, p1, 0x7ff00000, -9);
   packed = nir_mask_shift_or(b, packed, p2, 0x00007fe0, 17);

   return packed;
}
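
/* Usage sketch (illustrative only, not part of this header): given a
 * nir_builder "b" positioned where a vec3 of 32-bit floats "rgb" must be
 * written to an R11F_G11F_B10F surface, the packed dword could be built as
 *
 *    nir_ssa_def *dword = nir_format_pack_r11g11b10f(b, rgb);
 *
 * and recovered on the load path with nir_format_unpack_11f11f10f(b, dword).
 */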