nir/format_convert: Add code for bitcasting vectors
[mesa.git] / src/compiler/nir/nir_format_convert.h
/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir_builder.h"

#include "util/format_rgb9e5.h"

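/* Left-shifts value by left_shift bits when left_shift is positive, does a
 * logical (unsigned) right shift by -left_shift when it is negative, and
 * returns value unmodified when it is zero.
 */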
static inline nir_ssa_def *
nir_shift(nir_builder *b, nir_ssa_def *value, int left_shift)
{
   if (left_shift > 0)
      return nir_ishl(b, value, nir_imm_int(b, left_shift));
   else if (left_shift < 0)
      return nir_ushr(b, value, nir_imm_int(b, -left_shift));
   else
      return value;
}

static inline nir_ssa_def *
nir_mask_shift(struct nir_builder *b, nir_ssa_def *src,
               uint32_t mask, int left_shift)
{
   return nir_shift(b, nir_iand(b, src, nir_imm_int(b, mask)), left_shift);
}

static inline nir_ssa_def *
nir_mask_shift_or(struct nir_builder *b, nir_ssa_def *dst, nir_ssa_def *src,
                  uint32_t src_mask, int src_left_shift)
{
   return nir_ior(b, nir_mask_shift(b, src, src_mask, src_left_shift), dst);
}

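/* A hypothetical example (not part of the original header) of how the
 * mask/shift helpers above compose: swapping the two 16-bit halves of a
 * 32-bit value.  The function name is illustrative only.
 */
static inline nir_ssa_def *
nir_example_swap_word_halves(nir_builder *b, nir_ssa_def *v)
{
   /* (v & 0x0000ffff) << 16, ORed with (v & 0xffff0000) >> 16 */
   nir_ssa_def *lo_to_hi = nir_mask_shift(b, v, 0x0000ffff, 16);
   return nir_mask_shift_or(b, lo_to_hi, v, 0xffff0000, -16);
}
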
static inline nir_ssa_def *
nir_format_unpack_uint(nir_builder *b, nir_ssa_def *packed,
                       const unsigned *bits, unsigned num_components)
{
   assert(num_components >= 1 && num_components <= 4);
   nir_ssa_def *comps[4];

   if (bits[0] >= packed->bit_size) {
      assert(bits[0] == packed->bit_size);
      assert(num_components == 1);
      return packed;
   }

   unsigned offset = 0;
   for (unsigned i = 0; i < num_components; i++) {
      assert(bits[i] < 32);
      nir_ssa_def *mask = nir_imm_int(b, (1u << bits[i]) - 1);
      comps[i] = nir_iand(b, nir_shift(b, packed, -offset), mask);
      offset += bits[i];
   }
   assert(offset <= packed->bit_size);

   return nir_vec(b, comps, num_components);
}

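/* Packs the low bits[i] bits of each component of color into consecutive
 * bit-fields of a single 32-bit value.  "Unmasked" means the caller must
 * guarantee that each component's bits above bits[i] are already zero;
 * they are ORed into the result as-is.
 */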
static inline nir_ssa_def *
nir_format_pack_uint_unmasked(nir_builder *b, nir_ssa_def *color,
                              const unsigned *bits, unsigned num_components)
{
   assert(num_components >= 1 && num_components <= 4);
   nir_ssa_def *packed = nir_imm_int(b, 0);
   unsigned offset = 0;
   for (unsigned i = 0; i < num_components; i++) {
      packed = nir_ior(b, packed, nir_shift(b, nir_channel(b, color, i),
                                            offset));
      offset += bits[i];
   }
   assert(offset <= packed->bit_size);

   return packed;
}

static inline nir_ssa_def *
nir_format_pack_uint(nir_builder *b, nir_ssa_def *color,
                     const unsigned *bits, unsigned num_components)
{
   nir_const_value mask;
   for (unsigned i = 0; i < num_components; i++) {
      assert(bits[i] < 32);
      mask.u32[i] = (1u << bits[i]) - 1;
   }
   nir_ssa_def *mask_imm = nir_build_imm(b, num_components, 32, mask);

   return nir_format_pack_uint_unmasked(b, nir_iand(b, color, mask_imm),
                                        bits, num_components);
}

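/* A hypothetical example (not part of the original header): round-tripping
 * a packed RGB565 scalar through the pack/unpack helpers above.  The
 * function name is illustrative only.
 */
static inline nir_ssa_def *
nir_example_rgb565_roundtrip(nir_builder *b, nir_ssa_def *packed)
{
   static const unsigned bits[3] = { 5, 6, 5 };

   /* Unpack into a uvec3 of (5, 6, 5)-bit components, then repack. */
   nir_ssa_def *unpacked = nir_format_unpack_uint(b, packed, bits, 3);
   return nir_format_pack_uint(b, unpacked, bits, 3);
}
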
static inline nir_ssa_def *
nir_format_bitcast_uint_vec_unmasked(nir_builder *b, nir_ssa_def *src,
                                     unsigned src_bits, unsigned dst_bits)
{
   assert(src_bits == 8 || src_bits == 16 || src_bits == 32);
   assert(dst_bits == 8 || dst_bits == 16 || dst_bits == 32);

   if (src_bits == dst_bits)
      return src;

   const unsigned dst_components =
      DIV_ROUND_UP(src->num_components * src_bits, dst_bits);
   assert(dst_components <= 4);

   nir_ssa_def *dst_chan[4] = { };
   if (dst_bits > src_bits) {
      unsigned shift = 0;
      unsigned dst_idx = 0;
      for (unsigned i = 0; i < src->num_components; i++) {
         nir_ssa_def *shifted = nir_ishl(b, nir_channel(b, src, i),
                                         nir_imm_int(b, shift));
         if (shift == 0) {
            dst_chan[dst_idx] = shifted;
         } else {
            dst_chan[dst_idx] = nir_ior(b, dst_chan[dst_idx], shifted);
         }

         shift += src_bits;
         if (shift >= dst_bits) {
            dst_idx++;
            shift = 0;
         }
      }
   } else {
      nir_ssa_def *mask = nir_imm_int(b, ~0u >> (32 - dst_bits));

      unsigned src_idx = 0;
      unsigned shift = 0;
      for (unsigned i = 0; i < dst_components; i++) {
         dst_chan[i] = nir_iand(b, nir_ushr(b, nir_channel(b, src, src_idx),
                                            nir_imm_int(b, shift)),
                                mask);
         shift += dst_bits;
         if (shift >= src_bits) {
            src_idx++;
            shift = 0;
         }
      }
   }

   return nir_vec(b, dst_chan, dst_components);
}

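/* A hypothetical example (not part of the original patch): fusing a
 * four-component vector of bytes into a single 32-bit scalar.  "Unmasked"
 * means the caller must already have zeroed the upper 24 bits of each
 * component.  The wrapper name is illustrative only.
 */
static inline nir_ssa_def *
nir_example_bytes_to_dword(nir_builder *b, nir_ssa_def *bytes)
{
   assert(bytes->num_components == 4);

   /* 4 x 8-bit components -> 1 x 32-bit component */
   return nir_format_bitcast_uint_vec_unmasked(b, bytes, 8, 32);
}
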
static inline nir_ssa_def *
nir_format_linear_to_srgb(nir_builder *b, nir_ssa_def *c)
{
   nir_ssa_def *linear = nir_fmul(b, c, nir_imm_float(b, 12.92f));
   nir_ssa_def *curved =
      nir_fsub(b, nir_fmul(b, nir_imm_float(b, 1.055f),
                           nir_fpow(b, c, nir_imm_float(b, 1.0 / 2.4))),
               nir_imm_float(b, 0.055f));

   return nir_fsat(b, nir_bcsel(b, nir_flt(b, c, nir_imm_float(b, 0.0031308f)),
                                linear, curved));
}

static inline nir_ssa_def *
nir_format_srgb_to_linear(nir_builder *b, nir_ssa_def *c)
{
   nir_ssa_def *linear = nir_fdiv(b, c, nir_imm_float(b, 12.92f));
   nir_ssa_def *curved =
      nir_fpow(b, nir_fdiv(b, nir_fadd(b, c, nir_imm_float(b, 0.055f)),
                           nir_imm_float(b, 1.055f)),
               nir_imm_float(b, 2.4f));

   return nir_fsat(b, nir_bcsel(b, nir_fge(b, nir_imm_float(b, 0.04045f), c),
                                linear, curved));
}

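/* For reference (not a comment from the original header), the transfer
 * functions implemented by the two helpers above, per the sRGB spec:
 *
 *    linear_to_srgb(c) = 12.92 * c                   if c < 0.0031308
 *                        1.055 * c^(1/2.4) - 0.055   otherwise
 *
 *    srgb_to_linear(c) = c / 12.92                   if c <= 0.04045
 *                        ((c + 0.055) / 1.055)^2.4   otherwise
 *
 * Both helpers also saturate the result to [0, 1].
 */
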
static inline nir_ssa_def *
nir_format_unpack_11f11f10f(nir_builder *b, nir_ssa_def *packed)
{
   nir_ssa_def *chans[3];
   chans[0] = nir_mask_shift(b, packed, 0x000007ff, 4);
   chans[1] = nir_mask_shift(b, packed, 0x003ff800, -7);
   chans[2] = nir_mask_shift(b, packed, 0xffc00000, -17);

   for (unsigned i = 0; i < 3; i++)
      chans[i] = nir_unpack_half_2x16_split_x(b, chans[i]);

   return nir_vec(b, chans, 3);
}

static inline nir_ssa_def *
nir_format_pack_r11g11b10f(nir_builder *b, nir_ssa_def *color)
{
   /* 10 and 11-bit floats are unsigned.  Clamp to non-negative. */
   nir_ssa_def *clamped = nir_fmax(b, color, nir_imm_float(b, 0));

   nir_ssa_def *undef = nir_ssa_undef(b, 1, color->bit_size);
   nir_ssa_def *p1 = nir_pack_half_2x16_split(b, nir_channel(b, clamped, 0),
                                              nir_channel(b, clamped, 1));
   nir_ssa_def *p2 = nir_pack_half_2x16_split(b, nir_channel(b, clamped, 2),
                                              undef);

   /* A 10 or 11-bit float has the same exponent as a 16-bit float but with
    * fewer mantissa bits and no sign bit.  All we have to do is throw away
    * the sign bit and the bottom mantissa bits and shift it into place.
    */
   nir_ssa_def *packed = nir_imm_int(b, 0);
   packed = nir_mask_shift_or(b, packed, p1, 0x00007ff0, -4);
   packed = nir_mask_shift_or(b, packed, p1, 0x7ff00000, -9);
   packed = nir_mask_shift_or(b, packed, p2, 0x00007fe0, 17);

   return packed;
}

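/* For reference (not a comment from the original header), the packed
 * R11G11B10_FLOAT layout assumed by the two helpers above:
 *
 *    bits  0..10   R: 5-bit exponent, 6-bit mantissa, no sign bit
 *    bits 11..21   G: 5-bit exponent, 6-bit mantissa, no sign bit
 *    bits 22..31   B: 5-bit exponent, 5-bit mantissa, no sign bit
 */
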
static inline nir_ssa_def *
nir_format_pack_r9g9b9e5(nir_builder *b, nir_ssa_def *color)
{
   /* See also float3_to_rgb9e5 */

   /* First, we need to clamp it to range. */
   nir_ssa_def *clamped = nir_fmin(b, color, nir_imm_float(b, MAX_RGB9E5));

   /* Get rid of negatives and NaN */
   clamped = nir_bcsel(b, nir_ult(b, nir_imm_int(b, 0x7f800000), color),
                       nir_imm_float(b, 0), clamped);

   /* maxrgb.u = MAX3(rc.u, gc.u, bc.u); */
   nir_ssa_def *maxu = nir_umax(b, nir_channel(b, clamped, 0),
                                nir_umax(b, nir_channel(b, clamped, 1),
                                            nir_channel(b, clamped, 2)));

   /* maxrgb.u += maxrgb.u & (1 << (23-9)); */
   maxu = nir_iadd(b, maxu, nir_iand(b, maxu, nir_imm_int(b, 1 << 14)));

   /* exp_shared = MAX2((maxrgb.u >> 23), -RGB9E5_EXP_BIAS - 1 + 127) +
    *              1 + RGB9E5_EXP_BIAS - 127;
    */
   nir_ssa_def *exp_shared =
      nir_iadd(b, nir_umax(b, nir_ushr(b, maxu, nir_imm_int(b, 23)),
                              nir_imm_int(b, -RGB9E5_EXP_BIAS - 1 + 127)),
                  nir_imm_int(b, 1 + RGB9E5_EXP_BIAS - 127));

   /* revdenom_biasedexp = 127 - (exp_shared - RGB9E5_EXP_BIAS -
    *                             RGB9E5_MANTISSA_BITS) + 1;
    */
   nir_ssa_def *revdenom_biasedexp =
      nir_isub(b, nir_imm_int(b, 127 + RGB9E5_EXP_BIAS +
                                 RGB9E5_MANTISSA_BITS + 1),
                  exp_shared);

   /* revdenom.u = revdenom_biasedexp << 23; */
   nir_ssa_def *revdenom =
      nir_ishl(b, revdenom_biasedexp, nir_imm_int(b, 23));

   /* rm = (int) (rc.f * revdenom.f);
    * gm = (int) (gc.f * revdenom.f);
    * bm = (int) (bc.f * revdenom.f);
    */
   nir_ssa_def *mantissa =
      nir_f2i32(b, nir_fmul(b, clamped, revdenom));

   /* rm = (rm & 1) + (rm >> 1);
    * gm = (gm & 1) + (gm >> 1);
    * bm = (bm & 1) + (bm >> 1);
    */
   mantissa = nir_iadd(b, nir_iand(b, mantissa, nir_imm_int(b, 1)),
                          nir_ushr(b, mantissa, nir_imm_int(b, 1)));

   nir_ssa_def *packed = nir_channel(b, mantissa, 0);
   packed = nir_mask_shift_or(b, packed, nir_channel(b, mantissa, 1), ~0, 9);
   packed = nir_mask_shift_or(b, packed, nir_channel(b, mantissa, 2), ~0, 18);
   packed = nir_mask_shift_or(b, packed, exp_shared, ~0, 27);

   return packed;
}
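
/* For reference (not a comment from the original header), the packed RGB9E5
 * layout produced above:
 *
 *    bits  0..8    R mantissa (9 bits)
 *    bits  9..17   G mantissa (9 bits)
 *    bits 18..26   B mantissa (9 bits)
 *    bits 27..31   shared exponent (biased by RGB9E5_EXP_BIAS)
 */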