/* mesa: src/compiler/nir/nir_format_convert.h */
/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef NIR_FORMAT_CONVERT_H
#define NIR_FORMAT_CONVERT_H

#include "nir_builder.h"

#include "util/format_rgb9e5.h"

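/* Shift "value" left by "left_shift" bits if positive, or right (with zero
 * fill) by -left_shift bits if negative.  A shift of zero returns the value
 * unchanged.
 */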
static inline nir_ssa_def *
nir_shift(nir_builder *b, nir_ssa_def *value, int left_shift)
{
   if (left_shift > 0)
      return nir_ishl(b, value, nir_imm_int(b, left_shift));
   else if (left_shift < 0)
      return nir_ushr(b, value, nir_imm_int(b, -left_shift));
   else
      return value;
}

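/* AND "src" with the constant "mask", then shift the result by "left_shift"
 * (negative means a right shift).  Handy for moving a bitfield into place.
 */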
static inline nir_ssa_def *
nir_mask_shift(struct nir_builder *b, nir_ssa_def *src,
               uint32_t mask, int left_shift)
{
   return nir_shift(b, nir_iand(b, src, nir_imm_int(b, mask)), left_shift);
}

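/* OR the masked-and-shifted bits of "src" into "dst". */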
static inline nir_ssa_def *
nir_mask_shift_or(struct nir_builder *b, nir_ssa_def *dst, nir_ssa_def *src,
                  uint32_t src_mask, int src_left_shift)
{
   return nir_ior(b, nir_mask_shift(b, src, src_mask, src_left_shift), dst);
}

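/* Mask each component of "src" down to its low bits[i] bits. */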
static inline nir_ssa_def *
nir_format_mask_uvec(nir_builder *b, nir_ssa_def *src, const unsigned *bits)
{
   nir_const_value mask;
   for (unsigned i = 0; i < src->num_components; i++) {
      assert(bits[i] < 32);
      mask.u32[i] = (1u << bits[i]) - 1;
   }
   return nir_iand(b, src, nir_build_imm(b, src->num_components, 32, mask));
}

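/* Sign-extend each component of "src" from a bits[i]-bit value to the full
 * bit size using a left-shift/arithmetic-right-shift pair.
 */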
static inline nir_ssa_def *
nir_format_sign_extend_ivec(nir_builder *b, nir_ssa_def *src,
                            const unsigned *bits)
{
   assert(src->num_components <= 4);
   nir_ssa_def *comps[4];
   for (unsigned i = 0; i < src->num_components; i++) {
      nir_ssa_def *shift = nir_imm_int(b, src->bit_size - bits[i]);
      comps[i] = nir_ishr(b, nir_ishl(b, nir_channel(b, src, i), shift), shift);
   }
   return nir_vec(b, comps, src->num_components);
}

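/* Unpack num_components bitfields, each bits[i] bits wide and packed
 * LSB-first in "packed", into a vector of integers.  Each field is
 * extracted with a shift-left/shift-right pair; "sign_extend" selects an
 * arithmetic (signed) or logical (unsigned) right shift.
 */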
static inline nir_ssa_def *
nir_format_unpack_int(nir_builder *b, nir_ssa_def *packed,
                      const unsigned *bits, unsigned num_components,
                      bool sign_extend)
{
   assert(num_components >= 1 && num_components <= 4);
   const unsigned bit_size = packed->bit_size;
   nir_ssa_def *comps[4];

   if (bits[0] >= bit_size) {
      assert(bits[0] == bit_size);
      assert(num_components == 1);
      return packed;
   }

   unsigned next_chan = 0;
   unsigned offset = 0;
   for (unsigned i = 0; i < num_components; i++) {
      assert(bits[i] < bit_size);
      assert(offset + bits[i] <= bit_size);
      nir_ssa_def *chan = nir_channel(b, packed, next_chan);
      nir_ssa_def *lshift = nir_imm_int(b, bit_size - (offset + bits[i]));
      nir_ssa_def *rshift = nir_imm_int(b, bit_size - bits[i]);
      if (sign_extend)
         comps[i] = nir_ishr(b, nir_ishl(b, chan, lshift), rshift);
      else
         comps[i] = nir_ushr(b, nir_ishl(b, chan, lshift), rshift);
      offset += bits[i];
      if (offset >= bit_size) {
         next_chan++;
         offset -= bit_size;
      }
   }

   return nir_vec(b, comps, num_components);
}

static inline nir_ssa_def *
nir_format_unpack_uint(nir_builder *b, nir_ssa_def *packed,
                       const unsigned *bits, unsigned num_components)
{
   return nir_format_unpack_int(b, packed, bits, num_components, false);
}

static inline nir_ssa_def *
nir_format_unpack_sint(nir_builder *b, nir_ssa_def *packed,
                       const unsigned *bits, unsigned num_components)
{
   return nir_format_unpack_int(b, packed, bits, num_components, true);
}

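/* Pack the channels of "color" into a single 32-bit value, placing channel
 * i at a bit offset equal to the sum of bits[0..i-1].  Channels are assumed
 * to already fit in their fields; use nir_format_pack_uint to mask them
 * first.
 */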
static inline nir_ssa_def *
nir_format_pack_uint_unmasked(nir_builder *b, nir_ssa_def *color,
                              const unsigned *bits, unsigned num_components)
{
   assert(num_components >= 1 && num_components <= 4);
   nir_ssa_def *packed = nir_imm_int(b, 0);
   unsigned offset = 0;
   for (unsigned i = 0; i < num_components; i++) {
      packed = nir_ior(b, packed, nir_shift(b, nir_channel(b, color, i),
                                            offset));
      offset += bits[i];
   }
   assert(offset <= packed->bit_size);

   return packed;
}

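/* Like nir_format_pack_uint_unmasked, but masks each channel down to
 * bits[i] bits first.
 */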
static inline nir_ssa_def *
nir_format_pack_uint(nir_builder *b, nir_ssa_def *color,
                     const unsigned *bits, unsigned num_components)
{
   return nir_format_pack_uint_unmasked(b, nir_format_mask_uvec(b, color, bits),
                                        bits, num_components);
}

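/* Bitcast a vector of src_bits-wide unsigned values to a vector of
 * dst_bits-wide values, e.g. four 8-bit channels to one 32-bit channel.
 * When widening, several narrow channels are ORed into each wide channel;
 * when narrowing, each narrow channel is extracted with a shift and mask.
 * Source values are assumed to fit in src_bits (no masking of the source).
 */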
static inline nir_ssa_def *
nir_format_bitcast_uvec_unmasked(nir_builder *b, nir_ssa_def *src,
                                 unsigned src_bits, unsigned dst_bits)
{
   assert(src->bit_size >= src_bits && src->bit_size >= dst_bits);
   assert(src_bits == 8 || src_bits == 16 || src_bits == 32);
   assert(dst_bits == 8 || dst_bits == 16 || dst_bits == 32);

   if (src_bits == dst_bits)
      return src;

   const unsigned dst_components =
      DIV_ROUND_UP(src->num_components * src_bits, dst_bits);
   assert(dst_components <= 4);

   nir_ssa_def *dst_chan[4] = {0};
   if (dst_bits > src_bits) {
      unsigned shift = 0;
      unsigned dst_idx = 0;
      for (unsigned i = 0; i < src->num_components; i++) {
         nir_ssa_def *shifted = nir_ishl(b, nir_channel(b, src, i),
                                         nir_imm_int(b, shift));
         if (shift == 0) {
            dst_chan[dst_idx] = shifted;
         } else {
            dst_chan[dst_idx] = nir_ior(b, dst_chan[dst_idx], shifted);
         }

         shift += src_bits;
         if (shift >= dst_bits) {
            dst_idx++;
            shift = 0;
         }
      }
   } else {
      nir_ssa_def *mask = nir_imm_int(b, ~0u >> (32 - dst_bits));

      unsigned src_idx = 0;
      unsigned shift = 0;
      for (unsigned i = 0; i < dst_components; i++) {
         dst_chan[i] = nir_iand(b, nir_ushr(b, nir_channel(b, src, src_idx),
                                            nir_imm_int(b, shift)),
                                mask);
         shift += dst_bits;
         if (shift >= src_bits) {
            src_idx++;
            shift = 0;
         }
      }
   }

   return nir_vec(b, dst_chan, dst_components);
}

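/* Build the per-component normalization factor as a float: 2^bits[i] - 1
 * for unorm formats, or 2^(bits[i]-1) - 1 for snorm formats.
 */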
static inline nir_ssa_def *
_nir_format_norm_factor(nir_builder *b, const unsigned *bits,
                        unsigned num_components,
                        bool is_signed)
{
   nir_const_value factor;
   for (unsigned i = 0; i < num_components; i++) {
      assert(bits[i] < 32);
      factor.f32[i] = (1ul << (bits[i] - is_signed)) - 1;
   }
   return nir_build_imm(b, num_components, 32, factor);
}

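/* Convert unsigned normalized values to floats in [0, 1]. */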
static inline nir_ssa_def *
nir_format_unorm_to_float(nir_builder *b, nir_ssa_def *u, const unsigned *bits)
{
   nir_ssa_def *factor =
      _nir_format_norm_factor(b, bits, u->num_components, false);

   return nir_fdiv(b, nir_u2f32(b, u), factor);
}

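/* Convert signed normalized values to floats in [-1, 1].  The result is
 * clamped to -1.0 so that both most-negative representable values (e.g.
 * -127 and -128 for 8-bit) map to -1.0, as the snorm rules require.
 */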
static inline nir_ssa_def *
nir_format_snorm_to_float(nir_builder *b, nir_ssa_def *s, const unsigned *bits)
{
   nir_ssa_def *factor =
      _nir_format_norm_factor(b, bits, s->num_components, true);

   return nir_fmax(b, nir_fdiv(b, nir_i2f32(b, s), factor),
                   nir_imm_float(b, -1.0f));
}

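/* Convert floats to unsigned normalized integers, saturating to [0, 1] and
 * rounding to nearest-even.
 */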
static inline nir_ssa_def *
nir_format_float_to_unorm(nir_builder *b, nir_ssa_def *f, const unsigned *bits)
{
   nir_ssa_def *factor =
      _nir_format_norm_factor(b, bits, f->num_components, false);

   /* Clamp to the range [0, 1] */
   f = nir_fsat(b, f);

   return nir_f2u32(b, nir_fround_even(b, nir_fmul(b, f, factor)));
}

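/* Convert floats to signed normalized integers, clamping to [-1, 1] and
 * rounding to nearest-even.
 */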
static inline nir_ssa_def *
nir_format_float_to_snorm(nir_builder *b, nir_ssa_def *f, const unsigned *bits)
{
   nir_ssa_def *factor =
      _nir_format_norm_factor(b, bits, f->num_components, true);

   /* Clamp to the range [-1, 1] */
   f = nir_fmin(b, nir_fmax(b, f, nir_imm_float(b, -1)), nir_imm_float(b, 1));

   return nir_f2i32(b, nir_fround_even(b, nir_fmul(b, f, factor)));
}

/* Converts a vector of floats to a vector of half-floats packed in the low 16
 * bits.
 */
static inline nir_ssa_def *
nir_format_float_to_half(nir_builder *b, nir_ssa_def *f)
{
   nir_ssa_def *zero = nir_imm_float(b, 0);
   nir_ssa_def *f16comps[4];
   for (unsigned i = 0; i < f->num_components; i++)
      f16comps[i] = nir_pack_half_2x16_split(b, nir_channel(b, f, i), zero);
   return nir_vec(b, f16comps, f->num_components);
}

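/* Apply the linear-to-sRGB transfer function: 12.92 * c on the linear
 * segment (c < 0.0031308), 1.055 * c^(1/2.4) - 0.055 on the curved segment,
 * with the result saturated to [0, 1].
 */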
static inline nir_ssa_def *
nir_format_linear_to_srgb(nir_builder *b, nir_ssa_def *c)
{
   nir_ssa_def *linear = nir_fmul(b, c, nir_imm_float(b, 12.92f));
   nir_ssa_def *curved =
      nir_fsub(b, nir_fmul(b, nir_imm_float(b, 1.055f),
                           nir_fpow(b, c, nir_imm_float(b, 1.0 / 2.4))),
               nir_imm_float(b, 0.055f));

   return nir_fsat(b, nir_bcsel(b, nir_flt(b, c, nir_imm_float(b, 0.0031308f)),
                                linear, curved));
}

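/* Apply the sRGB-to-linear transfer function; the inverse of
 * nir_format_linear_to_srgb, with the cutover at c = 0.04045.
 */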
static inline nir_ssa_def *
nir_format_srgb_to_linear(nir_builder *b, nir_ssa_def *c)
{
   nir_ssa_def *linear = nir_fdiv(b, c, nir_imm_float(b, 12.92f));
   nir_ssa_def *curved =
      nir_fpow(b, nir_fdiv(b, nir_fadd(b, c, nir_imm_float(b, 0.055f)),
                           nir_imm_float(b, 1.055f)),
               nir_imm_float(b, 2.4f));

   return nir_fsat(b, nir_bcsel(b, nir_fge(b, nir_imm_float(b, 0.04045f), c),
                                linear, curved));
}

/* Clamps a vector of uints so they don't extend beyond the given number of
 * bits per channel.
 */
static inline nir_ssa_def *
nir_format_clamp_uint(nir_builder *b, nir_ssa_def *f, const unsigned *bits)
{
   if (bits[0] == 32)
      return f;

   nir_const_value max;
   for (unsigned i = 0; i < f->num_components; i++) {
      assert(bits[i] < 32);
      max.u32[i] = (1u << bits[i]) - 1;
   }
   return nir_umin(b, f, nir_build_imm(b, f->num_components, 32, max));
}

/* Clamps a vector of sints so they don't extend beyond the given number of
 * bits per channel.
 */
static inline nir_ssa_def *
nir_format_clamp_sint(nir_builder *b, nir_ssa_def *f, const unsigned *bits)
{
   if (bits[0] == 32)
      return f;

   nir_const_value min, max;
   for (unsigned i = 0; i < f->num_components; i++) {
      assert(bits[i] < 32);
      max.i32[i] = (1 << (bits[i] - 1)) - 1;
      min.i32[i] = -(1 << (bits[i] - 1));
   }
   f = nir_imin(b, f, nir_build_imm(b, f->num_components, 32, max));
   f = nir_imax(b, f, nir_build_imm(b, f->num_components, 32, min));

   return f;
}

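/* Unpack a packed R11F_G11F_B10F value into three 32-bit floats.  The 11-
 * and 10-bit floats share the half-float exponent layout, just with fewer
 * mantissa bits and no sign bit, so each channel is shifted into the top of
 * a 16-bit value and then expanded as a half-float.
 */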
static inline nir_ssa_def *
nir_format_unpack_11f11f10f(nir_builder *b, nir_ssa_def *packed)
{
   nir_ssa_def *chans[3];
   chans[0] = nir_mask_shift(b, packed, 0x000007ff, 4);
   chans[1] = nir_mask_shift(b, packed, 0x003ff800, -7);
   chans[2] = nir_mask_shift(b, packed, 0xffc00000, -17);

   for (unsigned i = 0; i < 3; i++)
      chans[i] = nir_unpack_half_2x16_split_x(b, chans[i]);

   return nir_vec(b, chans, 3);
}

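/* Pack three non-negative floats into an R11F_G11F_B10F value.  Channels
 * are first converted to half-floats in pairs, then trimmed to 11 or 10
 * bits as described below.
 */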
static inline nir_ssa_def *
nir_format_pack_11f11f10f(nir_builder *b, nir_ssa_def *color)
{
   /* 10 and 11-bit floats are unsigned.  Clamp to non-negative */
   nir_ssa_def *clamped = nir_fmax(b, color, nir_imm_float(b, 0));

   nir_ssa_def *undef = nir_ssa_undef(b, 1, color->bit_size);
   nir_ssa_def *p1 = nir_pack_half_2x16_split(b, nir_channel(b, clamped, 0),
                                              nir_channel(b, clamped, 1));
   nir_ssa_def *p2 = nir_pack_half_2x16_split(b, nir_channel(b, clamped, 2),
                                              undef);

   /* A 10 or 11-bit float has the same exponent as a 16-bit float but with
    * fewer mantissa bits and no sign bit.  All we have to do is throw away
    * the sign bit and the bottom mantissa bits and shift it into place.
    */
   nir_ssa_def *packed = nir_imm_int(b, 0);
   packed = nir_mask_shift_or(b, packed, p1, 0x00007ff0, -4);
   packed = nir_mask_shift_or(b, packed, p1, 0x7ff00000, -9);
   packed = nir_mask_shift_or(b, packed, p2, 0x00007fe0, 17);

   return packed;
}

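/* Pack three floats into an RGB9E5 shared-exponent value.  This mirrors the
 * CPU reference implementation float3_to_rgb9e5() (see
 * util/format_rgb9e5.h), whose steps are quoted in the comments below.
 */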
static inline nir_ssa_def *
nir_format_pack_r9g9b9e5(nir_builder *b, nir_ssa_def *color)
{
   /* See also float3_to_rgb9e5 */

   /* First, we need to clamp it to range. */
   nir_ssa_def *clamped = nir_fmin(b, color, nir_imm_float(b, MAX_RGB9E5));

   /* Get rid of negatives and NaN */
   clamped = nir_bcsel(b, nir_ult(b, nir_imm_int(b, 0x7f800000), color),
                       nir_imm_float(b, 0), clamped);

   /* maxrgb.u = MAX3(rc.u, gc.u, bc.u); */
   nir_ssa_def *maxu = nir_umax(b, nir_channel(b, clamped, 0),
                                nir_umax(b, nir_channel(b, clamped, 1),
                                            nir_channel(b, clamped, 2)));

   /* maxrgb.u += maxrgb.u & (1 << (23-9)); */
   maxu = nir_iadd(b, maxu, nir_iand(b, maxu, nir_imm_int(b, 1 << 14)));

   /* exp_shared = MAX2((maxrgb.u >> 23), -RGB9E5_EXP_BIAS - 1 + 127) +
    *              1 + RGB9E5_EXP_BIAS - 127;
    */
   nir_ssa_def *exp_shared =
      nir_iadd(b, nir_umax(b, nir_ushr(b, maxu, nir_imm_int(b, 23)),
                           nir_imm_int(b, -RGB9E5_EXP_BIAS - 1 + 127)),
               nir_imm_int(b, 1 + RGB9E5_EXP_BIAS - 127));

   /* revdenom_biasedexp = 127 - (exp_shared - RGB9E5_EXP_BIAS -
    *                             RGB9E5_MANTISSA_BITS) + 1;
    */
   nir_ssa_def *revdenom_biasedexp =
      nir_isub(b, nir_imm_int(b, 127 + RGB9E5_EXP_BIAS +
                                 RGB9E5_MANTISSA_BITS + 1),
               exp_shared);

   /* revdenom.u = revdenom_biasedexp << 23; */
   nir_ssa_def *revdenom =
      nir_ishl(b, revdenom_biasedexp, nir_imm_int(b, 23));

   /* rm = (int) (rc.f * revdenom.f);
    * gm = (int) (gc.f * revdenom.f);
    * bm = (int) (bc.f * revdenom.f);
    */
   nir_ssa_def *mantissa =
      nir_f2i32(b, nir_fmul(b, clamped, revdenom));

   /* rm = (rm & 1) + (rm >> 1);
    * gm = (gm & 1) + (gm >> 1);
    * bm = (bm & 1) + (bm >> 1);
    */
   mantissa = nir_iadd(b, nir_iand(b, mantissa, nir_imm_int(b, 1)),
                       nir_ushr(b, mantissa, nir_imm_int(b, 1)));

   nir_ssa_def *packed = nir_channel(b, mantissa, 0);
   packed = nir_mask_shift_or(b, packed, nir_channel(b, mantissa, 1), ~0, 9);
   packed = nir_mask_shift_or(b, packed, nir_channel(b, mantissa, 2), ~0, 18);
   packed = nir_mask_shift_or(b, packed, exp_shared, ~0, 27);

   return packed;
}

#endif /* NIR_FORMAT_CONVERT_H */