/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir_builder.h"

#include "util/format_rgb9e5.h"

static inline nir_ssa_def *
nir_shift(nir_builder *b, nir_ssa_def *value, int left_shift)
{
   if (left_shift > 0)
      return nir_ishl(b, value, nir_imm_int(b, left_shift));
   else if (left_shift < 0)
      return nir_ushr(b, value, nir_imm_int(b, -left_shift));
   else
      return value;
}

static inline nir_ssa_def *
nir_mask_shift(struct nir_builder *b, nir_ssa_def *src,
               uint32_t mask, int left_shift)
{
   return nir_shift(b, nir_iand(b, src, nir_imm_int(b, mask)), left_shift);
}

static inline nir_ssa_def *
nir_mask_shift_or(struct nir_builder *b, nir_ssa_def *dst, nir_ssa_def *src,
                  uint32_t src_mask, int src_left_shift)
{
   return nir_ior(b, nir_mask_shift(b, src, src_mask, src_left_shift), dst);
}

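/* Example (illustrative sketch, not part of the original header): building a
 * packed field with the helpers above.  "b" is the caller's nir_builder and
 * "lo"/"hi" are assumed 32-bit scalar defs; this ORs the low 16 bits of "hi"
 * above the low 16 bits of "lo":
 *
 *    nir_ssa_def *word = nir_mask_shift(b, lo, 0x0000ffff, 0);
 *    word = nir_mask_shift_or(b, word, hi, 0x0000ffff, 16);
 */
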
static inline nir_ssa_def *
nir_format_mask_uvec(nir_builder *b, nir_ssa_def *src, const unsigned *bits)
{
   nir_const_value mask;
   for (unsigned i = 0; i < src->num_components; i++) {
      assert(bits[i] < 32);
      mask.u32[i] = (1u << bits[i]) - 1;
   }
   return nir_iand(b, src, nir_build_imm(b, src->num_components, 32, mask));
}

static inline nir_ssa_def *
nir_format_sign_extend_ivec(nir_builder *b, nir_ssa_def *src,
                            const unsigned *bits)
{
   assert(src->num_components <= 4);
   nir_ssa_def *comps[4];
   for (unsigned i = 0; i < src->num_components; i++) {
      nir_ssa_def *shift = nir_imm_int(b, src->bit_size - bits[i]);
      comps[i] = nir_ishr(b, nir_ishl(b, nir_channel(b, src, i), shift), shift);
   }
   return nir_vec(b, comps, src->num_components);
}

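/* Worked example (illustrative): for a 5-bit field in a 32-bit channel the
 * shift above is 27, so the raw value 0x1f becomes 0xf8000000 after the ishl
 * and then 0xffffffff (-1) after the arithmetic ishr, which is the correct
 * sign-extension of the 5-bit two's-complement value -1.
 */
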
static inline nir_ssa_def *
nir_format_unpack_int(nir_builder *b, nir_ssa_def *packed,
                      const unsigned *bits, unsigned num_components,
                      bool sign_extend)
{
   assert(num_components >= 1 && num_components <= 4);
   const unsigned bit_size = packed->bit_size;
   nir_ssa_def *comps[4];

   if (bits[0] >= bit_size) {
      assert(bits[0] == bit_size);
      assert(num_components == 1);
      return packed;
   }

   unsigned next_chan = 0;
   unsigned offset = 0;
   for (unsigned i = 0; i < num_components; i++) {
      assert(bits[i] < bit_size);
      assert(offset + bits[i] <= bit_size);
      nir_ssa_def *chan = nir_channel(b, packed, next_chan);
      nir_ssa_def *lshift = nir_imm_int(b, bit_size - (offset + bits[i]));
      nir_ssa_def *rshift = nir_imm_int(b, bit_size - bits[i]);
      if (sign_extend)
         comps[i] = nir_ishr(b, nir_ishl(b, chan, lshift), rshift);
      else
         comps[i] = nir_ushr(b, nir_ishl(b, chan, lshift), rshift);
      offset += bits[i];
      if (offset >= bit_size) {
         offset -= bit_size;
         next_chan++;
      }
   }

   return nir_vec(b, comps, num_components);
}

static inline nir_ssa_def *
nir_format_unpack_uint(nir_builder *b, nir_ssa_def *packed,
                       const unsigned *bits, unsigned num_components)
{
   return nir_format_unpack_int(b, packed, bits, num_components, false);
}

static inline nir_ssa_def *
nir_format_unpack_sint(nir_builder *b, nir_ssa_def *packed,
                       const unsigned *bits, unsigned num_components)
{
   return nir_format_unpack_int(b, packed, bits, num_components, true);
}

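/* Example (illustrative sketch, not part of the original header): unpacking
 * a 16-bit 5/6/5 value held in a 32-bit scalar "packed" ("b" and "packed"
 * are assumed to come from the calling pass).  Components are consumed from
 * the least-significant bits upward:
 *
 *    static const unsigned bits_565[3] = { 5, 6, 5 };
 *    nir_ssa_def *rgb = nir_format_unpack_uint(b, packed, bits_565, 3);
 */
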
static inline nir_ssa_def *
nir_format_pack_uint_unmasked(nir_builder *b, nir_ssa_def *color,
                              const unsigned *bits, unsigned num_components)
{
   assert(num_components >= 1 && num_components <= 4);
   nir_ssa_def *packed = nir_imm_int(b, 0);
   unsigned offset = 0;
   for (unsigned i = 0; i < num_components; i++) {
      packed = nir_ior(b, packed, nir_shift(b, nir_channel(b, color, i),
                                            offset));
      offset += bits[i];
   }
   assert(offset <= packed->bit_size);

   return packed;
}

static inline nir_ssa_def *
nir_format_pack_uint(nir_builder *b, nir_ssa_def *color,
                     const unsigned *bits, unsigned num_components)
{
   return nir_format_pack_uint_unmasked(b, nir_format_mask_uvec(b, color, bits),
                                        bits, num_components);
}

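/* Example (illustrative sketch): the inverse of the unpack example above,
 * packing a uvec4 "color" (an assumed name) into 16 bits at 4 bits per
 * channel; each channel is masked first so stray high bits can't corrupt
 * its neighbors:
 *
 *    static const unsigned rgba4_bits[4] = { 4, 4, 4, 4 };
 *    nir_ssa_def *packed = nir_format_pack_uint(b, color, rgba4_bits, 4);
 */
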
static inline nir_ssa_def *
nir_format_bitcast_uvec_unmasked(nir_builder *b, nir_ssa_def *src,
                                 unsigned src_bits, unsigned dst_bits)
{
   assert(src->bit_size >= src_bits && src->bit_size >= dst_bits);
   assert(src_bits == 8 || src_bits == 16 || src_bits == 32);
   assert(dst_bits == 8 || dst_bits == 16 || dst_bits == 32);

   if (src_bits == dst_bits)
      return src;

   const unsigned dst_components =
      DIV_ROUND_UP(src->num_components * src_bits, dst_bits);
   assert(dst_components <= 4);

   nir_ssa_def *dst_chan[4] = {0};
   if (dst_bits > src_bits) {
      unsigned shift = 0;
      unsigned dst_idx = 0;
      for (unsigned i = 0; i < src->num_components; i++) {
         nir_ssa_def *shifted = nir_ishl(b, nir_channel(b, src, i),
                                         nir_imm_int(b, shift));
         if (shift == 0) {
            dst_chan[dst_idx] = shifted;
         } else {
            dst_chan[dst_idx] = nir_ior(b, dst_chan[dst_idx], shifted);
         }

         shift += src_bits;
         if (shift >= dst_bits) {
            dst_idx++;
            shift = 0;
         }
      }
   } else {
      nir_ssa_def *mask = nir_imm_int(b, ~0u >> (32 - dst_bits));

      unsigned src_idx = 0;
      unsigned shift = 0;
      for (unsigned i = 0; i < dst_components; i++) {
         dst_chan[i] = nir_iand(b, nir_ushr(b, nir_channel(b, src, src_idx),
                                            nir_imm_int(b, shift)),
                                mask);
         shift += dst_bits;
         if (shift >= src_bits) {
            src_idx++;
            shift = 0;
         }
      }
   }

   return nir_vec(b, dst_chan, dst_components);
}

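/* Example (illustrative sketch): bitcasting a uvec4 of 8-bit values (stored
 * one per 32-bit channel, a layout assumed here) to a single 32-bit scalar:
 *
 *    nir_ssa_def *dword = nir_format_bitcast_uvec_unmasked(b, rgba8, 8, 32);
 *
 * "rgba8" is a hypothetical name; note the helper does not mask, so each
 * channel must already fit in its 8 bits.
 */
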
static inline nir_ssa_def *
_nir_format_norm_factor(nir_builder *b, const unsigned *bits,
                        unsigned num_components,
                        bool is_signed)
{
   nir_const_value factor;
   for (unsigned i = 0; i < num_components; i++) {
      assert(bits[i] < 32);
      factor.f32[i] = (1ul << (bits[i] - is_signed)) - 1;
   }
   return nir_build_imm(b, num_components, 32, factor);
}

static inline nir_ssa_def *
nir_format_unorm_to_float(nir_builder *b, nir_ssa_def *u, const unsigned *bits)
{
   nir_ssa_def *factor =
      _nir_format_norm_factor(b, bits, u->num_components, false);

   return nir_fdiv(b, nir_u2f32(b, u), factor);
}

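/* Worked example (illustrative): for an 8-bit unorm channel the factor is
 * 2^8 - 1 = 255, so f = u / 255 and the raw value 255 maps to exactly 1.0.
 * For an 8-bit snorm channel the factor is 2^7 - 1 = 127, so raw -127 maps
 * to -1.0 and raw -128 is clamped to -1.0 by the fmax below.
 */
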
static inline nir_ssa_def *
nir_format_snorm_to_float(nir_builder *b, nir_ssa_def *s, const unsigned *bits)
{
   nir_ssa_def *factor =
      _nir_format_norm_factor(b, bits, s->num_components, true);

   return nir_fmax(b, nir_fdiv(b, nir_i2f32(b, s), factor),
                      nir_imm_float(b, -1.0f));
}

static inline nir_ssa_def *
nir_format_float_to_unorm(nir_builder *b, nir_ssa_def *f, const unsigned *bits)
{
   nir_ssa_def *factor =
      _nir_format_norm_factor(b, bits, f->num_components, false);

   /* Clamp to the range [0, 1] */
   f = nir_fsat(b, f);

   return nir_f2u32(b, nir_fround_even(b, nir_fmul(b, f, factor)));
}

static inline nir_ssa_def *
nir_format_float_to_snorm(nir_builder *b, nir_ssa_def *f, const unsigned *bits)
{
   nir_ssa_def *factor =
      _nir_format_norm_factor(b, bits, f->num_components, true);

   /* Clamp to the range [-1, 1] */
   f = nir_fmin(b, nir_fmax(b, f, nir_imm_float(b, -1)), nir_imm_float(b, 1));

   return nir_f2i32(b, nir_fround_even(b, nir_fmul(b, f, factor)));
}

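/* Worked example (illustrative): converting f = 0.5 to an 8-bit snorm value
 * multiplies by the factor 127 and rounds to nearest-even, giving 64 (since
 * 0.5 * 127 = 63.5 rounds to the even value 64); f = -1.0 gives exactly -127.
 */
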
/* Converts a vector of floats to a vector of half-floats packed in the low 16
 * bits.
 */
static inline nir_ssa_def *
nir_format_float_to_half(nir_builder *b, nir_ssa_def *f)
{
   nir_ssa_def *zero = nir_imm_float(b, 0);
   nir_ssa_def *f16comps[4];
   for (unsigned i = 0; i < f->num_components; i++)
      f16comps[i] = nir_pack_half_2x16_split(b, nir_channel(b, f, i), zero);
   return nir_vec(b, f16comps, f->num_components);
}

static inline nir_ssa_def *
nir_format_linear_to_srgb(nir_builder *b, nir_ssa_def *c)
{
   nir_ssa_def *linear = nir_fmul(b, c, nir_imm_float(b, 12.92f));
   nir_ssa_def *curved =
      nir_fsub(b, nir_fmul(b, nir_imm_float(b, 1.055f),
                              nir_fpow(b, c, nir_imm_float(b, 1.0 / 2.4))),
                  nir_imm_float(b, 0.055f));

   return nir_fsat(b, nir_bcsel(b, nir_flt(b, c, nir_imm_float(b, 0.0031308f)),
                                   linear, curved));
}

static inline nir_ssa_def *
nir_format_srgb_to_linear(nir_builder *b, nir_ssa_def *c)
{
   nir_ssa_def *linear = nir_fdiv(b, c, nir_imm_float(b, 12.92f));
   nir_ssa_def *curved =
      nir_fpow(b, nir_fdiv(b, nir_fadd(b, c, nir_imm_float(b, 0.055f)),
                              nir_imm_float(b, 1.055f)),
                  nir_imm_float(b, 2.4f));

   return nir_fsat(b, nir_bcsel(b, nir_fge(b, nir_imm_float(b, 0.04045f), c),
                                   linear, curved));
}

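/* For reference, the two helpers above implement the standard piecewise sRGB
 * transfer functions:
 *
 *    encode: s = c <  0.0031308 ? 12.92 * c : 1.055 * c^(1/2.4) - 0.055
 *    decode: c = s <= 0.04045   ? s / 12.92 : ((s + 0.055) / 1.055)^2.4
 *
 * Each direction selects the linear segment near zero and the power curve
 * elsewhere, then saturates the result to [0, 1].
 */
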
/* Clamps a vector of uints so they don't extend beyond the given number of
 * bits per channel.
 */
static inline nir_ssa_def *
nir_format_clamp_uint(nir_builder *b, nir_ssa_def *f, const unsigned *bits)
{
   if (bits[0] == 32)
      return f;

   nir_const_value max;
   for (unsigned i = 0; i < f->num_components; i++) {
      assert(bits[i] < 32);
      max.u32[i] = (1 << bits[i]) - 1;
   }
   return nir_umin(b, f, nir_build_imm(b, f->num_components, 32, max));
}

/* Clamps a vector of sints so they don't extend beyond the given number of
 * bits per channel.
 */
static inline nir_ssa_def *
nir_format_clamp_sint(nir_builder *b, nir_ssa_def *f, const unsigned *bits)
{
   if (bits[0] == 32)
      return f;

   nir_const_value min, max;
   for (unsigned i = 0; i < f->num_components; i++) {
      assert(bits[i] < 32);
      max.i32[i] = (1 << (bits[i] - 1)) - 1;
      min.i32[i] = -(1 << (bits[i] - 1));
   }
   f = nir_imin(b, f, nir_build_imm(b, f->num_components, 32, max));
   f = nir_imax(b, f, nir_build_imm(b, f->num_components, 32, min));

   return f;
}

static inline nir_ssa_def *
nir_format_unpack_11f11f10f(nir_builder *b, nir_ssa_def *packed)
{
   nir_ssa_def *chans[3];
   chans[0] = nir_mask_shift(b, packed, 0x000007ff, 4);
   chans[1] = nir_mask_shift(b, packed, 0x003ff800, -7);
   chans[2] = nir_mask_shift(b, packed, 0xffc00000, -17);

   for (unsigned i = 0; i < 3; i++)
      chans[i] = nir_unpack_half_2x16_split_x(b, chans[i]);

   return nir_vec(b, chans, 3);
}

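/* The masks above select R from bits [10:0], G from [21:11] and B from
 * [31:22], then shift each field so its top bit lands at bit 14, lining the
 * exponent and mantissa up with the half-float layout before the f16 unpack
 * expands it to a full float.
 */
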
static inline nir_ssa_def *
nir_format_pack_11f11f10f(nir_builder *b, nir_ssa_def *color)
{
   /* 10 and 11-bit floats are unsigned.  Clamp to non-negative */
   nir_ssa_def *clamped = nir_fmax(b, color, nir_imm_float(b, 0));

   nir_ssa_def *undef = nir_ssa_undef(b, 1, color->bit_size);
   nir_ssa_def *p1 = nir_pack_half_2x16_split(b, nir_channel(b, clamped, 0),
                                                 nir_channel(b, clamped, 1));
   nir_ssa_def *p2 = nir_pack_half_2x16_split(b, nir_channel(b, clamped, 2),
                                                 undef);

   /* A 10 or 11-bit float has the same exponent as a 16-bit float but with
    * fewer mantissa bits and no sign bit.  All we have to do is throw away
    * the sign bit and the bottom mantissa bits and shift it into place.
    */
   nir_ssa_def *packed = nir_imm_int(b, 0);
   packed = nir_mask_shift_or(b, packed, p1, 0x00007ff0, -4);
   packed = nir_mask_shift_or(b, packed, p1, 0x7ff00000, -9);
   packed = nir_mask_shift_or(b, packed, p2, 0x00007fe0, 17);

   return packed;
}

static inline nir_ssa_def *
nir_format_pack_r9g9b9e5(nir_builder *b, nir_ssa_def *color)
{
   /* See also float3_to_rgb9e5 */

   /* First, we need to clamp it to range. */
   nir_ssa_def *clamped = nir_fmin(b, color, nir_imm_float(b, MAX_RGB9E5));

   /* Get rid of negatives and NaN */
   clamped = nir_bcsel(b, nir_ult(b, nir_imm_int(b, 0x7f800000), color),
                          nir_imm_float(b, 0), clamped);

   /* maxrgb.u = MAX3(rc.u, gc.u, bc.u); */
   nir_ssa_def *maxu = nir_umax(b, nir_channel(b, clamped, 0),
                          nir_umax(b, nir_channel(b, clamped, 1),
                                      nir_channel(b, clamped, 2)));

   /* maxrgb.u += maxrgb.u & (1 << (23-9)); */
   maxu = nir_iadd(b, maxu, nir_iand(b, maxu, nir_imm_int(b, 1 << 14)));

   /* exp_shared = MAX2((maxrgb.u >> 23), -RGB9E5_EXP_BIAS - 1 + 127) +
    *              1 + RGB9E5_EXP_BIAS - 127;
    */
   nir_ssa_def *exp_shared =
      nir_iadd(b, nir_umax(b, nir_ushr(b, maxu, nir_imm_int(b, 23)),
                              nir_imm_int(b, -RGB9E5_EXP_BIAS - 1 + 127)),
                  nir_imm_int(b, 1 + RGB9E5_EXP_BIAS - 127));

   /* revdenom_biasedexp = 127 - (exp_shared - RGB9E5_EXP_BIAS -
    *                             RGB9E5_MANTISSA_BITS) + 1;
    */
   nir_ssa_def *revdenom_biasedexp =
      nir_isub(b, nir_imm_int(b, 127 + RGB9E5_EXP_BIAS +
                                 RGB9E5_MANTISSA_BITS + 1),
                  exp_shared);

   /* revdenom.u = revdenom_biasedexp << 23; */
   nir_ssa_def *revdenom =
      nir_ishl(b, revdenom_biasedexp, nir_imm_int(b, 23));

   /* rm = (int) (rc.f * revdenom.f);
    * gm = (int) (gc.f * revdenom.f);
    * bm = (int) (bc.f * revdenom.f);
    */
   nir_ssa_def *mantissa =
      nir_f2i32(b, nir_fmul(b, clamped, revdenom));

   /* rm = (rm & 1) + (rm >> 1);
    * gm = (gm & 1) + (gm >> 1);
    * bm = (bm & 1) + (bm >> 1);
    */
   mantissa = nir_iadd(b, nir_iand(b, mantissa, nir_imm_int(b, 1)),
                          nir_ushr(b, mantissa, nir_imm_int(b, 1)));

   nir_ssa_def *packed = nir_channel(b, mantissa, 0);
   packed = nir_mask_shift_or(b, packed, nir_channel(b, mantissa, 1), ~0, 9);
   packed = nir_mask_shift_or(b, packed, nir_channel(b, mantissa, 2), ~0, 18);
   packed = nir_mask_shift_or(b, packed, exp_shared, ~0, 27);

   return packed;
}
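
/* The final mask_shift_or chain yields the RGB9E5 layout: the three 9-bit
 * mantissas in bits [8:0], [17:9] and [26:18], and the 5-bit shared exponent
 * in bits [31:27], matching the packing order used by float3_to_rgb9e5.
 */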
, ~0, 27);