/*
 * Copyright © 2015 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "vc4_qir.h"
#include "compiler/nir/nir_builder.h"
#include "util/format/u_format.h"
#include "util/u_helpers.h"

/**
 * Walks the NIR generated by TGSI-to-NIR or GLSL-to-NIR to lower its io
 * intrinsics into something amenable to the VC4 architecture.
 *
 * Currently, it splits VS inputs and uniforms into scalars, drops any
 * non-position outputs in coordinate shaders, and fixes up the addressing on
 * indirect uniform loads.  FS input and VS output scalarization is handled by
 * nir_lower_io_to_scalar().
 */
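
/*
 * For illustration (a sketch, not output captured from a real shader): a
 * vec4 load_uniform with base 1 and a constant-zero offset becomes four
 * scalar load_uniforms at byte offsets 16, 20, 24 and 28, and a vec4 of
 * their results replaces the original destination.
 */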

static void
replace_intrinsic_with_vec(nir_builder *b, nir_intrinsic_instr *intr,
                           nir_ssa_def **comps)
{
        /* Batch things back together into a vector.  This will get split by
         * the later ALU scalarization pass.
         */
        nir_ssa_def *vec = nir_vec(b, comps, intr->num_components);

        /* Replace the old intrinsic with a reference to our reconstructed
         * vector.
         */
        nir_ssa_def_rewrite_uses(&intr->dest.ssa, nir_src_for_ssa(vec));
        nir_instr_remove(&intr->instr);
}
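
/** Returns the 8 bit field as a zero-extended 32-bit value. */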
static nir_ssa_def *
vc4_nir_unpack_8i(nir_builder *b, nir_ssa_def *src,
                  unsigned chan)
{
        return nir_ubitfield_extract(b,
                                     src,
                                     nir_imm_int(b, 8 * chan),
                                     nir_imm_int(b, 8));
}

/** Returns the 16 bit field as a sign-extended 32-bit value. */
static nir_ssa_def *
vc4_nir_unpack_16i(nir_builder *b, nir_ssa_def *src,
                   unsigned chan)
{
        return nir_ibitfield_extract(b,
                                     src,
                                     nir_imm_int(b, 16 * chan),
                                     nir_imm_int(b, 16));
}

/** Returns the 16 bit field as an unsigned 32 bit value. */
static nir_ssa_def *
vc4_nir_unpack_16u(nir_builder *b, nir_ssa_def *src,
                   unsigned chan)
{
        if (chan == 0) {
                return nir_iand(b, src, nir_imm_int(b, 0xffff));
        } else {
                return nir_ushr(b, src, nir_imm_int(b, 16));
        }
}

static nir_ssa_def *
vc4_nir_unpack_8f(nir_builder *b, nir_ssa_def *src, unsigned chan)
{
        return nir_channel(b, nir_unpack_unorm_4x8(b, src), chan);
}
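
/*
 * Returns one channel of a vertex attribute as a 32-bit float, given the
 * raw dword VPM reads for the attribute and the format's channel
 * description, or NULL if the channel's format isn't handled here.
 */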
static nir_ssa_def *
vc4_nir_get_vattr_channel_vpm(struct vc4_compile *c,
                              nir_builder *b,
                              nir_ssa_def **vpm_reads,
                              uint8_t swiz,
                              const struct util_format_description *desc)
{
        const struct util_format_channel_description *chan =
                &desc->channel[swiz];
        nir_ssa_def *temp;

        if (swiz > PIPE_SWIZZLE_W) {
                return vc4_nir_get_swizzled_channel(b, vpm_reads, swiz);
        } else if (chan->size == 32 && chan->type == UTIL_FORMAT_TYPE_FLOAT) {
                return vc4_nir_get_swizzled_channel(b, vpm_reads, swiz);
        } else if (chan->size == 32 && chan->type == UTIL_FORMAT_TYPE_SIGNED) {
                if (chan->normalized) {
                        return nir_fmul(b,
                                        nir_i2f32(b, vpm_reads[swiz]),
                                        nir_imm_float(b,
                                                      1.0 / 0x7fffffff));
                } else {
                        return nir_i2f32(b, vpm_reads[swiz]);
                }
        } else if (chan->size == 8 &&
                   (chan->type == UTIL_FORMAT_TYPE_UNSIGNED ||
                    chan->type == UTIL_FORMAT_TYPE_SIGNED)) {
                nir_ssa_def *vpm = vpm_reads[0];
                if (chan->type == UTIL_FORMAT_TYPE_SIGNED) {
                        temp = nir_ixor(b, vpm, nir_imm_int(b, 0x80808080));
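                        /* The XOR flips each byte's sign bit, biasing the
                         * signed bytes into unsigned range so the unsigned
                         * unpack helpers can be used; the bias is undone
                         * below (* 2 - 1 for normalized values, + -128.0
                         * for non-normalized ones).
                         */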
                        if (chan->normalized) {
                                return nir_fsub(b, nir_fmul(b,
                                                            vc4_nir_unpack_8f(b, temp, swiz),
                                                            nir_imm_float(b, 2.0)),
                                                nir_imm_float(b, 1.0));
                        } else {
                                return nir_fadd(b,
                                                nir_i2f32(b,
                                                          vc4_nir_unpack_8i(b, temp,
                                                                            swiz)),
                                                nir_imm_float(b, -128.0));
                        }
                } else {
                        if (chan->normalized) {
                                return vc4_nir_unpack_8f(b, vpm, swiz);
                        } else {
                                return nir_i2f32(b, vc4_nir_unpack_8i(b, vpm, swiz));
                        }
                }
        } else if (chan->size == 16 &&
                   (chan->type == UTIL_FORMAT_TYPE_UNSIGNED ||
                    chan->type == UTIL_FORMAT_TYPE_SIGNED)) {
                nir_ssa_def *vpm = vpm_reads[swiz / 2];

                /* Note that UNPACK_16F eats a half float, not ints, so we use
                 * UNPACK_16_I for all of these.
                 */
                if (chan->type == UTIL_FORMAT_TYPE_SIGNED) {
                        temp = nir_i2f32(b, vc4_nir_unpack_16i(b, vpm, swiz & 1));
                        if (chan->normalized) {
                                return nir_fmul(b, temp,
                                                nir_imm_float(b, 1/32768.0f));
                        } else {
                                return temp;
                        }
                } else {
                        temp = nir_i2f32(b, vc4_nir_unpack_16u(b, vpm, swiz & 1));
                        if (chan->normalized) {
                                return nir_fmul(b, temp,
                                                nir_imm_float(b, 1 / 65535.0));
                        } else {
                                return temp;
                        }
                }
        } else {
                return NULL;
        }
}

static void
vc4_nir_lower_vertex_attr(struct vc4_compile *c, nir_builder *b,
                          nir_intrinsic_instr *intr)
{
        b->cursor = nir_before_instr(&intr->instr);

        int attr = nir_intrinsic_base(intr);
        enum pipe_format format = c->vs_key->attr_formats[attr];
        uint32_t attr_size = util_format_get_blocksize(format);

        /* We only accept direct outputs and TGSI only ever gives them to us
         * with an offset value of 0.
         */
        assert(nir_src_as_uint(intr->src[0]) == 0);

        /* Generate dword loads for the VPM values (Since these intrinsics may
         * be reordered, the actual reads will be generated at the top of the
         * shader by ntq_setup_inputs()).
         */
        nir_ssa_def *vpm_reads[4];
        for (int i = 0; i < align(attr_size, 4) / 4; i++) {
                nir_intrinsic_instr *intr_comp =
                        nir_intrinsic_instr_create(c->s,
                                                   nir_intrinsic_load_input);
                intr_comp->num_components = 1;
                nir_intrinsic_set_base(intr_comp, nir_intrinsic_base(intr));
                nir_intrinsic_set_component(intr_comp, i);
                intr_comp->src[0] = nir_src_for_ssa(nir_imm_int(b, 0));
                nir_ssa_dest_init(&intr_comp->instr, &intr_comp->dest, 1, 32, NULL);
                nir_builder_instr_insert(b, &intr_comp->instr);

                vpm_reads[i] = &intr_comp->dest.ssa;
        }

        bool format_warned = false;
        const struct util_format_description *desc =
                util_format_description(format);

        nir_ssa_def *dests[4];
        for (int i = 0; i < intr->num_components; i++) {
                uint8_t swiz = desc->swizzle[i];
                dests[i] = vc4_nir_get_vattr_channel_vpm(c, b, vpm_reads, swiz,
                                                         desc);
                if (!dests[i]) {
                        if (!format_warned) {
                                fprintf(stderr,
                                        "vtx element %d unsupported type: %s\n",
                                        attr, util_format_name(format));
                                format_warned = true;
                        }
                        dests[i] = nir_imm_float(b, 0.0);
                }
        }

        replace_intrinsic_with_vec(b, intr, dests);
}

static void
vc4_nir_lower_fs_input(struct vc4_compile *c, nir_builder *b,
                       nir_intrinsic_instr *intr)
{
        b->cursor = nir_after_instr(&intr->instr);

        if (nir_intrinsic_base(intr) >= VC4_NIR_TLB_COLOR_READ_INPUT &&
            nir_intrinsic_base(intr) < (VC4_NIR_TLB_COLOR_READ_INPUT +
                                        VC4_MAX_SAMPLES)) {
                /* This doesn't need any lowering. */
                return;
        }

        nir_variable *input_var =
                nir_find_variable_with_driver_location(c->s, nir_var_shader_in,
                                                       nir_intrinsic_base(intr));
        assert(input_var);

        int comp = nir_intrinsic_component(intr);

        /* Lower away point coordinates, and fix up PNTC. */
        if (util_varying_is_point_coord(input_var->data.location,
                                        c->fs_key->point_sprite_mask)) {
                assert(intr->num_components == 1);

                nir_ssa_def *result = &intr->dest.ssa;

                switch (comp) {
                case 0:
                case 1:
                        /* If we're not rendering points, we need to set a
                         * defined value for the input that would come from
                         * the vertex shader.
                         */
                        if (!c->fs_key->is_points)
                                result = nir_imm_float(b, 0.0);
                        break;
                case 2:
                        result = nir_imm_float(b, 0.0);
                        break;
                case 3:
                        result = nir_imm_float(b, 1.0);
                        break;
                }

                if (c->fs_key->point_coord_upper_left && comp == 1)
                        result = nir_fsub(b, nir_imm_float(b, 1.0), result);
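
                /* result may have been built from the original input value
                 * (the nir_fsub above reads it), so rewrite only the uses
                 * after its defining instruction rather than all uses.
                 */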
                if (result != &intr->dest.ssa) {
                        nir_ssa_def_rewrite_uses_after(&intr->dest.ssa,
                                                       nir_src_for_ssa(result),
                                                       result->parent_instr);
                }
        }
}

static void
vc4_nir_lower_output(struct vc4_compile *c, nir_builder *b,
                     nir_intrinsic_instr *intr)
{
        nir_variable *output_var =
                nir_find_variable_with_driver_location(c->s, nir_var_shader_out,
                                                       nir_intrinsic_base(intr));
        assert(output_var);

        if (c->stage == QSTAGE_COORD &&
            output_var->data.location != VARYING_SLOT_POS &&
            output_var->data.location != VARYING_SLOT_PSIZ) {
                nir_instr_remove(&intr->instr);
                return;
        }
}

static void
vc4_nir_lower_uniform(struct vc4_compile *c, nir_builder *b,
                      nir_intrinsic_instr *intr)
{
        b->cursor = nir_before_instr(&intr->instr);

        /* Generate scalar loads equivalent to the original vector. */
        nir_ssa_def *dests[4];
        for (unsigned i = 0; i < intr->num_components; i++) {
                nir_intrinsic_instr *intr_comp =
                        nir_intrinsic_instr_create(c->s, intr->intrinsic);
                intr_comp->num_components = 1;
                nir_ssa_dest_init(&intr_comp->instr, &intr_comp->dest, 1,
                                  intr->dest.ssa.bit_size, NULL);

                /* Convert the uniform offset to bytes.  If it happens
                 * to be a constant, constant-folding will clean up
                 * the shift for us.
                 */
                nir_intrinsic_set_base(intr_comp,
                                       nir_intrinsic_base(intr) * 16 +
                                       i * 4);
                nir_intrinsic_set_range(intr_comp,
                                        nir_intrinsic_range(intr) * 16 - i * 4);
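
                /* The incoming base and range are in vec4 units, hence the
                 * scale by 16 to bytes (with each scalar component another
                 * 4 bytes in); the indirect offset below is likewise
                 * shifted from a vec4 index to a byte address.
                 */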
                intr_comp->src[0] =
                        nir_src_for_ssa(nir_ishl(b, intr->src[0].ssa,
                                                 nir_imm_int(b, 4)));

                dests[i] = &intr_comp->dest.ssa;

                nir_builder_instr_insert(b, &intr_comp->instr);
        }

        replace_intrinsic_with_vec(b, intr, dests);
}

static void
vc4_nir_lower_io_instr(struct vc4_compile *c, nir_builder *b,
                       struct nir_instr *instr)
{
        if (instr->type != nir_instr_type_intrinsic)
                return;
        nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

        switch (intr->intrinsic) {
        case nir_intrinsic_load_input:
                if (c->stage == QSTAGE_FRAG)
                        vc4_nir_lower_fs_input(c, b, intr);
                else
                        vc4_nir_lower_vertex_attr(c, b, intr);
                break;

        case nir_intrinsic_store_output:
                vc4_nir_lower_output(c, b, intr);
                break;

        case nir_intrinsic_load_uniform:
                vc4_nir_lower_uniform(c, b, intr);
                break;

        case nir_intrinsic_load_user_clip_plane:
        default:
                break;
        }
}

static bool
vc4_nir_lower_io_impl(struct vc4_compile *c, nir_function_impl *impl)
{
        nir_builder b;
        nir_builder_init(&b, impl);

        nir_foreach_block(block, impl) {
                nir_foreach_instr_safe(instr, block)
                        vc4_nir_lower_io_instr(c, &b, instr);
        }

        nir_metadata_preserve(impl, nir_metadata_block_index |
                              nir_metadata_dominance);

        return true;
}

void
vc4_nir_lower_io(nir_shader *s, struct vc4_compile *c)
{
        nir_foreach_function(function, s) {
                if (function->impl)
                        vc4_nir_lower_io_impl(c, function->impl);
        }
}