/*
 * Copyright © 2015 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "vc4_qir.h"
#include "compiler/nir/nir_builder.h"
#include "util/u_format.h"
/**
 * Walks the NIR generated by TGSI-to-NIR to lower its io intrinsics into
 * something amenable to the VC4 architecture.
 *
 * Currently, it splits inputs, outputs, and uniforms into scalars, drops any
 * non-position outputs in coordinate shaders, and fixes up the addressing on
 * indirect uniform loads.
 */
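
/*
 * A rough sketch of the transform (illustrative, not literal NIR syntax):
 * a vec4 "load_input base=N" becomes four one-component "load_input"
 * intrinsics at bases 4N .. 4N+3, and their results are recombined with a
 * vec4 ALU op that the later scalarization pass splits back up.
 */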
static void
replace_intrinsic_with_vec4(nir_builder *b, nir_intrinsic_instr *intr,
                            nir_ssa_def **comps)
{
        /* Batch things back together into a vec4.  This will get split by the
         * later ALU scalarization pass.
         */
        nir_ssa_def *vec = nir_vec4(b, comps[0], comps[1], comps[2], comps[3]);

        /* Replace the old intrinsic with a reference to our reconstructed
         * vec4 output.
         */
        nir_ssa_def_rewrite_uses(&intr->dest.ssa, nir_src_for_ssa(vec));
        nir_instr_remove(&intr->instr);
}
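
/** Returns the 8 bit field as an unsigned 32-bit value.
 *
 * Despite the "i" in the name, this is an unsigned extract; the signed
 * 8-bit paths below first XOR the sign bits and then remove the bias.
 */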
static nir_ssa_def *
vc4_nir_unpack_8i(nir_builder *b, nir_ssa_def *src, unsigned chan)
{
        return nir_ubitfield_extract(b,
                                     src,
                                     nir_imm_int(b, 8 * chan),
                                     nir_imm_int(b, 8));
}
/** Returns the 16 bit field as a sign-extended 32-bit value. */
static nir_ssa_def *
vc4_nir_unpack_16i(nir_builder *b, nir_ssa_def *src, unsigned chan)
{
        return nir_ibitfield_extract(b,
                                     src,
                                     nir_imm_int(b, 16 * chan),
                                     nir_imm_int(b, 16));
}
/** Returns the 16 bit field as an unsigned 32 bit value. */
static nir_ssa_def *
vc4_nir_unpack_16u(nir_builder *b, nir_ssa_def *src, unsigned chan)
{
        if (chan == 0) {
                return nir_iand(b, src, nir_imm_int(b, 0xffff));
        } else {
                return nir_ushr(b, src, nir_imm_int(b, 16));
        }
}
static nir_ssa_def *
vc4_nir_unpack_8f(nir_builder *b, nir_ssa_def *src, unsigned chan)
{
        return nir_channel(b, nir_unpack_unorm_4x8(b, src), chan);
}
static nir_ssa_def *
vc4_nir_get_vattr_channel_vpm(struct vc4_compile *c,
                              nir_builder *b,
                              nir_ssa_def **vpm_reads,
                              uint8_t swiz,
                              const struct util_format_description *desc)
{
        const struct util_format_channel_description *chan =
                &desc->channel[swiz];
        nir_ssa_def *temp;

        if (swiz > PIPE_SWIZZLE_W) {
                return vc4_nir_get_swizzled_channel(b, vpm_reads, swiz);
        } else if (chan->size == 32 && chan->type == UTIL_FORMAT_TYPE_FLOAT) {
                return vc4_nir_get_swizzled_channel(b, vpm_reads, swiz);
        } else if (chan->size == 32 && chan->type == UTIL_FORMAT_TYPE_SIGNED) {
                if (chan->normalized) {
                        return nir_fmul(b,
                                        nir_i2f(b, vpm_reads[swiz]),
                                        nir_imm_float(b, 1.0 / 0x7fffffff));
                } else {
                        return nir_i2f(b, vpm_reads[swiz]);
                }
        } else if (chan->size == 8 &&
                   (chan->type == UTIL_FORMAT_TYPE_UNSIGNED ||
                    chan->type == UTIL_FORMAT_TYPE_SIGNED)) {
                nir_ssa_def *vpm = vpm_reads[0];
                if (chan->type == UTIL_FORMAT_TYPE_SIGNED) {
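                        /* XORing with 0x80808080 flips each byte's sign bit,
                         * turning the signed bytes into biased unsigned ones
                         * so the unsigned 8-bit unpacks above can be used;
                         * the bias is removed below (scale and offset for
                         * normalized data, subtracting 128 otherwise).
                         */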
                        temp = nir_ixor(b, vpm, nir_imm_int(b, 0x80808080));
                        if (chan->normalized) {
                                return nir_fsub(b, nir_fmul(b,
                                                            vc4_nir_unpack_8f(b, temp, swiz),
                                                            nir_imm_float(b, 2.0)),
                                                nir_imm_float(b, 1.0));
                        } else {
                                return nir_fadd(b,
                                                nir_i2f(b,
                                                        vc4_nir_unpack_8i(b, temp,
                                                                          swiz)),
                                                nir_imm_float(b, -128.0));
                        }
                } else {
                        if (chan->normalized) {
                                return vc4_nir_unpack_8f(b, vpm, swiz);
                        } else {
                                return nir_i2f(b, vc4_nir_unpack_8i(b, vpm, swiz));
                        }
                }
        } else if (chan->size == 16 &&
                   (chan->type == UTIL_FORMAT_TYPE_UNSIGNED ||
                    chan->type == UTIL_FORMAT_TYPE_SIGNED)) {
                nir_ssa_def *vpm = vpm_reads[swiz / 2];

                /* Note that UNPACK_16F eats a half float, not ints, so we use
                 * UNPACK_16_I for all of these.
                 */
                if (chan->type == UTIL_FORMAT_TYPE_SIGNED) {
                        temp = nir_i2f(b, vc4_nir_unpack_16i(b, vpm, swiz & 1));
                        if (chan->normalized) {
                                return nir_fmul(b, temp,
                                                nir_imm_float(b, 1 / 32768.0f));
                        } else {
                                return temp;
                        }
                } else {
                        temp = nir_i2f(b, vc4_nir_unpack_16u(b, vpm, swiz & 1));
                        if (chan->normalized) {
                                return nir_fmul(b, temp,
                                                nir_imm_float(b, 1 / 65535.0));
                        } else {
                                return temp;
                        }
                }
        } else {
                return NULL;
        }
}
static void
vc4_nir_lower_vertex_attr(struct vc4_compile *c, nir_builder *b,
                          nir_intrinsic_instr *intr)
{
        b->cursor = nir_before_instr(&intr->instr);

        int attr = nir_intrinsic_base(intr);
        enum pipe_format format = c->vs_key->attr_formats[attr];
        uint32_t attr_size = util_format_get_blocksize(format);

        /* All TGSI-to-NIR inputs are vec4. */
        assert(intr->num_components == 4);

        /* We only accept direct inputs, and TGSI only ever gives them to us
         * with an offset value of 0.
         */
        assert(nir_src_as_const_value(intr->src[0]) &&
               nir_src_as_const_value(intr->src[0])->u32[0] == 0);

        /* Generate dword loads for the VPM values (since these intrinsics may
         * be reordered, the actual reads will be generated at the top of the
         * shader by ntq_setup_inputs()).
         */
        nir_ssa_def *vpm_reads[4];
        for (int i = 0; i < align(attr_size, 4) / 4; i++) {
                nir_intrinsic_instr *intr_comp =
                        nir_intrinsic_instr_create(c->s,
                                                   nir_intrinsic_load_input);
                intr_comp->num_components = 1;
                nir_intrinsic_set_base(intr_comp,
                                       nir_intrinsic_base(intr) * 4 + i);
                intr_comp->src[0] = nir_src_for_ssa(nir_imm_int(b, 0));
                nir_ssa_dest_init(&intr_comp->instr, &intr_comp->dest, 1, 32, NULL);
                nir_builder_instr_insert(b, &intr_comp->instr);

                vpm_reads[i] = &intr_comp->dest.ssa;
        }
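
        /* Decode each destination channel from the raw VPM dwords according
         * to the vertex format description.  Channels the helper can't
         * decode (unsupported format types) fall back to 0.0, with a
         * one-time warning per attribute.
         */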
        bool format_warned = false;
        const struct util_format_description *desc =
                util_format_description(format);

        nir_ssa_def *dests[4];
        for (int i = 0; i < 4; i++) {
                uint8_t swiz = desc->swizzle[i];
                dests[i] = vc4_nir_get_vattr_channel_vpm(c, b, vpm_reads, swiz,
                                                         desc);

                if (!dests[i]) {
                        if (!format_warned) {
                                fprintf(stderr,
                                        "vtx element %d unsupported type: %s\n",
                                        attr, util_format_name(format));
                                format_warned = true;
                        }
                        dests[i] = nir_imm_float(b, 0.0);
                }
        }

        replace_intrinsic_with_vec4(b, intr, dests);
}
static void
vc4_nir_lower_fs_input(struct vc4_compile *c, nir_builder *b,
                       nir_intrinsic_instr *intr)
{
        b->cursor = nir_before_instr(&intr->instr);

        if (nir_intrinsic_base(intr) >= VC4_NIR_TLB_COLOR_READ_INPUT &&
            nir_intrinsic_base(intr) < (VC4_NIR_TLB_COLOR_READ_INPUT +
                                        VC4_MAX_SAMPLES)) {
                /* This doesn't need any lowering. */
                return;
        }

        nir_variable *input_var = NULL;
        nir_foreach_variable(var, &c->s->inputs) {
                if (var->data.driver_location == nir_intrinsic_base(intr)) {
                        input_var = var;
                        break;
                }
        }
        assert(input_var);

        /* All TGSI-to-NIR inputs are vec4. */
        assert(intr->num_components == 4);

        /* We only accept direct inputs and TGSI only ever gives them to us
         * with an offset value of 0.
         */
        assert(nir_src_as_const_value(intr->src[0]) &&
               nir_src_as_const_value(intr->src[0])->u32[0] == 0);

        /* Generate scalar loads equivalent to the original VEC4. */
        nir_ssa_def *dests[4];
        for (unsigned i = 0; i < intr->num_components; i++) {
                nir_intrinsic_instr *intr_comp =
                        nir_intrinsic_instr_create(c->s, nir_intrinsic_load_input);
                intr_comp->num_components = 1;
                nir_intrinsic_set_base(intr_comp,
                                       nir_intrinsic_base(intr) * 4 + i);
                intr_comp->src[0] = nir_src_for_ssa(nir_imm_int(b, 0));

                nir_ssa_dest_init(&intr_comp->instr, &intr_comp->dest, 1, 32, NULL);
                nir_builder_instr_insert(b, &intr_comp->instr);

                dests[i] = &intr_comp->dest.ssa;
        }
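
        /* If this varying is driven by point sprites, replace the loaded
         * values with the point coordinate: t is flipped when the origin is
         * upper-left, z/w become (0, 1), and s/t read as (0, 0) when we
         * aren't actually drawing points.
         */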
        if (input_var->data.location >= VARYING_SLOT_VAR0) {
                if (c->fs_key->point_sprite_mask &
                    (1 << (input_var->data.location -
                           VARYING_SLOT_VAR0))) {
                        if (!c->fs_key->is_points) {
                                dests[0] = nir_imm_float(b, 0.0);
                                dests[1] = nir_imm_float(b, 0.0);
                        }
                        if (c->fs_key->point_coord_upper_left) {
                                dests[1] = nir_fsub(b,
                                                    nir_imm_float(b, 1.0),
                                                    dests[1]);
                        }
                        dests[2] = nir_imm_float(b, 0.0);
                        dests[3] = nir_imm_float(b, 1.0);
                }
        }

        replace_intrinsic_with_vec4(b, intr, dests);
}
static void
vc4_nir_lower_output(struct vc4_compile *c, nir_builder *b,
                     nir_intrinsic_instr *intr)
{
        nir_variable *output_var = NULL;
        nir_foreach_variable(var, &c->s->outputs) {
                if (var->data.driver_location == nir_intrinsic_base(intr)) {
                        output_var = var;
                        break;
                }
        }
        assert(output_var);
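
        /* Coordinate shaders (the binning-pass variants of the VS) only
         * need position and point size, so any other output store can be
         * dropped.
         */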
        if (c->stage == QSTAGE_COORD &&
            output_var->data.location != VARYING_SLOT_POS &&
            output_var->data.location != VARYING_SLOT_PSIZ) {
                nir_instr_remove(&intr->instr);
                return;
        }

        /* Color output is lowered by vc4_nir_lower_blend(). */
        if (c->stage == QSTAGE_FRAG &&
            (output_var->data.location == FRAG_RESULT_COLOR ||
             output_var->data.location == FRAG_RESULT_DATA0 ||
             output_var->data.location == FRAG_RESULT_SAMPLE_MASK)) {
                nir_intrinsic_set_base(intr, nir_intrinsic_base(intr) * 4);
                return;
        }

        /* All TGSI-to-NIR outputs are VEC4. */
        assert(intr->num_components == 4);

        /* We only accept direct outputs and TGSI only ever gives them to us
         * with an offset value of 0.
         */
        assert(nir_src_as_const_value(intr->src[1]) &&
               nir_src_as_const_value(intr->src[1])->u32[0] == 0);

        b->cursor = nir_before_instr(&intr->instr);

        for (unsigned i = 0; i < intr->num_components; i++) {
                nir_intrinsic_instr *intr_comp =
                        nir_intrinsic_instr_create(c->s, nir_intrinsic_store_output);
                intr_comp->num_components = 1;
                nir_intrinsic_set_base(intr_comp,
                                       nir_intrinsic_base(intr) * 4 + i);

                assert(intr->src[0].is_ssa);
                intr_comp->src[0] =
                        nir_src_for_ssa(nir_channel(b, intr->src[0].ssa, i));
                intr_comp->src[1] = nir_src_for_ssa(nir_imm_int(b, 0));
                nir_builder_instr_insert(b, &intr_comp->instr);
        }

        nir_instr_remove(&intr->instr);
}
static void
vc4_nir_lower_uniform(struct vc4_compile *c, nir_builder *b,
                      nir_intrinsic_instr *intr)
{
        /* All TGSI-to-NIR uniform loads are vec4, but we need byte offsets
         * in the backend, so we split them into scalar loads with byte-based
         * addressing here.  Loads that are already scalar are left alone.
         */
        if (intr->num_components == 1)
                return;

        assert(intr->num_components == 4);

        b->cursor = nir_before_instr(&intr->instr);

        /* Generate scalar loads equivalent to the original VEC4. */
        nir_ssa_def *dests[4];
        for (unsigned i = 0; i < intr->num_components; i++) {
                nir_intrinsic_instr *intr_comp =
                        nir_intrinsic_instr_create(c->s, intr->intrinsic);
                intr_comp->num_components = 1;
                nir_ssa_dest_init(&intr_comp->instr, &intr_comp->dest, 1, 32, NULL);

                /* Convert the uniform offset to bytes.  If it happens to be a
                 * constant, constant-folding will clean up the shift for us.
                 */
                nir_intrinsic_set_base(intr_comp,
                                       nir_intrinsic_base(intr) * 16 + i * 4);
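
                /* The indirect offset arrives in vec4 units, so shifting it
                 * left by 4 (multiplying by 16) likewise converts it to a
                 * byte offset: the final address is base + (indirect << 4).
                 */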
                intr_comp->src[0] =
                        nir_src_for_ssa(nir_ishl(b, intr->src[0].ssa,
                                                 nir_imm_int(b, 4)));

                dests[i] = &intr_comp->dest.ssa;

                nir_builder_instr_insert(b, &intr_comp->instr);
        }

        replace_intrinsic_with_vec4(b, intr, dests);
}
static void
vc4_nir_lower_io_instr(struct vc4_compile *c, nir_builder *b,
                       struct nir_instr *instr)
{
        if (instr->type != nir_instr_type_intrinsic)
                return;
        nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

        switch (intr->intrinsic) {
        case nir_intrinsic_load_input:
                if (c->stage == QSTAGE_FRAG)
                        vc4_nir_lower_fs_input(c, b, intr);
                else
                        vc4_nir_lower_vertex_attr(c, b, intr);
                break;

        case nir_intrinsic_store_output:
                vc4_nir_lower_output(c, b, intr);
                break;

        case nir_intrinsic_load_uniform:
                vc4_nir_lower_uniform(c, b, intr);
                break;

        case nir_intrinsic_load_user_clip_plane:
        default:
                break;
        }
}
static bool
vc4_nir_lower_io_impl(struct vc4_compile *c, nir_function_impl *impl)
{
        nir_builder b;
        nir_builder_init(&b, impl);

        nir_foreach_block(block, impl) {
                nir_foreach_instr_safe(instr, block)
                        vc4_nir_lower_io_instr(c, &b, instr);
        }

        nir_metadata_preserve(impl, nir_metadata_block_index |
                              nir_metadata_dominance);

        return true;
}
void
vc4_nir_lower_io(nir_shader *s, struct vc4_compile *c)
{
        nir_foreach_function(function, s) {
                if (function->impl)
                        vc4_nir_lower_io_impl(c, function->impl);
        }
}