/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Connor Abbott (cwabbott0@gmail.com)
 */
29 #include "nir_builder.h"
32 sanitize_32bit_sysval(nir_builder
*b
, nir_intrinsic_instr
*intrin
)
34 assert(intrin
->dest
.is_ssa
);
35 const unsigned bit_size
= intrin
->dest
.ssa
.bit_size
;
39 intrin
->dest
.ssa
.bit_size
= 32;
40 return nir_u2u(b
, &intrin
->dest
.ssa
, bit_size
);
44 build_global_group_size(nir_builder
*b
, unsigned bit_size
)
46 nir_ssa_def
*group_size
= nir_load_local_group_size(b
);
47 nir_ssa_def
*num_work_groups
= nir_load_num_work_groups(b
, bit_size
);
48 return nir_imul(b
, nir_u2u(b
, group_size
, bit_size
),
53 lower_system_value_filter(const nir_instr
*instr
, const void *_state
)
55 return instr
->type
== nir_instr_type_intrinsic
;
59 lower_system_value_instr(nir_builder
*b
, nir_instr
*instr
, void *_state
)
61 nir_intrinsic_instr
*intrin
= nir_instr_as_intrinsic(instr
);
63 /* All the intrinsics we care about are loads */
64 if (!nir_intrinsic_infos
[intrin
->intrinsic
].has_dest
)
67 assert(intrin
->dest
.is_ssa
);
68 const unsigned bit_size
= intrin
->dest
.ssa
.bit_size
;
70 switch (intrin
->intrinsic
) {
71 case nir_intrinsic_load_vertex_id
:
72 if (b
->shader
->options
->vertex_id_zero_based
) {
73 return nir_iadd(b
, nir_load_vertex_id_zero_base(b
),
74 nir_load_first_vertex(b
));
79 case nir_intrinsic_load_base_vertex
:
81 * From the OpenGL 4.6 (11.1.3.9 Shader Inputs) specification:
83 * "gl_BaseVertex holds the integer value passed to the baseVertex
84 * parameter to the command that resulted in the current shader
85 * invocation. In the case where the command has no baseVertex
86 * parameter, the value of gl_BaseVertex is zero."
88 if (b
->shader
->options
->lower_base_vertex
) {
89 return nir_iand(b
, nir_load_is_indexed_draw(b
),
90 nir_load_first_vertex(b
));
95 case nir_intrinsic_load_helper_invocation
:
96 if (b
->shader
->options
->lower_helper_invocation
) {
98 tmp
= nir_ishl(b
, nir_imm_int(b
, 1),
99 nir_load_sample_id_no_per_sample(b
));
100 tmp
= nir_iand(b
, nir_load_sample_mask_in(b
), tmp
);
101 return nir_inot(b
, nir_i2b(b
, tmp
));
106 case nir_intrinsic_load_local_invocation_id
:
107 case nir_intrinsic_load_local_invocation_index
:
108 case nir_intrinsic_load_local_group_size
:
109 return sanitize_32bit_sysval(b
, intrin
);
111 case nir_intrinsic_load_deref
: {
112 nir_deref_instr
*deref
= nir_src_as_deref(intrin
->src
[0]);
113 if (deref
->mode
!= nir_var_system_value
)
116 if (deref
->deref_type
!= nir_deref_type_var
) {
117 /* The only one system value that is an array and that is
118 * gl_SampleMask which is always an array of one element.
120 assert(deref
->deref_type
== nir_deref_type_array
);
121 deref
= nir_deref_instr_parent(deref
);
122 assert(deref
->deref_type
== nir_deref_type_var
);
123 assert(deref
->var
->data
.location
== SYSTEM_VALUE_SAMPLE_MASK_IN
);
125 nir_variable
*var
= deref
->var
;
127 switch (var
->data
.location
) {
128 case SYSTEM_VALUE_INSTANCE_INDEX
:
129 return nir_iadd(b
, nir_load_instance_id(b
),
130 nir_load_base_instance(b
));
132 case SYSTEM_VALUE_SUBGROUP_EQ_MASK
:
133 case SYSTEM_VALUE_SUBGROUP_GE_MASK
:
134 case SYSTEM_VALUE_SUBGROUP_GT_MASK
:
135 case SYSTEM_VALUE_SUBGROUP_LE_MASK
:
136 case SYSTEM_VALUE_SUBGROUP_LT_MASK
: {
137 nir_intrinsic_op op
=
138 nir_intrinsic_from_system_value(var
->data
.location
);
139 nir_intrinsic_instr
*load
= nir_intrinsic_instr_create(b
->shader
, op
);
140 nir_ssa_dest_init_for_type(&load
->instr
, &load
->dest
,
142 load
->num_components
= load
->dest
.ssa
.num_components
;
143 nir_builder_instr_insert(b
, &load
->instr
);
144 return &load
->dest
.ssa
;
147 case SYSTEM_VALUE_DEVICE_INDEX
:
148 if (b
->shader
->options
->lower_device_index_to_zero
)
149 return nir_imm_int(b
, 0);
152 case SYSTEM_VALUE_GLOBAL_GROUP_SIZE
:
153 return build_global_group_size(b
, bit_size
);
155 case SYSTEM_VALUE_BARYCENTRIC_LINEAR_PIXEL
:
156 return nir_load_barycentric(b
, nir_intrinsic_load_barycentric_pixel
,
157 INTERP_MODE_NOPERSPECTIVE
);
159 case SYSTEM_VALUE_BARYCENTRIC_LINEAR_CENTROID
:
160 return nir_load_barycentric(b
, nir_intrinsic_load_barycentric_centroid
,
161 INTERP_MODE_NOPERSPECTIVE
);
163 case SYSTEM_VALUE_BARYCENTRIC_LINEAR_SAMPLE
:
164 return nir_load_barycentric(b
, nir_intrinsic_load_barycentric_sample
,
165 INTERP_MODE_NOPERSPECTIVE
);
167 case SYSTEM_VALUE_BARYCENTRIC_PERSP_PIXEL
:
168 return nir_load_barycentric(b
, nir_intrinsic_load_barycentric_pixel
,
171 case SYSTEM_VALUE_BARYCENTRIC_PERSP_CENTROID
:
172 return nir_load_barycentric(b
, nir_intrinsic_load_barycentric_centroid
,
175 case SYSTEM_VALUE_BARYCENTRIC_PERSP_SAMPLE
:
176 return nir_load_barycentric(b
, nir_intrinsic_load_barycentric_sample
,
179 case SYSTEM_VALUE_BARYCENTRIC_PULL_MODEL
:
180 return nir_load_barycentric(b
, nir_intrinsic_load_barycentric_model
,
187 nir_intrinsic_op sysval_op
=
188 nir_intrinsic_from_system_value(var
->data
.location
);
189 return nir_load_system_value(b
, sysval_op
, 0,
190 intrin
->dest
.ssa
.num_components
,
191 intrin
->dest
.ssa
.bit_size
);
200 nir_lower_system_values(nir_shader
*shader
)
202 bool progress
= nir_shader_lower_instructions(shader
,
203 lower_system_value_filter
,
204 lower_system_value_instr
,
207 /* We're going to delete the variables so we need to clean up all those
208 * derefs we left lying around.
211 nir_remove_dead_derefs(shader
);
213 nir_foreach_variable_with_modes_safe(var
, shader
, nir_var_system_value
)
214 exec_node_remove(&var
->node
);
220 lower_compute_system_value_filter(const nir_instr
*instr
, const void *_options
)
222 return instr
->type
== nir_instr_type_intrinsic
;
226 lower_compute_system_value_instr(nir_builder
*b
,
227 nir_instr
*instr
, void *_options
)
229 nir_intrinsic_instr
*intrin
= nir_instr_as_intrinsic(instr
);
230 const nir_lower_compute_system_values_options
*options
= _options
;
232 /* All the intrinsics we care about are loads */
233 if (!nir_intrinsic_infos
[intrin
->intrinsic
].has_dest
)
236 assert(intrin
->dest
.is_ssa
);
237 const unsigned bit_size
= intrin
->dest
.ssa
.bit_size
;
239 switch (intrin
->intrinsic
) {
240 case nir_intrinsic_load_local_invocation_id
:
241 /* If lower_cs_local_id_from_index is true, then we derive the local
242 * index from the local id.
244 if (b
->shader
->options
->lower_cs_local_id_from_index
) {
245 /* We lower gl_LocalInvocationID from gl_LocalInvocationIndex based
248 * gl_LocalInvocationID.x =
249 * gl_LocalInvocationIndex % gl_WorkGroupSize.x;
250 * gl_LocalInvocationID.y =
251 * (gl_LocalInvocationIndex / gl_WorkGroupSize.x) %
252 * gl_WorkGroupSize.y;
253 * gl_LocalInvocationID.z =
254 * (gl_LocalInvocationIndex /
255 * (gl_WorkGroupSize.x * gl_WorkGroupSize.y)) %
256 * gl_WorkGroupSize.z;
258 * However, the final % gl_WorkGroupSize.z does nothing unless we
259 * accidentally end up with a gl_LocalInvocationIndex that is too
260 * large so it can safely be omitted.
262 nir_ssa_def
*local_index
= nir_load_local_invocation_index(b
);
263 nir_ssa_def
*local_size
= nir_load_local_group_size(b
);
265 /* Because no hardware supports a local workgroup size greater than
266 * about 1K, this calculation can be done in 32-bit and can save some
269 nir_ssa_def
*id_x
, *id_y
, *id_z
;
270 id_x
= nir_umod(b
, local_index
,
271 nir_channel(b
, local_size
, 0));
272 id_y
= nir_umod(b
, nir_udiv(b
, local_index
,
273 nir_channel(b
, local_size
, 0)),
274 nir_channel(b
, local_size
, 1));
275 id_z
= nir_udiv(b
, local_index
,
276 nir_imul(b
, nir_channel(b
, local_size
, 0),
277 nir_channel(b
, local_size
, 1)));
278 return nir_u2u(b
, nir_vec3(b
, id_x
, id_y
, id_z
), bit_size
);
283 case nir_intrinsic_load_local_invocation_index
:
284 /* If lower_cs_local_index_from_id is true, then we derive the local
285 * index from the local id.
287 if (b
->shader
->options
->lower_cs_local_index_from_id
) {
288 /* From the GLSL man page for gl_LocalInvocationIndex:
290 * "The value of gl_LocalInvocationIndex is equal to
291 * gl_LocalInvocationID.z * gl_WorkGroupSize.x *
292 * gl_WorkGroupSize.y + gl_LocalInvocationID.y *
293 * gl_WorkGroupSize.x + gl_LocalInvocationID.x"
295 nir_ssa_def
*local_id
= nir_load_local_invocation_id(b
);
297 nir_ssa_def
*size_x
=
298 nir_imm_int(b
, b
->shader
->info
.cs
.local_size
[0]);
299 nir_ssa_def
*size_y
=
300 nir_imm_int(b
, b
->shader
->info
.cs
.local_size
[1]);
302 /* Because no hardware supports a local workgroup size greater than
303 * about 1K, this calculation can be done in 32-bit and can save some
307 index
= nir_imul(b
, nir_channel(b
, local_id
, 2),
308 nir_imul(b
, size_x
, size_y
));
309 index
= nir_iadd(b
, index
,
310 nir_imul(b
, nir_channel(b
, local_id
, 1), size_x
));
311 index
= nir_iadd(b
, index
, nir_channel(b
, local_id
, 0));
312 return nir_u2u(b
, index
, bit_size
);
317 case nir_intrinsic_load_local_group_size
:
318 if (b
->shader
->info
.cs
.local_size_variable
) {
319 /* If the local work group size is variable it can't be lowered at
320 * this point. We do, however, have to make sure that the intrinsic
325 /* using a 32 bit constant is safe here as no device/driver needs more
326 * than 32 bits for the local size */
327 nir_const_value local_size_const
[3];
328 memset(local_size_const
, 0, sizeof(local_size_const
));
329 local_size_const
[0].u32
= b
->shader
->info
.cs
.local_size
[0];
330 local_size_const
[1].u32
= b
->shader
->info
.cs
.local_size
[1];
331 local_size_const
[2].u32
= b
->shader
->info
.cs
.local_size
[2];
332 return nir_u2u(b
, nir_build_imm(b
, 3, 32, local_size_const
), bit_size
);
335 case nir_intrinsic_load_global_invocation_id_zero_base
: {
336 if ((options
&& options
->has_base_work_group_id
) ||
337 !b
->shader
->options
->has_cs_global_id
) {
338 nir_ssa_def
*group_size
= nir_load_local_group_size(b
);
339 nir_ssa_def
*group_id
= nir_load_work_group_id(b
, bit_size
);
340 nir_ssa_def
*local_id
= nir_load_local_invocation_id(b
);
342 return nir_iadd(b
, nir_imul(b
, group_id
,
343 nir_u2u(b
, group_size
, bit_size
)),
344 nir_u2u(b
, local_id
, bit_size
));
350 case nir_intrinsic_load_global_invocation_id
: {
351 if (options
&& options
->has_base_global_invocation_id
)
352 return nir_iadd(b
, nir_load_global_invocation_id_zero_base(b
, bit_size
),
353 nir_load_base_global_invocation_id(b
, bit_size
));
354 else if (!b
->shader
->options
->has_cs_global_id
)
355 return nir_load_global_invocation_id_zero_base(b
, bit_size
);
360 case nir_intrinsic_load_global_invocation_index
: {
361 /* OpenCL's global_linear_id explicitly removes the global offset before computing this */
362 assert(b
->shader
->info
.stage
== MESA_SHADER_KERNEL
);
363 nir_ssa_def
*global_base_id
= nir_load_base_global_invocation_id(b
, bit_size
);
364 nir_ssa_def
*global_id
= nir_isub(b
, nir_load_global_invocation_id(b
, bit_size
), global_base_id
);
365 nir_ssa_def
*global_size
= build_global_group_size(b
, bit_size
);
367 /* index = id.x + ((id.y + (id.z * size.y)) * size.x) */
369 index
= nir_imul(b
, nir_channel(b
, global_id
, 2),
370 nir_channel(b
, global_size
, 1));
371 index
= nir_iadd(b
, nir_channel(b
, global_id
, 1), index
);
372 index
= nir_imul(b
, nir_channel(b
, global_size
, 0), index
);
373 index
= nir_iadd(b
, nir_channel(b
, global_id
, 0), index
);
377 case nir_intrinsic_load_work_group_id
: {
378 if (options
&& options
->has_base_work_group_id
)
379 return nir_iadd(b
, nir_u2u(b
, nir_load_work_group_id_zero_base(b
), bit_size
),
380 nir_load_base_work_group_id(b
, bit_size
));
391 nir_lower_compute_system_values(nir_shader
*shader
,
392 const nir_lower_compute_system_values_options
*options
)
394 if (shader
->info
.stage
!= MESA_SHADER_COMPUTE
&&
395 shader
->info
.stage
!= MESA_SHADER_KERNEL
)
398 return nir_shader_lower_instructions(shader
,
399 lower_compute_system_value_filter
,
400 lower_compute_system_value_instr
,