/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Connor Abbott (cwabbott0@gmail.com)
 */
29 #include "nir_builder.h"
32 sanitize_32bit_sysval(nir_builder
*b
, nir_intrinsic_instr
*intrin
)
34 assert(intrin
->dest
.is_ssa
);
35 const unsigned bit_size
= intrin
->dest
.ssa
.bit_size
;
39 intrin
->dest
.ssa
.bit_size
= 32;
40 return nir_u2u(b
, &intrin
->dest
.ssa
, bit_size
);
44 build_global_group_size(nir_builder
*b
, unsigned bit_size
)
46 nir_ssa_def
*group_size
= nir_load_local_group_size(b
);
47 nir_ssa_def
*num_work_groups
= nir_load_num_work_groups(b
);
48 return nir_imul(b
, nir_u2u(b
, group_size
, bit_size
),
49 nir_u2u(b
, num_work_groups
, bit_size
));
53 lower_system_value_filter(const nir_instr
*instr
, const void *_state
)
55 return instr
->type
== nir_instr_type_intrinsic
;
59 lower_system_value_instr(nir_builder
*b
, nir_instr
*instr
, void *_state
)
61 nir_intrinsic_instr
*intrin
= nir_instr_as_intrinsic(instr
);
63 /* All the intrinsics we care about are loads */
64 if (!nir_intrinsic_infos
[intrin
->intrinsic
].has_dest
)
67 assert(intrin
->dest
.is_ssa
);
68 const unsigned bit_size
= intrin
->dest
.ssa
.bit_size
;
70 switch (intrin
->intrinsic
) {
71 case nir_intrinsic_load_vertex_id
:
72 if (b
->shader
->options
->vertex_id_zero_based
) {
73 return nir_iadd(b
, nir_load_vertex_id_zero_base(b
),
74 nir_load_first_vertex(b
));
79 case nir_intrinsic_load_base_vertex
:
81 * From the OpenGL 4.6 (11.1.3.9 Shader Inputs) specification:
83 * "gl_BaseVertex holds the integer value passed to the baseVertex
84 * parameter to the command that resulted in the current shader
85 * invocation. In the case where the command has no baseVertex
86 * parameter, the value of gl_BaseVertex is zero."
88 if (b
->shader
->options
->lower_base_vertex
) {
89 return nir_iand(b
, nir_load_is_indexed_draw(b
),
90 nir_load_first_vertex(b
));
95 case nir_intrinsic_load_local_invocation_id
:
96 /* If lower_cs_local_id_from_index is true, then we derive the local
97 * index from the local id.
99 if (b
->shader
->options
->lower_cs_local_id_from_index
) {
100 /* We lower gl_LocalInvocationID from gl_LocalInvocationIndex based
103 * gl_LocalInvocationID.x =
104 * gl_LocalInvocationIndex % gl_WorkGroupSize.x;
105 * gl_LocalInvocationID.y =
106 * (gl_LocalInvocationIndex / gl_WorkGroupSize.x) %
107 * gl_WorkGroupSize.y;
108 * gl_LocalInvocationID.z =
109 * (gl_LocalInvocationIndex /
110 * (gl_WorkGroupSize.x * gl_WorkGroupSize.y)) %
111 * gl_WorkGroupSize.z;
113 * However, the final % gl_WorkGroupSize.z does nothing unless we
114 * accidentally end up with a gl_LocalInvocationIndex that is too
115 * large so it can safely be omitted.
117 nir_ssa_def
*local_index
= nir_load_local_invocation_index(b
);
118 nir_ssa_def
*local_size
= nir_load_local_group_size(b
);
120 /* Because no hardware supports a local workgroup size greater than
121 * about 1K, this calculation can be done in 32-bit and can save some
124 nir_ssa_def
*id_x
, *id_y
, *id_z
;
125 id_x
= nir_umod(b
, local_index
,
126 nir_channel(b
, local_size
, 0));
127 id_y
= nir_umod(b
, nir_udiv(b
, local_index
,
128 nir_channel(b
, local_size
, 0)),
129 nir_channel(b
, local_size
, 1));
130 id_z
= nir_udiv(b
, local_index
,
131 nir_imul(b
, nir_channel(b
, local_size
, 0),
132 nir_channel(b
, local_size
, 1)));
133 return nir_u2u(b
, nir_vec3(b
, id_x
, id_y
, id_z
), bit_size
);
135 return sanitize_32bit_sysval(b
, intrin
);
138 case nir_intrinsic_load_local_invocation_index
:
139 /* If lower_cs_local_index_from_id is true, then we derive the local
140 * index from the local id.
142 if (b
->shader
->options
->lower_cs_local_index_from_id
) {
143 /* From the GLSL man page for gl_LocalInvocationIndex:
145 * "The value of gl_LocalInvocationIndex is equal to
146 * gl_LocalInvocationID.z * gl_WorkGroupSize.x *
147 * gl_WorkGroupSize.y + gl_LocalInvocationID.y *
148 * gl_WorkGroupSize.x + gl_LocalInvocationID.x"
150 nir_ssa_def
*local_id
= nir_load_local_invocation_id(b
);
152 nir_ssa_def
*size_x
=
153 nir_imm_int(b
, b
->shader
->info
.cs
.local_size
[0]);
154 nir_ssa_def
*size_y
=
155 nir_imm_int(b
, b
->shader
->info
.cs
.local_size
[1]);
157 /* Because no hardware supports a local workgroup size greater than
158 * about 1K, this calculation can be done in 32-bit and can save some
162 index
= nir_imul(b
, nir_channel(b
, local_id
, 2),
163 nir_imul(b
, size_x
, size_y
));
164 index
= nir_iadd(b
, index
,
165 nir_imul(b
, nir_channel(b
, local_id
, 1), size_x
));
166 index
= nir_iadd(b
, index
, nir_channel(b
, local_id
, 0));
167 return nir_u2u(b
, index
, bit_size
);
169 return sanitize_32bit_sysval(b
, intrin
);
172 case nir_intrinsic_load_local_group_size
:
173 if (b
->shader
->info
.cs
.local_size_variable
) {
174 /* If the local work group size is variable it can't be lowered at
175 * this point. We do, however, have to make sure that the intrinsic
178 return sanitize_32bit_sysval(b
, intrin
);
180 /* using a 32 bit constant is safe here as no device/driver needs more
181 * than 32 bits for the local size */
182 nir_const_value local_size_const
[3];
183 memset(local_size_const
, 0, sizeof(local_size_const
));
184 local_size_const
[0].u32
= b
->shader
->info
.cs
.local_size
[0];
185 local_size_const
[1].u32
= b
->shader
->info
.cs
.local_size
[1];
186 local_size_const
[2].u32
= b
->shader
->info
.cs
.local_size
[2];
187 return nir_u2u(b
, nir_build_imm(b
, 3, 32, local_size_const
), bit_size
);
190 case nir_intrinsic_load_global_invocation_id
: {
191 nir_ssa_def
*group_size
= nir_load_local_group_size(b
);
192 nir_ssa_def
*group_id
= nir_load_work_group_id(b
);
193 nir_ssa_def
*local_id
= nir_load_local_invocation_id(b
);
195 return nir_iadd(b
, nir_imul(b
, nir_u2u(b
, group_id
, bit_size
),
196 nir_u2u(b
, group_size
, bit_size
)),
197 nir_u2u(b
, local_id
, bit_size
));
200 case nir_intrinsic_load_global_invocation_index
: {
201 nir_ssa_def
*global_id
= nir_load_global_invocation_id(b
, bit_size
);
202 nir_ssa_def
*global_size
= build_global_group_size(b
, bit_size
);
204 /* index = id.x + ((id.y + (id.z * size.y)) * size.x) */
206 index
= nir_imul(b
, nir_channel(b
, global_id
, 2),
207 nir_channel(b
, global_size
, 1));
208 index
= nir_iadd(b
, nir_channel(b
, global_id
, 1), index
);
209 index
= nir_imul(b
, nir_channel(b
, global_size
, 0), index
);
210 index
= nir_iadd(b
, nir_channel(b
, global_id
, 0), index
);
214 case nir_intrinsic_load_helper_invocation
:
215 if (b
->shader
->options
->lower_helper_invocation
) {
217 tmp
= nir_ishl(b
, nir_imm_int(b
, 1),
218 nir_load_sample_id_no_per_sample(b
));
219 tmp
= nir_iand(b
, nir_load_sample_mask_in(b
), tmp
);
220 return nir_inot(b
, nir_i2b(b
, tmp
));
225 case nir_intrinsic_load_deref
: {
226 nir_deref_instr
*deref
= nir_src_as_deref(intrin
->src
[0]);
227 if (deref
->mode
!= nir_var_system_value
)
230 if (deref
->deref_type
!= nir_deref_type_var
) {
231 /* The only one system value that is an array and that is
232 * gl_SampleMask which is always an array of one element.
234 assert(deref
->deref_type
== nir_deref_type_array
);
235 deref
= nir_deref_instr_parent(deref
);
236 assert(deref
->deref_type
== nir_deref_type_var
);
237 assert(deref
->var
->data
.location
== SYSTEM_VALUE_SAMPLE_MASK_IN
);
239 nir_variable
*var
= deref
->var
;
241 switch (var
->data
.location
) {
242 case SYSTEM_VALUE_INSTANCE_INDEX
:
243 return nir_iadd(b
, nir_load_instance_id(b
),
244 nir_load_base_instance(b
));
246 case SYSTEM_VALUE_SUBGROUP_EQ_MASK
:
247 case SYSTEM_VALUE_SUBGROUP_GE_MASK
:
248 case SYSTEM_VALUE_SUBGROUP_GT_MASK
:
249 case SYSTEM_VALUE_SUBGROUP_LE_MASK
:
250 case SYSTEM_VALUE_SUBGROUP_LT_MASK
: {
251 nir_intrinsic_op op
=
252 nir_intrinsic_from_system_value(var
->data
.location
);
253 nir_intrinsic_instr
*load
= nir_intrinsic_instr_create(b
->shader
, op
);
254 nir_ssa_dest_init_for_type(&load
->instr
, &load
->dest
,
256 load
->num_components
= load
->dest
.ssa
.num_components
;
257 nir_builder_instr_insert(b
, &load
->instr
);
258 return &load
->dest
.ssa
;
261 case SYSTEM_VALUE_DEVICE_INDEX
:
262 if (b
->shader
->options
->lower_device_index_to_zero
)
263 return nir_imm_int(b
, 0);
266 case SYSTEM_VALUE_GLOBAL_GROUP_SIZE
:
267 return build_global_group_size(b
, bit_size
);
273 nir_intrinsic_op sysval_op
=
274 nir_intrinsic_from_system_value(var
->data
.location
);
275 return nir_load_system_value(b
, sysval_op
, 0,
276 intrin
->dest
.ssa
.bit_size
);
285 nir_lower_system_values(nir_shader
*shader
)
287 bool progress
= nir_shader_lower_instructions(shader
,
288 lower_system_value_filter
,
289 lower_system_value_instr
,
292 /* We're going to delete the variables so we need to clean up all those
293 * derefs we left lying around.
296 nir_remove_dead_derefs(shader
);
298 exec_list_make_empty(&shader
->system_values
);