2 * Copyright © 2014 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 * Connor Abbott (cwabbott0@gmail.com)
29 #include "nir_builder.h"
32 build_local_group_size(nir_builder
*b
, unsigned bit_size
)
34 nir_ssa_def
*local_size
;
37 * If the local work group size is variable it can't be lowered at this
38 * point, but its intrinsic can still be used.
40 if (b
->shader
->info
.cs
.local_size_variable
) {
41 local_size
= nir_load_local_group_size(b
);
43 /* using a 32 bit constant is safe here as no device/driver needs more
44 * than 32 bits for the local size */
45 nir_const_value local_size_const
[3];
46 memset(local_size_const
, 0, sizeof(local_size_const
));
47 local_size_const
[0].u32
= b
->shader
->info
.cs
.local_size
[0];
48 local_size_const
[1].u32
= b
->shader
->info
.cs
.local_size
[1];
49 local_size_const
[2].u32
= b
->shader
->info
.cs
.local_size
[2];
50 local_size
= nir_build_imm(b
, 3, 32, local_size_const
);
53 return nir_u2u(b
, local_size
, bit_size
);
57 build_local_invocation_id(nir_builder
*b
, unsigned bit_size
)
59 /* If lower_cs_local_id_from_index is true, then we derive the local
60 * index from the local id.
62 if (b
->shader
->options
->lower_cs_local_id_from_index
) {
63 /* We lower gl_LocalInvocationID from gl_LocalInvocationIndex based
66 * gl_LocalInvocationID.x =
67 * gl_LocalInvocationIndex % gl_WorkGroupSize.x;
68 * gl_LocalInvocationID.y =
69 * (gl_LocalInvocationIndex / gl_WorkGroupSize.x) %
71 * gl_LocalInvocationID.z =
72 * (gl_LocalInvocationIndex /
73 * (gl_WorkGroupSize.x * gl_WorkGroupSize.y)) %
76 * However, the final % gl_WorkGroupSize.z does nothing unless we
77 * accidentally end up with a gl_LocalInvocationIndex that is too
78 * large so it can safely be omitted.
80 nir_ssa_def
*local_index
= nir_load_local_invocation_index(b
);
81 nir_ssa_def
*local_size
= build_local_group_size(b
, 32);
83 /* Because no hardware supports a local workgroup size greater than
84 * about 1K, this calculation can be done in 32-bit and can save some
87 nir_ssa_def
*id_x
, *id_y
, *id_z
;
88 id_x
= nir_umod(b
, local_index
,
89 nir_channel(b
, local_size
, 0));
90 id_y
= nir_umod(b
, nir_udiv(b
, local_index
,
91 nir_channel(b
, local_size
, 0)),
92 nir_channel(b
, local_size
, 1));
93 id_z
= nir_udiv(b
, local_index
,
94 nir_imul(b
, nir_channel(b
, local_size
, 0),
95 nir_channel(b
, local_size
, 1)));
96 return nir_u2u(b
, nir_vec3(b
, id_x
, id_y
, id_z
), bit_size
);
98 return nir_u2u(b
, nir_load_local_invocation_id(b
), bit_size
);
103 build_global_group_size(nir_builder
*b
, unsigned bit_size
)
105 nir_ssa_def
*group_size
= build_local_group_size(b
, bit_size
);
106 nir_ssa_def
*num_work_groups
= nir_u2u(b
, nir_load_num_work_groups(b
), bit_size
);
107 return nir_imul(b
, group_size
, num_work_groups
);
111 build_global_invocation_id(nir_builder
*b
, unsigned bit_size
)
113 /* From the GLSL man page for gl_GlobalInvocationID:
115 * "The value of gl_GlobalInvocationID is equal to
116 * gl_WorkGroupID * gl_WorkGroupSize + gl_LocalInvocationID"
118 nir_ssa_def
*group_size
= build_local_group_size(b
, bit_size
);
119 nir_ssa_def
*group_id
= nir_u2u(b
, nir_load_work_group_id(b
), bit_size
);
120 nir_ssa_def
*local_id
= build_local_invocation_id(b
, bit_size
);
122 return nir_iadd(b
, nir_imul(b
, group_id
, group_size
), local_id
);
126 lower_system_value_filter(const nir_instr
*instr
, const void *_state
)
128 return instr
->type
== nir_instr_type_intrinsic
;
/* Lower one load of a nir_var_system_value variable to the corresponding
 * system-value intrinsic, applying any option-driven lowerings along the way
 * (zero-based vertex id, computed invocation indices, etc.).
 *
 * Returns the replacement SSA value, or NULL to leave the instruction
 * untouched (not a system-value load, or no lowering requested).
 */
static nir_ssa_def *
lower_system_value_instr(nir_builder *b, nir_instr *instr, void *_state)
{
   nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
   if (intrin->intrinsic == nir_intrinsic_load_deref) {
      nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
      if (deref->mode != nir_var_system_value)
         return NULL;

      if (deref->deref_type != nir_deref_type_var) {
         /* The only one system value that is an array and that is
          * gl_SampleMask which is always an array of one element.
          */
         assert(deref->deref_type == nir_deref_type_array);
         deref = nir_deref_instr_parent(deref);
         assert(deref->deref_type == nir_deref_type_var);
         assert(deref->var->data.location == SYSTEM_VALUE_SAMPLE_MASK_IN);
      }
      nir_variable *var = deref->var;

      /* Preserve the bit size of the original load's destination. */
      unsigned bit_size = nir_dest_bit_size(intrin->dest);
      switch (var->data.location) {
      case SYSTEM_VALUE_GLOBAL_INVOCATION_ID:
         return build_global_invocation_id(b, bit_size);

      case SYSTEM_VALUE_GLOBAL_INVOCATION_INDEX: {
         nir_ssa_def *global_id = build_global_invocation_id(b, bit_size);
         nir_ssa_def *global_size = build_global_group_size(b, bit_size);

         /* index = id.x + ((id.y + (id.z * size.y)) * size.x) */
         nir_ssa_def *index;
         index = nir_imul(b, nir_channel(b, global_id, 2),
                             nir_channel(b, global_size, 1));
         index = nir_iadd(b, nir_channel(b, global_id, 1), index);
         index = nir_imul(b, nir_channel(b, global_size, 0), index);
         index = nir_iadd(b, nir_channel(b, global_id, 0), index);
         return index;
      }

      case SYSTEM_VALUE_LOCAL_INVOCATION_INDEX: {
         /* If lower_cs_local_index_from_id is true, then we derive the local
          * index from the local id.
          */
         if (!b->shader->options->lower_cs_local_index_from_id)
            return NULL;

         /* From the GLSL man page for gl_LocalInvocationIndex:
          *
          *    "The value of gl_LocalInvocationIndex is equal to
          *    gl_LocalInvocationID.z * gl_WorkGroupSize.x *
          *    gl_WorkGroupSize.y + gl_LocalInvocationID.y *
          *    gl_WorkGroupSize.x + gl_LocalInvocationID.x"
          */
         nir_ssa_def *local_id = nir_load_local_invocation_id(b);

         nir_ssa_def *size_x =
            nir_imm_int(b, b->shader->info.cs.local_size[0]);
         nir_ssa_def *size_y =
            nir_imm_int(b, b->shader->info.cs.local_size[1]);

         /* Because no hardware supports a local workgroup size greater than
          * about 1K, this calculation can be done in 32-bit and can save some
          * 64-bit arithmetic.
          */
         nir_ssa_def *index;
         index = nir_imul(b, nir_channel(b, local_id, 2),
                             nir_imul(b, size_x, size_y));
         index = nir_iadd(b, index,
                             nir_imul(b, nir_channel(b, local_id, 1), size_x));
         index = nir_iadd(b, index, nir_channel(b, local_id, 0));
         index = nir_u2u(b, index, bit_size);
         return index;
      }

      case SYSTEM_VALUE_LOCAL_INVOCATION_ID:
         return build_local_invocation_id(b, bit_size);

      case SYSTEM_VALUE_LOCAL_GROUP_SIZE:
         return build_local_group_size(b, bit_size);

      case SYSTEM_VALUE_VERTEX_ID:
         if (b->shader->options->vertex_id_zero_based) {
            return nir_iadd(b, nir_load_vertex_id_zero_base(b),
                               nir_load_first_vertex(b));
         }
         return NULL;

      case SYSTEM_VALUE_BASE_VERTEX:
         /**
          * From the OpenGL 4.6 (11.1.3.9 Shader Inputs) specification:
          *
          * "gl_BaseVertex holds the integer value passed to the baseVertex
          * parameter to the command that resulted in the current shader
          * invocation. In the case where the command has no baseVertex
          * parameter, the value of gl_BaseVertex is zero."
          */
         if (b->shader->options->lower_base_vertex) {
            /* NOTE(review): the AND presumably relies on
             * nir_load_is_indexed_draw yielding an all-ones/zero mask so a
             * non-indexed draw sees zero — confirm against the intrinsic's
             * definition.
             */
            return nir_iand(b, nir_load_is_indexed_draw(b),
                               nir_load_first_vertex(b));
         }
         return NULL;

      case SYSTEM_VALUE_HELPER_INVOCATION:
         if (b->shader->options->lower_helper_invocation) {
            /* helper = !(gl_SampleMaskIn & (1 << sample_id)) */
            nir_ssa_def *tmp;
            tmp = nir_ishl(b, nir_imm_int(b, 1),
                              nir_load_sample_id_no_per_sample(b));
            tmp = nir_iand(b,
                           nir_load_sample_mask_in(b),
                           tmp);
            return nir_inot(b, nir_i2b(b, tmp));
         }
         return NULL;

      case SYSTEM_VALUE_INSTANCE_INDEX:
         return nir_iadd(b, nir_load_instance_id(b),
                            nir_load_base_instance(b));

      case SYSTEM_VALUE_SUBGROUP_EQ_MASK:
      case SYSTEM_VALUE_SUBGROUP_GE_MASK:
      case SYSTEM_VALUE_SUBGROUP_GT_MASK:
      case SYSTEM_VALUE_SUBGROUP_LE_MASK:
      case SYSTEM_VALUE_SUBGROUP_LT_MASK: {
         /* Build the intrinsic by hand so the destination is typed from the
          * variable rather than from a fixed bit size.
          */
         nir_intrinsic_op op =
            nir_intrinsic_from_system_value(var->data.location);
         nir_intrinsic_instr *load = nir_intrinsic_instr_create(b->shader, op);
         nir_ssa_dest_init_for_type(&load->instr, &load->dest,
                                    var->type, NULL);
         load->num_components = load->dest.ssa.num_components;
         nir_builder_instr_insert(b, &load->instr);
         return &load->dest.ssa;
      }

      case SYSTEM_VALUE_DEVICE_INDEX:
         if (b->shader->options->lower_device_index_to_zero)
            return nir_imm_int(b, 0);
         break;

      case SYSTEM_VALUE_GLOBAL_GROUP_SIZE:
         return build_global_group_size(b, bit_size);

      default:
         break;
      }

      /* Default path: replace the deref load with the matching system-value
       * intrinsic at the original destination bit size.
       */
      nir_intrinsic_op sysval_op =
         nir_intrinsic_from_system_value(var->data.location);
      return nir_load_system_value(b, sysval_op, 0,
                                   intrin->dest.ssa.bit_size);
   }

   return NULL;
}
290 nir_lower_system_values(nir_shader
*shader
)
292 bool progress
= nir_shader_lower_instructions(shader
,
293 lower_system_value_filter
,
294 lower_system_value_instr
,
297 /* We're going to delete the variables so we need to clean up all those
298 * derefs we left lying around.
301 nir_remove_dead_derefs(shader
);
303 exec_list_make_empty(&shader
->system_values
);