/*
 * Copyright (c) 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "brw_nir.h"
#include "compiler/nir/nir_builder.h"

struct lower_intrinsics_state {
   nir_shader *nir;
   unsigned dispatch_width;

   nir_function_impl *impl;

   bool progress;
   nir_builder builder;
   unsigned local_workgroup_size;
};

static bool
lower_cs_intrinsics_convert_block(struct lower_intrinsics_state *state,
                                  nir_block *block)
{
   bool progress = false;
   nir_builder *b = &state->builder;
   nir_shader *nir = state->nir;

   /* Reuse calculated values inside the block. */
   nir_ssa_def *local_index = NULL;
   nir_ssa_def *local_id = NULL;

   nir_foreach_instr_safe(instr, block) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrinsic = nir_instr_as_intrinsic(instr);

      b->cursor = nir_after_instr(&intrinsic->instr);

      nir_ssa_def *sysval;
      switch (intrinsic->intrinsic) {
      case nir_intrinsic_barrier: {
         /* Our HW barrier instruction doesn't do a memory barrier for us but
          * the GLSL barrier() intrinsic does for shared memory.  Insert a
          * shared memory barrier before every barrier().
          */
         b->cursor = nir_before_instr(&intrinsic->instr);

         nir_intrinsic_instr *shared_barrier =
            nir_intrinsic_instr_create(b->shader,
                                       nir_intrinsic_memory_barrier_shared);

         nir_builder_instr_insert(b, &shared_barrier->instr);
         continue;
      }

      case nir_intrinsic_load_local_invocation_index:
      case nir_intrinsic_load_local_invocation_id: {
         /* First time we are using these, so let's calculate them. */
         if (!local_index) {
            assert(!local_id);

            nir_ssa_def *subgroup_id;
            if (state->local_workgroup_size <= state->dispatch_width)
               subgroup_id = nir_imm_int(b, 0);
            else
               subgroup_id = nir_load_subgroup_id(b);

            nir_ssa_def *thread_local_id =
               nir_imul_imm(b, subgroup_id, state->dispatch_width);
            nir_ssa_def *channel = nir_load_subgroup_invocation(b);
            nir_ssa_def *linear = nir_iadd(b, channel, thread_local_id);
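
            /* "linear" is this invocation's flat index within the workgroup:
             * subgroups cover the workgroup back to back, dispatch_width
             * invocations at a time, so
             *
             *    linear = subgroup_id * dispatch_width + channel
             *
             * e.g. channel 3 of subgroup 2 at SIMD16 is 2 * 16 + 3 = 35.
             */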

            nir_ssa_def *size_x = nir_imm_int(b, nir->info.cs.local_size[0]);
            nir_ssa_def *size_y = nir_imm_int(b, nir->info.cs.local_size[1]);

            /* The local invocation index and ID must respect the following
             *
             *    gl_LocalInvocationID.x =
             *       gl_LocalInvocationIndex % gl_WorkGroupSize.x;
             *    gl_LocalInvocationID.y =
             *       (gl_LocalInvocationIndex / gl_WorkGroupSize.x) %
             *       gl_WorkGroupSize.y;
             *    gl_LocalInvocationID.z =
             *       (gl_LocalInvocationIndex /
             *        (gl_WorkGroupSize.x * gl_WorkGroupSize.y)) %
             *       gl_WorkGroupSize.z;
             *
             * However, the final % gl_WorkGroupSize.z does nothing unless we
             * accidentally end up with a gl_LocalInvocationIndex that is too
             * large, so it can safely be omitted.
             */
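
            /* For example, with gl_WorkGroupSize = (4, 2, 2) and
             * gl_LocalInvocationIndex = 13:
             *
             *    ID.x = 13 % 4       = 1
             *    ID.y = (13 / 4) % 2 = 1
             *    ID.z = 13 / (4 * 2) = 1
             *
             * and the omitted final % gl_WorkGroupSize.z would be
             * 1 % 2 = 1, confirming it is a no-op for in-range indices.
             */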

            if (state->nir->info.cs.derivative_group != DERIVATIVE_GROUP_QUADS) {
               /* If we are not grouping in quads, just set the local
                * invocation index linearly, and calculate the local
                * invocation ID from that.
                */
               local_index = linear;

               nir_ssa_def *id_x, *id_y, *id_z;
               id_x = nir_umod(b, local_index, size_x);
               id_y = nir_umod(b, nir_udiv(b, local_index, size_x), size_y);
               id_z = nir_udiv(b, local_index, nir_imul(b, size_x, size_y));
               local_id = nir_vec3(b, id_x, id_y, id_z);
            } else {
               /* For quads, first we figure out which 2x2 grid cell the
                * invocation belongs to -- treating extra Z layers as just
                * more rows.  Then we map that into the local invocation ID
                * (trivial) and the local invocation index.  Skipping Z
                * simplifies the index calculation.
                */
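
               /* For example, with gl_WorkGroupSize.x = 4 the first eight
                * linear invocations map to
                *
                *    linear:  0     1     2     3     4     5     6     7
                *    (x, y): (0,0) (1,0) (0,1) (1,1) (2,0) (3,0) (2,1) (3,1)
                *
                * so each consecutive group of four invocations forms one
                * 2x2 quad of the workgroup, as derivatives require.
                */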

               nir_ssa_def *one = nir_imm_int(b, 1);
               nir_ssa_def *double_size_x = nir_ishl(b, size_x, one);

               /* ID within a pair of rows, where each group of 4 is a 2x2 quad. */
               nir_ssa_def *row_pair_id = nir_umod(b, linear, double_size_x);
               nir_ssa_def *y_row_pairs = nir_udiv(b, linear, double_size_x);

               nir_ssa_def *x =
                  nir_ior(b,
                          nir_iand(b, row_pair_id, one),
                          nir_iand(b, nir_ishr(b, row_pair_id, one),
                                   nir_imm_int(b, 0xfffffffe)));
               nir_ssa_def *y =
                  nir_ior(b,
                          nir_ishl(b, y_row_pairs, one),
                          nir_iand(b, nir_ishr(b, row_pair_id, one), one));

               local_id = nir_vec3(b, x,
                                   nir_umod(b, y, size_y),
                                   nir_udiv(b, y, size_y));
               local_index = nir_iadd(b, x, nir_imul(b, y, size_x));
            }
         }

         assert(local_id);
         assert(local_index);

         if (intrinsic->intrinsic == nir_intrinsic_load_local_invocation_id)
            sysval = local_id;
         else
            sysval = local_index;
         break;
      }

      case nir_intrinsic_load_subgroup_id:
         if (state->local_workgroup_size > 8)
            continue;

         /* For small workgroup sizes (at most 8, the minimum SIMD width),
          * the whole workgroup fits in a single subgroup, so we know
          * subgroup_id will be zero.
          */
         sysval = nir_imm_int(b, 0);
         break;

      case nir_intrinsic_load_num_subgroups: {
         unsigned local_workgroup_size =
            nir->info.cs.local_size[0] * nir->info.cs.local_size[1] *
            nir->info.cs.local_size[2];
         /* e.g. an 8x8x1 workgroup at SIMD16: DIV_ROUND_UP(64, 16) = 4. */
         unsigned num_subgroups =
            DIV_ROUND_UP(local_workgroup_size, state->dispatch_width);
         sysval = nir_imm_int(b, num_subgroups);
         break;
      }

      default:
         continue;
      }

      nir_ssa_def_rewrite_uses(&intrinsic->dest.ssa, nir_src_for_ssa(sysval));
      nir_instr_remove(&intrinsic->instr);

      state->progress = true;
      progress = true;
   }

   return progress;
}

static void
lower_cs_intrinsics_convert_impl(struct lower_intrinsics_state *state)
{
   nir_builder_init(&state->builder, state->impl);

   nir_foreach_block(block, state->impl) {
      lower_cs_intrinsics_convert_block(state, block);
   }

   nir_metadata_preserve(state->impl,
                         nir_metadata_block_index | nir_metadata_dominance);
}

bool
brw_nir_lower_cs_intrinsics(nir_shader *nir,
                            unsigned dispatch_width)
{
   assert(nir->info.stage == MESA_SHADER_COMPUTE);

   struct lower_intrinsics_state state = {
      .nir = nir,
      .dispatch_width = dispatch_width,
   };

   assert(!nir->info.cs.local_size_variable);
   state.local_workgroup_size = nir->info.cs.local_size[0] *
                                nir->info.cs.local_size[1] *
                                nir->info.cs.local_size[2];

   /* Constraints from NV_compute_shader_derivatives. */
   if (nir->info.cs.derivative_group == DERIVATIVE_GROUP_QUADS) {
      assert(nir->info.cs.local_size[0] % 2 == 0);
      assert(nir->info.cs.local_size[1] % 2 == 0);
   } else if (nir->info.cs.derivative_group == DERIVATIVE_GROUP_LINEAR) {
      assert(state.local_workgroup_size % 4 == 0);
   }

   nir_foreach_function(function, nir) {
      if (function->impl) {
         state.impl = function->impl;
         lower_cs_intrinsics_convert_impl(&state);
      }
   }

   return state.progress;
}
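
/* A minimal usage sketch (hypothetical call site; the real driver plumbing
 * in the brw compiler backend may differ).  The SIMD16 dispatch width and
 * the nir_opt_dce() cleanup here are illustrative assumptions, not
 * requirements of this pass:
 *
 *    assert(nir->info.stage == MESA_SHADER_COMPUTE);
 *    if (brw_nir_lower_cs_intrinsics(nir, 16))
 *       nir_opt_dce(nir);
 */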