2 * Copyright © 2016 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 #include "nir/nir_builder.h"
28 * This file implements the lowering required for VK_KHR_multiview. We
29 * implement multiview using instanced rendering. The number of instances in
30 * each draw call is multiplied by the number of views in the subpass. Then,
31 * in the shader, we divide gl_InstanceId by the number of views and use
32 * gl_InstanceId % view_count to compute the actual ViewIndex.
35 struct lower_multiview_state
{
40 nir_ssa_def
*instance_id
;
41 nir_ssa_def
*view_index
;
45 build_instance_id(struct lower_multiview_state
*state
)
47 assert(state
->builder
.shader
->info
.stage
== MESA_SHADER_VERTEX
);
49 if (state
->instance_id
== NULL
) {
50 nir_builder
*b
= &state
->builder
;
52 b
->cursor
= nir_before_block(nir_start_block(b
->impl
));
54 /* We use instancing for implementing multiview. The actual instance id
55 * is given by dividing instance_id by the number of views in this
59 nir_idiv(b
, nir_load_instance_id(b
),
60 nir_imm_int(b
, _mesa_bitcount(state
->view_mask
)));
63 return state
->instance_id
;
67 build_view_index(struct lower_multiview_state
*state
)
69 if (state
->view_index
== NULL
) {
70 nir_builder
*b
= &state
->builder
;
72 b
->cursor
= nir_before_block(nir_start_block(b
->impl
));
74 assert(state
->view_mask
!= 0);
75 if (_mesa_bitcount(state
->view_mask
) == 1) {
76 /* Set the view index directly. */
77 state
->view_index
= nir_imm_int(b
, ffs(state
->view_mask
) - 1);
78 } else if (state
->builder
.shader
->info
.stage
== MESA_SHADER_VERTEX
) {
79 /* We only support 16 viewports */
80 assert((state
->view_mask
& 0xffff0000) == 0);
82 /* We use instancing for implementing multiview. The compacted view
83 * id is given by instance_id % view_count. We then have to convert
84 * that to an actual view id.
86 nir_ssa_def
*compacted
=
87 nir_umod(b
, nir_load_instance_id(b
),
88 nir_imm_int(b
, _mesa_bitcount(state
->view_mask
)));
90 if (util_is_power_of_two_or_zero(state
->view_mask
+ 1)) {
91 /* If we have a full view mask, then compacted is what we want */
92 state
->view_index
= compacted
;
94 /* Now we define a map from compacted view index to the actual
95 * view index that's based on the view_mask. The map is given by
96 * 16 nibbles, each of which is a value from 0 to 15.
100 for_each_bit(bit
, state
->view_mask
) {
102 remap
|= (uint64_t)bit
<< (i
++ * 4);
105 nir_ssa_def
*shift
= nir_imul(b
, compacted
, nir_imm_int(b
, 4));
107 /* One of these days, when we have int64 everywhere, this will be
110 nir_ssa_def
*shifted
;
111 if (remap
<= UINT32_MAX
) {
112 shifted
= nir_ushr(b
, nir_imm_int(b
, remap
), shift
);
114 nir_ssa_def
*shifted_low
=
115 nir_ushr(b
, nir_imm_int(b
, remap
), shift
);
116 nir_ssa_def
*shifted_high
=
117 nir_ushr(b
, nir_imm_int(b
, remap
>> 32),
118 nir_isub(b
, shift
, nir_imm_int(b
, 32)));
119 shifted
= nir_bcsel(b
, nir_ilt(b
, shift
, nir_imm_int(b
, 32)),
120 shifted_low
, shifted_high
);
122 state
->view_index
= nir_iand(b
, shifted
, nir_imm_int(b
, 0xf));
125 const struct glsl_type
*type
= glsl_int_type();
126 if (b
->shader
->info
.stage
== MESA_SHADER_TESS_CTRL
||
127 b
->shader
->info
.stage
== MESA_SHADER_GEOMETRY
)
128 type
= glsl_array_type(type
, 1);
130 nir_variable
*idx_var
=
131 nir_variable_create(b
->shader
, nir_var_shader_in
,
133 idx_var
->data
.location
= VARYING_SLOT_VIEW_INDEX
;
134 if (b
->shader
->info
.stage
== MESA_SHADER_FRAGMENT
)
135 idx_var
->data
.interpolation
= INTERP_MODE_FLAT
;
137 nir_deref_instr
*deref
= nir_build_deref_var(b
, idx_var
);
138 if (glsl_type_is_array(type
))
139 deref
= nir_build_deref_array(b
, deref
, nir_imm_int(b
, 0));
141 state
->view_index
= nir_load_deref(b
, deref
);
145 return state
->view_index
;
149 anv_nir_lower_multiview(nir_shader
*shader
, uint32_t view_mask
)
151 assert(shader
->info
.stage
!= MESA_SHADER_COMPUTE
);
153 /* If multiview isn't enabled, we have nothing to do. */
157 struct lower_multiview_state state
= {
158 .view_mask
= view_mask
,
161 /* This pass assumes a single entrypoint */
162 nir_function_impl
*entrypoint
= nir_shader_get_entrypoint(shader
);
164 nir_builder_init(&state
.builder
, entrypoint
);
166 bool progress
= false;
167 nir_foreach_block(block
, entrypoint
) {
168 nir_foreach_instr_safe(instr
, block
) {
169 if (instr
->type
!= nir_instr_type_intrinsic
)
172 nir_intrinsic_instr
*load
= nir_instr_as_intrinsic(instr
);
174 if (load
->intrinsic
!= nir_intrinsic_load_instance_id
&&
175 load
->intrinsic
!= nir_intrinsic_load_view_index
)
178 assert(load
->dest
.is_ssa
);
181 if (load
->intrinsic
== nir_intrinsic_load_instance_id
) {
182 value
= build_instance_id(&state
);
184 assert(load
->intrinsic
== nir_intrinsic_load_view_index
);
185 value
= build_view_index(&state
);
188 nir_ssa_def_rewrite_uses(&load
->dest
.ssa
, nir_src_for_ssa(value
));
190 nir_instr_remove(&load
->instr
);
195 /* The view index is available in all stages but the instance id is only
196 * available in the VS. If it's not a fragment shader, we need to pass
197 * the view index on to the next stage.
199 if (shader
->info
.stage
!= MESA_SHADER_FRAGMENT
) {
200 nir_ssa_def
*view_index
= build_view_index(&state
);
202 nir_builder
*b
= &state
.builder
;
204 assert(view_index
->parent_instr
->block
== nir_start_block(entrypoint
));
205 b
->cursor
= nir_after_instr(view_index
->parent_instr
);
207 /* Unless there is only one possible view index (that would be set
208 * directly), pass it to the next stage. */
209 if (_mesa_bitcount(state
.view_mask
) != 1) {
210 nir_variable
*view_index_out
=
211 nir_variable_create(shader
, nir_var_shader_out
,
212 glsl_int_type(), "view index");
213 view_index_out
->data
.location
= VARYING_SLOT_VIEW_INDEX
;
214 nir_store_var(b
, view_index_out
, view_index
, 0x1);
217 nir_variable
*layer_id_out
=
218 nir_variable_create(shader
, nir_var_shader_out
,
219 glsl_int_type(), "layer ID");
220 layer_id_out
->data
.location
= VARYING_SLOT_LAYER
;
221 nir_store_var(b
, layer_id_out
, view_index
, 0x1);
227 nir_metadata_preserve(entrypoint
, nir_metadata_block_index
|
228 nir_metadata_dominance
);