/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
25 #include "glsl/nir/nir_builder.h"
27 struct apply_pipeline_layout_state
{
32 const struct anv_pipeline_layout
*layout
;
37 static nir_intrinsic_op
38 lowered_op(nir_intrinsic_op op
)
41 case nir_intrinsic_load_ubo_vk
:
42 return nir_intrinsic_load_ubo
;
43 case nir_intrinsic_load_ubo_vk_indirect
:
44 return nir_intrinsic_load_ubo_indirect
;
45 case nir_intrinsic_load_ssbo_vk
:
46 return nir_intrinsic_load_ssbo
;
47 case nir_intrinsic_load_ssbo_vk_indirect
:
48 return nir_intrinsic_load_ssbo_indirect
;
49 case nir_intrinsic_store_ssbo_vk
:
50 return nir_intrinsic_store_ssbo
;
51 case nir_intrinsic_store_ssbo_vk_indirect
:
52 return nir_intrinsic_store_ssbo_indirect
;
54 unreachable("Invalid intrinsic for lowering");
59 get_surface_index(unsigned set
, unsigned binding
,
60 struct apply_pipeline_layout_state
*state
)
62 assert(set
< state
->layout
->num_sets
);
63 struct anv_descriptor_set_layout
*set_layout
=
64 state
->layout
->set
[set
].layout
;
66 assert(binding
< set_layout
->binding_count
);
68 assert(set_layout
->binding
[binding
].stage
[state
->stage
].surface_index
>= 0);
70 uint32_t surface_index
=
71 state
->layout
->set
[set
].stage
[state
->stage
].surface_start
+
72 set_layout
->binding
[binding
].stage
[state
->stage
].surface_index
;
74 assert(surface_index
< state
->layout
->stage
[state
->stage
].surface_count
);
80 try_lower_intrinsic(nir_intrinsic_instr
*intrin
,
81 struct apply_pipeline_layout_state
*state
)
83 nir_builder
*b
= &state
->builder
;
86 switch (intrin
->intrinsic
) {
87 case nir_intrinsic_load_ubo_vk
:
88 case nir_intrinsic_load_ubo_vk_indirect
:
89 case nir_intrinsic_load_ssbo_vk
:
90 case nir_intrinsic_load_ssbo_vk_indirect
:
93 case nir_intrinsic_store_ssbo_vk
:
94 case nir_intrinsic_store_ssbo_vk_indirect
:
101 b
->cursor
= nir_before_instr(&intrin
->instr
);
103 uint32_t set
= intrin
->const_index
[0];
104 uint32_t binding
= intrin
->const_index
[1];
106 uint32_t surface_index
= get_surface_index(set
, binding
, state
);
108 nir_const_value
*const_block_idx
=
109 nir_src_as_const_value(intrin
->src
[block_idx_src
]);
111 nir_ssa_def
*block_index
;
112 if (const_block_idx
) {
113 block_index
= nir_imm_int(b
, surface_index
+ const_block_idx
->u
[0]);
115 block_index
= nir_iadd(b
, nir_imm_int(b
, surface_index
),
116 nir_ssa_for_src(b
, intrin
->src
[block_idx_src
], 1));
119 nir_instr_rewrite_src(&intrin
->instr
, &intrin
->src
[block_idx_src
],
120 nir_src_for_ssa(block_index
));
122 intrin
->intrinsic
= lowered_op(intrin
->intrinsic
);
123 /* Shift the offset indices down */
124 intrin
->const_index
[0] = intrin
->const_index
[2];
125 intrin
->const_index
[1] = intrin
->const_index
[3];
131 lower_tex(nir_tex_instr
*tex
, struct apply_pipeline_layout_state
*state
)
133 /* No one should have come by and lowered it already */
134 assert(tex
->sampler
);
136 unsigned set
= tex
->sampler
->var
->data
.descriptor_set
;
137 unsigned binding
= tex
->sampler
->var
->data
.binding
;
139 tex
->sampler_index
= get_surface_index(set
, binding
, state
);
141 if (tex
->sampler
->deref
.child
) {
142 assert(tex
->sampler
->deref
.child
->deref_type
== nir_deref_type_array
);
143 nir_deref_array
*deref_array
=
144 nir_deref_as_array(tex
->sampler
->deref
.child
);
146 tex
->sampler_index
+= deref_array
->base_offset
;
148 if (deref_array
->deref_array_type
== nir_deref_array_type_indirect
) {
149 nir_tex_src
*new_srcs
= rzalloc_array(tex
, nir_tex_src
,
152 for (unsigned i
= 0; i
< tex
->num_srcs
; i
++) {
153 new_srcs
[i
].src_type
= tex
->src
[i
].src_type
;
154 nir_instr_move_src(&tex
->instr
, &new_srcs
[i
].src
, &tex
->src
[i
].src
);
157 ralloc_free(tex
->src
);
160 /* Now we can go ahead and move the source over to being a
161 * first-class texture source.
163 tex
->src
[tex
->num_srcs
].src_type
= nir_tex_src_sampler_offset
;
165 nir_instr_move_src(&tex
->instr
, &tex
->src
[tex
->num_srcs
- 1].src
,
166 &deref_array
->indirect
);
174 apply_pipeline_layout_block(nir_block
*block
, void *void_state
)
176 struct apply_pipeline_layout_state
*state
= void_state
;
178 nir_foreach_instr_safe(block
, instr
) {
179 switch (instr
->type
) {
180 case nir_instr_type_intrinsic
:
181 if (try_lower_intrinsic(nir_instr_as_intrinsic(instr
), state
))
182 state
->progress
= true;
184 case nir_instr_type_tex
:
185 lower_tex(nir_instr_as_tex(instr
), state
);
186 /* All texture instructions need lowering */
187 state
->progress
= true;
198 anv_nir_apply_pipeline_layout(nir_shader
*shader
,
199 const struct anv_pipeline_layout
*layout
)
201 struct apply_pipeline_layout_state state
= {
203 .stage
= anv_vk_shader_stage_for_mesa_stage(shader
->stage
),
207 nir_foreach_overload(shader
, overload
) {
208 if (overload
->impl
) {
209 nir_builder_init(&state
.builder
, overload
->impl
);
210 nir_foreach_block(overload
->impl
, apply_pipeline_layout_block
, &state
);
211 nir_metadata_preserve(overload
->impl
, nir_metadata_block_index
|
212 nir_metadata_dominance
);
216 return state
.progress
;