/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
25 #include "glsl/nir/nir_builder.h"
27 struct apply_pipeline_layout_state
{
32 const struct anv_pipeline_layout
*layout
;
38 get_surface_index(unsigned set
, unsigned binding
,
39 struct apply_pipeline_layout_state
*state
)
41 assert(set
< state
->layout
->num_sets
);
42 struct anv_descriptor_set_layout
*set_layout
=
43 state
->layout
->set
[set
].layout
;
45 assert(binding
< set_layout
->binding_count
);
47 assert(set_layout
->binding
[binding
].stage
[state
->stage
].surface_index
>= 0);
49 uint32_t surface_index
=
50 state
->layout
->set
[set
].stage
[state
->stage
].surface_start
+
51 set_layout
->binding
[binding
].stage
[state
->stage
].surface_index
;
53 assert(surface_index
< state
->layout
->stage
[state
->stage
].surface_count
);
59 lower_res_index_intrinsic(nir_intrinsic_instr
*intrin
,
60 struct apply_pipeline_layout_state
*state
)
62 nir_builder
*b
= &state
->builder
;
64 b
->cursor
= nir_before_instr(&intrin
->instr
);
66 uint32_t set
= intrin
->const_index
[0];
67 uint32_t binding
= intrin
->const_index
[1];
69 uint32_t surface_index
= get_surface_index(set
, binding
, state
);
71 nir_const_value
*const_block_idx
=
72 nir_src_as_const_value(intrin
->src
[0]);
74 nir_ssa_def
*block_index
;
75 if (const_block_idx
) {
76 block_index
= nir_imm_int(b
, surface_index
+ const_block_idx
->u
[0]);
78 block_index
= nir_iadd(b
, nir_imm_int(b
, surface_index
),
79 nir_ssa_for_src(b
, intrin
->src
[0], 1));
82 assert(intrin
->dest
.is_ssa
);
83 nir_ssa_def_rewrite_uses(&intrin
->dest
.ssa
, nir_src_for_ssa(block_index
));
84 nir_instr_remove(&intrin
->instr
);
88 lower_tex(nir_tex_instr
*tex
, struct apply_pipeline_layout_state
*state
)
90 /* No one should have come by and lowered it already */
93 unsigned set
= tex
->sampler
->var
->data
.descriptor_set
;
94 unsigned binding
= tex
->sampler
->var
->data
.binding
;
96 tex
->sampler_index
= get_surface_index(set
, binding
, state
);
98 if (tex
->sampler
->deref
.child
) {
99 assert(tex
->sampler
->deref
.child
->deref_type
== nir_deref_type_array
);
100 nir_deref_array
*deref_array
=
101 nir_deref_as_array(tex
->sampler
->deref
.child
);
103 tex
->sampler_index
+= deref_array
->base_offset
;
105 if (deref_array
->deref_array_type
== nir_deref_array_type_indirect
) {
106 nir_tex_src
*new_srcs
= rzalloc_array(tex
, nir_tex_src
,
109 for (unsigned i
= 0; i
< tex
->num_srcs
; i
++) {
110 new_srcs
[i
].src_type
= tex
->src
[i
].src_type
;
111 nir_instr_move_src(&tex
->instr
, &new_srcs
[i
].src
, &tex
->src
[i
].src
);
114 ralloc_free(tex
->src
);
117 /* Now we can go ahead and move the source over to being a
118 * first-class texture source.
120 tex
->src
[tex
->num_srcs
].src_type
= nir_tex_src_sampler_offset
;
122 nir_instr_move_src(&tex
->instr
, &tex
->src
[tex
->num_srcs
- 1].src
,
123 &deref_array
->indirect
);
131 apply_pipeline_layout_block(nir_block
*block
, void *void_state
)
133 struct apply_pipeline_layout_state
*state
= void_state
;
135 nir_foreach_instr_safe(block
, instr
) {
136 switch (instr
->type
) {
137 case nir_instr_type_intrinsic
: {
138 nir_intrinsic_instr
*intrin
= nir_instr_as_intrinsic(instr
);
139 if (intrin
->intrinsic
== nir_intrinsic_vulkan_resource_index
) {
140 lower_res_index_intrinsic(intrin
, state
);
141 state
->progress
= true;
145 case nir_instr_type_tex
:
146 lower_tex(nir_instr_as_tex(instr
), state
);
147 /* All texture instructions need lowering */
148 state
->progress
= true;
159 anv_nir_apply_pipeline_layout(nir_shader
*shader
,
160 const struct anv_pipeline_layout
*layout
)
162 struct apply_pipeline_layout_state state
= {
164 .stage
= anv_vk_shader_stage_for_mesa_stage(shader
->stage
),
168 nir_foreach_overload(shader
, overload
) {
169 if (overload
->impl
) {
170 nir_builder_init(&state
.builder
, overload
->impl
);
171 nir_foreach_block(overload
->impl
, apply_pipeline_layout_block
, &state
);
172 nir_metadata_preserve(overload
->impl
, nir_metadata_block_index
|
173 nir_metadata_dominance
);
177 return state
.progress
;