src/vulkan/anv_nir_apply_dynamic_offsets.c (mesa.git)
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
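
/*
 * Resolve dynamic buffer offsets: for each UBO load whose binding refers to a
 * dynamic buffer descriptor, load the buffer's dynamic offset from the
 * uniforms reserved for it (backed by anv_push_constants::dynamic_offsets)
 * and rewrite the load as an indirect UBO load with that offset applied.
 */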

#include "anv_nir.h"
#include "glsl/nir/nir_builder.h"

struct apply_dynamic_offsets_state {
   nir_shader *shader;
   nir_builder builder;

   VkShaderStage stage;
   struct anv_pipeline_layout *layout;

   /* First uniform slot used for the dynamic offsets */
   uint32_t indices_start;
};

static bool
apply_dynamic_offsets_block(nir_block *block, void *void_state)
{
   struct apply_dynamic_offsets_state *state = void_state;
   struct anv_descriptor_set_layout *set_layout;
   const struct anv_descriptor_slot *slot;

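   /* Walk the block looking for UBO loads and note the descriptor set and
    * binding each one uses.
    */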
   nir_foreach_instr_safe(block, instr) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

      bool has_indirect = false;
      uint32_t set, binding;
      switch (intrin->intrinsic) {
      case nir_intrinsic_load_ubo_indirect:
         has_indirect = true;
         /* fallthrough */
      case nir_intrinsic_load_ubo: {
         set = intrin->const_index[0];

         nir_const_value *const_binding = nir_src_as_const_value(intrin->src[0]);
         if (const_binding) {
            binding = const_binding->u[0];
         } else {
            assert(0 && "need more info from the ir for this.");
         }
         break;
      }
      default:
         continue; /* the loop */
      }

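      /* Find this binding's slot in the pipeline layout.  Bindings that are
       * not dynamic buffers have dynamic_slot < 0 and are left untouched.
       */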
      set_layout = state->layout->set[set].layout;
      slot = &set_layout->stage[state->stage].surface_start[binding];
      if (slot->dynamic_slot < 0)
         continue;

      uint32_t dynamic_index = state->layout->set[set].dynamic_offset_start +
                               slot->dynamic_slot;

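      /* Emit a load_uniform that reads this buffer's dynamic offset from the
       * uniform slots reserved in anv_nir_apply_dynamic_offsets() below.
       */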
      state->builder.cursor = nir_before_instr(&intrin->instr);

      nir_intrinsic_instr *offset_load =
         nir_intrinsic_instr_create(state->shader, nir_intrinsic_load_uniform);
      offset_load->num_components = 1;
      offset_load->const_index[0] = state->indices_start + dynamic_index;
      offset_load->const_index[1] = 0;
      nir_ssa_dest_init(&offset_load->instr, &offset_load->dest, 1, NULL);
      nir_builder_instr_insert(&state->builder, &offset_load->instr);

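      /* If the original load already had an indirect offset, fold it into the
       * dynamic offset.
       */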
      nir_ssa_def *offset = &offset_load->dest.ssa;
      if (has_indirect) {
         assert(intrin->src[1].is_ssa);
         offset = nir_iadd(&state->builder, intrin->src[1].ssa, offset);
      }

      assert(intrin->dest.is_ssa);

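      /* Rebuild the load as load_ubo_indirect with the combined offset, then
       * rewrite all uses of the old load and remove it.
       */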
      nir_intrinsic_instr *new_load =
         nir_intrinsic_instr_create(state->shader,
                                    nir_intrinsic_load_ubo_indirect);
      new_load->num_components = intrin->num_components;
      new_load->const_index[0] = intrin->const_index[0];
      new_load->const_index[1] = intrin->const_index[1];
      nir_src_copy(&new_load->src[0], &intrin->src[0], &new_load->instr);
      new_load->src[1] = nir_src_for_ssa(offset);
      nir_ssa_dest_init(&new_load->instr, &new_load->dest,
                        intrin->dest.ssa.num_components,
                        intrin->dest.ssa.name);
      nir_builder_instr_insert(&state->builder, &new_load->instr);

      nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
                               nir_src_for_ssa(&new_load->dest.ssa));

      nir_instr_remove(&intrin->instr);
   }

   return true;
}

void
anv_nir_apply_dynamic_offsets(struct anv_pipeline *pipeline,
                              nir_shader *shader,
                              struct brw_stage_prog_data *prog_data)
{
   struct apply_dynamic_offsets_state state = {
      .shader = shader,
      .stage = anv_vk_shader_stage_for_mesa_stage(shader->stage),
      .layout = pipeline->layout,
      .indices_start = shader->num_uniforms,
   };

   if (!state.layout || !state.layout->stage[state.stage].has_dynamic_offsets)
      return;

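   /* Rewrite the UBO loads in every function implementation.  Only the
    * instructions change, so block indices and dominance information remain
    * valid.
    */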
   nir_foreach_overload(shader, overload) {
      if (overload->impl) {
         nir_builder_init(&state.builder, overload->impl);
         nir_foreach_block(overload->impl, apply_dynamic_offsets_block, &state);
         nir_metadata_preserve(overload->impl, nir_metadata_block_index |
                                               nir_metadata_dominance);
      }
   }

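   /* Reserve MAX_DYNAMIC_BUFFERS uniform slots for the dynamic offsets.  Each
    * param entry is the address of the corresponding dynamic_offsets entry in
    * a NULL anv_push_constants, i.e. its byte offset within the push constant
    * block.
    */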
   struct anv_push_constants *null_data = NULL;
   for (unsigned i = 0; i < MAX_DYNAMIC_BUFFERS; i++)
      prog_data->param[i + shader->num_uniforms] =
         (const gl_constant_value *)&null_data->dynamic_offsets[i];

   shader->num_uniforms += MAX_DYNAMIC_BUFFERS;
}