zink: handle more glsl->spirv builtin translation
[mesa.git] / src / gallium / drivers / zink / zink_draw.c
1 #include "zink_compiler.h"
2 #include "zink_context.h"
3 #include "zink_program.h"
4 #include "zink_resource.h"
5 #include "zink_screen.h"
6 #include "zink_state.h"
7
8 #include "indices/u_primconvert.h"
9 #include "util/hash_table.h"
10 #include "util/u_debug.h"
11 #include "util/u_helpers.h"
12 #include "util/u_inlines.h"
13 #include "util/u_prim.h"
14
15 static VkDescriptorSet
16 allocate_descriptor_set(struct zink_screen *screen,
17 struct zink_batch *batch,
18 struct zink_gfx_program *prog)
19 {
20 assert(batch->descs_left >= prog->num_descriptors);
21 VkDescriptorSetAllocateInfo dsai;
22 memset((void *)&dsai, 0, sizeof(dsai));
23 dsai.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
24 dsai.pNext = NULL;
25 dsai.descriptorPool = batch->descpool;
26 dsai.descriptorSetCount = 1;
27 dsai.pSetLayouts = &prog->dsl;
28
29 VkDescriptorSet desc_set;
30 if (vkAllocateDescriptorSets(screen->dev, &dsai, &desc_set) != VK_SUCCESS) {
31 debug_printf("ZINK: failed to allocate descriptor set :/");
32 return VK_NULL_HANDLE;
33 }
34
35 batch->descs_left -= prog->num_descriptors;
36 return desc_set;
37 }
38
39 static void
40 zink_bind_vertex_buffers(struct zink_batch *batch, struct zink_context *ctx)
41 {
42 VkBuffer buffers[PIPE_MAX_ATTRIBS];
43 VkDeviceSize buffer_offsets[PIPE_MAX_ATTRIBS];
44 const struct zink_vertex_elements_state *elems = ctx->element_state;
45 for (unsigned i = 0; i < elems->hw_state.num_bindings; i++) {
46 struct pipe_vertex_buffer *vb = ctx->buffers + ctx->element_state->binding_map[i];
47 assert(vb);
48 if (vb->buffer.resource) {
49 struct zink_resource *res = zink_resource(vb->buffer.resource);
50 buffers[i] = res->buffer;
51 buffer_offsets[i] = vb->buffer_offset;
52 zink_batch_reference_resoure(batch, res);
53 } else {
54 buffers[i] = zink_resource(ctx->dummy_buffer)->buffer;
55 buffer_offsets[i] = 0;
56 }
57 }
58
59 if (elems->hw_state.num_bindings > 0)
60 vkCmdBindVertexBuffers(batch->cmdbuf, 0,
61 elems->hw_state.num_bindings,
62 buffers, buffer_offsets);
63 }
64
65 static struct zink_gfx_program *
66 get_gfx_program(struct zink_context *ctx)
67 {
68 if (ctx->dirty_program) {
69 struct hash_entry *entry = _mesa_hash_table_search(ctx->program_cache,
70 ctx->gfx_stages);
71 if (!entry) {
72 struct zink_gfx_program *prog;
73 prog = zink_create_gfx_program(zink_screen(ctx->base.screen),
74 ctx->gfx_stages);
75 entry = _mesa_hash_table_insert(ctx->program_cache, prog->stages, prog);
76 if (!entry)
77 return NULL;
78 }
79 ctx->curr_program = entry->data;
80 ctx->dirty_program = false;
81 }
82
83 assert(ctx->curr_program);
84 return ctx->curr_program;
85 }
86
87 static bool
88 line_width_needed(enum pipe_prim_type reduced_prim,
89 VkPolygonMode polygon_mode)
90 {
91 switch (reduced_prim) {
92 case PIPE_PRIM_POINTS:
93 return false;
94
95 case PIPE_PRIM_LINES:
96 return true;
97
98 case PIPE_PRIM_TRIANGLES:
99 return polygon_mode == VK_POLYGON_MODE_LINE;
100
101 default:
102 unreachable("unexpected reduced prim");
103 }
104 }
105
/* pipe_context::draw_vbo for zink: translates a gallium draw call into
 * Vulkan command-buffer recording — descriptor updates, image layout
 * transitions, dynamic state, and the draw itself. */
void
zink_draw_vbo(struct pipe_context *pctx,
              const struct pipe_draw_info *dinfo)
{
   struct zink_context *ctx = zink_context(pctx);
   struct zink_screen *screen = zink_screen(pctx->screen);
   struct zink_rasterizer_state *rast_state = ctx->rast_state;

   /* Topologies Vulkan can't draw directly (quads and up, line loops)
    * and 8-bit indices are routed through u_primconvert, which re-enters
    * draw_vbo with a supported topology/index size. */
   if (dinfo->mode >= PIPE_PRIM_QUADS ||
       dinfo->mode == PIPE_PRIM_LINE_LOOP ||
       dinfo->index_size == 1) {
      if (!u_trim_pipe_prim(dinfo->mode, (unsigned *)&dinfo->count))
         return;

      util_primconvert_save_rasterizer_state(ctx->primconvert, &rast_state->base);
      util_primconvert_draw_vbo(ctx->primconvert, dinfo);
      return;
   }

   struct zink_gfx_program *gfx_program = get_gfx_program(ctx);
   if (!gfx_program)
      return;

   VkPipeline pipeline = zink_get_gfx_pipeline(screen, gfx_program,
                                               &ctx->gfx_pipeline_state,
                                               dinfo->mode);

   /* reduced prim (points/lines/triangles) selects which polygon-offset
    * flag applies and whether line width matters */
   enum pipe_prim_type reduced_prim = u_reduced_prim(dinfo->mode);

   bool depth_bias = false;
   switch (reduced_prim) {
   case PIPE_PRIM_POINTS:
      depth_bias = rast_state->offset_point;
      break;

   case PIPE_PRIM_LINES:
      depth_bias = rast_state->offset_line;
      break;

   case PIPE_PRIM_TRIANGLES:
      depth_bias = rast_state->offset_tri;
      break;

   default:
      unreachable("unexpected reduced prim");
   }

   /* User-pointer index data must be uploaded to a GPU buffer first;
    * otherwise the application's index resource is used directly. */
   unsigned index_offset = 0;
   struct pipe_resource *index_buffer = NULL;
   if (dinfo->index_size > 0) {
      if (dinfo->has_user_indices) {
         if (!util_upload_index_buffer(pctx, dinfo, &index_buffer, &index_offset, 4)) {
            debug_printf("util_upload_index_buffer() failed\n");
            return;
         }
      } else
         index_buffer = dinfo->index.resource;
   }

   /* First pass over the bound shader stages: build the descriptor
    * writes (UBOs and sampler views) on the stack, and collect sampled
    * images that are not yet in a shader-readable layout. */
   VkWriteDescriptorSet wds[PIPE_SHADER_TYPES * PIPE_MAX_CONSTANT_BUFFERS + PIPE_SHADER_TYPES * PIPE_MAX_SHADER_SAMPLER_VIEWS];
   VkDescriptorBufferInfo buffer_infos[PIPE_SHADER_TYPES * PIPE_MAX_CONSTANT_BUFFERS];
   VkDescriptorImageInfo image_infos[PIPE_SHADER_TYPES * PIPE_MAX_SHADER_SAMPLER_VIEWS];
   int num_wds = 0, num_buffer_info = 0, num_image_info = 0;

   struct zink_resource *transitions[PIPE_SHADER_TYPES * PIPE_MAX_SHADER_SAMPLER_VIEWS];
   int num_transitions = 0;

   for (int i = 0; i < ARRAY_SIZE(ctx->gfx_stages); i++) {
      struct zink_shader *shader = ctx->gfx_stages[i];
      if (!shader)
         continue;

      for (int j = 0; j < shader->num_bindings; j++) {
         int index = shader->bindings[j].index;
         if (shader->bindings[j].type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER) {
            assert(ctx->ubos[i][index].buffer_size > 0);
            assert(ctx->ubos[i][index].buffer_size <= screen->props.limits.maxUniformBufferRange);
            assert(ctx->ubos[i][index].buffer);
            struct zink_resource *res = zink_resource(ctx->ubos[i][index].buffer);
            buffer_infos[num_buffer_info].buffer = res->buffer;
            buffer_infos[num_buffer_info].offset = ctx->ubos[i][index].buffer_offset;
            buffer_infos[num_buffer_info].range  = ctx->ubos[i][index].buffer_size;
            wds[num_wds].pBufferInfo = buffer_infos + num_buffer_info;
            ++num_buffer_info;
         } else {
            struct pipe_sampler_view *psampler_view = ctx->image_views[i][index];
            assert(psampler_view);
            struct zink_sampler_view *sampler_view = zink_sampler_view(psampler_view);

            struct zink_resource *res = zink_resource(psampler_view->texture);
            /* images not already in a layout valid for sampling are
             * queued for a transition to GENERAL before the render pass */
            VkImageLayout layout = res->layout;
            if (layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL &&
                layout != VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL &&
                layout != VK_IMAGE_LAYOUT_GENERAL) {
               transitions[num_transitions++] = res;
               layout = VK_IMAGE_LAYOUT_GENERAL;
            }
            image_infos[num_image_info].imageLayout = layout;
            image_infos[num_image_info].imageView = sampler_view->image_view;
            image_infos[num_image_info].sampler = ctx->samplers[i][index];
            wds[num_wds].pImageInfo = image_infos + num_image_info;
            ++num_image_info;
         }

         /* dstSet is filled in later, once the descriptor set is allocated */
         wds[num_wds].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
         wds[num_wds].pNext = NULL;
         wds[num_wds].dstBinding = shader->bindings[j].binding;
         wds[num_wds].dstArrayElement = 0;
         wds[num_wds].descriptorCount = 1;
         wds[num_wds].descriptorType = shader->bindings[j].type;
         ++num_wds;
      }
   }

   /* Layout transitions must be recorded outside a render pass, so use
    * the no-render-pass batch for the barriers... */
   struct zink_batch *batch;
   if (num_transitions > 0) {
      batch = zink_batch_no_rp(ctx);

      for (int i = 0; i < num_transitions; ++i)
         zink_resource_barrier(batch->cmdbuf, transitions[i],
                               transitions[i]->aspect,
                               VK_IMAGE_LAYOUT_GENERAL);
   }

   /* ...then (re)enter the render pass for the draw itself. */
   batch = zink_batch_rp(ctx);

   /* flush if the current batch's descriptor pool can't cover this
    * program, so allocate_descriptor_set's precondition holds */
   if (batch->descs_left < gfx_program->num_descriptors) {
      ctx->base.flush(&ctx->base, NULL, 0);
      batch = zink_batch_rp(ctx);
      assert(batch->descs_left >= gfx_program->num_descriptors);
   }

   VkDescriptorSet desc_set = allocate_descriptor_set(screen, batch,
                                                      gfx_program);
   assert(desc_set != VK_NULL_HANDLE);

   /* Second pass over the same stages/bindings: reference every resource
    * used by the descriptors so it stays alive until the batch retires. */
   for (int i = 0; i < ARRAY_SIZE(ctx->gfx_stages); i++) {
      struct zink_shader *shader = ctx->gfx_stages[i];
      if (!shader)
         continue;

      for (int j = 0; j < shader->num_bindings; j++) {
         int index = shader->bindings[j].index;
         if (shader->bindings[j].type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER) {
            struct zink_resource *res = zink_resource(ctx->ubos[i][index].buffer);
            zink_batch_reference_resoure(batch, res);
         } else {
            struct zink_sampler_view *sampler_view = zink_sampler_view(ctx->image_views[i][index]);
            zink_batch_reference_sampler_view(batch, sampler_view);
         }
      }
   }

   /* dynamic state: viewport, scissor (framebuffer-sized when the
    * rasterizer scissor test is off), line width, stencil refs,
    * depth bias, blend constants */
   vkCmdSetViewport(batch->cmdbuf, 0, ctx->num_viewports, ctx->viewports);
   if (ctx->rast_state->base.scissor)
      vkCmdSetScissor(batch->cmdbuf, 0, ctx->num_viewports, ctx->scissors);
   else if (ctx->fb_state.width && ctx->fb_state.height) {
      VkRect2D fb_scissor = {};
      fb_scissor.extent.width = ctx->fb_state.width;
      fb_scissor.extent.height = ctx->fb_state.height;
      vkCmdSetScissor(batch->cmdbuf, 0, 1, &fb_scissor);
   }

   if (line_width_needed(reduced_prim, rast_state->hw_state.polygon_mode)) {
      if (screen->feats.wideLines || ctx->line_width == 1.0f)
         vkCmdSetLineWidth(batch->cmdbuf, ctx->line_width);
      else
         debug_printf("BUG: wide lines not supported, needs fallback!");
   }

   vkCmdSetStencilReference(batch->cmdbuf, VK_STENCIL_FACE_FRONT_BIT, ctx->stencil_ref.ref_value[0]);
   vkCmdSetStencilReference(batch->cmdbuf, VK_STENCIL_FACE_BACK_BIT, ctx->stencil_ref.ref_value[1]);

   if (depth_bias)
      vkCmdSetDepthBias(batch->cmdbuf, rast_state->offset_units, rast_state->offset_clamp, rast_state->offset_scale);
   else
      vkCmdSetDepthBias(batch->cmdbuf, 0.0f, 0.0f, 0.0f);

   if (ctx->gfx_pipeline_state.blend_state->need_blend_constants)
      vkCmdSetBlendConstants(batch->cmdbuf, ctx->blend_constants);

   /* point every pending write at the freshly-allocated set and flush
    * them to the device in one call */
   if (num_wds > 0) {
      for (int i = 0; i < num_wds; ++i)
         wds[i].dstSet = desc_set;
      vkUpdateDescriptorSets(screen->dev, num_wds, wds, 0, NULL);
   }

   vkCmdBindPipeline(batch->cmdbuf, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline);
   vkCmdBindDescriptorSets(batch->cmdbuf, VK_PIPELINE_BIND_POINT_GRAPHICS,
                           gfx_program->layout, 0, 1, &desc_set, 0, NULL);
   zink_bind_vertex_buffers(batch, ctx);

   if (dinfo->index_size > 0) {
      /* 8-bit indices were already diverted to primconvert above */
      assert(dinfo->index_size != 1);
      VkIndexType index_type = dinfo->index_size == 2 ? VK_INDEX_TYPE_UINT16 : VK_INDEX_TYPE_UINT32;
      struct zink_resource *res = zink_resource(index_buffer);
      vkCmdBindIndexBuffer(batch->cmdbuf, res->buffer, index_offset, index_type);
      zink_batch_reference_resoure(batch, res);
      vkCmdDrawIndexed(batch->cmdbuf,
                       dinfo->count, dinfo->instance_count,
                       dinfo->start, dinfo->index_bias, dinfo->start_instance);
   } else
      vkCmdDraw(batch->cmdbuf, dinfo->count, dinfo->instance_count, dinfo->start, dinfo->start_instance);

   /* drop the reference util_upload_index_buffer gave us */
   if (dinfo->index_size > 0 && dinfo->has_user_indices)
      pipe_resource_reference(&index_buffer, NULL);
}