/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>

#include "anv_private.h"

// Shader functions

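/* A shader module simply stores a copy of the code blob handed in by the
 * application.  module->nir is initialized to NULL here; internally created
 * modules can supply NIR directly through that field instead. */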
VkResult anv_CreateShaderModule(
    VkDevice                                    _device,
    const VkShaderModuleCreateInfo*             pCreateInfo,
    VkShaderModule*                             pShaderModule)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_shader_module *module;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO);
   assert(pCreateInfo->flags == 0);

   module = anv_device_alloc(device, sizeof(*module) + pCreateInfo->codeSize, 8,
                             VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (module == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   module->nir = NULL;
   module->size = pCreateInfo->codeSize;
   memcpy(module->data, pCreateInfo->pCode, module->size);

   *pShaderModule = anv_shader_module_to_handle(module);

   return VK_SUCCESS;
}

void anv_DestroyShaderModule(
    VkDevice                                    _device,
    VkShaderModule                              _module)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_shader_module, module, _module);

   anv_device_free(device, module);
}

VkResult anv_CreateShader(
    VkDevice                                    _device,
    const VkShaderCreateInfo*                   pCreateInfo,
    VkShader*                                   pShader)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_shader_module, module, pCreateInfo->module);
   struct anv_shader *shader;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_CREATE_INFO);
   assert(pCreateInfo->flags == 0);

   const char *name = pCreateInfo->pName ? pCreateInfo->pName : "main";
   size_t name_len = strlen(name);

   if (strcmp(name, "main") != 0) {
      anv_finishme("Multiple shaders per module not really supported");
   }

   shader = anv_device_alloc(device, sizeof(*shader) + name_len + 1, 8,
                             VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (shader == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   shader->module = module;
   memcpy(shader->entrypoint, name, name_len + 1);

   *pShader = anv_shader_to_handle(shader);

   return VK_SUCCESS;
}

void anv_DestroyShader(
    VkDevice                                    _device,
    VkShader                                    _shader)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_shader, shader, _shader);

   anv_device_free(device, shader);
}


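/* Pipeline caches are not implemented yet: creation hands back a dummy
 * handle and the remaining cache entrypoints are stubs. */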
VkResult anv_CreatePipelineCache(
    VkDevice                                    device,
    const VkPipelineCacheCreateInfo*            pCreateInfo,
    VkPipelineCache*                            pPipelineCache)
{
   pPipelineCache->handle = 1;

   stub_return(VK_SUCCESS);
}

void anv_DestroyPipelineCache(
    VkDevice                                    _device,
    VkPipelineCache                             _cache)
{
}

size_t anv_GetPipelineCacheSize(
    VkDevice                                    device,
    VkPipelineCache                             pipelineCache)
{
   stub_return(0);
}

VkResult anv_GetPipelineCacheData(
    VkDevice                                    device,
    VkPipelineCache                             pipelineCache,
    void*                                       pData)
{
   stub_return(VK_UNSUPPORTED);
}

VkResult anv_MergePipelineCaches(
    VkDevice                                    device,
    VkPipelineCache                             destCache,
    uint32_t                                    srcCacheCount,
    const VkPipelineCache*                      pSrcCaches)
{
   stub_return(VK_UNSUPPORTED);
}

void anv_DestroyPipeline(
    VkDevice                                    _device,
    VkPipeline                                  _pipeline)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_pipeline, pipeline, _pipeline);

   anv_compiler_free(pipeline);
   anv_reloc_list_finish(&pipeline->batch_relocs, pipeline->device);
   anv_state_stream_finish(&pipeline->program_stream);
   anv_state_pool_free(&device->dynamic_state_pool, pipeline->blend_state);
   anv_device_free(pipeline->device, pipeline);
}

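/* Translation from VkPrimitiveTopology to the hardware 3DPRIM_* topology
 * values used when emitting 3DPRIMITIVE.  VK_PRIMITIVE_TOPOLOGY_PATCH is
 * mapped to a single-control-point patch list here. */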
static const uint32_t vk_to_gen_primitive_type[] = {
   [VK_PRIMITIVE_TOPOLOGY_POINT_LIST]         = _3DPRIM_POINTLIST,
   [VK_PRIMITIVE_TOPOLOGY_LINE_LIST]          = _3DPRIM_LINELIST,
   [VK_PRIMITIVE_TOPOLOGY_LINE_STRIP]         = _3DPRIM_LINESTRIP,
   [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST]      = _3DPRIM_TRILIST,
   [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP]     = _3DPRIM_TRISTRIP,
   [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN]       = _3DPRIM_TRIFAN,
   [VK_PRIMITIVE_TOPOLOGY_LINE_LIST_ADJ]      = _3DPRIM_LINELIST_ADJ,
   [VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_ADJ]     = _3DPRIM_LINESTRIP_ADJ,
   [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_ADJ]  = _3DPRIM_TRILIST_ADJ,
   [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_ADJ] = _3DPRIM_TRISTRIP_ADJ,
   [VK_PRIMITIVE_TOPOLOGY_PATCH]              = _3DPRIM_PATCHLIST_1
};

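/* Gen-independent pipeline initialization: sets up the pipeline's batch and
 * relocation list, its program stream and per-stage shader pointers, runs
 * the compiler, and derives vertex-input and input-assembly state from the
 * create info. */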
VkResult
anv_pipeline_init(struct anv_pipeline *pipeline, struct anv_device *device,
                  const VkGraphicsPipelineCreateInfo *pCreateInfo,
                  const struct anv_graphics_pipeline_create_info *extra)
{
   VkResult result;

   pipeline->device = device;
   pipeline->layout = anv_pipeline_layout_from_handle(pCreateInfo->layout);
   memset(pipeline->shaders, 0, sizeof(pipeline->shaders));

   result = anv_reloc_list_init(&pipeline->batch_relocs, device);
   if (result != VK_SUCCESS) {
      anv_device_free(device, pipeline);
      return result;
   }
   pipeline->batch.next = pipeline->batch.start = pipeline->batch_data;
   pipeline->batch.end = pipeline->batch.start + sizeof(pipeline->batch_data);
   pipeline->batch.relocs = &pipeline->batch_relocs;

   anv_state_stream_init(&pipeline->program_stream,
                         &device->instruction_block_pool);

   for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
      pipeline->shaders[pCreateInfo->pStages[i].stage] =
         anv_shader_from_handle(pCreateInfo->pStages[i].shader);
   }

   if (pCreateInfo->pTessellationState)
      anv_finishme("VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO");
   if (pCreateInfo->pViewportState)
      anv_finishme("VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO");
   if (pCreateInfo->pMultisampleState)
      anv_finishme("VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO");

   pipeline->use_repclear = extra && extra->use_repclear;

   anv_compiler_run(device->compiler, pipeline);

   const struct brw_wm_prog_data *wm_prog_data = &pipeline->wm_prog_data;

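   /* Pick the fragment shader kernel start pointers (KSPs) and dispatch GRF
    * start registers: KSP0 gets the SIMD8 kernel when one exists, with the
    * SIMD16 kernel in KSP2; otherwise the SIMD16 kernel alone goes in KSP0. */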
   pipeline->ps_ksp2 = 0;
   pipeline->ps_grf_start2 = 0;
   if (pipeline->ps_simd8 != NO_KERNEL) {
      pipeline->ps_ksp0 = pipeline->ps_simd8;
      pipeline->ps_grf_start0 = wm_prog_data->base.dispatch_grf_start_reg;
      if (pipeline->ps_simd16 != NO_KERNEL) {
         pipeline->ps_ksp2 = pipeline->ps_simd16;
         pipeline->ps_grf_start2 = wm_prog_data->dispatch_grf_start_reg_16;
      }
   } else if (pipeline->ps_simd16 != NO_KERNEL) {
      pipeline->ps_ksp0 = pipeline->ps_simd16;
      pipeline->ps_grf_start0 = wm_prog_data->dispatch_grf_start_reg_16;
   } else {
      unreachable("no ps shader");
   }

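   /* Record which vertex buffer bindings are used and their strides, for use
    * by the gen-specific vertex element and vertex buffer setup. */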
   const VkPipelineVertexInputStateCreateInfo *vi_info =
      pCreateInfo->pVertexInputState;
   pipeline->vb_used = 0;
   for (uint32_t i = 0; i < vi_info->bindingCount; i++) {
      const VkVertexInputBindingDescription *desc =
         &vi_info->pVertexBindingDescriptions[i];

      pipeline->vb_used |= 1 << desc->binding;
      pipeline->binding_stride[desc->binding] = desc->strideInBytes;

      /* Step rate is programmed per vertex element (attribute), not
       * binding. Set up a map of which bindings step per instance, for
       * reference by vertex element setup. */
      switch (desc->stepRate) {
      default:
      case VK_VERTEX_INPUT_STEP_RATE_VERTEX:
         pipeline->instancing_enable[desc->binding] = false;
         break;
      case VK_VERTEX_INPUT_STEP_RATE_INSTANCE:
         pipeline->instancing_enable[desc->binding] = true;
         break;
      }
   }

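   /* Input assembly: primitive restart is recorded as-is and the topology is
    * translated to its hardware value.  RECTLIST cannot be expressed through
    * VkPrimitiveTopology and is only selectable via the 'extra' create
    * parameters used for internally created pipelines. */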
   const VkPipelineInputAssemblyStateCreateInfo *ia_info =
      pCreateInfo->pInputAssemblyState;
   pipeline->primitive_restart = ia_info->primitiveRestartEnable;
   pipeline->topology = vk_to_gen_primitive_type[ia_info->topology];

   if (extra && extra->use_rectlist)
      pipeline->topology = _3DPRIM_RECTLIST;

   return VK_SUCCESS;
}

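/* Graphics pipeline creation dispatches to the gen-specific backend based on
 * the device's hardware generation. */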
VkResult
anv_graphics_pipeline_create(
   VkDevice _device,
   const VkGraphicsPipelineCreateInfo *pCreateInfo,
   const struct anv_graphics_pipeline_create_info *extra,
   VkPipeline *pPipeline)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   switch (device->info.gen) {
   case 7:
      return gen7_graphics_pipeline_create(_device, pCreateInfo, extra, pPipeline);
   case 8:
      return gen8_graphics_pipeline_create(_device, pCreateInfo, extra, pPipeline);
   default:
      unreachable("unsupported gen\n");
   }
}

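/* Batch entrypoint: creates 'count' pipelines.  On the first failure, any
 * pipelines created so far are destroyed and the error is returned; the
 * pipelineCache argument is ignored since caches are still stubs. */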
VkResult anv_CreateGraphicsPipelines(
    VkDevice                                    _device,
    VkPipelineCache                             pipelineCache,
    uint32_t                                    count,
    const VkGraphicsPipelineCreateInfo*         pCreateInfos,
    VkPipeline*                                 pPipelines)
{
   VkResult result = VK_SUCCESS;

   unsigned i = 0;
   for (; i < count; i++) {
      result = anv_graphics_pipeline_create(_device, &pCreateInfos[i],
                                            NULL, &pPipelines[i]);
      if (result != VK_SUCCESS) {
         for (unsigned j = 0; j < i; j++) {
            anv_DestroyPipeline(_device, pPipelines[j]);
         }

         return result;
      }
   }

   return VK_SUCCESS;
}

static VkResult anv_compute_pipeline_create(
    VkDevice                                    _device,
    const VkComputePipelineCreateInfo*          pCreateInfo,
    VkPipeline*                                 pPipeline)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   switch (device->info.gen) {
   case 7:
      return gen7_compute_pipeline_create(_device, pCreateInfo, pPipeline);
   case 8:
      return gen8_compute_pipeline_create(_device, pCreateInfo, pPipeline);
   default:
      unreachable("unsupported gen\n");
   }
}

VkResult anv_CreateComputePipelines(
    VkDevice                                    _device,
    VkPipelineCache                             pipelineCache,
    uint32_t                                    count,
    const VkComputePipelineCreateInfo*          pCreateInfos,
    VkPipeline*                                 pPipelines)
{
   VkResult result = VK_SUCCESS;

   unsigned i = 0;
   for (; i < count; i++) {
      result = anv_compute_pipeline_create(_device, &pCreateInfos[i],
                                           &pPipelines[i]);
      if (result != VK_SUCCESS) {
         for (unsigned j = 0; j < i; j++) {
            anv_DestroyPipeline(_device, pPipelines[j]);
         }

         return result;
      }
   }

   return VK_SUCCESS;
}

// Pipeline layout functions

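/* A pipeline layout flattens the descriptor set layouts into per-stage
 * totals: for each shader stage it accumulates surface and sampler counts,
 * records each set's starting offset into those tables, and tracks where
 * each set's dynamic buffer offsets begin. */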
VkResult anv_CreatePipelineLayout(
    VkDevice                                    _device,
    const VkPipelineLayoutCreateInfo*           pCreateInfo,
    VkPipelineLayout*                           pPipelineLayout)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_pipeline_layout *layout;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO);

   layout = anv_device_alloc(device, sizeof(*layout), 8,
                             VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (layout == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   layout->num_sets = pCreateInfo->descriptorSetCount;

   uint32_t surface_start[VK_SHADER_STAGE_NUM] = { 0, };
   uint32_t sampler_start[VK_SHADER_STAGE_NUM] = { 0, };

   for (uint32_t s = 0; s < VK_SHADER_STAGE_NUM; s++) {
      layout->stage[s].has_dynamic_offsets = false;
      layout->stage[s].surface_count = 0;
      layout->stage[s].sampler_count = 0;
   }

   uint32_t num_dynamic_offsets = 0;
   for (uint32_t i = 0; i < pCreateInfo->descriptorSetCount; i++) {
      ANV_FROM_HANDLE(anv_descriptor_set_layout, set_layout,
                      pCreateInfo->pSetLayouts[i]);

      layout->set[i].layout = set_layout;
      layout->set[i].dynamic_offset_start = num_dynamic_offsets;
      num_dynamic_offsets += set_layout->num_dynamic_buffers;
      for (uint32_t s = 0; s < VK_SHADER_STAGE_NUM; s++) {
         if (set_layout->num_dynamic_buffers > 0)
            layout->stage[s].has_dynamic_offsets = true;

         layout->set[i].stage[s].surface_start = surface_start[s];
         surface_start[s] += set_layout->stage[s].surface_count;
         layout->set[i].stage[s].sampler_start = sampler_start[s];
         sampler_start[s] += set_layout->stage[s].sampler_count;

         layout->stage[s].surface_count += set_layout->stage[s].surface_count;
         layout->stage[s].sampler_count += set_layout->stage[s].sampler_count;
      }
   }

   *pPipelineLayout = anv_pipeline_layout_to_handle(layout);

   return VK_SUCCESS;
}

void anv_DestroyPipelineLayout(
    VkDevice                                    _device,
    VkPipelineLayout                            _pipelineLayout)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_pipeline_layout, pipeline_layout, _pipelineLayout);

   anv_device_free(device, pipeline_layout);
}