vk/pipeline: Be more sloppy about shader entrypoint names
[mesa.git] src/vulkan/anv_pipeline.c
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>

#include "anv_private.h"

// Shader functions

VkResult anv_CreateShaderModule(
    VkDevice _device,
    const VkShaderModuleCreateInfo* pCreateInfo,
    VkShaderModule* pShaderModule)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_shader_module *module;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO);
   assert(pCreateInfo->flags == 0);

   module = anv_device_alloc(device, sizeof(*module) + pCreateInfo->codeSize, 8,
                             VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (module == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   module->size = pCreateInfo->codeSize;
   memcpy(module->data, pCreateInfo->pCode, module->size);

   *pShaderModule = anv_shader_module_to_handle(module);

   return VK_SUCCESS;
}

VkResult anv_DestroyShaderModule(
    VkDevice _device,
    VkShaderModule _module)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_shader_module, module, _module);

   anv_device_free(device, module);

   return VK_SUCCESS;
}

VkResult anv_CreateShader(
    VkDevice _device,
    const VkShaderCreateInfo* pCreateInfo,
    VkShader* pShader)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_shader_module, module, pCreateInfo->module);
   struct anv_shader *shader;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_CREATE_INFO);
   assert(pCreateInfo->flags == 0);

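   /* The entrypoint name is optional here; default to "main" when none is
    * provided.  Other names are stored but only partially supported (see
    * the finishme below). */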
   const char *name = pCreateInfo->pName ? pCreateInfo->pName : "main";
   size_t name_len = strlen(name);

   if (strcmp(name, "main") != 0) {
      anv_finishme("Multiple shaders per module not really supported");
   }

   shader = anv_device_alloc(device, sizeof(*shader) + name_len + 1, 8,
                             VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (shader == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   shader->module = module;
   memcpy(shader->entrypoint, name, name_len + 1);

   *pShader = anv_shader_to_handle(shader);

   return VK_SUCCESS;
}

VkResult anv_DestroyShader(
    VkDevice _device,
    VkShader _shader)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_shader, shader, _shader);

   anv_device_free(device, shader);

   return VK_SUCCESS;
}


VkResult anv_CreatePipelineCache(
    VkDevice device,
    const VkPipelineCacheCreateInfo* pCreateInfo,
    VkPipelineCache* pPipelineCache)
{
   pPipelineCache->handle = 1;

   stub_return(VK_SUCCESS);
}

VkResult anv_DestroyPipelineCache(
    VkDevice _device,
    VkPipelineCache _cache)
{
   /* VkPipelineCache is a dummy object. */
   return VK_SUCCESS;
}

size_t anv_GetPipelineCacheSize(
    VkDevice device,
    VkPipelineCache pipelineCache)
{
   stub_return(0);
}

VkResult anv_GetPipelineCacheData(
    VkDevice device,
    VkPipelineCache pipelineCache,
    void* pData)
{
   stub_return(VK_UNSUPPORTED);
}

VkResult anv_MergePipelineCaches(
    VkDevice device,
    VkPipelineCache destCache,
    uint32_t srcCacheCount,
    const VkPipelineCache* pSrcCaches)
{
   stub_return(VK_UNSUPPORTED);
}

// Pipeline functions

static void
emit_vertex_input(struct anv_pipeline *pipeline,
                  const VkPipelineVertexInputStateCreateInfo *info)
{
   const uint32_t num_dwords = 1 + info->attributeCount * 2;
   uint32_t *p;
   bool instancing_enable[32];

   pipeline->vb_used = 0;
   for (uint32_t i = 0; i < info->bindingCount; i++) {
      const VkVertexInputBindingDescription *desc =
         &info->pVertexBindingDescriptions[i];

      pipeline->vb_used |= 1 << desc->binding;
      pipeline->binding_stride[desc->binding] = desc->strideInBytes;

      /* Step rate is programmed per vertex element (attribute), not
       * binding.  Set up a map of which bindings step per instance, for
       * reference by vertex element setup. */
      switch (desc->stepRate) {
      default:
      case VK_VERTEX_INPUT_STEP_RATE_VERTEX:
         instancing_enable[desc->binding] = false;
         break;
      case VK_VERTEX_INPUT_STEP_RATE_INSTANCE:
         instancing_enable[desc->binding] = true;
         break;
      }
   }

   p = anv_batch_emitn(&pipeline->batch, num_dwords,
                       GEN8_3DSTATE_VERTEX_ELEMENTS);

   for (uint32_t i = 0; i < info->attributeCount; i++) {
      const VkVertexInputAttributeDescription *desc =
         &info->pVertexAttributeDescriptions[i];
      const struct anv_format *format = anv_format_for_vk_format(desc->format);

      struct GEN8_VERTEX_ELEMENT_STATE element = {
         .VertexBufferIndex = desc->binding,
         .Valid = true,
         .SourceElementFormat = format->surface_format,
         .EdgeFlagEnable = false,
         .SourceElementOffset = desc->offsetInBytes,
         .Component0Control = VFCOMP_STORE_SRC,
         .Component1Control = format->num_channels >= 2 ? VFCOMP_STORE_SRC : VFCOMP_STORE_0,
         .Component2Control = format->num_channels >= 3 ? VFCOMP_STORE_SRC : VFCOMP_STORE_0,
         .Component3Control = format->num_channels >= 4 ? VFCOMP_STORE_SRC : VFCOMP_STORE_1_FP
      };
      GEN8_VERTEX_ELEMENT_STATE_pack(NULL, &p[1 + i * 2], &element);

      anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_VF_INSTANCING,
                     .InstancingEnable = instancing_enable[desc->binding],
                     .VertexElementIndex = i,
                     /* Vulkan so far doesn't have an instance divisor, so
                      * this is always 1 (ignored if not instancing). */
                     .InstanceDataStepRate = 1);
   }

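   /* Use VF SGVS to append the system-generated VertexID and InstanceID
    * values in components 2 and 3 of the element following the
    * user-supplied attributes. */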
   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_VF_SGVS,
                  .VertexIDEnable = pipeline->vs_prog_data.uses_vertexid,
                  .VertexIDComponentNumber = 2,
                  .VertexIDElementOffset = info->bindingCount,
                  .InstanceIDEnable = pipeline->vs_prog_data.uses_instanceid,
                  .InstanceIDComponentNumber = 3,
                  .InstanceIDElementOffset = info->bindingCount);
}

static void
emit_ia_state(struct anv_pipeline *pipeline,
              const VkPipelineInputAssemblyStateCreateInfo *info,
              const struct anv_pipeline_create_info *extra)
{
   static const uint32_t vk_to_gen_primitive_type[] = {
      [VK_PRIMITIVE_TOPOLOGY_POINT_LIST] = _3DPRIM_POINTLIST,
      [VK_PRIMITIVE_TOPOLOGY_LINE_LIST] = _3DPRIM_LINELIST,
      [VK_PRIMITIVE_TOPOLOGY_LINE_STRIP] = _3DPRIM_LINESTRIP,
      [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST] = _3DPRIM_TRILIST,
      [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP] = _3DPRIM_TRISTRIP,
      [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN] = _3DPRIM_TRIFAN,
      [VK_PRIMITIVE_TOPOLOGY_LINE_LIST_ADJ] = _3DPRIM_LINELIST_ADJ,
      [VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_ADJ] = _3DPRIM_LINESTRIP_ADJ,
      [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_ADJ] = _3DPRIM_TRILIST_ADJ,
      [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_ADJ] = _3DPRIM_TRISTRIP_ADJ,
      [VK_PRIMITIVE_TOPOLOGY_PATCH] = _3DPRIM_PATCHLIST_1
   };
   uint32_t topology = vk_to_gen_primitive_type[info->topology];

   if (extra && extra->use_rectlist)
      topology = _3DPRIM_RECTLIST;

   struct GEN8_3DSTATE_VF vf = {
      GEN8_3DSTATE_VF_header,
      .IndexedDrawCutIndexEnable = info->primitiveRestartEnable,
   };
   GEN8_3DSTATE_VF_pack(NULL, pipeline->state_vf, &vf);

   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_VF_TOPOLOGY,
                  .PrimitiveTopologyType = topology);
}

static void
emit_rs_state(struct anv_pipeline *pipeline,
              const VkPipelineRasterStateCreateInfo *info,
              const struct anv_pipeline_create_info *extra)
{
   static const uint32_t vk_to_gen_cullmode[] = {
      [VK_CULL_MODE_NONE] = CULLMODE_NONE,
      [VK_CULL_MODE_FRONT] = CULLMODE_FRONT,
      [VK_CULL_MODE_BACK] = CULLMODE_BACK,
      [VK_CULL_MODE_FRONT_AND_BACK] = CULLMODE_BOTH
   };

   static const uint32_t vk_to_gen_fillmode[] = {
      [VK_FILL_MODE_POINTS] = RASTER_POINT,
      [VK_FILL_MODE_WIREFRAME] = RASTER_WIREFRAME,
      [VK_FILL_MODE_SOLID] = RASTER_SOLID
   };

   static const uint32_t vk_to_gen_front_face[] = {
      [VK_FRONT_FACE_CCW] = CounterClockwise,
      [VK_FRONT_FACE_CW] = Clockwise
   };

   struct GEN8_3DSTATE_SF sf = {
      GEN8_3DSTATE_SF_header,
      .ViewportTransformEnable = !(extra && extra->disable_viewport),
      .TriangleStripListProvokingVertexSelect = 0,
      .LineStripListProvokingVertexSelect = 0,
      .TriangleFanProvokingVertexSelect = 0,
      .PointWidthSource = pipeline->writes_point_size ? Vertex : State,
      .PointWidth = 1.0,
   };

   /* FINISHME: VkBool32 rasterizerDiscardEnable; */

   GEN8_3DSTATE_SF_pack(NULL, pipeline->state_sf, &sf);

   struct GEN8_3DSTATE_RASTER raster = {
      GEN8_3DSTATE_RASTER_header,
      .FrontWinding = vk_to_gen_front_face[info->frontFace],
      .CullMode = vk_to_gen_cullmode[info->cullMode],
      .FrontFaceFillMode = vk_to_gen_fillmode[info->fillMode],
      .BackFaceFillMode = vk_to_gen_fillmode[info->fillMode],
      .ScissorRectangleEnable = !(extra && extra->disable_scissor),
      .ViewportZClipTestEnable = info->depthClipEnable
   };

   GEN8_3DSTATE_RASTER_pack(NULL, pipeline->state_raster, &raster);

   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_SBE,
                  .ForceVertexURBEntryReadLength = false,
                  .ForceVertexURBEntryReadOffset = false,
                  .PointSpriteTextureCoordinateOrigin = UPPERLEFT,
                  .NumberofSFOutputAttributes =
                     pipeline->wm_prog_data.num_varying_inputs);

}

static void
emit_cb_state(struct anv_pipeline *pipeline,
              const VkPipelineColorBlendStateCreateInfo *info)
{
   struct anv_device *device = pipeline->device;

   static const uint32_t vk_to_gen_logic_op[] = {
      [VK_LOGIC_OP_COPY] = LOGICOP_COPY,
      [VK_LOGIC_OP_CLEAR] = LOGICOP_CLEAR,
      [VK_LOGIC_OP_AND] = LOGICOP_AND,
      [VK_LOGIC_OP_AND_REVERSE] = LOGICOP_AND_REVERSE,
      [VK_LOGIC_OP_AND_INVERTED] = LOGICOP_AND_INVERTED,
      [VK_LOGIC_OP_NOOP] = LOGICOP_NOOP,
      [VK_LOGIC_OP_XOR] = LOGICOP_XOR,
      [VK_LOGIC_OP_OR] = LOGICOP_OR,
      [VK_LOGIC_OP_NOR] = LOGICOP_NOR,
      [VK_LOGIC_OP_EQUIV] = LOGICOP_EQUIV,
      [VK_LOGIC_OP_INVERT] = LOGICOP_INVERT,
      [VK_LOGIC_OP_OR_REVERSE] = LOGICOP_OR_REVERSE,
      [VK_LOGIC_OP_COPY_INVERTED] = LOGICOP_COPY_INVERTED,
      [VK_LOGIC_OP_OR_INVERTED] = LOGICOP_OR_INVERTED,
      [VK_LOGIC_OP_NAND] = LOGICOP_NAND,
      [VK_LOGIC_OP_SET] = LOGICOP_SET,
   };

   static const uint32_t vk_to_gen_blend[] = {
      [VK_BLEND_ZERO] = BLENDFACTOR_ZERO,
      [VK_BLEND_ONE] = BLENDFACTOR_ONE,
      [VK_BLEND_SRC_COLOR] = BLENDFACTOR_SRC_COLOR,
      [VK_BLEND_ONE_MINUS_SRC_COLOR] = BLENDFACTOR_INV_SRC_COLOR,
      [VK_BLEND_DEST_COLOR] = BLENDFACTOR_DST_COLOR,
      [VK_BLEND_ONE_MINUS_DEST_COLOR] = BLENDFACTOR_INV_DST_COLOR,
      [VK_BLEND_SRC_ALPHA] = BLENDFACTOR_SRC_ALPHA,
      [VK_BLEND_ONE_MINUS_SRC_ALPHA] = BLENDFACTOR_INV_SRC_ALPHA,
      [VK_BLEND_DEST_ALPHA] = BLENDFACTOR_DST_ALPHA,
      [VK_BLEND_ONE_MINUS_DEST_ALPHA] = BLENDFACTOR_INV_DST_ALPHA,
      [VK_BLEND_CONSTANT_COLOR] = BLENDFACTOR_CONST_COLOR,
      [VK_BLEND_ONE_MINUS_CONSTANT_COLOR] = BLENDFACTOR_INV_CONST_COLOR,
      [VK_BLEND_CONSTANT_ALPHA] = BLENDFACTOR_CONST_ALPHA,
      [VK_BLEND_ONE_MINUS_CONSTANT_ALPHA] = BLENDFACTOR_INV_CONST_ALPHA,
      [VK_BLEND_SRC_ALPHA_SATURATE] = BLENDFACTOR_SRC_ALPHA_SATURATE,
      [VK_BLEND_SRC1_COLOR] = BLENDFACTOR_SRC1_COLOR,
      [VK_BLEND_ONE_MINUS_SRC1_COLOR] = BLENDFACTOR_INV_SRC1_COLOR,
      [VK_BLEND_SRC1_ALPHA] = BLENDFACTOR_SRC1_ALPHA,
      [VK_BLEND_ONE_MINUS_SRC1_ALPHA] = BLENDFACTOR_INV_SRC1_ALPHA,
   };

   static const uint32_t vk_to_gen_blend_op[] = {
      [VK_BLEND_OP_ADD] = BLENDFUNCTION_ADD,
      [VK_BLEND_OP_SUBTRACT] = BLENDFUNCTION_SUBTRACT,
      [VK_BLEND_OP_REVERSE_SUBTRACT] = BLENDFUNCTION_REVERSE_SUBTRACT,
      [VK_BLEND_OP_MIN] = BLENDFUNCTION_MIN,
      [VK_BLEND_OP_MAX] = BLENDFUNCTION_MAX,
   };

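   /* BLEND_STATE is one header dword followed by a two-dword entry per color
    * attachment; allocate it from the dynamic state pool and pack the entries
    * in place. */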
   uint32_t num_dwords = 1 + info->attachmentCount * 2;
   pipeline->blend_state =
      anv_state_pool_alloc(&device->dynamic_state_pool, num_dwords * 4, 64);

   struct GEN8_BLEND_STATE blend_state = {
      .AlphaToCoverageEnable = info->alphaToCoverageEnable,
   };

   uint32_t *state = pipeline->blend_state.map;
   GEN8_BLEND_STATE_pack(NULL, state, &blend_state);

   for (uint32_t i = 0; i < info->attachmentCount; i++) {
      const VkPipelineColorBlendAttachmentState *a = &info->pAttachments[i];

      struct GEN8_BLEND_STATE_ENTRY entry = {
         .LogicOpEnable = info->logicOpEnable,
         .LogicOpFunction = vk_to_gen_logic_op[info->logicOp],
         .ColorBufferBlendEnable = a->blendEnable,
         .PreBlendSourceOnlyClampEnable = false,
         .PreBlendColorClampEnable = false,
         .PostBlendColorClampEnable = false,
         .SourceBlendFactor = vk_to_gen_blend[a->srcBlendColor],
         .DestinationBlendFactor = vk_to_gen_blend[a->destBlendColor],
         .ColorBlendFunction = vk_to_gen_blend_op[a->blendOpColor],
         .SourceAlphaBlendFactor = vk_to_gen_blend[a->srcBlendAlpha],
         .DestinationAlphaBlendFactor = vk_to_gen_blend[a->destBlendAlpha],
         .AlphaBlendFunction = vk_to_gen_blend_op[a->blendOpAlpha],
         .WriteDisableAlpha = !(a->channelWriteMask & VK_CHANNEL_A_BIT),
         .WriteDisableRed = !(a->channelWriteMask & VK_CHANNEL_R_BIT),
         .WriteDisableGreen = !(a->channelWriteMask & VK_CHANNEL_G_BIT),
         .WriteDisableBlue = !(a->channelWriteMask & VK_CHANNEL_B_BIT),
      };

      GEN8_BLEND_STATE_ENTRY_pack(NULL, state + i * 2 + 1, &entry);
   }

   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_BLEND_STATE_POINTERS,
                  .BlendStatePointer = pipeline->blend_state.offset,
                  .BlendStatePointerValid = true);
}

static const uint32_t vk_to_gen_compare_op[] = {
   [VK_COMPARE_OP_NEVER] = COMPAREFUNCTION_NEVER,
   [VK_COMPARE_OP_LESS] = COMPAREFUNCTION_LESS,
   [VK_COMPARE_OP_EQUAL] = COMPAREFUNCTION_EQUAL,
   [VK_COMPARE_OP_LESS_EQUAL] = COMPAREFUNCTION_LEQUAL,
   [VK_COMPARE_OP_GREATER] = COMPAREFUNCTION_GREATER,
   [VK_COMPARE_OP_NOT_EQUAL] = COMPAREFUNCTION_NOTEQUAL,
   [VK_COMPARE_OP_GREATER_EQUAL] = COMPAREFUNCTION_GEQUAL,
   [VK_COMPARE_OP_ALWAYS] = COMPAREFUNCTION_ALWAYS,
};

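/* Note: the stencil op translation below is still a placeholder; every
 * entry currently maps to 0. */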
static const uint32_t vk_to_gen_stencil_op[] = {
   [VK_STENCIL_OP_KEEP] = 0,
   [VK_STENCIL_OP_ZERO] = 0,
   [VK_STENCIL_OP_REPLACE] = 0,
   [VK_STENCIL_OP_INC_CLAMP] = 0,
   [VK_STENCIL_OP_DEC_CLAMP] = 0,
   [VK_STENCIL_OP_INVERT] = 0,
   [VK_STENCIL_OP_INC_WRAP] = 0,
   [VK_STENCIL_OP_DEC_WRAP] = 0
};

static void
emit_ds_state(struct anv_pipeline *pipeline,
              const VkPipelineDepthStencilStateCreateInfo *info)
{
   if (info == NULL) {
      /* We're going to OR this together with the dynamic state.  We need
       * to make sure it's initialized to something useful.
       */
      memset(pipeline->state_wm_depth_stencil, 0,
             sizeof(pipeline->state_wm_depth_stencil));
      return;
   }

   /* VkBool32 depthBoundsEnable; // optional (depth_bounds_test) */

   struct GEN8_3DSTATE_WM_DEPTH_STENCIL wm_depth_stencil = {
      .DepthTestEnable = info->depthTestEnable,
      .DepthBufferWriteEnable = info->depthWriteEnable,
      .DepthTestFunction = vk_to_gen_compare_op[info->depthCompareOp],
      .DoubleSidedStencilEnable = true,

      .StencilTestEnable = info->stencilTestEnable,
      .StencilFailOp = vk_to_gen_stencil_op[info->front.stencilFailOp],
      .StencilPassDepthPassOp = vk_to_gen_stencil_op[info->front.stencilPassOp],
      .StencilPassDepthFailOp = vk_to_gen_stencil_op[info->front.stencilDepthFailOp],
      .StencilTestFunction = vk_to_gen_compare_op[info->front.stencilCompareOp],
      .BackfaceStencilFailOp = vk_to_gen_stencil_op[info->back.stencilFailOp],
      .BackfaceStencilPassDepthPassOp = vk_to_gen_stencil_op[info->back.stencilPassOp],
      .BackfaceStencilPassDepthFailOp = vk_to_gen_stencil_op[info->back.stencilDepthFailOp],
      .BackfaceStencilTestFunction = vk_to_gen_compare_op[info->back.stencilCompareOp],
   };

   GEN8_3DSTATE_WM_DEPTH_STENCIL_pack(NULL, pipeline->state_wm_depth_stencil, &wm_depth_stencil);
}

VkResult
anv_pipeline_create(
    VkDevice _device,
    const VkGraphicsPipelineCreateInfo* pCreateInfo,
    const struct anv_pipeline_create_info *extra,
    VkPipeline* pPipeline)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_pipeline *pipeline;
   VkResult result;
   uint32_t offset, length;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO);

   pipeline = anv_device_alloc(device, sizeof(*pipeline), 8,
                               VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (pipeline == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   pipeline->device = device;
   pipeline->layout = anv_pipeline_layout_from_handle(pCreateInfo->layout);
   memset(pipeline->shaders, 0, sizeof(pipeline->shaders));

   result = anv_reloc_list_init(&pipeline->batch.relocs, device);
   if (result != VK_SUCCESS) {
      anv_device_free(device, pipeline);
      return result;
   }
   pipeline->batch.next = pipeline->batch.start = pipeline->batch_data;
   pipeline->batch.end = pipeline->batch.start + sizeof(pipeline->batch_data);

   anv_state_stream_init(&pipeline->program_stream,
                         &device->instruction_block_pool);

   for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
      pipeline->shaders[pCreateInfo->pStages[i].stage] =
         anv_shader_from_handle(pCreateInfo->pStages[i].shader);
   }

   if (pCreateInfo->pTessellationState)
      anv_finishme("VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO");
   if (pCreateInfo->pViewportState)
      anv_finishme("VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO");
   if (pCreateInfo->pMultisampleState)
      anv_finishme("VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO");

   pipeline->use_repclear = extra && extra->use_repclear;

   anv_compiler_run(device->compiler, pipeline);

   /* FIXME: The compiler dead-codes FS inputs when we don't have a VS, so we
    * hard code this to num_attributes - 2.  This is because the attributes
    * include VUE header and position, which aren't counted as varying
    * inputs. */
   if (pipeline->vs_simd8 == NO_KERNEL) {
      pipeline->wm_prog_data.num_varying_inputs =
         pCreateInfo->pVertexInputState->attributeCount - 2;
   }

   assert(pCreateInfo->pVertexInputState);
   emit_vertex_input(pipeline, pCreateInfo->pVertexInputState);
   assert(pCreateInfo->pInputAssemblyState);
   emit_ia_state(pipeline, pCreateInfo->pInputAssemblyState, extra);
   assert(pCreateInfo->pRasterState);
   emit_rs_state(pipeline, pCreateInfo->pRasterState, extra);
   emit_ds_state(pipeline, pCreateInfo->pDepthStencilState);
   emit_cb_state(pipeline, pCreateInfo->pColorBlendState);

   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_VF_STATISTICS,
                  .StatisticsEnable = true);
   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_HS, .Enable = false);
   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_TE, .TEEnable = false);
   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_DS, .FunctionEnable = false);
   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_STREAMOUT, .SOFunctionEnable = false);

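   /* Statically partition the push constant space between the VS, GS and PS
    * stages. */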
   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_PUSH_CONSTANT_ALLOC_VS,
                  .ConstantBufferOffset = 0,
                  .ConstantBufferSize = 4);
   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_PUSH_CONSTANT_ALLOC_GS,
                  .ConstantBufferOffset = 4,
                  .ConstantBufferSize = 4);
   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_PUSH_CONSTANT_ALLOC_PS,
                  .ConstantBufferOffset = 8,
                  .ConstantBufferSize = 4);

   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_WM_CHROMAKEY,
                  .ChromaKeyKillEnable = false);
   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_SBE_SWIZ);
   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_AA_LINE_PARAMETERS);

   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_CLIP,
                  .ClipEnable = true,
                  .ViewportXYClipTestEnable = !(extra && extra->disable_viewport),
                  .MinimumPointWidth = 0.125,
                  .MaximumPointWidth = 255.875);

   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_WM,
                  .StatisticsEnable = true,
                  .LineEndCapAntialiasingRegionWidth = _05pixels,
                  .LineAntialiasingRegionWidth = _10pixels,
                  .EarlyDepthStencilControl = NORMAL,
                  .ForceThreadDispatchEnable = NORMAL,
                  .PointRasterizationRule = RASTRULE_UPPER_RIGHT,
                  .BarycentricInterpolationMode =
                     pipeline->wm_prog_data.barycentric_interp_modes);

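   /* Multisample state isn't consumed from pCreateInfo yet (see the finishme
    * above), so program single-sample defaults here. */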
   uint32_t samples = 1;
   uint32_t log2_samples = __builtin_ffs(samples) - 1;
   bool enable_sampling = samples > 1 ? true : false;

   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_MULTISAMPLE,
                  .PixelPositionOffsetEnable = enable_sampling,
                  .PixelLocation = CENTER,
                  .NumberofMultisamples = log2_samples);

   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_SAMPLE_MASK,
                  .SampleMask = 0xffff);

   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_URB_VS,
                  .VSURBStartingAddress = pipeline->urb.vs_start,
                  .VSURBEntryAllocationSize = pipeline->urb.vs_size - 1,
                  .VSNumberofURBEntries = pipeline->urb.nr_vs_entries);

   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_URB_GS,
                  .GSURBStartingAddress = pipeline->urb.gs_start,
                  .GSURBEntryAllocationSize = pipeline->urb.gs_size - 1,
                  .GSNumberofURBEntries = pipeline->urb.nr_gs_entries);

   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_URB_HS,
                  .HSURBStartingAddress = pipeline->urb.vs_start,
                  .HSURBEntryAllocationSize = 0,
                  .HSNumberofURBEntries = 0);

   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_URB_DS,
                  .DSURBStartingAddress = pipeline->urb.vs_start,
                  .DSURBEntryAllocationSize = 0,
                  .DSNumberofURBEntries = 0);

   const struct brw_gs_prog_data *gs_prog_data = &pipeline->gs_prog_data;
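   /* Skip the VUE header and position slots in the GS URB entry output. */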
   offset = 1;
   length = (gs_prog_data->base.vue_map.num_slots + 1) / 2 - offset;

   if (pipeline->gs_vec4 == NO_KERNEL)
      anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_GS, .Enable = false);
   else
      anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_GS,
                     .SingleProgramFlow = false,
                     .KernelStartPointer = pipeline->gs_vec4,
                     .VectorMaskEnable = Vmask,
                     .SamplerCount = 0,
                     .BindingTableEntryCount = 0,
                     .ExpectedVertexCount = pipeline->gs_vertex_count,

                     .ScratchSpaceBasePointer = pipeline->scratch_start[VK_SHADER_STAGE_GEOMETRY],
                     .PerThreadScratchSpace = ffs(gs_prog_data->base.base.total_scratch / 2048),

                     .OutputVertexSize = gs_prog_data->output_vertex_size_hwords * 2 - 1,
                     .OutputTopology = gs_prog_data->output_topology,
                     .VertexURBEntryReadLength = gs_prog_data->base.urb_read_length,
                     .DispatchGRFStartRegisterForURBData =
                        gs_prog_data->base.base.dispatch_grf_start_reg,

                     .MaximumNumberofThreads = device->info.max_gs_threads,
                     .ControlDataHeaderSize = gs_prog_data->control_data_header_size_hwords,
                     //pipeline->gs_prog_data.dispatch_mode |
                     .StatisticsEnable = true,
                     .IncludePrimitiveID = gs_prog_data->include_primitive_id,
                     .ReorderMode = TRAILING,
                     .Enable = true,

                     .ControlDataFormat = gs_prog_data->control_data_format,

                     /* FIXME: mesa sets this based on ctx->Transform.ClipPlanesEnabled:
                      *    UserClipDistanceClipTestEnableBitmask_3DSTATE_GS(v)
                      *    UserClipDistanceCullTestEnableBitmask(v)
                      */

                     .VertexURBEntryOutputReadOffset = offset,
                     .VertexURBEntryOutputLength = length);

   const struct brw_vue_prog_data *vue_prog_data = &pipeline->vs_prog_data.base;
   /* Skip the VUE header and position slots */
   offset = 1;
   length = (vue_prog_data->vue_map.num_slots + 1) / 2 - offset;

   if (pipeline->vs_simd8 == NO_KERNEL || (extra && extra->disable_vs))
      anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_VS,
                     .FunctionEnable = false,
                     .VertexURBEntryOutputReadOffset = 1,
                     /* Even if VS is disabled, SBE still gets the amount of
                      * vertex data to read from this field.  We use attribute
                      * count - 1, as we don't count the VUE header here. */
                     .VertexURBEntryOutputLength =
                        DIV_ROUND_UP(pCreateInfo->pVertexInputState->attributeCount - 1, 2));
   else
      anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_VS,
                     .KernelStartPointer = pipeline->vs_simd8,
                     .SingleVertexDispatch = Multiple,
                     .VectorMaskEnable = Dmask,
                     .SamplerCount = 0,
                     .BindingTableEntryCount =
                        vue_prog_data->base.binding_table.size_bytes / 4,
                     .ThreadDispatchPriority = Normal,
                     .FloatingPointMode = IEEE754,
                     .IllegalOpcodeExceptionEnable = false,
                     .AccessesUAV = false,
                     .SoftwareExceptionEnable = false,

                     .ScratchSpaceBasePointer = pipeline->scratch_start[VK_SHADER_STAGE_VERTEX],
                     .PerThreadScratchSpace = ffs(vue_prog_data->base.total_scratch / 2048),

                     .DispatchGRFStartRegisterForURBData =
                        vue_prog_data->base.dispatch_grf_start_reg,
                     .VertexURBEntryReadLength = vue_prog_data->urb_read_length,
                     .VertexURBEntryReadOffset = 0,

                     .MaximumNumberofThreads = device->info.max_vs_threads - 1,
                     .StatisticsEnable = false,
                     .SIMD8DispatchEnable = true,
                     .VertexCacheDisable = false,
                     .FunctionEnable = true,

                     .VertexURBEntryOutputReadOffset = offset,
                     .VertexURBEntryOutputLength = length,
                     .UserClipDistanceClipTestEnableBitmask = 0,
                     .UserClipDistanceCullTestEnableBitmask = 0);

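   /* Select the pixel shader kernel start pointers: a SIMD8 kernel, when
    * present, dispatches from slot 0 and a SIMD16 kernel from slot 2;
    * without a SIMD8 kernel, the SIMD16 kernel takes slot 0 instead. */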
   const struct brw_wm_prog_data *wm_prog_data = &pipeline->wm_prog_data;
   uint32_t ksp0, ksp2, grf_start0, grf_start2;

   ksp2 = 0;
   grf_start2 = 0;
   if (pipeline->ps_simd8 != NO_KERNEL) {
      ksp0 = pipeline->ps_simd8;
      grf_start0 = wm_prog_data->base.dispatch_grf_start_reg;
      if (pipeline->ps_simd16 != NO_KERNEL) {
         ksp2 = pipeline->ps_simd16;
         grf_start2 = wm_prog_data->dispatch_grf_start_reg_16;
      }
   } else if (pipeline->ps_simd16 != NO_KERNEL) {
      ksp0 = pipeline->ps_simd16;
      grf_start0 = wm_prog_data->dispatch_grf_start_reg_16;
   } else {
      unreachable("no ps shader");
   }

   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_PS,
                  .KernelStartPointer0 = ksp0,

                  .SingleProgramFlow = false,
                  .VectorMaskEnable = true,
                  .SamplerCount = 1,

                  .ScratchSpaceBasePointer = pipeline->scratch_start[VK_SHADER_STAGE_FRAGMENT],
                  .PerThreadScratchSpace = ffs(wm_prog_data->base.total_scratch / 2048),

                  .MaximumNumberofThreadsPerPSD = 64 - 2,
                  .PositionXYOffsetSelect = wm_prog_data->uses_pos_offset ?
                     POSOFFSET_SAMPLE : POSOFFSET_NONE,
                  .PushConstantEnable = wm_prog_data->base.nr_params > 0,
                  ._8PixelDispatchEnable = pipeline->ps_simd8 != NO_KERNEL,
                  ._16PixelDispatchEnable = pipeline->ps_simd16 != NO_KERNEL,
                  ._32PixelDispatchEnable = false,

                  .DispatchGRFStartRegisterForConstantSetupData0 = grf_start0,
                  .DispatchGRFStartRegisterForConstantSetupData1 = 0,
                  .DispatchGRFStartRegisterForConstantSetupData2 = grf_start2,

                  .KernelStartPointer1 = 0,
                  .KernelStartPointer2 = ksp2);

   bool per_sample_ps = false;
   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_PS_EXTRA,
                  .PixelShaderValid = true,
                  .PixelShaderKillsPixel = wm_prog_data->uses_kill,
                  .PixelShaderComputedDepthMode = wm_prog_data->computed_depth_mode,
                  .AttributeEnable = wm_prog_data->num_varying_inputs > 0,
                  .oMaskPresenttoRenderTarget = wm_prog_data->uses_omask,
                  .PixelShaderIsPerSample = per_sample_ps);

   *pPipeline = anv_pipeline_to_handle(pipeline);

   return VK_SUCCESS;
}

VkResult anv_DestroyPipeline(
    VkDevice _device,
    VkPipeline _pipeline)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_pipeline, pipeline, _pipeline);

   anv_compiler_free(pipeline);
   anv_reloc_list_finish(&pipeline->batch.relocs, pipeline->device);
   anv_state_stream_finish(&pipeline->program_stream);
   anv_state_pool_free(&device->dynamic_state_pool, pipeline->blend_state);
   anv_device_free(pipeline->device, pipeline);

   return VK_SUCCESS;
}

VkResult anv_CreateGraphicsPipelines(
    VkDevice _device,
    VkPipelineCache pipelineCache,
    uint32_t count,
    const VkGraphicsPipelineCreateInfo* pCreateInfos,
    VkPipeline* pPipelines)
{
   VkResult result = VK_SUCCESS;

   unsigned i = 0;
   for (; i < count; i++) {
      result = anv_pipeline_create(_device, &pCreateInfos[i],
                                   NULL, &pPipelines[i]);
      if (result != VK_SUCCESS) {
         for (unsigned j = 0; j < i; j++) {
            anv_DestroyPipeline(_device, pPipelines[j]);
         }

         return result;
      }
   }

   return VK_SUCCESS;
}

static VkResult anv_compute_pipeline_create(
    VkDevice _device,
    const VkComputePipelineCreateInfo* pCreateInfo,
    VkPipeline* pPipeline)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_pipeline *pipeline;
   VkResult result;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO);

   pipeline = anv_device_alloc(device, sizeof(*pipeline), 8,
                               VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (pipeline == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   pipeline->device = device;
   pipeline->layout = anv_pipeline_layout_from_handle(pCreateInfo->layout);

   result = anv_reloc_list_init(&pipeline->batch.relocs, device);
   if (result != VK_SUCCESS) {
      anv_device_free(device, pipeline);
      return result;
   }
   pipeline->batch.next = pipeline->batch.start = pipeline->batch_data;
   pipeline->batch.end = pipeline->batch.start + sizeof(pipeline->batch_data);

   anv_state_stream_init(&pipeline->program_stream,
                         &device->instruction_block_pool);

   memset(pipeline->shaders, 0, sizeof(pipeline->shaders));

   pipeline->shaders[VK_SHADER_STAGE_COMPUTE] =
      anv_shader_from_handle(pCreateInfo->cs.shader);

   pipeline->use_repclear = false;

   anv_compiler_run(device->compiler, pipeline);

   const struct brw_cs_prog_data *cs_prog_data = &pipeline->cs_prog_data;

   anv_batch_emit(&pipeline->batch, GEN8_MEDIA_VFE_STATE,
                  .ScratchSpaceBasePointer = pipeline->scratch_start[VK_SHADER_STAGE_COMPUTE],
                  .PerThreadScratchSpace = ffs(cs_prog_data->base.total_scratch / 2048),
                  .ScratchSpaceBasePointerHigh = 0,
                  .StackSize = 0,

                  .MaximumNumberofThreads = device->info.max_cs_threads - 1,
                  .NumberofURBEntries = 2,
                  .ResetGatewayTimer = true,
                  .BypassGatewayControl = true,
                  .URBEntryAllocationSize = 2,
                  .CURBEAllocationSize = 0);

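   /* Work out how many threads one workgroup needs and the execution mask
    * for the last, possibly partial, thread of the group. */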
   struct brw_cs_prog_data *prog_data = &pipeline->cs_prog_data;
   uint32_t group_size = prog_data->local_size[0] *
      prog_data->local_size[1] * prog_data->local_size[2];
   pipeline->cs_thread_width_max = DIV_ROUND_UP(group_size, prog_data->simd_size);
   uint32_t remainder = group_size & (prog_data->simd_size - 1);

   if (remainder > 0)
      pipeline->cs_right_mask = ~0u >> (32 - remainder);
   else
      pipeline->cs_right_mask = ~0u >> (32 - prog_data->simd_size);


   *pPipeline = anv_pipeline_to_handle(pipeline);

   return VK_SUCCESS;
}

VkResult anv_CreateComputePipelines(
    VkDevice _device,
    VkPipelineCache pipelineCache,
    uint32_t count,
    const VkComputePipelineCreateInfo* pCreateInfos,
    VkPipeline* pPipelines)
{
   VkResult result = VK_SUCCESS;

   unsigned i = 0;
   for (; i < count; i++) {
      result = anv_compute_pipeline_create(_device, &pCreateInfos[i],
                                           &pPipelines[i]);
      if (result != VK_SUCCESS) {
         for (unsigned j = 0; j < i; j++) {
            anv_DestroyPipeline(_device, pPipelines[j]);
         }

         return result;
      }
   }

   return VK_SUCCESS;
}

// Pipeline layout functions

VkResult anv_CreatePipelineLayout(
    VkDevice _device,
    const VkPipelineLayoutCreateInfo* pCreateInfo,
    VkPipelineLayout* pPipelineLayout)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_pipeline_layout *layout;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO);

   layout = anv_device_alloc(device, sizeof(*layout), 8,
                             VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (layout == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   layout->num_sets = pCreateInfo->descriptorSetCount;

   uint32_t surface_start[VK_SHADER_STAGE_NUM] = { 0, };
   uint32_t sampler_start[VK_SHADER_STAGE_NUM] = { 0, };

   for (uint32_t s = 0; s < VK_SHADER_STAGE_NUM; s++) {
      layout->stage[s].surface_count = 0;
      layout->stage[s].sampler_count = 0;
   }

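   /* Walk the descriptor set layouts, recording where each set's surfaces
    * and samplers start within each stage's binding table and accumulating
    * the per-stage totals. */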
   for (uint32_t i = 0; i < pCreateInfo->descriptorSetCount; i++) {
      ANV_FROM_HANDLE(anv_descriptor_set_layout, set_layout,
                      pCreateInfo->pSetLayouts[i]);

      layout->set[i].layout = set_layout;
      for (uint32_t s = 0; s < VK_SHADER_STAGE_NUM; s++) {
         layout->set[i].surface_start[s] = surface_start[s];
         surface_start[s] += set_layout->stage[s].surface_count;
         layout->set[i].sampler_start[s] = sampler_start[s];
         sampler_start[s] += set_layout->stage[s].sampler_count;

         layout->stage[s].surface_count += set_layout->stage[s].surface_count;
         layout->stage[s].sampler_count += set_layout->stage[s].sampler_count;
      }
   }

   *pPipelineLayout = anv_pipeline_layout_to_handle(layout);

   return VK_SUCCESS;
}

VkResult anv_DestroyPipelineLayout(
    VkDevice _device,
    VkPipelineLayout _pipelineLayout)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_pipeline_layout, pipeline_layout, _pipelineLayout);

   anv_device_free(device, pipeline_layout);

   return VK_SUCCESS;
}