/* vk: Split the dynamic state binding function into one per state
 * [mesa.git] / src / vulkan / pipeline.c */
1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <assert.h>
25 #include <stdbool.h>
26 #include <string.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29
30 #include "private.h"
31
32 // Shader functions
33
34 VkResult anv_CreateShaderModule(
35 VkDevice _device,
36 const VkShaderModuleCreateInfo* pCreateInfo,
37 VkShader* pShaderModule)
38 {
39 ANV_FROM_HANDLE(anv_device, device, _device);
40 struct anv_shader_module *module;
41
42 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO);
43 assert(pCreateInfo->flags == 0);
44
45 module = anv_device_alloc(device, sizeof(*module) + pCreateInfo->codeSize, 8,
46 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
47 if (module == NULL)
48 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
49
50 module->size = pCreateInfo->codeSize;
51 memcpy(module->data, pCreateInfo->pCode, module->size);
52
53 *pShaderModule = anv_shader_module_to_handle(module);
54
55 return VK_SUCCESS;
56 }
57
58 VkResult anv_DestroyShaderModule(
59 VkDevice _device,
60 VkShaderModule _module)
61 {
62 ANV_FROM_HANDLE(anv_device, device, _device);
63 ANV_FROM_HANDLE(anv_shader_module, module, _module);
64
65 anv_device_free(device, module);
66
67 return VK_SUCCESS;
68 }
69
70 VkResult anv_CreateShader(
71 VkDevice _device,
72 const VkShaderCreateInfo* pCreateInfo,
73 VkShader* pShader)
74 {
75 ANV_FROM_HANDLE(anv_device, device, _device);
76 ANV_FROM_HANDLE(anv_shader_module, module, pCreateInfo->module);
77 struct anv_shader *shader;
78
79 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_CREATE_INFO);
80 assert(pCreateInfo->flags == 0);
81
82 size_t name_len = strlen(pCreateInfo->pName);
83
84 if (strcmp(pCreateInfo->pName, "main") != 0) {
85 anv_finishme("Multiple shaders per module not really supported");
86 }
87
88 shader = anv_device_alloc(device, sizeof(*shader) + name_len + 1, 8,
89 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
90 if (shader == NULL)
91 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
92
93 shader->module = module;
94 memcpy(shader->entrypoint, pCreateInfo->pName, name_len + 1);
95
96 *pShader = anv_shader_to_handle(shader);
97
98 return VK_SUCCESS;
99 }
100
101 VkResult anv_DestroyShader(
102 VkDevice _device,
103 VkShader _shader)
104 {
105 ANV_FROM_HANDLE(anv_device, device, _device);
106 ANV_FROM_HANDLE(anv_shader, shader, _shader);
107
108 anv_device_free(device, shader);
109
110 return VK_SUCCESS;
111 }
112
113
VkResult anv_CreatePipelineCache(
    VkDevice                                    device,
    const VkPipelineCacheCreateInfo*            pCreateInfo,
    VkPipelineCache*                            pPipelineCache)
{
   /* Pipeline caching is not implemented yet; return a dummy non-zero
    * handle so callers have something valid-looking to pass around. */
   *pPipelineCache = 1;

   stub_return(VK_SUCCESS);
}
123
VkResult anv_DestroyPipelineCache(
    VkDevice                                    _device,
    VkPipelineCache                             _cache)
{
   /* VkPipelineCache is a dummy object (see anv_CreatePipelineCache);
    * there is nothing to free. */
   return VK_SUCCESS;
}
131
size_t anv_GetPipelineCacheSize(
    VkDevice                                    device,
    VkPipelineCache                             pipelineCache)
{
   /* Caching is unimplemented; report an empty cache. */
   stub_return(0);
}
138
VkResult anv_GetPipelineCacheData(
    VkDevice                                    device,
    VkPipelineCache                             pipelineCache,
    void*                                       pData)
{
   /* No cache contents exist to serialize. */
   stub_return(VK_UNSUPPORTED);
}
146
VkResult anv_MergePipelineCaches(
    VkDevice                                    device,
    VkPipelineCache                             destCache,
    uint32_t                                    srcCacheCount,
    const VkPipelineCache*                      pSrcCaches)
{
   /* Caches are dummies; merging is meaningless until caching exists. */
   stub_return(VK_UNSUPPORTED);
}
155
156 // Pipeline functions
157
158 static void
159 emit_vertex_input(struct anv_pipeline *pipeline,
160 const VkPipelineVertexInputStateCreateInfo *info)
161 {
162 const uint32_t num_dwords = 1 + info->attributeCount * 2;
163 uint32_t *p;
164 bool instancing_enable[32];
165
166 pipeline->vb_used = 0;
167 for (uint32_t i = 0; i < info->bindingCount; i++) {
168 const VkVertexInputBindingDescription *desc =
169 &info->pVertexBindingDescriptions[i];
170
171 pipeline->vb_used |= 1 << desc->binding;
172 pipeline->binding_stride[desc->binding] = desc->strideInBytes;
173
174 /* Step rate is programmed per vertex element (attribute), not
175 * binding. Set up a map of which bindings step per instance, for
176 * reference by vertex element setup. */
177 switch (desc->stepRate) {
178 default:
179 case VK_VERTEX_INPUT_STEP_RATE_VERTEX:
180 instancing_enable[desc->binding] = false;
181 break;
182 case VK_VERTEX_INPUT_STEP_RATE_INSTANCE:
183 instancing_enable[desc->binding] = true;
184 break;
185 }
186 }
187
188 p = anv_batch_emitn(&pipeline->batch, num_dwords,
189 GEN8_3DSTATE_VERTEX_ELEMENTS);
190
191 for (uint32_t i = 0; i < info->attributeCount; i++) {
192 const VkVertexInputAttributeDescription *desc =
193 &info->pVertexAttributeDescriptions[i];
194 const struct anv_format *format = anv_format_for_vk_format(desc->format);
195
196 struct GEN8_VERTEX_ELEMENT_STATE element = {
197 .VertexBufferIndex = desc->binding,
198 .Valid = true,
199 .SourceElementFormat = format->surface_format,
200 .EdgeFlagEnable = false,
201 .SourceElementOffset = desc->offsetInBytes,
202 .Component0Control = VFCOMP_STORE_SRC,
203 .Component1Control = format->num_channels >= 2 ? VFCOMP_STORE_SRC : VFCOMP_STORE_0,
204 .Component2Control = format->num_channels >= 3 ? VFCOMP_STORE_SRC : VFCOMP_STORE_0,
205 .Component3Control = format->num_channels >= 4 ? VFCOMP_STORE_SRC : VFCOMP_STORE_1_FP
206 };
207 GEN8_VERTEX_ELEMENT_STATE_pack(NULL, &p[1 + i * 2], &element);
208
209 anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_VF_INSTANCING,
210 .InstancingEnable = instancing_enable[desc->binding],
211 .VertexElementIndex = i,
212 /* Vulkan so far doesn't have an instance divisor, so
213 * this is always 1 (ignored if not instancing). */
214 .InstanceDataStepRate = 1);
215 }
216
217 anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_VF_SGVS,
218 .VertexIDEnable = pipeline->vs_prog_data.uses_vertexid,
219 .VertexIDComponentNumber = 2,
220 .VertexIDElementOffset = info->bindingCount,
221 .InstanceIDEnable = pipeline->vs_prog_data.uses_instanceid,
222 .InstanceIDComponentNumber = 3,
223 .InstanceIDElementOffset = info->bindingCount);
224 }
225
226 static void
227 emit_ia_state(struct anv_pipeline *pipeline,
228 const VkPipelineIaStateCreateInfo *info,
229 const struct anv_pipeline_create_info *extra)
230 {
231 static const uint32_t vk_to_gen_primitive_type[] = {
232 [VK_PRIMITIVE_TOPOLOGY_POINT_LIST] = _3DPRIM_POINTLIST,
233 [VK_PRIMITIVE_TOPOLOGY_LINE_LIST] = _3DPRIM_LINELIST,
234 [VK_PRIMITIVE_TOPOLOGY_LINE_STRIP] = _3DPRIM_LINESTRIP,
235 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST] = _3DPRIM_TRILIST,
236 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP] = _3DPRIM_TRISTRIP,
237 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN] = _3DPRIM_TRIFAN,
238 [VK_PRIMITIVE_TOPOLOGY_LINE_LIST_ADJ] = _3DPRIM_LINELIST_ADJ,
239 [VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_ADJ] = _3DPRIM_LINESTRIP_ADJ,
240 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_ADJ] = _3DPRIM_TRILIST_ADJ,
241 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_ADJ] = _3DPRIM_TRISTRIP_ADJ,
242 [VK_PRIMITIVE_TOPOLOGY_PATCH] = _3DPRIM_PATCHLIST_1
243 };
244 uint32_t topology = vk_to_gen_primitive_type[info->topology];
245
246 if (extra && extra->use_rectlist)
247 topology = _3DPRIM_RECTLIST;
248
249 struct GEN8_3DSTATE_VF vf = {
250 GEN8_3DSTATE_VF_header,
251 .IndexedDrawCutIndexEnable = info->primitiveRestartEnable,
252 };
253 GEN8_3DSTATE_VF_pack(NULL, pipeline->state_vf, &vf);
254
255 anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_VF_TOPOLOGY,
256 .PrimitiveTopologyType = topology);
257 }
258
/* Program rasterizer state: pre-pack 3DSTATE_SF and 3DSTATE_RASTER into the
 * pipeline object and emit 3DSTATE_SBE into the pipeline batch.
 */
static void
emit_rs_state(struct anv_pipeline *pipeline,
              const VkPipelineRsStateCreateInfo *info,
              const struct anv_pipeline_create_info *extra)
{
   /* Vulkan raster enums -> Gen8 hardware encodings. */
   static const uint32_t vk_to_gen_cullmode[] = {
      [VK_CULL_MODE_NONE] = CULLMODE_NONE,
      [VK_CULL_MODE_FRONT] = CULLMODE_FRONT,
      [VK_CULL_MODE_BACK] = CULLMODE_BACK,
      [VK_CULL_MODE_FRONT_AND_BACK] = CULLMODE_BOTH
   };

   static const uint32_t vk_to_gen_fillmode[] = {
      [VK_FILL_MODE_POINTS] = RASTER_POINT,
      [VK_FILL_MODE_WIREFRAME] = RASTER_WIREFRAME,
      [VK_FILL_MODE_SOLID] = RASTER_SOLID
   };

   static const uint32_t vk_to_gen_front_face[] = {
      [VK_FRONT_FACE_CCW] = CounterClockwise,
      [VK_FRONT_FACE_CW] = Clockwise
   };

   /* 3DSTATE_SF is packed into pipeline->state_sf rather than emitted.
    * NOTE(review): presumably combined with dynamic state at bind time,
    * like state_wm_depth_stencil below — confirm against the command
    * buffer code. */
   struct GEN8_3DSTATE_SF sf = {
      GEN8_3DSTATE_SF_header,
      .ViewportTransformEnable = !(extra && extra->disable_viewport),
      .TriangleStripListProvokingVertexSelect = 0,
      .LineStripListProvokingVertexSelect = 0,
      .TriangleFanProvokingVertexSelect = 0,
      /* Point size comes from the vertex shader only if it writes one. */
      .PointWidthSource = pipeline->writes_point_size ? Vertex : State,
      .PointWidth = 1.0,
   };

   /* FINISHME: VkBool32 rasterizerDiscardEnable; */

   GEN8_3DSTATE_SF_pack(NULL, pipeline->state_sf, &sf);

   /* 3DSTATE_RASTER is likewise pre-packed into the pipeline object. */
   struct GEN8_3DSTATE_RASTER raster = {
      GEN8_3DSTATE_RASTER_header,
      .FrontWinding = vk_to_gen_front_face[info->frontFace],
      .CullMode = vk_to_gen_cullmode[info->cullMode],
      /* Vulkan has a single fillMode; apply it to both faces. */
      .FrontFaceFillMode = vk_to_gen_fillmode[info->fillMode],
      .BackFaceFillMode = vk_to_gen_fillmode[info->fillMode],
      .ScissorRectangleEnable = !(extra && extra->disable_scissor),
      .ViewportZClipTestEnable = info->depthClipEnable
   };

   GEN8_3DSTATE_RASTER_pack(NULL, pipeline->state_raster, &raster);

   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_SBE,
                  .ForceVertexURBEntryReadLength = false,
                  .ForceVertexURBEntryReadOffset = false,
                  .PointSpriteTextureCoordinateOrigin = UPPERLEFT,
                  .NumberofSFOutputAttributes =
                     pipeline->wm_prog_data.num_varying_inputs);
}
316
/* Build the GEN8_BLEND_STATE table (one header plus one entry per color
 * attachment) in the dynamic state pool and emit the pointer to it.
 */
static void
emit_cb_state(struct anv_pipeline *pipeline,
              const VkPipelineCbStateCreateInfo *info)
{
   struct anv_device *device = pipeline->device;

   /* VkLogicOp -> Gen8 LOGICOP_* encoding. */
   static const uint32_t vk_to_gen_logic_op[] = {
      [VK_LOGIC_OP_COPY] = LOGICOP_COPY,
      [VK_LOGIC_OP_CLEAR] = LOGICOP_CLEAR,
      [VK_LOGIC_OP_AND] = LOGICOP_AND,
      [VK_LOGIC_OP_AND_REVERSE] = LOGICOP_AND_REVERSE,
      [VK_LOGIC_OP_AND_INVERTED] = LOGICOP_AND_INVERTED,
      [VK_LOGIC_OP_NOOP] = LOGICOP_NOOP,
      [VK_LOGIC_OP_XOR] = LOGICOP_XOR,
      [VK_LOGIC_OP_OR] = LOGICOP_OR,
      [VK_LOGIC_OP_NOR] = LOGICOP_NOR,
      [VK_LOGIC_OP_EQUIV] = LOGICOP_EQUIV,
      [VK_LOGIC_OP_INVERT] = LOGICOP_INVERT,
      [VK_LOGIC_OP_OR_REVERSE] = LOGICOP_OR_REVERSE,
      [VK_LOGIC_OP_COPY_INVERTED] = LOGICOP_COPY_INVERTED,
      [VK_LOGIC_OP_OR_INVERTED] = LOGICOP_OR_INVERTED,
      [VK_LOGIC_OP_NAND] = LOGICOP_NAND,
      [VK_LOGIC_OP_SET] = LOGICOP_SET,
   };

   /* VkBlend -> Gen8 BLENDFACTOR_* encoding. */
   static const uint32_t vk_to_gen_blend[] = {
      [VK_BLEND_ZERO] = BLENDFACTOR_ZERO,
      [VK_BLEND_ONE] = BLENDFACTOR_ONE,
      [VK_BLEND_SRC_COLOR] = BLENDFACTOR_SRC_COLOR,
      [VK_BLEND_ONE_MINUS_SRC_COLOR] = BLENDFACTOR_INV_SRC_COLOR,
      [VK_BLEND_DEST_COLOR] = BLENDFACTOR_DST_COLOR,
      [VK_BLEND_ONE_MINUS_DEST_COLOR] = BLENDFACTOR_INV_DST_COLOR,
      [VK_BLEND_SRC_ALPHA] = BLENDFACTOR_SRC_ALPHA,
      [VK_BLEND_ONE_MINUS_SRC_ALPHA] = BLENDFACTOR_INV_SRC_ALPHA,
      [VK_BLEND_DEST_ALPHA] = BLENDFACTOR_DST_ALPHA,
      [VK_BLEND_ONE_MINUS_DEST_ALPHA] = BLENDFACTOR_INV_DST_ALPHA,
      [VK_BLEND_CONSTANT_COLOR] = BLENDFACTOR_CONST_COLOR,
      [VK_BLEND_ONE_MINUS_CONSTANT_COLOR] = BLENDFACTOR_INV_CONST_COLOR,
      [VK_BLEND_CONSTANT_ALPHA] = BLENDFACTOR_CONST_ALPHA,
      [VK_BLEND_ONE_MINUS_CONSTANT_ALPHA] = BLENDFACTOR_INV_CONST_ALPHA,
      [VK_BLEND_SRC_ALPHA_SATURATE] = BLENDFACTOR_SRC_ALPHA_SATURATE,
      [VK_BLEND_SRC1_COLOR] = BLENDFACTOR_SRC1_COLOR,
      [VK_BLEND_ONE_MINUS_SRC1_COLOR] = BLENDFACTOR_INV_SRC1_COLOR,
      [VK_BLEND_SRC1_ALPHA] = BLENDFACTOR_SRC1_ALPHA,
      [VK_BLEND_ONE_MINUS_SRC1_ALPHA] = BLENDFACTOR_INV_SRC1_ALPHA,
   };

   /* VkBlendOp -> Gen8 BLENDFUNCTION_* encoding. */
   static const uint32_t vk_to_gen_blend_op[] = {
      [VK_BLEND_OP_ADD] = BLENDFUNCTION_ADD,
      [VK_BLEND_OP_SUBTRACT] = BLENDFUNCTION_SUBTRACT,
      [VK_BLEND_OP_REVERSE_SUBTRACT] = BLENDFUNCTION_REVERSE_SUBTRACT,
      [VK_BLEND_OP_MIN] = BLENDFUNCTION_MIN,
      [VK_BLEND_OP_MAX] = BLENDFUNCTION_MAX,
   };

   /* One dword header plus two dwords per attachment entry. */
   uint32_t num_dwords = 1 + info->attachmentCount * 2;
   pipeline->blend_state =
      anv_state_pool_alloc(&device->dynamic_state_pool, num_dwords * 4, 64);

   struct GEN8_BLEND_STATE blend_state = {
      .AlphaToCoverageEnable = info->alphaToCoverageEnable,
   };

   uint32_t *state = pipeline->blend_state.map;
   GEN8_BLEND_STATE_pack(NULL, state, &blend_state);

   for (uint32_t i = 0; i < info->attachmentCount; i++) {
      const VkPipelineCbAttachmentState *a = &info->pAttachments[i];

      struct GEN8_BLEND_STATE_ENTRY entry = {
         .LogicOpEnable = info->logicOpEnable,
         .LogicOpFunction = vk_to_gen_logic_op[info->logicOp],
         .ColorBufferBlendEnable = a->blendEnable,
         .PreBlendSourceOnlyClampEnable = false,
         .PreBlendColorClampEnable = false,
         .PostBlendColorClampEnable = false,
         .SourceBlendFactor = vk_to_gen_blend[a->srcBlendColor],
         .DestinationBlendFactor = vk_to_gen_blend[a->destBlendColor],
         .ColorBlendFunction = vk_to_gen_blend_op[a->blendOpColor],
         .SourceAlphaBlendFactor = vk_to_gen_blend[a->srcBlendAlpha],
         .DestinationAlphaBlendFactor = vk_to_gen_blend[a->destBlendAlpha],
         .AlphaBlendFunction = vk_to_gen_blend_op[a->blendOpAlpha],
         /* Hardware has per-channel write *disables*; invert the Vulkan
          * write mask bits. */
         .WriteDisableAlpha = !(a->channelWriteMask & VK_CHANNEL_A_BIT),
         .WriteDisableRed = !(a->channelWriteMask & VK_CHANNEL_R_BIT),
         .WriteDisableGreen = !(a->channelWriteMask & VK_CHANNEL_G_BIT),
         .WriteDisableBlue = !(a->channelWriteMask & VK_CHANNEL_B_BIT),
      };

      GEN8_BLEND_STATE_ENTRY_pack(NULL, state + i * 2 + 1, &entry);
   }

   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_BLEND_STATE_POINTERS,
                  .BlendStatePointer = pipeline->blend_state.offset,
                  .BlendStatePointerValid = true);
}
412
/* VkCompareOp -> Gen8 COMPAREFUNCTION_* encoding, used for both the depth
 * test and the front/back stencil test functions in emit_ds_state(). */
static const uint32_t vk_to_gen_compare_op[] = {
   [VK_COMPARE_OP_NEVER] = COMPAREFUNCTION_NEVER,
   [VK_COMPARE_OP_LESS] = COMPAREFUNCTION_LESS,
   [VK_COMPARE_OP_EQUAL] = COMPAREFUNCTION_EQUAL,
   [VK_COMPARE_OP_LESS_EQUAL] = COMPAREFUNCTION_LEQUAL,
   [VK_COMPARE_OP_GREATER] = COMPAREFUNCTION_GREATER,
   [VK_COMPARE_OP_NOT_EQUAL] = COMPAREFUNCTION_NOTEQUAL,
   [VK_COMPARE_OP_GREATER_EQUAL] = COMPAREFUNCTION_GEQUAL,
   [VK_COMPARE_OP_ALWAYS] = COMPAREFUNCTION_ALWAYS,
};
423
424 static const uint32_t vk_to_gen_stencil_op[] = {
425 [VK_STENCIL_OP_KEEP] = 0,
426 [VK_STENCIL_OP_ZERO] = 0,
427 [VK_STENCIL_OP_REPLACE] = 0,
428 [VK_STENCIL_OP_INC_CLAMP] = 0,
429 [VK_STENCIL_OP_DEC_CLAMP] = 0,
430 [VK_STENCIL_OP_INVERT] = 0,
431 [VK_STENCIL_OP_INC_WRAP] = 0,
432 [VK_STENCIL_OP_DEC_WRAP] = 0
433 };
434
/* Pre-pack 3DSTATE_WM_DEPTH_STENCIL into the pipeline object from the
 * Vulkan depth/stencil state.  The packed dwords are later OR'd with
 * dynamic state (reference values/masks) by the command buffer, so with no
 * DS info the buffer is zeroed rather than left uninitialized.
 */
static void
emit_ds_state(struct anv_pipeline *pipeline,
              const VkPipelineDsStateCreateInfo *info)
{
   if (info == NULL) {
      /* We're going to OR this together with the dynamic state. We need
       * to make sure it's initialized to something useful.
       */
      memset(pipeline->state_wm_depth_stencil, 0,
             sizeof(pipeline->state_wm_depth_stencil));
      return;
   }

   /* VkBool32 depthBoundsEnable; // optional (depth_bounds_test) */

   struct GEN8_3DSTATE_WM_DEPTH_STENCIL wm_depth_stencil = {
      .DepthTestEnable = info->depthTestEnable,
      .DepthBufferWriteEnable = info->depthWriteEnable,
      .DepthTestFunction = vk_to_gen_compare_op[info->depthCompareOp],
      .DoubleSidedStencilEnable = true,

      .StencilTestEnable = info->stencilTestEnable,
      /* Front-face stencil ops. */
      .StencilFailOp = vk_to_gen_stencil_op[info->front.stencilFailOp],
      .StencilPassDepthPassOp = vk_to_gen_stencil_op[info->front.stencilPassOp],
      .StencilPassDepthFailOp = vk_to_gen_stencil_op[info->front.stencilDepthFailOp],
      .StencilTestFunction = vk_to_gen_compare_op[info->front.stencilCompareOp],
      /* Back-face stencil ops. */
      .BackfaceStencilFailOp = vk_to_gen_stencil_op[info->back.stencilFailOp],
      .BackfaceStencilPassDepthPassOp = vk_to_gen_stencil_op[info->back.stencilPassOp],
      .BackfaceStencilPassDepthFailOp =vk_to_gen_stencil_op[info->back.stencilDepthFailOp],
      .BackfaceStencilTestFunction = vk_to_gen_compare_op[info->back.stencilCompareOp],
   };

   GEN8_3DSTATE_WM_DEPTH_STENCIL_pack(NULL, pipeline->state_wm_depth_stencil, &wm_depth_stencil);
}
469
470 static void
471 anv_pipeline_destroy(struct anv_device *device,
472 struct anv_object *object,
473 VkObjectType obj_type)
474 {
475 struct anv_pipeline *pipeline = (struct anv_pipeline*) object;
476
477 assert(obj_type == VK_OBJECT_TYPE_PIPELINE);
478
479 anv_DestroyPipeline(anv_device_to_handle(device),
480 anv_pipeline_to_handle(pipeline));
481 }
482
/* Build a graphics pipeline: allocate the object, compile the shaders, and
 * pre-record all pipeline-static GEN8 state into the pipeline's embedded
 * batch so binding the pipeline is a batch copy.  `extra` carries overrides
 * used by internal meta pipelines (clears/blits) and may be NULL.
 */
VkResult
anv_pipeline_create(
    VkDevice                                    _device,
    const VkGraphicsPipelineCreateInfo*         pCreateInfo,
    const struct anv_pipeline_create_info *     extra,
    VkPipeline*                                 pPipeline)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_pipeline *pipeline;
   VkResult result;
   uint32_t offset, length;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO);

   pipeline = anv_device_alloc(device, sizeof(*pipeline), 8,
                               VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (pipeline == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   pipeline->base.destructor = anv_pipeline_destroy;
   pipeline->device = device;
   pipeline->layout = anv_pipeline_layout_from_handle(pCreateInfo->layout);
   memset(pipeline->shaders, 0, sizeof(pipeline->shaders));

   result = anv_reloc_list_init(&pipeline->batch.relocs, device);
   if (result != VK_SUCCESS) {
      anv_device_free(device, pipeline);
      return result;
   }
   /* The pipeline batch lives in a fixed-size buffer embedded in the
    * pipeline object itself. */
   pipeline->batch.next = pipeline->batch.start = pipeline->batch_data;
   pipeline->batch.end = pipeline->batch.start + sizeof(pipeline->batch_data);

   anv_state_stream_init(&pipeline->program_stream,
                         &device->instruction_block_pool);

   /* Collect per-stage shader objects; unreferenced stages stay NULL. */
   for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
      pipeline->shaders[pCreateInfo->pStages[i].stage] =
         anv_shader_from_handle(pCreateInfo->pStages[i].shader);
   }

   if (pCreateInfo->pTessState)
      anv_finishme("VK_STRUCTURE_TYPE_PIPELINE_TESS_STATE_CREATE_INFO");
   if (pCreateInfo->pVpState)
      anv_finishme("VK_STRUCTURE_TYPE_PIPELINE_VP_STATE_CREATE_INFO");
   if (pCreateInfo->pMsState)
      anv_finishme("VK_STRUCTURE_TYPE_PIPELINE_MS_STATE_CREATE_INFO");

   pipeline->use_repclear = extra && extra->use_repclear;

   /* Compiles all attached shader stages and fills the *_prog_data,
    * kernel pointers and URB layout read below. */
   anv_compiler_run(device->compiler, pipeline);

   /* FIXME: The compiler dead-codes FS inputs when we don't have a VS, so we
    * hard code this to num_attributes - 2. This is because the attributes
    * include VUE header and position, which aren't counted as varying
    * inputs. */
   if (pipeline->vs_simd8 == NO_KERNEL) {
      pipeline->wm_prog_data.num_varying_inputs =
         pCreateInfo->pVertexInputState->attributeCount - 2;
   }

   assert(pCreateInfo->pVertexInputState);
   emit_vertex_input(pipeline, pCreateInfo->pVertexInputState);
   assert(pCreateInfo->pIaState);
   emit_ia_state(pipeline, pCreateInfo->pIaState, extra);
   assert(pCreateInfo->pRsState);
   emit_rs_state(pipeline, pCreateInfo->pRsState, extra);
   emit_ds_state(pipeline, pCreateInfo->pDsState);
   emit_cb_state(pipeline, pCreateInfo->pCbState);

   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_VF_STATISTICS,
                  .StatisticsEnable = true);
   /* Tessellation and stream output are not supported yet. */
   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_HS, .Enable = false);
   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_TE, .TEEnable = false);
   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_DS, .FunctionEnable = false);
   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_STREAMOUT, .SOFunctionEnable = false);

   /* Static carve-up of the push-constant space between VS, GS and PS. */
   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_PUSH_CONSTANT_ALLOC_VS,
                  .ConstantBufferOffset = 0,
                  .ConstantBufferSize = 4);
   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_PUSH_CONSTANT_ALLOC_GS,
                  .ConstantBufferOffset = 4,
                  .ConstantBufferSize = 4);
   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_PUSH_CONSTANT_ALLOC_PS,
                  .ConstantBufferOffset = 8,
                  .ConstantBufferSize = 4);

   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_WM_CHROMAKEY,
                  .ChromaKeyKillEnable = false);
   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_SBE_SWIZ);
   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_AA_LINE_PARAMETERS);

   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_CLIP,
                  .ClipEnable = true,
                  .ViewportXYClipTestEnable = !(extra && extra->disable_viewport),
                  .MinimumPointWidth = 0.125,
                  .MaximumPointWidth = 255.875);

   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_WM,
                  .StatisticsEnable = true,
                  .LineEndCapAntialiasingRegionWidth = _05pixels,
                  .LineAntialiasingRegionWidth = _10pixels,
                  .EarlyDepthStencilControl = NORMAL,
                  .ForceThreadDispatchEnable = NORMAL,
                  .PointRasterizationRule = RASTRULE_UPPER_RIGHT,
                  .BarycentricInterpolationMode =
                     pipeline->wm_prog_data.barycentric_interp_modes);

   /* Multisampling is not implemented yet; program single-sample state. */
   uint32_t samples = 1;
   uint32_t log2_samples = __builtin_ffs(samples) - 1;
   bool enable_sampling = samples > 1 ? true : false;

   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_MULTISAMPLE,
                  .PixelPositionOffsetEnable = enable_sampling,
                  .PixelLocation = CENTER,
                  .NumberofMultisamples = log2_samples);

   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_SAMPLE_MASK,
                  .SampleMask = 0xffff);

   /* URB allocation comes from the compiler.  HS and DS are disabled but
    * still get zero-sized URB programming. */
   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_URB_VS,
                  .VSURBStartingAddress = pipeline->urb.vs_start,
                  .VSURBEntryAllocationSize = pipeline->urb.vs_size - 1,
                  .VSNumberofURBEntries = pipeline->urb.nr_vs_entries);

   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_URB_GS,
                  .GSURBStartingAddress = pipeline->urb.gs_start,
                  .GSURBEntryAllocationSize = pipeline->urb.gs_size - 1,
                  .GSNumberofURBEntries = pipeline->urb.nr_gs_entries);

   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_URB_HS,
                  .HSURBStartingAddress = pipeline->urb.vs_start,
                  .HSURBEntryAllocationSize = 0,
                  .HSNumberofURBEntries = 0);

   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_URB_DS,
                  .DSURBStartingAddress = pipeline->urb.vs_start,
                  .DSURBEntryAllocationSize = 0,
                  .DSNumberofURBEntries = 0);

   const struct brw_gs_prog_data *gs_prog_data = &pipeline->gs_prog_data;
   /* Skip the VUE header and position slots in the GS output layout. */
   offset = 1;
   length = (gs_prog_data->base.vue_map.num_slots + 1) / 2 - offset;

   if (pipeline->gs_vec4 == NO_KERNEL)
      anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_GS, .Enable = false);
   else
      anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_GS,
                     .SingleProgramFlow = false,
                     .KernelStartPointer = pipeline->gs_vec4,
                     .VectorMaskEnable = Vmask,
                     .SamplerCount = 0,
                     .BindingTableEntryCount = 0,
                     .ExpectedVertexCount = pipeline->gs_vertex_count,

                     .ScratchSpaceBasePointer = pipeline->scratch_start[VK_SHADER_STAGE_GEOMETRY],
                     .PerThreadScratchSpace = ffs(gs_prog_data->base.base.total_scratch / 2048),

                     .OutputVertexSize = gs_prog_data->output_vertex_size_hwords * 2 - 1,
                     .OutputTopology = gs_prog_data->output_topology,
                     .VertexURBEntryReadLength = gs_prog_data->base.urb_read_length,
                     .DispatchGRFStartRegisterForURBData =
                        gs_prog_data->base.base.dispatch_grf_start_reg,

                     .MaximumNumberofThreads = device->info.max_gs_threads,
                     .ControlDataHeaderSize = gs_prog_data->control_data_header_size_hwords,
                     //pipeline->gs_prog_data.dispatch_mode |
                     .StatisticsEnable = true,
                     .IncludePrimitiveID = gs_prog_data->include_primitive_id,
                     .ReorderMode = TRAILING,
                     .Enable = true,

                     .ControlDataFormat = gs_prog_data->control_data_format,

                     /* FIXME: mesa sets this based on ctx->Transform.ClipPlanesEnabled:
                      * UserClipDistanceClipTestEnableBitmask_3DSTATE_GS(v)
                      * UserClipDistanceCullTestEnableBitmask(v)
                      */

                     .VertexURBEntryOutputReadOffset = offset,
                     .VertexURBEntryOutputLength = length);

   const struct brw_vue_prog_data *vue_prog_data = &pipeline->vs_prog_data.base;
   /* Skip the VUE header and position slots */
   offset = 1;
   length = (vue_prog_data->vue_map.num_slots + 1) / 2 - offset;

   if (pipeline->vs_simd8 == NO_KERNEL || (extra && extra->disable_vs))
      anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_VS,
                     .FunctionEnable = false,
                     .VertexURBEntryOutputReadOffset = 1,
                     /* Even if VS is disabled, SBE still gets the amount of
                      * vertex data to read from this field. We use attribute
                      * count - 1, as we don't count the VUE header here. */
                     .VertexURBEntryOutputLength =
                        DIV_ROUND_UP(pCreateInfo->pVertexInputState->attributeCount - 1, 2));
   else
      anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_VS,
                     .KernelStartPointer = pipeline->vs_simd8,
                     .SingleVertexDispatch = Multiple,
                     .VectorMaskEnable = Dmask,
                     .SamplerCount = 0,
                     .BindingTableEntryCount =
                        vue_prog_data->base.binding_table.size_bytes / 4,
                     .ThreadDispatchPriority = Normal,
                     .FloatingPointMode = IEEE754,
                     .IllegalOpcodeExceptionEnable = false,
                     .AccessesUAV = false,
                     .SoftwareExceptionEnable = false,

                     .ScratchSpaceBasePointer = pipeline->scratch_start[VK_SHADER_STAGE_VERTEX],
                     .PerThreadScratchSpace = ffs(vue_prog_data->base.total_scratch / 2048),

                     .DispatchGRFStartRegisterForURBData =
                        vue_prog_data->base.dispatch_grf_start_reg,
                     .VertexURBEntryReadLength = vue_prog_data->urb_read_length,
                     .VertexURBEntryReadOffset = 0,

                     .MaximumNumberofThreads = device->info.max_vs_threads - 1,
                     .StatisticsEnable = false,
                     .SIMD8DispatchEnable = true,
                     .VertexCacheDisable = false,
                     .FunctionEnable = true,

                     .VertexURBEntryOutputReadOffset = offset,
                     .VertexURBEntryOutputLength = length,
                     .UserClipDistanceClipTestEnableBitmask = 0,
                     .UserClipDistanceCullTestEnableBitmask = 0);

   const struct brw_wm_prog_data *wm_prog_data = &pipeline->wm_prog_data;
   uint32_t ksp0, ksp2, grf_start0, grf_start2;

   /* Kernel slot selection: SIMD8 goes in slot 0 when present (with SIMD16
    * in slot 2); otherwise SIMD16 takes slot 0.  At least one must exist. */
   ksp2 = 0;
   grf_start2 = 0;
   if (pipeline->ps_simd8 != NO_KERNEL) {
      ksp0 = pipeline->ps_simd8;
      grf_start0 = wm_prog_data->base.dispatch_grf_start_reg;
      if (pipeline->ps_simd16 != NO_KERNEL) {
         ksp2 = pipeline->ps_simd16;
         grf_start2 = wm_prog_data->dispatch_grf_start_reg_16;
      }
   } else if (pipeline->ps_simd16 != NO_KERNEL) {
      ksp0 = pipeline->ps_simd16;
      grf_start0 = wm_prog_data->dispatch_grf_start_reg_16;
   } else {
      unreachable("no ps shader");
   }

   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_PS,
                  .KernelStartPointer0 = ksp0,

                  .SingleProgramFlow = false,
                  .VectorMaskEnable = true,
                  .SamplerCount = 1,

                  .ScratchSpaceBasePointer = pipeline->scratch_start[VK_SHADER_STAGE_FRAGMENT],
                  .PerThreadScratchSpace = ffs(wm_prog_data->base.total_scratch / 2048),

                  .MaximumNumberofThreadsPerPSD = 64 - 2,
                  .PositionXYOffsetSelect = wm_prog_data->uses_pos_offset ?
                     POSOFFSET_SAMPLE: POSOFFSET_NONE,
                  .PushConstantEnable = wm_prog_data->base.nr_params > 0,
                  ._8PixelDispatchEnable = pipeline->ps_simd8 != NO_KERNEL,
                  ._16PixelDispatchEnable = pipeline->ps_simd16 != NO_KERNEL,
                  ._32PixelDispatchEnable = false,

                  .DispatchGRFStartRegisterForConstantSetupData0 = grf_start0,
                  .DispatchGRFStartRegisterForConstantSetupData1 = 0,
                  .DispatchGRFStartRegisterForConstantSetupData2 = grf_start2,

                  .KernelStartPointer1 = 0,
                  .KernelStartPointer2 = ksp2);

   bool per_sample_ps = false;
   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_PS_EXTRA,
                  .PixelShaderValid = true,
                  .PixelShaderKillsPixel = wm_prog_data->uses_kill,
                  .PixelShaderComputedDepthMode = wm_prog_data->computed_depth_mode,
                  .AttributeEnable = wm_prog_data->num_varying_inputs > 0,
                  .oMaskPresenttoRenderTarget = wm_prog_data->uses_omask,
                  .PixelShaderIsPerSample = per_sample_ps);

   *pPipeline = anv_pipeline_to_handle(pipeline);

   return VK_SUCCESS;
}
768
VkResult anv_DestroyPipeline(
    VkDevice                                    _device,
    VkPipeline                                  _pipeline)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_pipeline, pipeline, _pipeline);

   /* Tear down everything anv_pipeline_create / anv_compute_pipeline_create
    * set up: compiler state, relocation list, program stream, blend state
    * allocation, then the pipeline object itself. */
   anv_compiler_free(pipeline);
   anv_reloc_list_finish(&pipeline->batch.relocs, pipeline->device);
   anv_state_stream_finish(&pipeline->program_stream);
   anv_state_pool_free(&device->dynamic_state_pool, pipeline->blend_state);
   anv_device_free(pipeline->device, pipeline);

   return VK_SUCCESS;
}
784
785 VkResult anv_CreateGraphicsPipelines(
786 VkDevice _device,
787 VkPipelineCache pipelineCache,
788 uint32_t count,
789 const VkGraphicsPipelineCreateInfo* pCreateInfos,
790 VkPipeline* pPipelines)
791 {
792 ANV_FROM_HANDLE(anv_device, device, _device);
793 VkResult result = VK_SUCCESS;
794
795 unsigned i = 0;
796 for (; i < count; i++) {
797 result = anv_pipeline_create(_device, &pCreateInfos[i],
798 NULL, &pPipelines[i]);
799 if (result != VK_SUCCESS) {
800 for (unsigned j = 0; j < i; j++) {
801 anv_pipeline_destroy(device, (struct anv_object *)pPipelines[j],
802 VK_OBJECT_TYPE_PIPELINE);
803 }
804
805 return result;
806 }
807 }
808
809 return VK_SUCCESS;
810 }
811
812 static VkResult anv_compute_pipeline_create(
813 VkDevice _device,
814 const VkComputePipelineCreateInfo* pCreateInfo,
815 VkPipeline* pPipeline)
816 {
817 ANV_FROM_HANDLE(anv_device, device, _device);
818 struct anv_pipeline *pipeline;
819 VkResult result;
820
821 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO);
822
823 pipeline = anv_device_alloc(device, sizeof(*pipeline), 8,
824 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
825 if (pipeline == NULL)
826 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
827
828 pipeline->base.destructor = anv_pipeline_destroy;
829 pipeline->device = device;
830 pipeline->layout = anv_pipeline_layout_from_handle(pCreateInfo->layout);
831
832 result = anv_reloc_list_init(&pipeline->batch.relocs, device);
833 if (result != VK_SUCCESS) {
834 anv_device_free(device, pipeline);
835 return result;
836 }
837 pipeline->batch.next = pipeline->batch.start = pipeline->batch_data;
838 pipeline->batch.end = pipeline->batch.start + sizeof(pipeline->batch_data);
839
840 anv_state_stream_init(&pipeline->program_stream,
841 &device->instruction_block_pool);
842
843 memset(pipeline->shaders, 0, sizeof(pipeline->shaders));
844
845 pipeline->shaders[VK_SHADER_STAGE_COMPUTE] =
846 anv_shader_from_handle(pCreateInfo->cs.shader);
847
848 pipeline->use_repclear = false;
849
850 anv_compiler_run(device->compiler, pipeline);
851
852 const struct brw_cs_prog_data *cs_prog_data = &pipeline->cs_prog_data;
853
854 anv_batch_emit(&pipeline->batch, GEN8_MEDIA_VFE_STATE,
855 .ScratchSpaceBasePointer = pipeline->scratch_start[VK_SHADER_STAGE_FRAGMENT],
856 .PerThreadScratchSpace = ffs(cs_prog_data->base.total_scratch / 2048),
857 .ScratchSpaceBasePointerHigh = 0,
858 .StackSize = 0,
859
860 .MaximumNumberofThreads = device->info.max_cs_threads - 1,
861 .NumberofURBEntries = 2,
862 .ResetGatewayTimer = true,
863 .BypassGatewayControl = true,
864 .URBEntryAllocationSize = 2,
865 .CURBEAllocationSize = 0);
866
867 struct brw_cs_prog_data *prog_data = &pipeline->cs_prog_data;
868 uint32_t group_size = prog_data->local_size[0] *
869 prog_data->local_size[1] * prog_data->local_size[2];
870 pipeline->cs_thread_width_max = DIV_ROUND_UP(group_size, prog_data->simd_size);
871 uint32_t remainder = group_size & (prog_data->simd_size - 1);
872
873 if (remainder > 0)
874 pipeline->cs_right_mask = ~0u >> (32 - remainder);
875 else
876 pipeline->cs_right_mask = ~0u >> (32 - prog_data->simd_size);
877
878
879 *pPipeline = anv_pipeline_to_handle(pipeline);
880
881 return VK_SUCCESS;
882 }
883
884 VkResult anv_CreateComputePipelines(
885 VkDevice _device,
886 VkPipelineCache pipelineCache,
887 uint32_t count,
888 const VkComputePipelineCreateInfo* pCreateInfos,
889 VkPipeline* pPipelines)
890 {
891 ANV_FROM_HANDLE(anv_device, device, _device);
892 VkResult result = VK_SUCCESS;
893
894 unsigned i = 0;
895 for (; i < count; i++) {
896 result = anv_compute_pipeline_create(_device, &pCreateInfos[i],
897 &pPipelines[i]);
898 if (result != VK_SUCCESS) {
899 for (unsigned j = 0; j < i; j++) {
900 anv_pipeline_destroy(device, (struct anv_object *)pPipelines[j],
901 VK_OBJECT_TYPE_PIPELINE);
902 }
903
904 return result;
905 }
906 }
907
908 return VK_SUCCESS;
909 }
910
911 // Pipeline layout functions
912
913 VkResult anv_CreatePipelineLayout(
914 VkDevice _device,
915 const VkPipelineLayoutCreateInfo* pCreateInfo,
916 VkPipelineLayout* pPipelineLayout)
917 {
918 ANV_FROM_HANDLE(anv_device, device, _device);
919 struct anv_pipeline_layout *layout;
920
921 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO);
922
923 layout = anv_device_alloc(device, sizeof(*layout), 8,
924 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
925 if (layout == NULL)
926 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
927
928 layout->num_sets = pCreateInfo->descriptorSetCount;
929
930 uint32_t surface_start[VK_SHADER_STAGE_NUM] = { 0, };
931 uint32_t sampler_start[VK_SHADER_STAGE_NUM] = { 0, };
932
933 for (uint32_t s = 0; s < VK_SHADER_STAGE_NUM; s++) {
934 layout->stage[s].surface_count = 0;
935 layout->stage[s].sampler_count = 0;
936 }
937
938 for (uint32_t i = 0; i < pCreateInfo->descriptorSetCount; i++) {
939 ANV_FROM_HANDLE(anv_descriptor_set_layout, set_layout,
940 pCreateInfo->pSetLayouts[i]);
941
942 layout->set[i].layout = set_layout;
943 for (uint32_t s = 0; s < VK_SHADER_STAGE_NUM; s++) {
944 layout->set[i].surface_start[s] = surface_start[s];
945 surface_start[s] += set_layout->stage[s].surface_count;
946 layout->set[i].sampler_start[s] = sampler_start[s];
947 sampler_start[s] += set_layout->stage[s].sampler_count;
948
949 layout->stage[s].surface_count += set_layout->stage[s].surface_count;
950 layout->stage[s].sampler_count += set_layout->stage[s].sampler_count;
951 }
952 }
953
954 *pPipelineLayout = anv_pipeline_layout_to_handle(layout);
955
956 return VK_SUCCESS;
957 }
958
959 VkResult anv_DestroyPipelineLayout(
960 VkDevice _device,
961 VkPipelineLayout _pipelineLayout)
962 {
963 ANV_FROM_HANDLE(anv_device, device, _device);
964 ANV_FROM_HANDLE(anv_pipeline_layout, pipeline_layout, _pipelineLayout);
965
966 anv_device_free(device, pipeline_layout);
967
968 return VK_SUCCESS;
969 }