vk/vulkan.h: Switch from GetImageSubresourceInfo to GetImageSubresourceLayout
[mesa.git] / src / vulkan / pipeline.c
1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <assert.h>
25 #include <stdbool.h>
26 #include <string.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29
30 #include "private.h"
31
32 // Shader functions
33
/* vkCreateShader: wrap the application-supplied shader code in an
 * anv_shader object.  The allocation is sized to hold the struct plus
 * codeSize bytes, and the code is copied into the trailing shader->data
 * storage.  Ownership of the copy belongs to the shader object.
 */
VkResult anv_CreateShader(
    VkDevice                                    _device,
    const VkShaderCreateInfo*                   pCreateInfo,
    VkShader*                                   pShader)
{
   struct anv_device *device = (struct anv_device *) _device;
   struct anv_shader *shader;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_CREATE_INFO);

   /* Header plus codeSize bytes of trailing storage for the code copy. */
   shader = anv_device_alloc(device, sizeof(*shader) + pCreateInfo->codeSize, 8,
                             VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (shader == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   shader->size = pCreateInfo->codeSize;
   memcpy(shader->data, pCreateInfo->pCode, shader->size);

   *pShader = (VkShader) shader;

   return VK_SUCCESS;
}
56
57 // Pipeline functions
58
59 static void
60 emit_vertex_input(struct anv_pipeline *pipeline, VkPipelineVertexInputStateCreateInfo *info)
61 {
62 const uint32_t num_dwords = 1 + info->attributeCount * 2;
63 uint32_t *p;
64 bool instancing_enable[32];
65
66 pipeline->vb_used = 0;
67 for (uint32_t i = 0; i < info->bindingCount; i++) {
68 const VkVertexInputBindingDescription *desc =
69 &info->pVertexBindingDescriptions[i];
70
71 pipeline->vb_used |= 1 << desc->binding;
72 pipeline->binding_stride[desc->binding] = desc->strideInBytes;
73
74 /* Step rate is programmed per vertex element (attribute), not
75 * binding. Set up a map of which bindings step per instance, for
76 * reference by vertex element setup. */
77 switch (desc->stepRate) {
78 default:
79 case VK_VERTEX_INPUT_STEP_RATE_VERTEX:
80 instancing_enable[desc->binding] = false;
81 break;
82 case VK_VERTEX_INPUT_STEP_RATE_INSTANCE:
83 instancing_enable[desc->binding] = true;
84 break;
85 }
86 }
87
88 p = anv_batch_emitn(&pipeline->batch, num_dwords,
89 GEN8_3DSTATE_VERTEX_ELEMENTS);
90
91 for (uint32_t i = 0; i < info->attributeCount; i++) {
92 const VkVertexInputAttributeDescription *desc =
93 &info->pVertexAttributeDescriptions[i];
94 const struct anv_format *format = anv_format_for_vk_format(desc->format);
95
96 struct GEN8_VERTEX_ELEMENT_STATE element = {
97 .VertexBufferIndex = desc->binding,
98 .Valid = true,
99 .SourceElementFormat = format->surface_format,
100 .EdgeFlagEnable = false,
101 .SourceElementOffset = desc->offsetInBytes,
102 .Component0Control = VFCOMP_STORE_SRC,
103 .Component1Control = format->num_channels >= 2 ? VFCOMP_STORE_SRC : VFCOMP_STORE_0,
104 .Component2Control = format->num_channels >= 3 ? VFCOMP_STORE_SRC : VFCOMP_STORE_0,
105 .Component3Control = format->num_channels >= 4 ? VFCOMP_STORE_SRC : VFCOMP_STORE_1_FP
106 };
107 GEN8_VERTEX_ELEMENT_STATE_pack(NULL, &p[1 + i * 2], &element);
108
109 anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_VF_INSTANCING,
110 .InstancingEnable = instancing_enable[desc->binding],
111 .VertexElementIndex = i,
112 /* Vulkan so far doesn't have an instance divisor, so
113 * this is always 1 (ignored if not instancing). */
114 .InstanceDataStepRate = 1);
115 }
116
117 anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_VF_SGVS,
118 .VertexIDEnable = pipeline->vs_prog_data.uses_vertexid,
119 .VertexIDComponentNumber = 2,
120 .VertexIDElementOffset = info->bindingCount,
121 .InstanceIDEnable = pipeline->vs_prog_data.uses_instanceid,
122 .InstanceIDComponentNumber = 3,
123 .InstanceIDElementOffset = info->bindingCount);
124 }
125
126 static void
127 emit_ia_state(struct anv_pipeline *pipeline,
128 VkPipelineIaStateCreateInfo *info,
129 const struct anv_pipeline_create_info *extra)
130 {
131 static const uint32_t vk_to_gen_primitive_type[] = {
132 [VK_PRIMITIVE_TOPOLOGY_POINT_LIST] = _3DPRIM_POINTLIST,
133 [VK_PRIMITIVE_TOPOLOGY_LINE_LIST] = _3DPRIM_LINELIST,
134 [VK_PRIMITIVE_TOPOLOGY_LINE_STRIP] = _3DPRIM_LINESTRIP,
135 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST] = _3DPRIM_TRILIST,
136 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP] = _3DPRIM_TRISTRIP,
137 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN] = _3DPRIM_TRIFAN,
138 [VK_PRIMITIVE_TOPOLOGY_LINE_LIST_ADJ] = _3DPRIM_LINELIST_ADJ,
139 [VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_ADJ] = _3DPRIM_LINESTRIP_ADJ,
140 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_ADJ] = _3DPRIM_TRILIST_ADJ,
141 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_ADJ] = _3DPRIM_TRISTRIP_ADJ,
142 [VK_PRIMITIVE_TOPOLOGY_PATCH] = _3DPRIM_PATCHLIST_1
143 };
144 uint32_t topology = vk_to_gen_primitive_type[info->topology];
145
146 if (extra && extra->use_rectlist)
147 topology = _3DPRIM_RECTLIST;
148
149 struct GEN8_3DSTATE_VF vf = {
150 GEN8_3DSTATE_VF_header,
151 .IndexedDrawCutIndexEnable = info->primitiveRestartEnable,
152 };
153 GEN8_3DSTATE_VF_pack(NULL, pipeline->state_vf, &vf);
154
155 anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_VF_TOPOLOGY,
156 .PrimitiveTopologyType = topology);
157 }
158
/* Translate the rasterization create-info: 3DSTATE_SF and 3DSTATE_RASTER
 * are packed into pipeline->state_sf / pipeline->state_raster for later
 * emission (presumably merged with dynamic state by the command buffer —
 * TODO confirm), while 3DSTATE_SBE is emitted into the pipeline batch
 * directly.
 */
static void
emit_rs_state(struct anv_pipeline *pipeline, VkPipelineRsStateCreateInfo *info,
              const struct anv_pipeline_create_info *extra)
{
   /* Vulkan -> GEN8 enum translation tables. */
   static const uint32_t vk_to_gen_cullmode[] = {
      [VK_CULL_MODE_NONE] = CULLMODE_NONE,
      [VK_CULL_MODE_FRONT] = CULLMODE_FRONT,
      [VK_CULL_MODE_BACK] = CULLMODE_BACK,
      [VK_CULL_MODE_FRONT_AND_BACK] = CULLMODE_BOTH
   };

   static const uint32_t vk_to_gen_fillmode[] = {
      [VK_FILL_MODE_POINTS] = RASTER_POINT,
      [VK_FILL_MODE_WIREFRAME] = RASTER_WIREFRAME,
      [VK_FILL_MODE_SOLID] = RASTER_SOLID
   };

   static const uint32_t vk_to_gen_front_face[] = {
      [VK_FRONT_FACE_CCW] = CounterClockwise,
      [VK_FRONT_FACE_CW] = Clockwise
   };

   struct GEN8_3DSTATE_SF sf = {
      GEN8_3DSTATE_SF_header,
      /* Meta operations pre-transform their vertices; skip the viewport. */
      .ViewportTransformEnable = !(extra && extra->disable_viewport),
      .TriangleStripListProvokingVertexSelect = 0,
      .LineStripListProvokingVertexSelect = 0,
      .TriangleFanProvokingVertexSelect = 0,
      /* Take point size from the VS output if the shader writes it. */
      .PointWidthSource = pipeline->writes_point_size ? Vertex : State,
      .PointWidth = 1.0,
   };

   /* FINISHME: bool32_t rasterizerDiscardEnable; */

   GEN8_3DSTATE_SF_pack(NULL, pipeline->state_sf, &sf);

   struct GEN8_3DSTATE_RASTER raster = {
      GEN8_3DSTATE_RASTER_header,
      .FrontWinding = vk_to_gen_front_face[info->frontFace],
      .CullMode = vk_to_gen_cullmode[info->cullMode],
      /* Vulkan has a single fill mode; apply it to both faces. */
      .FrontFaceFillMode = vk_to_gen_fillmode[info->fillMode],
      .BackFaceFillMode = vk_to_gen_fillmode[info->fillMode],
      .ScissorRectangleEnable = !(extra && extra->disable_scissor),
      .ViewportZClipTestEnable = info->depthClipEnable
   };

   GEN8_3DSTATE_RASTER_pack(NULL, pipeline->state_raster, &raster);

   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_SBE,
                  .ForceVertexURBEntryReadLength = false,
                  .ForceVertexURBEntryReadOffset = false,
                  .PointSpriteTextureCoordinateOrigin = UPPERLEFT,
                  .NumberofSFOutputAttributes =
                     pipeline->wm_prog_data.num_varying_inputs);

}
215
/* Build the GEN8 BLEND_STATE table — a one-dword header followed by a
 * two-dword entry per color attachment — in the dynamic state pool, then
 * emit 3DSTATE_BLEND_STATE_POINTERS referencing it.
 */
static void
emit_cb_state(struct anv_pipeline *pipeline, VkPipelineCbStateCreateInfo *info)
{
   struct anv_device *device = pipeline->device;

   /* VkLogicOp -> GEN8 LOGICOP_* encoding. */
   static const uint32_t vk_to_gen_logic_op[] = {
      [VK_LOGIC_OP_COPY] = LOGICOP_COPY,
      [VK_LOGIC_OP_CLEAR] = LOGICOP_CLEAR,
      [VK_LOGIC_OP_AND] = LOGICOP_AND,
      [VK_LOGIC_OP_AND_REVERSE] = LOGICOP_AND_REVERSE,
      [VK_LOGIC_OP_AND_INVERTED] = LOGICOP_AND_INVERTED,
      [VK_LOGIC_OP_NOOP] = LOGICOP_NOOP,
      [VK_LOGIC_OP_XOR] = LOGICOP_XOR,
      [VK_LOGIC_OP_OR] = LOGICOP_OR,
      [VK_LOGIC_OP_NOR] = LOGICOP_NOR,
      [VK_LOGIC_OP_EQUIV] = LOGICOP_EQUIV,
      [VK_LOGIC_OP_INVERT] = LOGICOP_INVERT,
      [VK_LOGIC_OP_OR_REVERSE] = LOGICOP_OR_REVERSE,
      [VK_LOGIC_OP_COPY_INVERTED] = LOGICOP_COPY_INVERTED,
      [VK_LOGIC_OP_OR_INVERTED] = LOGICOP_OR_INVERTED,
      [VK_LOGIC_OP_NAND] = LOGICOP_NAND,
      [VK_LOGIC_OP_SET] = LOGICOP_SET,
   };

   /* VkBlend -> GEN8 BLENDFACTOR_* encoding. */
   static const uint32_t vk_to_gen_blend[] = {
      [VK_BLEND_ZERO] = BLENDFACTOR_ZERO,
      [VK_BLEND_ONE] = BLENDFACTOR_ONE,
      [VK_BLEND_SRC_COLOR] = BLENDFACTOR_SRC_COLOR,
      [VK_BLEND_ONE_MINUS_SRC_COLOR] = BLENDFACTOR_INV_SRC_COLOR,
      [VK_BLEND_DEST_COLOR] = BLENDFACTOR_DST_COLOR,
      [VK_BLEND_ONE_MINUS_DEST_COLOR] = BLENDFACTOR_INV_DST_COLOR,
      [VK_BLEND_SRC_ALPHA] = BLENDFACTOR_SRC_ALPHA,
      [VK_BLEND_ONE_MINUS_SRC_ALPHA] = BLENDFACTOR_INV_SRC_ALPHA,
      [VK_BLEND_DEST_ALPHA] = BLENDFACTOR_DST_ALPHA,
      [VK_BLEND_ONE_MINUS_DEST_ALPHA] = BLENDFACTOR_INV_DST_ALPHA,
      [VK_BLEND_CONSTANT_COLOR] = BLENDFACTOR_CONST_COLOR,
      [VK_BLEND_ONE_MINUS_CONSTANT_COLOR] = BLENDFACTOR_INV_CONST_COLOR,
      [VK_BLEND_CONSTANT_ALPHA] = BLENDFACTOR_CONST_ALPHA,
      [VK_BLEND_ONE_MINUS_CONSTANT_ALPHA] = BLENDFACTOR_INV_CONST_ALPHA,
      [VK_BLEND_SRC_ALPHA_SATURATE] = BLENDFACTOR_SRC_ALPHA_SATURATE,
      [VK_BLEND_SRC1_COLOR] = BLENDFACTOR_SRC1_COLOR,
      [VK_BLEND_ONE_MINUS_SRC1_COLOR] = BLENDFACTOR_INV_SRC1_COLOR,
      [VK_BLEND_SRC1_ALPHA] = BLENDFACTOR_SRC1_ALPHA,
      [VK_BLEND_ONE_MINUS_SRC1_ALPHA] = BLENDFACTOR_INV_SRC1_ALPHA,
   };

   /* VkBlendOp -> GEN8 BLENDFUNCTION_* encoding. */
   static const uint32_t vk_to_gen_blend_op[] = {
      [VK_BLEND_OP_ADD] = BLENDFUNCTION_ADD,
      [VK_BLEND_OP_SUBTRACT] = BLENDFUNCTION_SUBTRACT,
      [VK_BLEND_OP_REVERSE_SUBTRACT] = BLENDFUNCTION_REVERSE_SUBTRACT,
      [VK_BLEND_OP_MIN] = BLENDFUNCTION_MIN,
      [VK_BLEND_OP_MAX] = BLENDFUNCTION_MAX,
   };

   /* Header dword plus two dwords per attachment entry. */
   uint32_t num_dwords = 1 + info->attachmentCount * 2;
   pipeline->blend_state =
      anv_state_pool_alloc(&device->dynamic_state_pool, num_dwords * 4, 64);

   struct GEN8_BLEND_STATE blend_state = {
      .AlphaToCoverageEnable = info->alphaToCoverageEnable,
   };

   uint32_t *state = pipeline->blend_state.map;
   GEN8_BLEND_STATE_pack(NULL, state, &blend_state);

   for (uint32_t i = 0; i < info->attachmentCount; i++) {
      const VkPipelineCbAttachmentState *a = &info->pAttachments[i];

      struct GEN8_BLEND_STATE_ENTRY entry = {
         /* Logic op is a pipeline-wide setting replicated to every entry. */
         .LogicOpEnable = info->logicOpEnable,
         .LogicOpFunction = vk_to_gen_logic_op[info->logicOp],
         .ColorBufferBlendEnable = a->blendEnable,
         .PreBlendSourceOnlyClampEnable = false,
         .PreBlendColorClampEnable = false,
         .PostBlendColorClampEnable = false,
         .SourceBlendFactor = vk_to_gen_blend[a->srcBlendColor],
         .DestinationBlendFactor = vk_to_gen_blend[a->destBlendColor],
         .ColorBlendFunction = vk_to_gen_blend_op[a->blendOpColor],
         .SourceAlphaBlendFactor = vk_to_gen_blend[a->srcBlendAlpha],
         .DestinationAlphaBlendFactor = vk_to_gen_blend[a->destBlendAlpha],
         .AlphaBlendFunction = vk_to_gen_blend_op[a->blendOpAlpha],
         /* Hardware uses write-disable bits; invert the Vulkan write mask. */
         .WriteDisableAlpha = !(a->channelWriteMask & VK_CHANNEL_A_BIT),
         .WriteDisableRed = !(a->channelWriteMask & VK_CHANNEL_R_BIT),
         .WriteDisableGreen = !(a->channelWriteMask & VK_CHANNEL_G_BIT),
         .WriteDisableBlue = !(a->channelWriteMask & VK_CHANNEL_B_BIT),
      };

      /* Entry i lives after the header at dword offset 1 + 2*i. */
      GEN8_BLEND_STATE_ENTRY_pack(NULL, state + i * 2 + 1, &entry);
   }

   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_BLEND_STATE_POINTERS,
                  .BlendStatePointer = pipeline->blend_state.offset,
                  .BlendStatePointerValid = true);
}
310
/* VkCompareOp -> GEN8 COMPAREFUNCTION_* encoding; used for both the depth
 * test function and the front/back stencil test functions below.
 */
static const uint32_t vk_to_gen_compare_op[] = {
   [VK_COMPARE_OP_NEVER] = COMPAREFUNCTION_NEVER,
   [VK_COMPARE_OP_LESS] = COMPAREFUNCTION_LESS,
   [VK_COMPARE_OP_EQUAL] = COMPAREFUNCTION_EQUAL,
   [VK_COMPARE_OP_LESS_EQUAL] = COMPAREFUNCTION_LEQUAL,
   [VK_COMPARE_OP_GREATER] = COMPAREFUNCTION_GREATER,
   [VK_COMPARE_OP_NOT_EQUAL] = COMPAREFUNCTION_NOTEQUAL,
   [VK_COMPARE_OP_GREATER_EQUAL] = COMPAREFUNCTION_GEQUAL,
   [VK_COMPARE_OP_ALWAYS] = COMPAREFUNCTION_ALWAYS,
};
321
322 static const uint32_t vk_to_gen_stencil_op[] = {
323 [VK_STENCIL_OP_KEEP] = 0,
324 [VK_STENCIL_OP_ZERO] = 0,
325 [VK_STENCIL_OP_REPLACE] = 0,
326 [VK_STENCIL_OP_INC_CLAMP] = 0,
327 [VK_STENCIL_OP_DEC_CLAMP] = 0,
328 [VK_STENCIL_OP_INVERT] = 0,
329 [VK_STENCIL_OP_INC_WRAP] = 0,
330 [VK_STENCIL_OP_DEC_WRAP] = 0
331 };
332
/* Pack 3DSTATE_WM_DEPTH_STENCIL into pipeline->state_wm_depth_stencil.
 * Nothing is emitted into the batch here: the packed dwords are OR'd with
 * dynamic state later, which is why a NULL info packs to all zeroes instead
 * of being skipped.
 */
static void
emit_ds_state(struct anv_pipeline *pipeline, VkPipelineDsStateCreateInfo *info)
{
   if (info == NULL) {
      /* We're going to OR this together with the dynamic state. We need
       * to make sure it's initialized to something useful.
       */
      memset(pipeline->state_wm_depth_stencil, 0,
             sizeof(pipeline->state_wm_depth_stencil));
      return;
   }

   /* bool32_t depthBoundsEnable; // optional (depth_bounds_test) */

   struct GEN8_3DSTATE_WM_DEPTH_STENCIL wm_depth_stencil = {
      .DepthTestEnable = info->depthTestEnable,
      .DepthBufferWriteEnable = info->depthWriteEnable,
      .DepthTestFunction = vk_to_gen_compare_op[info->depthCompareOp],
      .DoubleSidedStencilEnable = true,

      .StencilTestEnable = info->stencilTestEnable,
      .StencilFailOp = vk_to_gen_stencil_op[info->front.stencilFailOp],
      .StencilPassDepthPassOp = vk_to_gen_stencil_op[info->front.stencilPassOp],
      .StencilPassDepthFailOp = vk_to_gen_stencil_op[info->front.stencilDepthFailOp],
      .StencilTestFunction = vk_to_gen_compare_op[info->front.stencilCompareOp],
      .BackfaceStencilFailOp = vk_to_gen_stencil_op[info->back.stencilFailOp],
      .BackfaceStencilPassDepthPassOp = vk_to_gen_stencil_op[info->back.stencilPassOp],
      .BackfaceStencilPassDepthFailOp =vk_to_gen_stencil_op[info->back.stencilDepthFailOp],
      .BackfaceStencilTestFunction = vk_to_gen_compare_op[info->back.stencilCompareOp],
   };

   GEN8_3DSTATE_WM_DEPTH_STENCIL_pack(NULL, pipeline->state_wm_depth_stencil, &wm_depth_stencil);
}
366
/* vkCreateGraphicsPipeline: thin public entry point.  All the work happens
 * in anv_pipeline_create; the NULL `extra` means no meta-operation
 * overrides are applied.
 */
VkResult anv_CreateGraphicsPipeline(
    VkDevice                                    device,
    const VkGraphicsPipelineCreateInfo*         pCreateInfo,
    VkPipeline*                                 pPipeline)
{
   return anv_pipeline_create(device, pCreateInfo, NULL, pPipeline);
}
374
/* Destructor installed in pipeline->base.destructor.  Releases, in order,
 * the compiler data, the relocation list, the program stream, the blend
 * state allocation, and finally the pipeline object itself.
 *
 * NOTE(review): compute pipelines (anv_CreateComputePipeline) never
 * initialize pipeline->blend_state, yet it is freed here — presumably this
 * frees an uninitialized state struct for them; verify.
 */
static void
anv_pipeline_destroy(struct anv_device *device,
                     struct anv_object *object,
                     VkObjectType obj_type)
{
   struct anv_pipeline *pipeline = (struct anv_pipeline*) object;

   assert(obj_type == VK_OBJECT_TYPE_PIPELINE);

   anv_compiler_free(pipeline);
   anv_reloc_list_finish(&pipeline->batch.relocs, pipeline->device);
   anv_state_stream_finish(&pipeline->program_stream);
   anv_state_pool_free(&device->dynamic_state_pool, pipeline->blend_state);
   anv_device_free(pipeline->device, pipeline);
}
390
/* Shared graphics-pipeline builder, used both by vkCreateGraphicsPipeline
 * and by the driver's internal meta paths (a non-NULL `extra` can disable
 * viewport/scissor/VS, force repclear, or switch drawing to RECTLIST).
 *
 * Walks the create-info structs chained off pCreateInfo->pNext, runs the
 * shader compiler, then records all GEN8 pipeline state packets into
 * pipeline->batch for later playback by the command buffer.
 */
VkResult
anv_pipeline_create(
    VkDevice                                    _device,
    const VkGraphicsPipelineCreateInfo*         pCreateInfo,
    const struct anv_pipeline_create_info *     extra,
    VkPipeline*                                 pPipeline)
{
   struct anv_device *device = (struct anv_device *) _device;
   struct anv_pipeline *pipeline;
   const struct anv_common *common;
   VkPipelineShaderStageCreateInfo *shader_create_info;
   VkPipelineIaStateCreateInfo *ia_info = NULL;
   VkPipelineRsStateCreateInfo *rs_info = NULL;
   VkPipelineDsStateCreateInfo *ds_info = NULL;
   VkPipelineCbStateCreateInfo *cb_info = NULL;
   VkPipelineVertexInputStateCreateInfo *vi_info = NULL;
   VkResult result;
   uint32_t offset, length;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO);

   pipeline = anv_device_alloc(device, sizeof(*pipeline), 8,
                               VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (pipeline == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   pipeline->base.destructor = anv_pipeline_destroy;
   pipeline->device = device;
   pipeline->layout = (struct anv_pipeline_layout *) pCreateInfo->layout;
   memset(pipeline->shaders, 0, sizeof(pipeline->shaders));

   result = anv_reloc_list_init(&pipeline->batch.relocs, device);
   if (result != VK_SUCCESS) {
      anv_device_free(device, pipeline);
      return result;
   }
   /* The pipeline's batch lives in fixed storage inside the pipeline. */
   pipeline->batch.next = pipeline->batch.start = pipeline->batch_data;
   pipeline->batch.end = pipeline->batch.start + sizeof(pipeline->batch_data);

   anv_state_stream_init(&pipeline->program_stream,
                         &device->instruction_block_pool);

   /* Collect the per-topic create-info structs from the pNext chain. */
   for (common = pCreateInfo->pNext; common; common = common->pNext) {
      switch (common->sType) {
      case VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO:
         vi_info = (VkPipelineVertexInputStateCreateInfo *) common;
         break;
      case VK_STRUCTURE_TYPE_PIPELINE_IA_STATE_CREATE_INFO:
         ia_info = (VkPipelineIaStateCreateInfo *) common;
         break;
      case VK_STRUCTURE_TYPE_PIPELINE_TESS_STATE_CREATE_INFO:
         anv_finishme("VK_STRUCTURE_TYPE_PIPELINE_TESS_STATE_CREATE_INFO");
         break;
      case VK_STRUCTURE_TYPE_PIPELINE_VP_STATE_CREATE_INFO:
         anv_finishme("VK_STRUCTURE_TYPE_PIPELINE_VP_STATE_CREATE_INFO");
         break;
      case VK_STRUCTURE_TYPE_PIPELINE_RS_STATE_CREATE_INFO:
         rs_info = (VkPipelineRsStateCreateInfo *) common;
         break;
      case VK_STRUCTURE_TYPE_PIPELINE_MS_STATE_CREATE_INFO:
         anv_finishme("VK_STRUCTURE_TYPE_PIPELINE_MS_STATE_CREATE_INFO");
         break;
      case VK_STRUCTURE_TYPE_PIPELINE_CB_STATE_CREATE_INFO:
         cb_info = (VkPipelineCbStateCreateInfo *) common;
         break;
      case VK_STRUCTURE_TYPE_PIPELINE_DS_STATE_CREATE_INFO:
         ds_info = (VkPipelineDsStateCreateInfo *) common;
         break;
      case VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO:
         /* One chained struct per shader stage; stash the shader object. */
         shader_create_info = (VkPipelineShaderStageCreateInfo *) common;
         pipeline->shaders[shader_create_info->shader.stage] =
            (struct anv_shader *) shader_create_info->shader.shader;
         break;
      default:
         break;
      }
   }

   pipeline->use_repclear = extra && extra->use_repclear;

   anv_compiler_run(device->compiler, pipeline);

   /* FIXME: The compiler dead-codes FS inputs when we don't have a VS, so we
    * hard code this to num_attributes - 2. This is because the attributes
    * include VUE header and position, which aren't counted as varying
    * inputs. */
   /* NOTE(review): vi_info is dereferenced here before the assert(vi_info)
    * below — a create-info chain with no vertex-input struct would crash on
    * this line first.  Consider hoisting the asserts above this point. */
   if (pipeline->vs_simd8 == NO_KERNEL)
      pipeline->wm_prog_data.num_varying_inputs = vi_info->attributeCount - 2;

   assert(vi_info);
   emit_vertex_input(pipeline, vi_info);
   assert(ia_info);
   emit_ia_state(pipeline, ia_info, extra);
   assert(rs_info);
   emit_rs_state(pipeline, rs_info, extra);
   /* ds_info may be NULL (emit_ds_state handles it).  NOTE(review):
    * emit_cb_state dereferences cb_info unconditionally — confirm all
    * callers always chain CB state. */
   emit_ds_state(pipeline, ds_info);
   emit_cb_state(pipeline, cb_info);

   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_VF_STATISTICS,
                   .StatisticsEnable = true);
   /* Tessellation and streamout are not supported: disable those stages. */
   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_HS, .Enable = false);
   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_TE, .TEEnable = false);
   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_DS, .FunctionEnable = false);
   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_STREAMOUT, .SOFunctionEnable = false);

   /* Static partition of the push-constant space between VS/GS/PS
    * (offset/size in hardware allocation units — presumably 2kB chunks per
    * the PRM; confirm). */
   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_PUSH_CONSTANT_ALLOC_VS,
                  .ConstantBufferOffset = 0,
                  .ConstantBufferSize = 4);
   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_PUSH_CONSTANT_ALLOC_GS,
                  .ConstantBufferOffset = 4,
                  .ConstantBufferSize = 4);
   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_PUSH_CONSTANT_ALLOC_PS,
                  .ConstantBufferOffset = 8,
                  .ConstantBufferSize = 4);

   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_WM_CHROMAKEY,
                  .ChromaKeyKillEnable = false);
   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_SBE_SWIZ);
   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_AA_LINE_PARAMETERS);

   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_CLIP,
                  .ClipEnable = true,
                  .ViewportXYClipTestEnable = !(extra && extra->disable_viewport),
                  .MinimumPointWidth = 0.125,
                  .MaximumPointWidth = 255.875);

   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_WM,
                  .StatisticsEnable = true,
                  .LineEndCapAntialiasingRegionWidth = _05pixels,
                  .LineAntialiasingRegionWidth = _10pixels,
                  .EarlyDepthStencilControl = NORMAL,
                  .ForceThreadDispatchEnable = NORMAL,
                  .PointRasterizationRule = RASTRULE_UPPER_RIGHT,
                  .BarycentricInterpolationMode =
                     pipeline->wm_prog_data.barycentric_interp_modes);

   /* Multisampling is not wired up yet; hard-coded to single-sampled. */
   uint32_t samples = 1;
   uint32_t log2_samples = __builtin_ffs(samples) - 1;
   bool enable_sampling = samples > 1 ? true : false;

   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_MULTISAMPLE,
                  .PixelPositionOffsetEnable = enable_sampling,
                  .PixelLocation = CENTER,
                  .NumberofMultisamples = log2_samples);

   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_SAMPLE_MASK,
                  .SampleMask = 0xffff);

   /* URB layout computed elsewhere into pipeline->urb; HS/DS get empty
    * allocations since tessellation is disabled above. */
   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_URB_VS,
                  .VSURBStartingAddress = pipeline->urb.vs_start,
                  .VSURBEntryAllocationSize = pipeline->urb.vs_size - 1,
                  .VSNumberofURBEntries = pipeline->urb.nr_vs_entries);

   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_URB_GS,
                  .GSURBStartingAddress = pipeline->urb.gs_start,
                  .GSURBEntryAllocationSize = pipeline->urb.gs_size - 1,
                  .GSNumberofURBEntries = pipeline->urb.nr_gs_entries);

   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_URB_HS,
                  .HSURBStartingAddress = pipeline->urb.vs_start,
                  .HSURBEntryAllocationSize = 0,
                  .HSNumberofURBEntries = 0);

   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_URB_DS,
                  .DSURBStartingAddress = pipeline->urb.vs_start,
                  .DSURBEntryAllocationSize = 0,
                  .DSNumberofURBEntries = 0);

   const struct brw_gs_prog_data *gs_prog_data = &pipeline->gs_prog_data;
   /* Skip the VUE header and position slots (one slot pair). */
   offset = 1;
   length = (gs_prog_data->base.vue_map.num_slots + 1) / 2 - offset;

   if (pipeline->gs_vec4 == NO_KERNEL)
      anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_GS, .Enable = false);
   else
      anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_GS,
                     .SingleProgramFlow = false,
                     .KernelStartPointer = pipeline->gs_vec4,
                     .VectorMaskEnable = Vmask,
                     .SamplerCount = 0,
                     .BindingTableEntryCount = 0,
                     .ExpectedVertexCount = pipeline->gs_vertex_count,

                     .ScratchSpaceBasePointer = pipeline->scratch_start[VK_SHADER_STAGE_GEOMETRY],
                     .PerThreadScratchSpace = ffs(gs_prog_data->base.base.total_scratch / 2048),

                     .OutputVertexSize = gs_prog_data->output_vertex_size_hwords * 2 - 1,
                     .OutputTopology = gs_prog_data->output_topology,
                     .VertexURBEntryReadLength = gs_prog_data->base.urb_read_length,
                     .DispatchGRFStartRegisterForURBData =
                        gs_prog_data->base.base.dispatch_grf_start_reg,

                     .MaximumNumberofThreads = device->info.max_gs_threads,
                     .ControlDataHeaderSize = gs_prog_data->control_data_header_size_hwords,
                     //pipeline->gs_prog_data.dispatch_mode |
                     .StatisticsEnable = true,
                     .IncludePrimitiveID = gs_prog_data->include_primitive_id,
                     .ReorderMode = TRAILING,
                     .Enable = true,

                     .ControlDataFormat = gs_prog_data->control_data_format,

                     /* FIXME: mesa sets this based on ctx->Transform.ClipPlanesEnabled:
                      * UserClipDistanceClipTestEnableBitmask_3DSTATE_GS(v)
                      * UserClipDistanceCullTestEnableBitmask(v)
                      */

                     .VertexURBEntryOutputReadOffset = offset,
                     .VertexURBEntryOutputLength = length);

   const struct brw_vue_prog_data *vue_prog_data = &pipeline->vs_prog_data.base;
   /* Skip the VUE header and position slots */
   offset = 1;
   length = (vue_prog_data->vue_map.num_slots + 1) / 2 - offset;

   if (pipeline->vs_simd8 == NO_KERNEL || (extra && extra->disable_vs))
      anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_VS,
                     .FunctionEnable = false,
                     .VertexURBEntryOutputReadOffset = 1,
                     /* Even if VS is disabled, SBE still gets the amount of
                      * vertex data to read from this field. We use attribute
                      * count - 1, as we don't count the VUE header here. */
                     .VertexURBEntryOutputLength =
                        DIV_ROUND_UP(vi_info->attributeCount - 1, 2));
   else
      anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_VS,
                     .KernelStartPointer = pipeline->vs_simd8,
                     .SingleVertexDispatch = Multiple,
                     .VectorMaskEnable = Dmask,
                     .SamplerCount = 0,
                     .BindingTableEntryCount =
                        vue_prog_data->base.binding_table.size_bytes / 4,
                     .ThreadDispatchPriority = Normal,
                     .FloatingPointMode = IEEE754,
                     .IllegalOpcodeExceptionEnable = false,
                     .AccessesUAV = false,
                     .SoftwareExceptionEnable = false,

                     .ScratchSpaceBasePointer = pipeline->scratch_start[VK_SHADER_STAGE_VERTEX],
                     .PerThreadScratchSpace = ffs(vue_prog_data->base.total_scratch / 2048),

                     .DispatchGRFStartRegisterForURBData =
                        vue_prog_data->base.dispatch_grf_start_reg,
                     .VertexURBEntryReadLength = vue_prog_data->urb_read_length,
                     .VertexURBEntryReadOffset = 0,

                     .MaximumNumberofThreads = device->info.max_vs_threads - 1,
                     .StatisticsEnable = false,
                     .SIMD8DispatchEnable = true,
                     .VertexCacheDisable = false,
                     .FunctionEnable = true,

                     .VertexURBEntryOutputReadOffset = offset,
                     .VertexURBEntryOutputLength = length,
                     .UserClipDistanceClipTestEnableBitmask = 0,
                     .UserClipDistanceCullTestEnableBitmask = 0);

   const struct brw_wm_prog_data *wm_prog_data = &pipeline->wm_prog_data;
   uint32_t ksp0, ksp2, grf_start0, grf_start2;

   /* KSP0 gets the SIMD8 kernel when present (SIMD16 then goes in KSP2);
    * otherwise KSP0 gets the SIMD16 kernel.  At least one must exist. */
   ksp2 = 0;
   grf_start2 = 0;
   if (pipeline->ps_simd8 != NO_KERNEL) {
      ksp0 = pipeline->ps_simd8;
      grf_start0 = wm_prog_data->base.dispatch_grf_start_reg;
      if (pipeline->ps_simd16 != NO_KERNEL) {
         ksp2 = pipeline->ps_simd16;
         grf_start2 = wm_prog_data->dispatch_grf_start_reg_16;
      }
   } else if (pipeline->ps_simd16 != NO_KERNEL) {
      ksp0 = pipeline->ps_simd16;
      grf_start0 = wm_prog_data->dispatch_grf_start_reg_16;
   } else {
      unreachable("no ps shader");
   }

   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_PS,
                  .KernelStartPointer0 = ksp0,

                  .SingleProgramFlow = false,
                  .VectorMaskEnable = true,
                  .SamplerCount = 1,

                  .ScratchSpaceBasePointer = pipeline->scratch_start[VK_SHADER_STAGE_FRAGMENT],
                  .PerThreadScratchSpace = ffs(wm_prog_data->base.total_scratch / 2048),

                  .MaximumNumberofThreadsPerPSD = 64 - 2,
                  .PositionXYOffsetSelect = wm_prog_data->uses_pos_offset ?
                     POSOFFSET_SAMPLE: POSOFFSET_NONE,
                  .PushConstantEnable = wm_prog_data->base.nr_params > 0,
                  ._8PixelDispatchEnable = pipeline->ps_simd8 != NO_KERNEL,
                  ._16PixelDispatchEnable = pipeline->ps_simd16 != NO_KERNEL,
                  ._32PixelDispatchEnable = false,

                  .DispatchGRFStartRegisterForConstantSetupData0 = grf_start0,
                  .DispatchGRFStartRegisterForConstantSetupData1 = 0,
                  .DispatchGRFStartRegisterForConstantSetupData2 = grf_start2,

                  .KernelStartPointer1 = 0,
                  .KernelStartPointer2 = ksp2);

   bool per_sample_ps = false;
   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_PS_EXTRA,
                  .PixelShaderValid = true,
                  .PixelShaderKillsPixel = wm_prog_data->uses_kill,
                  .PixelShaderComputedDepthMode = wm_prog_data->computed_depth_mode,
                  .AttributeEnable = wm_prog_data->num_varying_inputs > 0,
                  .oMaskPresenttoRenderTarget = wm_prog_data->uses_omask,
                  .PixelShaderIsPerSample = per_sample_ps);

   *pPipeline = (VkPipeline) pipeline;

   return VK_SUCCESS;
}
705
/* Pipeline derivatives are not implemented yet. */
VkResult anv_CreateGraphicsPipelineDerivative(
    VkDevice                                    device,
    const VkGraphicsPipelineCreateInfo*         pCreateInfo,
    VkPipeline                                  basePipeline,
    VkPipeline*                                 pPipeline)
{
   stub_return(VK_UNSUPPORTED);
}
714
715 VkResult anv_CreateComputePipeline(
716 VkDevice _device,
717 const VkComputePipelineCreateInfo* pCreateInfo,
718 VkPipeline* pPipeline)
719 {
720 struct anv_device *device = (struct anv_device *) _device;
721 struct anv_pipeline *pipeline;
722 VkResult result;
723
724 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO);
725
726 pipeline = anv_device_alloc(device, sizeof(*pipeline), 8,
727 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
728 if (pipeline == NULL)
729 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
730
731 pipeline->base.destructor = anv_pipeline_destroy;
732 pipeline->device = device;
733 pipeline->layout = (struct anv_pipeline_layout *) pCreateInfo->layout;
734
735 result = anv_reloc_list_init(&pipeline->batch.relocs, device);
736 if (result != VK_SUCCESS) {
737 anv_device_free(device, pipeline);
738 return result;
739 }
740 pipeline->batch.next = pipeline->batch.start = pipeline->batch_data;
741 pipeline->batch.end = pipeline->batch.start + sizeof(pipeline->batch_data);
742
743 anv_state_stream_init(&pipeline->program_stream,
744 &device->instruction_block_pool);
745
746 memset(pipeline->shaders, 0, sizeof(pipeline->shaders));
747
748 pipeline->shaders[VK_SHADER_STAGE_COMPUTE] =
749 (struct anv_shader *) pCreateInfo->cs.shader;
750
751 pipeline->use_repclear = false;
752
753 anv_compiler_run(device->compiler, pipeline);
754
755 const struct brw_cs_prog_data *cs_prog_data = &pipeline->cs_prog_data;
756
757 anv_batch_emit(&pipeline->batch, GEN8_MEDIA_VFE_STATE,
758 .ScratchSpaceBasePointer = pipeline->scratch_start[VK_SHADER_STAGE_FRAGMENT],
759 .PerThreadScratchSpace = ffs(cs_prog_data->base.total_scratch / 2048),
760 .ScratchSpaceBasePointerHigh = 0,
761 .StackSize = 0,
762
763 .MaximumNumberofThreads = device->info.max_cs_threads - 1,
764 .NumberofURBEntries = 2,
765 .ResetGatewayTimer = true,
766 .BypassGatewayControl = true,
767 .URBEntryAllocationSize = 2,
768 .CURBEAllocationSize = 0);
769
770 struct brw_cs_prog_data *prog_data = &pipeline->cs_prog_data;
771 uint32_t group_size = prog_data->local_size[0] *
772 prog_data->local_size[1] * prog_data->local_size[2];
773 pipeline->cs_thread_width_max = DIV_ROUND_UP(group_size, prog_data->simd_size);
774 uint32_t remainder = group_size & (prog_data->simd_size - 1);
775
776 if (remainder > 0)
777 pipeline->cs_right_mask = ~0u >> (32 - remainder);
778 else
779 pipeline->cs_right_mask = ~0u >> (32 - prog_data->simd_size);
780
781
782 *pPipeline = (VkPipeline) pipeline;
783
784 return VK_SUCCESS;
785 }
786
/* Pipeline serialization is not implemented yet. */
VkResult anv_StorePipeline(
    VkDevice                                    device,
    VkPipeline                                  pipeline,
    size_t*                                     pDataSize,
    void*                                       pData)
{
   stub_return(VK_UNSUPPORTED);
}
795
/* Pipeline deserialization is not implemented yet. */
VkResult anv_LoadPipeline(
    VkDevice                                    device,
    size_t                                      dataSize,
    const void*                                 pData,
    VkPipeline*                                 pPipeline)
{
   stub_return(VK_UNSUPPORTED);
}
804
/* Loading pipeline derivatives is not implemented yet. */
VkResult anv_LoadPipelineDerivative(
    VkDevice                                    device,
    size_t                                      dataSize,
    const void*                                 pData,
    VkPipeline                                  basePipeline,
    VkPipeline*                                 pPipeline)
{
   stub_return(VK_UNSUPPORTED);
}
814
815 // Pipeline layout functions
816
817 VkResult anv_CreatePipelineLayout(
818 VkDevice _device,
819 const VkPipelineLayoutCreateInfo* pCreateInfo,
820 VkPipelineLayout* pPipelineLayout)
821 {
822 struct anv_device *device = (struct anv_device *) _device;
823 struct anv_pipeline_layout *layout;
824
825 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO);
826
827 layout = anv_device_alloc(device, sizeof(*layout), 8,
828 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
829 if (layout == NULL)
830 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
831
832 layout->num_sets = pCreateInfo->descriptorSetCount;
833
834 uint32_t surface_start[VK_SHADER_STAGE_NUM] = { 0, };
835 uint32_t sampler_start[VK_SHADER_STAGE_NUM] = { 0, };
836
837 for (uint32_t s = 0; s < VK_SHADER_STAGE_NUM; s++) {
838 layout->stage[s].surface_count = 0;
839 layout->stage[s].sampler_count = 0;
840 }
841
842 for (uint32_t i = 0; i < pCreateInfo->descriptorSetCount; i++) {
843 struct anv_descriptor_set_layout *set_layout =
844 (struct anv_descriptor_set_layout *) pCreateInfo->pSetLayouts[i];
845
846 layout->set[i].layout = set_layout;
847 for (uint32_t s = 0; s < VK_SHADER_STAGE_NUM; s++) {
848 layout->set[i].surface_start[s] = surface_start[s];
849 surface_start[s] += set_layout->stage[s].surface_count;
850 layout->set[i].sampler_start[s] = sampler_start[s];
851 sampler_start[s] += set_layout->stage[s].sampler_count;
852
853 layout->stage[s].surface_count += set_layout->stage[s].surface_count;
854 layout->stage[s].sampler_count += set_layout->stage[s].sampler_count;
855 }
856 }
857
858 *pPipelineLayout = (VkPipelineLayout) layout;
859
860 return VK_SUCCESS;
861 }