vk/pipeline: Default to a SamplerCount of 1 for PS
[mesa.git] / src / vulkan / pipeline.c
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>

#include "private.h"

// Shader functions

VkResult VKAPI vkCreateShader(
    VkDevice                                    _device,
    const VkShaderCreateInfo*                   pCreateInfo,
    VkShader*                                   pShader)
{
   struct anv_device *device = (struct anv_device *) _device;
   struct anv_shader *shader;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_CREATE_INFO);

   shader = anv_device_alloc(device, sizeof(*shader) + pCreateInfo->codeSize, 8,
                             VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (shader == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   shader->size = pCreateInfo->codeSize;
   memcpy(shader->data, pCreateInfo->pCode, shader->size);

   *pShader = (VkShader) shader;

   return VK_SUCCESS;
}

// Pipeline functions

static void
emit_vertex_input(struct anv_pipeline *pipeline, VkPipelineVertexInputCreateInfo *info)
{
   const uint32_t num_dwords = 1 + info->attributeCount * 2;
   uint32_t *p;
   bool instancing_enable[32];

   for (uint32_t i = 0; i < info->bindingCount; i++) {
      const VkVertexInputBindingDescription *desc =
         &info->pVertexBindingDescriptions[i];

      pipeline->binding_stride[desc->binding] = desc->strideInBytes;

      /* Step rate is programmed per vertex element (attribute), not
       * binding.  Set up a map of which bindings step per instance, for
       * reference by vertex element setup.
       */
      switch (desc->stepRate) {
      default:
      case VK_VERTEX_INPUT_STEP_RATE_VERTEX:
         instancing_enable[desc->binding] = false;
         break;
      case VK_VERTEX_INPUT_STEP_RATE_INSTANCE:
         instancing_enable[desc->binding] = true;
         break;
      }
   }

   p = anv_batch_emitn(&pipeline->batch, num_dwords,
                       GEN8_3DSTATE_VERTEX_ELEMENTS);

   for (uint32_t i = 0; i < info->attributeCount; i++) {
      const VkVertexInputAttributeDescription *desc =
         &info->pVertexAttributeDescriptions[i];
      const struct anv_format *format = anv_format_for_vk_format(desc->format);

      struct GEN8_VERTEX_ELEMENT_STATE element = {
         .VertexBufferIndex = desc->binding,
         .Valid = true,
         .SourceElementFormat = format->format,
         .EdgeFlagEnable = false,
         .SourceElementOffset = desc->offsetInBytes,
         .Component0Control = VFCOMP_STORE_SRC,
         .Component1Control = format->channels >= 2 ? VFCOMP_STORE_SRC : VFCOMP_STORE_0,
         .Component2Control = format->channels >= 3 ? VFCOMP_STORE_SRC : VFCOMP_STORE_0,
         .Component3Control = format->channels >= 4 ? VFCOMP_STORE_SRC : VFCOMP_STORE_1_FP
      };
      GEN8_VERTEX_ELEMENT_STATE_pack(NULL, &p[1 + i * 2], &element);

      anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_VF_INSTANCING,
                     .InstancingEnable = instancing_enable[desc->binding],
                     .VertexElementIndex = i,
                     /* Vulkan so far doesn't have an instance divisor, so
                      * this is always 1 (ignored if not instancing).
                      */
                     .InstanceDataStepRate = 1);
   }

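   /* When the VS uses them, have the VF fetch gl_VertexID and gl_InstanceID
    * as system-generated values in components 2 and 3 of an additional
    * vertex element.
    */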
   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_VF_SGVS,
                  .VertexIDEnable = pipeline->vs_prog_data.uses_vertexid,
                  .VertexIDComponentNumber = 2,
                  .VertexIDElementOffset = info->bindingCount,
                  .InstanceIDEnable = pipeline->vs_prog_data.uses_instanceid,
                  .InstanceIDComponentNumber = 3,
                  .InstanceIDElementOffset = info->bindingCount);
}

static void
emit_ia_state(struct anv_pipeline *pipeline,
              VkPipelineIaStateCreateInfo *info,
              const struct anv_pipeline_create_info *extra)
{
   static const uint32_t vk_to_gen_primitive_type[] = {
      [VK_PRIMITIVE_TOPOLOGY_POINT_LIST] = _3DPRIM_POINTLIST,
      [VK_PRIMITIVE_TOPOLOGY_LINE_LIST] = _3DPRIM_LINELIST,
      [VK_PRIMITIVE_TOPOLOGY_LINE_STRIP] = _3DPRIM_LINESTRIP,
      [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST] = _3DPRIM_TRILIST,
      [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP] = _3DPRIM_TRISTRIP,
      [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN] = _3DPRIM_TRIFAN,
      [VK_PRIMITIVE_TOPOLOGY_LINE_LIST_ADJ] = _3DPRIM_LINELIST_ADJ,
      [VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_ADJ] = _3DPRIM_LISTSTRIP_ADJ,
      [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_ADJ] = _3DPRIM_TRILIST_ADJ,
      [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_ADJ] = _3DPRIM_TRISTRIP_ADJ,
      [VK_PRIMITIVE_TOPOLOGY_PATCH] = _3DPRIM_PATCHLIST_1
   };
   uint32_t topology = vk_to_gen_primitive_type[info->topology];

   if (extra && extra->use_rectlist)
      topology = _3DPRIM_RECTLIST;

   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_VF,
                  .IndexedDrawCutIndexEnable = info->primitiveRestartEnable,
                  .CutIndex = info->primitiveRestartIndex);
   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_VF_TOPOLOGY,
                  .PrimitiveTopologyType = topology);
}

static void
emit_rs_state(struct anv_pipeline *pipeline, VkPipelineRsStateCreateInfo *info,
              const struct anv_pipeline_create_info *extra)
{
   static const uint32_t vk_to_gen_cullmode[] = {
      [VK_CULL_MODE_NONE] = CULLMODE_NONE,
      [VK_CULL_MODE_FRONT] = CULLMODE_FRONT,
      [VK_CULL_MODE_BACK] = CULLMODE_BACK,
      [VK_CULL_MODE_FRONT_AND_BACK] = CULLMODE_BOTH
   };

   static const uint32_t vk_to_gen_fillmode[] = {
      [VK_FILL_MODE_POINTS] = RASTER_POINT,
      [VK_FILL_MODE_WIREFRAME] = RASTER_WIREFRAME,
      [VK_FILL_MODE_SOLID] = RASTER_SOLID
   };

   static const uint32_t vk_to_gen_front_face[] = {
      [VK_FRONT_FACE_CCW] = CounterClockwise,
      [VK_FRONT_FACE_CW] = Clockwise
   };

   static const uint32_t vk_to_gen_coordinate_origin[] = {
      [VK_COORDINATE_ORIGIN_UPPER_LEFT] = UPPERLEFT,
      [VK_COORDINATE_ORIGIN_LOWER_LEFT] = LOWERLEFT
   };

   struct GEN8_3DSTATE_SF sf = {
      GEN8_3DSTATE_SF_header,
      .ViewportTransformEnable = !(extra && extra->disable_viewport),
      .TriangleStripListProvokingVertexSelect =
         info->provokingVertex == VK_PROVOKING_VERTEX_FIRST ? 0 : 2,
      .LineStripListProvokingVertexSelect =
         info->provokingVertex == VK_PROVOKING_VERTEX_FIRST ? 0 : 1,
      .TriangleFanProvokingVertexSelect =
         info->provokingVertex == VK_PROVOKING_VERTEX_FIRST ? 0 : 2,
      .PointWidthSource = info->programPointSize ? Vertex : State,
   };

   /* bool32_t rasterizerDiscardEnable; */

   GEN8_3DSTATE_SF_pack(NULL, pipeline->state_sf, &sf);

   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_RASTER,
                  .FrontWinding = vk_to_gen_front_face[info->frontFace],
                  .CullMode = vk_to_gen_cullmode[info->cullMode],
                  .FrontFaceFillMode = vk_to_gen_fillmode[info->fillMode],
                  .BackFaceFillMode = vk_to_gen_fillmode[info->fillMode],
                  .ScissorRectangleEnable = !(extra && extra->disable_scissor),
                  .ViewportZClipTestEnable = info->depthClipEnable);

   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_SBE,
                  .ForceVertexURBEntryReadLength = false,
                  .ForceVertexURBEntryReadOffset = false,
                  .PointSpriteTextureCoordinateOrigin =
                     vk_to_gen_coordinate_origin[info->pointOrigin],
                  .NumberofSFOutputAttributes =
                     pipeline->wm_prog_data.num_varying_inputs);
}

VkResult VKAPI vkCreateGraphicsPipeline(
    VkDevice                                    device,
    const VkGraphicsPipelineCreateInfo*         pCreateInfo,
    VkPipeline*                                 pPipeline)
{
   return anv_pipeline_create(device, pCreateInfo, NULL, pPipeline);
}

VkResult
anv_pipeline_create(
    VkDevice                                    _device,
    const VkGraphicsPipelineCreateInfo*         pCreateInfo,
    const struct anv_pipeline_create_info *     extra,
    VkPipeline*                                 pPipeline)
{
   struct anv_device *device = (struct anv_device *) _device;
   struct anv_pipeline *pipeline;
   const struct anv_common *common;
   VkPipelineShaderStageCreateInfo *shader_create_info;
   VkPipelineIaStateCreateInfo *ia_info;
   VkPipelineRsStateCreateInfo *rs_info;
   VkPipelineVertexInputCreateInfo *vi_info;
   VkResult result;
   uint32_t offset, length;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO);

   pipeline = anv_device_alloc(device, sizeof(*pipeline), 8,
                               VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (pipeline == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   pipeline->device = device;
   pipeline->layout = (struct anv_pipeline_layout *) pCreateInfo->layout;
   memset(pipeline->shaders, 0, sizeof(pipeline->shaders));
   result = anv_batch_init(&pipeline->batch, device);
   if (result != VK_SUCCESS)
      goto fail;

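   /* Walk the pNext chain and pick out the substructures we consume when
    * building the pipeline.
    */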
   for (common = pCreateInfo->pNext; common; common = common->pNext) {
      switch (common->sType) {
      case VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_CREATE_INFO:
         vi_info = (VkPipelineVertexInputCreateInfo *) common;
         break;
      case VK_STRUCTURE_TYPE_PIPELINE_IA_STATE_CREATE_INFO:
         ia_info = (VkPipelineIaStateCreateInfo *) common;
         break;
      case VK_STRUCTURE_TYPE_PIPELINE_TESS_STATE_CREATE_INFO:
      case VK_STRUCTURE_TYPE_PIPELINE_VP_STATE_CREATE_INFO:
         break;
      case VK_STRUCTURE_TYPE_PIPELINE_RS_STATE_CREATE_INFO:
         rs_info = (VkPipelineRsStateCreateInfo *) common;
         break;
      case VK_STRUCTURE_TYPE_PIPELINE_MS_STATE_CREATE_INFO:
      case VK_STRUCTURE_TYPE_PIPELINE_CB_STATE_CREATE_INFO:
      case VK_STRUCTURE_TYPE_PIPELINE_DS_STATE_CREATE_INFO:
         /* Not consumed here yet; don't let these fall through and be
          * misinterpreted as a shader stage struct.
          */
         break;
      case VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO:
         shader_create_info = (VkPipelineShaderStageCreateInfo *) common;
         pipeline->shaders[shader_create_info->shader.stage] =
            (struct anv_shader *) shader_create_info->shader.shader;
         break;
      default:
         break;
      }
   }

   pipeline->use_repclear = extra && extra->use_repclear;

   anv_compiler_run(device->compiler, pipeline);

   /* FIXME: The compiler dead-codes FS inputs when we don't have a VS, so we
    * hard code this to num_attributes - 2.  This is because the attributes
    * include VUE header and position, which aren't counted as varying
    * inputs.
    */
   if (pipeline->vs_simd8 == NO_KERNEL)
      pipeline->wm_prog_data.num_varying_inputs = vi_info->attributeCount - 2;

   emit_vertex_input(pipeline, vi_info);
   emit_ia_state(pipeline, ia_info, extra);
   emit_rs_state(pipeline, rs_info, extra);

   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_CLIP,
                  .ClipEnable = true,
                  .ViewportXYClipTestEnable = !(extra && extra->disable_viewport));

   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_WM,
                  .StatisticsEnable = true,
                  .LineEndCapAntialiasingRegionWidth = _05pixels,
                  .LineAntialiasingRegionWidth = _10pixels,
                  .EarlyDepthStencilControl = NORMAL,
                  .ForceThreadDispatchEnable = NORMAL,
                  .PointRasterizationRule = RASTRULE_UPPER_RIGHT,
                  .BarycentricInterpolationMode =
                     pipeline->wm_prog_data.barycentric_interp_modes);

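   /* Multisampling isn't wired up to the MS state create info yet, so
    * program 3DSTATE_MULTISAMPLE for a single sample.
    */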
   uint32_t samples = 1;
   uint32_t log2_samples = __builtin_ffs(samples) - 1;
   bool enable_sampling = samples > 1;

   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_MULTISAMPLE,
                  .PixelPositionOffsetEnable = enable_sampling,
                  .PixelLocation = CENTER,
                  .NumberofMultisamples = log2_samples);

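   /* Program the URB space partitioning computed for this pipeline.  HS and
    * DS are not used, so they get zero entries at the VS starting address.
    */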
   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_URB_VS,
                  .VSURBStartingAddress = pipeline->urb.vs_start,
                  .VSURBEntryAllocationSize = pipeline->urb.vs_size - 1,
                  .VSNumberofURBEntries = pipeline->urb.nr_vs_entries);

   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_URB_GS,
                  .GSURBStartingAddress = pipeline->urb.gs_start,
                  .GSURBEntryAllocationSize = pipeline->urb.gs_size - 1,
                  .GSNumberofURBEntries = pipeline->urb.nr_gs_entries);

   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_URB_HS,
                  .HSURBStartingAddress = pipeline->urb.vs_start,
                  .HSURBEntryAllocationSize = 0,
                  .HSNumberofURBEntries = 0);

   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_URB_DS,
                  .DSURBStartingAddress = pipeline->urb.vs_start,
                  .DSURBEntryAllocationSize = 0,
                  .DSNumberofURBEntries = 0);

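   /* The URB output read offset and length for the GS are in pairs of VUE
    * slots; an offset of 1 skips the VUE header and position slots.
    */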
   const struct brw_gs_prog_data *gs_prog_data = &pipeline->gs_prog_data;
   offset = 1;
   length = (gs_prog_data->base.vue_map.num_slots + 1) / 2 - offset;

   if (pipeline->gs_vec4 == NO_KERNEL)
      anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_GS, .Enable = false);
   else
      anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_GS,
                     .SingleProgramFlow = false,
                     .KernelStartPointer = pipeline->gs_vec4,
                     .VectorMaskEnable = Vmask,
                     .SamplerCount = 0,
                     .BindingTableEntryCount = 0,
                     .ExpectedVertexCount = pipeline->gs_vertex_count,

                     .PerThreadScratchSpace = 0,
                     .ScratchSpaceBasePointer = 0,

                     .OutputVertexSize = gs_prog_data->output_vertex_size_hwords * 2 - 1,
                     .OutputTopology = gs_prog_data->output_topology,
                     .VertexURBEntryReadLength = gs_prog_data->base.urb_read_length,
                     .DispatchGRFStartRegisterForURBData =
                        gs_prog_data->base.base.dispatch_grf_start_reg,

                     .MaximumNumberofThreads = device->info.max_gs_threads,
                     .ControlDataHeaderSize = gs_prog_data->control_data_header_size_hwords,
                     //pipeline->gs_prog_data.dispatch_mode |
                     .StatisticsEnable = true,
                     .IncludePrimitiveID = gs_prog_data->include_primitive_id,
                     .ReorderMode = TRAILING,
                     .Enable = true,

                     .ControlDataFormat = gs_prog_data->control_data_format,

                     /* FIXME: mesa sets this based on ctx->Transform.ClipPlanesEnabled:
                      * UserClipDistanceClipTestEnableBitmask_3DSTATE_GS(v)
                      * UserClipDistanceCullTestEnableBitmask(v)
                      */

                     .VertexURBEntryOutputReadOffset = offset,
                     .VertexURBEntryOutputLength = length);

   //trp_generate_blend_hw_cmds(batch, pipeline);

   const struct brw_vue_prog_data *vue_prog_data = &pipeline->vs_prog_data.base;
   /* Skip the VUE header and position slots */
   offset = 1;
   length = (vue_prog_data->vue_map.num_slots + 1) / 2 - offset;

   if (pipeline->vs_simd8 == NO_KERNEL || (extra && extra->disable_vs))
      anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_VS,
                     .FunctionEnable = false,
                     .VertexURBEntryOutputReadOffset = 1,
                     /* Even if VS is disabled, SBE still gets the amount of
                      * vertex data to read from this field.  We use attribute
                      * count - 1, as we don't count the VUE header here.
                      */
                     .VertexURBEntryOutputLength =
                        DIV_ROUND_UP(vi_info->attributeCount - 1, 2));
   else
      anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_VS,
                     .KernelStartPointer = pipeline->vs_simd8,
                     .SingleVertexDispatch = Multiple,
                     .VectorMaskEnable = Dmask,
                     .SamplerCount = 0,
                     .BindingTableEntryCount =
                        vue_prog_data->base.binding_table.size_bytes / 4,
                     .ThreadDispatchPriority = Normal,
                     .FloatingPointMode = IEEE754,
                     .IllegalOpcodeExceptionEnable = false,
                     .AccessesUAV = false,
                     .SoftwareExceptionEnable = false,

                     /* FIXME: pointer needs to be assigned outside as it aliases
                      * PerThreadScratchSpace.
                      */
                     .ScratchSpaceBasePointer = 0,
                     .PerThreadScratchSpace = 0,

                     .DispatchGRFStartRegisterForURBData =
                        vue_prog_data->base.dispatch_grf_start_reg,
                     .VertexURBEntryReadLength = vue_prog_data->urb_read_length,
                     .VertexURBEntryReadOffset = 0,

                     .MaximumNumberofThreads = device->info.max_vs_threads - 1,
                     .StatisticsEnable = false,
                     .SIMD8DispatchEnable = true,
                     .VertexCacheDisable = ia_info->disableVertexReuse,
                     .FunctionEnable = true,

                     .VertexURBEntryOutputReadOffset = offset,
                     .VertexURBEntryOutputLength = length,
                     .UserClipDistanceClipTestEnableBitmask = 0,
                     .UserClipDistanceCullTestEnableBitmask = 0);

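   /* Pick the kernel start pointers for 3DSTATE_PS: KSP0 gets the SIMD8
    * kernel when we have one (with SIMD16 in KSP2); otherwise KSP0 gets the
    * SIMD16 kernel.
    */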
   const struct brw_wm_prog_data *wm_prog_data = &pipeline->wm_prog_data;
   uint32_t ksp0, ksp2, grf_start0, grf_start2;

   ksp2 = 0;
   grf_start2 = 0;
   if (pipeline->ps_simd8 != NO_KERNEL) {
      ksp0 = pipeline->ps_simd8;
      grf_start0 = wm_prog_data->base.dispatch_grf_start_reg;
      if (pipeline->ps_simd16 != NO_KERNEL) {
         ksp2 = pipeline->ps_simd16;
         grf_start2 = wm_prog_data->dispatch_grf_start_reg_16;
      }
   } else if (pipeline->ps_simd16 != NO_KERNEL) {
      ksp0 = pipeline->ps_simd16;
      grf_start0 = wm_prog_data->dispatch_grf_start_reg_16;
   } else {
      unreachable("no ps shader");
   }

   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_PS,
                  .KernelStartPointer0 = ksp0,

                  .SingleProgramFlow = false,
                  .VectorMaskEnable = true,
                  .SamplerCount = 1,

                  .ScratchSpaceBasePointer = 0,
                  .PerThreadScratchSpace = 0,

                  .MaximumNumberofThreadsPerPSD = 64 - 2,
                  .PositionXYOffsetSelect = wm_prog_data->uses_pos_offset ?
                     POSOFFSET_SAMPLE : POSOFFSET_NONE,
                  .PushConstantEnable = wm_prog_data->base.nr_params > 0,
                  ._8PixelDispatchEnable = pipeline->ps_simd8 != NO_KERNEL,
                  ._16PixelDispatchEnable = pipeline->ps_simd16 != NO_KERNEL,
                  ._32PixelDispatchEnable = false,

                  .DispatchGRFStartRegisterForConstantSetupData0 = grf_start0,
                  .DispatchGRFStartRegisterForConstantSetupData1 = 0,
                  .DispatchGRFStartRegisterForConstantSetupData2 = grf_start2,

                  .KernelStartPointer1 = 0,
                  .KernelStartPointer2 = ksp2);

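   /* Per-sample dispatch isn't supported yet, so the pixel shader always
    * runs per pixel.
    */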
   bool per_sample_ps = false;
   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_PS_EXTRA,
                  .PixelShaderValid = true,
                  .PixelShaderKillsPixel = wm_prog_data->uses_kill,
                  .PixelShaderComputedDepthMode = wm_prog_data->computed_depth_mode,
                  .AttributeEnable = wm_prog_data->num_varying_inputs > 0,
                  .oMaskPresenttoRenderTarget = wm_prog_data->uses_omask,
                  .PixelShaderIsPerSample = per_sample_ps);

   *pPipeline = (VkPipeline) pipeline;

   return VK_SUCCESS;

 fail:
   anv_device_free(device, pipeline);

   return result;
}

VkResult
anv_pipeline_destroy(struct anv_pipeline *pipeline)
{
   anv_compiler_free(pipeline);
   anv_batch_finish(&pipeline->batch, pipeline->device);
   anv_device_free(pipeline->device, pipeline);

   return VK_SUCCESS;
}

VkResult VKAPI vkCreateGraphicsPipelineDerivative(
    VkDevice                                    device,
    const VkGraphicsPipelineCreateInfo*         pCreateInfo,
    VkPipeline                                  basePipeline,
    VkPipeline*                                 pPipeline)
{
   stub_return(VK_UNSUPPORTED);
}

VkResult VKAPI vkCreateComputePipeline(
    VkDevice                                    device,
    const VkComputePipelineCreateInfo*          pCreateInfo,
    VkPipeline*                                 pPipeline)
{
   stub_return(VK_UNSUPPORTED);
}

VkResult VKAPI vkStorePipeline(
    VkDevice                                    device,
    VkPipeline                                  pipeline,
    size_t*                                     pDataSize,
    void*                                       pData)
{
   stub_return(VK_UNSUPPORTED);
}

VkResult VKAPI vkLoadPipeline(
    VkDevice                                    device,
    size_t                                      dataSize,
    const void*                                 pData,
    VkPipeline*                                 pPipeline)
{
   stub_return(VK_UNSUPPORTED);
}

VkResult VKAPI vkLoadPipelineDerivative(
    VkDevice                                    device,
    size_t                                      dataSize,
    const void*                                 pData,
    VkPipeline                                  basePipeline,
    VkPipeline*                                 pPipeline)
{
   stub_return(VK_UNSUPPORTED);
}

// Pipeline layout functions

VkResult VKAPI vkCreatePipelineLayout(
    VkDevice                                    _device,
    const VkPipelineLayoutCreateInfo*           pCreateInfo,
    VkPipelineLayout*                           pPipelineLayout)
{
   struct anv_device *device = (struct anv_device *) _device;
   struct anv_pipeline_layout *layout;
   struct anv_pipeline_layout_entry *sampler_entry, *surface_entry;
   uint32_t sampler_total, surface_total;
   size_t size;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO);

   sampler_total = 0;
   surface_total = 0;
   for (uint32_t i = 0; i < pCreateInfo->descriptorSetCount; i++) {
      struct anv_descriptor_set_layout *set_layout =
         (struct anv_descriptor_set_layout *) pCreateInfo->pSetLayouts[i];
      for (uint32_t j = 0; j < set_layout->count; j++) {
         sampler_total += set_layout->sampler_total;
         surface_total += set_layout->surface_total;
      }
   }

   size = sizeof(*layout) +
      (sampler_total + surface_total) * sizeof(layout->entries[0]);
   layout = anv_device_alloc(device, size, 8,
                             VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (layout == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

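   /* The entries array holds all sampler entries first, then all surface
    * entries; each shader stage records pointers into its slice of both and
    * the number of entries it owns.
    */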
   sampler_entry = layout->entries;
   surface_entry = layout->entries + sampler_total;
   for (uint32_t s = 0; s < VK_NUM_SHADER_STAGE; s++) {
      layout->stage[s].sampler_entries = sampler_entry;
      layout->stage[s].surface_entries = surface_entry;

      for (uint32_t i = 0; i < pCreateInfo->descriptorSetCount; i++) {
         struct anv_descriptor_set_layout *set_layout =
            (struct anv_descriptor_set_layout *) pCreateInfo->pSetLayouts[i];
         for (uint32_t j = 0; j < set_layout->count; j++) {
            if (set_layout->bindings[j].mask & (1 << s)) {
               switch (set_layout->bindings[j].type) {
               case VK_DESCRIPTOR_TYPE_SAMPLER:
                  sampler_entry->type = set_layout->bindings[j].type;
                  sampler_entry->set = i;
                  sampler_entry->index = j;
                  sampler_entry++;
                  break;

               case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
                  sampler_entry->type = set_layout->bindings[j].type;
                  sampler_entry->set = i;
                  sampler_entry->index = j;
                  sampler_entry++;
                  /* fall through */

               case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
               case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
               case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
               case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
               case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
               case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
               case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
               case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
                  surface_entry->type = set_layout->bindings[j].type;
                  surface_entry->set = i;
                  surface_entry->index = j;
                  surface_entry++;
                  break;

               default:
                  break;
               }
            }
         }
      }

      layout->stage[s].sampler_count =
         sampler_entry - layout->stage[s].sampler_entries;
      layout->stage[s].surface_count =
         surface_entry - layout->stage[s].surface_entries;
   }

   *pPipelineLayout = (VkPipelineLayout) layout;

   return VK_SUCCESS;
}