vk: Actually implement some sort of destructor for all object types
[mesa.git] / src/vulkan/pipeline.c
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>

#include "private.h"

// Shader functions

VkResult anv_CreateShader(
    VkDevice _device,
    const VkShaderCreateInfo* pCreateInfo,
    VkShader* pShader)
{
   struct anv_device *device = (struct anv_device *) _device;
   struct anv_shader *shader;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_CREATE_INFO);

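   /* The shader object and its code are allocated as one block: the words
    * passed in pCode are copied inline after the struct, into shader->data.
    */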
   shader = anv_device_alloc(device, sizeof(*shader) + pCreateInfo->codeSize, 8,
                             VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (shader == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   shader->size = pCreateInfo->codeSize;
   memcpy(shader->data, pCreateInfo->pCode, shader->size);

   *pShader = (VkShader) shader;

   return VK_SUCCESS;
}

// Pipeline functions

static void
emit_vertex_input(struct anv_pipeline *pipeline, VkPipelineVertexInputCreateInfo *info)
{
   const uint32_t num_dwords = 1 + info->attributeCount * 2;
   uint32_t *p;
   bool instancing_enable[32];

   pipeline->vb_used = 0;
   for (uint32_t i = 0; i < info->bindingCount; i++) {
      const VkVertexInputBindingDescription *desc =
         &info->pVertexBindingDescriptions[i];

      pipeline->vb_used |= 1 << desc->binding;
      pipeline->binding_stride[desc->binding] = desc->strideInBytes;

      /* Step rate is programmed per vertex element (attribute), not
       * binding. Set up a map of which bindings step per instance, for
       * reference by vertex element setup. */
      switch (desc->stepRate) {
      default:
      case VK_VERTEX_INPUT_STEP_RATE_VERTEX:
         instancing_enable[desc->binding] = false;
         break;
      case VK_VERTEX_INPUT_STEP_RATE_INSTANCE:
         instancing_enable[desc->binding] = true;
         break;
      }
   }

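   /* 3DSTATE_VERTEX_ELEMENTS is a variable-length packet: one header DWord
    * plus two DWords of VERTEX_ELEMENT_STATE per attribute, which is where
    * num_dwords above comes from.
    */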
   p = anv_batch_emitn(&pipeline->batch, num_dwords,
                       GEN8_3DSTATE_VERTEX_ELEMENTS);

   for (uint32_t i = 0; i < info->attributeCount; i++) {
      const VkVertexInputAttributeDescription *desc =
         &info->pVertexAttributeDescriptions[i];
      const struct anv_format *format = anv_format_for_vk_format(desc->format);

      struct GEN8_VERTEX_ELEMENT_STATE element = {
         .VertexBufferIndex = desc->binding,
         .Valid = true,
         .SourceElementFormat = format->format,
         .EdgeFlagEnable = false,
         .SourceElementOffset = desc->offsetInBytes,
         .Component0Control = VFCOMP_STORE_SRC,
         .Component1Control = format->channels >= 2 ? VFCOMP_STORE_SRC : VFCOMP_STORE_0,
         .Component2Control = format->channels >= 3 ? VFCOMP_STORE_SRC : VFCOMP_STORE_0,
         .Component3Control = format->channels >= 4 ? VFCOMP_STORE_SRC : VFCOMP_STORE_1_FP
      };
      GEN8_VERTEX_ELEMENT_STATE_pack(NULL, &p[1 + i * 2], &element);

      anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_VF_INSTANCING,
                     .InstancingEnable = instancing_enable[desc->binding],
                     .VertexElementIndex = i,
                     /* Vulkan so far doesn't have an instance divisor, so
                      * this is always 1 (ignored if not instancing). */
                     .InstanceDataStepRate = 1);
   }

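   /* VF_SGVS makes the hardware write gl_VertexID and gl_InstanceID into
    * components 2 and 3 of the vertex element at the given offset, but only
    * when the VS actually uses them.
    */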
   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_VF_SGVS,
                  .VertexIDEnable = pipeline->vs_prog_data.uses_vertexid,
                  .VertexIDComponentNumber = 2,
                  .VertexIDElementOffset = info->bindingCount,
                  .InstanceIDEnable = pipeline->vs_prog_data.uses_instanceid,
                  .InstanceIDComponentNumber = 3,
                  .InstanceIDElementOffset = info->bindingCount);
}

static void
emit_ia_state(struct anv_pipeline *pipeline,
              VkPipelineIaStateCreateInfo *info,
              const struct anv_pipeline_create_info *extra)
{
   static const uint32_t vk_to_gen_primitive_type[] = {
      [VK_PRIMITIVE_TOPOLOGY_POINT_LIST] = _3DPRIM_POINTLIST,
      [VK_PRIMITIVE_TOPOLOGY_LINE_LIST] = _3DPRIM_LINELIST,
      [VK_PRIMITIVE_TOPOLOGY_LINE_STRIP] = _3DPRIM_LINESTRIP,
      [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST] = _3DPRIM_TRILIST,
      [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP] = _3DPRIM_TRISTRIP,
      [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN] = _3DPRIM_TRIFAN,
      [VK_PRIMITIVE_TOPOLOGY_LINE_LIST_ADJ] = _3DPRIM_LINELIST_ADJ,
      [VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_ADJ] = _3DPRIM_LISTSTRIP_ADJ,
      [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_ADJ] = _3DPRIM_TRILIST_ADJ,
      [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_ADJ] = _3DPRIM_TRISTRIP_ADJ,
      [VK_PRIMITIVE_TOPOLOGY_PATCH] = _3DPRIM_PATCHLIST_1
   };
   uint32_t topology = vk_to_gen_primitive_type[info->topology];

   if (extra && extra->use_rectlist)
      topology = _3DPRIM_RECTLIST;

   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_VF,
                  .IndexedDrawCutIndexEnable = info->primitiveRestartEnable,
                  .CutIndex = info->primitiveRestartIndex);
   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_VF_TOPOLOGY,
                  .PrimitiveTopologyType = topology);
}

static void
emit_rs_state(struct anv_pipeline *pipeline, VkPipelineRsStateCreateInfo *info,
              const struct anv_pipeline_create_info *extra)
{
   static const uint32_t vk_to_gen_cullmode[] = {
      [VK_CULL_MODE_NONE] = CULLMODE_NONE,
      [VK_CULL_MODE_FRONT] = CULLMODE_FRONT,
      [VK_CULL_MODE_BACK] = CULLMODE_BACK,
      [VK_CULL_MODE_FRONT_AND_BACK] = CULLMODE_BOTH
   };

   static const uint32_t vk_to_gen_fillmode[] = {
      [VK_FILL_MODE_POINTS] = RASTER_POINT,
      [VK_FILL_MODE_WIREFRAME] = RASTER_WIREFRAME,
      [VK_FILL_MODE_SOLID] = RASTER_SOLID
   };

   static const uint32_t vk_to_gen_front_face[] = {
      [VK_FRONT_FACE_CCW] = CounterClockwise,
      [VK_FRONT_FACE_CW] = Clockwise
   };

   static const uint32_t vk_to_gen_coordinate_origin[] = {
      [VK_COORDINATE_ORIGIN_UPPER_LEFT] = UPPERLEFT,
      [VK_COORDINATE_ORIGIN_LOWER_LEFT] = LOWERLEFT
   };

   struct GEN8_3DSTATE_SF sf = {
      GEN8_3DSTATE_SF_header,
      .ViewportTransformEnable = !(extra && extra->disable_viewport),
      .TriangleStripListProvokingVertexSelect =
         info->provokingVertex == VK_PROVOKING_VERTEX_FIRST ? 0 : 2,
      .LineStripListProvokingVertexSelect =
         info->provokingVertex == VK_PROVOKING_VERTEX_FIRST ? 0 : 1,
      .TriangleFanProvokingVertexSelect =
         info->provokingVertex == VK_PROVOKING_VERTEX_FIRST ? 0 : 2,
      .PointWidthSource = info->programPointSize ? Vertex : State,
   };

   /* bool32_t rasterizerDiscardEnable; */

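   /* The SF state is packed into pipeline->state_sf instead of being emitted
    * into the batch here, presumably so it can be combined with dynamic
    * raster state when the pipeline is bound.
    */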
   GEN8_3DSTATE_SF_pack(NULL, pipeline->state_sf, &sf);

   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_RASTER,
                  .FrontWinding = vk_to_gen_front_face[info->frontFace],
                  .CullMode = vk_to_gen_cullmode[info->cullMode],
                  .FrontFaceFillMode = vk_to_gen_fillmode[info->fillMode],
                  .BackFaceFillMode = vk_to_gen_fillmode[info->fillMode],
                  .ScissorRectangleEnable = !(extra && extra->disable_scissor),
                  .ViewportZClipTestEnable = info->depthClipEnable);

   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_SBE,
                  .ForceVertexURBEntryReadLength = false,
                  .ForceVertexURBEntryReadOffset = false,
                  .PointSpriteTextureCoordinateOrigin =
                     vk_to_gen_coordinate_origin[info->pointOrigin],
                  .NumberofSFOutputAttributes =
                     pipeline->wm_prog_data.num_varying_inputs);
}

VkResult anv_CreateGraphicsPipeline(
    VkDevice device,
    const VkGraphicsPipelineCreateInfo* pCreateInfo,
    VkPipeline* pPipeline)
{
   return anv_pipeline_create(device, pCreateInfo, NULL, pPipeline);
}

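/* Pipeline destructor, installed below as pipeline->base.destructor so that
 * generic object teardown can dispatch to it by object type.
 */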
static void
anv_pipeline_destroy(struct anv_device *device,
                     struct anv_object *object,
                     VkObjectType obj_type)
{
   struct anv_pipeline *pipeline = (struct anv_pipeline*) object;

   assert(obj_type == VK_OBJECT_TYPE_PIPELINE);

   anv_compiler_free(pipeline);
   anv_batch_finish(&pipeline->batch, pipeline->device);
   anv_device_free(pipeline->device, pipeline);
}

VkResult
anv_pipeline_create(
    VkDevice _device,
    const VkGraphicsPipelineCreateInfo* pCreateInfo,
    const struct anv_pipeline_create_info *extra,
    VkPipeline* pPipeline)
{
   struct anv_device *device = (struct anv_device *) _device;
   struct anv_pipeline *pipeline;
   const struct anv_common *common;
   VkPipelineShaderStageCreateInfo *shader_create_info = NULL;
   VkPipelineIaStateCreateInfo *ia_info = NULL;
   VkPipelineRsStateCreateInfo *rs_info = NULL;
   VkPipelineVertexInputCreateInfo *vi_info = NULL;
   VkResult result;
   uint32_t offset, length;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO);

   pipeline = anv_device_alloc(device, sizeof(*pipeline), 8,
                               VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (pipeline == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   pipeline->base.destructor = anv_pipeline_destroy;
   pipeline->device = device;
   pipeline->layout = (struct anv_pipeline_layout *) pCreateInfo->layout;
   memset(pipeline->shaders, 0, sizeof(pipeline->shaders));
   result = anv_batch_init(&pipeline->batch, device);
   if (result != VK_SUCCESS)
      goto fail;

   anv_state_stream_init(&pipeline->program_stream,
                         &device->instruction_block_pool);

   for (common = pCreateInfo->pNext; common; common = common->pNext) {
      switch (common->sType) {
      case VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_CREATE_INFO:
         vi_info = (VkPipelineVertexInputCreateInfo *) common;
         break;
      case VK_STRUCTURE_TYPE_PIPELINE_IA_STATE_CREATE_INFO:
         ia_info = (VkPipelineIaStateCreateInfo *) common;
         break;
      case VK_STRUCTURE_TYPE_PIPELINE_TESS_STATE_CREATE_INFO:
      case VK_STRUCTURE_TYPE_PIPELINE_VP_STATE_CREATE_INFO:
         break;
      case VK_STRUCTURE_TYPE_PIPELINE_RS_STATE_CREATE_INFO:
         rs_info = (VkPipelineRsStateCreateInfo *) common;
         break;
      case VK_STRUCTURE_TYPE_PIPELINE_MS_STATE_CREATE_INFO:
      case VK_STRUCTURE_TYPE_PIPELINE_CB_STATE_CREATE_INFO:
      case VK_STRUCTURE_TYPE_PIPELINE_DS_STATE_CREATE_INFO:
         /* Not consumed here yet; don't fall through and misinterpret these
          * structs as shader stage create infos. */
         break;
      case VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO:
         shader_create_info = (VkPipelineShaderStageCreateInfo *) common;
         pipeline->shaders[shader_create_info->shader.stage] =
            (struct anv_shader *) shader_create_info->shader.shader;
         break;
      default:
         break;
      }
   }

   pipeline->use_repclear = extra && extra->use_repclear;

   anv_compiler_run(device->compiler, pipeline);

   /* FIXME: The compiler dead-codes FS inputs when we don't have a VS, so we
    * hard code this to num_attributes - 2. This is because the attributes
    * include VUE header and position, which aren't counted as varying
    * inputs. */
   if (pipeline->vs_simd8 == NO_KERNEL)
      pipeline->wm_prog_data.num_varying_inputs = vi_info->attributeCount - 2;

   emit_vertex_input(pipeline, vi_info);
   emit_ia_state(pipeline, ia_info, extra);
   emit_rs_state(pipeline, rs_info, extra);

   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_CLIP,
                  .ClipEnable = true,
                  .ViewportXYClipTestEnable = !(extra && extra->disable_viewport));

   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_WM,
                  .StatisticsEnable = true,
                  .LineEndCapAntialiasingRegionWidth = _05pixels,
                  .LineAntialiasingRegionWidth = _10pixels,
                  .EarlyDepthStencilControl = NORMAL,
                  .ForceThreadDispatchEnable = NORMAL,
                  .PointRasterizationRule = RASTRULE_UPPER_RIGHT,
                  .BarycentricInterpolationMode =
                     pipeline->wm_prog_data.barycentric_interp_modes);

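   /* Multisampling isn't wired up yet, so hard-code a single sample. The
    * hardware field takes the log2 of the sample count.
    */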
   uint32_t samples = 1;
   uint32_t log2_samples = __builtin_ffs(samples) - 1;
   bool enable_sampling = samples > 1 ? true : false;

   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_MULTISAMPLE,
                  .PixelPositionOffsetEnable = enable_sampling,
                  .PixelLocation = CENTER,
                  .NumberofMultisamples = log2_samples);

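   /* Partition the URB between the VS and GS. HS and DS get no entries since
    * tessellation isn't supported here yet.
    */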
   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_URB_VS,
                  .VSURBStartingAddress = pipeline->urb.vs_start,
                  .VSURBEntryAllocationSize = pipeline->urb.vs_size - 1,
                  .VSNumberofURBEntries = pipeline->urb.nr_vs_entries);

   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_URB_GS,
                  .GSURBStartingAddress = pipeline->urb.gs_start,
                  .GSURBEntryAllocationSize = pipeline->urb.gs_size - 1,
                  .GSNumberofURBEntries = pipeline->urb.nr_gs_entries);

   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_URB_HS,
                  .HSURBStartingAddress = pipeline->urb.vs_start,
                  .HSURBEntryAllocationSize = 0,
                  .HSNumberofURBEntries = 0);

   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_URB_DS,
                  .DSURBStartingAddress = pipeline->urb.vs_start,
                  .DSURBEntryAllocationSize = 0,
                  .DSNumberofURBEntries = 0);

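   /* Compute the GS output read offset/length the same way as for the VS
    * below: skip the VUE header and position slots.
    */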
   const struct brw_gs_prog_data *gs_prog_data = &pipeline->gs_prog_data;
   offset = 1;
   length = (gs_prog_data->base.vue_map.num_slots + 1) / 2 - offset;

   if (pipeline->gs_vec4 == NO_KERNEL)
      anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_GS, .Enable = false);
   else
      anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_GS,
                     .SingleProgramFlow = false,
                     .KernelStartPointer = pipeline->gs_vec4,
                     .VectorMaskEnable = Vmask,
                     .SamplerCount = 0,
                     .BindingTableEntryCount = 0,
                     .ExpectedVertexCount = pipeline->gs_vertex_count,

                     .PerThreadScratchSpace = 0,
                     .ScratchSpaceBasePointer = 0,

                     .OutputVertexSize = gs_prog_data->output_vertex_size_hwords * 2 - 1,
                     .OutputTopology = gs_prog_data->output_topology,
                     .VertexURBEntryReadLength = gs_prog_data->base.urb_read_length,
                     .DispatchGRFStartRegisterForURBData =
                        gs_prog_data->base.base.dispatch_grf_start_reg,

                     .MaximumNumberofThreads = device->info.max_gs_threads,
                     .ControlDataHeaderSize = gs_prog_data->control_data_header_size_hwords,
                     //pipeline->gs_prog_data.dispatch_mode |
                     .StatisticsEnable = true,
                     .IncludePrimitiveID = gs_prog_data->include_primitive_id,
                     .ReorderMode = TRAILING,
                     .Enable = true,

                     .ControlDataFormat = gs_prog_data->control_data_format,

                     /* FIXME: mesa sets this based on ctx->Transform.ClipPlanesEnabled:
                      * UserClipDistanceClipTestEnableBitmask_3DSTATE_GS(v)
                      * UserClipDistanceCullTestEnableBitmask(v)
                      */

                     .VertexURBEntryOutputReadOffset = offset,
                     .VertexURBEntryOutputLength = length);

   //trp_generate_blend_hw_cmds(batch, pipeline);

   const struct brw_vue_prog_data *vue_prog_data = &pipeline->vs_prog_data.base;
   /* Skip the VUE header and position slots */
   offset = 1;
   length = (vue_prog_data->vue_map.num_slots + 1) / 2 - offset;

   if (pipeline->vs_simd8 == NO_KERNEL || (extra && extra->disable_vs))
      anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_VS,
                     .FunctionEnable = false,
                     .VertexURBEntryOutputReadOffset = 1,
                     /* Even if VS is disabled, SBE still gets the amount of
                      * vertex data to read from this field. We use attribute
                      * count - 1, as we don't count the VUE header here. */
                     .VertexURBEntryOutputLength =
                        DIV_ROUND_UP(vi_info->attributeCount - 1, 2));
   else
      anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_VS,
                     .KernelStartPointer = pipeline->vs_simd8,
                     .SingleVertexDispatch = Multiple,
                     .VectorMaskEnable = Dmask,
                     .SamplerCount = 0,
                     .BindingTableEntryCount =
                        vue_prog_data->base.binding_table.size_bytes / 4,
                     .ThreadDispatchPriority = Normal,
                     .FloatingPointMode = IEEE754,
                     .IllegalOpcodeExceptionEnable = false,
                     .AccessesUAV = false,
                     .SoftwareExceptionEnable = false,

                     /* FIXME: pointer needs to be assigned outside as it aliases
                      * PerThreadScratchSpace.
                      */
                     .ScratchSpaceBasePointer = 0,
                     .PerThreadScratchSpace = 0,

                     .DispatchGRFStartRegisterForURBData =
                        vue_prog_data->base.dispatch_grf_start_reg,
                     .VertexURBEntryReadLength = vue_prog_data->urb_read_length,
                     .VertexURBEntryReadOffset = 0,

                     .MaximumNumberofThreads = device->info.max_vs_threads - 1,
                     .StatisticsEnable = false,
                     .SIMD8DispatchEnable = true,
                     .VertexCacheDisable = ia_info->disableVertexReuse,
                     .FunctionEnable = true,

                     .VertexURBEntryOutputReadOffset = offset,
                     .VertexURBEntryOutputLength = length,
                     .UserClipDistanceClipTestEnableBitmask = 0,
                     .UserClipDistanceCullTestEnableBitmask = 0);

   const struct brw_wm_prog_data *wm_prog_data = &pipeline->wm_prog_data;
   uint32_t ksp0, ksp2, grf_start0, grf_start2;

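   /* KernelStartPointer0 gets the SIMD8 program when one exists, otherwise
    * the SIMD16 program; KernelStartPointer2 carries the SIMD16 program only
    * when both are present.
    */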
   ksp2 = 0;
   grf_start2 = 0;
   if (pipeline->ps_simd8 != NO_KERNEL) {
      ksp0 = pipeline->ps_simd8;
      grf_start0 = wm_prog_data->base.dispatch_grf_start_reg;
      if (pipeline->ps_simd16 != NO_KERNEL) {
         ksp2 = pipeline->ps_simd16;
         grf_start2 = wm_prog_data->dispatch_grf_start_reg_16;
      }
   } else if (pipeline->ps_simd16 != NO_KERNEL) {
      ksp0 = pipeline->ps_simd16;
      grf_start0 = wm_prog_data->dispatch_grf_start_reg_16;
   } else {
      unreachable("no ps shader");
   }

   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_PS,
                  .KernelStartPointer0 = ksp0,

                  .SingleProgramFlow = false,
                  .VectorMaskEnable = true,
                  .SamplerCount = 1,

                  .ScratchSpaceBasePointer = 0,
                  .PerThreadScratchSpace = 0,

                  .MaximumNumberofThreadsPerPSD = 64 - 2,
                  .PositionXYOffsetSelect = wm_prog_data->uses_pos_offset ?
                     POSOFFSET_SAMPLE : POSOFFSET_NONE,
                  .PushConstantEnable = wm_prog_data->base.nr_params > 0,
                  ._8PixelDispatchEnable = pipeline->ps_simd8 != NO_KERNEL,
                  ._16PixelDispatchEnable = pipeline->ps_simd16 != NO_KERNEL,
                  ._32PixelDispatchEnable = false,

                  .DispatchGRFStartRegisterForConstantSetupData0 = grf_start0,
                  .DispatchGRFStartRegisterForConstantSetupData1 = 0,
                  .DispatchGRFStartRegisterForConstantSetupData2 = grf_start2,

                  .KernelStartPointer1 = 0,
                  .KernelStartPointer2 = ksp2);

   bool per_sample_ps = false;
   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_PS_EXTRA,
                  .PixelShaderValid = true,
                  .PixelShaderKillsPixel = wm_prog_data->uses_kill,
                  .PixelShaderComputedDepthMode = wm_prog_data->computed_depth_mode,
                  .AttributeEnable = wm_prog_data->num_varying_inputs > 0,
                  .oMaskPresenttoRenderTarget = wm_prog_data->uses_omask,
                  .PixelShaderIsPerSample = per_sample_ps);

   *pPipeline = (VkPipeline) pipeline;

   return VK_SUCCESS;

 fail:
   anv_device_free(device, pipeline);

   return result;
}

VkResult anv_CreateGraphicsPipelineDerivative(
    VkDevice device,
    const VkGraphicsPipelineCreateInfo* pCreateInfo,
    VkPipeline basePipeline,
    VkPipeline* pPipeline)
{
   stub_return(VK_UNSUPPORTED);
}

VkResult anv_CreateComputePipeline(
    VkDevice device,
    const VkComputePipelineCreateInfo* pCreateInfo,
    VkPipeline* pPipeline)
{
   stub_return(VK_UNSUPPORTED);
}

VkResult anv_StorePipeline(
    VkDevice device,
    VkPipeline pipeline,
    size_t* pDataSize,
    void* pData)
{
   stub_return(VK_UNSUPPORTED);
}

VkResult anv_LoadPipeline(
    VkDevice device,
    size_t dataSize,
    const void* pData,
    VkPipeline* pPipeline)
{
   stub_return(VK_UNSUPPORTED);
}

VkResult anv_LoadPipelineDerivative(
    VkDevice device,
    size_t dataSize,
    const void* pData,
    VkPipeline basePipeline,
    VkPipeline* pPipeline)
{
   stub_return(VK_UNSUPPORTED);
}

// Pipeline layout functions

VkResult anv_CreatePipelineLayout(
    VkDevice _device,
    const VkPipelineLayoutCreateInfo* pCreateInfo,
    VkPipelineLayout* pPipelineLayout)
{
   struct anv_device *device = (struct anv_device *) _device;
   struct anv_pipeline_layout *layout;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO);

   layout = anv_device_alloc(device, sizeof(*layout), 8,
                             VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (layout == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   layout->num_sets = pCreateInfo->descriptorSetCount;

   uint32_t surface_start[VK_NUM_SHADER_STAGE] = { 0, };
   uint32_t sampler_start[VK_NUM_SHADER_STAGE] = { 0, };

   for (uint32_t s = 0; s < VK_NUM_SHADER_STAGE; s++) {
      layout->stage[s].surface_count = 0;
      layout->stage[s].sampler_count = 0;
   }

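   /* Walk the descriptor set layouts and, for every shader stage, record
    * where each set's surfaces and samplers begin in that stage's flattened
    * binding table by accumulating the per-set counts.
    */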
   for (uint32_t i = 0; i < pCreateInfo->descriptorSetCount; i++) {
      struct anv_descriptor_set_layout *set_layout =
         (struct anv_descriptor_set_layout *) pCreateInfo->pSetLayouts[i];

      layout->set[i].layout = set_layout;
      for (uint32_t s = 0; s < VK_NUM_SHADER_STAGE; s++) {
         layout->set[i].surface_start[s] = surface_start[s];
         surface_start[s] += set_layout->stage[s].surface_count;
         layout->set[i].sampler_start[s] = sampler_start[s];
         sampler_start[s] += set_layout->stage[s].sampler_count;

         layout->stage[s].surface_count += set_layout->stage[s].surface_count;
         layout->stage[s].sampler_count += set_layout->stage[s].sampler_count;
      }
   }

   *pPipelineLayout = (VkPipelineLayout) layout;

   return VK_SUCCESS;
}