/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
34 VkResult VKAPI
vkCreateShader(
36 const VkShaderCreateInfo
* pCreateInfo
,
39 struct anv_device
*device
= (struct anv_device
*) _device
;
40 struct anv_shader
*shader
;
42 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_SHADER_CREATE_INFO
);
44 shader
= anv_device_alloc(device
, sizeof(*shader
) + pCreateInfo
->codeSize
, 8,
45 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
47 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
49 shader
->size
= pCreateInfo
->codeSize
;
50 memcpy(shader
->data
, pCreateInfo
->pCode
, shader
->size
);
52 *pShader
= (VkShader
) shader
;
60 emit_vertex_input(struct anv_pipeline
*pipeline
, VkPipelineVertexInputCreateInfo
*info
)
62 const uint32_t num_dwords
= 1 + info
->attributeCount
* 2;
64 bool instancing_enable
[32];
66 for (uint32_t i
= 0; i
< info
->bindingCount
; i
++) {
67 const VkVertexInputBindingDescription
*desc
=
68 &info
->pVertexBindingDescriptions
[i
];
70 pipeline
->binding_stride
[desc
->binding
] = desc
->strideInBytes
;
72 /* Step rate is programmed per vertex element (attribute), not
73 * binding. Set up a map of which bindings step per instance, for
74 * reference by vertex element setup. */
75 switch (desc
->stepRate
) {
77 case VK_VERTEX_INPUT_STEP_RATE_VERTEX
:
78 instancing_enable
[desc
->binding
] = false;
80 case VK_VERTEX_INPUT_STEP_RATE_INSTANCE
:
81 instancing_enable
[desc
->binding
] = true;
86 p
= anv_batch_emitn(&pipeline
->batch
, num_dwords
,
87 GEN8_3DSTATE_VERTEX_ELEMENTS
);
89 for (uint32_t i
= 0; i
< info
->attributeCount
; i
++) {
90 const VkVertexInputAttributeDescription
*desc
=
91 &info
->pVertexAttributeDescriptions
[i
];
92 const struct anv_format
*format
= anv_format_for_vk_format(desc
->format
);
94 struct GEN8_VERTEX_ELEMENT_STATE element
= {
95 .VertexBufferIndex
= desc
->binding
,
97 .SourceElementFormat
= format
->format
,
98 .EdgeFlagEnable
= false,
99 .SourceElementOffset
= desc
->offsetInBytes
,
100 .Component0Control
= VFCOMP_STORE_SRC
,
101 .Component1Control
= format
->channels
>= 2 ? VFCOMP_STORE_SRC
: VFCOMP_STORE_0
,
102 .Component2Control
= format
->channels
>= 3 ? VFCOMP_STORE_SRC
: VFCOMP_STORE_0
,
103 .Component3Control
= format
->channels
>= 4 ? VFCOMP_STORE_SRC
: VFCOMP_STORE_1_FP
105 GEN8_VERTEX_ELEMENT_STATE_pack(NULL
, &p
[1 + i
* 2], &element
);
107 anv_batch_emit(&pipeline
->batch
, GEN8_3DSTATE_VF_INSTANCING
,
108 .InstancingEnable
= instancing_enable
[desc
->binding
],
109 .VertexElementIndex
= i
,
110 /* Vulkan so far doesn't have an instance divisor, so
111 * this is always 1 (ignored if not instancing). */
112 .InstanceDataStepRate
= 1);
115 anv_batch_emit(&pipeline
->batch
, GEN8_3DSTATE_VF_SGVS
,
116 .VertexIDEnable
= pipeline
->vs_prog_data
.uses_vertexid
,
117 .VertexIDComponentNumber
= 2,
118 .VertexIDElementOffset
= info
->bindingCount
,
119 .InstanceIDEnable
= pipeline
->vs_prog_data
.uses_instanceid
,
120 .InstanceIDComponentNumber
= 3,
121 .InstanceIDElementOffset
= info
->bindingCount
);
125 emit_ia_state(struct anv_pipeline
*pipeline
,
126 VkPipelineIaStateCreateInfo
*info
,
127 const struct anv_pipeline_create_info
*extra
)
129 static const uint32_t vk_to_gen_primitive_type
[] = {
130 [VK_PRIMITIVE_TOPOLOGY_POINT_LIST
] = _3DPRIM_POINTLIST
,
131 [VK_PRIMITIVE_TOPOLOGY_LINE_LIST
] = _3DPRIM_LINELIST
,
132 [VK_PRIMITIVE_TOPOLOGY_LINE_STRIP
] = _3DPRIM_LINESTRIP
,
133 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST
] = _3DPRIM_TRILIST
,
134 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP
] = _3DPRIM_TRISTRIP
,
135 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN
] = _3DPRIM_TRIFAN
,
136 [VK_PRIMITIVE_TOPOLOGY_LINE_LIST_ADJ
] = _3DPRIM_LINELIST_ADJ
,
137 [VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_ADJ
] = _3DPRIM_LISTSTRIP_ADJ
,
138 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_ADJ
] = _3DPRIM_TRILIST_ADJ
,
139 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_ADJ
] = _3DPRIM_TRISTRIP_ADJ
,
140 [VK_PRIMITIVE_TOPOLOGY_PATCH
] = _3DPRIM_PATCHLIST_1
142 uint32_t topology
= vk_to_gen_primitive_type
[info
->topology
];
144 if (extra
&& extra
->use_rectlist
)
145 topology
= _3DPRIM_RECTLIST
;
147 anv_batch_emit(&pipeline
->batch
, GEN8_3DSTATE_VF
,
148 .IndexedDrawCutIndexEnable
= info
->primitiveRestartEnable
,
149 .CutIndex
= info
->primitiveRestartIndex
);
150 anv_batch_emit(&pipeline
->batch
, GEN8_3DSTATE_VF_TOPOLOGY
,
151 .PrimitiveTopologyType
= topology
);
155 emit_rs_state(struct anv_pipeline
*pipeline
, VkPipelineRsStateCreateInfo
*info
,
156 const struct anv_pipeline_create_info
*extra
)
159 static const uint32_t vk_to_gen_cullmode
[] = {
160 [VK_CULL_MODE_NONE
] = CULLMODE_NONE
,
161 [VK_CULL_MODE_FRONT
] = CULLMODE_FRONT
,
162 [VK_CULL_MODE_BACK
] = CULLMODE_BACK
,
163 [VK_CULL_MODE_FRONT_AND_BACK
] = CULLMODE_BOTH
166 static const uint32_t vk_to_gen_fillmode
[] = {
167 [VK_FILL_MODE_POINTS
] = RASTER_POINT
,
168 [VK_FILL_MODE_WIREFRAME
] = RASTER_WIREFRAME
,
169 [VK_FILL_MODE_SOLID
] = RASTER_SOLID
172 static const uint32_t vk_to_gen_front_face
[] = {
173 [VK_FRONT_FACE_CCW
] = CounterClockwise
,
174 [VK_FRONT_FACE_CW
] = Clockwise
177 static const uint32_t vk_to_gen_coordinate_origin
[] = {
178 [VK_COORDINATE_ORIGIN_UPPER_LEFT
] = UPPERLEFT
,
179 [VK_COORDINATE_ORIGIN_LOWER_LEFT
] = LOWERLEFT
182 struct GEN8_3DSTATE_SF sf
= {
183 GEN8_3DSTATE_SF_header
,
184 .ViewportTransformEnable
= !(extra
&& extra
->disable_viewport
),
185 .TriangleStripListProvokingVertexSelect
=
186 info
->provokingVertex
== VK_PROVOKING_VERTEX_FIRST
? 0 : 2,
187 .LineStripListProvokingVertexSelect
=
188 info
->provokingVertex
== VK_PROVOKING_VERTEX_FIRST
? 0 : 1,
189 .TriangleFanProvokingVertexSelect
=
190 info
->provokingVertex
== VK_PROVOKING_VERTEX_FIRST
? 0 : 2,
191 .PointWidthSource
= info
->programPointSize
? Vertex
: State
,
194 /* bool32_t rasterizerDiscardEnable; */
197 GEN8_3DSTATE_SF_pack(NULL
, pipeline
->state_sf
, &sf
);
199 anv_batch_emit(&pipeline
->batch
, GEN8_3DSTATE_RASTER
,
200 .FrontWinding
= vk_to_gen_front_face
[info
->frontFace
],
201 .CullMode
= vk_to_gen_cullmode
[info
->cullMode
],
202 .FrontFaceFillMode
= vk_to_gen_fillmode
[info
->fillMode
],
203 .BackFaceFillMode
= vk_to_gen_fillmode
[info
->fillMode
],
204 .ScissorRectangleEnable
= !(extra
&& extra
->disable_scissor
),
205 .ViewportZClipTestEnable
= info
->depthClipEnable
);
207 anv_batch_emit(&pipeline
->batch
, GEN8_3DSTATE_SBE
,
208 .ForceVertexURBEntryReadLength
= false,
209 .ForceVertexURBEntryReadOffset
= false,
210 .PointSpriteTextureCoordinateOrigin
=
211 vk_to_gen_coordinate_origin
[info
->pointOrigin
],
212 .NumberofSFOutputAttributes
=
213 pipeline
->wm_prog_data
.num_varying_inputs
);
217 VkResult VKAPI
vkCreateGraphicsPipeline(
219 const VkGraphicsPipelineCreateInfo
* pCreateInfo
,
220 VkPipeline
* pPipeline
)
222 return anv_pipeline_create(device
, pCreateInfo
, NULL
, pPipeline
);
229 const VkGraphicsPipelineCreateInfo
* pCreateInfo
,
230 const struct anv_pipeline_create_info
* extra
,
231 VkPipeline
* pPipeline
)
233 struct anv_device
*device
= (struct anv_device
*) _device
;
234 struct anv_pipeline
*pipeline
;
235 const struct anv_common
*common
;
236 VkPipelineShaderStageCreateInfo
*shader_create_info
;
237 VkPipelineIaStateCreateInfo
*ia_info
;
238 VkPipelineRsStateCreateInfo
*rs_info
;
239 VkPipelineVertexInputCreateInfo
*vi_info
;
241 uint32_t offset
, length
;
243 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO
);
245 pipeline
= anv_device_alloc(device
, sizeof(*pipeline
), 8,
246 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
247 if (pipeline
== NULL
)
248 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
250 pipeline
->device
= device
;
251 pipeline
->layout
= (struct anv_pipeline_layout
*) pCreateInfo
->layout
;
252 memset(pipeline
->shaders
, 0, sizeof(pipeline
->shaders
));
253 result
= anv_batch_init(&pipeline
->batch
, device
);
254 if (result
!= VK_SUCCESS
)
257 for (common
= pCreateInfo
->pNext
; common
; common
= common
->pNext
) {
258 switch (common
->sType
) {
259 case VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_CREATE_INFO
:
260 vi_info
= (VkPipelineVertexInputCreateInfo
*) common
;
262 case VK_STRUCTURE_TYPE_PIPELINE_IA_STATE_CREATE_INFO
:
263 ia_info
= (VkPipelineIaStateCreateInfo
*) common
;
265 case VK_STRUCTURE_TYPE_PIPELINE_TESS_STATE_CREATE_INFO
:
266 case VK_STRUCTURE_TYPE_PIPELINE_VP_STATE_CREATE_INFO
:
268 case VK_STRUCTURE_TYPE_PIPELINE_RS_STATE_CREATE_INFO
:
269 rs_info
= (VkPipelineRsStateCreateInfo
*) common
;
271 case VK_STRUCTURE_TYPE_PIPELINE_MS_STATE_CREATE_INFO
:
272 case VK_STRUCTURE_TYPE_PIPELINE_CB_STATE_CREATE_INFO
:
273 case VK_STRUCTURE_TYPE_PIPELINE_DS_STATE_CREATE_INFO
:
274 case VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO
:
275 shader_create_info
= (VkPipelineShaderStageCreateInfo
*) common
;
276 pipeline
->shaders
[shader_create_info
->shader
.stage
] =
277 (struct anv_shader
*) shader_create_info
->shader
.shader
;
284 pipeline
->use_repclear
= extra
&& extra
->use_repclear
;
286 anv_compiler_run(device
->compiler
, pipeline
);
288 /* FIXME: The compiler dead-codes FS inputs when we don't have a VS, so we
289 * hard code this to num_attributes - 2. This is because the attributes
290 * include VUE header and position, which aren't counted as varying
292 if (pipeline
->vs_simd8
== NO_KERNEL
)
293 pipeline
->wm_prog_data
.num_varying_inputs
= vi_info
->attributeCount
- 2;
295 emit_vertex_input(pipeline
, vi_info
);
296 emit_ia_state(pipeline
, ia_info
, extra
);
297 emit_rs_state(pipeline
, rs_info
, extra
);
299 anv_batch_emit(&pipeline
->batch
, GEN8_3DSTATE_CLIP
,
301 .ViewportXYClipTestEnable
= !(extra
&& extra
->disable_viewport
));
303 anv_batch_emit(&pipeline
->batch
, GEN8_3DSTATE_WM
,
304 .StatisticsEnable
= true,
305 .LineEndCapAntialiasingRegionWidth
= _05pixels
,
306 .LineAntialiasingRegionWidth
= _10pixels
,
307 .EarlyDepthStencilControl
= NORMAL
,
308 .ForceThreadDispatchEnable
= NORMAL
,
309 .PointRasterizationRule
= RASTRULE_UPPER_RIGHT
,
310 .BarycentricInterpolationMode
=
311 pipeline
->wm_prog_data
.barycentric_interp_modes
);
313 uint32_t samples
= 1;
314 uint32_t log2_samples
= __builtin_ffs(samples
) - 1;
315 bool enable_sampling
= samples
> 1 ? true : false;
317 anv_batch_emit(&pipeline
->batch
, GEN8_3DSTATE_MULTISAMPLE
,
318 .PixelPositionOffsetEnable
= enable_sampling
,
319 .PixelLocation
= CENTER
,
320 .NumberofMultisamples
= log2_samples
);
322 anv_batch_emit(&pipeline
->batch
, GEN8_3DSTATE_URB_VS
,
323 .VSURBStartingAddress
= pipeline
->urb
.vs_start
,
324 .VSURBEntryAllocationSize
= pipeline
->urb
.vs_size
- 1,
325 .VSNumberofURBEntries
= pipeline
->urb
.nr_vs_entries
);
327 anv_batch_emit(&pipeline
->batch
, GEN8_3DSTATE_URB_GS
,
328 .GSURBStartingAddress
= pipeline
->urb
.gs_start
,
329 .GSURBEntryAllocationSize
= pipeline
->urb
.gs_size
- 1,
330 .GSNumberofURBEntries
= pipeline
->urb
.nr_gs_entries
);
332 anv_batch_emit(&pipeline
->batch
, GEN8_3DSTATE_URB_HS
,
333 .HSURBStartingAddress
= pipeline
->urb
.vs_start
,
334 .HSURBEntryAllocationSize
= 0,
335 .HSNumberofURBEntries
= 0);
337 anv_batch_emit(&pipeline
->batch
, GEN8_3DSTATE_URB_DS
,
338 .DSURBStartingAddress
= pipeline
->urb
.vs_start
,
339 .DSURBEntryAllocationSize
= 0,
340 .DSNumberofURBEntries
= 0);
342 const struct brw_gs_prog_data
*gs_prog_data
= &pipeline
->gs_prog_data
;
344 length
= (gs_prog_data
->base
.vue_map
.num_slots
+ 1) / 2 - offset
;
346 if (pipeline
->gs_vec4
== NO_KERNEL
)
347 anv_batch_emit(&pipeline
->batch
, GEN8_3DSTATE_GS
, .Enable
= false);
349 anv_batch_emit(&pipeline
->batch
, GEN8_3DSTATE_GS
,
350 .SingleProgramFlow
= false,
351 .KernelStartPointer
= pipeline
->gs_vec4
,
352 .VectorMaskEnable
= Vmask
,
354 .BindingTableEntryCount
= 0,
355 .ExpectedVertexCount
= pipeline
->gs_vertex_count
,
357 .PerThreadScratchSpace
= 0,
358 .ScratchSpaceBasePointer
= 0,
360 .OutputVertexSize
= gs_prog_data
->output_vertex_size_hwords
* 2 - 1,
361 .OutputTopology
= gs_prog_data
->output_topology
,
362 .VertexURBEntryReadLength
= gs_prog_data
->base
.urb_read_length
,
363 .DispatchGRFStartRegisterForURBData
=
364 gs_prog_data
->base
.base
.dispatch_grf_start_reg
,
366 .MaximumNumberofThreads
= device
->info
.max_gs_threads
,
367 .ControlDataHeaderSize
= gs_prog_data
->control_data_header_size_hwords
,
368 //pipeline->gs_prog_data.dispatch_mode |
369 .StatisticsEnable
= true,
370 .IncludePrimitiveID
= gs_prog_data
->include_primitive_id
,
371 .ReorderMode
= TRAILING
,
374 .ControlDataFormat
= gs_prog_data
->control_data_format
,
376 /* FIXME: mesa sets this based on ctx->Transform.ClipPlanesEnabled:
377 * UserClipDistanceClipTestEnableBitmask_3DSTATE_GS(v)
378 * UserClipDistanceCullTestEnableBitmask(v)
381 .VertexURBEntryOutputReadOffset
= offset
,
382 .VertexURBEntryOutputLength
= length
);
384 //trp_generate_blend_hw_cmds(batch, pipeline);
386 const struct brw_vue_prog_data
*vue_prog_data
= &pipeline
->vs_prog_data
.base
;
387 /* Skip the VUE header and position slots */
389 length
= (vue_prog_data
->vue_map
.num_slots
+ 1) / 2 - offset
;
391 if (pipeline
->vs_simd8
== NO_KERNEL
|| (extra
&& extra
->disable_vs
))
392 anv_batch_emit(&pipeline
->batch
, GEN8_3DSTATE_VS
,
393 .FunctionEnable
= false,
394 .VertexURBEntryOutputReadOffset
= 1,
395 /* Even if VS is disabled, SBE still gets the amount of
396 * vertex data to read from this field. We use attribute
397 * count - 1, as we don't count the VUE header here. */
398 .VertexURBEntryOutputLength
=
399 DIV_ROUND_UP(vi_info
->attributeCount
- 1, 2));
401 anv_batch_emit(&pipeline
->batch
, GEN8_3DSTATE_VS
,
402 .KernelStartPointer
= pipeline
->vs_simd8
,
403 .SingleVertexDispatch
= Multiple
,
404 .VectorMaskEnable
= Dmask
,
406 .BindingTableEntryCount
=
407 vue_prog_data
->base
.binding_table
.size_bytes
/ 4,
408 .ThreadDispatchPriority
= Normal
,
409 .FloatingPointMode
= IEEE754
,
410 .IllegalOpcodeExceptionEnable
= false,
411 .AccessesUAV
= false,
412 .SoftwareExceptionEnable
= false,
414 /* FIXME: pointer needs to be assigned outside as it aliases
415 * PerThreadScratchSpace.
417 .ScratchSpaceBasePointer
= 0,
418 .PerThreadScratchSpace
= 0,
420 .DispatchGRFStartRegisterForURBData
=
421 vue_prog_data
->base
.dispatch_grf_start_reg
,
422 .VertexURBEntryReadLength
= vue_prog_data
->urb_read_length
,
423 .VertexURBEntryReadOffset
= 0,
425 .MaximumNumberofThreads
= device
->info
.max_vs_threads
- 1,
426 .StatisticsEnable
= false,
427 .SIMD8DispatchEnable
= true,
428 .VertexCacheDisable
= ia_info
->disableVertexReuse
,
429 .FunctionEnable
= true,
431 .VertexURBEntryOutputReadOffset
= offset
,
432 .VertexURBEntryOutputLength
= length
,
433 .UserClipDistanceClipTestEnableBitmask
= 0,
434 .UserClipDistanceCullTestEnableBitmask
= 0);
436 const struct brw_wm_prog_data
*wm_prog_data
= &pipeline
->wm_prog_data
;
437 uint32_t ksp0
, ksp2
, grf_start0
, grf_start2
;
441 if (pipeline
->ps_simd8
!= NO_KERNEL
) {
442 ksp0
= pipeline
->ps_simd8
;
443 grf_start0
= wm_prog_data
->base
.dispatch_grf_start_reg
;
444 if (pipeline
->ps_simd16
!= NO_KERNEL
) {
445 ksp2
= pipeline
->ps_simd16
;
446 grf_start2
= wm_prog_data
->dispatch_grf_start_reg_16
;
448 } else if (pipeline
->ps_simd16
!= NO_KERNEL
) {
449 ksp0
= pipeline
->ps_simd16
;
450 grf_start0
= wm_prog_data
->dispatch_grf_start_reg_16
;
452 unreachable("no ps shader");
455 anv_batch_emit(&pipeline
->batch
, GEN8_3DSTATE_PS
,
456 .KernelStartPointer0
= ksp0
,
458 .SingleProgramFlow
= false,
459 .VectorMaskEnable
= true,
462 .ScratchSpaceBasePointer
= 0,
463 .PerThreadScratchSpace
= 0,
465 .MaximumNumberofThreadsPerPSD
= 64 - 2,
466 .PositionXYOffsetSelect
= wm_prog_data
->uses_pos_offset
?
467 POSOFFSET_SAMPLE
: POSOFFSET_NONE
,
468 .PushConstantEnable
= wm_prog_data
->base
.nr_params
> 0,
469 ._8PixelDispatchEnable
= pipeline
->ps_simd8
!= NO_KERNEL
,
470 ._16PixelDispatchEnable
= pipeline
->ps_simd16
!= NO_KERNEL
,
471 ._32PixelDispatchEnable
= false,
473 .DispatchGRFStartRegisterForConstantSetupData0
= grf_start0
,
474 .DispatchGRFStartRegisterForConstantSetupData1
= 0,
475 .DispatchGRFStartRegisterForConstantSetupData2
= grf_start2
,
477 .KernelStartPointer1
= 0,
478 .KernelStartPointer2
= ksp2
);
480 bool per_sample_ps
= false;
481 anv_batch_emit(&pipeline
->batch
, GEN8_3DSTATE_PS_EXTRA
,
482 .PixelShaderValid
= true,
483 .PixelShaderKillsPixel
= wm_prog_data
->uses_kill
,
484 .PixelShaderComputedDepthMode
= wm_prog_data
->computed_depth_mode
,
485 .AttributeEnable
= wm_prog_data
->num_varying_inputs
> 0,
486 .oMaskPresenttoRenderTarget
= wm_prog_data
->uses_omask
,
487 .PixelShaderIsPerSample
= per_sample_ps
);
489 *pPipeline
= (VkPipeline
) pipeline
;
494 anv_device_free(device
, pipeline
);
500 anv_pipeline_destroy(struct anv_pipeline
*pipeline
)
502 anv_compiler_free(pipeline
);
503 anv_batch_finish(&pipeline
->batch
, pipeline
->device
);
504 anv_device_free(pipeline
->device
, pipeline
);
509 VkResult VKAPI
vkCreateGraphicsPipelineDerivative(
511 const VkGraphicsPipelineCreateInfo
* pCreateInfo
,
512 VkPipeline basePipeline
,
513 VkPipeline
* pPipeline
)
515 stub_return(VK_UNSUPPORTED
);
518 VkResult VKAPI
vkCreateComputePipeline(
520 const VkComputePipelineCreateInfo
* pCreateInfo
,
521 VkPipeline
* pPipeline
)
523 stub_return(VK_UNSUPPORTED
);
526 VkResult VKAPI
vkStorePipeline(
532 stub_return(VK_UNSUPPORTED
);
535 VkResult VKAPI
vkLoadPipeline(
539 VkPipeline
* pPipeline
)
541 stub_return(VK_UNSUPPORTED
);
544 VkResult VKAPI
vkLoadPipelineDerivative(
548 VkPipeline basePipeline
,
549 VkPipeline
* pPipeline
)
551 stub_return(VK_UNSUPPORTED
);
554 // Pipeline layout functions
556 VkResult VKAPI
vkCreatePipelineLayout(
558 const VkPipelineLayoutCreateInfo
* pCreateInfo
,
559 VkPipelineLayout
* pPipelineLayout
)
561 struct anv_device
*device
= (struct anv_device
*) _device
;
562 struct anv_pipeline_layout
*layout
;
564 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO
);
566 layout
= anv_device_alloc(device
, sizeof(*layout
), 8,
567 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
569 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
571 layout
->num_sets
= pCreateInfo
->descriptorSetCount
;
573 uint32_t surface_start
[VK_NUM_SHADER_STAGE
] = { 0, };
574 uint32_t sampler_start
[VK_NUM_SHADER_STAGE
] = { 0, };
576 for (uint32_t s
= 0; s
< VK_NUM_SHADER_STAGE
; s
++) {
577 layout
->stage
[s
].surface_count
= 0;
578 layout
->stage
[s
].sampler_count
= 0;
581 for (uint32_t i
= 0; i
< pCreateInfo
->descriptorSetCount
; i
++) {
582 struct anv_descriptor_set_layout
*set_layout
=
583 (struct anv_descriptor_set_layout
*) pCreateInfo
->pSetLayouts
[i
];
585 layout
->set
[i
].layout
= set_layout
;
586 for (uint32_t s
= 0; s
< VK_NUM_SHADER_STAGE
; s
++) {
587 layout
->set
[i
].surface_start
[s
] = surface_start
[s
];
588 surface_start
[s
] += set_layout
->stage
[s
].surface_count
;
589 layout
->set
[i
].sampler_start
[s
] = sampler_start
[s
];
590 sampler_start
[s
] += set_layout
->stage
[s
].sampler_count
;
592 layout
->stage
[s
].surface_count
+= set_layout
->stage
[s
].surface_count
;
593 layout
->stage
[s
].sampler_count
+= set_layout
->stage
[s
].sampler_count
;
597 *pPipelineLayout
= (VkPipelineLayout
) layout
;