/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
34 VkResult VKAPI
vkCreateShader(
36 const VkShaderCreateInfo
* pCreateInfo
,
39 struct anv_device
*device
= (struct anv_device
*) _device
;
40 struct anv_shader
*shader
;
42 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_SHADER_CREATE_INFO
);
44 shader
= anv_device_alloc(device
, sizeof(*shader
) + pCreateInfo
->codeSize
, 8,
45 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
47 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
49 shader
->size
= pCreateInfo
->codeSize
;
50 memcpy(shader
->data
, pCreateInfo
->pCode
, shader
->size
);
52 *pShader
= (VkShader
) shader
;
60 emit_vertex_input(struct anv_pipeline
*pipeline
, VkPipelineVertexInputCreateInfo
*info
)
62 const uint32_t num_dwords
= 1 + info
->attributeCount
* 2;
64 bool instancing_enable
[32];
66 for (uint32_t i
= 0; i
< info
->bindingCount
; i
++) {
67 const VkVertexInputBindingDescription
*desc
=
68 &info
->pVertexBindingDescriptions
[i
];
70 pipeline
->binding_stride
[desc
->binding
] = desc
->strideInBytes
;
72 /* Step rate is programmed per vertex element (attribute), not
73 * binding. Set up a map of which bindings step per instance, for
74 * reference by vertex element setup. */
75 switch (desc
->stepRate
) {
77 case VK_VERTEX_INPUT_STEP_RATE_VERTEX
:
78 instancing_enable
[desc
->binding
] = false;
80 case VK_VERTEX_INPUT_STEP_RATE_INSTANCE
:
81 instancing_enable
[desc
->binding
] = true;
86 p
= anv_batch_emitn(&pipeline
->batch
, num_dwords
,
87 GEN8_3DSTATE_VERTEX_ELEMENTS
);
89 for (uint32_t i
= 0; i
< info
->attributeCount
; i
++) {
90 const VkVertexInputAttributeDescription
*desc
=
91 &info
->pVertexAttributeDescriptions
[i
];
92 const struct anv_format
*format
= anv_format_for_vk_format(desc
->format
);
94 struct GEN8_VERTEX_ELEMENT_STATE element
= {
95 .VertexBufferIndex
= desc
->location
,
97 .SourceElementFormat
= format
->format
,
98 .EdgeFlagEnable
= false,
99 .SourceElementOffset
= desc
->offsetInBytes
,
100 .Component0Control
= VFCOMP_STORE_SRC
,
101 .Component1Control
= format
->channels
>= 2 ? VFCOMP_STORE_SRC
: VFCOMP_STORE_0
,
102 .Component2Control
= format
->channels
>= 3 ? VFCOMP_STORE_SRC
: VFCOMP_STORE_0
,
103 .Component3Control
= format
->channels
>= 4 ? VFCOMP_STORE_SRC
: VFCOMP_STORE_1_FP
105 GEN8_VERTEX_ELEMENT_STATE_pack(NULL
, &p
[1 + i
* 2], &element
);
107 anv_batch_emit(&pipeline
->batch
, GEN8_3DSTATE_VF_INSTANCING
,
108 .InstancingEnable
= instancing_enable
[desc
->binding
],
109 .VertexElementIndex
= i
,
110 /* Vulkan so far doesn't have an instance divisor, so
111 * this is always 1 (ignored if not instancing). */
112 .InstanceDataStepRate
= 1);
115 anv_batch_emit(&pipeline
->batch
, GEN8_3DSTATE_VF_SGVS
,
116 .VertexIDEnable
= pipeline
->vs_prog_data
.uses_vertexid
,
117 .VertexIDComponentNumber
= 2,
118 .VertexIDElementOffset
= info
->bindingCount
,
119 .InstanceIDEnable
= pipeline
->vs_prog_data
.uses_instanceid
,
120 .InstanceIDComponentNumber
= 3,
121 .InstanceIDElementOffset
= info
->bindingCount
);
125 emit_ia_state(struct anv_pipeline
*pipeline
, VkPipelineIaStateCreateInfo
*info
)
127 static const uint32_t vk_to_gen_primitive_type
[] = {
128 [VK_PRIMITIVE_TOPOLOGY_POINT_LIST
] = _3DPRIM_POINTLIST
,
129 [VK_PRIMITIVE_TOPOLOGY_LINE_LIST
] = _3DPRIM_LINELIST
,
130 [VK_PRIMITIVE_TOPOLOGY_LINE_STRIP
] = _3DPRIM_LINESTRIP
,
131 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST
] = _3DPRIM_TRILIST
,
132 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP
] = _3DPRIM_TRISTRIP
,
133 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN
] = _3DPRIM_TRIFAN
,
134 [VK_PRIMITIVE_TOPOLOGY_LINE_LIST_ADJ
] = _3DPRIM_LINELIST_ADJ
,
135 [VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_ADJ
] = _3DPRIM_LISTSTRIP_ADJ
,
136 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_ADJ
] = _3DPRIM_TRILIST_ADJ
,
137 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_ADJ
] = _3DPRIM_TRISTRIP_ADJ
,
138 [VK_PRIMITIVE_TOPOLOGY_PATCH
] = _3DPRIM_PATCHLIST_1
141 anv_batch_emit(&pipeline
->batch
, GEN8_3DSTATE_VF
,
142 .IndexedDrawCutIndexEnable
= info
->primitiveRestartEnable
,
143 .CutIndex
= info
->primitiveRestartIndex
);
144 anv_batch_emit(&pipeline
->batch
, GEN8_3DSTATE_VF_TOPOLOGY
,
145 .PrimitiveTopologyType
= vk_to_gen_primitive_type
[info
->topology
]);
149 emit_rs_state(struct anv_pipeline
*pipeline
, VkPipelineRsStateCreateInfo
*info
)
151 static const uint32_t vk_to_gen_cullmode
[] = {
152 [VK_CULL_MODE_NONE
] = CULLMODE_NONE
,
153 [VK_CULL_MODE_FRONT
] = CULLMODE_FRONT
,
154 [VK_CULL_MODE_BACK
] = CULLMODE_BACK
,
155 [VK_CULL_MODE_FRONT_AND_BACK
] = CULLMODE_BOTH
158 static const uint32_t vk_to_gen_fillmode
[] = {
159 [VK_FILL_MODE_POINTS
] = RASTER_POINT
,
160 [VK_FILL_MODE_WIREFRAME
] = RASTER_WIREFRAME
,
161 [VK_FILL_MODE_SOLID
] = RASTER_SOLID
164 static const uint32_t vk_to_gen_front_face
[] = {
165 [VK_FRONT_FACE_CCW
] = CounterClockwise
,
166 [VK_FRONT_FACE_CW
] = Clockwise
169 static const uint32_t vk_to_gen_coordinate_origin
[] = {
170 [VK_COORDINATE_ORIGIN_UPPER_LEFT
] = UPPERLEFT
,
171 [VK_COORDINATE_ORIGIN_LOWER_LEFT
] = LOWERLEFT
174 struct GEN8_3DSTATE_SF sf
= {
175 GEN8_3DSTATE_SF_header
,
176 .ViewportTransformEnable
= true,
177 .TriangleStripListProvokingVertexSelect
=
178 info
->provokingVertex
== VK_PROVOKING_VERTEX_FIRST
? 0 : 2,
179 .LineStripListProvokingVertexSelect
=
180 info
->provokingVertex
== VK_PROVOKING_VERTEX_FIRST
? 0 : 1,
181 .TriangleFanProvokingVertexSelect
=
182 info
->provokingVertex
== VK_PROVOKING_VERTEX_FIRST
? 0 : 2,
183 .PointWidthSource
= info
->programPointSize
? Vertex
: State
,
186 /* bool32_t rasterizerDiscardEnable; */
189 GEN8_3DSTATE_SF_pack(NULL
, pipeline
->state_sf
, &sf
);
191 anv_batch_emit(&pipeline
->batch
, GEN8_3DSTATE_RASTER
,
192 .FrontWinding
= vk_to_gen_front_face
[info
->frontFace
],
193 .CullMode
= vk_to_gen_cullmode
[info
->cullMode
],
194 .FrontFaceFillMode
= vk_to_gen_fillmode
[info
->fillMode
],
195 .BackFaceFillMode
= vk_to_gen_fillmode
[info
->fillMode
],
196 .ScissorRectangleEnable
= true,
197 .ViewportZClipTestEnable
= info
->depthClipEnable
);
199 anv_batch_emit(&pipeline
->batch
, GEN8_3DSTATE_SBE
,
200 .ForceVertexURBEntryReadLength
= false,
201 .ForceVertexURBEntryReadOffset
= false,
202 .PointSpriteTextureCoordinateOrigin
=
203 vk_to_gen_coordinate_origin
[info
->pointOrigin
],
204 .NumberofSFOutputAttributes
=
205 pipeline
->wm_prog_data
.num_varying_inputs
);
209 VkResult VKAPI
vkCreateGraphicsPipeline(
211 const VkGraphicsPipelineCreateInfo
* pCreateInfo
,
212 VkPipeline
* pPipeline
)
214 struct anv_device
*device
= (struct anv_device
*) _device
;
215 struct anv_pipeline
*pipeline
;
216 const struct anv_common
*common
;
217 VkPipelineShaderStageCreateInfo
*shader_create_info
;
218 VkPipelineIaStateCreateInfo
*ia_info
;
219 VkPipelineRsStateCreateInfo
*rs_info
;
220 VkPipelineVertexInputCreateInfo
*vi_info
;
222 uint32_t offset
, length
;
224 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO
);
226 pipeline
= anv_device_alloc(device
, sizeof(*pipeline
), 8,
227 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
228 if (pipeline
== NULL
)
229 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
231 pipeline
->device
= device
;
232 pipeline
->layout
= (struct anv_pipeline_layout
*) pCreateInfo
->layout
;
233 memset(pipeline
->shaders
, 0, sizeof(pipeline
->shaders
));
234 result
= anv_batch_init(&pipeline
->batch
, device
);
235 if (result
!= VK_SUCCESS
)
238 for (common
= pCreateInfo
->pNext
; common
; common
= common
->pNext
) {
239 switch (common
->sType
) {
240 case VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_CREATE_INFO
:
241 vi_info
= (VkPipelineVertexInputCreateInfo
*) common
;
243 case VK_STRUCTURE_TYPE_PIPELINE_IA_STATE_CREATE_INFO
:
244 ia_info
= (VkPipelineIaStateCreateInfo
*) common
;
246 case VK_STRUCTURE_TYPE_PIPELINE_TESS_STATE_CREATE_INFO
:
247 case VK_STRUCTURE_TYPE_PIPELINE_VP_STATE_CREATE_INFO
:
249 case VK_STRUCTURE_TYPE_PIPELINE_RS_STATE_CREATE_INFO
:
250 rs_info
= (VkPipelineRsStateCreateInfo
*) common
;
252 case VK_STRUCTURE_TYPE_PIPELINE_MS_STATE_CREATE_INFO
:
253 case VK_STRUCTURE_TYPE_PIPELINE_CB_STATE_CREATE_INFO
:
254 case VK_STRUCTURE_TYPE_PIPELINE_DS_STATE_CREATE_INFO
:
255 case VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO
:
256 shader_create_info
= (VkPipelineShaderStageCreateInfo
*) common
;
257 pipeline
->shaders
[shader_create_info
->shader
.stage
] =
258 (struct anv_shader
*) shader_create_info
->shader
.shader
;
265 pipeline
->use_repclear
= false;
267 anv_compiler_run(device
->compiler
, pipeline
);
269 /* FIXME: The compiler dead-codes FS inputs when we don't have a VS, so we
270 * hard code this to num_attributes - 2. This is because the attributes
271 * include VUE header and position, which aren't counted as varying
273 if (pipeline
->vs_simd8
== NO_KERNEL
)
274 pipeline
->wm_prog_data
.num_varying_inputs
= vi_info
->attributeCount
- 2;
276 emit_vertex_input(pipeline
, vi_info
);
277 emit_ia_state(pipeline
, ia_info
);
278 emit_rs_state(pipeline
, rs_info
);
280 anv_batch_emit(&pipeline
->batch
, GEN8_3DSTATE_WM
,
281 .StatisticsEnable
= true,
282 .LineEndCapAntialiasingRegionWidth
= _05pixels
,
283 .LineAntialiasingRegionWidth
= _10pixels
,
284 .EarlyDepthStencilControl
= NORMAL
,
285 .ForceThreadDispatchEnable
= NORMAL
,
286 .PointRasterizationRule
= RASTRULE_UPPER_RIGHT
,
287 .BarycentricInterpolationMode
=
288 pipeline
->wm_prog_data
.barycentric_interp_modes
);
290 uint32_t samples
= 1;
291 uint32_t log2_samples
= __builtin_ffs(samples
) - 1;
292 bool enable_sampling
= samples
> 1 ? true : false;
294 anv_batch_emit(&pipeline
->batch
, GEN8_3DSTATE_MULTISAMPLE
,
295 .PixelPositionOffsetEnable
= enable_sampling
,
296 .PixelLocation
= CENTER
,
297 .NumberofMultisamples
= log2_samples
);
299 anv_batch_emit(&pipeline
->batch
, GEN8_3DSTATE_URB_VS
,
300 .VSURBStartingAddress
= pipeline
->urb
.vs_start
,
301 .VSURBEntryAllocationSize
= pipeline
->urb
.vs_size
- 1,
302 .VSNumberofURBEntries
= pipeline
->urb
.nr_vs_entries
);
304 anv_batch_emit(&pipeline
->batch
, GEN8_3DSTATE_URB_GS
,
305 .GSURBStartingAddress
= pipeline
->urb
.gs_start
,
306 .GSURBEntryAllocationSize
= pipeline
->urb
.gs_size
- 1,
307 .GSNumberofURBEntries
= pipeline
->urb
.nr_gs_entries
);
309 anv_batch_emit(&pipeline
->batch
, GEN8_3DSTATE_URB_HS
,
310 .HSURBStartingAddress
= pipeline
->urb
.vs_start
,
311 .HSURBEntryAllocationSize
= 0,
312 .HSNumberofURBEntries
= 0);
314 anv_batch_emit(&pipeline
->batch
, GEN8_3DSTATE_URB_DS
,
315 .DSURBStartingAddress
= pipeline
->urb
.vs_start
,
316 .DSURBEntryAllocationSize
= 0,
317 .DSNumberofURBEntries
= 0);
319 const struct brw_gs_prog_data
*gs_prog_data
= &pipeline
->gs_prog_data
;
321 length
= (gs_prog_data
->base
.vue_map
.num_slots
+ 1) / 2 - offset
;
323 if (pipeline
->gs_vec4
== NO_KERNEL
)
324 anv_batch_emit(&pipeline
->batch
, GEN8_3DSTATE_GS
, .Enable
= false);
326 anv_batch_emit(&pipeline
->batch
, GEN8_3DSTATE_GS
,
327 .SingleProgramFlow
= false,
328 .KernelStartPointer
= pipeline
->gs_vec4
,
329 .VectorMaskEnable
= Vmask
,
331 .BindingTableEntryCount
= 0,
332 .ExpectedVertexCount
= pipeline
->gs_vertex_count
,
334 .PerThreadScratchSpace
= 0,
335 .ScratchSpaceBasePointer
= 0,
337 .OutputVertexSize
= gs_prog_data
->output_vertex_size_hwords
* 2 - 1,
338 .OutputTopology
= gs_prog_data
->output_topology
,
339 .VertexURBEntryReadLength
= gs_prog_data
->base
.urb_read_length
,
340 .DispatchGRFStartRegisterForURBData
=
341 gs_prog_data
->base
.base
.dispatch_grf_start_reg
,
343 .MaximumNumberofThreads
= device
->info
.max_gs_threads
,
344 .ControlDataHeaderSize
= gs_prog_data
->control_data_header_size_hwords
,
345 //pipeline->gs_prog_data.dispatch_mode |
346 .StatisticsEnable
= true,
347 .IncludePrimitiveID
= gs_prog_data
->include_primitive_id
,
348 .ReorderMode
= TRAILING
,
351 .ControlDataFormat
= gs_prog_data
->control_data_format
,
353 /* FIXME: mesa sets this based on ctx->Transform.ClipPlanesEnabled:
354 * UserClipDistanceClipTestEnableBitmask_3DSTATE_GS(v)
355 * UserClipDistanceCullTestEnableBitmask(v)
358 .VertexURBEntryOutputReadOffset
= offset
,
359 .VertexURBEntryOutputLength
= length
);
361 //trp_generate_blend_hw_cmds(batch, pipeline);
363 const struct brw_vue_prog_data
*vue_prog_data
= &pipeline
->vs_prog_data
.base
;
364 /* Skip the VUE header and position slots */
366 length
= (vue_prog_data
->vue_map
.num_slots
+ 1) / 2 - offset
;
368 if (pipeline
->vs_simd8
== NO_KERNEL
)
369 anv_batch_emit(&pipeline
->batch
, GEN8_3DSTATE_VS
,
370 .FunctionEnable
= false,
371 .VertexURBEntryOutputReadOffset
= 1,
372 /* Even if VS is disabled, SBE still gets the amount of
373 * vertex data to read from this field. We use attribute
374 * count - 1, as we don't count the VUE header here. */
375 .VertexURBEntryOutputLength
=
376 DIV_ROUND_UP(vi_info
->attributeCount
- 1, 2));
378 anv_batch_emit(&pipeline
->batch
, GEN8_3DSTATE_VS
,
379 .KernelStartPointer
= pipeline
->vs_simd8
,
380 .SingleVertexDispatch
= Multiple
,
381 .VectorMaskEnable
= Dmask
,
383 .BindingTableEntryCount
=
384 vue_prog_data
->base
.binding_table
.size_bytes
/ 4,
385 .ThreadDispatchPriority
= Normal
,
386 .FloatingPointMode
= IEEE754
,
387 .IllegalOpcodeExceptionEnable
= false,
388 .AccessesUAV
= false,
389 .SoftwareExceptionEnable
= false,
391 /* FIXME: pointer needs to be assigned outside as it aliases
392 * PerThreadScratchSpace.
394 .ScratchSpaceBasePointer
= 0,
395 .PerThreadScratchSpace
= 0,
397 .DispatchGRFStartRegisterForURBData
=
398 vue_prog_data
->base
.dispatch_grf_start_reg
,
399 .VertexURBEntryReadLength
= vue_prog_data
->urb_read_length
,
400 .VertexURBEntryReadOffset
= 0,
402 .MaximumNumberofThreads
= device
->info
.max_vs_threads
- 1,
403 .StatisticsEnable
= false,
404 .SIMD8DispatchEnable
= true,
405 .VertexCacheDisable
= ia_info
->disableVertexReuse
,
406 .FunctionEnable
= true,
408 .VertexURBEntryOutputReadOffset
= offset
,
409 .VertexURBEntryOutputLength
= length
,
410 .UserClipDistanceClipTestEnableBitmask
= 0,
411 .UserClipDistanceCullTestEnableBitmask
= 0);
413 const struct brw_wm_prog_data
*wm_prog_data
= &pipeline
->wm_prog_data
;
414 uint32_t ksp0
, ksp2
, grf_start0
, grf_start2
;
418 if (pipeline
->ps_simd8
!= NO_KERNEL
) {
419 ksp0
= pipeline
->ps_simd8
;
420 grf_start0
= wm_prog_data
->base
.dispatch_grf_start_reg
;
421 if (pipeline
->ps_simd16
!= NO_KERNEL
) {
422 ksp2
= pipeline
->ps_simd16
;
423 grf_start2
= wm_prog_data
->dispatch_grf_start_reg_16
;
425 } else if (pipeline
->ps_simd16
!= NO_KERNEL
) {
426 ksp0
= pipeline
->ps_simd16
;
427 grf_start0
= wm_prog_data
->dispatch_grf_start_reg_16
;
429 unreachable("no ps shader");
432 anv_batch_emit(&pipeline
->batch
, GEN8_3DSTATE_PS
,
433 .KernelStartPointer0
= ksp0
,
435 .SingleProgramFlow
= false,
436 .VectorMaskEnable
= true,
439 .ScratchSpaceBasePointer
= 0,
440 .PerThreadScratchSpace
= 0,
442 .MaximumNumberofThreadsPerPSD
= 64 - 2,
443 .PositionXYOffsetSelect
= wm_prog_data
->uses_pos_offset
?
444 POSOFFSET_SAMPLE
: POSOFFSET_NONE
,
445 .PushConstantEnable
= wm_prog_data
->base
.nr_params
> 0,
446 ._8PixelDispatchEnable
= pipeline
->ps_simd8
!= NO_KERNEL
,
447 ._16PixelDispatchEnable
= pipeline
->ps_simd16
!= NO_KERNEL
,
448 ._32PixelDispatchEnable
= false,
450 .DispatchGRFStartRegisterForConstantSetupData0
= grf_start0
,
451 .DispatchGRFStartRegisterForConstantSetupData1
= 0,
452 .DispatchGRFStartRegisterForConstantSetupData2
= grf_start2
,
454 .KernelStartPointer1
= 0,
455 .KernelStartPointer2
= ksp2
);
457 bool per_sample_ps
= false;
458 anv_batch_emit(&pipeline
->batch
, GEN8_3DSTATE_PS_EXTRA
,
459 .PixelShaderValid
= true,
460 .PixelShaderKillsPixel
= wm_prog_data
->uses_kill
,
461 .PixelShaderComputedDepthMode
= wm_prog_data
->computed_depth_mode
,
462 .AttributeEnable
= wm_prog_data
->num_varying_inputs
> 0,
463 .oMaskPresenttoRenderTarget
= wm_prog_data
->uses_omask
,
464 .PixelShaderIsPerSample
= per_sample_ps
);
466 *pPipeline
= (VkPipeline
) pipeline
;
471 anv_device_free(device
, pipeline
);
477 anv_pipeline_destroy(struct anv_pipeline
*pipeline
)
479 anv_compiler_free(pipeline
);
480 anv_batch_finish(&pipeline
->batch
, pipeline
->device
);
481 anv_device_free(pipeline
->device
, pipeline
);
486 VkResult VKAPI
vkCreateGraphicsPipelineDerivative(
488 const VkGraphicsPipelineCreateInfo
* pCreateInfo
,
489 VkPipeline basePipeline
,
490 VkPipeline
* pPipeline
)
492 return VK_UNSUPPORTED
;
495 VkResult VKAPI
vkCreateComputePipeline(
497 const VkComputePipelineCreateInfo
* pCreateInfo
,
498 VkPipeline
* pPipeline
)
500 return VK_UNSUPPORTED
;
503 VkResult VKAPI
vkStorePipeline(
509 return VK_UNSUPPORTED
;
512 VkResult VKAPI
vkLoadPipeline(
516 VkPipeline
* pPipeline
)
518 return VK_UNSUPPORTED
;
521 VkResult VKAPI
vkLoadPipelineDerivative(
525 VkPipeline basePipeline
,
526 VkPipeline
* pPipeline
)
528 return VK_UNSUPPORTED
;
531 // Pipeline layout functions
533 VkResult VKAPI
vkCreatePipelineLayout(
535 const VkPipelineLayoutCreateInfo
* pCreateInfo
,
536 VkPipelineLayout
* pPipelineLayout
)
538 struct anv_device
*device
= (struct anv_device
*) _device
;
539 struct anv_pipeline_layout
*layout
;
540 struct anv_pipeline_layout_entry
*entry
;
544 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO
);
547 for (uint32_t i
= 0; i
< pCreateInfo
->descriptorSetCount
; i
++) {
548 struct anv_descriptor_set_layout
*set_layout
=
549 (struct anv_descriptor_set_layout
*) pCreateInfo
->pSetLayouts
[i
];
550 for (uint32_t j
= 0; j
< set_layout
->count
; j
++)
551 total
+= set_layout
->total
;
554 size
= sizeof(*layout
) + total
* sizeof(layout
->entries
[0]);
555 layout
= anv_device_alloc(device
, size
, 8,
556 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
558 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
560 entry
= layout
->entries
;
561 for (uint32_t s
= 0; s
< VK_NUM_SHADER_STAGE
; s
++) {
562 layout
->stage
[s
].entries
= entry
;
564 for (uint32_t i
= 0; i
< pCreateInfo
->descriptorSetCount
; i
++) {
565 struct anv_descriptor_set_layout
*set_layout
=
566 (struct anv_descriptor_set_layout
*) pCreateInfo
->pSetLayouts
[i
];
567 for (uint32_t j
= 0; j
< set_layout
->count
; j
++)
568 if (set_layout
->bindings
[j
].mask
& (1 << s
)) {
569 entry
->type
= set_layout
->bindings
[j
].type
;
576 layout
->stage
[s
].count
= entry
- layout
->stage
[s
].entries
;
579 *pPipelineLayout
= (VkPipelineLayout
) layout
;